content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses 1) |
---|---|---|---|---|---|---|---|---|
from unittest import TestCase
from unittest.mock import patch
from xmlschema import XMLSchemaException
from xml.dom.minidom import Element, Document, parse
class TestXmlParserInstructionspath(TestCase):
@patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
    def test_instructionspath(self, initmetadata_mock, schema_mock, isfile_mock, init_instructions_mock,
                              placeholder_mock):
"""
Will return the instructions file path set in __init__
"""
from instructionparsers.xmlparser import XmlParser
expected_file = 'test_instructions.xml'
isfile_mock.return_value = True
xml_parser = XmlParser(instructionspath=expected_file, protocol=None)
actual_file = xml_parser.instructionspath
self.assertEqual(expected_file, actual_file)
@patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
@patch('instructionparsers.xmlparser.XmlParser._init_instructions')
@patch('instructionparsers.xmlparser.path.isfile')
@patch('instructionparsers.xmlparser.XmlParser._validate_schema')
@patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
    def test_instructionspath_instruction_file_not_there(self, initmetadata_mock, schema_mock, isfile_mock,
                                                         init_instructions_mock, placeholder_mock):
"""
        Should raise a FileNotFoundError when the instructions file does not exist.
"""
from instructionparsers.xmlparser import XmlParser
expected_file = 'test_instructions.xml'
isfile_mock.return_value = True
xml_parser = XmlParser(instructionspath=expected_file, protocol=None)
isfile_mock.return_value = False
with self.assertRaises(FileNotFoundError):
xml_parser.instructionspath = expected_file
class TestXmlParserValidate_schema(TestCase):
def test__validate_schema_valid_instructions(self):
"""
Should do nothing.
"""
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/valid_instructions.xml')
except XMLSchemaException:
self.fail("_validate_schema should not raise exception with valid xml instructions.")
def test__validate_schema_invalid_instructions(self):
"""
Should raise exception.
"""
from instructionparsers.xmlparser import XmlParser
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
self.assertRaises(XMLSchemaException,
XmlParser._validate_schema, './instructions/invalid_instructions.xml')
def test__validate_schema_minimal_valid_instructions(self):
"""
Should do nothing.
"""
from instructionparsers.xmlparser import XmlParser
try:
XmlParser.XMLSCHEMA_PATH = '../instructionparsers/xmlparser.xsd'
XmlParser._validate_schema(xmlfilepath='./instructions/minimal_valid_instructions.xml')
except XMLSchemaException:
self.fail("_validate_schema should not raise exception with valid xml instructions.")
class TestXmlParserInitializemetadata(TestCase):
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__initializemetadata_valid_instructions(self, path_mock):
"""
Should initialize member 'metadata' with all elements which have the attribute "title".
"""
metadata = ('Examiner', 'Assignment', 'Client', 'Description of Artefact', 'Task Description')
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
xml_parser._initializemetadata()
for data in metadata:
with self.subTest(data):
self.assertIsNotNone(xml_parser.metadata[data])
class TestXmlParserInitInstructions(TestCase):
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__init_instructions_valid_instructions(self, path_mock):
"""
Should initialize collectors for all XML elements which have the attribute "module".
"""
from instructionparsers.xmlparser import XmlParser
from instructionparsers.wrapper import InstructionWrapper
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
instructionstree = xml_parser._init_instructions()
self.assertIsInstance(instructionstree, InstructionWrapper)
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__init_instructions_returns_root_tree(self, path_mock):
"""
        Should return the instruction tree starting with the "Root" node.
"""
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
instructionstree = xml_parser._init_instructions()
self.assertEqual(instructionstree.instructionname,
'Root')
self.assertEqual(instructionstree.instructionchildren[0].instructionname,
'LocalHost')
self.assertEqual(instructionstree.instructionchildren[0].instructionchildren[0].instructionname,
'MachineName')
self.assertEqual(instructionstree.instructionchildren[1].instructionname, 'LocalHost')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[0].instructionname,
'OSName')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[1].instructionname,
'OSVersion')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[2].instructionname,
'OSTimezone')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[3].instructionname,
'AllUsernames')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[4].instructionname,
'CurrentUser')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[5].instructionname,
'SudoVersion')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[6].instructionname,
'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[7].instructionname,
'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[8].instructionname,
'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[9].instructionname,
'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[10].instructionname,
'FileExistence')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[11].instructionname,
'ShellHistoryOfAllUsers')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[12].instructionname,
'NVRAMCollector')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[13].instructionname,
'TimeFromNTPServer')
self.assertEqual(instructionstree.instructionchildren[1].instructionchildren[14].instructionname,
'LocalTime')
class TestXmlParserGetFirstInstructionElement(TestCase):
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_first_instruction_element(self, path_mock):
"""
Should return the xml element with the title "Root".
"""
from instructionparsers.xmlparser import XmlParser
instructions = './instructions/valid_instructions.xml'
xml_parser = XmlParser(instructionspath=instructions, protocol=None)
xml_parser._instructionspath = instructions
element = xml_parser._get_first_instruction_element()
self.assertIsInstance(element, Element)
self.assertEqual(element.localName, 'Root')
class TestXmlParser(TestCase):
def test__get_placeholder_name(self):
"""
        If the XML element contains the attribute "placeholder", the method should return the value of that attribute.
"""
from instructionparsers.xmlparser import XmlParser
document = Document()
element = document.createElement('Demo')
element.setAttribute(XmlParser.PLACEHOLDERNAME_ATTRIBUTE, "test")
result = XmlParser._get_placeholder_name(element)
self.assertEqual(result, 'test')
def test__get_placeholder_name_no_placeholder(self):
"""
        If the XML element does not contain the attribute "placeholder", the method should return an empty string.
"""
from instructionparsers.xmlparser import XmlParser
element = Element('Demo')
result = XmlParser._get_placeholder_name(element)
self.assertEqual(result, '')
class TestXmlParserGetParameterAttributes(TestCase):
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict(self, path_mock):
"""
        Should return a UserDict.
"""
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse("./instructions/instructions_stub.xml").documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertIsInstance(actual, UserDict)
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_return_userdict_with_2_entries(self, path_mock):
"""
        Should return a dict with two entries.
"""
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse("./instructions/instructions_stub.xml").documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertEqual(len(actual), 2)
@patch('instructionparsers.xmlparser.XmlParser.instructionspath')
def test__get_parameter_attributes_should_return_none_special_attributes(self, path_mock):
"""
        Should return a dictionary with a "users_with_homedir" key and a "properties" key.
"""
from instructionparsers.xmlparser import XmlParser
from collections import UserDict
elem = parse("./instructions/instructions_stub.xml").documentElement.childNodes[1]
actual = XmlParser._get_parameter_attributes(attributes=elem.attributes)
self.assertIsNotNone(actual.get("properties"))
self.assertIsNotNone(actual.get("users_with_homedir"))
| 45.753906 | 115 | 0.712029 | [
"CC0-1.0"
] | 3Peso/mosk | tests/test_xmlparser.py | 11,713 | Python |
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-ShellCommon-StartLayoutPopulation
GUID : 97ca8142-10b1-4baa-9fbb-70a7d11231c3
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1_0(Etw):
pattern = Struct(
"collectionName" / WString,
"initializationReason" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=3, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_3_0(Etw):
pattern = Struct(
"layoutSelectionSerializedString" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=5, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_5_0(Etw):
pattern = Struct(
"TaskHResultValue" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=7, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_7_0(Etw):
pattern = Struct(
"layoutProviderName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=8, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_8_0(Etw):
pattern = Struct(
"layoutProviderName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=11, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_11_0(Etw):
pattern = Struct(
"layoutProviderName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=12, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_12_0(Etw):
pattern = Struct(
"layoutProviderName" / WString,
"HResultValue" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=15, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_15_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=16, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_16_0(Etw):
pattern = Struct(
"tileIdentifier" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=17, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_17_0(Etw):
pattern = Struct(
"tileIdentifier" / WString,
"failureDetails" / CString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=18, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_18_0(Etw):
pattern = Struct(
"TaskHResultValue" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=19, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_19_0(Etw):
pattern = Struct(
"tileData" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=21, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_21_0(Etw):
pattern = Struct(
"failureDetails" / CString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=22, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_22_0(Etw):
pattern = Struct(
"TaskHResultValue" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=23, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_23_0(Etw):
pattern = Struct(
"tileIdentifier" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=28, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_28_0(Etw):
pattern = Struct(
"tileIdentifier" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=29, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_29_0(Etw):
pattern = Struct(
"tileAumid" / WString,
"appSize" / Int64ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=30, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_30_0(Etw):
pattern = Struct(
"tileAumid" / WString,
"appSize" / Int64ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=31, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_31_0(Etw):
pattern = Struct(
"appSize" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=32, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_32_0(Etw):
pattern = Struct(
"tileIdentifier" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=33, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_33_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=35, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_35_0(Etw):
pattern = Struct(
"TaskHResultValue" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=38, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_38_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=39, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_39_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=41, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_41_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=42, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_42_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=45, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_45_0(Etw):
pattern = Struct(
"containerName" / WString,
"containerXPosition" / Int32ul,
"containerYPosition" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=46, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_46_0(Etw):
pattern = Struct(
"containerName" / WString,
"containerXPosition" / Int32ul,
"containerYPosition" / Int32ul,
"failureDetails" / CString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=49, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_49_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=52, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_52_0(Etw):
pattern = Struct(
"tileData" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=53, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_53_0(Etw):
pattern = Struct(
"tileIdentifier" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=54, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_54_0(Etw):
pattern = Struct(
"groupData" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=55, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_55_0(Etw):
pattern = Struct(
"groupData" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=56, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_56_0(Etw):
pattern = Struct(
"containerName" / WString,
"containerXPosition" / Int32ul,
"containerYPosition" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=57, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_57_0(Etw):
pattern = Struct(
"containerName" / WString,
"containerXPosition" / Int32ul,
"containerYPosition" / Int32ul,
"failureDetails" / CString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=58, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_58_0(Etw):
pattern = Struct(
"TaskHResultValue" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=60, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_60_0(Etw):
pattern = Struct(
"TaskHResultValue" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=62, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_62_0(Etw):
pattern = Struct(
"TaskHResultValue" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=63, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_63_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=64, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_64_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=65, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_65_0(Etw):
pattern = Struct(
"value1" / WString,
"value2" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1002, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1002_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1004, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1004_0(Etw):
pattern = Struct(
"itemId" / WString,
"itemName" / WString,
"groupCount" / Int32ul,
"tileCount" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1005, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1005_0(Etw):
pattern = Struct(
"itemId" / WString,
"itemName" / WString,
"groupCount" / Int32ul,
"tileCount" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1100, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1100_0(Etw):
pattern = Struct(
"itemId" / Guid,
"containerId" / Guid,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1101, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1101_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid,
"containerId" / Guid,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1102, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1102_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid,
"containerId" / Guid,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1103, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1103_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid,
"containerId" / Guid,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1104, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1104_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid,
"containerId" / Guid,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1105, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1105_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid,
"containerId" / Guid,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1106, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1106_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1107, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1107_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1200, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1200_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1202, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1202_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1203, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1203_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1204, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1204_0(Etw):
pattern = Struct(
"itemId" / Guid,
"containerId" / Guid,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1205, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1205_0(Etw):
pattern = Struct(
"itemId" / Guid,
"containerId" / Guid,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1206, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1206_0(Etw):
pattern = Struct(
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1207, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1207_0(Etw):
pattern = Struct(
"itemId" / Guid,
"containerId" / Guid,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1208, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1208_0(Etw):
pattern = Struct(
"itemId" / Guid,
"containerId" / Guid,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1209, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1209_0(Etw):
pattern = Struct(
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1250, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1250_0(Etw):
pattern = Struct(
"savedVersion" / Int64ul,
"itemId" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1252, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1252_0(Etw):
pattern = Struct(
"savedVersion" / Int64ul,
"itemId" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1253, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1253_0(Etw):
pattern = Struct(
"savedVersion" / Int64ul,
"itemId" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1300, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1300_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1301, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1301_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1303, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1303_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1400, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1400_0(Etw):
pattern = Struct(
"tileIdentifier" / WString,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1401, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1401_0(Etw):
pattern = Struct(
"tileIdentifier" / WString,
"collectionName" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1404, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1404_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1405, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1405_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1900, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1900_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1902, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1902_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1903, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1903_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1904, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1904_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1905, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1905_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1906, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1906_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2101, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2101_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2102, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2102_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid,
"savedVersion" / Int64ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2103, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2103_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2110, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2110_0(Etw):
pattern = Struct(
"itemName" / WString,
"size" / Int64ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2111, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2111_0(Etw):
pattern = Struct(
"itemName" / WString,
"size" / Int64ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2112, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2112_0(Etw):
pattern = Struct(
"itemName" / WString,
"size" / Int64ul,
"savedVersion" / Int64ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2150, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2150_0(Etw):
pattern = Struct(
"itemName" / WString,
"itemId" / Guid
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2151, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2151_0(Etw):
pattern = Struct(
"value" / WString
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2152, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2152_0(Etw):
pattern = Struct(
"packageFamilyName" / WString,
"InstallState" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2153, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2153_0(Etw):
pattern = Struct(
"packageFamilyName" / WString,
"InstallState" / Int32ul
)
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2154, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2154_0(Etw):
pattern = Struct(
"value" / WString
)
| 30.899576 | 123 | 0.72178 | [
"Apache-2.0"
] | IMULMUL/etl-parser | etl/parsers/etw/Microsoft_Windows_ShellCommon_StartLayoutPopulation.py | 21,846 | Python |
#!/usr/bin/env python
from setuptools import setup, find_packages
# Parse version number from pyglet/__init__.py:
with open('pyglet/__init__.py') as f:
info = {}
for line in f:
if line.startswith('version'):
exec(line, info)
break
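# The loop above executes the matched line (e.g. a statement such as
# version = '1.5.0' -- illustrative value, not taken from the source tree)
# inside the 'info' dict, so info['version'] is available for setup_info below.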
setup_info = dict(
name='pyglet',
version=info['version'],
author='Alex Holkner',
author_email='[email protected]',
url='http://pyglet.readthedocs.org/en/latest/',
download_url='http://pypi.python.org/pypi/pyglet',
project_urls={
'Documentation': 'https://pyglet.readthedocs.io/en/latest',
'Source': 'https://github.com/pyglet/pyglet',
'Tracker': 'https://github.com/pyglet/pyglet/issues',
},
description='Cross-platform windowing and multimedia library',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
license='BSD',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: MacOS X',
'Environment :: Win32 (MS Windows)',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Games/Entertainment',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# Package info
packages=['pyglet'] + ['pyglet.' + pkg for pkg in find_packages('pyglet')],
# Add _ prefix to the names of temporary build dirs
options={'build': {'build_base': '_build'}, },
zip_safe=True,
)
setup(**setup_info)
| 33.775862 | 79 | 0.616641 | [
"BSD-3-Clause"
] | Ball-Man/pyglet | setup.py | 1,959 | Python |
# SPDX-License-Identifier: Apache-2.0
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class LessOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = LessOptions()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsLessOptions(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
@classmethod
def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# LessOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def Start(builder): builder.StartObject(0)
def LessOptionsStart(builder):
"""This method is deprecated. Please switch to Start."""
return Start(builder)
def End(builder): return builder.EndObject()
def LessOptionsEnd(builder):
"""This method is deprecated. Please switch to End."""
return End(builder) | 32.125 | 114 | 0.705837 | [
"Apache-2.0"
] | LoicDagnas/tensorflow-onnx | tf2onnx/tflite/LessOptions.py | 1,285 | Python |
import os.path
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
setup(
name='copydoc',
version='1.0.9',
author='NPR Visuals',
author_email='[email protected]',
url='https://github.com/nprapps/copydoc/',
description='Parse Google docs for use in content management',
long_description=read('README.rst'),
py_modules=('copydoc',),
license="MIT License",
keywords='google gdocs',
install_requires=[
'beautifulsoup4==4.4.1'
],
extras_require={
'dev': [
'Sphinx==1.5.6',
'nose2==0.5.0',
'tox==2.3.1',
'flake8==3.5.0'
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
]
)
| 27.416667 | 73 | 0.584347 | [
"MIT"
] | nprapps/copydoc | setup.py | 1,316 | Python |
from __future__ import print_function
import sys
import os
sys.path.append(os.path.abspath(".."))
from termux2d import Canvas, Palette, line, animate, COLOR_RED, COLOR_WHITE
import math
def __main__():
i = 0
height = 40
while True:
frame = []
frame.extend([(coords[0],coords[1],COLOR_WHITE) for coords in
line(0,
height,
180,
math.sin(math.radians(i)) * height + height)])
frame.extend([(x/2, height + math.sin(math.radians(x+i)) * height, COLOR_WHITE)
for x in range(0, 360, 2)])
yield frame
i += 2
if __name__ == '__main__':
animate(Canvas(), Palette(), __main__, 1./60)
| 23.151515 | 87 | 0.53534 | [
"MIT"
] | ericmux/termux2d | examples/sine_tracking.py | 764 | Python |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
#SESSION_COOKIE_SECURE = True
SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess'
| 26.571429 | 68 | 0.763441 | [
"MIT"
] | matale14/api-blueprint | config.py | 186 | Python |
"""
DIRBS module for utility classes and functions.
Copyright (c) 2018 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
import datetime
import logging
import hashlib
import json
import time
import copy
import io
import contextlib
import psycopg2
from psycopg2 import sql
from psycopg2.extras import NamedTupleCursor
from dirbs import db_schema_version as code_db_schema_version
import dirbs.metadata as metadata
from dirbs.config import ConfigParseException
class DatabaseSchemaException(Exception):
"""Custom exception class to indicate there was a problem validating the schema."""
def __init__(self, msg):
"""Constructor."""
super().__init__('DB schema check failure: {0}'.format(msg))
class DatabaseRoleCheckException(Exception):
"""Custom exception class to indicate the user does not have the correct roles for this job."""
def __init__(self, msg):
"""Constructor."""
super().__init__('DB role check failure: {0}'.format(msg))
class JSONEncoder(json.JSONEncoder):
"""Custom JSONEncoder class which serializes dates in ISO format."""
def default(self, obj):
"""Overrides JSONEncoder.default."""
if isinstance(obj, datetime.date):
return obj.isoformat()
return JSONEncoder.default(self, obj)
class LoggingNamedTupleCursor(NamedTupleCursor):
"""Named tuple cursor that logs to DIRBS."""
def __init__(self, *args, **kwargs):
"""Constructor."""
super().__init__(*args, **kwargs)
if self.name is not None:
# Default itersize to 100000 for named cursors
self.itersize = 100000
def execute(self, query, params=None):
"""Overrides NamedTupleCursor.execute."""
try:
return super(LoggingNamedTupleCursor, self).execute(query, params)
finally:
if self.query is not None:
logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8'))
def callproc(self, procname, params=None):
"""Overrides NamedTupleCursor.callproc."""
try:
return super(LoggingNamedTupleCursor, self).callproc(procname, params)
finally:
if self.query is not None:
logging.getLogger('dirbs.sql').log(logging.DEBUG, str(self.query, encoding='utf-8'))
@contextlib.contextmanager
def db_role_setter(conn, *, role_name):
"""Since we catch exceptions here and log, temporarily install a customised hook."""
with conn.cursor() as cursor:
cursor.execute('SHOW ROLE')
old_role = cursor.fetchone()[0]
cursor.execute('SET ROLE %s', [role_name])
yield role_name
cursor.execute('SET ROLE %s', [old_role])
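# Illustrative usage of db_role_setter (hypothetical statement; the role name must be one of
# the installed DIRBS roles). Statements inside the block run under the given role and the
# previous role is restored automatically on exit:
#
#     with db_role_setter(conn, role_name='dirbs_core_power_user'):
#         cursor.execute('ALTER TABLE my_table OWNER TO dirbs_core_power_user')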
class CodeProfiler(object):
"""Profile a block of code and store duration."""
def __enter__(self):
"""Python context manager support for use in with statement (on enter)."""
self.start = time.time()
return self
def __exit__(self, *args):
"""Python context manager support for use in with statement (on exit)."""
self.duration = int((time.time() - self.start) * 1000)
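# Illustrative usage of CodeProfiler (run_job and logger are hypothetical names);
# .duration holds the elapsed wall-clock time in milliseconds once the block exits:
#
#     with CodeProfiler() as cp:
#         run_job()
#     logger.debug('job took %d ms', cp.duration)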
def compute_md5_hash(file, buf_size=65536):
"""Utility method to generate a md5 hash of file."""
md5_hash = hashlib.md5()
while True:
data = file.read(buf_size)
if not data:
break
md5_hash.update(data)
return md5_hash.hexdigest()
def cachebusted_filename_from_contents(byte_array):
"""Utility method to generate a unique filename based on the hash of a given content array (of bytes)."""
return compute_md5_hash(io.BytesIO(byte_array))[:8]
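# Illustrative note: compute_md5_hash expects a file-like object (it calls .read()),
# so cachebusted_filename_from_contents wraps the byte array in io.BytesIO and keeps
# only the first 8 hex characters of the digest for use in generated filenames.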
def cli_db_params_from_dsn(dsn, user=None, database=None, port=None, host=None):
"""Convert DB-related command-line arguments from a DSN into a format appropriate for DIRBS CLI commands."""
db_args = []
db_args.append('--db-user={0}'.format(user if user is not None else dsn.get('user')))
db_args.append('--db-name={0}'.format(database if database is not None else dsn.get('database')))
db_args.append('--db-port={0}'.format(port if port is not None else dsn.get('port')))
db_args.append('--db-host={0}'.format(host if host is not None else dsn.get('host')))
return db_args
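# Illustrative example (hypothetical DSN values): a dsn of
#     {'user': 'dirbs', 'database': 'dirbs_db', 'port': 5432, 'host': 'localhost'}
# yields ['--db-user=dirbs', '--db-name=dirbs_db', '--db-port=5432', '--db-host=localhost'];
# explicit keyword arguments override the corresponding DSN entries.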
def create_db_connection(db_config, readonly=False, autocommit=False):
"""Creates a DB connection to the database.
Imports the config module, which results in the config being read from disk.
Changes to the config file made after this method has been called will not be read.
Calling entity should handle connection errors as appropriate.
"""
logger = logging.getLogger('dirbs.sql')
logger.debug('Attempting to connect to the database {0} on host {1}'.format(db_config.database, db_config.host))
# We hard-code 4 minutes idle keepalives, which is fairly aggressive, to avoid disconnections on VPNs, etc.
conn = psycopg2.connect('{0} keepalives=1 keepalives_idle=240'.format(db_config.connection_string),
cursor_factory=LoggingNamedTupleCursor)
conn.set_session(readonly=readonly, autocommit=autocommit)
logger.debug('Connection to database successful.')
return conn
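# Illustrative usage (assumes a parsed DIRBS config object with a db_config attribute):
#
#     conn = create_db_connection(config.db_config, readonly=True)
#     with conn.cursor() as cursor:
#         cursor.execute('SELECT 1')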
def verify_db_schema(conn, required_role):
"""Function that runs all DB verification checks."""
warn_if_db_superuser(conn)
verify_db_roles_installed(conn)
verify_db_role_for_job(conn, required_role)
verify_db_schema_version(conn)
verify_db_ownership(conn)
verify_hll_schema(conn)
verify_core_schema(conn)
verify_db_search_path(conn)
def warn_if_db_superuser(conn):
"""Warn if the current DB user is a PostgreSQL superuser."""
logger = logging.getLogger('dirbs.db')
if is_db_user_superuser(conn):
logger.warn('Running as PostgreSQL superuser -- for security reasons, we recommend running all '
'DIRBS tasks as a normal user')
def verify_db_roles_installed(conn):
"""Function used to verify whether roles have been installed in the DB."""
# The below is not a guaranteed check, but a heuristic
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cursor:
cursor.execute('SELECT 1 AS res FROM pg_roles WHERE rolname = \'dirbs_core_power_user\'')
if cursor.fetchone() is None:
logger.error('DIRBS Core roles have not been installed - run \'dirbs-db install_roles\' before '
'running \'dirbs-db install\'')
raise DatabaseSchemaException('DIRBS Core database roles have not been installed')
def verify_db_role_for_job(conn, expected_role):
"""Function used to verify that the current DB user is in the role expected for this job."""
if not is_db_user_dirbs_role(conn, expected_role):
role = conn.get_dsn_parameters().get('user')
raise DatabaseRoleCheckException('Current DB user {0} does not have required role: {1}. To fix this:'
'\n\t1. GRANT {1} TO {0};'.format(role, expected_role))
def verify_db_schema_version(conn):
"""Function used to check whether the DB schema version matches the code schema version."""
logger = logging.getLogger('dirbs.db')
version = query_db_schema_version(conn)
if version != code_db_schema_version:
if version is None:
logger.error('DB schema has not been installed via dirbs-db install!')
raise DatabaseSchemaException('No DB schema installed - perform a dirbs-db install first!')
else:
logger.error('DB schema version does not match code!')
logger.error('Code schema version: %d', code_db_schema_version)
logger.error('DB schema version: %d', version)
raise DatabaseSchemaException('Mismatch between code and DB schema versions - perform a dirbs-db upgrade!')
def verify_db_ownership(conn):
"""Function used to check whether DB ownership matches what we expect."""
logger = logging.getLogger('dirbs.db')
if query_db_ownership(conn) != 'dirbs_core_power_user':
        logger.error('Database is not owned by the dirbs_core_power_user group! Please run the '
                     'following as the current DB owner (whilst logged into the database):'
                     '\n\tALTER DATABASE <database> OWNER TO dirbs_core_power_user;')
raise DatabaseSchemaException('Incorrect database ownership!')
def verify_core_schema(conn):
"""Function used to check whether Core schema exists and has correct ownership."""
if not query_schema_existence(conn, 'core'):
raise DatabaseSchemaException('Missing schema \'core\' in DB. Was dirbs-db install run successfully?')
if query_schema_ownership(conn, 'core') != 'dirbs_core_power_user':
raise DatabaseSchemaException('Schema \'core\' is not owned by dirbs_core_power_user!')
def verify_hll_schema(conn):
"""Function used to check whether HLL schema exists and that extension is installed correctly."""
logger = logging.getLogger('dirbs.db')
if not query_schema_existence(conn, 'hll'):
logger.error('Schema \'hll\' does not exist. Please ensure the hll extension is installed and run the '
'following as a superuser whilst connected to this DB: '
'\n\t1. CREATE SCHEMA hll;'
'\n\t2. GRANT USAGE ON SCHEMA hll TO dirbs_core_base;'
'\n\t3. CREATE EXTENSION hll SCHEMA hll;')
raise DatabaseSchemaException('HLL schema not created!')
# Check if extension installed correctly by looking for hll.hll_print
with conn.cursor() as cursor:
try:
cursor.execute('SELECT pg_get_functiondef(\'hll.hll_print(hll.hll)\'::regprocedure)')
except psycopg2.ProgrammingError:
logger.error('The HLL extension is not installed correctly. Please issue the following as a superuser '
'whilst connected to this DB: '
'\n\tCREATE EXTENSION hll SCHEMA hll;')
raise DatabaseSchemaException('DB search_path does not include hll or extension not installed!')
def verify_db_search_path(conn):
"""Function used to check whether db_search_path is correct by looking for objects."""
logger = logging.getLogger('dirbs.db')
is_search_path_valid = True
with conn.cursor() as cursor:
cursor.execute('SELECT to_regclass(\'schema_version\')')
res = cursor.fetchone()[0]
if res is None:
is_search_path_valid = False
try:
cursor.execute('SELECT pg_get_functiondef(\'hll_print(hll)\'::regprocedure)')
except psycopg2.ProgrammingError:
is_search_path_valid = False
if not is_search_path_valid:
logger.error('The search_path for the database is not set correctly. Please issue the following '
'whilst connected to this DB: '
'\n\tALTER DATABASE <database> SET search_path TO core, hll;')
raise DatabaseSchemaException('DB search_path not set correctly!')
def query_db_schema_version(conn):
"""Function to fetch the DB version number from the database."""
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
try:
cur.execute('SELECT MAX(version) FROM schema_version') # noqa: Q440
return cur.fetchone()[0]
except psycopg2.ProgrammingError as ex:
logger.error(str(ex).strip())
return None
def set_db_schema_version(conn, new_version):
"""Function to set the DB version number in the database."""
with conn.cursor() as cur:
cur.execute('SELECT COUNT(*) FROM schema_version')
num_rows = cur.fetchone()[0]
assert num_rows <= 1
if num_rows > 0:
cur.execute('UPDATE schema_version SET version = %s', [new_version]) # noqa: Q440
else:
cur.execute('INSERT INTO schema_version(version) VALUES(%s)', [new_version])
def is_db_user_superuser(conn):
"""Function to test whether the current DB user is a PostgreSQL superuser."""
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute("""SELECT rolsuper
FROM pg_roles
WHERE rolname = CURRENT_USER""")
res = cur.fetchone()
if res is None:
logger.warn('Failed to find CURRENT_USER in pg_roles table')
return False
return res[0]
def is_db_user_dirbs_role(conn, role_name):
"""Function to test whether the current DB user is in a DIRBS role."""
with conn.cursor() as cur:
cur.execute("""SELECT pg_has_role(%s, 'MEMBER')""", [role_name])
return cur.fetchone()[0]
def is_db_user_dirbs_poweruser(conn):
"""Function to test whether the current DB user is a DIRBS power user."""
return is_db_user_dirbs_role(conn, 'dirbs_core_power_user')
def can_db_user_create_roles(conn):
"""Function to test whether the current DB user has the CREATEROLE privilege."""
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute("""SELECT rolcreaterole
FROM pg_roles
WHERE rolname = CURRENT_USER""")
res = cur.fetchone()
if res is None:
logger.warn('Failed to find CURRENT_USER in pg_roles table')
return False
return res[0]
def query_db_ownership(conn):
"""Function to verify whether the current database ownership is correct."""
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute("""SELECT rolname
FROM pg_roles
JOIN pg_database
ON (pg_database.datdba = pg_roles.oid)
WHERE datname = current_database()""")
res = cur.fetchone()
if res is None:
            logger.warn('Failed to determine DB owner for current_database')
return None
return res[0]
def query_schema_existence(conn, schema_name):
"""Function to verify whether the current database schema ownership is correct."""
with conn.cursor() as cur:
cur.execute('SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)',
[schema_name])
return cur.fetchone().exists
def query_schema_ownership(conn, schema_name):
"""Function to verify whether the current database schema ownership is correct."""
logger = logging.getLogger('dirbs.db')
with conn.cursor() as cur:
cur.execute("""SELECT rolname
FROM pg_roles
JOIN pg_namespace
ON (pg_namespace.nspowner = pg_roles.oid)
WHERE nspname = %s""", [schema_name])
res = cur.fetchone()
if res is None:
            logger.warn('Failed to determine owner for current_schema')
return None
return res[0]
def compute_analysis_end_date(conn, curr_date):
"""Function to get the end of the analysis window based on current operator data."""
end_date = curr_date
if end_date is None:
        # If the current date is None, set the analysis end date to the last day for which operator data exists.
with conn.cursor() as cursor:
monthly_country_child_tbl_list = child_table_names(conn, 'monthly_network_triplets_country')
year_month_list_in_child_tbls_records = table_invariants_list(conn, monthly_country_child_tbl_list,
['triplet_year', 'triplet_month'])
year_month_tuple_list = [(x.triplet_year, x.triplet_month) for x in year_month_list_in_child_tbls_records]
if len(year_month_tuple_list) > 0:
year_month_tuple_list.sort(key=lambda x: (x[0], x[1]), reverse=True)
latest_year, latest_month = year_month_tuple_list[0]
cursor.execute(sql.SQL("""SELECT MAX(last_seen)
FROM monthly_network_triplets_country
WHERE triplet_year = %s
AND triplet_month = %s"""), [latest_year, latest_month])
end_date = cursor.fetchone()[0]
# If there was no operator data imported, this can be None
if end_date is None:
end_date = datetime.date.today()
return end_date + datetime.timedelta(days=1)
def hash_string_64bit(s):
"""Basic string hash based on taking an initial prime number and multiplying it by another prime numnber."""
string_hash = 7
string_bytes = bytearray(s, 'utf-8')
for b in string_bytes:
string_hash = string_hash * 31 + b
return string_hash % (pow(2, 63) - 1) # noqa: S001 Make sure it fits into a 64-bit bigint
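# Illustrative note: for the UTF-8 bytes b0..b(n-1) of s, the loop computes the polynomial
# hash 7*31**n + b0*31**(n-1) + ... + b(n-1), and the final modulo by 2**63 - 1 keeps the
# result within the range of a signed 64-bit bigint column.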
def child_table_names(conn, parent_name):
"""Return a list of table names for a parent table name."""
with conn.cursor() as cursor:
cursor.execute("""SELECT c.relname AS child_tblname
FROM pg_inherits
JOIN pg_class AS c
ON (c.oid = inhrelid)
JOIN pg_class AS p
ON (p.oid = inhparent)
JOIN pg_catalog.pg_namespace nc
ON nc.oid = c.relnamespace
JOIN pg_catalog.pg_namespace np
ON np.oid = p.relnamespace
WHERE p.relname = %s
AND np.nspname = current_schema()
AND nc.nspname = current_schema()""",
[parent_name])
return [res.child_tblname for res in cursor]
def table_invariants_list(conn, table_names, invariant_col_names):
"""Gets a list of tuples containing the values for common table invariant columns across a list table names."""
if len(table_names) == 0:
# Need to return an empty list to avoid doing an empty query and generating an error
return []
with conn.cursor() as cursor:
table_queries = []
for tblname in table_names:
table_queries.append(sql.SQL("""SELECT * FROM (SELECT {0} FROM {1} LIMIT 1) {2}""")
.format(sql.SQL(', ').join(map(sql.Identifier, invariant_col_names)),
sql.Identifier(tblname),
sql.Identifier('tmp_{0}'.format(tblname))))
cursor.execute(sql.SQL(' UNION ALL ').join(table_queries))
return cursor.fetchall()
def most_recently_run_condition_info(conn, cond_names, successful_only=False):
"""For a list of condition names, return a dict of cond_name -> (run_id, cond_config) for the most recent results.
If a particular condition has never completed successfully, the value of the dict will be None, unless the
successful_only parameter is set to True, in which case the key will not exist in the returned dict.
"""
conditions_to_find = copy.copy(cond_names)
rv = {}
# Get list of metadata for dirbs-classify, sorted in reverse order
job_metadata_list = metadata.query_for_command_runs(conn, 'dirbs-classify')
for job_metadata in job_metadata_list:
# Loop back through recent dirbs-classify runs looking for the last time a classification
        # ran successfully. This is indicated in the metadata by the presence of an entry in matched_imei_counts.
# This can happen even though the overall dirbs-classify job failed
extra_metadata = job_metadata.extra_metadata
metadata_conditions = extra_metadata.get('conditions', {})
matched_imei_counts = extra_metadata.get('matched_imei_counts', {})
conditions_lookup = {c['label']: c for c in metadata_conditions}
for req_cond_name in copy.copy(conditions_to_find): # We modify the list in the loop, so take a copy
if req_cond_name in matched_imei_counts:
# If the name was in matched_imei_counts, it should always be in conditions as well
rv[req_cond_name] = {
'run_id': job_metadata.run_id,
'config': conditions_lookup[req_cond_name],
'last_successful_run': job_metadata.start_time
}
# Remove this req_cond_name from conditions_to_find since we already found latest metadata
conditions_to_find.remove(req_cond_name)
# Any items in conditions_to_find at this point are conditions for which we never ran a successful condition
# run
if not successful_only:
for missing_cond_name in conditions_to_find:
rv[missing_cond_name] = None
return rv
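# Illustrative return shape (hypothetical condition names and values):
#
#     {'gsma_not_found': {'run_id': 42,
#                         'config': {...condition config from the run metadata...},
#                         'last_successful_run': <start_time of that run>},
#      'local_stolen': None}    # None entries only appear when successful_only is False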
def filter_imei_list_sql_by_device_type(conn, exempted_device_types, imei_list_sql):
"""Function to return SQL filtering out exempted device types."""
# If certain device types are exempted, first select the IMEIs passed in imei_list_sql query.
# These IMEIs are then joined against GSMA TAC db to get their device type.
# Finally, any IMEIs that belong to exempted device types are excluded.
return sql.SQL("""SELECT imei_norm
FROM (SELECT imei_norm,
SUBSTRING(imei_norm, 1, 8) AS tac
FROM ({0}) imeis) imeis_with_tac
JOIN gsma_data
USING (tac)
WHERE device_type NOT IN {1}
""").format(sql.SQL(imei_list_sql),
sql.Literal(tuple(exempted_device_types))).as_string(conn)
def format_datetime_for_report(timestamp_with_tz):
"""Format the datetime into a string for reporting.
Replace this function with datetime.isoformat(sep=' ', timespec='seconds') after we update python version to 3.6
"""
if timestamp_with_tz is not None:
return timestamp_with_tz.strftime('%Y-%m-%d %X')
else:
return None
def validate_exempted_device_types(conn, config):
"""Method to validate exempted device types specified in config."""
with conn.cursor() as cursor:
logger = logging.getLogger('dirbs.config')
exempted_device_types = config.region_config.exempted_device_types
if len(exempted_device_types) > 0:
cursor.execute('SELECT DISTINCT device_type FROM gsma_data')
all_device_types = [x.device_type for x in cursor]
if len(all_device_types) == 0:
logger.warning('RegionConfig: Ignoring setting exempted_device_types={0} as GSMA TAC database '
'not imported or no device types found.'.format(exempted_device_types))
else:
invalid_device_types = set(exempted_device_types) - set(all_device_types)
if len(invalid_device_types) > 0:
msg = 'RegionConfig: exempted_device_types \'{0}\' is/are not valid device type(s). ' \
'The valid GSMA device types are: \'{1}\''.format(invalid_device_types, all_device_types)
logger.error(msg)
raise ConfigParseException(msg)
def log_analysis_window(logger, analysis_start_date, analysis_end_date, start_message='',
start_date_inclusive=True, end_date_inclusive=False):
"""Helper function to print out window on used for analysis and list generation using interval notation."""
start_date_interval_notation = '[' if start_date_inclusive else '('
end_date_interval_notation = ']' if end_date_inclusive else ')'
logger.debug('{0} {sd_interval_notation}{start_date}, '
'{end_date}{ed_interval_notation}'.format(start_message,
sd_interval_notation=start_date_interval_notation,
start_date=analysis_start_date,
end_date=analysis_end_date,
ed_interval_notation=end_date_interval_notation))
def registration_list_status_filter_sql():
"""SQL to filter for whitelisted or null registration_list statuses."""
return sql.SQL('(status IS NULL OR status = \'whitelist\')')
def compute_amnesty_flags(app_config, curr_date):
"""Helper function to determine whether the date falls within amnesty eval or amnesty period."""
    in_amnesty_eval_period = bool(
        app_config.amnesty_config.amnesty_enabled and
        curr_date <= app_config.amnesty_config.evaluation_period_end_date)
    in_amnesty_period = bool(
        app_config.amnesty_config.amnesty_enabled and
        app_config.amnesty_config.evaluation_period_end_date < curr_date <=
        app_config.amnesty_config.amnesty_period_end_date)
return in_amnesty_eval_period, in_amnesty_period
def table_exists_sql(any_schema=False):
"""SQL to check for existence of a table. Note that for temp tables, any_schema should be set to True."""
if not any_schema:
schema_filter_sql = sql.SQL('AND schemaname = current_schema()')
else:
schema_filter_sql = sql.SQL('')
return sql.SQL("""SELECT EXISTS (SELECT 1
FROM pg_tables
WHERE tablename = %s
{schema_filter_sql})""").format(schema_filter_sql=schema_filter_sql)
def is_table_partitioned(conn, tbl_name):
"""Function to determine whether a table is partitioned."""
with conn.cursor() as cursor:
cursor.execute("""SELECT EXISTS (SELECT 1
FROM pg_class
JOIN pg_partitioned_table
ON pg_partitioned_table.partrelid = pg_class.oid
WHERE pg_class.relname = %s)""", [tbl_name])
return cursor.fetchone().exists
# === End of source file: src/dirbs/utils.py (repository: nealmadhu/DIRBS-Core, license: BSD-3-Clause-Clear) ===
import config
import pandas as pd
import pickle
import numpy as np
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import classification_report
import tensorflow as tf
from keras import Sequential
from tensorflow.keras.layers import Embedding, SpatialDropout1D, LSTM, Dense
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras import regularizers
from keras.models import load_model
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
"""
Versuch #1
"""
# Gibt den classification-report aus
def evaluate(model, X_test, Y_test):
Y_pred = model.predict(X_test)
Y_pred = Y_pred.argmax(axis=-1)
Y_test = Y_test.argmax(axis=-1)
print(classification_report([Y_test], [Y_pred]))
# Takes a history object and plots the loss for
# both the test and the training data.
def plot_model(history, fold):
plt.title('Loss')
plt.plot(history.history['loss'], label='train_loss')
plt.plot(history.history['val_loss'], label='test_loss')
plt.legend()
plt.savefig(f"../plots/covid_model_without_vaccine_loss_{config.EPOCHS}epochs_{fold}v{config.K_FOLD_SPLITS}fold.png")
clear_plot()
plt.title('Accuracy')
plt.plot(history.history['accuracy'], label='train_acc', c="r")
plt.plot(history.history['val_accuracy'], label='test_acc', c="b")
plt.legend()
plt.savefig(f"../plots/covid_model_without_vaccine_accuracy_{config.EPOCHS}epochs_{fold}v{config.K_FOLD_SPLITS}fold.png")
clear_plot()
def clear_plot():
plt.close()
plt.cla()
plt.clf()
def plot_confusion_matrix(model, X_test, y_test, fold):
y_pred = model.predict(X_test)
y_pred = y_pred.argmax(axis=-1)
y_test = y_test.argmax(axis=-1)
cm = confusion_matrix(y_test, y_pred)
ax=plt.subplot()
sns.heatmap(cm, annot=True, fmt='g', ax=ax)
# labels, title and ticks
ax.set_xlabel('Predicted labels')
ax.set_ylabel('True labels')
ax.set_title(f'Confusion Matrix – {config.EPOCHS}|{fold}')
ax.xaxis.set_ticklabels(['Negative', 'Positive'])
ax.yaxis.set_ticklabels(['Negative', 'Positive'])
plt.savefig(f"../plots/covid_confusion_{config.EPOCHS}epochs_{fold}v{config.K_FOLD_SPLITS}fold.png")
clear_plot()
# Create a tokenizer for the LSTM model
def create_tokenizer(df, save_path):
tokenizer = Tokenizer(num_words=config.MAX_NUM_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
words = df.link.values.tolist()
words.extend(df.meta_data.values.tolist())
words.extend(df.title.values.tolist())
words.extend(df.body.values.tolist())
tokenizer.fit_on_texts(words)
save_tokenizer(tokenizer, save_path)
return tokenizer
# Load and save the tokenizer
def save_tokenizer(tokenizer, filename):
with open(filename, 'wb') as f:
pickle.dump(tokenizer, f, protocol=pickle.HIGHEST_PROTOCOL)
def load_tokenizer(filename):
with open(filename, 'rb') as f:
tokenizer = pickle.load(f)
return tokenizer
"""
Die in Tokens verwandelte Texte sehen so aus:
[[1, 2, 3, 4], [5, 6, 7], [8, 9, 10, 11, 12]]
gepaddet sehen sie so aus:
[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 2 3 4]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 6 7]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8 9 10 11 12]]
werden danach die Covid Count Zahlen angefügt, sieht die Repräsentation beispielsweise so aus
[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 2 3 4 10 20 30]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 5 6 7 40 50 60]
[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 8 9 10 11 12 70 80 90]]
Das np.expand ist notwendig, um das array in beispielsweise folgende Form zu bringen: [ 2 1 20] => [ [2] [1] [20]]
"""
def transform_text(tokenizer, df):
if (isinstance(tokenizer, str)):
tokenizer = load_tokenizer(tokenizer)
    # Tokenize the link information
X_input = tokenizer.texts_to_sequences(df['link'].values)
X_input = pad_sequences(X_input, maxlen=config.MAX_LINK_SEQUENCE_LENGTH)
    # Tokenize the meta information
X_meta = tokenizer.texts_to_sequences(df['meta_data'].values)
X_meta = pad_sequences(X_meta, maxlen=config.MAX_META_SEQUENCE_LENGTH)
    # Tokenize the title information
X_title = tokenizer.texts_to_sequences(df['title'].values)
X_title = pad_sequences(X_title, maxlen=config.MAX_TITLE_SEQUENCE_LENGTH)
    # Tokenize the page body
X_body = tokenizer.texts_to_sequences(df['body'].values)
X_body = pad_sequences(X_body, maxlen=config.MAX_BODY_SEQUENCE_LENGTH)
covid_word_count = df['covid_word_count'].values
covid_word_count_url = df['covid_word_count_url'].values
restriction_word_count = df['restriction_word_count'].values
restriction_word_count_url = df['restriction_word_count_url'].values
X_input = np.concatenate([X_input, X_meta], axis=-1)
X_input = np.concatenate([X_input, X_title], axis=-1)
X_input = np.concatenate([X_input, X_body], axis=-1)
covid_word_count = np.expand_dims(covid_word_count, axis=(-1))
X_input = np.concatenate([X_input, covid_word_count], axis=-1)
covid_word_count_url = np.expand_dims(covid_word_count_url, axis=(-1))
X_input = np.concatenate([X_input, covid_word_count_url], axis=-1)
restriction_word_count = np.expand_dims(restriction_word_count, axis=(-1))
X_input = np.concatenate([X_input, restriction_word_count], axis=-1)
restriction_word_count_url = np.expand_dims(restriction_word_count_url, axis=(-1))
    X_input = np.concatenate([X_input, restriction_word_count_url], axis=-1)  # Finally join everything together
return X_input
def remove_stopwords(df):
ger = pd.read_csv(config.STOPWORDS_PATH)['stopwords'].values
df['link'] = df['link'].apply(lambda x: ' '.join([word for word in str(x).split() if word not in (ger)]))
df['meta_data'] = df['meta_data'].apply(lambda x: ' '.join([word for word in str(x).split() if word not in (ger)]))
df['title'] = df['title'].apply(lambda x: ' '.join([word for word in str(x).split() if word not in (ger)]))
df['body'] = df['body'].apply(lambda x: ' '.join([word for word in str(x).split() if word not in (ger)]))
return df
# Takes the input DataFrame and a LabelEncoder object,
# trains an LSTM model, saves it, evaluates it
# and prints the loss.
def train_model(train_df, valid_df, tokenizer, fold):
X_train = transform_text(tokenizer, train_df)
X_valid = transform_text(tokenizer, valid_df)
Y_train = pd.get_dummies(train_df['label'])
Y_valid = pd.get_dummies(valid_df['label']).to_numpy()
model = Sequential()
optimizer = tf.keras.optimizers.Adam(1e-3) # 0.001
model.add(Embedding(config.MAX_NUM_WORDS, config.EMBEDDING_DIM, input_length=X_train.shape[1]))
model.add(SpatialDropout1D(0.2))
    model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2, bias_regularizer=regularizers.l2(1e-4),))  # TODO: experiment with these values
model.add(Dense(2, activation='softmax'))
loss='categorical_crossentropy'
model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
epochs = config.EPOCHS
batch_size = config.BATCH_SIZE # 64
#es = EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)
history = model.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, validation_split=0.2) # , callbacks=[es]
accr = model.evaluate(X_valid,Y_valid)
print('Test set\n Loss: {:0.3f}\n Accuracy: {:0.3f}'.format(accr[0],accr[1]))
model.save(f"{config.MODEL_PATH}_without_vaccine_{fold}.h5")
evaluate(model, X_valid, Y_valid)
plot_model(history, fold)
plot_confusion_matrix(model, X_valid, Y_valid, fold)
# Load and evaluate an existing model
def load_and_evaluate_existing_model(model_path, tokenizer_path, df, le):
    model = load_model(model_path)
    tokenizer = load_tokenizer(tokenizer_path)
    X = transform_text(tokenizer, df)
    Y = pd.get_dummies(df['label']).values
    evaluate(model, X, Y)
# Test a new example. Mainly for testing purposes during development.
# The function takes a string, the classifier,
# the vectorizer and a LabelEncoder and
# returns a prediction.
def test_new_example(model, tokenizer, le, text_input):
X_example = transform_text(tokenizer, [text_input])
label_array = model.predict(X_example)
new_label = np.argmax(label_array, axis=-1)
print(new_label)
print(le.inverse_transform(new_label))
def run(df, fold, use_vaccine):
    # the training dataframe
    train_df = df[df.kfold != fold].reset_index(drop=True)
    print(f"Length train_df: {len(train_df)}")
    # the validation dataframe
    valid_df = df[df.kfold == fold].reset_index(drop=True)
    print(f"Length valid_df: {len(valid_df)}")
    # The validation set still contains the vaccine examples.
    # With 10 folds the sets are split as follows:
    # 0 – 126
    # 1 – 78
    # 2 – 10
    if not use_vaccine:
        train_df = train_df[train_df['label'] != 2]
        # Now all remaining 2 labels have to be converted into 1s
        train_df['label'] = train_df['label'].apply(lambda x: 1 if x > 0 else 0)
        valid_df['label'] = valid_df['label'].apply(lambda x: 1 if x > 0 else 0)
print("Fitting tokenizer")
# tf.keras Tokenizer
tokenizer = create_tokenizer(train_df, f"{config.TOKENIZER_SAVE_PATH}_{fold}.pickle")
train_model(train_df, valid_df, tokenizer, fold)
# load_and_evaluate_existing_model(f"{config.MODEL_PATH}_{fold}", config.TOKENIZER_PATH, df, le)
#model = load_model(config.MODEL_PATH)
#tokenizer = config.TOKENIZER_PATH
if (__name__ == "__main__"):
tf.get_logger().setLevel('ERROR')
# load data
df = pd.read_csv(config.DATASET_PATH).sample(frac=1)
df = remove_stopwords(df)
"""
# TODO: ein Test, Gleichverteilung
"""
df2 = df[df['label'] != 0]
    # We simply take the latter part of the body and the metadata
df2['body'] = df2['body'].apply(lambda x : str(x)[config.MAX_BODY_SEQUENCE_LENGTH:])
df2['meta_data'] = df2['meta_data'].apply(lambda x : str(x)[config.MAX_META_SEQUENCE_LENGTH:])
df = df.append(df2, ignore_index=True).reset_index()
# initiate the kfold class from the model_selection module
kf = StratifiedKFold(n_splits=config.K_FOLD_SPLITS)
    # fill the kfold column
for f, (t_, v_) in enumerate(kf.split(X=df, y=df.label.values)):
df.loc[v_, 'kfold'] = f
    # training for all folds
for i in range(config.K_FOLD_SPLITS):
print(f"\n–––––––––––– FOLD {i} ––––––––––––\n")
        run(df, fold=i, use_vaccine=config.USE_VACCINE)
# === End of source file: classification/src/train.py (repository: WayneGame/InformationExtraction, license: MIT) ===
"""Returns full pathname of backup directory."""
import os
import pytest
from pathlib import Path
from mklists.constants import CONFIGFILE_NAME
from mklists.returns import get_backupdir_path
def test_get_backupdir_path(tmp_path):
"""Returns backups Path named for default working directory."""
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text("config stuff")
backdir = "_backups"
datestr = "2020-01-03_1646"
workingdir = Path("agenda")
workingdir.mkdir()
os.chdir(workingdir)
actual = get_backupdir_path(backdir=backdir, now=datestr)
expected = Path(tmp_path) / backdir / str(workingdir) / datestr
expected_explicit = Path(tmp_path) / "_backups" / "agenda" / "2020-01-03_1646"
assert actual == expected
assert actual == expected_explicit
def test_get_backupdir_path_given_datadir(tmp_path):
"""Returns backups Path named for specified working directory."""
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text("config stuff")
workingdir = Path(tmp_path).joinpath("todolists/a")
workingdir.mkdir(parents=True, exist_ok=True)
workingdir_shortname_expected = "todolists_a"
backdir = "_backups"
datestr = "2020-01-03_1646_06488910"
actual = get_backupdir_path(datadir=workingdir, backdir=backdir, now=datestr)
expected = Path(tmp_path) / backdir / workingdir_shortname_expected / datestr
assert actual == expected
def test_get_backupdir_path_given_datadir_with_slash(tmp_path):
"""Returns backups Path named for specified working directory ending with slash."""
os.chdir(tmp_path)
Path(CONFIGFILE_NAME).write_text("config stuff")
workingdir = Path(tmp_path).joinpath("todolists/a/")
workingdir.mkdir(parents=True, exist_ok=True)
workingdir_shortname_expected = "todolists_a"
backdir = "_backups"
datestr = "2020-01-03_1646_06488910"
actual = get_backupdir_path(datadir=workingdir, backdir=backdir, now=datestr)
expected = Path(tmp_path) / backdir / workingdir_shortname_expected / datestr
assert actual == expected
def test_get_backupdir_path_raise_exception_if_rootdir_not_found(tmp_path):
"""Raises exception if no rootdir is found (rootdir is None)."""
os.chdir(tmp_path)
with pytest.raises(SystemExit):
get_backupdir_path()
# === End of source file: tests/returns/test_get_backupdir_path.py (repository: tombaker/mklists_old, license: MIT) ===
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Elasticsearch datastore."""
from __future__ import unicode_literals
from collections import Counter
import copy
import codecs
import json
import logging
import socket
from uuid import uuid4
import six
from dateutil import parser, relativedelta
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout
from elasticsearch.exceptions import NotFoundError
from elasticsearch.exceptions import RequestError
# pylint: disable=redefined-builtin
from elasticsearch.exceptions import ConnectionError
from flask import abort
from flask import current_app
import prometheus_client
from timesketch.lib.definitions import HTTP_STATUS_CODE_NOT_FOUND
from timesketch.lib.definitions import METRICS_NAMESPACE
# Setup logging
es_logger = logging.getLogger('timesketch.elasticsearch')
es_logger.setLevel(logging.WARNING)
# Metrics definitions
METRICS = {
'search_requests': prometheus_client.Counter(
'search_requests',
'Number of search requests per type (e.g all, stream etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_type': prometheus_client.Counter(
'search_filter_type',
'Number of filters per type (e.g term, label etc)',
['type'],
namespace=METRICS_NAMESPACE
),
'search_filter_label': prometheus_client.Counter(
'search_filter_label',
'Number of filters per label (e.g __ts_star etc)',
['label'],
namespace=METRICS_NAMESPACE
),
'search_get_event': prometheus_client.Counter(
'search_get_event',
'Number of times a single event is requested',
namespace=METRICS_NAMESPACE
)
}
# Elasticsearch scripts
UPDATE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
if (params.remove == true) {
ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
} else {
if( ! ctx._source.timesketch_label.contains (params.timesketch_label)) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
}
"""
TOGGLE_LABEL_SCRIPT = """
if (ctx._source.timesketch_label == null) {
ctx._source.timesketch_label = new ArrayList()
}
boolean removedLabel = ctx._source.timesketch_label.removeIf(label -> label.name == params.timesketch_label.name && label.sketch_id == params.timesketch_label.sketch_id);
if (!removedLabel) {
ctx._source.timesketch_label.add(params.timesketch_label)
}
"""
class ElasticsearchDataStore(object):
"""Implements the datastore."""
# Number of events to queue up when bulk inserting events.
DEFAULT_FLUSH_INTERVAL = 1000
DEFAULT_SIZE = 100
DEFAULT_LIMIT = DEFAULT_SIZE # Max events to return
DEFAULT_FROM = 0
DEFAULT_STREAM_LIMIT = 5000 # Max events to return when streaming results
DEFAULT_FLUSH_RETRY_LIMIT = 3 # Max retries for flushing the queue.
DEFAULT_EVENT_IMPORT_TIMEOUT = '3m' # Timeout value for importing events.
def __init__(self, host='127.0.0.1', port=9200):
"""Create a Elasticsearch client."""
super().__init__()
self._error_container = {}
self.user = current_app.config.get('ELASTIC_USER', 'user')
self.password = current_app.config.get('ELASTIC_PASSWORD', 'pass')
self.ssl = current_app.config.get('ELASTIC_SSL', False)
self.verify = current_app.config.get('ELASTIC_VERIFY_CERTS', True)
if self.ssl:
if self.user and self.password:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
http_auth=(self.user, self.password),
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch(
[{'host': host, 'port': port}],
use_ssl=self.ssl, verify_certs=self.verify)
else:
self.client = Elasticsearch([{'host': host, 'port': port}])
self.import_counter = Counter()
self.import_events = []
self._request_timeout = current_app.config.get(
'TIMEOUT_FOR_EVENT_IMPORT', self.DEFAULT_EVENT_IMPORT_TIMEOUT)
@staticmethod
def _build_labels_query(sketch_id, labels):
"""Build Elasticsearch query for Timesketch labels.
Args:
sketch_id: Integer of sketch primary key.
labels: List of label names.
Returns:
Elasticsearch query as a dictionary.
"""
label_query = {
'bool': {
'must': []
}
}
for label in labels:
# Increase metrics counter per label
METRICS['search_filter_label'].labels(label=label).inc()
nested_query = {
'nested': {
'query': {
'bool': {
'must': [{
'term': {
'timesketch_label.name.keyword': label
}
}, {
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'path': 'timesketch_label'
}
}
label_query['bool']['must'].append(nested_query)
return label_query
@staticmethod
def _build_events_query(events):
"""Build Elasticsearch query for one or more document ids.
Args:
events: List of Elasticsearch document IDs.
Returns:
Elasticsearch query as a dictionary.
"""
events_list = [event['event_id'] for event in events]
query_dict = {'query': {'ids': {'values': events_list}}}
return query_dict
@staticmethod
def _build_query_dsl(query_dsl, timeline_ids):
"""Build Elastic Search DSL query by adding in timeline filtering.
Args:
query_dsl: A dict with the current query_dsl
timeline_ids: Either a list of timeline IDs (int) or None.
Returns:
Elasticsearch query DSL as a dictionary.
"""
# Remove any aggregation coming from user supplied Query DSL.
# We have no way to display this data in a good way today.
if query_dsl.get('aggregations', None):
del query_dsl['aggregations']
if not timeline_ids:
return query_dsl
if not isinstance(timeline_ids, (list, tuple)):
es_logger.error(
'Attempting to pass in timelines to a query DSL, but the '
'passed timelines are not a list.')
return query_dsl
if not all([isinstance(x, int) for x in timeline_ids]):
es_logger.error(
'All timeline IDs need to be an integer.')
return query_dsl
old_query = query_dsl.get('query')
if not old_query:
return query_dsl
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': old_query,
'must_not': [{
'exists': {
'field': '__ts_timeline_id'},
}],
}
}, {
'bool': {
'must': [{
'terms': {
'__ts_timeline_id': timeline_ids}
}, old_query],
'must_not': [],
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
@staticmethod
def _convert_to_time_range(interval):
"""Convert an interval timestamp into start and end dates.
Args:
interval: Time frame representation
Returns:
Start timestamp in string format.
End timestamp in string format.
"""
# return ('2018-12-05T00:00:00', '2018-12-05T23:59:59')
TS_FORMAT = '%Y-%m-%dT%H:%M:%S'
get_digits = lambda s: int(''.join(filter(str.isdigit, s)))
get_alpha = lambda s: ''.join(filter(str.isalpha, s))
ts_parts = interval.split(' ')
# The start date could be 1 or 2 first items
start = ' '.join(ts_parts[0:len(ts_parts)-2])
minus = get_digits(ts_parts[-2])
plus = get_digits(ts_parts[-1])
interval = get_alpha(ts_parts[-1])
start_ts = parser.parse(start)
rd = relativedelta.relativedelta
if interval == 's':
start_range = start_ts - rd(seconds=minus)
end_range = start_ts + rd(seconds=plus)
elif interval == 'm':
start_range = start_ts - rd(minutes=minus)
end_range = start_ts + rd(minutes=plus)
elif interval == 'h':
start_range = start_ts - rd(hours=minus)
end_range = start_ts + rd(hours=plus)
elif interval == 'd':
start_range = start_ts - rd(days=minus)
end_range = start_ts + rd(days=plus)
else:
raise RuntimeError('Unable to parse the timestamp: '
+ str(interval))
return start_range.strftime(TS_FORMAT), end_range.strftime(TS_FORMAT)
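    # Worked example for the interval format handled above (added for illustration, the
    # timestamp is hypothetical): an input such as
    #     '2018-12-05T12:00:00 -15m +15m'
    # is split into the start timestamp and the two offsets, and converts to
    #     ('2018-12-05T11:45:00', '2018-12-05T12:15:00')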
def build_query(self, sketch_id, query_string, query_filter, query_dsl=None,
aggregations=None, timeline_ids=None):
"""Build Elasticsearch DSL query.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
aggregations: Dict of Elasticsearch aggregations
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Elasticsearch DSL query as a dictionary
"""
if query_dsl:
if not isinstance(query_dsl, dict):
query_dsl = json.loads(query_dsl)
if not query_dsl:
query_dsl = {}
return self._build_query_dsl(query_dsl, timeline_ids)
if query_filter.get('events', None):
events = query_filter['events']
return self._build_events_query(events)
query_dsl = {
'query': {
'bool': {
'must': [],
'must_not': [],
'filter': []
}
}
}
if query_string:
query_dsl['query']['bool']['must'].append(
{'query_string': {'query': query_string}})
# New UI filters
if query_filter.get('chips', None):
labels = []
must_filters = query_dsl['query']['bool']['must']
must_not_filters = query_dsl['query']['bool']['must_not']
datetime_ranges = {
'bool': {
'should': [],
'minimum_should_match': 1
}
}
for chip in query_filter['chips']:
# Exclude chips that the user disabled
if not chip.get('active', True):
continue
# Increase metrics per chip type
METRICS['search_filter_type'].labels(type=chip['type']).inc()
if chip['type'] == 'label':
labels.append(chip['value'])
elif chip['type'] == 'term':
term_filter = {
'match_phrase': {
'{}'.format(chip['field']): {
'query': "{}".format(chip['value'])
}
}
}
if chip['operator'] == 'must':
must_filters.append(term_filter)
elif chip['operator'] == 'must_not':
must_not_filters.append(term_filter)
elif chip['type'].startswith('datetime'):
range_filter = lambda start, end: {
'range': {
'datetime': {
'gte': start,
'lte': end
}
}
}
if chip['type'] == 'datetime_range':
start, end = chip['value'].split(',')
elif chip['type'] == 'datetime_interval':
start, end = self._convert_to_time_range(chip['value'])
else:
continue
datetime_ranges['bool']['should'].append(
range_filter(start, end))
label_filter = self._build_labels_query(sketch_id, labels)
must_filters.append(label_filter)
must_filters.append(datetime_ranges)
# Pagination
if query_filter.get('from', None):
query_dsl['from'] = query_filter['from']
# Number of events to return
if query_filter.get('size', None):
query_dsl['size'] = query_filter['size']
# Make sure we are sorting.
if not query_dsl.get('sort', None):
query_dsl['sort'] = {
'datetime': query_filter.get('order', 'asc')
}
# Add any pre defined aggregations
if aggregations:
# post_filter happens after aggregation so we need to move the
# filter to the query instead.
if query_dsl.get('post_filter', None):
query_dsl['query']['bool']['filter'] = query_dsl[
'post_filter']
query_dsl.pop('post_filter', None)
query_dsl['aggregations'] = aggregations
# TODO: Simplify this when we don't have to support both timelines
# that have __ts_timeline_id set and those that don't.
# (query_string AND timeline_id NOT EXISTS) OR (
# query_string AND timeline_id in LIST)
if timeline_ids and isinstance(timeline_ids, (list, tuple)):
must_filters_pre = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_pre = copy.copy(
query_dsl['query']['bool']['must_not'])
must_filters_post = copy.copy(query_dsl['query']['bool']['must'])
must_not_filters_post = copy.copy(
query_dsl['query']['bool']['must_not'])
must_not_filters_pre.append({
'exists': {
'field': '__ts_timeline_id'},
})
must_filters_post.append({
'terms': {
'__ts_timeline_id': timeline_ids}
})
query_dsl['query'] = {
'bool': {
'must': [],
'should': [{
'bool': {
'must': must_filters_pre,
'must_not': must_not_filters_pre,
}
}, {
'bool': {
'must': must_filters_post,
'must_not': must_not_filters_post,
'filter': [{
'exists': {
'field': '__ts_timeline_id'}
}]
}
}],
'must_not': [],
'filter': []
}
}
return query_dsl
# pylint: disable=too-many-arguments
def search(self, sketch_id, query_string, query_filter, query_dsl, indices,
count=False, aggregations=None, return_fields=None,
enable_scroll=False, timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
count: Boolean indicating if we should only return result count
aggregations: Dict of Elasticsearch aggregations
return_fields: List of fields to return
enable_scroll: If Elasticsearch scroll API should be used
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Set of event documents in JSON format
"""
scroll_timeout = None
if enable_scroll:
scroll_timeout = '1m' # Default to 1 minute scroll timeout
# Exit early if we have no indices to query
if not indices:
return {'hits': {'hits': [], 'total': 0}, 'took': 0}
# Check if we have specific events to fetch and get indices.
if query_filter.get('events', None):
indices = {
event['index']
for event in query_filter['events']
if event['index'] in indices
}
query_dsl = self.build_query(
sketch_id=sketch_id, query_string=query_string,
query_filter=query_filter, query_dsl=query_dsl,
aggregations=aggregations, timeline_ids=timeline_ids)
# Default search type for elasticsearch is query_then_fetch.
search_type = 'query_then_fetch'
# Only return how many documents matches the query.
if count:
if 'sort' in query_dsl:
del query_dsl['sort']
try:
count_result = self.client.count(
body=query_dsl, index=list(indices))
except NotFoundError:
es_logger.error(
'Unable to count due to an index not found: {0:s}'.format(
','.join(indices)))
return 0
METRICS['search_requests'].labels(type='count').inc()
return count_result.get('count', 0)
if not return_fields:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
return self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
scroll=scroll_timeout)
# The argument " _source_include" changed to "_source_includes" in
# ES version 7. This check add support for both version 6 and 7 clients.
# pylint: disable=unexpected-keyword-arg
try:
if self.version.startswith('6'):
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_include=return_fields,
scroll=scroll_timeout)
else:
_search_result = self.client.search(
body=query_dsl,
index=list(indices),
search_type=search_type,
_source_includes=return_fields,
scroll=scroll_timeout)
except RequestError as e:
root_cause = e.info.get('error', {}).get('root_cause')
if root_cause:
error_items = []
for cause in root_cause:
error_items.append(
'[{0:s}] {1:s}'.format(
cause.get('type', ''), cause.get('reason', '')))
cause = ', '.join(error_items)
else:
cause = str(e)
es_logger.error(
'Unable to run search query: {0:s}'.format(cause),
exc_info=True)
raise ValueError(cause) from e
METRICS['search_requests'].labels(type='all').inc()
return _search_result
# pylint: disable=too-many-arguments
def search_stream(self, sketch_id=None, query_string=None,
query_filter=None, query_dsl=None, indices=None,
return_fields=None, enable_scroll=True,
timeline_ids=None):
"""Search ElasticSearch. This will take a query string from the UI
together with a filter definition. Based on this it will execute the
search request on ElasticSearch and get result back.
        Args:
sketch_id: Integer of sketch primary key
query_string: Query string
query_filter: Dictionary containing filters to apply
query_dsl: Dictionary containing Elasticsearch DSL query
indices: List of indices to query
return_fields: List of fields to return
enable_scroll: Boolean determining whether scrolling is enabled.
timeline_ids: Optional list of IDs of Timeline objects that should
be queried as part of the search.
Returns:
Generator of event documents in JSON format
"""
METRICS['search_requests'].labels(type='streaming').inc()
if not query_filter.get('size'):
query_filter['size'] = self.DEFAULT_STREAM_LIMIT
if not query_filter.get('terminate_after'):
query_filter['terminate_after'] = self.DEFAULT_STREAM_LIMIT
result = self.search(
sketch_id=sketch_id,
query_string=query_string,
query_dsl=query_dsl,
query_filter=query_filter,
indices=indices,
return_fields=return_fields,
enable_scroll=enable_scroll,
timeline_ids=timeline_ids)
if enable_scroll:
scroll_id = result['_scroll_id']
scroll_size = result['hits']['total']
else:
scroll_id = None
scroll_size = 0
# Elasticsearch version 7.x returns total hits as a dictionary.
# TODO: Refactor when version 6.x has been deprecated.
if isinstance(scroll_size, dict):
scroll_size = scroll_size.get('value', 0)
for event in result['hits']['hits']:
yield event
while scroll_size > 0:
# pylint: disable=unexpected-keyword-arg
result = self.client.scroll(scroll_id=scroll_id, scroll='5m')
scroll_id = result['_scroll_id']
scroll_size = len(result['hits']['hits'])
for event in result['hits']['hits']:
yield event
def get_filter_labels(self, sketch_id, indices):
"""Aggregate labels for a sketch.
Args:
sketch_id: The Sketch ID
indices: List of indices to aggregate on
Returns:
List with label names.
"""
# This is a workaround to return all labels by setting the max buckets
# to something big. If a sketch has more than this amount of labels
# the list will be incomplete but it should be uncommon to have >10k
# labels in a sketch.
max_labels = 10000
# pylint: disable=line-too-long
aggregation = {
'aggs': {
'nested': {
'nested': {
'path': 'timesketch_label'
},
'aggs': {
'inner': {
'filter': {
'bool': {
'must': [{
'term': {
'timesketch_label.sketch_id': sketch_id
}
}]
}
},
'aggs': {
'labels': {
'terms': {
'size': max_labels,
'field': 'timesketch_label.name.keyword'
}
}
}
}
}
}
}
}
labels = []
# pylint: disable=unexpected-keyword-arg
try:
result = self.client.search(
index=indices, body=aggregation, size=0)
except NotFoundError:
es_logger.error('Unable to find the index/indices: {0:s}'.format(
','.join(indices)))
return labels
buckets = result.get(
'aggregations', {}).get('nested', {}).get('inner', {}).get(
'labels', {}).get('buckets', [])
for bucket in buckets:
# Filter out special labels like __ts_star etc.
if bucket['key'].startswith('__'):
continue
labels.append(bucket['key'])
return labels
# pylint: disable=inconsistent-return-statements
def get_event(self, searchindex_id, event_id):
"""Get one event from the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
Returns:
Event document in JSON format
"""
METRICS['search_get_event'].inc()
try:
# Suppress the lint error because elasticsearch-py adds parameters
# to the function with a decorator and this makes pylint sad.
# pylint: disable=unexpected-keyword-arg
if self.version.startswith('6'):
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_exclude=['timesketch_label'])
else:
event = self.client.get(
index=searchindex_id,
id=event_id,
doc_type='_all',
_source_excludes=['timesketch_label'])
return event
except NotFoundError:
abort(HTTP_STATUS_CODE_NOT_FOUND)
def count(self, indices):
"""Count number of documents.
Args:
indices: List of indices.
Returns:
Tuple containing number of documents and size on disk.
"""
if not indices:
return 0, 0
try:
es_stats = self.client.indices.stats(
index=indices, metric='docs, store')
except NotFoundError:
es_logger.error(
'Unable to count indices (index not found)')
return 0, 0
except RequestError:
es_logger.error(
'Unable to count indices (request error)', exc_info=True)
return 0, 0
doc_count_total = es_stats.get(
'_all', {}).get('primaries', {}).get('docs', {}).get('count', 0)
doc_bytes_total = es_stats.get(
'_all', {}).get(
'primaries', {}).get('store', {}).get('size_in_bytes', 0)
return doc_count_total, doc_bytes_total
def set_label(self, searchindex_id, event_id, event_type, sketch_id,
user_id, label, toggle=False, remove=False,
single_update=True):
"""Set label on event in the datastore.
Args:
searchindex_id: String of ElasticSearch index id
event_id: String of ElasticSearch event id
event_type: String of ElasticSearch document type
sketch_id: Integer of sketch primary key
user_id: Integer of user primary key
label: String with the name of the label
remove: Optional boolean value if the label should be removed
toggle: Optional boolean value if the label should be toggled
single_update: Boolean if the label should be indexed immediately.
Returns:
Dict with updated document body, or None if this is a single update.
"""
# Elasticsearch painless script.
update_body = {
'script': {
'lang': 'painless',
'source': UPDATE_LABEL_SCRIPT,
'params': {
'timesketch_label': {
'name': str(label),
'user_id': user_id,
'sketch_id': sketch_id
},
                    'remove': remove
}
}
}
if toggle:
update_body['script']['source'] = TOGGLE_LABEL_SCRIPT
if not single_update:
script = update_body['script']
return dict(
source=script['source'], lang=script['lang'],
params=script['params']
)
doc = self.client.get(
index=searchindex_id, id=event_id, doc_type='_all')
try:
doc['_source']['timesketch_label']
except KeyError:
doc = {'doc': {'timesketch_label': []}}
self.client.update(
index=searchindex_id,
doc_type=event_type,
id=event_id,
body=doc)
self.client.update(
index=searchindex_id,
id=event_id,
doc_type=event_type,
body=update_body)
return None
def create_index(
self, index_name=uuid4().hex, doc_type='generic_event',
mappings=None):
"""Create index with Timesketch settings.
Args:
index_name: Name of the index. Default is a generated UUID.
            doc_type: Name of the document type. Default is generic_event.
mappings: Optional dict with the document mapping for Elastic.
Returns:
Index name in string format.
Document type in string format.
"""
if mappings:
_document_mapping = mappings
else:
_document_mapping = {
'properties': {
'timesketch_label': {
'type': 'nested'
},
'datetime': {
'type': 'date'
}
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
_document_mapping = {doc_type: _document_mapping}
if not self.client.indices.exists(index_name):
try:
self.client.indices.create(
index=index_name, body={'mappings': _document_mapping})
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend.') from e
except RequestError:
index_exists = self.client.indices.exists(index_name)
es_logger.warning(
'Attempting to create an index that already exists '
'({0:s} - {1:s})'.format(index_name, str(index_exists)))
return index_name, doc_type
def delete_index(self, index_name):
"""Delete Elasticsearch index.
Args:
index_name: Name of the index to delete.
"""
if self.client.indices.exists(index_name):
try:
self.client.indices.delete(index=index_name)
except ConnectionError as e:
raise RuntimeError(
'Unable to connect to Timesketch backend: {}'.format(e)
) from e
def import_event(self, index_name, event_type, event=None, event_id=None,
flush_interval=DEFAULT_FLUSH_INTERVAL, timeline_id=None):
"""Add event to Elasticsearch.
Args:
index_name: Name of the index in Elasticsearch
event_type: Type of event (e.g. plaso_event)
event: Event dictionary
event_id: Event Elasticsearch ID
flush_interval: Number of events to queue up before indexing
timeline_id: Optional ID number of a Timeline object this event
belongs to. If supplied an additional field will be added to
the store indicating the timeline this belongs to.
"""
if event:
for k, v in event.items():
if not isinstance(k, six.text_type):
k = codecs.decode(k, 'utf8')
# Make sure we have decoded strings in the event dict.
if isinstance(v, six.binary_type):
v = codecs.decode(v, 'utf8')
event[k] = v
# Header needed by Elasticsearch when bulk inserting.
header = {
'index': {
'_index': index_name,
}
}
update_header = {
'update': {
'_index': index_name,
'_id': event_id
}
}
# TODO: Remove when we deprecate Elasticsearch version 6.x
if self.version.startswith('6'):
header['index']['_type'] = event_type
update_header['update']['_type'] = event_type
if event_id:
# Event has "lang" defined if there is a script used for import.
if event.get('lang'):
event = {'script': event}
else:
event = {'doc': event}
header = update_header
if timeline_id:
event['__ts_timeline_id'] = timeline_id
self.import_events.append(header)
self.import_events.append(event)
self.import_counter['events'] += 1
if self.import_counter['events'] % int(flush_interval) == 0:
_ = self.flush_queued_events()
self.import_events = []
else:
# Import the remaining events in the queue.
if self.import_events:
_ = self.flush_queued_events()
return self.import_counter['events']
def flush_queued_events(self, retry_count=0):
"""Flush all queued events.
Returns:
dict: A dict object that contains the number of events
that were sent to Elastic as well as information
on whether there were any errors, and what the
details of these errors if any.
retry_count: optional int indicating whether this is a retry.
"""
if not self.import_events:
return {}
return_dict = {
'number_of_events': len(self.import_events) / 2,
'total_events': self.import_counter['events'],
}
try:
# pylint: disable=unexpected-keyword-arg
results = self.client.bulk(
body=self.import_events, timeout=self._request_timeout)
except (ConnectionTimeout, socket.timeout):
if retry_count >= self.DEFAULT_FLUSH_RETRY_LIMIT:
es_logger.error(
                    'Unable to add events, reached the retry limit.',
exc_info=True)
return {}
es_logger.error('Unable to add events (retry {0:d}/{1:d})'.format(
retry_count, self.DEFAULT_FLUSH_RETRY_LIMIT))
return self.flush_queued_events(retry_count + 1)
errors_in_upload = results.get('errors', False)
return_dict['errors_in_upload'] = errors_in_upload
if errors_in_upload:
items = results.get('items', [])
return_dict['errors'] = []
es_logger.error('Errors while attempting to upload events.')
for item in items:
index = item.get('index', {})
index_name = index.get('_index', 'N/A')
_ = self._error_container.setdefault(
index_name, {
'errors': [],
'types': Counter(),
'details': Counter()
}
)
error_counter = self._error_container[index_name]['types']
error_detail_counter = self._error_container[index_name][
'details']
error_list = self._error_container[index_name]['errors']
error = index.get('error', {})
status_code = index.get('status', 0)
doc_id = index.get('_id', '(unable to get doc id)')
caused_by = error.get('caused_by', {})
caused_reason = caused_by.get(
                    'reason', 'Unknown Detailed Reason')
error_counter[error.get('type')] += 1
detail_msg = '{0:s}/{1:s}'.format(
caused_by.get('type', 'Unknown Detailed Type'),
' '.join(caused_reason.split()[:5])
)
error_detail_counter[detail_msg] += 1
error_msg = '<{0:s}> {1:s} [{2:s}/{3:s}]'.format(
error.get('type', 'Unknown Type'),
error.get('reason', 'No reason given'),
caused_by.get('type', 'Unknown Type'),
caused_reason,
)
error_list.append(error_msg)
try:
es_logger.error(
'Unable to upload document: {0:s} to index {1:s} - '
'[{2:d}] {3:s}'.format(
doc_id, index_name, status_code, error_msg))
# We need to catch all exceptions here, since this is a crucial
# call that we do not want to break operation.
except Exception: # pylint: disable=broad-except
es_logger.error(
'Unable to upload document, and unable to log the '
'error itself.', exc_info=True)
return_dict['error_container'] = self._error_container
self.import_events = []
return return_dict
@property
def version(self):
"""Get Elasticsearch version.
Returns:
Version number as a string.
"""
version_info = self.client.info().get('version')
return version_info.get('number')
# === End of source file: timesketch/lib/datastores/elastic.py (repository: stevengoossensB/timesketch, license: Apache-2.0) ===
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from .mininode import *
from .blockstore import BlockStore, TxStore
from .util import p2p_port
'''
This is a tool for comparing two or more abosomds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
global mininode_lock
class RejectResult(object):
'''
Outcome that expects rejection of a transaction or block.
'''
def __init__(self, code, reason=b''):
self.code = code
self.reason = reason
def match(self, other):
if self.code != other.code:
return False
return other.reason.startswith(self.reason)
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
class TestNode(NodeConnCB):
def __init__(self, block_store, tx_store):
NodeConnCB.__init__(self)
self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
self.tx_store = tx_store
self.tx_request_map = {}
self.block_reject_map = {}
self.tx_reject_map = {}
# When the pingmap is non-empty we're waiting for
# a response
self.pingMap = {}
self.lastInv = []
self.closed = False
def on_close(self, conn):
self.closed = True
def add_connection(self, conn):
self.conn = conn
def on_headers(self, conn, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
def on_getheaders(self, conn, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
conn.send_message(response)
def on_getdata(self, conn, message):
[conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
[conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1:
self.tx_request_map[i.hash] = True
elif i.type == 2:
self.block_request_map[i.hash] = True
def on_inv(self, conn, message):
self.lastInv = [x.hash for x in message.inv]
def on_pong(self, conn, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
def on_reject(self, conn, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
self.block_reject_map[message.data] = RejectResult(message.code, message.reason)
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
self.conn.send_message(m)
def send_header(self, header):
m = msg_headers()
m.headers.append(header)
self.conn.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
self.conn.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
self.blocks_and_transactions = objects if objects else []
self.sync_every_block = sync_every_block
self.sync_every_tx = sync_every_tx
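# Illustrative sketch (not part of the original framework): a minimal test generator of the
# kind described in the comment block above. The block and transaction objects below are
# bare placeholders - a real test would build and solve them with helper code such as the
# framework's blocktools module before yielding - and the reject code 0x40 is only an
# example value.
class _ExampleTestGenerator(object):
    """Hypothetical get_tests() implementation for use with TestManager."""
    def get_tests(self):
        block_a = CBlock()        # placeholder block; would normally be fully populated
        tx_a = CTransaction()     # placeholder transaction
        # Expect block_a to be accepted as the new tip, syncing after the block.
        yield TestInstance([[block_a, True]])
        # Expect tx_a to be rejected with code 0x40, comparing mempools afterwards.
        yield TestInstance([[tx_a, RejectResult(0x40)]], sync_every_tx=True)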
class TestManager(object):
def __init__(self, testgen, datadir):
self.test_generator = testgen
self.connections = []
self.test_nodes = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
test_node = TestNode(self.block_store, self.tx_store)
self.test_nodes.append(test_node)
self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
# Make sure the TestNode (callback class) has a reference to its
# associated NodeConn
test_node.add_connection(self.connections[-1])
def clear_all_connections(self):
self.connections = []
self.test_nodes = []
def wait_for_disconnections(self):
def disconnected():
return all(node.closed for node in self.test_nodes)
return wait_until(disconnected, timeout=10)
def wait_for_verack(self):
def veracked():
return all(node.verack_received for node in self.test_nodes)
return wait_until(veracked, timeout=10)
def wait_for_pings(self, counter, timeout=float('inf')):
def received_pongs():
return all(node.received_ping_response(counter) for node in self.test_nodes)
return wait_until(received_pongs, timeout=timeout)
# sync_blocks: Wait for all connections to request the blockhash given
# then send get_headers to find out the tip of each node, and synchronize
# the response by using a ping (and waiting for pong with same nonce).
def sync_blocks(self, blockhash, num_blocks):
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(blocks_requested, attempts=20*num_blocks, sleep=0.1):
raise AssertionError("Not all nodes requested block")
# Send getheaders message
[ c.cb.send_getheaders() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter, timeout=300)
self.ping_counter += 1
# Analogous to sync_block (see above)
def sync_transaction(self, txhash, num_events):
# Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
for node in self.test_nodes
)
# --> error if not requested
if not wait_until(transaction_requested, attempts=20*num_events):
raise AssertionError("Not all nodes requested transaction")
# Get the mempool
[ c.cb.send_mempool() for c in self.connections ]
# Send ping and wait for response -- synchronization hack
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
[ c.cb.lastInv.sort() for c in self.connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
if c.cb.bestblockhash == blockhash:
return False
if blockhash not in c.cb.block_reject_map:
logger.error('Block not in reject map: %064x' % (blockhash))
return False
if not outcome.match(c.cb.block_reject_map[blockhash]):
logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
return False
elif ((c.cb.bestblockhash == blockhash) != outcome):
return False
return True
# Either check that the mempools all agree with each other, or that
# txhash's presence in the mempool matches the outcome specified.
# This is somewhat of a strange comparison, in that we're either comparing
# a particular tx to an outcome, or the entire mempools altogether;
# perhaps it would be useful to add the ability to check explicitly that
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
for c in self.connections:
if outcome is None:
# Make sure the mempools agree with each other
if c.cb.lastInv != self.connections[0].cb.lastInv:
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
if txhash in c.cb.lastInv:
return False
if txhash not in c.cb.tx_reject_map:
logger.error('Tx not in reject map: %064x' % (txhash))
return False
if not outcome.match(c.cb.tx_reject_map[txhash]):
logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
return False
elif ((txhash in c.cb.lastInv) != outcome):
return False
return True
def run(self):
# Wait until verack is received
self.wait_for_verack()
test_number = 1
for test_instance in self.test_generator.get_tests():
# We use these variables to keep track of the last block
# and last transaction in the tests, which are used
# if we're not syncing on every block or every tx.
[ block, block_outcome, tip ] = [ None, None, None ]
[ tx, tx_outcome ] = [ None, None ]
invqueue = []
for test_obj in test_instance.blocks_and_transactions:
b_or_t = test_obj[0]
outcome = test_obj[1]
# Determine if we're dealing with a block or tx
if isinstance(b_or_t, CBlock): # Block test runner
block = b_or_t
block_outcome = outcome
tip = block.sha256
# each test_obj can have an optional third argument
# to specify the tip we should compare with
# (default is to use the block being tested)
if len(test_obj) >= 3:
tip = test_obj[2]
# Add to shared block_store, set as current block
# If there was an open getdata request for the block
# previously, and we didn't have an entry in the
# block_store, then immediately deliver, because the
# node wouldn't send another getdata request while
# the earlier one is outstanding.
first_block_with_hash = True
if self.block_store.get(block.sha256) is not None:
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
for c in self.connections:
if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
c.cb.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
# if we expect success, send inv and sync every block
# if we expect failure, just push the block and see what happens.
if outcome == True:
[ c.cb.send_inv(block) for c in self.connections ]
self.sync_blocks(block.sha256, 1)
else:
[ c.send_message(msg_block(block)) for c in self.connections ]
[ c.cb.send_ping(self.ping_counter) for c in self.connections ]
self.wait_for_pings(self.ping_counter, timeout=300)
self.ping_counter += 1
if (not self.check_results(tip, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
block_header = CBlockHeader(block)
[ c.cb.send_header(block_header) for c in self.connections ]
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
[ c.cb.send_header(block_header) for c in self.connections ]
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
tx = b_or_t
tx_outcome = outcome
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
for c in self.connections:
c.cb.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
[ c.cb.send_inv(tx) for c in self.connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
else:
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
[ c.send_message(msg_inv(invqueue)) for c in self.connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
logger.info("Test %d: PASS" % test_number)
test_number += 1
[ c.disconnect_node() for c in self.connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
# === End of source file: qa/rpc-tests/test_framework/comptool.py (repository: 86b/Abosom, license: MIT) ===
# exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('C6A', ['C8pro'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 40000.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('C6A_0', 0.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('C6A_obs', C6A())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6pro(C3A=None), C6pro_0)
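# --- Editor's note: illustrative sketch, not part of the exported model file ---
# The file above only declares the model (monomers, parameters, rules, observables,
# initials). One common way to exercise a PySB model is the ScipyOdeSimulator; the
# helper below is never called here, and the 20000-step time span is an arbitrary
# example value, not taken from the original export.
def _simulate_example():
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator
    tspan = np.linspace(0, 20000, 100)
    result = ScipyOdeSimulator(model, tspan=tspan).run()
    # e.g. the amount of cleaved PARP at the final time point
    return result.observables['ParpC_obs'][-1]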
| 91.349515 | 710 | 0.806515 | [
"MIT"
] | LoLab-VU/Bayesian_Inference_of_Network_Dynamics | log_complete/model_160.py | 18,818 | Python |
# Copyright 2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.cloudformation import CloudFormationRequest
class ListStackResources(CloudFormationRequest):
DESCRIPTION = 'List all resources for a stack'
ARGS = [Arg('StackName', metavar='STACK',
help='name of the stack to list resources from (required)')]
LIST_TAGS = ['StackResourceSummaries']
def print_result(self, result):
for resource in result['StackResourceSummaries']:
self.print_resource(resource)
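# --- Editor's note: illustrative sketch, not part of the original command ---
# print_result() above expects the parsed response to carry a 'StackResourceSummaries'
# list (matching LIST_TAGS). The helper below shows a hypothetical payload of that
# shape; the summary keys are CloudFormation-style examples, not taken from this repo.
def _example_result_shape():
    return {
        'StackResourceSummaries': [
            {'LogicalResourceId': 'MyInstance',
             'ResourceType': 'AWS::EC2::Instance',
             'ResourceStatus': 'CREATE_COMPLETE'},
        ]
    }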
| 46.175 | 76 | 0.769356 | [
"BSD-2-Clause"
] | Juniper/euca2ools | euca2ools/commands/cloudformation/liststackresources.py | 1,847 | Python |
# encoding: utf-8
# ## Imports
from threading import local as __local
# Expose these as importable from the top-level `web.core` namespace.
from .application import Application
from .util import lazy
# ## Module Globals
__all__ = ['local', 'Application', 'lazy'] # Symbols exported by this package.
# This is to support the web.ext.local extension, and allow for early importing of the variable.
local = __local()
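# --- Editor's note: illustrative sketch, not part of the original module ---
# `local` above is a plain threading.local instance, so attributes assigned to it are
# visible only to the thread that set them. The (uncalled) helper below demonstrates
# that per-thread isolation using only the standard library:
def _thread_local_demo():
    import threading
    demo = threading.local()
    def worker(name):
        demo.value = name  # each thread sees only its own 'value'
        print(name, demo.value)
    threads = [threading.Thread(target=worker, args=(n,)) for n in ('a', 'b')]
    for t in threads:
        t.start()
    for t in threads:
        t.join()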
| 21.2 | 96 | 0.735849 | [
"MIT"
] | marrow/WebCore | web/core/__init__.py | 424 | Python |
import sys, os, json
version = (3,7)
assert sys.version_info >= version, "This script requires at least Python {0}.{1}".format(version[0],version[1])
# Game loop functions
def render(game,current):
''' Displays the current room '''
print('You are in the ' + game['rooms'][current]['name'])
print(game['rooms'][current]['desc'])
def getInput():
''' Asks the user for input and returns a stripped, uppercase version of what they typed '''
response = input('What would you like to do? ').strip().upper()
return response
def update(response,game,current):
''' Process the input and update the state of the world '''
for e in game['rooms'][current]['exits']:
if response == e['verb']:
current = e['target']
return current
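# --- Editor's note: illustrative sketch; house.json itself is not shown in this file ---
# render() and update() above expect each room to provide 'name', 'desc' and an 'exits'
# list of {'verb', 'target'} pairs, and main() starts at the 'START' key. A minimal
# hypothetical house.json with that shape (the rooms here are invented) would be:
EXAMPLE_GAME = {
    "rooms": {
        "START": {"name": "hallway", "desc": "A door leads NORTH.",
                  "exits": [{"verb": "NORTH", "target": "KITCHEN"}]},
        "KITCHEN": {"name": "kitchen", "desc": "Type SOUTH to go back.",
                    "exits": [{"verb": "SOUTH", "target": "START"}]}
    }
}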
def main():
game = {}
with open('house.json') as json_file:
game = json.load(json_file)
current = 'START'
quit = False
while not quit:
render(game,current)
response = getInput()
current = update(response,game,current)
if response == 'QUIT':
quit = True
if __name__ == '__main__':
main()
| 21 | 112 | 0.61039 | [
"MIT"
] | BraffordHunter/03-Text-Adventure-2 | main.py | 1,155 | Python |
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
#print(tf.__version__)
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
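# Note (editor): tf.train.AdamOptimizer above is the TensorFlow 1.x API. On a
# TensorFlow 2.x install that call no longer exists; the usual modern equivalent is
# optimizer='adam' or tf.keras.optimizers.Adam() in model.compile().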
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
print(predictions[0])
"""num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
plt.show()
"""
| 26.292929 | 84 | 0.684211 | [
"MIT"
] | VitorGDellino/Neural-Network | MNIST/mnist.py | 2,603 | Python |
"""Test all API endpoints.
This test class exercises all client facing APIs. It is also usesful as a tool for
demonstrating how to interact with the various APIs.
"""
import json
import pytest
from expects import (be, be_above, be_above_or_equal, contain, equal, expect,
raise_error)
from flask import Response
from tests.utils import post_users
from authserver.db import User, db
ROLES = [
{
'role': 'get:programs',
'description': 'Get from programs data resource',
'rules': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 'value4'
}
},
{
'role': 'administer:programs',
'description': 'All access on programs data resource',
'rules': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 'value4'
}
},
{
'role': 'edit:providers',
'description': 'Edit providers only'
},
{
'role': 'view:providers',
'description': 'View providers only',
'rules': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
'key4': 'value4'
}
}
]
USERS = [
{
'username': 'user1',
'password': 'password',
'person_id': 'c0ffee-c0ffee-1'
},
{
'username': 'user2',
'password': 'password',
'person_id': 'c0ffee-c0ffee-2'
},
{
'username': 'user3',
'password': 'password',
'person_id': 'c0ffee-c0ffee-3'
},
{
'username': 'user4',
'password': 'password',
'person_id': 'c0ffee-c0ffee-4'
},
{
'username': 'user5',
'password': 'password',
'person_id': 'c0ffee-c0ffee-5'
},
{
'username': 'user6',
'password': 'password',
'person_id': 'c0ffee-c0ffee-6'
},
{
'username': 'user7',
'password': 'password',
'person_id': 'c0ffee-c0ffee-7'
},
{
'username': 'user8',
'password': 'password',
'person_id': 'c0ffee-c0ffee-8'
}
]
CLIENTS = [
{
'client_name': 'test client 1',
'user_id': ''
},
{
'client_name': 'test client 2',
'user_id': ''
},
{
'client_name': 'test client 3',
'user_id': ''
},
{
'client_name': 'test client 4',
'user_id': ''
},
{
'client_name': 'test client 5',
'user_id': ''
},
{
'client_name': 'test client6',
'user_id': ''
},
{
'client_name': 'test client 7',
'user_id': ''
},
{
'client_name': 'test client 8',
'user_id': ''
}
]
class TestAllAPIs(object):
def test_all_apis(self, client, token_generator):
# Common headers go in this dict
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
# Create users and clients
user_ids = post_users(USERS, client, token_generator.get_token(client))
client_ids = self._post_clients(client, user_ids, token_generator)
# Create roles
role_ids = []
for role in ROLES:
response = client.post(
'/roles', data=json.dumps(role), headers=headers)
expect(response.status_code).to(equal(201))
role_ids.append(response.json['response'][0]['id'])
# Assign clients to users and roles to clients
for i, client_id in enumerate(client_ids):
request_body = {
'user_id': user_ids[i],
'roles': role_ids
}
response = client.patch(
'/clients/{}'.format(client_id), data=json.dumps(request_body), headers=headers)
expect(response.status_code).to(equal(200))
# Ensure that clients actually have roles, users, and other crucial fields
for client_id in client_ids:
response = client.get(
'/clients/{}'.format(client_id), headers=headers)
result = response.json['response']
expect(result['id']).to(equal(client_id))
expect(result['client_id_issued_at']).to(be_above(0))
expect(user_ids).to(contain(result['user_id']))
expect(len(result['roles'])).to(equal(len(role_ids)))
self._cleanup(client, token_generator,
user_ids=user_ids, role_ids=role_ids)
def test_client_secret_delete_rotate(self, client, token_generator):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
user_ids = post_users(USERS, client, token_generator.get_token(client))
client_ids = self._post_clients(client, user_ids, token_generator)
client_to_patch = client_ids[0]
response = client.post('/clients?action=delete_secret',
data=json.dumps({"id": client_to_patch}), headers=headers)
expect(response.status_code).to(equal(200))
response = client.get('/clients/{}'.format(client_to_patch), headers=headers)
expect(response.json['response']['client_secret']).to(equal(None))
response = client.post('/clients?action=rotate_secret',
data=json.dumps({"id": client_to_patch}), headers=headers)
expect(response.status_code).to(equal(200))
response = client.get('/clients/{}'.format(client_to_patch), headers=headers)
expect(len(response.json['response']['client_secret'])).to(equal(48))
self._cleanup(client, token_generator, user_ids=user_ids)
def test_client_post_invalid_action(self, client, token_generator):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
user_ids = post_users(USERS, client, token_generator.get_token(client))
client_ids = self._post_clients(client, user_ids, token_generator)
client_to_patch = client_ids[0]
response = client.post('/clients?action=some_invalid_action',
data=json.dumps({"id": client_to_patch}), headers=headers)
expect(response.status_code).to(equal(422))
expect(response.json['messages']).to(contain("Invalid query param!"))
self._cleanup(client, token_generator, user_ids=user_ids)
def _post_clients(self, client, user_ids, token_generator):
'''
Helper function that creates (and tests creating) a collection of Clients.
'''
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
client_ids = []
for i, api_client in enumerate(CLIENTS):
api_client['user_id'] = user_ids[i]
response = client.post('/clients', data=json.dumps(api_client), headers=headers)
expect(response.status_code).to(equal(201))
client_ids.append(response.json['response'][0]['id'])
expect(len(client_ids)).to(equal(8))
return client_ids
def _cleanup(self, client, token_generator, role_ids=[], user_ids=[]):
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
for role_id in role_ids:
response = client.delete(
'/roles/{}'.format(role_id), headers=headers)
expect(response.status_code).to(equal(200))
for user_id in user_ids:
response = client.delete(
'/users/{}'.format(user_id), headers=headers)
expect(response.status_code).to(equal(200))
def test_assign_scope_to_user(self, client, token_generator):
CLIENT = {
}
USER = {
'username': 'test_user_scope',
'password': 'secret',
'person_id': 'c0ffee-c0ffee-c0ffee-99',
'role_id': ''
}
ROLE = {
'role': 'Administrator',
'description': 'An administrative user role.'
}
SCOPE = {
'scope': 'action:do-all-the-things',
'description': 'A scope that grants the holder superpowers'
}
headers = {'content-type': 'application/json', 'authorization': f'bearer {token_generator.get_token(client)}'}
# Create a role
response = client.post('/roles', data=json.dumps(ROLE), headers=headers)
expect(response.status_code).to(be(201))
role_id = response.json['response'][0]['id']
# Create a scope
response = client.post('/scopes', data=json.dumps(SCOPE), headers=headers)
expect(response.status_code).to(be(201))
scope_id = response.json['response'][0]['id']
# Bind the scope to the role
response = client.post(f'/roles/{role_id}/scopes', data=json.dumps({'scope_id': scope_id}), headers=headers)
expect(response.status_code).to(be(201))
# Create a user and make the user an administrator
USER['role_id'] = role_id
response = client.post('/users', data=json.dumps(USER), headers=headers)
expect(response.status_code).to(be(201))
user_id = response.json['response'][0]['id']
# Cleanup
response = client.delete(f'/users/{user_id}', headers=headers)
expect(response.status_code).to(be(200))
response = client.delete(f'/roles/{role_id}/scopes/{scope_id}', headers=headers)
expect(response.status_code).to(be(200))
response = client.delete(f'/roles/{role_id}', headers=headers)
expect(response.status_code).to(be(200))
response = client.delete(f'/scopes/{scope_id}', headers=headers)
expect(response.status_code).to(be(200))
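# --- Editor's note: illustrative sketch, not part of the original test module ---
# The tests above double as usage documentation: JSON bodies are POSTed to /roles,
# /scopes, /users and /clients with a bearer token in the Authorization header. Outside
# the Flask test client, the same call could be made with `requests`; the base URL and
# token below are placeholders, not values from this repository.
def _post_role_example(base_url='http://localhost:8000', token='<access-token>'):
    import requests
    headers = {'content-type': 'application/json',
               'authorization': f'bearer {token}'}
    new_role = {'role': 'get:programs', 'description': 'Read-only access to programs'}
    resp = requests.post(f'{base_url}/roles', json=new_role, headers=headers)
    return resp.status_code, resp.json()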
| 34.031034 | 118 | 0.580201 | [
"MIT"
] | brighthive/authserver | tests/api/test_all_apis.py | 9,869 | Python |
from typing import List
def twoSum(nums: List[int], target: int) -> List[int]:
length = len(nums)
for i,v1 in enumerate(nums):
sliced = nums[i + 1: length]
for j,v2 in enumerate(sliced):
result = v1 + v2
if result == target:
return [i, i+j+1]
return []
result = twoSum([6, 1, 4, 5], 7)
assert result == [0, 1]
result2 = twoSum([2, 8, 4, 5], 13)
assert result2 == [1, 3]
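# --- Editor's note: illustrative alternative, not part of the original exercise ---
# twoSum above is the O(n^2) brute force, which is why the huge test case below takes a
# while. The standard O(n) alternative keeps a value -> index map of numbers already seen:
def twoSumFast(nums: List[int], target: int) -> List[int]:
    seen = {}
    for i, v in enumerate(nums):
        if target - v in seen:  # the complement appeared earlier in the list
            return [seen[target - v], i]
        seen[v] = i
    return []

assert twoSumFast([6, 1, 4, 5], 7) == [0, 1]
assert twoSumFast([2, 8, 4, 5], 13) == [1, 3]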
result3 = twoSum(
[0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62,64,66,68,70,72,74,76,78,80,82,84,86,88,90,92,94,96,98,100,102,104,106,108,110,112,114,116,118,120,122,124,126,128,130,132,134,136,138,140,142,144,146,148,150,152,154,156,158,160,162,164,166,168,170,172,174,176,178,180,182,184,186,188,190,192,194,196,198,200,202,204,206,208,210,212,214,216,218,220,222,224,226,228,230,232,234,236,238,240,242,244,246,248,250,252,254,256,258,260,262,264,266,268,270,272,274,276,278,280,282,284,286,288,290,292,294,296,298,300,302,304,306,308,310,312,314,316,318,320,322,324,326,328,330,332,334,336,338,340,342,344,346,348,350,352,354,356,358,360,362,364,366,368,370,372,374,376,378,380,382,384,386,388,390,392,394,396,398,400,402,404,406,408,410,412,414,416,418,420,422,424,426,428,430,432,434,436,438,440,442,444,446,448,450,452,454,456,458,460,462,464,466,468,470,472,474,476,478,480,482,484,486,488,490,492,494,496,498,500,502,504,506,508,510,512,514,516,518,520,522,524,526,528,530,532,534,536,538,540,542,544,546,548,550,552,554,556,558,560,562,564,566,568,570,572,574,576,578,580,582,584,586,588,590,592,594,596,598,600,602,604,606,608,610,612,614,616,618,620,622,624,626,628,630,632,634,636,638,640,642,644,646,648,650,652,654,656,658,660,662,664,666,668,670,672,674,676,678,680,682,684,686,688,690,692,694,696,698,700,702,704,706,708,710,712,714,716,718,720,722,724,726,728,730,732,734,736,738,740,742,744,746,748,750,752,754,756,758,760,762,764,766,768,770,772,774,776,778,780,782,784,786,788,790,792,794,796,798,800,802,804,806,808,810,812,814,816,818,820,822,824,826,828,830,832,834,836,838,840,842,844,846,848,850,852,854,856,858,860,862,864,866,868,870,872,874,876,878,880,882,884,886,888,890,892,894,896,898,900,902,904,906,908,910,912,914,916,918,920,922,924,926,928,930,932,934,936,938,940,942,944,946,948,950,952,954,956,958,960,962,964,966,968,970,972,974,976,978,980,982,984,986,988,990,992,994,996,998,1000,1002,1004,1006,1008,1010,1012,1014,1016,1018,1020,1022,1024,1026,1028,1030,1032,1034,1036,1038,1040,1042,1044,1046,1048,1050,1052,1054,1056,1058,1060,1062,1064,1066,1068,1070,1072,1074,1076,1078,1080,1082,1084,1086,1088,1090,1092,1094,1096,1098,1100,1102,1104,1106,1108,1110,1112,1114,1116,1118,1120,1122,1124,1126,1128,1130,1132,1134,1136,1138,1140,1142,1144,1146,1148,1150,1152,1154,1156,1158,1160,1162,1164,1166,1168,1170,1172,1174,1176,1178,1180,1182,1184,1186,1188,1190,1192,1194,1196,1198,1200,1202,1204,1206,1208,1210,1212,1214,1216,1218,1220,1222,1224,1226,1228,1230,1232,1234,1236,1238,1240,1242,1244,1246,1248,1250,1252,1254,1256,1258,1260,1262,1264,1266,1268,1270,1272,1274,1276,1278,1280,1282,1284,1286,1288,1290,1292,1294,1296,1298,1300,1302,1304,1306,1308,1310,1312,1314,1316,1318,1320,1322,1324,1326,1328,1330,1332,1334,1336,1338,1340,1342,1344,1346,1348,1350,1352,1354,1356,1358,1360,1362,1364,1366,1368,1370,1372,1374,1376,1378,1380,1382,1384,1386,1388,1390,1392,1394,1396,1398,1400,1402,1404,1406,1408,1410,1412,1414,1416,1418,1420,1422,1424,1426,1428,1430,1432,1434,1436,1438,1440,1442,1444,1446,1448,1450,1452,1454,1456,1458,1460,1462,1464,1466,1468,1470,1472,1474,1476,1478,1480,1482,1484,1486,1488,1490,1492,1494,1496,1498,1500,1502,1504,1506,1508,1510,1512,1514,1516,1518,1520,1522,1524,1526,1528,1530,1532,1534,1536,1538,1540,1542,1544,1546,1548,1550,1552,1554,1556,1558,1560,1562,1564,1566,1568,1570,1572,1574,1576,1578,1580,1582,1584,1586,1588,1590,1592,1594,1596,1598,1600,1602,1604,1606,1608,1610,1612,1614,1616,1618,1620,1622,1624,1626,1628,1630,1632,1634,1636,1638,1640,1642
,1644,1646,1648,1650,1652,1654,1656,1658,1660,1662,1664,1666,1668,1670,1672,1674,1676,1678,1680,1682,1684,1686,1688,1690,1692,1694,1696,1698,1700,1702,1704,1706,1708,1710,1712,1714,1716,1718,1720,1722,1724,1726,1728,1730,1732,1734,1736,1738,1740,1742,1744,1746,1748,1750,1752,1754,1756,1758,1760,1762,1764,1766,1768,1770,1772,1774,1776,1778,1780,1782,1784,1786,1788,1790,1792,1794,1796,1798,1800,1802,1804,1806,1808,1810,1812,1814,1816,1818,1820,1822,1824,1826,1828,1830,1832,1834,1836,1838,1840,1842,1844,1846,1848,1850,1852,1854,1856,1858,1860,1862,1864,1866,1868,1870,1872,1874,1876,1878,1880,1882,1884,1886,1888,1890,1892,1894,1896,1898,1900,1902,1904,1906,1908,1910,1912,1914,1916,1918,1920,1922,1924,1926,1928,1930,1932,1934,1936,1938,1940,1942,1944,1946,1948,1950,1952,1954,1956,1958,1960,1962,1964,1966,1968,1970,1972,1974,1976,1978,1980,1982,1984,1986,1988,1990,1992,1994,1996,1998,2000,2002,2004,2006,2008,2010,2012,2014,2016,2018,2020,2022,2024,2026,2028,2030,2032,2034,2036,2038,2040,2042,2044,2046,2048,2050,2052,2054,2056,2058,2060,2062,2064,2066,2068,2070,2072,2074,2076,2078,2080,2082,2084,2086,2088,2090,2092,2094,2096,2098,2100,2102,2104,2106,2108,2110,2112,2114,2116,2118,2120,2122,2124,2126,2128,2130,2132,2134,2136,2138,2140,2142,2144,2146,2148,2150,2152,2154,2156,2158,2160,2162,2164,2166,2168,2170,2172,2174,2176,2178,2180,2182,2184,2186,2188,2190,2192,2194,2196,2198,2200,2202,2204,2206,2208,2210,2212,2214,2216,2218,2220,2222,2224,2226,2228,2230,2232,2234,2236,2238,2240,2242,2244,2246,2248,2250,2252,2254,2256,2258,2260,2262,2264,2266,2268,2270,2272,2274,2276,2278,2280,2282,2284,2286,2288,2290,2292,2294,2296,2298,2300,2302,2304,2306,2308,2310,2312,2314,2316,2318,2320,2322,2324,2326,2328,2330,2332,2334,2336,2338,2340,2342,2344,2346,2348,2350,2352,2354,2356,2358,2360,2362,2364,2366,2368,2370,2372,2374,2376,2378,2380,2382,2384,2386,2388,2390,2392,2394,2396,2398,2400,2402,2404,2406,2408,2410,2412,2414,2416,2418,2420,2422,2424,2426,2428,2430,2432,2434,2436,2438,2440,2442,2444,2446,2448,2450,2452,2454,2456,2458,2460,2462,2464,2466,2468,2470,2472,2474,2476,2478,2480,2482,2484,2486,2488,2490,2492,2494,2496,2498,2500,2502,2504,2506,2508,2510,2512,2514,2516,2518,2520,2522,2524,2526,2528,2530,2532,2534,2536,2538,2540,2542,2544,2546,2548,2550,2552,2554,2556,2558,2560,2562,2564,2566,2568,2570,2572,2574,2576,2578,2580,2582,2584,2586,2588,2590,2592,2594,2596,2598,2600,2602,2604,2606,2608,2610,2612,2614,2616,2618,2620,2622,2624,2626,2628,2630,2632,2634,2636,2638,2640,2642,2644,2646,2648,2650,2652,2654,2656,2658,2660,2662,2664,2666,2668,2670,2672,2674,2676,2678,2680,2682,2684,2686,2688,2690,2692,2694,2696,2698,2700,2702,2704,2706,2708,2710,2712,2714,2716,2718,2720,2722,2724,2726,2728,2730,2732,2734,2736,2738,2740,2742,2744,2746,2748,2750,2752,2754,2756,2758,2760,2762,2764,2766,2768,2770,2772,2774,2776,2778,2780,2782,2784,2786,2788,2790,2792,2794,2796,2798,2800,2802,2804,2806,2808,2810,2812,2814,2816,2818,2820,2822,2824,2826,2828,2830,2832,2834,2836,2838,2840,2842,2844,2846,2848,2850,2852,2854,2856,2858,2860,2862,2864,2866,2868,2870,2872,2874,2876,2878,2880,2882,2884,2886,2888,2890,2892,2894,2896,2898,2900,2902,2904,2906,2908,2910,2912,2914,2916,2918,2920,2922,2924,2926,2928,2930,2932,2934,2936,2938,2940,2942,2944,2946,2948,2950,2952,2954,2956,2958,2960,2962,2964,2966,2968,2970,2972,2974,2976,2978,2980,2982,2984,2986,2988,2990,2992,2994,2996,2998,3000,3002,3004,3006,3008,3010,3012,3014,3016,3018,3020,3022,3024,3026,3028,3030,3032,3034,3036,3038,3040,3042,3044,3046,3048,3050,3052,3054,3056,3058,3060,3062,3064
,3066,3068,3070,3072,3074,3076,3078,3080,3082,3084,3086,3088,3090,3092,3094,3096,3098,3100,3102,3104,3106,3108,3110,3112,3114,3116,3118,3120,3122,3124,3126,3128,3130,3132,3134,3136,3138,3140,3142,3144,3146,3148,3150,3152,3154,3156,3158,3160,3162,3164,3166,3168,3170,3172,3174,3176,3178,3180,3182,3184,3186,3188,3190,3192,3194,3196,3198,3200,3202,3204,3206,3208,3210,3212,3214,3216,3218,3220,3222,3224,3226,3228,3230,3232,3234,3236,3238,3240,3242,3244,3246,3248,3250,3252,3254,3256,3258,3260,3262,3264,3266,3268,3270,3272,3274,3276,3278,3280,3282,3284,3286,3288,3290,3292,3294,3296,3298,3300,3302,3304,3306,3308,3310,3312,3314,3316,3318,3320,3322,3324,3326,3328,3330,3332,3334,3336,3338,3340,3342,3344,3346,3348,3350,3352,3354,3356,3358,3360,3362,3364,3366,3368,3370,3372,3374,3376,3378,3380,3382,3384,3386,3388,3390,3392,3394,3396,3398,3400,3402,3404,3406,3408,3410,3412,3414,3416,3418,3420,3422,3424,3426,3428,3430,3432,3434,3436,3438,3440,3442,3444,3446,3448,3450,3452,3454,3456,3458,3460,3462,3464,3466,3468,3470,3472,3474,3476,3478,3480,3482,3484,3486,3488,3490,3492,3494,3496,3498,3500,3502,3504,3506,3508,3510,3512,3514,3516,3518,3520,3522,3524,3526,3528,3530,3532,3534,3536,3538,3540,3542,3544,3546,3548,3550,3552,3554,3556,3558,3560,3562,3564,3566,3568,3570,3572,3574,3576,3578,3580,3582,3584,3586,3588,3590,3592,3594,3596,3598,3600,3602,3604,3606,3608,3610,3612,3614,3616,3618,3620,3622,3624,3626,3628,3630,3632,3634,3636,3638,3640,3642,3644,3646,3648,3650,3652,3654,3656,3658,3660,3662,3664,3666,3668,3670,3672,3674,3676,3678,3680,3682,3684,3686,3688,3690,3692,3694,3696,3698,3700,3702,3704,3706,3708,3710,3712,3714,3716,3718,3720,3722,3724,3726,3728,3730,3732,3734,3736,3738,3740,3742,3744,3746,3748,3750,3752,3754,3756,3758,3760,3762,3764,3766,3768,3770,3772,3774,3776,3778,3780,3782,3784,3786,3788,3790,3792,3794,3796,3798,3800,3802,3804,3806,3808,3810,3812,3814,3816,3818,3820,3822,3824,3826,3828,3830,3832,3834,3836,3838,3840,3842,3844,3846,3848,3850,3852,3854,3856,3858,3860,3862,3864,3866,3868,3870,3872,3874,3876,3878,3880,3882,3884,3886,3888,3890,3892,3894,3896,3898,3900,3902,3904,3906,3908,3910,3912,3914,3916,3918,3920,3922,3924,3926,3928,3930,3932,3934,3936,3938,3940,3942,3944,3946,3948,3950,3952,3954,3956,3958,3960,3962,3964,3966,3968,3970,3972,3974,3976,3978,3980,3982,3984,3986,3988,3990,3992,3994,3996,3998,4000,4002,4004,4006,4008,4010,4012,4014,4016,4018,4020,4022,4024,4026,4028,4030,4032,4034,4036,4038,4040,4042,4044,4046,4048,4050,4052,4054,4056,4058,4060,4062,4064,4066,4068,4070,4072,4074,4076,4078,4080,4082,4084,4086,4088,4090,4092,4094,4096,4098,4100,4102,4104,4106,4108,4110,4112,4114,4116,4118,4120,4122,4124,4126,4128,4130,4132,4134,4136,4138,4140,4142,4144,4146,4148,4150,4152,4154,4156,4158,4160,4162,4164,4166,4168,4170,4172,4174,4176,4178,4180,4182,4184,4186,4188,4190,4192,4194,4196,4198,4200,4202,4204,4206,4208,4210,4212,4214,4216,4218,4220,4222,4224,4226,4228,4230,4232,4234,4236,4238,4240,4242,4244,4246,4248,4250,4252,4254,4256,4258,4260,4262,4264,4266,4268,4270,4272,4274,4276,4278,4280,4282,4284,4286,4288,4290,4292,4294,4296,4298,4300,4302,4304,4306,4308,4310,4312,4314,4316,4318,4320,4322,4324,4326,4328,4330,4332,4334,4336,4338,4340,4342,4344,4346,4348,4350,4352,4354,4356,4358,4360,4362,4364,4366,4368,4370,4372,4374,4376,4378,4380,4382,4384,4386,4388,4390,4392,4394,4396,4398,4400,4402,4404,4406,4408,4410,4412,4414,4416,4418,4420,4422,4424,4426,4428,4430,4432,4434,4436,4438,4440,4442,4444,4446,4448,4450,4452,4454,4456,4458,4460,4462,4464,4466,4468,4470,4472,4474,4476,4478,4480,4482,4484,4486
,4488,4490,4492,4494,4496,4498,4500,4502,4504,4506,4508,4510,4512,4514,4516,4518,4520,4522,4524,4526,4528,4530,4532,4534,4536,4538,4540,4542,4544,4546,4548,4550,4552,4554,4556,4558,4560,4562,4564,4566,4568,4570,4572,4574,4576,4578,4580,4582,4584,4586,4588,4590,4592,4594,4596,4598,4600,4602,4604,4606,4608,4610,4612,4614,4616,4618,4620,4622,4624,4626,4628,4630,4632,4634,4636,4638,4640,4642,4644,4646,4648,4650,4652,4654,4656,4658,4660,4662,4664,4666,4668,4670,4672,4674,4676,4678,4680,4682,4684,4686,4688,4690,4692,4694,4696,4698,4700,4702,4704,4706,4708,4710,4712,4714,4716,4718,4720,4722,4724,4726,4728,4730,4732,4734,4736,4738,4740,4742,4744,4746,4748,4750,4752,4754,4756,4758,4760,4762,4764,4766,4768,4770,4772,4774,4776,4778,4780,4782,4784,4786,4788,4790,4792,4794,4796,4798,4800,4802,4804,4806,4808,4810,4812,4814,4816,4818,4820,4822,4824,4826,4828,4830,4832,4834,4836,4838,4840,4842,4844,4846,4848,4850,4852,4854,4856,4858,4860,4862,4864,4866,4868,4870,4872,4874,4876,4878,4880,4882,4884,4886,4888,4890,4892,4894,4896,4898,4900,4902,4904,4906,4908,4910,4912,4914,4916,4918,4920,4922,4924,4926,4928,4930,4932,4934,4936,4938,4940,4942,4944,4946,4948,4950,4952,4954,4956,4958,4960,4962,4964,4966,4968,4970,4972,4974,4976,4978,4980,4982,4984,4986,4988,4990,4992,4994,4996,4998,5000,5002,5004,5006,5008,5010,5012,5014,5016,5018,5020,5022,5024,5026,5028,5030,5032,5034,5036,5038,5040,5042,5044,5046,5048,5050,5052,5054,5056,5058,5060,5062,5064,5066,5068,5070,5072,5074,5076,5078,5080,5082,5084,5086,5088,5090,5092,5094,5096,5098,5100,5102,5104,5106,5108,5110,5112,5114,5116,5118,5120,5122,5124,5126,5128,5130,5132,5134,5136,5138,5140,5142,5144,5146,5148,5150,5152,5154,5156,5158,5160,5162,5164,5166,5168,5170,5172,5174,5176,5178,5180,5182,5184,5186,5188,5190,5192,5194,5196,5198,5200,5202,5204,5206,5208,5210,5212,5214,5216,5218,5220,5222,5224,5226,5228,5230,5232,5234,5236,5238,5240,5242,5244,5246,5248,5250,5252,5254,5256,5258,5260,5262,5264,5266,5268,5270,5272,5274,5276,5278,5280,5282,5284,5286,5288,5290,5292,5294,5296,5298,5300,5302,5304,5306,5308,5310,5312,5314,5316,5318,5320,5322,5324,5326,5328,5330,5332,5334,5336,5338,5340,5342,5344,5346,5348,5350,5352,5354,5356,5358,5360,5362,5364,5366,5368,5370,5372,5374,5376,5378,5380,5382,5384,5386,5388,5390,5392,5394,5396,5398,5400,5402,5404,5406,5408,5410,5412,5414,5416,5418,5420,5422,5424,5426,5428,5430,5432,5434,5436,5438,5440,5442,5444,5446,5448,5450,5452,5454,5456,5458,5460,5462,5464,5466,5468,5470,5472,5474,5476,5478,5480,5482,5484,5486,5488,5490,5492,5494,5496,5498,5500,5502,5504,5506,5508,5510,5512,5514,5516,5518,5520,5522,5524,5526,5528,5530,5532,5534,5536,5538,5540,5542,5544,5546,5548,5550,5552,5554,5556,5558,5560,5562,5564,5566,5568,5570,5572,5574,5576,5578,5580,5582,5584,5586,5588,5590,5592,5594,5596,5598,5600,5602,5604,5606,5608,5610,5612,5614,5616,5618,5620,5622,5624,5626,5628,5630,5632,5634,5636,5638,5640,5642,5644,5646,5648,5650,5652,5654,5656,5658,5660,5662,5664,5666,5668,5670,5672,5674,5676,5678,5680,5682,5684,5686,5688,5690,5692,5694,5696,5698,5700,5702,5704,5706,5708,5710,5712,5714,5716,5718,5720,5722,5724,5726,5728,5730,5732,5734,5736,5738,5740,5742,5744,5746,5748,5750,5752,5754,5756,5758,5760,5762,5764,5766,5768,5770,5772,5774,5776,5778,5780,5782,5784,5786,5788,5790,5792,5794,5796,5798,5800,5802,5804,5806,5808,5810,5812,5814,5816,5818,5820,5822,5824,5826,5828,5830,5832,5834,5836,5838,5840,5842,5844,5846,5848,5850,5852,5854,5856,5858,5860,5862,5864,5866,5868,5870,5872,5874,5876,5878,5880,5882,5884,5886,5888,5890,5892,5894,5896,5898,5900,5902,5904,5906,5908
,5910,5912,5914,5916,5918,5920,5922,5924,5926,5928,5930,5932,5934,5936,5938,5940,5942,5944,5946,5948,5950,5952,5954,5956,5958,5960,5962,5964,5966,5968,5970,5972,5974,5976,5978,5980,5982,5984,5986,5988,5990,5992,5994,5996,5998,6000,6002,6004,6006,6008,6010,6012,6014,6016,6018,6020,6022,6024,6026,6028,6030,6032,6034,6036,6038,6040,6042,6044,6046,6048,6050,6052,6054,6056,6058,6060,6062,6064,6066,6068,6070,6072,6074,6076,6078,6080,6082,6084,6086,6088,6090,6092,6094,6096,6098,6100,6102,6104,6106,6108,6110,6112,6114,6116,6118,6120,6122,6124,6126,6128,6130,6132,6134,6136,6138,6140,6142,6144,6146,6148,6150,6152,6154,6156,6158,6160,6162,6164,6166,6168,6170,6172,6174,6176,6178,6180,6182,6184,6186,6188,6190,6192,6194,6196,6198,6200,6202,6204,6206,6208,6210,6212,6214,6216,6218,6220,6222,6224,6226,6228,6230,6232,6234,6236,6238,6240,6242,6244,6246,6248,6250,6252,6254,6256,6258,6260,6262,6264,6266,6268,6270,6272,6274,6276,6278,6280,6282,6284,6286,6288,6290,6292,6294,6296,6298,6300,6302,6304,6306,6308,6310,6312,6314,6316,6318,6320,6322,6324,6326,6328,6330,6332,6334,6336,6338,6340,6342,6344,6346,6348,6350,6352,6354,6356,6358,6360,6362,6364,6366,6368,6370,6372,6374,6376,6378,6380,6382,6384,6386,6388,6390,6392,6394,6396,6398,6400,6402,6404,6406,6408,6410,6412,6414,6416,6418,6420,6422,6424,6426,6428,6430,6432,6434,6436,6438,6440,6442,6444,6446,6448,6450,6452,6454,6456,6458,6460,6462,6464,6466,6468,6470,6472,6474,6476,6478,6480,6482,6484,6486,6488,6490,6492,6494,6496,6498,6500,6502,6504,6506,6508,6510,6512,6514,6516,6518,6520,6522,6524,6526,6528,6530,6532,6534,6536,6538,6540,6542,6544,6546,6548,6550,6552,6554,6556,6558,6560,6562,6564,6566,6568,6570,6572,6574,6576,6578,6580,6582,6584,6586,6588,6590,6592,6594,6596,6598,6600,6602,6604,6606,6608,6610,6612,6614,6616,6618,6620,6622,6624,6626,6628,6630,6632,6634,6636,6638,6640,6642,6644,6646,6648,6650,6652,6654,6656,6658,6660,6662,6664,6666,6668,6670,6672,6674,6676,6678,6680,6682,6684,6686,6688,6690,6692,6694,6696,6698,6700,6702,6704,6706,6708,6710,6712,6714,6716,6718,6720,6722,6724,6726,6728,6730,6732,6734,6736,6738,6740,6742,6744,6746,6748,6750,6752,6754,6756,6758,6760,6762,6764,6766,6768,6770,6772,6774,6776,6778,6780,6782,6784,6786,6788,6790,6792,6794,6796,6798,6800,6802,6804,6806,6808,6810,6812,6814,6816,6818,6820,6822,6824,6826,6828,6830,6832,6834,6836,6838,6840,6842,6844,6846,6848,6850,6852,6854,6856,6858,6860,6862,6864,6866,6868,6870,6872,6874,6876,6878,6880,6882,6884,6886,6888,6890,6892,6894,6896,6898,6900,6902,6904,6906,6908,6910,6912,6914,6916,6918,6920,6922,6924,6926,6928,6930,6932,6934,6936,6938,6940,6942,6944,6946,6948,6950,6952,6954,6956,6958,6960,6962,6964,6966,6968,6970,6972,6974,6976,6978,6980,6982,6984,6986,6988,6990,6992,6994,6996,6998,7000,7002,7004,7006,7008,7010,7012,7014,7016,7018,7020,7022,7024,7026,7028,7030,7032,7034,7036,7038,7040,7042,7044,7046,7048,7050,7052,7054,7056,7058,7060,7062,7064,7066,7068,7070,7072,7074,7076,7078,7080,7082,7084,7086,7088,7090,7092,7094,7096,7098,7100,7102,7104,7106,7108,7110,7112,7114,7116,7118,7120,7122,7124,7126,7128,7130,7132,7134,7136,7138,7140,7142,7144,7146,7148,7150,7152,7154,7156,7158,7160,7162,7164,7166,7168,7170,7172,7174,7176,7178,7180,7182,7184,7186,7188,7190,7192,7194,7196,7198,7200,7202,7204,7206,7208,7210,7212,7214,7216,7218,7220,7222,7224,7226,7228,7230,7232,7234,7236,7238,7240,7242,7244,7246,7248,7250,7252,7254,7256,7258,7260,7262,7264,7266,7268,7270,7272,7274,7276,7278,7280,7282,7284,7286,7288,7290,7292,7294,7296,7298,7300,7302,7304,7306,7308,7310,7312,7314,7316,7318,7320,7322,7324,7326,7328,7330
,7332,7334,7336,7338,7340,7342,7344,7346,7348,7350,7352,7354,7356,7358,7360,7362,7364,7366,7368,7370,7372,7374,7376,7378,7380,7382,7384,7386,7388,7390,7392,7394,7396,7398,7400,7402,7404,7406,7408,7410,7412,7414,7416,7418,7420,7422,7424,7426,7428,7430,7432,7434,7436,7438,7440,7442,7444,7446,7448,7450,7452,7454,7456,7458,7460,7462,7464,7466,7468,7470,7472,7474,7476,7478,7480,7482,7484,7486,7488,7490,7492,7494,7496,7498,7500,7502,7504,7506,7508,7510,7512,7514,7516,7518,7520,7522,7524,7526,7528,7530,7532,7534,7536,7538,7540,7542,7544,7546,7548,7550,7552,7554,7556,7558,7560,7562,7564,7566,7568,7570,7572,7574,7576,7578,7580,7582,7584,7586,7588,7590,7592,7594,7596,7598,7600,7602,7604,7606,7608,7610,7612,7614,7616,7618,7620,7622,7624,7626,7628,7630,7632,7634,7636,7638,7640,7642,7644,7646,7648,7650,7652,7654,7656,7658,7660,7662,7664,7666,7668,7670,7672,7674,7676,7678,7680,7682,7684,7686,7688,7690,7692,7694,7696,7698,7700,7702,7704,7706,7708,7710,7712,7714,7716,7718,7720,7722,7724,7726,7728,7730,7732,7734,7736,7738,7740,7742,7744,7746,7748,7750,7752,7754,7756,7758,7760,7762,7764,7766,7768,7770,7772,7774,7776,7778,7780,7782,7784,7786,7788,7790,7792,7794,7796,7798,7800,7802,7804,7806,7808,7810,7812,7814,7816,7818,7820,7822,7824,7826,7828,7830,7832,7834,7836,7838,7840,7842,7844,7846,7848,7850,7852,7854,7856,7858,7860,7862,7864,7866,7868,7870,7872,7874,7876,7878,7880,7882,7884,7886,7888,7890,7892,7894,7896,7898,7900,7902,7904,7906,7908,7910,7912,7914,7916,7918,7920,7922,7924,7926,7928,7930,7932,7934,7936,7938,7940,7942,7944,7946,7948,7950,7952,7954,7956,7958,7960,7962,7964,7966,7968,7970,7972,7974,7976,7978,7980,7982,7984,7986,7988,7990,7992,7994,7996,7998,8000,8002,8004,8006,8008,8010,8012,8014,8016,8018,8020,8022,8024,8026,8028,8030,8032,8034,8036,8038,8040,8042,8044,8046,8048,8050,8052,8054,8056,8058,8060,8062,8064,8066,8068,8070,8072,8074,8076,8078,8080,8082,8084,8086,8088,8090,8092,8094,8096,8098,8100,8102,8104,8106,8108,8110,8112,8114,8116,8118,8120,8122,8124,8126,8128,8130,8132,8134,8136,8138,8140,8142,8144,8146,8148,8150,8152,8154,8156,8158,8160,8162,8164,8166,8168,8170,8172,8174,8176,8178,8180,8182,8184,8186,8188,8190,8192,8194,8196,8198,8200,8202,8204,8206,8208,8210,8212,8214,8216,8218,8220,8222,8224,8226,8228,8230,8232,8234,8236,8238,8240,8242,8244,8246,8248,8250,8252,8254,8256,8258,8260,8262,8264,8266,8268,8270,8272,8274,8276,8278,8280,8282,8284,8286,8288,8290,8292,8294,8296,8298,8300,8302,8304,8306,8308,8310,8312,8314,8316,8318,8320,8322,8324,8326,8328,8330,8332,8334,8336,8338,8340,8342,8344,8346,8348,8350,8352,8354,8356,8358,8360,8362,8364,8366,8368,8370,8372,8374,8376,8378,8380,8382,8384,8386,8388,8390,8392,8394,8396,8398,8400,8402,8404,8406,8408,8410,8412,8414,8416,8418,8420,8422,8424,8426,8428,8430,8432,8434,8436,8438,8440,8442,8444,8446,8448,8450,8452,8454,8456,8458,8460,8462,8464,8466,8468,8470,8472,8474,8476,8478,8480,8482,8484,8486,8488,8490,8492,8494,8496,8498,8500,8502,8504,8506,8508,8510,8512,8514,8516,8518,8520,8522,8524,8526,8528,8530,8532,8534,8536,8538,8540,8542,8544,8546,8548,8550,8552,8554,8556,8558,8560,8562,8564,8566,8568,8570,8572,8574,8576,8578,8580,8582,8584,8586,8588,8590,8592,8594,8596,8598,8600,8602,8604,8606,8608,8610,8612,8614,8616,8618,8620,8622,8624,8626,8628,8630,8632,8634,8636,8638,8640,8642,8644,8646,8648,8650,8652,8654,8656,8658,8660,8662,8664,8666,8668,8670,8672,8674,8676,8678,8680,8682,8684,8686,8688,8690,8692,8694,8696,8698,8700,8702,8704,8706,8708,8710,8712,8714,8716,8718,8720,8722,8724,8726,8728,8730,8732,8734,8736,8738,8740,8742,8744,8746,8748,8750,8752
,8754,8756,8758,8760,8762,8764,8766,8768,8770,8772,8774,8776,8778,8780,8782,8784,8786,8788,8790,8792,8794,8796,8798,8800,8802,8804,8806,8808,8810,8812,8814,8816,8818,8820,8822,8824,8826,8828,8830,8832,8834,8836,8838,8840,8842,8844,8846,8848,8850,8852,8854,8856,8858,8860,8862,8864,8866,8868,8870,8872,8874,8876,8878,8880,8882,8884,8886,8888,8890,8892,8894,8896,8898,8900,8902,8904,8906,8908,8910,8912,8914,8916,8918,8920,8922,8924,8926,8928,8930,8932,8934,8936,8938,8940,8942,8944,8946,8948,8950,8952,8954,8956,8958,8960,8962,8964,8966,8968,8970,8972,8974,8976,8978,8980,8982,8984,8986,8988,8990,8992,8994,8996,8998,9000,9002,9004,9006,9008,9010,9012,9014,9016,9018,9020,9022,9024,9026,9028,9030,9032,9034,9036,9038,9040,9042,9044,9046,9048,9050,9052,9054,9056,9058,9060,9062,9064,9066,9068,9070,9072,9074,9076,9078,9080,9082,9084,9086,9088,9090,9092,9094,9096,9098,9100,9102,9104,9106,9108,9110,9112,9114,9116,9118,9120,9122,9124,9126,9128,9130,9132,9134,9136,9138,9140,9142,9144,9146,9148,9150,9152,9154,9156,9158,9160,9162,9164,9166,9168,9170,9172,9174,9176,9178,9180,9182,9184,9186,9188,9190,9192,9194,9196,9198,9200,9202,9204,9206,9208,9210,9212,9214,9216,9218,9220,9222,9224,9226,9228,9230,9232,9234,9236,9238,9240,9242,9244,9246,9248,9250,9252,9254,9256,9258,9260,9262,9264,9266,9268,9270,9272,9274,9276,9278,9280,9282,9284,9286,9288,9290,9292,9294,9296,9298,9300,9302,9304,9306,9308,9310,9312,9314,9316,9318,9320,9322,9324,9326,9328,9330,9332,9334,9336,9338,9340,9342,9344,9346,9348,9350,9352,9354,9356,9358,9360,9362,9364,9366,9368,9370,9372,9374,9376,9378,9380,9382,9384,9386,9388,9390,9392,9394,9396,9398,9400,9402,9404,9406,9408,9410,9412,9414,9416,9418,9420,9422,9424,9426,9428,9430,9432,9434,9436,9438,9440,9442,9444,9446,9448,9450,9452,9454,9456,9458,9460,9462,9464,9466,9468,9470,9472,9474,9476,9478,9480,9482,9484,9486,9488,9490,9492,9494,9496,9498,9500,9502,9504,9506,9508,9510,9512,9514,9516,9518,9520,9522,9524,9526,9528,9530,9532,9534,9536,9538,9540,9542,9544,9546,9548,9550,9552,9554,9556,9558,9560,9562,9564,9566,9568,9570,9572,9574,9576,9578,9580,9582,9584,9586,9588,9590,9592,9594,9596,9598,9600,9602,9604,9606,9608,9610,9612,9614,9616,9618,9620,9622,9624,9626,9628,9630,9632,9634,9636,9638,9640,9642,9644,9646,9648,9650,9652,9654,9656,9658,9660,9662,9664,9666,9668,9670,9672,9674,9676,9678,9680,9682,9684,9686,9688,9690,9692,9694,9696,9698,9700,9702,9704,9706,9708,9710,9712,9714,9716,9718,9720,9722,9724,9726,9728,9730,9732,9734,9736,9738,9740,9742,9744,9746,9748,9750,9752,9754,9756,9758,9760,9762,9764,9766,9768,9770,9772,9774,9776,9778,9780,9782,9784,9786,9788,9790,9792,9794,9796,9798,9800,9802,9804,9806,9808,9810,9812,9814,9816,9818,9820,9822,9824,9826,9828,9830,9832,9834,9836,9838,9840,9842,9844,9846,9848,9850,9852,9854,9856,9858,9860,9862,9864,9866,9868,9870,9872,9874,9876,9878,9880,9882,9884,9886,9888,9890,9892,9894,9896,9898,9900,9902,9904,9906,9908,9910,9912,9914,9916,9918,9920,9922,9924,9926,9928,9930,9932,9934,9936,9938,9940,9942,9944,9946,9948,9950,9952,9954,9956,9958,9960,9962,9964,9966,9968,9970,9972,9974,9976,9978,9980,9982,9984,9986,9988,9990,9992,9994,9996,9998,10000,10002,10004,10006,10008,10010,10012,10014,10016,10018,10020,10022,10024,10026,10028,10030,10032,10034,10036,10038,10040,10042,10044,10046,10048,10050,10052,10054,10056,10058,10060,10062,10064,10066,10068,10070,10072,10074,10076,10078,10080,10082,10084,10086,10088,10090,10092,10094,10096,10098,10100,10102,10104,10106,10108,10110,10112,10114,10116,10118,10120,10122,10124,10126,10128,10130,10132,10134,10136,10138,10140,10142,10144,1
0146,10148,10150,10152,10154,10156,10158,10160,10162,10164,10166,10168,10170,10172,10174,10176,10178,10180,10182,10184,10186,10188,10190,10192,10194,10196,10198,10200,10202,10204,10206,10208,10210,10212,10214,10216,10218,10220,10222,10224,10226,10228,10230,10232,10234,10236,10238,10240,10242,10244,10246,10248,10250,10252,10254,10256,10258,10260,10262,10264,10266,10268,10270,10272,10274,10276,10278,10280,10282,10284,10286,10288,10290,10292,10294,10296,10298,10300,10302,10304,10306,10308,10310,10312,10314,10316,10318,10320,10322,10324,10326,10328,10330,10332,10334,10336,10338,10340,10342,10344,10346,10348,10350,10352,10354,10356,10358,10360,10362,10364,10366,10368,10370,10372,10374,10376,10378,10380,10382,10384,10386,10388,10390,10392,10394,10396,10398,10400,10402,10404,10406,10408,10410,10412,10414,10416,10418,10420,10422,10424,10426,10428,10430,10432,10434,10436,10438,10440,10442,10444,10446,10448,10450,10452,10454,10456,10458,10460,10462,10464,10466,10468,10470,10472,10474,10476,10478,10480,10482,10484,10486,10488,10490,10492,10494,10496,10498,10500,10502,10504,10506,10508,10510,10512,10514,10516,10518,10520,10522,10524,10526,10528,10530,10532,10534,10536,10538,10540,10542,10544,10546,10548,10550,10552,10554,10556,10558,10560,10562,10564,10566,10568,10570,10572,10574,10576,10578,10580,10582,10584,10586,10588,10590,10592,10594,10596,10598,10600,10602,10604,10606,10608,10610,10612,10614,10616,10618,10620,10622,10624,10626,10628,10630,10632,10634,10636,10638,10640,10642,10644,10646,10648,10650,10652,10654,10656,10658,10660,10662,10664,10666,10668,10670,10672,10674,10676,10678,10680,10682,10684,10686,10688,10690,10692,10694,10696,10698,10700,10702,10704,10706,10708,10710,10712,10714,10716,10718,10720,10722,10724,10726,10728,10730,10732,10734,10736,10738,10740,10742,10744,10746,10748,10750,10752,10754,10756,10758,10760,10762,10764,10766,10768,10770,10772,10774,10776,10778,10780,10782,10784,10786,10788,10790,10792,10794,10796,10798,10800,10802,10804,10806,10808,10810,10812,10814,10816,10818,10820,10822,10824,10826,10828,10830,10832,10834,10836,10838,10840,10842,10844,10846,10848,10850,10852,10854,10856,10858,10860,10862,10864,10866,10868,10870,10872,10874,10876,10878,10880,10882,10884,10886,10888,10890,10892,10894,10896,10898,10900,10902,10904,10906,10908,10910,10912,10914,10916,10918,10920,10922,10924,10926,10928,10930,10932,10934,10936,10938,10940,10942,10944,10946,10948,10950,10952,10954,10956,10958,10960,10962,10964,10966,10968,10970,10972,10974,10976,10978,10980,10982,10984,10986,10988,10990,10992,10994,10996,10998,11000,11002,11004,11006,11008,11010,11012,11014,11016,11018,11020,11022,11024,11026,11028,11030,11032,11034,11036,11038,11040,11042,11044,11046,11048,11050,11052,11054,11056,11058,11060,11062,11064,11066,11068,11070,11072,11074,11076,11078,11080,11082,11084,11086,11088,11090,11092,11094,11096,11098,11100,11102,11104,11106,11108,11110,11112,11114,11116,11118,11120,11122,11124,11126,11128,11130,11132,11134,11136,11138,11140,11142,11144,11146,11148,11150,11152,11154,11156,11158,11160,11162,11164,11166,11168,11170,11172,11174,11176,11178,11180,11182,11184,11186,11188,11190,11192,11194,11196,11198,11200,11202,11204,11206,11208,11210,11212,11214,11216,11218,11220,11222,11224,11226,11228,11230,11232,11234,11236,11238,11240,11242,11244,11246,11248,11250,11252,11254,11256,11258,11260,11262,11264,11266,11268,11270,11272,11274,11276,11278,11280,11282,11284,11286,11288,11290,11292,11294,11296,11298,11300,11302,11304,11306,11308,11310,11312,11314,11316,11318,11320,11322,11324,11326,11328,1133
0,11332,11334,11336,11338,11340,11342,11344,11346,11348,11350,11352,11354,11356,11358,11360,11362,11364,11366,11368,11370,11372,11374,11376,11378,11380,11382,11384,11386,11388,11390,11392,11394,11396,11398,11400,11402,11404,11406,11408,11410,11412,11414,11416,11418,11420,11422,11424,11426,11428,11430,11432,11434,11436,11438,11440,11442,11444,11446,11448,11450,11452,11454,11456,11458,11460,11462,11464,11466,11468,11470,11472,11474,11476,11478,11480,11482,11484,11486,11488,11490,11492,11494,11496,11498,11500,11502,11504,11506,11508,11510,11512,11514,11516,11518,11520,11522,11524,11526,11528,11530,11532,11534,11536,11538,11540,11542,11544,11546,11548,11550,11552,11554,11556,11558,11560,11562,11564,11566,11568,11570,11572,11574,11576,11578,11580,11582,11584,11586,11588,11590,11592,11594,11596,11598,11600,11602,11604,11606,11608,11610,11612,11614,11616,11618,11620,11622,11624,11626,11628,11630,11632,11634,11636,11638,11640,11642,11644,11646,11648,11650,11652,11654,11656,11658,11660,11662,11664,11666,11668,11670,11672,11674,11676,11678,11680,11682,11684,11686,11688,11690,11692,11694,11696,11698,11700,11702,11704,11706,11708,11710,11712,11714,11716,11718,11720,11722,11724,11726,11728,11730,11732,11734,11736,11738,11740,11742,11744,11746,11748,11750,11752,11754,11756,11758,11760,11762,11764,11766,11768,11770,11772,11774,11776,11778,11780,11782,11784,11786,11788,11790,11792,11794,11796,11798,11800,11802,11804,11806,11808,11810,11812,11814,11816,11818,11820,11822,11824,11826,11828,11830,11832,11834,11836,11838,11840,11842,11844,11846,11848,11850,11852,11854,11856,11858,11860,11862,11864,11866,11868,11870,11872,11874,11876,11878,11880,11882,11884,11886,11888,11890,11892,11894,11896,11898,11900,11902,11904,11906,11908,11910,11912,11914,11916,11918,11920,11922,11924,11926,11928,11930,11932,11934,11936,11938,11940,11942,11944,11946,11948,11950,11952,11954,11956,11958,11960,11962,11964,11966,11968,11970,11972,11974,11976,11978,11980,11982,11984,11986,11988,11990,11992,11994,11996,11998,12000,12002,12004,12006,12008,12010,12012,12014,12016,12018,12020,12022,12024,12026,12028,12030,12032,12034,12036,12038,12040,12042,12044,12046,12048,12050,12052,12054,12056,12058,12060,12062,12064,12066,12068,12070,12072,12074,12076,12078,12080,12082,12084,12086,12088,12090,12092,12094,12096,12098,12100,12102,12104,12106,12108,12110,12112,12114,12116,12118,12120,12122,12124,12126,12128,12130,12132,12134,12136,12138,12140,12142,12144,12146,12148,12150,12152,12154,12156,12158,12160,12162,12164,12166,12168,12170,12172,12174,12176,12178,12180,12182,12184,12186,12188,12190,12192,12194,12196,12198,12200,12202,12204,12206,12208,12210,12212,12214,12216,12218,12220,12222,12224,12226,12228,12230,12232,12234,12236,12238,12240,12242,12244,12246,12248,12250,12252,12254,12256,12258,12260,12262,12264,12266,12268,12270,12272,12274,12276,12278,12280,12282,12284,12286,12288,12290,12292,12294,12296,12298,12300,12302,12304,12306,12308,12310,12312,12314,12316,12318,12320,12322,12324,12326,12328,12330,12332,12334,12336,12338,12340,12342,12344,12346,12348,12350,12352,12354,12356,12358,12360,12362,12364,12366,12368,12370,12372,12374,12376,12378,12380,12382,12384,12386,12388,12390,12392,12394,12396,12398,12400,12402,12404,12406,12408,12410,12412,12414,12416,12418,12420,12422,12424,12426,12428,12430,12432,12434,12436,12438,12440,12442,12444,12446,12448,12450,12452,12454,12456,12458,12460,12462,12464,12466,12468,12470,12472,12474,12476,12478,12480,12482,12484,12486,12488,12490,12492,12494,12496,12498,12500,12502,12504,12506,12508,12510,12512,12514,1
2516,12518,12520,12522,12524,12526,12528,12530,12532,12534,12536,12538,12540,12542,12544,12546,12548,12550,12552,12554,12556,12558,12560,12562,12564,12566,12568,12570,12572,12574,12576,12578,12580,12582,12584,12586,12588,12590,12592,12594,12596,12598,12600,12602,12604,12606,12608,12610,12612,12614,12616,12618,12620,12622,12624,12626,12628,12630,12632,12634,12636,12638,12640,12642,12644,12646,12648,12650,12652,12654,12656,12658,12660,12662,12664,12666,12668,12670,12672,12674,12676,12678,12680,12682,12684,12686,12688,12690,12692,12694,12696,12698,12700,12702,12704,12706,12708,12710,12712,12714,12716,12718,12720,12722,12724,12726,12728,12730,12732,12734,12736,12738,12740,12742,12744,12746,12748,12750,12752,12754,12756,12758,12760,12762,12764,12766,12768,12770,12772,12774,12776,12778,12780,12782,12784,12786,12788,12790,12792,12794,12796,12798,12800,12802,12804,12806,12808,12810,12812,12814,12816,12818,12820,12822,12824,12826,12828,12830,12832,12834,12836,12838,12840,12842,12844,12846,12848,12850,12852,12854,12856,12858,12860,12862,12864,12866,12868,12870,12872,12874,12876,12878,12880,12882,12884,12886,12888,12890,12892,12894,12896,12898,12900,12902,12904,12906,12908,12910,12912,12914,12916,12918,12920,12922,12924,12926,12928,12930,12932,12934,12936,12938,12940,12942,12944,12946,12948,12950,12952,12954,12956,12958,12960,12962,12964,12966,12968,12970,12972,12974,12976,12978,12980,12982,12984,12986,12988,12990,12992,12994,12996,12998,13000,13002,13004,13006,13008,13010,13012,13014,13016,13018,13020,13022,13024,13026,13028,13030,13032,13034,13036,13038,13040,13042,13044,13046,13048,13050,13052,13054,13056,13058,13060,13062,13064,13066,13068,13070,13072,13074,13076,13078,13080,13082,13084,13086,13088,13090,13092,13094,13096,13098,13100,13102,13104,13106,13108,13110,13112,13114,13116,13118,13120,13122,13124,13126,13128,13130,13132,13134,13136,13138,13140,13142,13144,13146,13148,13150,13152,13154,13156,13158,13160,13162,13164,13166,13168,13170,13172,13174,13176,13178,13180,13182,13184,13186,13188,13190,13192,13194,13196,13198,13200,13202,13204,13206,13208,13210,13212,13214,13216,13218,13220,13222,13224,13226,13228,13230,13232,13234,13236,13238,13240,13242,13244,13246,13248,13250,13252,13254,13256,13258,13260,13262,13264,13266,13268,13270,13272,13274,13276,13278,13280,13282,13284,13286,13288,13290,13292,13294,13296,13298,13300,13302,13304,13306,13308,13310,13312,13314,13316,13318,13320,13322,13324,13326,13328,13330,13332,13334,13336,13338,13340,13342,13344,13346,13348,13350,13352,13354,13356,13358,13360,13362,13364,13366,13368,13370,13372,13374,13376,13378,13380,13382,13384,13386,13388,13390,13392,13394,13396,13398,13400,13402,13404,13406,13408,13410,13412,13414,13416,13418,13420,13422,13424,13426,13428,13430,13432,13434,13436,13438,13440,13442,13444,13446,13448,13450,13452,13454,13456,13458,13460,13462,13464,13466,13468,13470,13472,13474,13476,13478,13480,13482,13484,13486,13488,13490,13492,13494,13496,13498,13500,13502,13504,13506,13508,13510,13512,13514,13516,13518,13520,13522,13524,13526,13528,13530,13532,13534,13536,13538,13540,13542,13544,13546,13548,13550,13552,13554,13556,13558,13560,13562,13564,13566,13568,13570,13572,13574,13576,13578,13580,13582,13584,13586,13588,13590,13592,13594,13596,13598,13600,13602,13604,13606,13608,13610,13612,13614,13616,13618,13620,13622,13624,13626,13628,13630,13632,13634,13636,13638,13640,13642,13644,13646,13648,13650,13652,13654,13656,13658,13660,13662,13664,13666,13668,13670,13672,13674,13676,13678,13680,13682,13684,13686,13688,13690,13692,13694,13696,13698,1370
0,13702,13704,13706,13708,13710,13712,13714,13716,13718,13720,13722,13724,13726,13728,13730,13732,13734,13736,13738,13740,13742,13744,13746,13748,13750,13752,13754,13756,13758,13760,13762,13764,13766,13768,13770,13772,13774,13776,13778,13780,13782,13784,13786,13788,13790,13792,13794,13796,13798,13800,13802,13804,13806,13808,13810,13812,13814,13816,13818,13820,13822,13824,13826,13828,13830,13832,13834,13836,13838,13840,13842,13844,13846,13848,13850,13852,13854,13856,13858,13860,13862,13864,13866,13868,13870,13872,13874,13876,13878,13880,13882,13884,13886,13888,13890,13892,13894,13896,13898,13900,13902,13904,13906,13908,13910,13912,13914,13916,13918,13920,13922,13924,13926,13928,13930,13932,13934,13936,13938,13940,13942,13944,13946,13948,13950,13952,13954,13956,13958,13960,13962,13964,13966,13968,13970,13972,13974,13976,13978,13980,13982,13984,13986,13988,13990,13992,13994,13996,13998,14000,14002,14004,14006,14008,14010,14012,14014,14016,14018,14020,14022,14024,14026,14028,14030,14032,14034,14036,14038,14040,14042,14044,14046,14048,14050,14052,14054,14056,14058,14060,14062,14064,14066,14068,14070,14072,14074,14076,14078,14080,14082,14084,14086,14088,14090,14092,14094,14096,14098,14100,14102,14104,14106,14108,14110,14112,14114,14116,14118,14120,14122,14124,14126,14128,14130,14132,14134,14136,14138,14140,14142,14144,14146,14148,14150,14152,14154,14156,14158,14160,14162,14164,14166,14168,14170,14172,14174,14176,14178,14180,14182,14184,14186,14188,14190,14192,14194,14196,14198,14200,14202,14204,14206,14208,14210,14212,14214,14216,14218,14220,14222,14224,14226,14228,14230,14232,14234,14236,14238,14240,14242,14244,14246,14248,14250,14252,14254,14256,14258,14260,14262,14264,14266,14268,14270,14272,14274,14276,14278,14280,14282,14284,14286,14288,14290,14292,14294,14296,14298,14300,14302,14304,14306,14308,14310,14312,14314,14316,14318,14320,14322,14324,14326,14328,14330,14332,14334,14336,14338,14340,14342,14344,14346,14348,14350,14352,14354,14356,14358,14360,14362,14364,14366,14368,14370,14372,14374,14376,14378,14380,14382,14384,14386,14388,14390,14392,14394,14396,14398,14400,14402,14404,14406,14408,14410,14412,14414,14416,14418,14420,14422,14424,14426,14428,14430,14432,14434,14436,14438,14440,14442,14444,14446,14448,14450,14452,14454,14456,14458,14460,14462,14464,14466,14468,14470,14472,14474,14476,14478,14480,14482,14484,14486,14488,14490,14492,14494,14496,14498,14500,14502,14504,14506,14508,14510,14512,14514,14516,14518,14520,14522,14524,14526,14528,14530,14532,14534,14536,14538,14540,14542,14544,14546,14548,14550,14552,14554,14556,14558,14560,14562,14564,14566,14568,14570,14572,14574,14576,14578,14580,14582,14584,14586,14588,14590,14592,14594,14596,14598,14600,14602,14604,14606,14608,14610,14612,14614,14616,14618,14620,14622,14624,14626,14628,14630,14632,14634,14636,14638,14640,14642,14644,14646,14648,14650,14652,14654,14656,14658,14660,14662,14664,14666,14668,14670,14672,14674,14676,14678,14680,14682,14684,14686,14688,14690,14692,14694,14696,14698,14700,14702,14704,14706,14708,14710,14712,14714,14716,14718,14720,14722,14724,14726,14728,14730,14732,14734,14736,14738,14740,14742,14744,14746,14748,14750,14752,14754,14756,14758,14760,14762,14764,14766,14768,14770,14772,14774,14776,14778,14780,14782,14784,14786,14788,14790,14792,14794,14796,14798,14800,14802,14804,14806,14808,14810,14812,14814,14816,14818,14820,14822,14824,14826,14828,14830,14832,14834,14836,14838,14840,14842,14844,14846,14848,14850,14852,14854,14856,14858,14860,14862,14864,14866,14868,14870,14872,14874,14876,14878,14880,14882,14884,1
4886,14888,14890,14892,14894,14896,14898,14900,14902,14904,14906,14908,14910,14912,14914,14916,14918,14920,14922,14924,14926,14928,14930,14932,14934,14936,14938,14940,14942,14944,14946,14948,14950,14952,14954,14956,14958,14960,14962,14964,14966,14968,14970,14972,14974,14976,14978,14980,14982,14984,14986,14988,14990,14992,14994,14996,14998,15000,15002,15004,15006,15008,15010,15012,15014,15016,15018,15020,15022,15024,15026,15028,15030,15032,15034,15036,15038,15040,15042,15044,15046,15048,15050,15052,15054,15056,15058,15060,15062,15064,15066,15068,15070,15072,15074,15076,15078,15080,15082,15084,15086,15088,15090,15092,15094,15096,15098,15100,15102,15104,15106,15108,15110,15112,15114,15116,15118,15120,15122,15124,15126,15128,15130,15132,15134,15136,15138,15140,15142,15144,15146,15148,15150,15152,15154,15156,15158,15160,15162,15164,15166,15168,15170,15172,15174,15176,15178,15180,15182,15184,15186,15188,15190,15192,15194,15196,15198,15200,15202,15204,15206,15208,15210,15212,15214,15216,15218,15220,15222,15224,15226,15228,15230,15232,15234,15236,15238,15240,15242,15244,15246,15248,15250,15252,15254,15256,15258,15260,15262,15264,15266,15268,15270,15272,15274,15276,15278,15280,15282,15284,15286,15288,15290,15292,15294,15296,15298,15300,15302,15304,15306,15308,15310,15312,15314,15316,15318,15320,15322,15324,15326,15328,15330,15332,15334,15336,15338,15340,15342,15344,15346,15348,15350,15352,15354,15356,15358,15360,15362,15364,15366,15368,15370,15372,15374,15376,15378,15380,15382,15384,15386,15388,15390,15392,15394,15396,15398,15400,15402,15404,15406,15408,15410,15412,15414,15416,15418,15420,15422,15424,15426,15428,15430,15432,15434,15436,15438,15440,15442,15444,15446,15448,15450,15452,15454,15456,15458,15460,15462,15464,15466,15468,15470,15472,15474,15476,15478,15480,15482,15484,15486,15488,15490,15492,15494,15496,15498,15500,15502,15504,15506,15508,15510,15512,15514,15516,15518,15520,15522,15524,15526,15528,15530,15532,15534,15536,15538,15540,15542,15544,15546,15548,15550,15552,15554,15556,15558,15560,15562,15564,15566,15568,15570,15572,15574,15576,15578,15580,15582,15584,15586,15588,15590,15592,15594,15596,15598,15600,15602,15604,15606,15608,15610,15612,15614,15616,15618,15620,15622,15624,15626,15628,15630,15632,15634,15636,15638,15640,15642,15644,15646,15648,15650,15652,15654,15656,15658,15660,15662,15664,15666,15668,15670,15672,15674,15676,15678,15680,15682,15684,15686,15688,15690,15692,15694,15696,15698,15700,15702,15704,15706,15708,15710,15712,15714,15716,15718,15720,15722,15724,15726,15728,15730,15732,15734,15736,15738,15740,15742,15744,15746,15748,15750,15752,15754,15756,15758,15760,15762,15764,15766,15768,15770,15772,15774,15776,15778,15780,15782,15784,15786,15788,15790,15792,15794,15796,15798,15800,15802,15804,15806,15808,15810,15812,15814,15816,15818,15820,15822,15824,15826,15828,15830,15832,15834,15836,15838,15840,15842,15844,15846,15848,15850,15852,15854,15856,15858,15860,15862,15864,15866,15868,15870,15872,15874,15876,15878,15880,15882,15884,15886,15888,15890,15892,15894,15896,15898,15900,15902,15904,15906,15908,15910,15912,15914,15916,15918,15920,15922,15924,15926,15928,15930,15932,15934,15936,15938,15940,15942,15944,15946,15948,15950,15952,15954,15956,15958,15960,15962,15964,15966,15968,15970,15972,15974,15976,15978,15980,15982,15984,15986,15988,15990,15992,15994,15996,15998,16000,16002,16004,16006,16008,16010,16012,16014,16016,16018,16020,1,16024,16026,16028,16030,16032,16034,16036,16038,16040,16042,16044,16046,16048,16050,16052,16054,16056,16058,16060,16062,16064,16066,16068,16070,16
072,16074,16076,16078,16080,16082,16084,16086,16088,16090,16092,16094,16096,16098,16100,16102,16104,16106,16108,16110,16112,16114,16116,16118,16120,16122,16124,16126,16128,16130,16132,16134,16136,16138,16140,16142,16144,16146,16148,16150,16152,16154,16156,16158,16160,16162,16164,16166,16168,16170,16172,16174,16176,16178,16180,16182,16184,16186,16188,16190,16192,16194,16196,16198,16200,16202,16204,16206,16208,16210,16212,16214,16216,16218,16220,16222,16224,16226,16228,16230,16232,16234,16236,16238,16240,16242,16244,16246,16248,16250,16252,16254,16256,16258,16260,16262,16264,16266,16268,16270,16272,16274,16276,16278,16280,16282,16284,16286,16288,16290,16292,16294,16296,16298,16300,16302,16304,16306,16308,16310,16312,16314,16316,16318,16320,16322,16324,16326,16328,16330,16332,16334,16336,16338,16340,16342,16344,16346,16348,16350,16352,16354,16356,16358,16360,16362,16364,16366,16368,16370,16372,16374,16376,16378,16380,16382,16384,16386,16388,16390,16392,16394,16396,16398,16400,16402,16404,16406,16408,16410,16412,16414,16416,16418,16420,16422,16424,16426,16428,16430,16432,16434,16436,16438,16440,16442,16444,16446,16448,16450,16452,16454,16456,16458,16460,16462,16464,16466,16468,16470,16472,16474,16476,16478,16480,16482,16484,16486,16488,16490,16492,16494,16496,16498,16500,16502,16504,16506,16508,16510,16512,16514,16516,16518,16520,16522,16524,16526,16528,16530,16532,16534,16536,16538,16540,16542,16544,16546,16548,16550,16552,16554,16556,16558,16560,16562,16564,16566,16568,16570,16572,16574,16576,16578,16580,16582,16584,16586,16588,16590,16592,16594,16596,16598,16600,16602,16604,16606,16608,16610,16612,16614,16616,16618,16620,16622,16624,16626,16628,16630,16632,16634,16636,16638,16640,16642,16644,16646,16648,16650,16652,16654,16656,16658,16660,16662,16664,16666,16668,16670,16672,16674,16676,16678,16680,16682,16684,16686,16688,16690,16692,16694,16696,16698,16700,16702,16704,16706,16708,16710,16712,16714,16716,16718,16720,16722,16724,16726,16728,16730,16732,16734,16736,16738,16740,16742,16744,16746,16748,16750,16752,16754,16756,16758,16760,16762,16764,16766,16768,16770,16772,16774,16776,16778,16780,16782,16784,16786,16788,16790,16792,16794,16796,16798,16800,16802,16804,16806,16808,16810,16812,16814,16816,16818,16820,16822,16824,16826,16828,16830,16832,16834,16836,16838,16840,16842,16844,16846,16848,16850,16852,16854,16856,16858,16860,16862,16864,16866,16868,16870,16872,16874,16876,16878,16880,16882,16884,16886,16888,16890,16892,16894,16896,16898,16900,16902,16904,16906,16908,16910,16912,16914,16916,16918,16920,16922,16924,16926,16928,16930,16932,16934,16936,16938,16940,16942,16944,16946,16948,16950,16952,16954,16956,16958,16960,16962,16964,16966,16968,16970,16972,16974,16976,16978,16980,16982,16984,16986,16988,16990,16992,16994,16996,16998,17000,17002,17004,17006,17008,17010,17012,17014,17016,17018,17020,17022,17024,17026,17028,17030,17032,17034,17036,17038,17040,17042,17044,17046,17048,17050,17052,17054,17056,17058,17060,17062,17064,17066,17068,17070,17072,17074,17076,17078,17080,17082,17084,17086,17088,17090,17092,17094,17096,17098,17100,17102,17104,17106,17108,17110,17112,17114,17116,17118,17120,17122,17124,17126,17128,17130,17132,17134,17136,17138,17140,17142,17144,17146,17148,17150,17152,17154,17156,17158,17160,17162,17164,17166,17168,17170,17172,17174,17176,17178,17180,17182,17184,17186,17188,17190,17192,17194,17196,17198,17200,17202,17204,17206,17208,17210,17212,17214,17216,17218,17220,17222,17224,17226,17228,17230,17232,17234,17236,17238,17240,17242,17244,17246,17248,17250,17252,17254,17256
,17258,17260,17262,17264,17266,17268,17270,17272,17274,17276,17278,17280,17282,17284,17286,17288,17290,17292,17294,17296,17298,17300,17302,17304,17306,17308,17310,17312,17314,17316,17318,17320,17322,17324,17326,17328,17330,17332,17334,17336,17338,17340,17342,17344,17346,17348,17350,17352,17354,17356,17358,17360,17362,17364,17366,17368,17370,17372,17374,17376,17378,17380,17382,17384,17386,17388,17390,17392,17394,17396,17398,17400,17402,17404,17406,17408,17410,17412,17414,17416,17418,17420,17422,17424,17426,17428,17430,17432,17434,17436,17438,17440,17442,17444,17446,17448,17450,17452,17454,17456,17458,17460,17462,17464,17466,17468,17470,17472,17474,17476,17478,17480,17482,17484,17486,17488,17490,17492,17494,17496,17498,17500,17502,17504,17506,17508,17510,17512,17514,17516,17518,17520,17522,17524,17526,17528,17530,17532,17534,17536,17538,17540,17542,17544,17546,17548,17550,17552,17554,17556,17558,17560,17562,17564,17566,17568,17570,17572,17574,17576,17578,17580,17582,17584,17586,17588,17590,17592,17594,17596,17598,17600,17602,17604,17606,17608,17610,17612,17614,17616,17618,17620,17622,17624,17626,17628,17630,17632,17634,17636,17638,17640,17642,17644,17646,17648,17650,17652,17654,17656,17658,17660,17662,17664,17666,17668,17670,17672,17674,17676,17678,17680,17682,17684,17686,17688,17690,17692,17694,17696,17698,17700,17702,17704,17706,17708,17710,17712,17714,17716,17718,17720,17722,17724,17726,17728,17730,17732,17734,17736,17738,17740,17742,17744,17746,17748,17750,17752,17754,17756,17758,17760,17762,17764,17766,17768,17770,17772,17774,17776,17778,17780,17782,17784,17786,17788,17790,17792,17794,17796,17798,17800,17802,17804,17806,17808,17810,17812,17814,17816,17818,17820,17822,17824,17826,17828,17830,17832,17834,17836,17838,17840,17842,17844,17846,17848,17850,17852,17854,17856,17858,17860,17862,17864,17866,17868,17870,17872,17874,17876,17878,17880,17882,17884,17886,17888,17890,17892,17894,17896,17898,17900,17902,17904,17906,17908,17910,17912,17914,17916,17918,17920,17922,17924,17926,17928,17930,17932,17934,17936,17938,17940,17942,17944,17946,17948,17950,17952,17954,17956,17958,17960,17962,17964,17966,17968,17970,17972,17974,17976,17978,17980,17982,17984,17986,17988,17990,17992,17994,17996,17998,18000,18002,18004,18006,18008,18010,18012,18014,18016,18018,18020,18022,18024,18026,18028,18030,18032,18034,18036,18038,18040,18042,18044,18046,18048,18050,18052,18054,18056,18058,18060,18062,18064,18066,18068,18070,18072,18074,18076,18078,18080,18082,18084,18086,18088,18090,18092,18094,18096,18098,18100,18102,18104,18106,18108,18110,18112,18114,18116,18118,18120,18122,18124,18126,18128,18130,18132,18134,18136,18138,18140,18142,18144,18146,18148,18150,18152,18154,18156,18158,18160,18162,18164,18166,18168,18170,18172,18174,18176,18178,18180,18182,18184,18186,18188,18190,18192,18194,18196,18198,18200,18202,18204,18206,18208,18210,18212,18214,18216,18218,18220,18222,18224,18226,18228,18230,18232,18234,18236,18238,18240,18242,18244,18246,18248,18250,18252,18254,18256,18258,18260,18262,18264,18266,18268,18270,18272,18274,18276,18278,18280,18282,18284,18286,18288,18290,18292,18294,18296,18298,18300,18302,18304,18306,18308,18310,18312,18314,18316,18318,18320,18322,18324,18326,18328,18330,18332,18334,18336,18338,18340,18342,18344,18346,18348,18350,18352,18354,18356,18358,18360,18362,18364,18366,18368,18370,18372,18374,18376,18378,18380,18382,18384,18386,18388,18390,18392,18394,18396,18398,18400,18402,18404,18406,18408,18410,18412,18414,18416,18418,18420,18422,18424,18426,18428,18430,18432,18434,18436,18438,18440,18
442,18444,18446,18448,18450,18452,18454,18456,18458,18460,18462,18464,18466,18468,18470,18472,18474,18476,18478,18480,18482,18484,18486,18488,18490,18492,18494,18496,18498,18500,18502,18504,18506,18508,18510,18512,18514,18516,18518,18520,18522,18524,18526,18528,18530,18532,18534,18536,18538,18540,18542,18544,18546,18548,18550,18552,18554,18556,18558,18560,18562,18564,18566,18568,18570,18572,18574,18576,18578,18580,18582,18584,18586,18588,18590,18592,18594,18596,18598,18600,18602,18604,18606,18608,18610,18612,18614,18616,18618,18620,18622,18624,18626,18628,18630,18632,18634,18636,18638,18640,18642,18644,18646,18648,18650,18652,18654,18656,18658,18660,18662,18664,18666,18668,18670,18672,18674,18676,18678,18680,18682,18684,18686,18688,18690,18692,18694,18696,18698,18700,18702,18704,18706,18708,18710,18712,18714,18716,18718,18720,18722,18724,18726,18728,18730,18732,18734,18736,18738,18740,18742,18744,18746,18748,18750,18752,18754,18756,18758,18760,18762,18764,18766,18768,18770,18772,18774,18776,18778,18780,18782,18784,18786,18788,18790,18792,18794,18796,18798,18800,18802,18804,18806,18808,18810,18812,18814,18816,18818,18820,18822,18824,18826,18828,18830,18832,18834,18836,18838,18840,18842,18844,18846,18848,18850,18852,18854,18856,18858,18860,18862,18864,18866,18868,18870,18872,18874,18876,18878,18880,18882,18884,18886,18888,18890,18892,18894,18896,18898,18900,18902,18904,18906,18908,18910,18912,18914,18916,18918,18920,18922,18924,18926,18928,18930,18932,18934,18936,18938,18940,18942,18944,18946,18948,18950,18952,18954,18956,18958,18960,18962,18964,18966,18968,18970,18972,18974,18976,18978,18980,18982,18984,18986,18988,18990,18992,18994,18996,18998,19000,19002,19004,19006,19008,19010,19012,19014,19016,19018,19020,19022,19024,19026,19028,19030,19032,19034,19036,19038,19040,19042,19044,19046,19048,19050,19052,19054,19056,19058,19060,19062,19064,19066,19068,19070,19072,19074,19076,19078,19080,19082,19084,19086,19088,19090,19092,19094,19096,19098,19100,19102,19104,19106,19108,19110,19112,19114,19116,19118,19120,19122,19124,19126,19128,19130,19132,19134,19136,19138,19140,19142,19144,19146,19148,19150,19152,19154,19156,19158,19160,19162,19164,19166,19168,19170,19172,19174,19176,19178,19180,19182,19184,19186,19188,19190,19192,19194,19196,19198,19200,19202,19204,19206,19208,19210,19212,19214,19216,19218,19220,19222,19224,19226,19228,19230,19232,19234,19236,19238,19240,19242,19244,19246,19248,19250,19252,19254,19256,19258,19260,19262,19264,19266,19268,19270,19272,19274,19276,19278,19280,19282,19284,19286,19288,19290,19292,19294,19296,19298,19300,19302,19304,19306,19308,19310,19312,19314,19316,19318,19320,19322,19324,19326,19328,19330,19332,19334,19336,19338,19340,19342,19344,19346,19348,19350,19352,19354,19356,19358,19360,19362,19364,19366,19368,19370,19372,19374,19376,19378,19380,19382,19384,19386,19388,19390,19392,19394,19396,19398,19400,19402,19404,19406,19408,19410,19412,19414,19416,19418,19420,19422,19424,19426,19428,19430,19432,19434,19436,19438,19440,19442,19444,19446,19448,19450,19452,19454,19456,19458,19460,19462,19464,19466,19468,19470,19472,19474,19476,19478,19480,19482,19484,19486,19488,19490,19492,19494,19496,19498,19500,19502,19504,19506,19508,19510,19512,19514,19516,19518,19520,19522,19524,19526,19528,19530,19532,19534,19536,19538,19540,19542,19544,19546,19548,19550,19552,19554,19556,19558,19560,19562,19564,19566,19568,19570,19572,19574,19576,19578,19580,19582,19584,19586,19588,19590,19592,19594,19596,19598,19600,19602,19604,19606,19608,19610,19612,19614,19616,19618,19620,19622,19624,19626
,19628,19630,19632,19634,19636,19638,19640,19642,19644,19646,19648,19650,19652,19654,19656,19658,19660,19662,19664,19666,19668,19670,19672,19674,19676,19678,19680,19682,19684,19686,19688,19690,19692,19694,19696,19698,19700,19702,19704,19706,19708,19710,19712,19714,19716,19718,19720,19722,19724,19726,19728,19730,19732,19734,19736,19738,19740,19742,19744,19746,19748,19750,19752,19754,19756,19758,19760,19762,19764,19766,19768,19770,19772,19774,19776,19778,19780,19782,19784,19786,19788,19790,19792,19794,19796,19798,19800,19802,19804,19806,19808,19810,19812,19814,19816,19818,19820,19822,19824,19826,19828,19830,19832,19834,19836,19838,19840,19842,19844,19846,19848,19850,19852,19854,19856,19858,19860,19862,19864,19866,19868,19870,19872,19874,19876,19878,19880,19882,19884,19886,19888,19890,19892,19894,19896,19898,19900,19902,19904,19906,19908,19910,19912,19914,19916,19918,19920,19922,19924,19926,19928,19930,19932,19934,19936,19938,19940,19942,19944,19946,19948,19950,19952,19954,19956,19958,19960,19962,19964,19966,19968,19970,19972,19974,19976,19978,19980,19982,19984,19986,19988,19990,19992,19994,19996,19998,20000,20002,20004,20006,20008,20010,20012,20014,20016,20018,20020,20022,20024,20026,20028,20030,20032,20034,20036,20038,20040,20042,20044,20046,20048,20050,20052,20054,20056,20058,20060,20062,20064,20066,20068,20070,20072,20074,20076,20078,20080,20082,20084,20086,20088,20090,20092,20094,20096,20098,20100,20102,20104,20106,20108,20110,20112,20114,20116,20118,20120,20122,20124,20126,20128,20130,20132,20134,20136,20138,20140,20142,20144,20146,20148,20150,20152,20154,20156,20158,20160,20162,20164,20166,20168,20170,20172,20174,20176,20178,20180,20182,20184,20186,20188,20190,20192,20194,20196,20198,20200,20202,20204,20206,20208,20210,20212,20214,20216,20218,20220,20222,20224,20226,20228,20230,20232,20234,20236,20238,20240,20242,20244,20246,20248,20250,20252,20254,20256,20258,20260,20262,20264,20266,20268,20270,20272,20274,20276,20278,20280,20282,20284,20286,20288,20290,20292,20294,20296,20298,20300,20302,20304,20306,20308,20310,20312,20314,20316,20318,20320,20322,20324,20326,20328,20330,20332,20334,20336,20338,20340,20342,20344,20346,20348,20350,20352,20354,20356,20358,20360,20362,20364,20366,20368,20370,20372,20374,20376,20378,20380,20382,20384,20386,20388,20390,20392,20394,20396,20398,20400,20402,20404,20406,20408,20410,20412,20414,20416,20418,20420,20422,20424,20426,20428,20430,20432,20434,20436,20438,20440,20442,20444,20446,20448,20450,20452,20454,20456,20458,20460,20462,20464,20466,20468,20470,20472,20474,20476,20478,20480,20482,20484,20486,20488,20490,20492,20494,20496,20498,20500,20502,20504,20506,20508,20510,20512,20514,20516,20518,20520,20522,20524,20526,20528,20530,20532,20534,20536,20538,20540,20542,20544,20546,20548,20550,20552,20554,20556,20558,20560,20562,20564,20566,20568,20570,20572,20574,20576,20578,20580,20582,20584,20586,20588,20590,20592,20594,20596,20598,20600,20602,20604,20606,20608,20610,20612,20614,20616,20618,20620,20622,20624,20626,20628,20630,20632,20634,20636,20638,20640,20642,20644,20646,20648,20650,20652,20654,20656,20658,20660,20662,20664,20666,20668,20670,20672,20674,20676,20678,20680,20682,20684,20686,20688,20690,20692,20694,20696,20698,20700,20702,20704,20706,20708,20710,20712,20714,20716,20718,20720,20722,20724,20726,20728,20730,20732,20734,20736,20738,20740,20742,20744,20746,20748,20750,20752,20754,20756,20758,20760,20762,20764,20766,20768,20770,20772,20774,20776,20778,20780,20782,20784,20786,20788,20790,20792,20794,20796,20798,20800,20802,20804,20806,20808,20810,20
812,20814,20816,20818,20820,20822,20824,20826,20828,20830,20832,20834,20836,20838,20840,20842,20844,20846,20848,20850,20852,20854,20856,20858,20860,20862,20864,20866,20868,20870,20872,20874,20876,20878,20880,20882,20884,20886,20888,20890,20892,20894,20896,20898,20900,20902,20904,20906,20908,20910,20912,20914,20916,20918,20920,20922,20924,20926,20928,20930,20932,20934,20936,20938,20940,20942,20944,20946,20948,20950,20952,20954,20956,20958,20960,20962,20964,20966,20968,20970,20972,20974,20976,20978,20980,20982,20984,20986,20988,20990,20992,20994,20996,20998,21000,21002,21004,21006,21008,21010,21012,21014,21016,21018,21020,21022,21024,21026,21028,21030,21032,21034,21036,21038,21040,21042,21044,21046,21048,21050,21052,21054,21056,21058,21060,21062,21064,21066,21068,21070,21072,21074,21076,21078,21080,21082,21084,21086,21088,21090,21092,21094,21096,21098,21100,21102,21104,21106,21108,21110,21112,21114,21116,21118,21120,21122,21124,21126,21128,21130,21132,21134,21136,21138,21140,21142,21144,21146,21148,21150,21152,21154,21156,21158,21160,21162,21164,21166,21168,21170,21172,21174,21176,21178,21180,21182,21184,21186,21188,21190,21192,21194,21196,21198,21200,21202,21204,21206,21208,21210,21212,21214,21216,21218,21220,21222,21224,21226,21228,21230,21232,21234,21236,21238,21240,21242,21244,21246,21248,21250,21252,21254,21256,21258,21260,21262,21264,21266,21268,21270,21272,21274,21276,21278,21280,21282,21284,21286,21288,21290,21292,21294,21296,21298,21300,21302,21304,21306,21308,21310,21312,21314,21316,21318,21320,21322,21324,21326,21328,21330,21332,21334,21336,21338,21340,21342,21344,21346,21348,21350,21352,21354,21356,21358,21360,21362,21364,21366,21368,21370,21372,21374,21376,21378,21380,21382,21384,21386,21388,21390,21392,21394,21396,21398,21400,21402,21404,21406,21408,21410,21412,21414,21416,21418,21420,21422,21424,21426,21428,21430,21432,21434,21436,21438,21440,21442,21444,21446,21448,21450,21452,21454,21456,21458,21460,21462,21464,21466,21468,21470,21472,21474,21476,21478,21480,21482,21484,21486,21488,21490,21492,21494,21496,21498,21500,21502,21504,21506,21508,21510,21512,21514,21516,21518,21520,21522,21524,21526,21528,21530,21532,21534,21536,21538,21540,21542,21544,21546,21548,21550,21552,21554,21556,21558,21560,21562,21564,21566,21568,21570,21572,21574,21576,21578,21580,21582,21584,21586,21588,21590,21592,21594,21596,21598,21600,21602,21604,21606,21608,21610,21612,21614,21616,21618,21620,21622,21624,21626,21628,21630,21632,21634,21636,21638,21640,21642,21644,21646,21648,21650,21652,21654,21656,21658,21660,21662,21664,21666,21668,21670,21672,21674,21676,21678,21680,21682,21684,21686,21688,21690,21692,21694,21696,21698,21700,21702,21704,21706,21708,21710,21712,21714,21716,21718,21720,21722,21724,21726,21728,21730,21732,21734,21736,21738,21740,21742,21744,21746,21748,21750,21752,21754,21756,21758,21760,21762,21764,21766,21768,21770,21772,21774,21776,21778,21780,21782,21784,21786,21788,21790,21792,21794,21796,21798,21800,21802,21804,21806,21808,21810,21812,21814,21816,21818,21820,21822,21824,21826,21828,21830,21832,21834,21836,21838,21840,21842,21844,21846,21848,21850,21852,21854,21856,21858,21860,21862,21864,21866,21868,21870,21872,21874,21876,21878,21880,21882,21884,21886,21888,21890,21892,21894,21896,21898,21900,21902,21904,21906,21908,21910,21912,21914,21916,21918,21920,21922,21924,21926,21928,21930,21932,21934,21936,21938,21940,21942,21944,21946,21948,21950,21952,21954,21956,21958,21960,21962,21964,21966,21968,21970,21972,21974,21976,21978,21980,21982,21984,21986,21988,21990,21992,21994,21996
,21998,22000,22002,22004,22006,22008,22010,22012,22014,22016,22018,22020,22022,22024,22026,22028,22030,22032,22034,22036,22038,22040,22042,22044,22046,22048,22050,22052,22054,22056,22058,22060,22062,22064,22066,22068,22070,22072,22074,22076,22078,22080,22082,22084,22086,22088,22090,22092,22094,22096,22098,22100,22102,22104,22106,22108,22110,22112,22114,22116,22118,22120,22122,22124,22126,22128,22130,22132,22134,22136,22138,22140,22142,22144,22146,22148,22150,22152,22154,22156,22158,22160,22162,22164,22166,22168,22170,22172,22174,22176,22178,22180,22182,22184,22186,22188,22190,22192,22194,22196,22198,22200,22202,22204,22206,22208,22210,22212,22214,22216,22218,22220,22222,22224,22226,22228,22230,22232,22234,22236,22238,22240,22242,22244,22246,22248,22250,22252,22254,22256,22258,22260,22262,22264,22266,22268,22270,22272,22274,22276,22278,22280,22282,22284,22286,22288,22290,22292,22294,22296,22298,22300,22302,22304,22306,22308,22310,22312,22314,22316,22318,22320,22322,22324,22326,22328,22330,22332,22334,22336,22338,22340,22342,22344,22346,22348,22350,22352,22354,22356,22358,22360,22362,22364,22366,22368,22370,22372,22374,22376,22378,22380,22382,22384,22386,22388,22390,22392,22394,22396,22398,22400,22402,22404,22406,22408,22410,22412,22414,22416,22418,22420,22422,22424,22426,22428,22430,22432,22434,22436,22438,22440,22442,22444,22446,22448,22450,22452,22454,22456,22458,22460,22462,22464,22466,22468,22470,22472,22474,22476,22478,22480,22482,22484,22486,22488,22490,22492,22494,22496,22498,22500,22502,22504,22506,22508,22510,22512,22514,22516,22518,22520,22522,22524,22526,22528,22530,22532,22534,22536,22538,22540,22542,22544,22546,22548,22550,22552,22554,22556,22558,22560,22562,22564,22566,22568,22570,22572,22574,22576,22578,22580,22582,22584,22586,22588,22590,22592,22594,22596,22598,22600,22602,22604,22606,22608,22610,22612,22614,22616,22618,22620,22622,22624,22626,22628,22630,22632,22634,22636,22638,22640,22642,22644,22646,22648,22650,22652,22654,22656,22658,22660,22662,22664,22666,22668,22670,22672,22674,22676,22678,22680,22682,22684,22686,22688,22690,22692,22694,22696,22698,22700,22702,22704,22706,22708,22710,22712,22714,22716,22718,22720,22722,22724,22726,22728,22730,22732,22734,22736,22738,22740,22742,22744,22746,22748,22750,22752,22754,22756,22758,22760,22762,22764,22766,22768,22770,22772,22774,22776,22778,22780,22782,22784,22786,22788,22790,22792,22794,22796,22798,22800,22802,22804,22806,22808,22810,22812,22814,22816,22818,22820,22822,22824,22826,22828,22830,22832,22834,22836,22838,22840,22842,22844,22846,22848,22850,22852,22854,22856,22858,22860,22862,22864,22866,22868,22870,22872,22874,22876,22878,22880,22882,22884,22886,22888,22890,22892,22894,22896,22898,22900,22902,22904,22906,22908,22910,22912,22914,22916,22918,22920,22922,22924,22926,22928,22930,22932,22934,22936,22938,22940,22942,22944,22946,22948,22950,22952,22954,22956,22958,22960,22962,22964,22966,22968,22970,22972,22974,22976,22978,22980,22982,22984,22986,22988,22990,22992,22994,22996,22998,23000,23002,23004,23006,23008,23010,23012,23014,23016,23018,23020,23022,23024,23026,23028,23030,23032,23034,23036,23038,23040,23042,23044,23046,23048,23050,23052,23054,23056,23058,23060,23062,23064,23066,23068,23070,23072,23074,23076,23078,23080,23082,23084,23086,23088,23090,23092,23094,23096,23098,23100,23102,23104,23106,23108,23110,23112,23114,23116,23118,23120,23122,23124,23126,23128,23130,23132,23134,23136,23138,23140,23142,23144,23146,23148,23150,23152,23154,23156,23158,23160,23162,23164,23166,23168,23170,23172,23174,23176,23178,23180,23
182,23184,23186,23188,23190,23192,23194,23196,23198,23200,23202,23204,23206,23208,23210,23212,23214,23216,23218,23220,23222,23224,23226,23228,23230,23232,23234,23236,23238,23240,23242,23244,23246,23248,23250,23252,23254,23256,23258,23260,23262,23264,23266,23268,23270,23272,23274,23276,23278,23280,23282,23284,23286,23288,23290,23292,23294,23296,23298,23300,23302,23304,23306,23308,23310,23312,23314,23316,23318,23320,23322,23324,23326,23328,23330,23332,23334,23336,23338,23340,23342,23344,23346,23348,23350,23352,23354,23356,23358,23360,23362,23364,23366,23368,23370,23372,23374,23376,23378,23380,23382,23384,23386,23388,23390,23392,23394,23396,23398,23400,23402,23404,23406,23408,23410,23412,23414,23416,23418,23420,23422,23424,23426,23428,23430,23432,23434,23436,23438,23440,23442,23444,23446,23448,23450,23452,23454,23456,23458,23460,23462,23464,23466,23468,23470,23472,23474,23476,23478,23480,23482,23484,23486,23488,23490,23492,23494,23496,23498,23500,23502,23504,23506,23508,23510,23512,23514,23516,23518,23520,23522,23524,23526,23528,23530,23532,23534,23536,23538,23540,23542,23544,23546,23548,23550,23552,23554,23556,23558,23560,23562,23564,23566,23568,23570,23572,23574,23576,23578,23580,23582,23584,23586,23588,23590,23592,23594,23596,23598,23600,23602,23604,23606,23608,23610,23612,23614,23616,23618,23620,23622,23624,23626,23628,23630,23632,23634,23636,23638,23640,23642,23644,23646,23648,23650,23652,23654,23656,23658,23660,23662,23664,23666,23668,23670,23672,23674,23676,23678,23680,23682,23684,23686,23688,23690,23692,23694,23696,23698,23700,23702,23704,23706,23708,23710,23712,23714,23716,23718,23720,23722,23724,23726,23728,23730,23732,23734,23736,23738,23740,23742,23744,23746,23748,23750,23752,23754,23756,23758,23760,23762,23764,23766,23768,23770,23772,23774,23776,23778,23780,23782,23784,23786,23788,23790,23792,23794,23796,23798,23800,23802,23804,23806,23808,23810,23812,23814,23816,23818,23820,23822,23824,23826,23828,23830,23832,23834,23836,23838,23840,23842,23844,23846,23848,23850,23852,23854,23856,23858,23860,23862,23864,23866,23868,23870,23872,23874,23876,23878,23880,23882,23884,23886,23888,23890,23892,23894,23896,23898,23900,23902,23904,23906,23908,23910,23912,23914,23916,23918,23920,23922,23924,23926,23928,23930,23932,23934,23936,23938,23940,23942,23944,23946,23948,23950,23952,23954,23956,23958,23960,23962,23964,23966,23968,23970,23972,23974,23976,23978,23980,23982,23984,23986,23988,23990,23992,23994,23996,23998,24000,24002,24004,24006,24008,24010,24012,24014,24016,24018,24020,24022,24024,24026,24028,24030,24032,24034,24036,24038,24040,24042,24044,24046,24048,24050,24052,24054,24056,24058,24060,24062,24064,24066,24068,24070,24072,24074,24076,24078,24080,24082,24084,24086,24088,24090,24092,24094,24096,24098,24100,24102,24104,24106,24108,24110,24112,24114,24116,24118,24120,24122,24124,24126,24128,24130,24132,24134,24136,24138,24140,24142,24144,24146,24148,24150,24152,24154,24156,24158,24160,24162,24164,24166,24168,24170,24172,24174,24176,24178,24180,24182,24184,24186,24188,24190,24192,24194,24196,24198,24200,24202,24204,24206,24208,24210,24212,24214,24216,24218,24220,24222,24224,24226,24228,24230,24232,24234,24236,24238,24240,24242,24244,24246,24248,24250,24252,24254,24256,24258,24260,24262,24264,24266,24268,24270,24272,24274,24276,24278,24280,24282,24284,24286,24288,24290,24292,24294,24296,24298,24300,24302,24304,24306,24308,24310,24312,24314,24316,24318,24320,24322,24324,24326,24328,24330,24332,24334,24336,24338,24340,24342,24344,24346,24348,24350,24352,24354,24356,24358,24360,24362,24364,24366
,24368,24370,24372,24374,24376,24378,24380,24382,24384,24386,24388,24390,24392,24394,24396,24398,24400,24402,24404,24406,24408,24410,24412,24414,24416,24418,24420,24422,24424,24426,24428,24430,24432,24434,24436,24438,24440,24442,24444,24446,24448,24450,24452,24454,24456,24458,24460,24462,24464,24466,24468,24470,24472,24474,24476,24478,24480,24482,24484,24486,24488,24490,24492,24494,24496,24498,24500,24502,24504,24506,24508,24510,24512,24514,24516,24518,24520,24522,24524,24526,24528,24530,24532,24534,24536,24538,24540,24542,24544,24546,24548,24550,24552,24554,24556,24558,24560,24562,24564,24566,24568,24570,24572,24574,24576,24578,24580,24582,24584,24586,24588,24590,24592,24594,24596,24598,24600,24602,24604,24606,24608,24610,24612,24614,24616,24618,24620,24622,24624,24626,24628,24630,24632,24634,24636,24638,24640,24642,24644,24646,24648,24650,24652,24654,24656,24658,24660,24662,24664,24666,24668,24670,24672,24674,24676,24678,24680,24682,24684,24686,24688,24690,24692,24694,24696,24698,24700,24702,24704,24706,24708,24710,24712,24714,24716,24718,24720,24722,24724,24726,24728,24730,24732,24734,24736,24738,24740,24742,24744,24746,24748,24750,24752,24754,24756,24758,24760,24762,24764,24766,24768,24770,24772,24774,24776,24778,24780,24782,24784,24786,24788,24790,24792,24794,24796,24798,24800,24802,24804,24806,24808,24810,24812,24814,24816,24818,24820,24822,24824,24826,24828,24830,24832,24834,24836,24838,24840,24842,24844,24846,24848,24850,24852,24854,24856,24858,24860,24862,24864,24866,24868,24870,24872,24874,24876,24878,24880,24882,24884,24886,24888,24890,24892,24894,24896,24898,24900,24902,24904,24906,24908,24910,24912,24914,24916,24918,24920,24922,24924,24926,24928,24930,24932,24934,24936,24938,24940,24942,24944,24946,24948,24950,24952,24954,24956,24958,24960,24962,24964,24966,24968,24970,24972,24974,24976,24978,24980,24982,24984,24986,24988,24990,24992,24994,24996,24998,25000,25002,25004,25006,25008,25010,25012,25014,25016,25018,25020,25022,25024,25026,25028,25030,25032,25034,25036,25038,25040,25042,25044,25046,25048,25050,25052,25054,25056,25058,25060,25062,25064,25066,25068,25070,25072,25074,25076,25078,25080,25082,25084,25086,25088,25090,25092,25094,25096,25098,25100,25102,25104,25106,25108,25110,25112,25114,25116,25118,25120,25122,25124,25126,25128,25130,25132,25134,25136,25138,25140,25142,25144,25146,25148,25150,25152,25154,25156,25158,25160,25162,25164,25166,25168,25170,25172,25174,25176,25178,25180,25182,25184,25186,25188,25190,25192,25194,25196]
,16021
)
assert result3 == [8010, 8011]
print("OK")
| 2,822.6 | 70,040 | 0.817941 | [
"MIT"
] | leocody/Leet-code | twosum.py | 70,565 | Python |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from flash.core.data.data_source import DefaultDataKeys
def vissl_collate_helper(samples):
    """Default-collate every key except DefaultDataKeys.INPUT, which is replaced by a placeholder."""
result = []
for batch_ele in samples:
_batch_ele_dict = {}
_batch_ele_dict.update(batch_ele)
_batch_ele_dict[DefaultDataKeys.INPUT] = -1
result.append(_batch_ele_dict)
return torch.utils.data._utils.collate.default_collate(result)
def multicrop_collate_fn(samples):
"""Multi-crop collate function for VISSL integration.
Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT
"""
result = vissl_collate_helper(samples)
inputs = [[] for _ in range(len(samples[0][DefaultDataKeys.INPUT]))]
for batch_ele in samples:
multi_crop_imgs = batch_ele[DefaultDataKeys.INPUT]
for idx, crop in enumerate(multi_crop_imgs):
inputs[idx].append(crop)
for idx, ele in enumerate(inputs):
inputs[idx] = torch.stack(ele)
result[DefaultDataKeys.INPUT] = inputs
return result
def simclr_collate_fn(samples):
"""Multi-crop collate function for VISSL integration.
Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT
"""
result = vissl_collate_helper(samples)
inputs = []
num_views = len(samples[0][DefaultDataKeys.INPUT])
view_idx = 0
while view_idx < num_views:
for batch_ele in samples:
imgs = batch_ele[DefaultDataKeys.INPUT]
inputs.append(imgs[view_idx])
view_idx += 1
result[DefaultDataKeys.INPUT] = torch.stack(inputs)
return result
def moco_collate_fn(samples):
"""MOCO collate function for VISSL integration.
Run custom collate on a single key since VISSL transforms affect only DefaultDataKeys.INPUT
"""
result = vissl_collate_helper(samples)
inputs = []
for batch_ele in samples:
inputs.append(torch.stack(batch_ele[DefaultDataKeys.INPUT]))
result[DefaultDataKeys.INPUT] = torch.stack(inputs).squeeze()[:, 0, :, :, :].squeeze()
result["data_momentum"] = torch.stack(inputs).squeeze()[:, 1, :, :, :].squeeze()
return result
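# --- Illustrative usage sketch (not part of the original module; batch size, crop
# count and image shapes below are assumptions). Each sample is a dict whose
# DefaultDataKeys.INPUT entry holds one tensor per crop/view:
#
#   samples = [
#       {DefaultDataKeys.INPUT: [torch.rand(3, 224, 224), torch.rand(3, 96, 96)]},
#       {DefaultDataKeys.INPUT: [torch.rand(3, 224, 224), torch.rand(3, 96, 96)]},
#   ]
#   batch = multicrop_collate_fn(samples)
#   # batch[DefaultDataKeys.INPUT] is a list with one stacked tensor per crop:
#   # [shape (2, 3, 224, 224), shape (2, 3, 96, 96)]
#
# simclr_collate_fn instead expects same-sized views and returns a single tensor of
# shape (num_views * batch_size, C, H, W), with all first views followed by all
# second views; moco_collate_fn splits two stacked views into the query batch
# (DefaultDataKeys.INPUT) and the momentum batch ("data_momentum").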
| 30.131868 | 95 | 0.707877 | [
"Apache-2.0"
] | Darktex/lightning-flash | flash/image/embedding/vissl/transforms/utilities.py | 2,742 | Python |
"""Abstract Base Class for posteriors over states after applying filtering/smoothing"""
from abc import ABC, abstractmethod
class FiltSmoothPosterior(ABC):
"""Posterior Distribution over States after Filtering/Smoothing"""
@abstractmethod
def __call__(self, location):
"""Evaluate the time-continuous posterior for a given location
Parameters
----------
location : float
Location, or time, at which to evaluate the posterior.
Returns
-------
rv : `RandomVariable`
"""
raise NotImplementedError
@abstractmethod
def __len__(self):
"""Length of the discrete-time solution
Corresponds to the number of filtering/smoothing steps
"""
raise NotImplementedError
@abstractmethod
def __getitem__(self, idx):
"""Return the corresponding index/slice of the discrete-time solution"""
raise NotImplementedError
def sample(self, locations=None, size=()):
"""
Draw samples from the filtering/smoothing posterior.
If nothing is specified, a single sample is drawn (supported on self.locations).
If locations are specified, the samples are drawn on those locations.
If size is specified, more than a single sample is drawn.
Parameters
----------
locations : array_like, optional
Locations on which the samples are wanted. Default is none, which implies that
self.location is used.
size : int or tuple of ints, optional
Indicates how many samples are drawn. Default is an empty tuple, in which case
a single sample is returned.
Returns
-------
numpy.ndarray
Drawn samples. If size has shape (A1, ..., Z1), locations have shape (L,),
and the state space model has shape (A2, ..., Z2), the output has
shape (A1, ..., Z1, L, A2, ..., Z2).
For example: size=4, len(locations)=4, dim=3 gives shape (4, 4, 3).
"""
raise NotImplementedError("Sampling not implemented.")
| 33.793651 | 90 | 0.620479 | [
"MIT"
] | admdev8/probnum | src/probnum/filtsmooth/filtsmoothposterior.py | 2,129 | Python |
# -*- coding: utf-8 -*-
import time, threading, uuid, sys
import tushare as ts
from PyQt4 import QtCore, QtGui
import utils
class ProfitStrategy(QtCore.QObject):
def init(self, b):
pass
def update_target(self, dp, p, t1, t2):
pass
def reset_target(self, b, p, t1, t2):
pass
class ProfitWideStrategy(QtCore.QObject):
def init(self, b):
dp = b
t1 = dp * 1.08
t2 = dp * 1.12
p = dp * 1.06
return (dp, p, t1, t2)
def update_target(self, dp, p, t1, t2):
dp = t1
t1 = dp * 1.08
t2 = dp * 1.12
p = dp * 1.06
return (dp, p, t1, t2)
def reset_target(self, dp, p, t1, t2):
t1 = dp
dp = t1 / 1.08
p = dp * 1.06
t2 = dp * 1.12
return (dp, p, t1, t2)
class ProfitThinStrategy(QtCore.QObject):
def init(self, b):
dp = b
t1 = dp * 1.08
t2 = dp * 1.12
p = dp * 1.06
return (dp, p, t1, t2)
def update_target(self, dp, p, t1, t2):
t1 = t2
dp = t1 / 1.08
p = dp * 1.06
t2 = p * 1.12
return (dp, p, t1, t2)
def reset_target(self, dp, p, t1, t2):
t2 = t1
dp = t2 / 1.08
p = dp * 1.06
t1 = dp * 1.12
return (dp, p, t1, t2)
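# Illustrative worked example (the numbers are assumptions, not from the script):
# with a base cost of 10.00, ProfitWideStrategy.init() yields dynamic cost dp = 10.00,
# profit stop p = 10.60 (dp * 1.06), target1 = 10.80 (dp * 1.08) and
# target2 = 11.20 (dp * 1.12). Once the price trades through target2,
# update_target() promotes dp to the old target1 (10.80), giving p = 11.448,
# target1 = 11.664 and target2 = 12.096; reset_target() walks the same ladder back
# one rung when the price falls back below the dynamic cost.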
class SaveProfit(QtCore.QObject):
_saveProfitSignal = QtCore.pyqtSignal(int)
_resetSignal = QtCore.pyqtSignal(int)
_targetSignal = QtCore.pyqtSignal(int, int)
def __init__(self, id, base_cost, strategy=ProfitWideStrategy()):
super(SaveProfit, self).__init__()
self._strategy = strategy
self._id = id
self._trigger_count = 0
self._trigge_target = False
self._base_cost = base_cost
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.init(self._base_cost)
def run(self, price):
self._temp_price = price
if self._trigge_target:
if price >= self._target2:
self._trigge_target = False
self._trigger_count += 1
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.update_target(self._dynamic_cost, self._profit, self._target1, self._target2)
self._targetSignal.emit(self._id, self._trigger_count)
elif price < self._profit:
#warning
print self.info()
self._saveProfitSignal.emit(self._id)
return False
elif price >= self._profit:
if self._base_cost > self._profit and price >= self._base_cost:
self._resetSignal.emit(self._id)
self._trigge_target = False
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.update_target(self._dynamic_cost, self._profit, self._target1, self._target2)
else:
last_profit = self._dynamic_cost / 1.08 * 1.06
if price >= self._target1:
self._trigge_target = True
elif price <= self._dynamic_cost:
self._trigge_target = True
self._trigger_count -= 1
self._dynamic_cost, self._profit, self._target1, self._target2 = \
self._strategy.reset_target(self._dynamic_cost, self._profit, self._target1, self._target2)
return True
def info(self):
return {
"dyprice" : self._dynamic_cost,
"target1" : self._target1,
"target2" : self._target2,
"profit" : self._profit,
"base" : self._base_cost,
"cur" : self._temp_price,
"trigged" : self._trigge_target,
"trigger_count" : self._trigger_count
}
class StockWatcher(QtCore.QObject):
def __init__(self, stock_infos):
super(StcokWatcher, self).__init__()
self._stock_infos = stock_infos #code,price,name, triggered
self._on_watch = False
self._t = threading.Thread(target=self.on_watch)
self._t.setDaemon(True)
def init(self):
self._profiters = []
self._stocks = []
for i in range(len(self._stock_infos)):
stock_info = self._stock_infos[i]
self._stocks.append(stock_info['code'])
base_price = stock_info['base']
            if (stock_info.has_key('strategy') and stock_info['strategy'] == 1):
profiter = SaveProfit(i, base_price, ProfitThinStrategy())
else:
profiter = SaveProfit(i, base_price)
self._profiters.append(profiter)
self._profiters[i]._saveProfitSignal.connect(self.on_warn)
self._profiters[i]._resetSignal.connect(self.on_reset)
df = ts.get_realtime_quotes(self._stocks)
for i in df.index:
quote = df.loc[i]
self._stock_infos[i]['name'] = (quote['name'])
def on_watch(self):
while self._on_watch:
df = ts.get_realtime_quotes(self._stocks)
print '-' * 30
print "股票名 触发 当前价格 成本价格 收益点 收益率 触发次数"
for i in df.index:
quote = df.loc[i]
self._profiters[i].run(float(quote['price']))
#print self._profiters[i].info()
info = self._profiters[i].info()
prate = (info["cur"] - info["base"]) * 100 / info["cur"]
                prate = int(prate)
                triggerstr = 'Y' if info['trigged'] else 'N'
print "%s %s %8.3f %8.3f %8.3f %8d%% %8d" % \
(self._stock_infos[i]['name'], triggerstr, info['cur'], info['base'], info['profit'], prate, info['trigger_count'])
#print info
time.sleep(3)
def on_warn(self, id):
#return
__business_id = uuid.uuid1()
profiter = self._profiters[id].info()
stock_info = self._stock_infos[id]
prate = (profiter["cur"] - profiter["base"]) * 100 / profiter["cur"]
prate = int(prate)
params = "{\"nm\":\"%s\",\"number\":\"%s\",\"in\":\"%.3f\",\"cur\":\"%.3f\",\"prate\":\"%d%%\"}" \
% (stock_info['name'], stock_info['code'], profiter["base"], profiter["cur"], prate)
if not stock_info.has_key('msg') or not stock_info['msg']:
print '+' * 40
print utils.send_sms(__business_id, "13564511106", "XK咨询", "SMS_94650115", params)
print '+' * 40
stock_info['msg'] = True
def on_reset(self, id):
self._stock_infos[id]['msg'] = False
def start(self):
self._on_watch = True
self._t.start()
if __name__ == "__main__":
stocks = [
        {'code':'600516', 'base':34.313,'strategy':1}, # 方大碳素
{'code':'002145', 'base':6.682}, # 中核钛白
{'code':'603079', 'base':69.819}, # 盛大科技
{'code':'002888', 'base':35.119}, # 惠威科技
{'code':'603826', 'base':20.609} # 坤彩科技
]
qApp = QtGui.QApplication(sys.argv)
    watchers = StockWatcher(stocks)
watchers.init()
watchers.start()
qApp.exec_() | 36.927835 | 137 | 0.543691 | [
"MIT"
] | dayuanyuan1989/SaveProfits | src/profit.py | 7,258 | Python |
import logging
from typing import (
Iterable,
List,
)
from lxml import etree
from sciencebeam_parser.document.semantic_document import (
SemanticContentWrapper,
SemanticFigure,
SemanticHeading,
SemanticLabel,
SemanticParagraph,
SemanticRawEquation,
SemanticSection,
SemanticSectionTypes,
SemanticTable
)
from sciencebeam_parser.document.tei.common import (
TEI_E,
TeiElementBuilder
)
from sciencebeam_parser.document.tei.factory import (
SingleElementTeiElementFactory,
T_ElementChildrenList,
TeiElementFactory,
TeiElementFactoryContext
)
LOGGER = logging.getLogger(__name__)
class HeadingTeiElementFactory(SingleElementTeiElementFactory):
def get_tei_element_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> etree.ElementBase:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticHeading)
semantic_heading = semantic_content
children: T_ElementChildrenList = [
context.get_default_attributes_for_semantic_content(semantic_heading)
]
pending_whitespace = ''
for child_semantic_content in semantic_heading:
if isinstance(child_semantic_content, SemanticLabel):
children.append({'n': child_semantic_content.get_text()})
continue
layout_block = child_semantic_content.merged_block
if pending_whitespace:
children.append(pending_whitespace)
children.extend(context.iter_layout_block_tei_children(
layout_block=layout_block,
enable_coordinates=False
))
pending_whitespace = layout_block.whitespace
return TEI_E('head', *children)
def iter_flat_paragraph_formula(
semantic_paragraph: SemanticParagraph
) -> Iterable[SemanticContentWrapper]:
pending_semantic_content_list: List[SemanticContentWrapper] = []
for semantic_content in semantic_paragraph:
if isinstance(semantic_content, SemanticRawEquation):
if pending_semantic_content_list:
yield SemanticParagraph(pending_semantic_content_list)
pending_semantic_content_list = []
yield semantic_content
continue
pending_semantic_content_list.append(semantic_content)
if pending_semantic_content_list:
yield SemanticParagraph(pending_semantic_content_list)
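# Illustrative behaviour (not part of the original module): for a SemanticParagraph
# whose children are [text_block_1, raw_equation, text_block_2], the generator above
# yields SemanticParagraph([text_block_1]), then the SemanticRawEquation itself, then
# SemanticParagraph([text_block_2]), so raw equations are lifted out of the
# surrounding paragraph content.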
class ParagraphTeiElementFactory(TeiElementFactory):
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticParagraph)
semantic_paragraph = semantic_content
result: List[etree.ElementBase] = []
for flat_parent_semantic_content in iter_flat_paragraph_formula(semantic_paragraph):
if not isinstance(flat_parent_semantic_content, SemanticParagraph):
result.extend(context.get_tei_child_elements_for_semantic_content(
flat_parent_semantic_content
))
continue
children: T_ElementChildrenList = [
context.get_default_attributes_for_semantic_content(flat_parent_semantic_content)
]
pending_whitespace = ''
for child_semantic_content in flat_parent_semantic_content:
pending_whitespace = context.append_tei_children_list_and_get_whitespace(
children,
child_semantic_content,
pending_whitespace=pending_whitespace
)
result.append(TEI_E('p', *children))
return result
class SectionTeiElementFactory(TeiElementFactory):
def get_tei_children_for_semantic_content(
self,
semantic_content: SemanticContentWrapper,
context: TeiElementFactoryContext
) -> List[etree.ElementBase]:
LOGGER.debug('semantic_content: %s', semantic_content)
assert isinstance(semantic_content, SemanticSection)
semantic_section = semantic_content
tei_section = TeiElementBuilder(TEI_E('div'))
for child_semantic_content in semantic_section:
if isinstance(child_semantic_content, (SemanticFigure, SemanticTable,)):
# rendered at parent level
continue
tei_section.extend(context.get_tei_child_elements_for_semantic_content(
child_semantic_content
))
if semantic_content.section_type == SemanticSectionTypes.ACKNOWLEDGEMENT:
tei_section.element.attrib['type'] = 'acknowledgement'
if not list(tei_section.element):
return []
return [tei_section.element]
| 38.122137 | 97 | 0.696235 | [
"MIT"
] | elifesciences/sciencebeam-parser | sciencebeam_parser/document/tei/section.py | 4,994 | Python |
import logging
import sys
from pathlib import Path
from dotenv import load_dotenv
from flask import Flask
from code_runner.extensions import db, limiter
from . import code
def create_app(config_object='code_runner.settings'):
"""Creates and returns flask app instance as well as register all the extensions and blueprints"""
app = Flask(__name__)
register_environment()
app.config.from_object(config_object)
register_blueprints(app=app)
register_views(app=app)
register_extensions(app=app)
configure_logger(app=app)
return app
def register_blueprints(app):
"""Registers the blueprints"""
app.register_blueprint(code.views.blueprint)
def register_views(app):
"""Registers the pluggable views"""
run_view = code.views.RunCode.as_view('run')
run_async_view = code.views.RunCodeAsync.as_view('run-async')
app.add_url_rule('/run', view_func=run_view, methods=['POST'])
app.add_url_rule('/run-async', view_func=run_async_view, methods=['POST'])
app.add_url_rule('/get-result/<string:task_id>', view_func=run_async_view, methods=['GET'])
def register_extensions(app):
"""Register Flask extensions"""
with app.app_context():
db.init_app(app=app)
db.create_all()
limiter.init_app(app=app)
def register_environment():
"""Register environment"""
dotenv_path = Path('./') / '.env.development.local'
load_dotenv(dotenv_path=dotenv_path)
def configure_logger(app):
"""Configure loggers."""
handler = logging.StreamHandler(sys.stdout)
if not app.logger.handlers:
app.logger.addHandler(handler)
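# Minimal usage sketch (an assumption, not part of the original module):
#   from code_runner.app import create_app
#   app = create_app()
#   app.run(debug=True)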
| 28.508772 | 102 | 0.721231 | [
"MIT"
] | thephilomaths/code-runner-as-a-service | code_runner/app.py | 1,625 | Python |
#!/usr/bin/env python3
print( 'hello world' )
| 11.75 | 22 | 0.659574 | [
"MIT"
] | jzfarmer/learning_python | hello_world.py | 47 | Python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 20:32:12 2018
Functions to correctly fold and bin a light curve.
Calculate the lpp metric: transform to lower dimensions, knn
Depends on a class that reads in a previously created LPP metric map.
Depends on reading the light curve into a data structure.
The input is a class called data
data contains
data.time (days)
data.tzero (day)
data.dur (hours)
data.period (days)
data.flux (normalized to 0)
After foldBinLightCurve it contains
data.binned
After transform it contains
data.lpp_transform
@author: smullally
"""
from __future__ import division
import numpy as np
from sklearn.neighbors import NearestNeighbors
from lpproj import LocalityPreservingProjection
import copy
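# Illustrative sketch of the expected input object (an assumption based on the module
# docstring above and the attributes accessed below; not part of the original code):
#   class Tce(object):
#       pass
#   data = Tce()
#   data.time = time_days       # array of times (days)
#   data.tzero = 134.45         # epoch of first transit (day)
#   data.dur = 3.2              # transit duration (hours)
#   data.period = 12.7          # orbital period (days)
#   data.mes = 10.5             # multiple event statistic (also required below)
#   data.flux = flux_normed     # flux normalized around zero
#   normTLpp, rawTLpp, transformed = computeLPPTransitMetric(data, mapInfo)  # mapInfo loaded elsewhere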
def computeLPPTransitMetric(data,mapInfo):
"""
This function takes a data class with light curve info
and the mapInfo with information about the mapping to use.
    It then returns an LPP metric value.
"""
binFlux, binPhase=foldBinLightCurve(data,mapInfo.ntrfr,mapInfo.npts)
#plt.figure()
#plt.plot(binPhase,binFlux,'.--')
#Dimensionality Reduction and knn parts
rawTLpp,transformedTransit=computeRawLPPTransitMetric(binFlux,mapInfo)
#Normalize by Period Dependence
normTLpp=periodNormalLPPTransitMetric(rawTLpp,np.array([data.period,data.mes]), mapInfo)
return normTLpp,rawTLpp,transformedTransit
def runningMedian(t,y,dt,runt):
"""
Take a running median of size dt
Return values at times given in runt
"""
newy=np.zeros(len(y))
newt=np.zeros(len(y))
srt = np.argsort(t)
newt = t[srt]
newy = y[srt]
runy=[]
for i in range(len(runt)):
tmp=[]
for j in range(len(newt)):
if (newt[j] >= (runt[i]-dt)) and (newt[j] <= (runt[i]+dt)):
tmp.append(newy[j])
if np.isnan(np.nanmedian(np.array(tmp))) :
runy.append(0)
else:
runy.append(np.nanmedian(np.array(tmp)))
return(list(runt),runy)
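# Worked example (illustrative): runningMedian(np.array([0., 1, 2, 3, 4]),
# np.array([0., 10, 20, 30, 40]), 1.0, [2.0]) gathers the y values whose times fall
# within [1.0, 3.0] (10, 20, 30) and returns ([2.0], [20.0]); windows with no points
# contribute 0.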
def foldBinLightCurve (data, ntrfr, npts):
"""
Fold and bin light curve for input to LPP metric calculation
data contains time, tzero, dur, priod,mes and flux (centered around zero)
    ntrfr -- number of transit durations to use for binning around the transit, ~1.5
npts -- number of points in the final binning.
"""
#Create phase light curve
phaselc =np.mod((data.time-(data.tzero-0.5*data.period))/data.period,1)
flux=data.flux
mes=data.mes
#Determine the fraction of the time the planet transits the star.
#Insist that ntrfr * transit fraction
if ~np.isnan(data.dur) & (data.dur >0):
transit_dur = data.dur
else:
transit_dur = 0.2 * data.period/24.
transit_fr=transit_dur/24./data.period
if (transit_fr * ntrfr) > 0.5 :
transit_fr = 0.5/ntrfr
#Specify the out of transit (a) and the in transit regions
binover=1.3
if mes <= 20:
binover=-(1/8.0)*mes + 3.8
endfr = .03
midfr= .11
a = np.concatenate((np.arange(endfr,.5-midfr,1/npts) , \
np.arange((0.5+midfr),(1-endfr),1/npts)), axis=None)
ovsamp=4.0
#bstep=(ovsamp*ntrfr*transit_fr)/npts
b_num=41
b =np.linspace((0.5-ntrfr*transit_fr),(0.5+ntrfr*transit_fr),b_num)
#print "length a: %u " % len(a)
#print "length b: %u" % len(b)
[runta,runya] = runningMedian(phaselc,flux,binover/npts,a)
[runtb,runyb] = runningMedian(phaselc,flux,\
(binover*ovsamp*ntrfr*transit_fr)/npts,b)
#Combine the two sets of bins
runymess=np.array(runya + runyb)
runtmess = np.array(runta + runtb)
srt=np.argsort(runtmess)
runy=runymess[srt]
runt=runtmess[srt]
#Scale the flux by the depth so everything has the same depth.
#Catch or dividing by zero is to not scale.
scale = -1*np.min(runyb)
if scale != 0:
scaledFlux=runy/scale
else:
scaledFlux=runy
binnedFlux=scaledFlux
phasebins=runt
return binnedFlux,phasebins
def computeRawLPPTransitMetric(binFlux,mapInfo):
"""
Perform the matrix transformation with LPP
Do the knn test to get a raw LPP transit metric number.
"""
Yorig=mapInfo.YmapMapped
lpp=LocalityPreservingProjection(n_components=mapInfo.n_dim)
lpp.projection_=mapInfo.YmapM
#To equate to Matlab LPP methods, we need to remove mean of transform.
normBinFlux=binFlux-mapInfo.YmapMean
inputY=lpp.transform(normBinFlux.reshape(1,-1))
knownTransitsY=Yorig[mapInfo.knnGood,:]
dist,ind = knnDistance_fromKnown(knownTransitsY,inputY,mapInfo.knn)
rawLppTrMetric=np.mean(dist)
return rawLppTrMetric,inputY
def knnDistance_fromKnown(knownTransits,new,knn):
"""
For a group of known transits and a new one.
Use knn to determine how close the new one is to the known transits
using knn minkowski p = 3 ()
Using scipy signal to do this.
"""
#p=3 sets a minkowski distance of 3. #Check that you really used 3 for matlab.
nbrs=NearestNeighbors(n_neighbors=int(knn), algorithm='kd_tree', p=2)
nbrs.fit(knownTransits)
distances,indices = nbrs.kneighbors(new)
return distances, indices
def periodNormalLPPTransitMetric(rawTLpp,newPerMes, mapInfo):
"""
Normalize the rawTransitMetric value by those with the closest period.
This part removes the period dependence of the metric at short periods.
Plus it makes a value near one be the threshold between good and bad.
newPerMes is the np.array([period, mes]) of the new sample
"""
knownTrPeriods=mapInfo.mappedPeriods[mapInfo.knnGood]
knownTrMes=mapInfo.mappedMes[mapInfo.knnGood]
knownTrrawLpp=mapInfo.dymeans[mapInfo.knnGood]
nPercentil=mapInfo.nPercentil
nPsample=mapInfo.nPsample
#Find the those with the nearest periods Npsample-nneighbors
logPeriods=np.log10(knownTrPeriods)
logMes=np.log10(knownTrMes)
knownPerMes=np.stack((logPeriods, logMes), axis=-1)
np.shape(knownPerMes)
logNew=np.log10(newPerMes).reshape(1,-1)
#logNew=np.array([np.log10(newPeriod)]).reshape(1,1)
dist,ind = knnDistance_fromKnown(knownPerMes,logNew,nPsample)
#Find the nthPercentile of the rawLpp of these indicies
nearPeriodLpp=knownTrrawLpp[ind]
LppNPercentile = np.percentile(nearPeriodLpp,nPercentil)
NormLppTransitMetric=rawTLpp/LppNPercentile
return NormLppTransitMetric
def lpp_onetransit(tcedata,mapInfo,ntransit):
"""
Chop down the full time series to one orbital period.
Then gather the lpp value for that one transit.
"""
startTime=tcedata.time[0]+ntransit*tcedata.period
endTime=tcedata.time[0]+(ntransit+1)*tcedata.period + 3/24.0 #A few cadences of overlap
want=(tcedata.time>=startTime) & (tcedata.time<=endTime)
newtime=tcedata.time[want]
newflux=tcedata.flux[want]
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
    if len(newtime) > nExpCad*0.75:
onetransit=copy.deepcopy(tcedata)
onetransit.time=newtime
onetransit.flux=newflux
normTLpp, rawTLpp, transformedTr=computeLPPTransitMetric(onetransit,mapInfo)
else:
normTLpp=np.nan
rawTLpp=np.nan
return normTLpp,rawTLpp
def lpp_averageIndivTransit(tcedata,mapInfo):
"""
Create the loop over individual transits and return
array normalized lpp values, mean and std.
Input TCE object and mapInfo object.
It is unclear that this individual transit approach
separates out several new false positives.
It probably would require retuning for low SNR signals.
"""
length=tcedata.time[-1]-tcedata.time[0]
ntransits=int(np.floor(length/tcedata.period))
lppNorms=np.ones(ntransits)
lppRaws=np.ones(ntransits)
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
for i in range(ntransits):
lppNorms[i],lppRaws[i] = lpp_onetransit(tcedata,mapInfo,i)
lppMed=np.nanmedian(lppNorms)
lppStd=np.nanstd(lppNorms)
return lppNorms,lppMed, lppStd, ntransits
| 28.627986 | 92 | 0.66762 | [
"MIT"
] | barentsen/dave | lpp/newlpp/lppTransform.py | 8,388 | Python |
from auth import get_token
"""
This function will take a lot of the tedious work out of generating alert messages!
Simply follow the on-screen input prompts; at the end, a string with everything you need
will be printed, ready to copy and paste into TradingView!
"""
def generate_alert_message():
print('Enter type: (limit, market, etc.)')
type = input()
print('Enter Side (buy or sell):')
side = input()
print('Enter Amount:')
amount = input()
print('Enter Symbol:')
symbol = input()
if type == 'limit':
print('Enter limit price:')
price = input()
else:
price = 'None'
key = get_token()
print("Copy:\n")
output = {"type": type, "side": side, "amount": amount, "symbol": symbol, "price": price, "key": key}
print(str(output).replace('\'', '\"'))
generate_alert_message()
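# Example of the printed alert string (illustrative input values; the key comes from
# auth.get_token()):
#   {"type": "limit", "side": "buy", "amount": "0.01", "symbol": "BTC/USD", "price": "30000", "key": "<token>"}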
| 26.90625 | 105 | 0.630662 | [
"MIT"
] | ramakrishnamekala129/tradingviewwebhooksbotByRK | examples/generate_alert_message.py | 861 | Python |
import pytest
from localstack.utils.objects import SubtypesInstanceManager
def test_subtypes_instance_manager():
class BaseClass(SubtypesInstanceManager):
def foo(self):
pass
class C1(BaseClass):
@staticmethod
def impl_name() -> str:
return "c1"
def foo(self):
return "bar"
instance1 = BaseClass.get("c1")
assert instance1
assert BaseClass.get("c1") == instance1
assert instance1.foo() == "bar"
with pytest.raises(Exception):
assert BaseClass.get("c2")
class C2(BaseClass):
@staticmethod
def impl_name() -> str:
return "c2"
def foo(self):
return "baz"
instance2 = BaseClass.get("c2")
assert BaseClass.get("c2") == instance2
assert instance2.foo() == "baz"
| 22.513514 | 60 | 0.596639 | [
"Apache-2.0"
] | GroovySmoothie/localstack | tests/unit/utils/test_objects.py | 833 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-07-17 06:14
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('iiits', '0059_auto_20160717_0609'),
]
operations = [
migrations.AlterField(
model_name='notice',
name='valid_until',
field=models.DateTimeField(default=datetime.datetime(2016, 7, 24, 6, 14, 48, 161315, tzinfo=utc)),
),
migrations.AlterField(
model_name='topstory',
name='title',
field=models.CharField(max_length=255),
),
]
| 25.714286 | 110 | 0.620833 | [
"MIT"
] | IIITS/iiits.ac.in | iiits/migrations/0060_auto_20160717_0614.py | 720 | Python |
import requests
url = "https://apidojo-yahoo-finance-v1.p.rapidapi.com/market/get-trending-tickers"
querystring = {"region":"US"}
headers = {
'x-rapidapi-host': "apidojo-yahoo-finance-v1.p.rapidapi.com",
'x-rapidapi-key': "86bb0847c2msh62ec4f10fcc7ed9p17aea2jsn6b82733f81a1"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text) | 28.857143 | 84 | 0.717822 | [
"MIT"
] | pikamegan/GMS-hackathon | yfinance_trending.py | 404 | Python |
""""
defines a class that maps to the JSON input format and can be used with pydantic.
"""
import json
import os
import pickle
from hashlib import md5
from typing import List, Optional
from pydantic import BaseModel
from mldc.util import NLGEvalOutput
class MetaDlgDataDialog(BaseModel):
id: Optional[str]
domain: str = ""
task_id: str = ""
user_id: str = ""
bot_id: str = ""
turns: List[str]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class MetaDlgDataDialogList(BaseModel):
dialogs: List[MetaDlgDataDialog]
class PartitionSpec(BaseModel):
domains: List[str] = []
tasks: List[str] = []
paths: List[str] = []
def _asdict(self):
# convert to list for json-serializability
return dict(domains=self.domains, tasks=self.tasks, paths=self.paths)
# the next few fields/functions are here to make PartitionSpec behave like
# a pytext ConfigBase object. This way, we can use it directly in a task
# config. It would be easier if we could just inherit from ConfigBase,
# but alas, ConfigBase's metaclass is not a metaclass of BaseModel.
_field_types = __annotations__ # noqa
@property
def _fields(cls):
return cls.__annotations__.keys()
@property
def _field_defaults(cls):
_, defaults = cls.annotations_and_defaults()
return defaults
def is_ok(self, dlg: MetaDlgDataDialog):
if self.tasks and dlg.task_id not in self.tasks:
return False
if self.domains and dlg.domain not in self.domains:
return False
return True
def __bool__(self):
return True if self.domains or self.tasks or self.paths else False
def add(self, other):
self.domains = list(set(self.domains + other.domains))
self.tasks = list(set(self.tasks + other.tasks))
self.paths = list(set(self.paths + other.paths))
@classmethod
def from_paths(cls, paths):
return cls(domains=[], paths=paths, tasks=[])
def iterate_paths(self):
for path in self.paths:
yield path, PartitionSpec(domains=[NLGEvalOutput._domain_name(path)],
paths=[path],
tasks=self.tasks)
def checksum(self, zipfile, featurizer_config, text_embedder_cfg):
checksum = md5(json.dumps(featurizer_config._asdict(), sort_keys=True).encode('utf-8'))
text_embedder_cfg = text_embedder_cfg._asdict()
del text_embedder_cfg['preproc_dir']
del text_embedder_cfg['use_cuda_if_available']
checksum.update(json.dumps(text_embedder_cfg, sort_keys=True).encode('utf-8'))
md5file = zipfile + ".md5"
# if md5file exists and is newer than zipfile, read md5 sum from it
# else calculate it for the zipfile.
if os.path.exists(md5file) and os.path.getmtime(zipfile) <= os.path.getmtime(md5file):
with open(md5file, 'rt') as f:
checksum.update(f.read().split()[0].strip().encode('utf-8'))
else:
with open(zipfile, 'rb') as f:
checksum.update(md5(f.read()).hexdigest().encode('utf-8'))
checksum.update(pickle.dumps(sorted(self.domains)))
checksum.update(pickle.dumps(sorted(self.paths)))
checksum.update(pickle.dumps(sorted(self.tasks)))
return checksum.hexdigest()
class DataSpec(BaseModel):
train: PartitionSpec = PartitionSpec()
validation: PartitionSpec = PartitionSpec()
test: PartitionSpec = PartitionSpec()
def unpack_domains(self):
return [list(p) for p in (self.train.domains, self.validation.domains, self.test.domains)]
def unpack_tasks(self):
return [list(p) for p in (self.train.tasks, self.validation.tasks, self.test.tasks)]
def unpack_paths(self):
return [list(p) for p in (self.train.paths, self.validation.paths, self.test.paths)]
def unpack(self):
return self.train._asdict(), self.validation._asdict(), self.test._asdict()
@classmethod
def load(cls, f):
kwargs = json.load(f)
# This just works with Pydantic
return cls(**kwargs)
def add(self, other):
self.train.add(other.train)
self.validation.add(other.validation)
self.test.add(other.test)
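  # Minimal usage sketch (illustrative; the domain names are assumptions, not part of
  # the original module):
  #   spec = DataSpec(train=PartitionSpec(domains=["DOMAIN_A"]),
  #                   validation=PartitionSpec(domains=["DOMAIN_B"]),
  #                   test=PartitionSpec(domains=["DOMAIN_C"]))
  #   train_domains, valid_domains, test_domains = spec.unpack_domains()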
| 31.92126 | 94 | 0.695856 | [
"MIT"
] | Bhaskers-Blu-Org2/dstc8-meta-dialog | mldc/data/schema.py | 4,054 | Python |
from seatable_api import Base, context
import requests
import time
import os
"""
该脚本用于从图片链接下载图片到图片列。你可以在一个文本列中记录图片的地址,然后用这个
脚本自动下载图片并上传到图片列中。
"""
###################--- Basic configuration ---###################
SERVER_URL = context.server_url or 'https://cloud.seatable.cn/'
API_TOKEN = context.api_token or 'cacc42497886e4d0aa8ac0531bdcccb1c93bd0f5'
TABLE_NAME = 'Table1'
IMAGE_FILE_TYPE = ['jpg', 'png', 'jpeg', 'bmp', 'gif'] # supported image file extensions
IMG_URL_COL = '图片链接' # name of the column holding the image links (URL or text column)
IMG_COL = 'img' # name of the column that stores the images (image column)
IMG_NAME_PRE = 'image' # file-name prefix for the uploaded images
###################--- Basic configuration ---###################
def get_time_stamp():
return str(int(time.time()*100000))
def img_transfer():
    # 1. Create the Base object and authenticate
base = Base(API_TOKEN, SERVER_URL)
base.auth()
    # 2. Fetch the rows; the data structure is a list of dicts
"""
    Example data structure ('img' and '图片链接' are user-defined column names):
[{
'_id': 'RNn2isDfRnSPWq5HIwRT0w',
'_mtime': '2020-11-10T03:02:55.549+00:00',
'Name': '冉继伟0',
'img': [{
'name': 'cut.png',
'size': 2778797,
'type': 'file',
'url': 'https://dev.seafile.com/dtable-web/workspace/104/asset/1d50c674-ca45-4acf-85b8-19d6e10ca5f0/files/2020-11/cut.png'
}],
'图片链接': 'https://timgsa.baidu.com/timg?image&quality=80xxx.jpg'
}, {
'_id': 'b2lrBxnDSGm1LsZDQTVGhw',
'_mtime': '2020-11-04T08:47:51.562+00:00',
'Name': '冉继伟1'
}, {
'_id': 'RBUZ_g6qS_KER0EjaSclFA',
'_mtime': '2020-11-04T09:26:45.961+00:00',
'Name': '冉继伟2',
'img': None
}, ......]
"""
rows = base.list_rows(TABLE_NAME)
count = 0
    # 3. Iterate over every row and read the image-link column
for row in rows:
time_stamp = get_time_stamp()
img_url = row.get(IMG_URL_COL, None)
img = row.get(IMG_COL, None)
try:
            # Skip when there is no image link or the image column already has data, to avoid duplicates
if (not img_url) or img:
continue
            # Get the file extension from the URL
img_name_extend = img_url.strip().split('.')[-1]
img_name_extend = img_name_extend in IMAGE_FILE_TYPE and img_name_extend or 'jpg'
            # Rename the downloaded file: prefix + timestamp + extension
img_name = "/tmp/image-%s.%s"%(time_stamp, img_name_extend)
            # Download the file
response = requests.get(img_url)
if response.status_code != 200:
raise Exception('download file error')
with open(img_name, 'wb') as f:
f.write(response.content)
            # Upload the file
info_dict = base.upload_local_file(img_name, name=None, relative_path=None, file_type='image', replace=True)
row[IMG_COL] = [info_dict.get('url')]
base.update_row('Table1', row['_id'], row)
            # Remove the local copy after the upload
os.remove(img_name)
except Exception as err_msg:
            print('count%s-%s-%s-message: %s' % (count, row['_id'], img_url, err_msg)) # on error, print the row details for easier tracing
continue
count += 1
if __name__ == "__main__":
img_transfer()
| 34.182796 | 134 | 0.538534 | [
"Apache-2.0"
] | seatable/seatable-scripts-cn | examples/python/image_transfer.py | 3,759 | Python |
from django.urls import include, path, re_path
from .models import *
from .views import *
urlpatterns = [
path('imagenes/', lista_galerias_img, name='lista-galerias-img'),
path('imagenes/<tema>', filtro_temas_img, name='filtro_temas_img'),
path('imagenes/<id>/', detalle_galerias_img, name='detalle-galerias-img'),
path('videos/', lista_galerias_videos, name='lista-galerias-videos'),
path('videos/<tema>', filtro_temas_vid, name='filtro_temas_vid'),
] | 41.636364 | 75 | 0.746725 | [
"MIT"
] | ErickMurillo/interteam | galerias/urls.py | 458 | Python |
import re
from typing import Dict
from opentrons.hardware_control.g_code_parsing.g_code_functionality_defs.g_code_functionality_def_base import ( # noqa: E501
GCodeFunctionalityDefBase,
)
class GetTempGCodeFunctionalityDef(GCodeFunctionalityDefBase):
RESPONSE_RE = re.compile(r"T:(?P<set_temp>.*?)C:(?P<current_temp>\d+.\d+)")
@classmethod
def _generate_command_explanation(cls, g_code_args: Dict[str, str]) -> str:
return "Getting temperature"
@classmethod
def _generate_response_explanation(cls, response: str) -> str:
match = cls.RESPONSE_RE.match(response)
message = ""
if match is not None:
current_temp = match.groupdict()["current_temp"].strip()
set_temp = match.groupdict()["set_temp"].strip()
if set_temp == "none":
message = (
f"Temp deck is disengaged. "
f"Current temperature is {current_temp}C"
)
else:
message = (
f"Set temperature is {set_temp}C. "
f"Current temperature is {current_temp}C"
)
return message
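    # Illustrative mapping (assumed raw responses, not part of the original module):
    #   "T:none C:22.173"   -> "Temp deck is disengaged. Current temperature is 22.173C"
    #   "T:40.000 C:39.512" -> "Set temperature is 40.000C. Current temperature is 39.512C"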
| 34.852941 | 125 | 0.605063 | [
"Apache-2.0"
] | knownmed/opentrons | api/src/opentrons/hardware_control/g_code_parsing/g_code_functionality_defs/tempdeck/get_temp_g_code_functionality_def.py | 1,185 | Python |
import os as _os
from glob import glob as _glob
import functools as _functools
from concurrent.futures import ProcessPoolExecutor as _Executor
import tempfile as _tempfile
from six import string_types as _string_types
import tqdm as _tqdm
# expose these two exceptions as part of the API. Everything else should feed into these.
from .exceptions import ConversionError, InvalidArchiveError # NOQA
from .tarball import CondaTarBZ2 as _CondaTarBZ2, libarchive_enabled # NOQA
from .conda_fmt import CondaFormat_v2 as _CondaFormat_v2
from .utils import TemporaryDirectory as _TemporaryDirectory, rm_rf as _rm_rf
SUPPORTED_EXTENSIONS = {'.tar.bz2': _CondaTarBZ2,
'.conda': _CondaFormat_v2}
def _collect_paths(prefix):
dir_paths, file_paths = [], []
for dp, dn, filenames in _os.walk(prefix):
for f in filenames:
file_paths.append(_os.path.relpath(_os.path.join(dp, f), prefix))
dir_paths.extend(_os.path.relpath(_os.path.join(dp, _), prefix) for _ in dn)
file_list = file_paths + [dp for dp in dir_paths
if not any(f.startswith(dp) for f in file_paths)]
return file_list
def get_default_extracted_folder(in_file):
dirname = None
for ext in SUPPORTED_EXTENSIONS:
if in_file.endswith(ext):
dirname = _os.path.basename(in_file)[:-len(ext)]
if not _os.path.isabs(dirname):
dirname = _os.path.normpath(_os.path.join(_os.getcwd(), dirname))
return dirname
def extract(fn, dest_dir=None, components=None):
if dest_dir:
if not _os.path.isabs(dest_dir):
dest_dir = _os.path.normpath(_os.path.join(_os.getcwd(), dest_dir))
if not _os.path.isdir(dest_dir):
_os.makedirs(dest_dir)
else:
dest_dir = get_default_extracted_folder(fn)
for ext in SUPPORTED_EXTENSIONS:
if fn.endswith(ext):
SUPPORTED_EXTENSIONS[ext].extract(fn, dest_dir, components=components)
break
else:
raise ValueError("Didn't recognize extension for file '{}'. Supported extensions are: {}"
.format(fn, list(SUPPORTED_EXTENSIONS.keys())))
def create(prefix, file_list, out_fn, out_folder=None, **kw):
if not out_folder:
out_folder = _os.getcwd()
if file_list is None:
file_list = _collect_paths(prefix)
elif isinstance(file_list, _string_types):
try:
with open(file_list) as f:
data = f.readlines()
file_list = [_.strip() for _ in data]
except:
raise
for ext in SUPPORTED_EXTENSIONS:
if out_fn.endswith(ext):
try:
out = SUPPORTED_EXTENSIONS[ext].create(prefix, file_list, out_fn, out_folder, **kw)
except:
# don't leave broken files around
if _os.path.isfile(out):
_rm_rf(out)
return out
def _convert(fn, out_ext, out_folder, **kw):
basename = get_default_extracted_folder(fn)
from .validate import validate_converted_files_match
if not basename:
print("Input file %s doesn't have a supported extension (%s), skipping it"
% (fn, SUPPORTED_EXTENSIONS))
return
out_fn = _os.path.join(out_folder, basename + out_ext)
errors = ""
if not _os.path.lexists(out_fn):
with _TemporaryDirectory(prefix=out_folder) as tmp:
try:
extract(fn, dest_dir=tmp)
file_list = _collect_paths(tmp)
create(tmp, file_list, _os.path.basename(out_fn), out_folder=out_folder, **kw)
_, missing_files, mismatching_sizes = validate_converted_files_match(
tmp, _os.path.join(out_folder, fn))
if missing_files or mismatching_sizes:
errors = str(ConversionError(missing_files, mismatching_sizes))
except Exception as e:
errors = str(e)
return fn, out_fn, errors
def transmute(in_file, out_ext, out_folder=None, processes=None, **kw):
if not out_folder:
out_folder = _os.path.dirname(in_file) or _os.getcwd()
flist = set(_glob(in_file))
if in_file.endswith('.tar.bz2'):
flist = flist - set(_glob(in_file.replace('.tar.bz2', out_ext)))
elif in_file.endswith('.conda'):
flist = flist - set(_glob(in_file.replace('.conda', out_ext)))
failed_files = {}
with _tqdm.tqdm(total=len(flist), leave=False) as t:
with _Executor(max_workers=processes) as executor:
convert_f = _functools.partial(_convert, out_ext=out_ext,
out_folder=out_folder, **kw)
for fn, out_fn, errors in executor.map(convert_f, flist):
t.set_description("Converted: %s" % fn)
t.update()
if errors:
failed_files[fn] = errors
_rm_rf(out_fn)
return failed_files
def verify_conversion(glob_pattern, target_dir, reference_ext,
tmpdir_root=_tempfile.gettempdir(), processes=None):
from .validate import validate_converted_files_match
if not glob_pattern.endswith(reference_ext):
glob_pattern = glob_pattern + reference_ext
file_sets_by_ext = {ext: _glob(_os.path.join(target_dir, glob_pattern + ext))
for ext in SUPPORTED_EXTENSIONS}
matches = {path.replace(ext, "") for ext, path in file_sets_by_ext[reference_ext]}
for ext, paths in file_sets_by_ext.items():
if ext == reference_ext:
continue
matches &= {path.replace(ext, "") for ext, path in paths}
other_exts = set(SUPPORTED_EXTENSIONS) - {reference_ext, }
errors = {}
with _tqdm.tqdm(total=(len(matches) * len(SUPPORTED_EXTENSIONS) - 1), leave=False) as t:
with _Executor(max_workers=processes) as executor:
for other_ext in other_exts:
verify_fn = lambda fn: validate_converted_files_match(ref_ext=reference_ext,
subject=fn + other_ext)
for fn, missing, mismatching in executor.map(verify_fn, matches):
t.set_description("Validating %s" % fn)
t.update()
if missing or mismatching:
errors[fn] = str(ConversionError(missing, mismatching))
return errors
def get_pkg_details(in_file):
"""For the new pkg format, we return the size and hashes of the inner pkg part of the file"""
for ext in SUPPORTED_EXTENSIONS:
if in_file.endswith(ext):
details = SUPPORTED_EXTENSIONS[ext].get_pkg_details(in_file)
break
else:
raise ValueError("Don't know what to do with file {}".format(in_file))
return details
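# Minimal usage sketch (illustrative file names, not part of the original module):
#   from conda_package_handling import api
#   api.extract("mypkg-1.0-0.tar.bz2")              # unpacks into ./mypkg-1.0-0
#   api.transmute("mypkg-1.0-0.tar.bz2", ".conda")  # converts to the .conda (v2) format
#   print(api.get_pkg_details("mypkg-1.0-0.conda"))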
| 40.508876 | 99 | 0.631171 | [
"BSD-3-Clause"
] | katietz/conda-package-handling | src/conda_package_handling/api.py | 6,846 | Python |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 4 16:39:50 2013
@author: Xiaoxuan Jia
"""
import json
import csv
import re
import scipy.io
import scipy.stats
import random
import numpy as np
import os
import itertools
import cPickle as pk
import pymongo
import scipy
from scipy.stats import norm
import matplotlib.pyplot as plt
def SBcorrection(corr, mult_factor):
pred = (mult_factor*corr)/(1+(mult_factor-1)*corr)
return pred
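# Worked example (illustrative): a split-half correlation of 0.5 corrected with
# mult_factor=2 (predicting reliability at twice the trial count) gives
# (2 * 0.5) / (1 + (2 - 1) * 0.5) = 0.667, the Spearman-Brown prophecy formula.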
def normalize_CM(CF):
new_CF = np.zeros(np.shape(CF))
for col in range(0, np.shape(CF)[1]):
total = np.sum(CF[:,col])
norm_col = CF[:,col]/float(total)
new_CF[:,col] = norm_col
return new_CF
def d_prime2x2(CF):
H = CF[0,0]/(CF[0,0]+CF[1,0]) # H = hit/(hit+miss)
F = CF[0,1]/(CF[0,1]+CF[1,1]) # F = False alarm/(false alarm+correct rejection)
if H == 1:
H = 1-1/(2*(CF[0,0]+CF[1,0]))
if H == 0:
H = 0+1/(2*(CF[0,0]+CF[1,0]))
if F == 0:
F = 0+1/(2*(CF[0,1]+CF[1,1]))
if F == 1:
F = 1-1/(2*(CF[0,1]+CF[1,1]))
d = norm.ppf(H)-norm.ppf(F)
return d
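# Worked example (illustrative): CF = np.array([[8., 2.], [2., 8.]]) gives
# H = 8/10 = 0.8 and F = 2/10 = 0.2, so d' = norm.ppf(0.8) - norm.ppf(0.2)
# ~= 0.842 - (-0.842) = 1.683; the clamping branches above only apply when H or F
# would otherwise be exactly 0 or 1.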
def d_prime(CF): # note: has a problem when called by module name; artificially change CF to an n-by-5 matrix
d = []
for i in range(len(CF[0][1])):
H = CF[0][i, i]/sum(CF[0][:,i]) # H = target diagnal/target column
tempCF = scipy.delete(CF[0], i, 1) # delete the target column
F = sum(tempCF[i,:])/sum(tempCF)
#if H == 1:
# H = 1-1/(2*sum(CF[0][:,i]))
#if H == 0:
# H = 0+1/(2*sum(CF[0][:,i]))
#if F == 0:
# F = 0+1/(2*sum(tempCF))
#if F == 1:
# F = 1-1/(2*sum(tempCF))
d.append(norm.ppf(H)-norm.ppf(F))
return d
def offDmass(CF):
return sum(CF[np.eye(CF.shape[0])==0]/float(sum(CF)))
class expDataDB(object):
def __init__(self, collection, selector, numObjs, obj, trialNum):
conn = pymongo.Connection(port = 22334, host = 'localhost')
db = conn.mturk
col = db[collection]
self.obj = obj
self.trialNum = trialNum
self.subj_data = list(col.find(selector))
self.numObjs = numObjs
if obj != 'face':
obj_inds = []
for idx, t in enumerate(self.subj_data[0]['ImgData']):
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
if len(t)<10:
obj_inds.append(t[0]['obj'])
else:
obj_inds.append(t['obj'])
self.models = np.unique(obj_inds)
self.models_idxs = {}
for idx, model in enumerate(self.models):
self.models_idxs[model] = idx
self.models_idxs = self.models_idxs
self.trial_data = self.preprocess(self.subj_data, self.obj, self.trialNum)
self.numResp = numObjs
self.totalTrials = len(self.trial_data)
self.corr_type = 'pearson'
def init_from_pickle(self, pkFile):
f = open(pkFile, 'rb')
data = pk.load(f)
f.close()
self.subj_data = data
self.trial_data = self.preprocess(self.subj_data)
self.totalTrials = len(self.trial_data)
def setPopCM(self):
if self.numResp == 2:
self.popCM, self.CM_order = self.getPopCM2x2fast(self.trial_data)
else:
self.popCM, self.CM_order = self.getPopCM(self.trial_data)
def preprocess(self, subj_data, obj, trialNum):
# before the fb experiment, the HvM metadata, uploaded urls dont have unique hash id in the url, after feedback exp, both meta and the pushed json files changed
RV = [] #Response vector
SV = [] #Stimulus vector
DV = [] #Distractor vector
if obj=='face':
RV = [] #Response vector
DV = [] #Distractor vector
RT = []
for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']
models_name = np.unique(subj['Response'])
models_size = np.unique(subj['Size'])
self.models = []
for idx1 in models_name:
for idx2 in models_size:
self.models.append([str(idx1)+'_'+str(idx2)])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for t_idx, t in enumerate(subj['RT']):
if t_idx>=trialNum[0] and t_idx<trialNum[1]:
RT.append(t)
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
RV.append([str(r)+'_'+str(subj['Size'][r_idx])])
for s_idx, s in enumerate(subj['StimShown']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
DV.append([str(s)+'_'+str(subj['Size'][s_idx])])
elif obj=='obj_lack':
RV_s = [] #Response vector
DV_s = [] #Distractor vector
RV_p = []
DV_p = []
RV_r = []
DV_r = []
RV = []
DV = []
for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']
self.models = np.unique(subj['Response'])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx]['tname'] == 'obj_size':
RV_s.append(r)
elif subj['ImgData'][r_idx]['tname'] == 'position':
RV_p.append(r)
elif subj['ImgData'][r_idx]['tname'] == 'rotation':
RV_r.append(r)
else: #'objectome32'
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx]['tname'] == 'obj_size':
DV_s.append(s)
elif subj['ImgData'][s_idx]['tname'] == 'position':
DV_p.append(s)
elif subj['ImgData'][s_idx]['tname'] == 'rotation':
DV_r.append(s)
else:
DV.append(s)
elif obj=='obj':
RV_s = [] #Response vector
DV_s = [] #Distractor vector
RV_p = []
DV_p = []
RV_r = []
DV_r = []
RV = []
DV = []
for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']
self.models = np.unique(subj['Response'])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx][0]['tname'] == 'obj_size':
RV_s.append(r)
elif subj['ImgData'][r_idx][0]['tname'] == 'position':
RV_p.append(r)
elif subj['ImgData'][r_idx][0]['tname'] == 'rotation':
RV_r.append(r)
else: #'objectome32'
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx][0]['tname'] == 'obj_size':
DV_s.append(s)
elif subj['ImgData'][s_idx][0]['tname'] == 'position':
DV_p.append(s)
elif subj['ImgData'][s_idx][0]['tname'] == 'rotation':
DV_r.append(s)
else:
DV.append(s)
elif obj=='2way':
RV = [] #Response vector
DV = [] #Distractor vector
RV_s = [] #Response vector
DV_s = [] #Distractor vector
SV_s = []
SV = []
for subj in self.subj_data:
for t_idx, t in enumerate(subj['ImgData']):
if t_idx>=trialNum[0] and t_idx<trialNum[1]:
if subj['ImgData'][t_idx][0]['tname'] == 'obj_size':
SV_s.append([t[1]['obj'],t[2]['obj']])
else: #'objectome32'
SV.append([t[1]['obj'],t[2]['obj']])
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx][0]['tname'] == 'obj_size':
RV_s.append(r)
else: #'objectome32'
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx][0]['tname'] == 'obj_size':
DV_s.append(s)
else:
DV.append(s)
elif obj=='2way_face':
RV = [] #Response vector
DV = [] #Distractor vector
RV_s = [] #Response vector
DV_s = [] #Distractor vector
SV_s = []
SV = []
for subj in self.subj_data:
for t_idx, t in enumerate(subj['ImgData']):
if t_idx>=trialNum[0] and t_idx<trialNum[1]:
if subj['ImgData'][t_idx][0]['var'] == 'V0_size':
SV_s.append([t[1]['obj'],t[2]['obj']])
else: #'objectome32'
SV.append([t[1]['obj'],t[2]['obj']])
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
if subj['ImgData'][r_idx][0]['var'] == 'V0_size':
RV_s.append(r)
else: #'objectome32'
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
if subj['ImgData'][s_idx][0]['var'] == 'V0_size':
DV_s.append(s)
else:
DV.append(s)
else:
RV = [] #Response vector
DV = [] #Distractor vector
for subj in subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']
self.models = np.unique(subj['TestStim'])
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
RV.append(r)
for s_idx, s in enumerate(subj['StimPresent']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
DV.append(s)
if obj=='obj':
new_data_s = []
new_data_p = []
new_data_r = []
new_data = []
for idx, shown in enumerate(DV_s):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV_s[idx])] #response
new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
for idx, shown in enumerate(DV_p):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV_p[idx])] #response
new_data_p.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
for idx, shown in enumerate(DV_r):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV_r[idx])] #response
new_data_r.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV[idx])] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
return [new_data_s, new_data_p, new_data_r, new_data]
elif obj=='2way':
new_data_s = []
new_data = []
for idx, shown in enumerate(DV_s):
model = shown
CF_col_idx = self.models_idxs[model] #stimulus shown
CF_row_idx = self.models_idxs[RV_s[idx]] #response
new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV_s[idx]]]) #order is shown, picked, distractors
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[model] #stimulus shown
CF_row_idx = self.models_idxs[RV[idx]] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV[idx]]]) #order is shown, picked, distractors
return [new_data_s, new_data]
elif obj=='2way_face':
new_data_s = []
new_data = []
for idx, shown in enumerate(DV_s):
model = shown
CF_col_idx = self.models_idxs[model] #stimulus shown
CF_row_idx = self.models_idxs[RV_s[idx]] #response
new_data_s.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV_s[idx]]]) #order is shown, picked, distractors
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[model] #stimulus shown
CF_row_idx = self.models_idxs[RV[idx]] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[m] for m in SV[idx]]]) #order is shown, picked, distractors
return [new_data_s, new_data]
elif obj=='face':
new_data = []
for idx, shown in enumerate(DV):
if RT[idx]<3000:
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV[idx])] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
return new_data
else:
new_data = []
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV[idx])] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
return new_data
def getPopCM2x2fast(self, trial_data):
combs = list(itertools.combinations(range(0, self.numObjs), 2))
CMs = {}
for c in combs:
CMs[c] = np.zeros((2,2))
for t in trial_data: # each trial can only increase +1 in total; statistics is based on many trials
target = t[0]
pick = t[1]
cm = tuple(sorted(t[2])) #Because itertools always spits out the combs in sorted order; the two-way task is designed for each pair, either target is presented with equal times
if target == cm[0]: #stimulus = True: when the signal present
if target == pick: #response = true; Hit
CMs[cm][0,0] += 1
else: # response = False; Miss
CMs[cm][1,0] += 1
else: # stimulus = False; when the signal does not present
if target == pick: # response = false; correct rejection
CMs[cm][1,1] += 1
else: # response = true; false alarm
CMs[cm][0,1] += 1
return [CMs[c] for c in combs], combs
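    # Illustrative bookkeeping (not part of the original code): for the pair (0, 1),
    # a trial [0, 0, [0, 1]] counts as a hit (CM[0,0]), [0, 1, [0, 1]] as a miss
    # (CM[1,0]), [1, 0, [0, 1]] as a false alarm (CM[0,1]) and [1, 1, [0, 1]] as a
    # correct rejection (CM[1,1]), following the [shown, picked, pair] trial layout.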
def getPopCM(self, trial_data, order=[]): # trial_data is for individual subj or for all subj (myresult.trial_data)
if len(trial_data[0][2]) != len(self.trial_data[0][2]):
numResp = len(trial_data[0][2]) # should not use self.trial_data
else:
numResp = len(self.trial_data[0][2])
#print numResp
obj_inds = []
for t in trial_data:
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
obj_inds.append(t[0])
if len(np.unique(obj_inds)) != self.numObjs:
obj_inds = range(self.numObjs)
else:
obj_inds = obj_inds
combs = list(itertools.combinations(np.unique(obj_inds), numResp))
CMs = [np.zeros((numResp, numResp)) for i in range(0, len(combs))]
for trial in trial_data:
distractor = [m for m in trial[2] if m != trial[0]]
target = trial[0]
pick = trial[1]
possCombs = [[comb, idx] for idx, comb in enumerate(combs) if target in comb]
for comb in possCombs:
if set(distractor).issubset(set(comb[0])):
if len(order) > 0:
comb[0] = order
if pick == target:
idx = comb[0].index(pick)
CMs[comb[1]][idx, idx] += 1
elif pick != target:
CMs[comb[1]][comb[0].index(pick), comb[0].index(target)] += 1
else:
print('Matrix Error')
return CMs, combs
def getexposureCM(self, trial_data, trialNum, expoNum): # trial_data is for individual subj or for all subj (myresult.trial_data)
if len(trial_data[0][2]) != len(self.trial_data[0][2]):
numResp = len(trial_data[0][2]) # should not use self.trial_data
else:
numResp = len(self.trial_data[0][2])
#print numResp
obj_inds = []
for t in trial_data:
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
obj_inds.append(t[0])
condi = self.subj_data[0]['Combinations']
newcondi = []
s1 = set(['NONSWAP', 'SWAP'])
for subj in self.subj_data:
s2 = set(subj.keys())
for s in subj[list(s1.intersection(s2))[0]]:
newcondi.append([x for idx, x in enumerate(condi[int(s)]) if idx>= expoNum[0] and idx<expoNum[1]]) #need to modify if the total number of condtion change
if len(newcondi) != len(trial_data):
print('trial number inconsistent')
else:
print(str(len(trial_data)))
RV = [] #Response vector
DV = [] #Distractor vector
for subj in self.subj_data: # subj is dict in list subj_data; to access string values in a dist within a list, use subj_data[0]['Response']
models = np.unique(subj['Response'])
self.models = []
for idx in models:
self.models.append(idx)
models_idxs = {}
for idx, model in enumerate(self.models):
models_idxs[tuple(model)] = idx
self.models_idxs = models_idxs
for r_idx, r in enumerate(subj['Response']):
if r_idx>=trialNum[0] and r_idx<trialNum[1]:
RV.append(r)
for s_idx, s in enumerate(subj['StimShown']):
if s_idx>=trialNum[0] and s_idx<trialNum[1]:
DV.append(s)
new_data = []
for idx, shown in enumerate(DV):
model = shown
CF_col_idx = self.models_idxs[tuple(model)] #stimulus shown
CF_row_idx = self.models_idxs[tuple(RV[idx])] #response
new_data.append([CF_col_idx, CF_row_idx, [self.models_idxs[tuple(m)] for m in self.models]]) #order is shown, picked, distractors
return newcondi, new_data
def computeSplitHalf_size(self, numSplits, subsample, verbose = False, correct = True, plot_ = False): #subsample equal to total trial number if don't want to subsample
import scipy.stats
trial_data = self.trial_data
Rs = []
for s in range(0, numSplits):
if verbose == True:
print(s)
else:
pass
            np.random.shuffle(trial_data)
            half1 = []
            half2 = []
            if int(subsample)%2 == 0:
                half1.extend(trial_data[0:subsample/2])
                half2.extend(trial_data[-subsample/2:])
            else:
                half1.extend(trial_data[0:subsample/2+1])
                half2.extend(trial_data[-subsample/2:])
if self.numResp == 2:
CM1, combs = self.getPopCM2x2fast(half1)
CM2, combs = self.getPopCM2x2fast(half2)
else:
CM1, combs = self.getPopCM(half1)
CM2, combs = self.getPopCM(half2)
half1_array = []
half2_array = []
for mat in range(0, len(CM1)):
newarray = np.reshape(normalize_CM(CM1[mat]),(CM1[mat].shape[0]*CM1[mat].shape[1],-1))
half1_array += list([x for x in newarray if x!=0])
newarray = np.reshape(normalize_CM(CM2[mat]),(CM2[mat].shape[0]*CM2[mat].shape[1],-1))
half2_array += list([x for x in newarray if x!=0])
if self.corr_type == 'pearson':
Rs.append(scipy.stats.pearsonr(half1_array, half2_array)[0])
#correct = False
else:
Rs.append(scipy.stats.spearmanr(half1_array, half2_array)[0])
if plot_ == True:
plt.plot(half1_array, half2_array, 'b.')
if correct == False:
return Rs
else:
Rs_c = [SBcorrection(r, 2) for r in Rs]
return Rs_c
def computeSplitHalf_dprime(self, pair_trial_data, boot, starttrial, verbose = False, correct = True, plot_ = False, trial_data = None): #subsample equal to total trial number if don't want to subsample
import scipy.stats
count = [len(trial) for trial in pair_trial_data]
corr_dprime = []
for i in range(boot):
temp = []
for w in range(min(count)-starttrial+1):
a = [random.sample(trial, w+starttrial) for trial in pair_trial_data]
subsample = len(a[0])
Rs = []
for b in range(boot):
half1 = []
half2 = []
for t in a:
np.random.shuffle(t)
if int(subsample)%2 == 0:
half1.extend(t[0:subsample/2])
half2.extend(t[-subsample/2:])
else:
half1.extend(t[0:subsample/2+1])
half2.extend(t[-subsample/2:])
CM1, combs = self.getPopCM2x2fast(half1)
CM2, combs = self.getPopCM2x2fast(half2)
half1_dprime = []
half2_dprime = []
for mat in range(0, len(CM1)):
half1_dprime.append(d_prime2x2(CM1[mat])) # previously normalized CM, which caused nan when divided by 0
half2_dprime.append(d_prime2x2(CM2[mat]))
Rs.append(scipy.stats.spearmanr(half1_dprime, half2_dprime)[0])
temp.append(np.ma.masked_invalid(Rs).mean(0))
corr_dprime.append(temp)
return corr_dprime
def computeSplitHalf(self, numSplits, subsample, verbose = False, correct = True, plot_ = False, trial_data = None): #subsample equal to total trial number if don't want to subsample
import scipy.stats
if trial_data == None:
trial_data = self.trial_data
else:
trial_data = trial_data
Rs = []
for s in range(0, numSplits):
if verbose == True:
print(s)
else:
pass
np.random.shuffle(trial_data)
half1 = []
half2 = []
if int(subsample)%2 == 0:
half1.extend(trial_data[0:subsample/2])
half2.extend(trial_data[-subsample/2:])
else:
half1.extend(trial_data[0:subsample/2+1])
half2.extend(trial_data[-subsample/2:])
if self.numResp == 2:
CM1, combs = self.getPopCM2x2fast(half1)
CM2, combs = self.getPopCM2x2fast(half2)
else:
CM1, combs = self.getPopCM(half1)
CM2, combs = self.getPopCM(half2)
half1_array = []
half2_array = []
for mat in range(0, len(CM1)):
half1_array += list(normalize_CM(CM1[mat])[np.eye(CM1[mat].shape[0])==0])
half2_array += list(normalize_CM(CM2[mat])[np.eye(CM2[mat].shape[0])==0])
if self.corr_type == 'pearson':
Rs.append(scipy.stats.pearsonr(half1_array, half2_array)[0])
#correct = False
else:
Rs.append(scipy.stats.spearmanr(half1_array, half2_array)[0])
if plot_ == True:
plt.plot(half1_array, half2_array, 'b.')
if correct == False:
return Rs
else:
Rs_c = [SBcorrection(r, 2) for r in Rs]
return Rs_c
def imputeNtoM(self, use_objects):
#Produces a single imputed matrix of a given size for given objects. The matrix will have blank entries
#if you ask for a greater size than is given by the number of objects represented by your data
obj_inds = []
for t in self.trial_data:
if len(np.unique(obj_inds)) == self.numObjs:
break
else:
obj_inds.append(t[0])
t = []
for obj in use_objects:
t.append(self.models.index(obj))
import itertools
combs = list(itertools.combinations(t, self.numResp))
CM_imputed = np.zeros((len(t),len(t)))
for trial in self.trial_data:
for comb in combs:
if set(comb).issubset(set(trial[2])):
if trial[0] == trial[1]:
CM_imputed[t.index(trial[0]), t.index(trial[0])] += 1
else:
CM_imputed[t.index(trial[1]), t.index(trial[0])] += 1
return CM_imputed
| 42.728659 | 206 | 0.506136 | [
"MIT"
] | jiaxx/temporal_learning_paper | code/learningutil.py | 28,030 | Python |
# -*- coding: utf-8 -*-
# IFD.A-8 :: Version: 1 :: Check entering an invalid value in the "IATA code" field when selecting an airport
# Step 1
def test_check_invalid_value_IATA_to_select_airport(app):
app.session.enter_login(username="test")
app.session.enter_password(password="1245")
app.airport.open_form_add_airport()
app.airport.enter_IATA_code(iata_cod="QWE")
app.airport.search_airport_by_parameter()
app.airport.message_no_airports()
app.airport.exit_from_the_add_airport_form()
app.session.logout()
# IFD.A-8 :: Version: 1 :: Check entering an invalid value in the "IATA code" field when selecting an airport
# Step 2
def test_check_invalid_characters_in_IATA_code(app):
app.session.enter_login(username="test")
app.session.enter_password(password="1245")
app.airport.open_form_add_airport()
app.airport.enter_IATA_code(iata_cod="!№;%:?*")
app.airport.wait_massege_no_airport()
app.airport.exit_from_the_add_airport_form()
app.session.logout() | 41.416667 | 100 | 0.751509 | [
"Apache-2.0"
] | Sergei-Soldatov/IFD.A | test/test_check_invalid_value_IATA_code.py | 1,130 | Python |
from direct.task.Task import Task
from panda3d.core import *
from DistributedNPCToonBaseAI import *
from toontown.quest import Quests
class DistributedNPCSpecialQuestGiverAI(DistributedNPCToonBaseAI):
def __init__(self, air, npcId, questCallback=None, hq=0):
DistributedNPCToonBaseAI.__init__(self, air, npcId, questCallback)
self.hq = hq
self.tutorial = 0
self.pendingAvId = None
return
def getTutorial(self):
return self.tutorial
def setTutorial(self, val):
self.tutorial = val
def getHq(self):
return self.hq
def avatarEnter(self):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('avatar enter ' + str(avId))
self.air.questManager.requestInteract(avId, self)
DistributedNPCToonBaseAI.avatarEnter(self)
def chooseQuest(self, questId, quest=None):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('chooseQuest: avatar %s choseQuest %s' % (avId, questId))
if not self.pendingAvId:
self.notify.warning('chooseQuest: not expecting an answer from any avatar: %s' % avId)
return
if self.pendingAvId != avId:
self.notify.warning('chooseQuest: not expecting an answer from this avatar: %s' % avId)
return
if questId == 0:
self.pendingAvId = None
self.pendingQuests = None
self.air.questManager.avatarCancelled(avId)
self.cancelChoseQuest(avId)
return
for quest in self.pendingQuests:
if questId == quest[0]:
self.pendingAvId = None
self.pendingQuests = None
self.air.questManager.avatarChoseQuest(avId, self, *quest)
return
self.air.questManager.avatarChoseQuest(avId, self, *quest)
self.notify.warning('chooseQuest: avatar: %s chose a quest not offered: %s' % (avId, questId))
self.pendingAvId = None
self.pendingQuests = None
return
def chooseTrack(self, trackId):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('chooseTrack: avatar %s choseTrack %s' % (avId, trackId))
if not self.pendingAvId:
self.notify.warning('chooseTrack: not expecting an answer from any avatar: %s' % avId)
return
if self.pendingAvId != avId:
self.notify.warning('chooseTrack: not expecting an answer from this avatar: %s' % avId)
return
if trackId == -1:
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.air.questManager.avatarCancelled(avId)
self.cancelChoseTrack(avId)
return
for track in self.pendingTracks:
if trackId == track:
self.air.questManager.avatarChoseTrack(avId, self, self.pendingTrackQuest, trackId)
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
return
self.notify.warning('chooseTrack: avatar: %s chose a track not offered: %s' % (avId, trackId))
self.pendingAvId = None
self.pendingTracks = None
self.pendingTrackQuest = None
return
def sendTimeoutMovie(self, task):
self.pendingAvId = None
self.pendingQuests = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIMEOUT,
self.npcId,
self.busy, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
self.sendClearMovie(None)
self.busy = 0
return Task.done
def sendClearMovie(self, task):
self.pendingAvId = None
self.pendingQuests = None
self.pendingTracks = None
self.pendingTrackQuest = None
self.busy = 0
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_CLEAR,
self.npcId,
0, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
return Task.done
def rejectAvatar(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_REJECT,
self.npcId,
avId, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))
def rejectAvatarTierNotDone(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TIER_NOT_DONE,
self.npcId,
avId, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(5.5, self.sendClearMovie, self.uniqueName('clearMovie'))
def completeQuest(self, avId, questId, rewardId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_COMPLETE,
self.npcId,
avId,
[
questId, rewardId, 0],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def incompleteQuest(self, avId, questId, completeStatus, toNpcId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_INCOMPLETE,
self.npcId,
avId,
[
questId, completeStatus, toNpcId],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def assignQuest(self, avId, questId, rewardId, toNpcId):
self.busy = avId
if self.questCallback:
self.questCallback()
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_ASSIGN,
self.npcId,
avId,
[
questId, rewardId, toNpcId],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def presentQuestChoice(self, avId, quests):
self.busy = avId
self.pendingAvId = avId
self.pendingQuests = quests
flatQuests = []
for quest in quests:
flatQuests.extend(quest)
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE,
self.npcId,
avId,
flatQuests,
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def presentTrackChoice(self, avId, questId, tracks):
self.busy = avId
self.pendingAvId = avId
self.pendingTracks = tracks
self.pendingTrackQuest = questId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE,
self.npcId,
avId,
tracks,
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def cancelChoseQuest(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_QUEST_CHOICE_CANCEL,
self.npcId,
avId, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def cancelChoseTrack(self, avId):
self.busy = avId
self.sendUpdate('setMovie', [NPCToons.QUEST_MOVIE_TRACK_CHOICE_CANCEL,
self.npcId,
avId, [],
ClockDelta.globalClockDelta.getRealNetworkTime()])
if not self.tutorial:
taskMgr.doMethodLater(60.0, self.sendTimeoutMovie, self.uniqueName('clearMovie'))
def setMovieDone(self):
avId = self.air.getAvatarIdFromSender()
self.notify.debug('setMovieDone busy: %s avId: %s' % (self.busy, avId))
if self.busy == avId:
taskMgr.remove(self.uniqueName('clearMovie'))
self.sendClearMovie(None)
else:
if self.busy:
self.air.writeServerEvent('suspicious', avId, 'DistributedNPCToonAI.setMovieDone busy with %s' % self.busy)
self.notify.warning('somebody called setMovieDone that I was not busy with! avId: %s' % avId)
return | 38.336323 | 123 | 0.628729 | [
"MIT"
] | TTOFFLINE-LEAK/ttoffline | v2.5.7/toontown/toon/DistributedNPCSpecialQuestGiverAI.py | 8,549 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import logging
import multiprocessing as mp
import numpy as np
import os
import torch
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.data.detection_utils import read_image
from detectron2.engine.defaults import DefaultPredictor
from detectron2.utils.logger import setup_logger
from pytorch3d.io import save_obj
from pytorch3d.structures import Meshes
# required so that .register() calls are executed in module scope
import meshrcnn.data # noqa
import meshrcnn.modeling # noqa
import meshrcnn.utils # noqa
from meshrcnn.config import get_meshrcnn_cfg_defaults
from meshrcnn.evaluation import transform_meshes_to_camera_coord_system
def get_parser():
parser = argparse.ArgumentParser(description="MeshRCNN Demo")
parser.add_argument(
"--config-file",
default="configs/pix3d/meshrcnn_R50_FPN.yaml",
metavar="FILE",
help="path to config file",
)
parser.add_argument("--input", help="A path to an input image")
parser.add_argument("--output", help="A directory to save output visualizations")
parser.add_argument(
"--focal-length", type=float, default=20.0, help="Focal length for the image"
)
parser.add_argument(
"--onlyhighest", action="store_true", help="will return only the highest scoring detection"
)
parser.add_argument(
"opts",
help="Modify model config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
args = get_parser().parse_args()
from meshrcnn.data.datasets.register_pix3d import register_pix3d
register_pix3d(args.opts[1])
import cv2
logger = logging.getLogger("demo")
class VisualizationDemo(object):
def __init__(self, cfg, vis_highest_scoring=True, output_dir="./vis"):
"""
Args:
cfg (CfgNode):
vis_highest_scoring (bool): If set to True visualizes only
the highest scoring prediction
"""
self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
self.colors = self.metadata.thing_colors
self.cat_names = self.metadata.thing_classes
self.cpu_device = torch.device("cpu")
self.vis_highest_scoring = vis_highest_scoring
self.predictor = DefaultPredictor(cfg)
os.makedirs(output_dir, exist_ok=True)
self.output_dir = output_dir
def run_on_image(self, image, focal_length=10.0):
"""
Args:
image (np.ndarray): an image of shape (H, W, C) (in BGR order).
This is the format used by OpenCV.
focal_length (float): the focal_length of the image
Returns:
predictions (dict): the output of the model.
"""
predictions = self.predictor(image)
# Convert image from OpenCV BGR format to Matplotlib RGB format.
image = image[:, :, ::-1]
# camera matrix
imsize = [image.shape[0], image.shape[1]]
# focal <- focal * image_width / 32
focal_length = image.shape[1] / 32 * focal_length
K = [focal_length, image.shape[1] / 2, image.shape[0] / 2]
if "instances" in predictions:
instances = predictions["instances"].to(self.cpu_device)
scores = instances.scores
boxes = instances.pred_boxes
labels = instances.pred_classes
masks = instances.pred_masks
meshes = Meshes(
verts=[mesh[0] for mesh in instances.pred_meshes],
faces=[mesh[1] for mesh in instances.pred_meshes],
)
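            # pred_dz is predicted relative to the box height, so rescale it to pixels and
            # build a per-detection depth range centered on a common reference depth tc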
pred_dz = instances.pred_dz[:, 0] * (boxes.tensor[:, 3] - boxes.tensor[:, 1])
tc = pred_dz.abs().max() + 1.0
zranges = torch.stack(
[
torch.stack(
[
tc - tc * pred_dz[i] / 2.0 / focal_length,
tc + tc * pred_dz[i] / 2.0 / focal_length,
]
)
for i in range(len(meshes))
],
dim=0,
)
Ks = torch.tensor(K).to(self.cpu_device).view(1, 3).expand(len(meshes), 3)
meshes = transform_meshes_to_camera_coord_system(
meshes, boxes.tensor, zranges, Ks, imsize
)
if self.vis_highest_scoring:
det_ids = [scores.argmax().item()]
else:
det_ids = range(len(scores))
for det_id in det_ids:
self.visualize_prediction(
det_id,
image,
boxes.tensor[det_id],
labels[det_id],
scores[det_id],
masks[det_id],
meshes[det_id],
)
return predictions
def visualize_prediction(
self, det_id, image, box, label, score, mask, mesh, alpha=0.6, dpi=200
):
mask_color = np.array(self.colors[label], dtype=np.float32)
cat_name = self.cat_names[label]
thickness = max([int(np.ceil(0.001 * image.shape[0])), 1])
box_color = (0, 255, 0) # '#00ff00', green
text_color = (218, 227, 218) # gray
composite = image.copy().astype(np.float32)
# overlay mask
idx = mask.nonzero()
composite[idx[:, 0], idx[:, 1], :] *= 1.0 - alpha
composite[idx[:, 0], idx[:, 1], :] += alpha * mask_color
# overlay box
(x0, y0, x1, y1) = (int(x + 0.5) for x in box)
composite = cv2.rectangle(
composite, (x0, y0), (x1, y1), color=box_color, thickness=thickness
)
composite = composite.astype(np.uint8)
# overlay text
font_scale = 0.001 * image.shape[0]
font_thickness = thickness
font = cv2.FONT_HERSHEY_TRIPLEX
text = "%s %.3f" % (cat_name, score)
((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, font_thickness)
# Place text background.
if x0 + text_w > composite.shape[1]:
x0 = composite.shape[1] - text_w
if y0 - int(1.2 * text_h) < 0:
y0 = int(1.2 * text_h)
back_topleft = x0, y0 - int(1.3 * text_h)
back_bottomright = x0 + text_w, y0
cv2.rectangle(composite, back_topleft, back_bottomright, box_color, -1)
# Show text
text_bottomleft = x0, y0 - int(0.2 * text_h)
cv2.putText(
composite,
text,
text_bottomleft,
font,
font_scale,
text_color,
thickness=font_thickness,
lineType=cv2.LINE_AA,
)
save_file = os.path.join(self.output_dir, "%d_mask_%s_%.3f.png" % (det_id, cat_name, score))
cv2.imwrite(save_file, composite[:, :, ::-1])
save_file = os.path.join(self.output_dir, "%d_mesh_%s_%.3f.obj" % (det_id, cat_name, score))
verts, faces = mesh.get_mesh_verts_faces(0)
save_obj(save_file, verts, faces)
def setup_cfg(args):
cfg = get_cfg()
get_meshrcnn_cfg_defaults(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
return cfg
if __name__ == "__main__":
mp.set_start_method("spawn", force=True)
args = get_parser().parse_args()
logger = setup_logger(name="demo")
logger.info("Arguments: " + str(args))
cfg = setup_cfg(args)
im_name = args.input.split("/")[-1].split(".")[0]
demo = VisualizationDemo(
cfg, vis_highest_scoring=args.onlyhighest, output_dir=os.path.join(args.output, im_name)
)
# use PIL, to be consistent with evaluation
img = read_image(args.input, format="BGR")
predictions = demo.run_on_image(img, focal_length=args.focal_length)
logger.info("Predictions saved in %s" % (os.path.join(args.output, im_name)))
| 34.95671 | 100 | 0.593065 | [
"BSD-3-Clause"
] | ishanic/MeshRCNN-keypoints | demo/demo.py | 8,075 | Python |
#coding=utf-8
#author@alingse
#2016.06.21
hdfs_schema = 'hdfs://'
file_schema = 'file://'
class hdfsCluster(object):
    """ An HDFS resource: hdfs uri, path, and account/password authentication
"""
    def __init__(self,host,port=9000,schema=hdfs_schema):
        """ Currently only host and port are needed """
self.host = host
self.port = port
self.schema = schema
self._path = '/'
self._status = None
@property
def status(self):
return self._status
@status.setter
def status(self,value):
if value in [None,True,False]:
self._status = value
@property
def path(self):
return self._path
@path.setter
def path(self,value):
if value.startswith('/') and value.endswith('/'):
self._path = value
self._status = None
@property
    def uri_head(self):
        """ Return the head of the uri"""
head = self.schema + '{}:{}'.format(self.host,self.port)
return head
@property
    def uri(self):
        """ Return the uri for the current path"""
_uri = self.schema + '{}:{}{}'.format(self.host,self.port,self._path)
return _uri
if __name__ == '__main__':
hdfs = hdfsCluster('localhost','9000')
hdfs.path = '/hive/'
print(hdfs.uri)
print(hdfs.uri_head)
| 20.852459 | 77 | 0.552673 | [
"Apache-2.0"
] | alingse/hdfshell | hdfshell/cluster.py | 1,322 | Python |
#!/usr/bin/env python
import click
from ..log import get_logger, verbosity_option
from . import bdt
logger = get_logger(__name__)
@click.command(
epilog="""\b
Examples:
bdt gitlab update-bob -vv
bdt gitlab update-bob -vv --stable
"""
)
@click.option(
"--stable/--beta",
help="To use the stable versions in the list and pin packages.",
)
@verbosity_option()
@bdt.raise_on_error
def update_bob(stable):
"""Updates the Bob meta package with new packages."""
import tempfile
from ..ci import read_packages
from ..release import (
download_path,
get_gitlab_instance,
get_latest_tag_name,
)
gl = get_gitlab_instance()
# download order.txt form nightlies and get the list of packages
nightlies = gl.projects.get("bob/nightlies")
with tempfile.NamedTemporaryFile() as f:
download_path(nightlies, "order.txt", f.name, ref="master")
packages = read_packages(f.name)
# find the list of public packages
public_packages, private_packages = [], []
for n, (package, branch) in enumerate(packages):
if package == "bob/bob":
continue
# determine package visibility
use_package = gl.projects.get(package)
is_public = use_package.attributes["visibility"] == "public"
if is_public:
public_packages.append(package.replace("bob/", ""))
else:
private_packages.append(package.replace("bob/", ""))
logger.debug(
"%s is %s", package, "public" if is_public else "not public"
)
logger.info("Found %d public packages", len(public_packages))
logger.info(
"The following packages were not public:\n%s",
"\n".join(private_packages),
)
# if requires stable versions, add latest tag versions to the names
if stable:
logger.info("Getting latest tag names for the public packages")
tags = [
get_latest_tag_name(gl.projects.get(f"bob/{pkg}"))
for pkg in public_packages
]
public_packages = [
f"{pkg} =={tag}" for pkg, tag in zip(public_packages, tags)
]
# modify conda/meta.yaml and requirements.txt in bob/bob
logger.info("Updating conda/meta.yaml")
start_tag = "# LIST OF BOB PACKAGES - START"
end_tag = "# LIST OF BOB PACKAGES - END"
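    # splice the public package list between the START/END marker comments of the recipe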
with open("conda/meta.yaml") as f:
lines = f.read()
i1 = lines.find(start_tag) + len(start_tag)
i2 = lines.find(end_tag)
lines = (
lines[:i1]
+ "\n - ".join([""] + public_packages)
+ "\n "
+ lines[i2:]
)
with open("conda/meta.yaml", "w") as f:
f.write(lines)
logger.info("Updating requirements.txt")
with open("requirements.txt", "w") as f:
f.write("\n".join(public_packages) + "\n")
click.echo(
"You may need to add the ` # [linux]` tag in front of linux only "
"packages in conda/meta.yaml"
)
| 27.427273 | 75 | 0.603911 | [
"BSD-3-Clause"
] | bioidiap/bob.devtools | bob/devtools/scripts/update_bob.py | 3,017 | Python |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils import apply_defaults
from airflow.exceptions import AirflowException
class EmrBaseSensor(BaseSensorOperator):
"""
Contains general sensor behavior for EMR.
Subclasses should implement get_emr_response() and state_from_response() methods.
Subclasses should also implement NON_TERMINAL_STATES and FAILED_STATE constants.
"""
ui_color = '#66c3ff'
@apply_defaults
def __init__(
self,
aws_conn_id='aws_default',
*args, **kwargs):
super(EmrBaseSensor, self).__init__(*args, **kwargs)
self.aws_conn_id = aws_conn_id
def poke(self, context):
response = self.get_emr_response()
        if response['ResponseMetadata']['HTTPStatusCode'] != 200:
self.log.info('Bad HTTP response: %s', response)
return False
state = self.state_from_response(response)
self.log.info('Job flow currently %s', state)
if state in self.NON_TERMINAL_STATES:
return False
if state in self.FAILED_STATE:
final_message = 'EMR job failed'
failure_message = self.failure_message_from_response(response)
if failure_message:
final_message += ' ' + failure_message
raise AirflowException(final_message)
return True
| 36.409836 | 85 | 0.701036 | [
"Apache-2.0"
] | 4406arthur/airflow | airflow/contrib/sensors/emr_base_sensor.py | 2,221 | Python |
from DejaVu.IndexedPolygons import IndexedPolygons
from Volume.Grid3D import Grid3D
class ClipMeshWithMask:
    """The clip method of this class takes a mesh (i.e. IndexedPolygons) and
    selects all vertices which fall onto a voxel with a true value in a mask grid.
    It returns a new IndexedPolygons geometry with the triangles for which
    all 3 vertices are selected.
"""
def __init__(self):
pass
def clip(self, mesh, grid):
assert isinstance(mesh, IndexedPolygons)
assert isinstance(grid, Grid3D)
origin = grid.getOriginReal()
stepSize = grid.getStepSizeReal()
dx, dy, dz = grid.dimensions
vertices = mesh.vertexSet.vertices.array
triangles = mesh.faceSet.faces.array
# compute the voxel on which each vertex falls
# array of indiced into grid for the vertices
vertInd = ((vertices-origin)/stepSize).astype('i')
# select the vertices on voxels that have a value True
selVert = []
vertEquiv = {}
numVertSel = 0
nvert = 0
data = grid.data
for i,j,k in vertInd:
if i>=0 and i<dx:
if j>=0 and j<dy:
if k>=0 and k<dz:
if data[i,j,k]:
selVert.append( vertices[nvert] )
vertEquiv[nvert] = numVertSel
numVertSel += 1
nvert += 1
# build a set of faces for which some vertices are selected
# and keep only selected vertices
selFaces = []
for i,j,k in triangles:
nbvs = 0
            # note: vertEquiv values start at 0, so compare against None explicitly
            v1 = vertEquiv.get(i, None)
            if v1 is not None: nbvs += 1
            v2 = vertEquiv.get(j, None)
            if v2 is not None: nbvs += 1
            v3 = vertEquiv.get(k, None)
            if v3 is not None: nbvs += 1
if nbvs == 3:
selFaces.append( (v1,v2,v3) )
clippedGeom = IndexedPolygons(mesh.name+'_clipped', vertices=selVert,
faces=selFaces)
return clippedGeom
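# Minimal usage sketch (illustrative only; assumes an IndexedPolygons mesh and a
# Grid3D mask grid are already available):
#     clipper = ClipMeshWithMask()
#     clippedGeom = clipper.clip(mesh, maskGrid)  # new geometry named '<mesh.name>_clipped'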
| 33.435484 | 77 | 0.549445 | [
"MIT"
] | J-E-J-S/aaRS-Pipeline | resources/mgltools_x86_64Linux2_1.5.6/MGLToolsPckgs/Volume/Operators/clip.py | 2,073 | Python |
import pytest
import os
from turf.boolean_within import boolean_within
from turf.utils.test_setup import get_fixtures
current_path = os.path.dirname(os.path.realpath(__file__))
fixtures = get_fixtures(
current_path,
keys=["true", "false"],
)
class TestBooleanWithin:
@pytest.mark.parametrize(
"fixture",
[
pytest.param(fixture, id=fixture_name)
for fixture_name, fixture in fixtures.items()
],
)
    def test_boolean_within(self, fixture):
if "true" in fixture:
features = fixture.get("true")
feature_1, feature_2 = features["features"]
expected_result = True
else:
features = fixture.get("false")
feature_1, feature_2 = features["features"]
expected_result = False
test_result = boolean_within(feature_1, feature_2)
assert test_result == expected_result
| 23.65 | 58 | 0.639535 | [
"MIT"
] | diogomatoschaves/pyturf | turf/boolean_within/tests/test_boolean_within.py | 946 | Python |
################################################################################
# CSE 151B: Programming Assignment 4
# Code snippet by Ajit Kumar, Savyasachi
# Updated by Rohin
# Winter 2022
################################################################################
from experiment import Experiment
import sys
# Main Driver for your code. Either run `python main.py` which will run the experiment with default config
# or specify the configuration by running `python main.py custom`
if __name__ == "__main__":
exp_name = 'baseline'
if len(sys.argv) > 1:
exp_name = sys.argv[1]
print("Running Experiment: ", exp_name)
exp = Experiment(exp_name)
exp.run()
exp.test()
| 30.826087 | 106 | 0.545839 | [
"MIT"
] | angli66/Image-Captioning | main.py | 709 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-02-12 11:43
from __future__ import unicode_literals
import core.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('great_international', '0004_merge_20190212_1003'),
]
operations = [
migrations.CreateModel(
name='InternationalUKHQPages',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('service_name', models.CharField(choices=[('FIND_A_SUPPLIER', 'Find a Supplier'), ('EXPORT_READINESS', 'Export Readiness'), ('INVEST', 'Invest'), ('COMPONENTS', 'Components'), ('GREAT_INTERNATIONAL', 'Great International')], db_index=True, max_length=100, null=True)),
],
options={
'abstract': False,
},
bases=(core.models.ExclusivePageMixin, 'wagtailcore.page'),
),
]
| 38.6 | 285 | 0.645941 | [
"MIT"
] | ababic/directory-cms | great_international/migrations/0005_internationalukhqpages.py | 1,158 | Python |
"""
Test what happens if Python was built without SSL
* Everything that does not involve HTTPS should still work
* HTTPS requests must fail with an error that points at the ssl module
"""
import sys
import unittest
class ImportBlocker(object):
"""
Block Imports
To be placed on ``sys.meta_path``. This ensures that the modules
specified cannot be imported, even if they are a builtin.
"""
def __init__(self, *namestoblock):
self.namestoblock = namestoblock
def find_module(self, fullname, path=None):
if fullname in self.namestoblock:
return self
return None
def load_module(self, fullname):
raise ImportError('import of {0} is blocked'.format(fullname))
class ModuleStash(object):
"""
Stashes away previously imported modules
If we reimport a module the data from coverage is lost, so we reuse the old
modules
"""
def __init__(self, namespace, modules=sys.modules):
self.namespace = namespace
self.modules = modules
self._data = {}
def stash(self):
self._data[self.namespace] = self.modules.pop(self.namespace, None)
for module in list(self.modules.keys()):
if module.startswith(self.namespace + '.'):
self._data[module] = self.modules.pop(module)
def pop(self):
self.modules.pop(self.namespace, None)
for module in list(self.modules.keys()):
if module.startswith(self.namespace + '.'):
self.modules.pop(module)
self.modules.update(self._data)
ssl_blocker = ImportBlocker('ssl', '_ssl')
module_stash = ModuleStash('urllib3')
class TestWithoutSSL(unittest.TestCase):
def setUp(self):
sys.modules.pop('ssl', None)
sys.modules.pop('_ssl', None)
module_stash.stash()
sys.meta_path.insert(0, ssl_blocker)
def tearDown(self):
sys.meta_path.remove(ssl_blocker)
module_stash.pop()
class TestImportWithoutSSL(TestWithoutSSL):
def test_cannot_import_ssl(self):
# python26 has neither contextmanagers (for assertRaises) nor
# importlib.
# 'import' inside 'lambda' is invalid syntax.
def import_ssl():
import ssl
self.assertRaises(ImportError, import_ssl)
def test_import_urllib3(self):
import urllib3
| 26.322222 | 79 | 0.653018 | [
"Apache-2.0"
] | aexleader/aliyun-tablestore-tutorials | search_on_tablestore_and_elasticsearch/web/flask/ots/python/pymodules/urllib3-1.11/test/test_no_ssl.py | 2,369 | Python |
# -*- coding: utf-8 -*-
import os
import io
import json
import shutil
import six
import zipfile
from .. import base
from girder.constants import AccessType
from girder.models.assetstore import Assetstore
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.token import Token
from girder.models.user import User
def setUpModule():
base.startServer()
def tearDownModule():
base.stopServer()
class ItemTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
# Create a set of users so we can have some folders.
self.users = [User().createUser(
'usr%s' % num, 'passwd', 'tst', 'usr', 'u%[email protected]' % num)
for num in [0, 1]]
folders = Folder().childFolders(self.users[0], 'user', user=self.users[0])
for folder in folders:
if folder['name'] == 'Public':
self.publicFolder = folder
else:
self.privateFolder = folder
self.assetstore = Assetstore().getCurrent()
root = self.assetstore['root']
# Clean out the test assetstore on disk
shutil.rmtree(root)
# First clean out the temp directory
tmpdir = os.path.join(root, 'temp')
if os.path.isdir(tmpdir):
for tempname in os.listdir(tmpdir):
os.remove(os.path.join(tmpdir, tempname))
def _createItem(self, parentId, name, description, user):
params = {
'name': name,
'description': description,
'folderId': parentId
}
resp = self.request(path='/item', method='POST', params=params,
user=user)
self.assertStatusOk(resp)
assert 'meta' in resp.json
return resp.json
def _testUploadFileToItem(self, item, name, user, contents):
"""
Uploads a non-empty file to the server.
"""
# Initialize the upload
resp = self.request(
path='/file', method='POST', user=user, params={
'parentType': 'item',
'parentId': item['_id'],
'name': name,
'size': len(contents)
})
self.assertStatusOk(resp)
uploadId = resp.json['_id']
# Send the first chunk
resp = self.request(
path='/file/chunk', method='POST', body=contents, user=user, params={
'uploadId': uploadId
}, type='application/octet-stream')
self.assertStatusOk(resp)
def _testDownloadSingleFileItem(self, item, user, contents):
"""
Downloads a single-file item from the server
:param item: The item to download.
:type item: dict
:param contents: The expected contents.
:type contents: str
"""
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False)
self.assertStatusOk(resp)
self.assertEqual(contents, self.getBody(resp))
self.assertEqual(resp.headers['Content-Disposition'],
'attachment; filename="file_1"')
# Test downloading the item with contentDisposition=inline.
params = {'contentDisposition': 'inline'}
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False,
params=params)
self.assertStatusOk(resp)
self.assertEqual(contents, self.getBody(resp))
self.assertEqual(resp.headers['Content-Disposition'],
'inline; filename="file_1"')
# Test downloading with an offset
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False,
params={'offset': 1})
self.assertStatus(resp, 206)
self.assertEqual(contents[1:], self.getBody(resp))
def _testDownloadMultiFileItem(self, item, user, contents, format=None):
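        # Download the item as a zip archive and check that its entries match `contents`.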
params = None
if format:
params = {'format': format}
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False,
params=params)
self.assertStatusOk(resp)
zipFile = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)),
'r')
prefix = os.path.split(zipFile.namelist()[0])[0]
expectedZip = {}
for name in contents:
expectedZip[os.path.join(prefix, name)] = contents[name]
self.assertHasKeys(expectedZip, zipFile.namelist())
self.assertHasKeys(zipFile.namelist(), expectedZip)
for name in zipFile.namelist():
expected = expectedZip[name]
if not isinstance(expected, six.binary_type):
expected = expected.encode('utf8')
self.assertEqual(expected, zipFile.read(name))
def testLegacyItems(self):
folder = Folder().createFolder(
parent=self.users[0], parentType='user', creator=self.users[0],
name='New Folder')
item = Item().createItem(
name='LegacyItem',
creator=self.users[0],
folder=folder)
del item['meta']
item = Item().save(item)
assert 'meta' not in item
item = Item().load(item['_id'], user=self.users[0])
assert 'meta' in item
def testItemDownloadAndChildren(self):
curItem = self._createItem(self.publicFolder['_id'],
'test_for_download', 'fake description',
self.users[0])
self._testUploadFileToItem(curItem, 'file_1', self.users[0], 'foobar')
self._testDownloadSingleFileItem(curItem, self.users[0], 'foobar')
self._testDownloadMultiFileItem(curItem, self.users[0],
{'file_1': 'foobar'}, format='zip')
self._testUploadFileToItem(curItem, 'file_2', self.users[0], 'foobz')
resp = self.request(path='/item/%s/files' % curItem['_id'],
method='GET', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['name'], 'file_1')
self.assertEqual(resp.json[1]['name'], 'file_2')
self.assertEqual(resp.json[0]['size'], 6)
self.assertEqual(resp.json[1]['size'], 5)
self._testDownloadMultiFileItem(curItem, self.users[0],
{'file_1': 'foobar', 'file_2': 'foobz'})
def testItemCrud(self):
"""
Test Create, Read, Update, and Delete of items.
"""
self.ensureRequiredParams(
path='/item', method='POST', required=('folderId',),
user=self.users[1])
# Attempt to create an item without write permission, should fail
params = {
'name': ' ',
'description': ' a description ',
'folderId': self.publicFolder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.users[1])
self.assertStatus(resp, 403)
# Shouldn't be allowed to have an empty name
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertValidationError(resp, 'name')
# Actually create the item in user 0's private folder
params['name'] = ' my item name'
params['folderId'] = self.privateFolder['_id']
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
self.assertEqual(item['name'], params['name'].strip())
self.assertEqual(item['description'], params['description'].strip())
# User 1 should not be able to see the item via find by folderId
params = {
'folderId': self.privateFolder['_id']
}
resp = self.request(path='/item', method='GET', user=self.users[1],
params=params)
self.assertStatus(resp, 403)
# Or by just requesting the item itself by ID
resp = self.request(path='/item/%s' % str(item['_id']), method='GET',
user=self.users[1])
self.assertStatus(resp, 403)
# User 0 should be able to see the item
resp = self.request(path='/item/%s' % str(item['_id']), method='GET',
user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['_id'], item['_id'])
self.assertEqual(resp.json['_modelType'], 'item')
# Also from the children call
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
# Test finding the item using a text string with and without a folderId
params['text'] = 'my item name'
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
del params['folderId']
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
# A limit should work
params['limit'] = 1
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
# An offset should give us nothing
params['offset'] = 1
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 0)
# Finding should fail with no parameters
resp = self.request(path='/item', method='GET', user=self.users[0],
params={})
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid search mode.')
# Test update of the item
params = {
'name': 'changed name',
'description': 'new description'
}
resp = self.request(path='/item/%s' % item['_id'], method='PUT',
params=params, user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], params['name'])
self.assertEqual(resp.json['description'], params['description'])
# Test moving an item to the public folder
item = Item().load(item['_id'], force=True)
self.assertFalse(Item().hasAccess(item))
resp = self.request(path='/item/%s' % item['_id'], method='PUT',
user=self.users[0], params={
'folderId': self.publicFolder['_id']})
self.assertStatusOk(resp)
item = Item().load(resp.json['_id'], force=True)
self.assertTrue(Item().hasAccess(item))
# Move should fail if we don't have write permission on the
# destination folder
self.publicFolder = Folder().setUserAccess(
self.publicFolder, self.users[1], AccessType.WRITE, save=True)
resp = self.request(path='/item/%s' % item['_id'], method='PUT',
user=self.users[1], params={
'folderId': self.privateFolder['_id']})
self.assertStatus(resp, 403)
self.assertTrue(resp.json['message'].startswith(
'Write access denied for folder'))
# Try to update/PUT without an id
resp = self.request(path='/item/', method='PUT',
params=params, user=self.users[0])
self.assertStatus(resp, 400)
# Try a bad endpoint (should 400)
resp = self.request(path='/item/%s/blurgh' % item['_id'],
method='GET',
user=self.users[1])
self.assertStatus(resp, 400)
# Try delete with no ID (should 400)
resp = self.request(path='/item/', method='DELETE', user=self.users[1])
self.assertStatus(resp, 400)
# User 1 should not be able to delete the item with read access
self.publicFolder = Folder().setUserAccess(
self.publicFolder, self.users[1], AccessType.READ, save=True)
resp = self.request(path='/item/%s' % str(item['_id']), method='DELETE',
user=self.users[1])
self.assertStatus(resp, 403)
# User 1 should be able to delete the item with write access
self.publicFolder = Folder().setUserAccess(
self.publicFolder, self.users[1], AccessType.WRITE, save=True)
resp = self.request(path='/item/%s' % str(item['_id']), method='DELETE',
user=self.users[1])
self.assertStatusOk(resp)
# Verify that the item is deleted
item = Item().load(item['_id'])
self.assertEqual(item, None)
def testItemMetadataDirect(self):
params = {
'name': 'item with metadata via POST',
'description': ' a description ',
'folderId': self.privateFolder['_id'],
'metadata': 'not JSON'
}
resp = self.request(
path='/item', method='POST', params=params, user=self.users[0])
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Parameter metadata must be valid JSON.')
# Add some metadata
metadata = {
'foo': 'bar',
'test': 2
}
params['metadata'] = json.dumps(metadata)
resp = self.request(
path='/item', method='POST', params=params, user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
metadata = {
'foo': None,
'test': 3,
'bar': 'baz'
}
resp = self.request(
path='/item/{_id}'.format(**item), method='PUT',
user=self.users[0], params={'metadata': json.dumps(metadata)}
)
self.assertStatusOk(resp)
item = resp.json
self.assertNotHasKeys(item['meta'], ['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
self.assertEqual(item['meta']['bar'], metadata['bar'])
def testItemMetadataCrud(self):
"""
Test CRUD of metadata.
"""
# Create an item
params = {
'name': 'item with metadata',
'description': ' a description ',
'folderId': self.privateFolder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
# Try to delete metadata from an item that doesn't have any set on it
# yet.
resp = self.request(path='/item/%s/metadata' % (item['_id']),
method='DELETE', user=self.users[0],
body=json.dumps(['foobar']), type='application/json')
item = resp.json
self.assertStatusOk(resp)
self.assertEqual(item['meta'], {})
# Add some metadata
metadata = {
'foo': 'bar',
'test': 2
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
# Test invalid JSON constants
body = '{"key": {"foo": Infinity}}'
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=body, type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Error: "Infinity" is not valid JSON.')
# Edit and remove metadata
metadata['test'] = None
metadata['foo'] = 'baz'
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertNotHasKeys(item['meta'], ['test'])
# Test insertion of null values
metadata['nullVal'] = None
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), params={'allowNull': True},
type='application/json')
item = resp.json
self.assertEqual(item['meta']['nullVal'], None)
# Adding an unrelated key should not affect existing keys
del metadata['nullVal']
metadata['other'] = 'macguffin'
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['other'], metadata['other'])
self.assertEqual(item['meta']['nullVal'], None)
# Test metadata deletion
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='DELETE', user=self.users[0],
body=json.dumps(['other']), type='application/json')
item = resp.json
self.assertNotHasKeys(item['meta'], ['other'])
# Error when deletion field names contain a period.
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='DELETE', user=self.users[0],
body=json.dumps(['foo', 'foo.bar']), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
# Error when deletion field names begin with a dollar-sign.
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='DELETE', user=self.users[0],
body=json.dumps(['foo', '$bar']), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key $bar: keys must not start with the "$" character.')
# Make sure metadata cannot be added with invalid JSON
metadata = {
'test': 'allowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata).replace('"', "'"),
type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'],
'Invalid JSON passed in request body.')
# Make sure metadata cannot be added if there is a period in the key
# name
metadata = {
'foo.bar': 'notallowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
# Make sure metadata cannot be added if the key begins with a
# dollar sign
metadata = {
'$foobar': 'alsonotallowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'],
'Invalid key $foobar: keys must not start with the "$" character.')
# Make sure metadata cannot be added with a blank key
metadata = {
'': 'stillnotallowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Key names must not be empty.')
def testItemFiltering(self):
"""
Test filtering private metadata from items.
"""
# Create an item
params = {
'name': 'item with metadata',
'description': ' a description ',
'folderId': self.privateFolder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertStatusOk(resp)
# get the item object from the database
item = Item().load(resp.json['_id'], force=True)
# set a private property
item['private'] = 'very secret metadata'
item = Item().save(item)
# get the item from the rest api
resp = self.request(path='/item/%s' % str(item['_id']), method='GET',
user=self.users[0])
self.assertStatusOk(resp)
# assert that the private data is not included
self.assertNotHasKeys(resp.json, ['private'])
def testPathToRoot(self):
firstChildName = 'firstChild'
firstChildDesc = 'firstDesc'
secondChildName = 'secondChild'
secondChildDesc = 'secondDesc'
firstChild = Folder().createFolder(
self.publicFolder, firstChildName, firstChildDesc, creator=self.users[0])
secondChild = Folder().createFolder(
firstChild, secondChildName, secondChildDesc, creator=self.users[0])
baseItem = Item().createItem('blah', self.users[0], secondChild, 'foo')
resp = self.request(path='/item/%s/rootpath' % baseItem['_id'], method='GET')
self.assertStatusOk(resp)
pathToRoot = resp.json
self.assertEqual(pathToRoot[0]['type'], 'user')
self.assertEqual(pathToRoot[0]['object']['login'],
self.users[0]['login'])
self.assertEqual(pathToRoot[1]['type'], 'folder')
self.assertEqual(pathToRoot[1]['object']['name'],
self.publicFolder['name'])
self.assertEqual(pathToRoot[2]['type'], 'folder')
self.assertEqual(pathToRoot[2]['object']['name'], firstChild['name'])
self.assertEqual(pathToRoot[3]['type'], 'folder')
self.assertEqual(pathToRoot[3]['object']['name'], secondChild['name'])
def testLazyFieldComputation(self):
"""
Demonstrate that an item that is saved in the database without
derived fields (like lowerName or baseParentId) get those values
computed at load() time.
"""
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder)
self.assertEqual(item['lowerName'], 'my item name')
self.assertEqual(item['baseParentId'], self.users[0]['_id'])
# Force the item to be saved without lowerName and baseParentType fields
del item['lowerName']
del item['baseParentType']
item = Item().save(item, validate=False)
item = Item().find({'_id': item['_id']})[0]
self.assertNotHasKeys(item, ('lowerName', 'baseParentType'))
# Now ensure that calling load() actually populates those fields and
# saves the results persistently
Item().load(item['_id'], force=True)
item = Item().find({'_id': item['_id']})[0]
self.assertHasKeys(item, ('lowerName', 'baseParentType'))
self.assertEqual(item['lowerName'], 'my item name')
self.assertEqual(item['baseParentType'], 'user')
self.assertEqual(item['baseParentId'], self.users[0]['_id'])
# Also test that this works for a duplicate item, such that the
# automatically renamed item still has the correct lowerName, and a
# None description is changed to an empty string.
item = Item().createItem(
'My Item Name', creator=self.users[0], folder=self.publicFolder, description=None)
# test if non-strings are coerced
self.assertEqual(item['description'], '')
item['description'] = 1
item = Item().save(item)
item = Item().findOne({'_id': item['_id']})
self.assertEqual(item['description'], '1')
# test if just missing lowerName is corrected.
self.assertEqual(item['lowerName'], 'my item name (1)')
del item['lowerName']
item = Item().save(item, validate=False)
item = Item().findOne({'_id': item['_id']})
self.assertNotHasKeys(item, ('lowerName', ))
Item().load(item['_id'], force=True)
item = Item().findOne({'_id': item['_id']})
self.assertHasKeys(item, ('lowerName', ))
self.assertEqual(item['lowerName'], 'my item name (1)')
def testParentsToRoot(self):
"""
Demonstrate that forcing parentsToRoot will cause it to skip the
filtering process.
"""
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder)
parents = Item().parentsToRoot(item, force=True)
for parent in parents:
self.assertNotIn('_accessLevel', parent['object'])
parents = Item().parentsToRoot(item)
for parent in parents:
self.assertIn('_accessLevel', parent['object'])
def testItemCopy(self):
origItem = self._createItem(self.publicFolder['_id'],
'test_for_copy', 'fake description',
self.users[0])
# Add metadata and files, since we want to make sure those get copied
metadata = {
'foo': 'value1',
'test': 2
}
resp = self.request(
path='/item/%s/metadata' % origItem['_id'], method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatusOk(resp)
self._testUploadFileToItem(origItem, 'file_1', self.users[0], 'foobar')
self._testUploadFileToItem(origItem, 'file_2', self.users[0], 'foobz')
# Also upload a link
params = {
'parentType': 'item',
'parentId': origItem['_id'],
'name': 'link_file',
'linkUrl': 'http://www.google.com'
}
resp = self.request(path='/file', method='POST', user=self.users[0],
params=params)
self.assertStatusOk(resp)
# Copy to a new item. It will be in the same folder, but we want a
# different name.
params = {
'name': 'copied_item'
}
resp = self.request(path='/item/%s/copy' % origItem['_id'],
method='POST', user=self.users[0], params=params)
self.assertStatusOk(resp)
# Make sure size was returned correctly
self.assertEqual(resp.json['size'], 11)
# Now ask for the new item explicitly and check its metadata
        resp = self.request(path='/item/%s' % resp.json['_id'],
                            user=self.users[0], type='application/json')
self.assertStatusOk(resp)
newItem = resp.json
self.assertEqual(newItem['name'], 'copied_item')
self.assertEqual(newItem['meta']['foo'], metadata['foo'])
self.assertEqual(newItem['meta']['test'], metadata['test'])
# Check if we can download the files from the new item
resp = self.request(path='/item/%s/files' % newItem['_id'],
method='GET', user=self.users[0])
self.assertStatusOk(resp)
newFiles = resp.json
self.assertEqual(newFiles[0]['name'], 'file_1')
self.assertEqual(newFiles[1]['name'], 'file_2')
self.assertEqual(newFiles[2]['name'], 'link_file')
self.assertEqual(newFiles[0]['size'], 6)
self.assertEqual(newFiles[1]['size'], 5)
self._testDownloadMultiFileItem(newItem, self.users[0],
{'file_1': 'foobar', 'file_2': 'foobz',
'link_file': 'http://www.google.com'})
# Check to make sure the original item is still present
resp = self.request(path='/item', method='GET', user=self.users[0],
params={'folderId': self.publicFolder['_id'],
'text': 'test_for_copy'})
self.assertStatusOk(resp)
self.assertEqual(origItem['_id'], resp.json[0]['_id'])
# Check to make sure the new item is still present
resp = self.request(path='/item', method='GET', user=self.users[0],
params={'folderId': self.publicFolder['_id'],
'text': 'copied_item'})
self.assertStatusOk(resp)
self.assertEqual(newItem['_id'], resp.json[0]['_id'])
# Check that the provenance tag correctly points back
# to the original item
self.assertEqual(newItem['copyOfItem'], origItem['_id'])
# Check if we can download the files from the old item and that they
# are distinct from the files in the original item
resp = self.request(path='/item/%s/files' % origItem['_id'],
method='GET', user=self.users[0])
self.assertStatusOk(resp)
origFiles = resp.json
self._testDownloadMultiFileItem(origItem, self.users[0],
{'file_1': 'foobar', 'file_2': 'foobz',
'link_file': 'http://www.google.com'})
for index, file in enumerate(origFiles):
self.assertNotEqual(origFiles[index]['_id'],
newFiles[index]['_id'])
def testCookieAuth(self):
"""
We make sure a cookie is sufficient for authentication for the item
download endpoint. Also, while we're at it, we make sure it's not
sufficient for other endpoints.
"""
item = self._createItem(self.privateFolder['_id'],
'cookie_auth_download', '', self.users[0])
self._testUploadFileToItem(item, 'file', self.users[0], 'foo')
token = Token().createToken(self.users[0])
cookie = 'girderToken=%s' % token['_id']
# We should be able to download a private item using a cookie token
resp = self.request(path='/item/%s/download' % item['_id'],
isJson=False, cookie=cookie)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'foo')
# We should not be able to call GET /item/:id with a cookie token
resp = self.request(path='/item/%s' % item['_id'], cookie=cookie)
self.assertStatus(resp, 401)
# Make sure the cookie has to be a valid token
resp = self.request(path='/item/%s/download' % item['_id'],
cookie='girderToken=invalid_token')
self.assertStatus(resp, 401)
def testReuseExisting(self):
item1 = Item().createItem('to be reused', creator=self.users[0], folder=self.publicFolder)
item2 = Item().createItem('to be reused', creator=self.users[0], folder=self.publicFolder)
item3 = Item().createItem(
'to be reused', creator=self.users[0], folder=self.publicFolder, reuseExisting=True)
self.assertNotEqual(item1['_id'], item2['_id'])
self.assertEqual(item1['_id'], item3['_id'])
self.assertEqual(item2['name'], 'to be reused (1)')
self.assertEqual(item3['name'], 'to be reused')
def testUpdateDuplicatedName(self):
item1 = Item().createItem('foo', creator=self.users[0], folder=self.publicFolder)
item2 = Item().createItem('bar', creator=self.users[0], folder=self.publicFolder)
item2['name'] = 'foo'
Item().save(item2, validate=False)
self.assertEqual(item2['name'], 'foo')
item1['size'] = 3
Item().save(item1)
self.assertEqual(item1['name'], 'foo')
| 42.053232 | 98 | 0.555787 | [
"Apache-2.0"
] | RemiCecchinato/girder | tests/cases/item_test.py | 33,180 | Python |
## -------------------------------------------------------- ##
# Trab 2 IA 2019-2
#
# Rafael Belmock Pedruzzi
#
# probOneR.py: implementation of the probabilistic OneR classifier.
#
# Python version: 3.7.4
## -------------------------------------------------------- ##
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import euclidean_distances
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics import confusion_matrix
from itertools import product, zip_longest, accumulate
from random import random
class Prob_OneR(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
# check that x and y have correct shape
X, y = check_X_y(X,y)
# store the classes seen during fit
self.classes_ = unique_labels(y)
self.y_ = y
kbd = KBinsDiscretizer(n_bins = len(np.unique(y)), encode='ordinal')
X = kbd.fit_transform(X)
self.X_ = X
self.kbd_ = kbd
cm_list = []
hits = []
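        # Build one (feature bin x class) contingency matrix per discretized feature; the
        # feature whose majority-class predictions give the most hits becomes the OneR rule.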
for i in X.T:
cm = contingency_matrix(i, y)
cm_list.append(cm)
hits.append(sum(max(k) for k in cm))
rule = np.argmax(hits) # chosen rule
self.r_ = rule
rule_cm = cm_list[rule]
class_selector = []
        for c in rule_cm:
            cSum = sum(c)
            probRatio = [(v / cSum) for v in c]
# Building the "partitions" of the roulette:
probRatio = list(accumulate(probRatio))
class_selector.append(probRatio)
self.class_selector = class_selector
# Return the classifier
return self
def predict(self, X):
# Check is fit had been called
check_is_fitted(self, ['X_', 'y_'])
# Input validation
X = check_array(X)
X = self.kbd_.transform(X)
y = []
        for i in X[:, self.r_]:
            probRatio = self.class_selector[int(i)]
            # Selecting a random element:
            selector = random()
            for j in range(len(probRatio)):
                if selector <= probRatio[j]:
                    y.append(self.classes_[j])
                    break
return y
# from sklearn import datasets
# from sklearn.model_selection import train_test_split, cross_val_score
# from sklearn.metrics import f1_score
# nn= Prob_OneR()
# iris = datasets.load_iris()
# x_train,x_test,y_train,y_test = train_test_split(iris.data,iris.target,test_size = 0.4, random_state = 0)
# nn.fit(x_train, y_train)
# y_pred = nn.predict(x_test)
# print(y_test)
# print(y_pred)
# score = cross_val_score(nn, x_train, y_train, cv = 5)
# print(score)
| 30.521277 | 107 | 0.606832 | [
"MIT"
] | RafaelPedruzzi/IA-2019-1 | trab2/probOneR.py | 2,869 | Python |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from hcat import hcat
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
class HCatClient(Script):
def install(self, env):
import params
self.install_packages(env, exclude_packages=params.hive_exclude_packages)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
hcat()
def status(self, env):
raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HCatClientWindows(HCatClient):
pass
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HCatClientDefault(HCatClient):
def get_stack_to_component(self):
return {"HDP": "hadoop-client"}
if __name__ == "__main__":
HCatClient().execute()
| 28.089286 | 77 | 0.779402 | [
"Apache-2.0"
] | nexr/ambari | ambari-server/src/main/resources/common-services/HIVE/0.12.0.2.0/package/scripts/hcat_client.py | 1,573 | Python |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from propeller.service.client import InferenceClient
from propeller import log
import six
from tmp import util
from time import time
import numpy as np
class ErnieClient(InferenceClient):
def __init__(self,
vocab_file,
host='localhost',
port=8888,
batch_size=32,
num_coroutine=1,
timeout=10.,
max_seqlen=128):
host_port = 'tcp://%s:%d' % (host, port)
        super(ErnieClient, self).__init__(host_port, batch_size=batch_size, num_coroutine=num_coroutine, timeout=timeout)
self.vocab = {j.strip().split(b'\t')[0].decode('utf8'): i for i, j in enumerate(open(vocab_file, 'rb'))}
self.tokenizer = util.data.CharTokenizer(self.vocab.keys())
self.max_seqlen = max_seqlen
self.cls_id = self.vocab['[CLS]']
self.sep_id = self.vocab['[SEP]']
def txt_2_id(self, text):
ids = np.array([self.vocab[i] for i in self.tokenizer(text)])
return ids
def pad_and_batch(self, ids):
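        # Right-pad each id sequence to the longest in the batch and add a trailing feature axis.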
max_len = max(map(len, ids))
padded = np.stack([np.pad(i, [[0, max_len - len(i)]], mode='constant')for i in ids])
padded = np.expand_dims(padded, axis=-1)
return padded
def __call__(self, text_a, text_b=None):
if text_b is not None and len(text_a) != len(text_b):
            raise ValueError('text_b %d has different size than text_a %d' % (len(text_b), len(text_a)))
text_a = [i.encode('utf8') if isinstance(i, six.string_types) else i for i in text_a]
if text_b is not None:
text_b = [i.encode('utf8') if isinstance(i, six.string_types) else i for i in text_b]
ids_a = map(self.txt_2_id, text_a)
if text_b is not None:
ids_b = map(self.txt_2_id, text_b)
ret = [util.data.build_2_pair(a, b, self.max_seqlen, self.cls_id, self.sep_id) for a, b in zip(ids_a, ids_b)]
else:
ret = [util.data.build_1_pair(a, self.max_seqlen, self.cls_id, self.sep_id) for a in ids_a]
sen_ids, token_type_ids = zip(*ret)
sen_ids = self.pad_and_batch(sen_ids)
token_type_ids = self.pad_and_batch(token_type_ids)
ret, = super(ErnieClient, self).__call__(sen_ids, token_type_ids)
return ret
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ernie_encoder_client')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('-i', '--input', type=str, required=True)
parser.add_argument('-o', '--output', type=str, required=True)
parser.add_argument('-p', '--port', type=int, default=8888)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--num_coroutine', type=int, default=1)
parser.add_argument('--vocab', type=str, required=True)
args = parser.parse_args()
client = ErnieClient(args.vocab, args.host, args.port, batch_size=args.batch_size, num_coroutine=args.num_coroutine)
inputs = [i.strip().split(b'\t') for i in open(args.input, 'rb').readlines()]
if len(inputs) == 0:
raise ValueError('empty input')
send_batch = args.num_coroutine * args.batch_size
send_num = len(inputs) // send_batch + 1
rets = []
start = time()
for i in range(send_num):
slice = inputs[i * send_batch: (i + 1) * send_batch]
if len(slice) == 0:
continue
columns = list(zip(*slice))
if len(columns) > 2:
raise ValueError('inputs file has more than 2 columns')
ret = client(*columns)
if len(ret.shape) == 3:
ret = ret[:, 0, :] # take cls
rets.append(ret)
end = time()
with open(args.output, 'wb') as outf:
arr = np.concatenate(rets, 0)
np.save(outf, arr)
log.info('query num: %d average latency %.5f' % (len(inputs), (end - start)/len(inputs)))
| 42.009009 | 130 | 0.650011 | [
"MIT"
] | lerry-lee/similarity-model | ernie/classification/service/client.py | 4,663 | Python |
"""
Functions computing the signal shapes
"""
import numpy as np
from time import time
import src.constants as const
def subtract_signal(t, signal, fit_params=3):
"""
    Returns the signal with its best-fit polynomial of degree < fit_params subtracted
"""
# fit dphi(t) to polynomials and subtract the contribution from n=0, 1 and 2
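    # (in a pulsar-timing fit these low-order terms are absorbed by the phase, spin and
    # spin-down parameters, so they carry no observable signal)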
coef = np.polynomial.polynomial.polyfit(t, signal, fit_params - 1) # (3)
delta_signal = np.einsum(
"n,nj->j", coef, np.asarray([np.power(t, n) for n in range(fit_params)])
) # (Nt)
# compute the subtracted signal
ht = signal - delta_signal # (Nt), unit = s
return ht
def dphi_dop_chunked(
t,
profile,
r0_vec,
v_vec,
d_hat,
use_form=False,
use_chunk=False,
chunk_size=10000,
verbose=False,
form_fun=None,
interp_table=None,
time_end=np.inf,
):
"""
    Compute dphi in chunks over the subhalos; use when the full Nt x N array is too
    large to hold in memory
"""
num_objects = len(list(profile.items())[0][1]) # number of elements of 1st dict entry
dphi = np.zeros(len(t))
if use_chunk == True:
if num_objects % chunk_size == 0:
num_chunks = num_objects // chunk_size
else:
num_chunks = num_objects // chunk_size + 1
if verbose:
print(" Chunking data (%d chunks) ... "%num_chunks)
print()
for i in range(num_chunks):
if time() > time_end: raise TimeoutError
r0_c = r0_vec[i * chunk_size : (i + 1) * chunk_size]
v_c = v_vec[i * chunk_size : (i + 1) * chunk_size]
profile_c = {}
for key in list(profile):
profile_c[key] = profile[key][i * chunk_size : (i + 1) * chunk_size]
dphi += dphi_dop(
t, profile_c, r0_c, v_c, d_hat, use_form=use_form, form_fun=form_fun, interp_table=interp_table
)
else:
dphi += dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
return dphi
def dphi_dop_chunked_vec(
t,
profile,
r0_vec,
v_vec,
use_form=False,
use_chunk=False,
chunk_size=10000,
verbose=False,
form_fun=None,
interp_table=None,
time_end=np.inf,
):
"""
    Compute dphi_vec in chunks over the subhalos; use when the full Nt x N array is too
    large to hold in memory
"""
num_objects = len(list(profile.items())[0][1]) # number of elements of 1st dict entry
dphi_vec = np.zeros((len(t), 3))
if use_chunk == True:
if verbose:
print(" Chunking data ... ")
print()
if num_objects % chunk_size == 0:
num_chunks = num_objects // chunk_size
else:
num_chunks = num_objects // chunk_size + 1
for i in range(num_chunks):
if time() > time_end: raise TimeoutError
r0_c = r0_vec[i * chunk_size : (i + 1) * chunk_size]
v_c = v_vec[i * chunk_size : (i + 1) * chunk_size]
profile_c = {}
for key in list(profile):
profile_c[key] = profile[key][i * chunk_size : (i + 1) * chunk_size]
dphi_vec += dphi_dop_vec(
t, profile_c, r0_c, v_c, use_form=use_form, form_fun=form_fun, interp_table=interp_table
)
else:
dphi_vec += dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=use_form, form_fun=form_fun, interp_table=interp_table)
return dphi_vec
def dphi_dop_vec(t, profile, r0_vec, v_vec, use_form=False, form_fun=None,
interp_table=None):
"""
    Returns the vector phase shift due to the Doppler delay for the subhalo population described by profile.
Dot with d_hat to get dphi_I
TODO: add use_closest option
"""
v_mag = np.linalg.norm(v_vec, axis=1)
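    # closest-approach geometry: t0 is the time of closest approach, b_vec the impact
    # parameter vector and tau = b / v the characteristic crossing time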
r0_v = np.einsum("ij, ij -> i", r0_vec, v_vec)
t0 = -r0_v / np.square(v_mag) # year
b_vec = r0_vec + v_vec * t0[:, np.newaxis] # (N, 3)
b_mag = np.linalg.norm(b_vec, axis=1) # (N)
tau = b_mag / v_mag
b_hat = b_vec / b_mag[:, np.newaxis] # (N, 3)
v_hat = v_vec / v_mag[:, np.newaxis]
x = np.subtract.outer(t, t0) / tau
x0 = -t0 / tau
prefactor = (
const.yr_to_s
* const.GN
/ (const.km_s_to_kpc_yr * const.c_light * np.square(v_mag))
)
if interp_table is None:
bd_term = (np.sqrt(1 + x ** 2) + x) - (np.sqrt(1 + x0 ** 2) + x0) # (Nt, N)
vd_term = np.arcsinh(x) - np.arcsinh(x0)
if 'M' in list(profile):
prefactor *= profile['M']
if use_form:
t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
x_cl = (t_cl - t0) / tau
r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
rv = ((3 * profile['M'] / (4 * np.pi)) * (1 / 200) * (1 / const.rho_crit)) ** (1 / 3)
form_func = np.where(r_cl<rv, form(r_cl / rv, profile['c']), 1) # (N)
bd_term *= prefactor * form_func
vd_term *= prefactor * form_func
else:
bd_term = prefactor * bd_term
vd_term = prefactor * vd_term
else:
if form_fun is not None:
t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
x_cl = (t_cl - t0) / tau
r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
form_func = form_fun(r_cl, profile['rs'], profile['rhos'])
bd_term *= prefactor * form_func
vd_term *= prefactor * form_func
else:
raise ValueError('rho_s, r_s halo description currently requires custom density profile ("USE_FORMTAB")')
else:
y = b_mag / profile['rs']
bd_term0, vd_term0 = interp_table.bd_vd_terms(x0, y)
y.shape = (1,-1)
y = np.broadcast_to(y,x.shape)
bd_term, vd_term = interp_table.bd_vd_terms(x, y)
bd_term -= bd_term0
vd_term -= vd_term0
bd_term *= prefactor * profile['rhos'] * profile['rs']**3
vd_term *= prefactor * profile['rhos'] * profile['rs']**3
# sum the signal over all the events
sig = np.einsum("to, oi -> ti", bd_term, b_hat) - np.einsum(
"to, oi -> ti", vd_term, v_hat
)
return sig
def dphi_dop(t, profile, r0_vec, v_vec, d_hat, use_form=False, form_fun=None,
interp_table=None):
"""
    Returns the phase shift due to the Doppler delay for the given subhalos
TODO: add use_closest option
"""
v_mag = np.linalg.norm(v_vec, axis=1)
r0_v = np.einsum("ij, ij -> i", r0_vec, v_vec) # kpc^2/yr
t0 = -r0_v / np.square(v_mag) # year
b_vec = r0_vec + v_vec * t0[:, np.newaxis] # (N, 3), kpc
b_mag = np.linalg.norm(b_vec, axis=1) # (N)
tau = b_mag / v_mag # year
b_hat = b_vec / b_mag[:, np.newaxis]
v_hat = v_vec / v_mag[:, np.newaxis]
b_d = np.dot(b_hat, d_hat)
v_d = np.dot(v_hat, d_hat)
x = np.subtract.outer(t, t0) / tau
x0 = -t0 / tau
prefactor = (
const.yr_to_s
* const.GN
/ (const.km_s_to_kpc_yr * const.c_light * np.square(v_mag))
)
if interp_table is None:
bd_term = (np.sqrt(1 + x ** 2) + x) - (np.sqrt(1 + x0 ** 2) + x0)
vd_term = np.arcsinh(x) - np.arcsinh(x0)
sig = bd_term * b_d - vd_term * v_d
if 'M' in list(profile):
prefactor *= profile['M']
if use_form:
t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
x_cl = (t_cl - t0) / tau
r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
rv = ((3 * profile['M'] / (4 * np.pi)) * (1 / 200) * (1 / const.rho_crit)) ** (1 / 3)
form_func = np.where(r_cl<rv, form(r_cl / rv, profile['c']), 1) # (N)
sig = form_func * sig
else:
if form_fun is not None:
t_cl = np.maximum(np.minimum(t0, t[-1]), 0)
x_cl = (t_cl - t0) / tau
r_cl = tau * v_mag * np.sqrt(1 + x_cl ** 2)
form_func = form_fun(r_cl, profile['rs'], profile['rhos'])
sig = form_func * sig
else:
raise ValueError('rho_s, r_s halo description currently requires custom density profile ("USE_FORMTAB")')
else:
y = b_mag / profile['rs']
bd_term0, vd_term0 = interp_table.bd_vd_terms(x0, y)
y.shape = (1,-1)
y = np.broadcast_to(y,x.shape)
bd_term, vd_term = interp_table.bd_vd_terms(x, y)
bd_term -= bd_term0
vd_term -= vd_term0
sig = profile['rhos'] * profile['rs']**3 * (bd_term * b_d + vd_term * v_d)
sig = prefactor * sig
# sum the signal over all the events
return np.sum(sig, axis=-1)
def form(s, c):
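    # This is the NFW enclosed-mass fraction M(< s * r_vir) / M(r_vir) for
    # concentration c, with s = r / r_vir.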
return (np.log(1 + c * s) - c * s / (1 + c * s)) / (np.log(1 + c) - c / (1 + c))
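if __name__ == "__main__":
    # Minimal usage sketch with made-up numbers (all values below are
    # illustrative assumptions, not fitted parameters): evaluate the summed
    # Doppler phase shift of a few point-like subhalos along a line-of-sight
    # unit vector, using the unit conventions described in the docstrings
    # above (kpc, kpc/yr, yr).
    rng = np.random.default_rng(0)
    n_halo = 100
    toy_profile = {"M": rng.uniform(1e-4, 1e-2, n_halo)}  # point masses, so use_form=False
    r0_demo = rng.normal(scale=1.0, size=(n_halo, 3))     # initial positions [kpc]
    v_demo = rng.normal(scale=1e-4, size=(n_halo, 3))     # velocities [kpc/yr]
    d_hat_demo = np.array([0.0, 0.0, 1.0])                 # line-of-sight unit vector (assumed)
    t_grid = np.linspace(0.0, 15.0, 200)                   # observation times [yr]
    dphi_demo = dphi_dop(t_grid, toy_profile, r0_demo, v_demo, d_hat_demo, use_form=False)
    print(dphi_demo.shape)  # (len(t_grid),)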
| 27.522523 | 124 | 0.529078 | [
"MIT"
] | delos/dm-pta-mc | src/signals.py | 9,165 | Python |
"""Archive Tests
Copyright 2015 Archive Analytics Solutions - University of Liverpool
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.test import TestCase
# Create your tests here.
| 32.047619 | 72 | 0.793462 | [
"Apache-2.0"
] | pericles-project/ERMR | indigo-web/archive/tests.py | 673 | Python |
from pathlib import Path
from .utils import hrule, fmt
class SourceFile:
def __init__(self, name='', contents=[], includes=[]):
self.contents = []
self.includes = []
self.name = name
self.add_contents(contents)
self.add_includes(includes)
def add_includes(self, includes=[]):
if includes:
if isinstance(includes, str):
self.includes.append(f'#include {includes}')
            elif isinstance(includes, list):
                for i in includes:
                    self.includes.append(f'#include {i}')
self.includes.append('')
def add_contents(self, contents=[]):
if isinstance(contents, str):
self.contents.append(contents)
return
for c in contents:
if isinstance(c, str):
self.contents.append(c)
else:
for c2 in c:
if isinstance(c2, str):
self.contents.append(c2)
self.contents.append('\n')
def erase_contents(self):
self.contents = []
def assemble(self):
text = []
if self.includes:
text.extend(self.includes)
text.extend([
hrule(),
'',
])
text.extend(self.contents)
return fmt(text)
def write(self):
with open(self.name, 'w') as f:
f.write(self.assemble()) | 25.821429 | 73 | 0.514523 | [
"MIT"
] | DaelonSuzuka/Easy-XC8 | cogscripts/codegen/source.py | 1,446 | Python |
from ... import exc
from ... import util
from ...sql.base import _exclusive_against
from ...sql.base import _generative
from ...sql.base import ColumnCollection
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.expression import alias
from ...util.langhelpers import public_factory
__all__ = ("Insert", "insert")
class Insert(StandardInsert):
"""MySQL-specific implementation of INSERT.
Adds methods for MySQL-specific syntaxes such as ON DUPLICATE KEY UPDATE.
The :class:`~.mysql.Insert` object is created using the
:func:`sqlalchemy.dialects.mysql.insert` function.
.. versionadded:: 1.2
"""
stringify_dialect = "mysql"
inherit_cache = False
@property
def inserted(self):
"""Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE statement
MySQL's ON DUPLICATE KEY UPDATE clause allows reference to the row
that would be inserted, via a special function called ``VALUES()``.
This attribute provides all columns in this row to be referenceable
such that they will render within a ``VALUES()`` function inside the
ON DUPLICATE KEY UPDATE clause. The attribute is named ``.inserted``
so as not to conflict with the existing
:meth:`_expression.Insert.values` method.
.. tip:: The :attr:`_mysql.Insert.inserted` attribute is an instance
of :class:`_expression.ColumnCollection`, which provides an
interface the same as that of the :attr:`_schema.Table.c`
collection described at :ref:`metadata_tables_and_columns`.
With this collection, ordinary names are accessible like attributes
(e.g. ``stmt.inserted.some_column``), but special names and
dictionary method names should be accessed using indexed access,
such as ``stmt.inserted["column name"]`` or
``stmt.inserted["values"]``. See the docstring for
:class:`_expression.ColumnCollection` for further examples.
.. seealso::
:ref:`mysql_insert_on_duplicate_key_update` - example of how
to use :attr:`_expression.Insert.inserted`
"""
return self.inserted_alias.columns
@util.memoized_property
def inserted_alias(self):
return alias(self.table, name="inserted")
@_generative
@_exclusive_against(
"_post_values_clause",
msgs={
"_post_values_clause": "This Insert construct already "
"has an ON DUPLICATE KEY clause present"
},
)
def on_duplicate_key_update(self, *args, **kw):
r"""
Specifies the ON DUPLICATE KEY UPDATE clause.
:param \**kw: Column keys linked to UPDATE values. The
values may be any SQL expression or supported literal Python
values.
.. warning:: This dictionary does **not** take into account
Python-specified default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON DUPLICATE KEY UPDATE
style of UPDATE, unless values are manually specified here.
:param \*args: As an alternative to passing key/value parameters,
a dictionary or list of 2-tuples can be passed as a single positional
argument.
Passing a single dictionary is equivalent to the keyword argument
form::
insert().on_duplicate_key_update({"name": "some name"})
Passing a list of 2-tuples indicates that the parameter assignments
in the UPDATE clause should be ordered as sent, in a manner similar
to that described for the :class:`_expression.Update`
construct overall
in :ref:`updates_order_parameters`::
insert().on_duplicate_key_update(
[("name", "some name"), ("value", "some value")])
.. versionchanged:: 1.3 parameters can be specified as a dictionary
or list of 2-tuples; the latter form provides for parameter
ordering.
.. versionadded:: 1.2
.. seealso::
:ref:`mysql_insert_on_duplicate_key_update`
"""
if args and kw:
raise exc.ArgumentError(
"Can't pass kwargs and positional arguments simultaneously"
)
if args:
if len(args) > 1:
raise exc.ArgumentError(
"Only a single dictionary or list of tuples "
"is accepted positionally."
)
values = args[0]
else:
values = kw
inserted_alias = getattr(self, "inserted_alias", None)
self._post_values_clause = OnDuplicateClause(inserted_alias, values)
insert = public_factory(
Insert, ".dialects.mysql.insert", ".dialects.mysql.Insert"
)
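# Typical usage, following the docstrings above (``user_table`` and its columns
# are illustrative placeholders)::
#
#     from sqlalchemy.dialects.mysql import insert
#
#     stmt = insert(user_table).values(id=1, name="some name")
#     stmt = stmt.on_duplicate_key_update(name=stmt.inserted.name, data="updated")
#     connection.execute(stmt)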
class OnDuplicateClause(ClauseElement):
__visit_name__ = "on_duplicate_key_update"
_parameter_ordering = None
stringify_dialect = "mysql"
def __init__(self, inserted_alias, update):
self.inserted_alias = inserted_alias
# auto-detect that parameters should be ordered. This is copied from
        # Update._process_colparams(); however, we don't look for a special flag
# in this case since we are not disambiguating from other use cases as
# we are in Update.values().
if isinstance(update, list) and (
update and isinstance(update[0], tuple)
):
self._parameter_ordering = [key for key, value in update]
update = dict(update)
if isinstance(update, dict):
if not update:
raise ValueError(
"update parameter dictionary must not be empty"
)
elif isinstance(update, ColumnCollection):
update = dict(update)
else:
raise ValueError(
"update parameter must be a non-empty dictionary "
"or a ColumnCollection such as the `.c.` collection "
"of a Table object"
)
self.update = update
| 35.474286 | 84 | 0.632893 | [
"MIT"
] | Ag-nes/Blog | virtual/lib/python3.8/site-packages/sqlalchemy/dialects/mysql/dml.py | 6,208 | Python |
import json
import unittest
from bitmovin import Bitmovin, Response, TextFilter, Font
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class TextFilterTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
def tearDown(self):
super().tearDown()
def test_create_text_filter(self):
sample_filter = self._get_sample_text_filter()
filter_resource_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(filter_resource_response)
self.assertIsNotNone(filter_resource_response.resource)
self.assertIsNotNone(filter_resource_response.resource.id)
self._compare_text_filters(sample_filter, filter_resource_response.resource)
def test_create_text_filter_without_name(self):
sample_filter = self._get_sample_text_filter()
sample_filter.name = None
filter_resource_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(filter_resource_response)
self.assertIsNotNone(filter_resource_response.resource)
self.assertIsNotNone(filter_resource_response.resource.id)
self._compare_text_filters(sample_filter, filter_resource_response.resource)
def test_retrieve_text_filter(self):
sample_filter = self._get_sample_text_filter()
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
retrieved_filter_response = self.bitmovin.filters.Text.retrieve(created_filter_response.resource.id)
self.assertIsNotNone(retrieved_filter_response)
self.assertIsNotNone(retrieved_filter_response.resource)
self._compare_text_filters(created_filter_response.resource, retrieved_filter_response.resource)
def test_delete_text_filter(self):
sample_filter = self._get_sample_text_filter()
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
deleted_minimal_resource = self.bitmovin.filters.Text.delete(created_filter_response.resource.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.filters.Text.retrieve(created_filter_response.resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving filter after deleting it shouldn\'t be possible.'
)
except BitmovinApiError:
pass
def test_list_text_filters(self):
sample_filter = self._get_sample_text_filter()
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
filters = self.bitmovin.filters.Text.list()
self.assertIsNotNone(filters)
self.assertIsNotNone(filters.resource)
self.assertIsNotNone(filters.response)
self.assertIsInstance(filters.resource, list)
self.assertIsInstance(filters.response, Response)
self.assertGreater(filters.resource.__sizeof__(), 1)
def test_retrieve_text_filter_custom_data(self):
sample_filter = self._get_sample_text_filter()
sample_filter.customData = '<pre>my custom data</pre>'
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
custom_data_response = self.bitmovin.filters.Text.retrieve_custom_data(
created_filter_response.resource.id)
custom_data = custom_data_response.resource
self.assertEqual(sample_filter.customData, json.loads(custom_data.customData))
def _compare_text_filters(self, first: TextFilter, second: TextFilter):
"""
:param first: TextFilter
:param second: TextFilter
:return: bool
"""
self.assertEqual(str(first.x), str(second.x))
self.assertEqual(str(first.y), str(second.y))
self.assertEqual(first.text, second.text)
self.assertEqual(first.timecode, second.timecode)
        self.assertEqual(first.shadowY, second.shadowY)
self.assertEqual(first.shadowX, second.shadowX)
self.assertEqual(first.shadowColor, second.shadowColor)
self.assertEqual(first.alpha, second.alpha)
self.assertEqual(first.fontSize, second.fontSize)
self.assertEqual(first.font, second.font)
self.assertEqual(first.fontColor, second.fontColor)
self.assertEqual(first.fixBounds, second.fixBounds)
self.assertEqual(first.borderWidth, second.borderWidth)
self.assertEqual(first.lineSpacing, second.lineSpacing)
self.assertEqual(first.boxColor, second.boxColor)
self.assertEqual(first.boxBorderWidth, second.boxBorderWidth)
self.assertEqual(first.box, second.box)
self.assertEqual(first.description, second.description)
self.assertEqual(first.name, second.name)
return True
def _get_sample_text_filter(self):
text_filter = TextFilter(name='Sample Text Filter',
x='10',
y='10',
text='ThisIsATest',
font=Font.DEJAVUSANS)
self.assertIsNotNone(text_filter.x)
self.assertIsNotNone(text_filter.y)
self.assertIsNotNone(text_filter.name)
self.assertIsNotNone(text_filter.font)
return text_filter
if __name__ == '__main__':
unittest.main()
| 44.590909 | 108 | 0.722732 | [
"Unlicense"
] | bitmovin/bitmovin-python | tests/bitmovin/services/filters/text_filter_tests.py | 6,867 | Python |
# -*- coding: utf-8 -*-
# noqa: B950
import logging
from collections import Counter
import tqdm
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader
from detectron2.engine import default_argument_parser
from detectron2.modeling import build_model
from detectron2.utils.analysis import (
activation_count_operators,
flop_count_operators,
parameter_count_table,
)
from detectron2.utils.logger import setup_logger
logger = logging.getLogger("detectron2")
def setup(args):
cfg = get_cfg()
cfg.merge_from_file(args.config_file)
cfg.DATALOADER.NUM_WORKERS = 0
cfg.merge_from_list(args.opts)
cfg.freeze()
setup_logger()
return cfg
def do_flop(cfg):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
counts = Counter()
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
counts += flop_count_operators(model, data)
logger.info(
"(G)Flops for Each Type of Operators:\n" + str([(k, v / idx) for k, v in counts.items()])
)
def do_activation(cfg):
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
model.eval()
counts = Counter()
for idx, data in zip(tqdm.trange(args.num_inputs), data_loader): # noqa
counts += activation_count_operators(model, data)
logger.info(
"(Million) Activations for Each Type of Operators:\n"
+ str([(k, v / idx) for k, v in counts.items()])
)
def do_parameter(cfg):
model = build_model(cfg)
logger.info("Parameter Count:\n" + parameter_count_table(model, max_depth=5))
def do_structure(cfg):
model = build_model(cfg)
logger.info("Model Structure:\n" + str(model))
if __name__ == "__main__":
parser = default_argument_parser(
epilog="""
Examples:
To show parameters of a model:
$ ./analyze_model.py --tasks parameter \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml
Flops and activations are data-dependent, therefore inputs and model weights
are needed to count them:
$ ./analyze_model.py --num-inputs 100 --tasks flop \\
--config-file ../configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml \\
MODEL.WEIGHTS /path/to/model.pkl
"""
)
parser.add_argument(
"--tasks",
choices=["flop", "activation", "parameter", "structure"],
required=True,
nargs="+",
)
parser.add_argument(
"--num-inputs",
default=100,
type=int,
help="number of inputs used to compute statistics for flops/activations, "
"both are data dependent.",
)
args = parser.parse_args()
assert not args.eval_only
assert args.num_gpus == 1
cfg = setup(args)
for task in args.tasks:
{
"flop": do_flop,
"activation": do_activation,
"parameter": do_parameter,
"structure": do_structure,
}[task](cfg)
| 27.965217 | 97 | 0.679415 | [
"Apache-2.0"
] | ArtificialNotImbecile/detectron2 | tools/analyze_model.py | 3,216 | Python |
def leiaint(msg):
while True:
try:
n = int(input(msg))
except(ValueError, TypeError):
print("\033[31mERRO: Por favor, digite um numero inteiro valido.\033[m")
except KeyboardInterrupt:
print("\n\033[31mO usuario preferiu não digitar esse numero.")
return 0
else:
return n
def leiafloat(msg):
while True:
try:
n = float(input(msg))
except(ValueError, TypeError):
print("\033[31mERRO: Por favor, digite um numero inteiro valido.\033[m")
except KeyboardInterrupt:
print("\n\033[31mO usuario preferiu não digitar esse numero.")
return 0
else:
return n
n1 = leiaint("Digite um numero inteiro: ")
n2 = leiafloat("Digite um numero real: ")
print(f"Você acabou de digitar o numero inteiro {n1} e o numero real {n2}!") | 31.206897 | 84 | 0.583425 | [
"MIT"
] | kesia-barros/exercicios-python | ex001 a ex114/ex113.py | 908 | Python |
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
| 34.142857 | 70 | 0.74477 | [
"MIT"
] | DD-L/deel.boost.python | origin/libs/python/pyste/src/Pyste/__init__.py | 239 | Python |
#!/usr/bin/env python
# Demonstration GET users/search
# See https://dev.twitter.com/rest/reference/get/users/search
from secret import twitter_instance
tw = twitter_instance()
response = tw.users.search(
q='bot',
page=0,
count=20,
include_entities=False)
for i in response:
print('''
{screen_name} | {name}
{location}
{url}
{description}
ツイート数 {statuses_count}
フォロー {friends_count} 人
フォロワー {followers_count} 人
'''.format_map(i))
| 16.321429 | 61 | 0.706783 | [
"MIT"
] | showa-yojyo/note | source/_sample/ptt/users-search.py | 489 | Python |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/01_tabular.pd.ipynb (unless otherwise specified).
__all__ = ['PartDep']
# Cell
from fastai.tabular.all import *
from .core import *
# Cell
from plotnine import *
# Cell
from IPython.display import clear_output
# Cell
class PartDep(Interpret):
"""
    Calculate Partial Dependence. Continuous vars are divided into buckets and are analyzed as well
    fields is a list of lists of the columns we want to test. The inner items are treated as connected fields.
    For ex. fields = [['Store','StoreType']] means that Store and StoreType are treated as one entity
    (their values are substituted as a pair, not as separate values)
    coef is useful when we don't want to deal with all the variants, but only with the most common ones
    In short, if coef is for ex. 0.9, then the function keeps all but the 10% least used variants
    If coef is more than 1.0, then 'coef' itself is used as the threshold (as the min number of occurrences)
    use_log=True is needed if we have transformed the dependent variable into its log
    use_int=True is needed if we want the log-detransformed (exponentiated) var to be an integer, not a float
    is_continue=True helps with long calculations; it continues the last calculation from the saved file
    is_use_cache=True loads the last fully calculated result. It can distinguish caches that were made with
    different fields and coef
    no_precalc=True -- don't calculate PartDep (useful if you want to use `plot_raw` and `plot_model` only)
"""
def __init__(self, learn, df, model_name: str, fields: list = (), coef: float = 1.0,
is_sorted: bool = True, use_log=False, use_int=False,
cache_path=None, is_use_cache=True, is_continue=False, no_precalc=False):
super().__init__(learn, df)
self.use_log = use_log
self.use_int = use_int
self.coef = coef
self.is_sorted = is_sorted
if (fields is None) or (len(fields) == 0):
self.fields = self._get_all_columns()
else:
self.fields = listify(fields)
self.part_dep_df = None
self.cache_path = ifnone(cache_path, learn.path / 'cache')
self.save_name = f"{model_name}_part_dep"
self.is_use_cache = is_use_cache
self.is_continue = is_continue
self.dep_var = self._get_dep_var()
self.is_biclassification = True if (learn.dls.c == 2) else False
if (no_precalc==False):
self._load_or_calculate()
@classmethod
def what_cached(self, model_name: str, path=None, learn=None):
"""
Shows what keys are cached
"""
if isNone(path) and isNone(learn):
print("path and learn cannot be None at the same time")
return
elif isNone(path):
path = learn.path
name = f"{model_name}_part_dep"
folder = 'cache'
path = path / folder
if not (Path(f"{path / name}.pkl").exists()):
print(f"No chache file")
else:
f = open(path / f"{name}.pkl", "rb")
var = load(f)
f.close()
for k in var.keys():
print(k)
@classmethod
def empty_cache(self, model_name: str, path=None, learn=None):
"""
deletes the cache file
"""
if isNone(path) and isNone(learn):
print("path and learn cannot be None at the same time")
return
elif isNone(path):
path = learn.path
name = f"{model_name}_part_dep"
folder = 'cache'
path = path / folder
files = (Path(f"{path / name}.pkl"), Path(path / 'pd_interm.pkl'))
for file in files:
if not (file.exists()):
print(f"No chache file {file}")
else:
file.unlink()
def _cont_into_buckets(self, df_init, CONT_COLS):
"""
        Categorical values can be easily distinguished one from another,
        but that doesn't work with continuous values: we have to divide their
        values into buckets and then use all values in a bucket as a single value
        that averages the bucket. This way we convert a continuous feature into a
        pseudo-categorical one and are able to apply partial dependence analysis to it
"""
fields = self.fields
df = df_init.copy()
if is_in_list(values=fields, in_list=CONT_COLS):
for col in which_elms(values=fields, in_list=CONT_COLS):
edges = np.histogram_bin_edges(a=df[col].dropna(), bins='auto')
for x, y in zip(edges[::], edges[1::]):
df.loc[(df[col] > x) & (df[col] < y), col] = (x + y) / 2
return df
def _get_field_uniq_x_coef(self, df: pd.DataFrame, fields: list, coef: float) -> list:
'''
        This function outputs the occurrence threshold for the different variants of the list of columns (fields)
        In short, if coef is for ex. 0.9, then the function outputs the occurrence count that keeps all but the
        10% least used variants
        If coef is more than 1.0, then 'coef' itself is used as the threshold
'''
if (coef > 1):
return math.ceil(coef)
coef = 0. if (coef < 0) else coef
occs = df.groupby(fields).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
num = math.ceil(coef * len(occs))
if (num <= 0):
            # number of occurrences is now = max_occs+1 (so there will be no items with this filter)
return occs.iloc[0]['Times'] + 1
else:
return occs.iloc[num - 1]['Times']
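    # Worked example with made-up counts: if the variants occur [50, 30, 10, 5, 3]
    # times and coef=0.6, then num = ceil(0.6 * 5) = 3 and the returned threshold is
    # the 3rd largest count, 10, so only variants seen at least 10 times survive the
    # later filtering in _get_part_dep_one.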
def _get_part_dep_one(self, fields: list, masterbar=None) -> pd.DataFrame:
'''
        Calculates the partial dependency for the columns in fields.
        fields is a list of lists of the columns we want to test. The inner items are treated as connected fields.
        For ex. fields = [['Store','StoreType']] means that Store and StoreType are treated as one entity
        (their values are substituted as a pair, not as separate values)
        coef is useful when we don't want to deal with all the variants, but only with the most common ones
'''
NAN_SUBST = '###na###'
cont_vars = self._get_cont_columns()
fields = listify(fields)
coef, is_sorted, use_log, use_int = self.coef, self.is_sorted, self.use_log, self.use_int
dep_name = self._get_dep_var()
df = self._cont_into_buckets(df_init=self.df, CONT_COLS=cont_vars)
        # here we prepare the data to eliminate pairs that occur too rarely
        # and make NaN a separate value so that it appears in the occurrence counts
field_min_occ = self._get_field_uniq_x_coef(df=df, fields=fields, coef=coef)
df[fields] = df[fields].fillna(NAN_SUBST) # to treat None as a separate field
occs = df.groupby(fields).size().reset_index(name="Times").sort_values(['Times'], ascending=False)
occs[fields] = occs[fields].replace(to_replace=NAN_SUBST, value=np.nan) # get back Nones from NAN_SUBST
df[fields] = df[fields].replace(to_replace=NAN_SUBST, value=np.nan) # get back Nones from NAN_SUBST
occs = occs[occs['Times'] >= field_min_occ]
df_copy = df.merge(occs[fields]).copy()
        # here for every pair of values of fields we substitute its values in the original df
        # with the current one and calculate predictions
        # So we predict the mean dep_var for every pair of values of fields on the whole dataset
frame = []
ln = len(occs)
if (ln > 0):
for _, row in progress_bar(occs.iterrows(), total=ln, parent=masterbar):
# We don't need to do df_copy = df.merge(occs[field]).copy() every time
# as every time we change the same column (set of columns)
record = []
for fld in fields:
df_copy[fld] = row[fld]
preds = self._predict_df(df=df_copy)
preds = np.exp(np.mean(preds)) if (use_log == True) else np.mean(preds)
preds = int(preds) if (use_int == True) else preds
for fld in fields:
record.append(row[fld])
record.append(preds)
record.append(row['Times'])
frame.append(record)
        # Here for every pair of fields we calculate the mean dep_var deviation
        # This deviation is the score that shows how and where this particular pair of fields
        # moves the dependent variable
        # Added times to more easily understand the data (the more times, the more sure we are)
out = pd.DataFrame(frame, columns=fields + [dep_name, 'times'])
median = out[dep_name].median()
out[dep_name] /= median
if (is_sorted == True):
out = out.sort_values(by=dep_name, ascending=False)
return out
def _get_part_dep(self):
'''
        Makes a dataframe with partial dependencies for every group of connected columns in fields
'''
fields = self.fields
learn = self.learn
cache_path = self.cache_path
dep_name = self._get_dep_var()
is_continue = self.is_continue
l2k = self._list_to_key
result = []
to_save = {}
from_saved = {}
# Load from cache
if (is_continue == True):
if Path(cache_path / 'pd_interm.pkl').exists():
from_saved = ld_var(name='pd_interm', path=cache_path)
else:
is_continue = False
elapsed = []
left = []
if (is_continue == True):
for field in fields:
if (l2k(field) in from_saved):
elapsed.append(field)
new_df = from_saved[l2k(field)]
result.append(new_df)
to_save[l2k(field)] = new_df
for field in fields:
if (l2k(field) not in from_saved):
left.append(field)
# Calculate
pbar = master_bar(left)
cache_path.mkdir(parents=True, exist_ok=True)
sv_var(var=to_save, name='pd_interm', path=cache_path)
for field in pbar:
new_df = self._get_part_dep_one(fields=field, masterbar=pbar)
new_df['feature'] = self._list_to_key(field)
if is_listy(field):
new_df['value'] = new_df[field].values.tolist()
new_df.drop(columns=field, inplace=True)
else:
new_df = new_df.rename(index=str, columns={str(field): "value"})
result.append(new_df)
to_save[l2k(field)] = new_df
sv_var(var=to_save, name='pd_interm', path=cache_path)
clear_output()
if Path(cache_path / 'pd_interm.pkl').exists():
Path(cache_path / 'pd_interm.pkl').unlink() # delete intermediate file
result = pd.concat(result, ignore_index=True, sort=True)
result = result[['feature', 'value', dep_name, 'times']]
clear_output()
self.part_dep_df = result
def _load_dict(self, name, path):
if not (Path(f"{path / name}.pkl").exists()):
return None
return self._ld_var(name=name, path=path)
def _save_cached(self):
"""
        Saves the calculated PartDep df into path.
        More than one can be saved, as a dict with fields as the key
"""
path = self.cache_path
path.mkdir(parents=True, exist_ok=True)
name = self.save_name
sv_dict = self._load_dict(name=name, path=path)
key = self._list_to_key(self.fields + [self.coef])
if isNone(sv_dict):
sv_dict = {key: self.part_dep_df}
else:
sv_dict[key] = self.part_dep_df
self._sv_var(var=sv_dict, name=name, path=path)
def _load_cached(self):
"""
        Loads the calculated PartDep df if a cached entry exists.
"""
name = self.save_name
path = self.cache_path
if not (Path(f"{path / name}.pkl").exists()):
return None
ld_dict = self._ld_var(name=name, path=path)
key = self._list_to_key(self.fields + [self.coef])
if (key not in ld_dict):
return None
return ld_dict[key]
def _load_or_calculate(self):
"""
        Calculates part dep or loads it from cache if possible
"""
if (self.is_use_cache == False) or isNone(self._load_cached()):
self._get_part_dep()
return self._save_cached()
else:
self.part_dep_df = self._load_cached()
def _general2partial(self, df):
if (len(df) == 0):
return None
copy_df = df.copy()
feature = copy_df['feature'].iloc[0]
copy_df.drop(columns='feature', inplace=True)
copy_df.rename(columns={"value": feature}, inplace=True)
return copy_df
def plot_raw(self, field, sample=1.0):
"""
        Plot the dependency graph from the data itself
        field must be a list of exactly one feature
        sample is a coefficient of len(df). Lower it if the kernel tends to shut down on this
"""
df = self.df
df = df.sample(int(len(df)*sample))
field = field[0]
dep_var = f"{self._get_dep_var()}_orig" if (self.use_log == True) else self._get_dep_var()
return ggplot(df, aes(field, dep_var)) + stat_smooth(se=True, method='loess');
def plot_model(self, field, strict_recalc=False, sample=1.0):
'''
Plot dependency graph from the model.
        It also takes times into account, so the plot becomes much more resilient, because not every value is treated as equal
        (more occurrences mean more weight)
        field must be a list of exactly one feature
        strict_recalc=True ignores the precalculated `part_dep_df` and calculates it anyway
        sample is a coefficient of len(df). Lower it if the kernel tends to shut down on this
'''
cached = self.get_pd(feature=self._list_to_key(field))
if (strict_recalc == False) and isNotNone(cached):
pd_table = cached
else:
pd_table = self._get_part_dep_one(fields=field)
clear_output()
field = field[0]
dep_var = f"{self._get_dep_var()}"
rearr = []
for var, fee, times in zip(pd_table[field], pd_table[dep_var], pd_table['times']):
for i in range(int(times)):
rearr.append([var, fee])
rearr = pd.DataFrame(rearr, columns=[field, dep_var])
rearr = rearr.sample(int(len(rearr)*sample))
return ggplot(rearr, aes(field, dep_var)) + stat_smooth(se=True, method='loess');
def get_pd(self, feature, min_tm=1):
"""
Gets particular feature subtable from the whole one (min times is optional parameter)
"""
if isNone(self.part_dep_df):
return None
df = self.part_dep_df.query(f"""(feature == "{feature}") and (times > {min_tm})""")
return self._general2partial(df=df)
def get_pd_main_chained_feat(self, main_feat_idx=0, show_min=1):
"""
Transforms whole features table to get_part_dep_one output table format
"""
def get_xth_el(str_list: str, indexes: list):
lst = str_list if is_listy(str_list) else ast.literal_eval(str_list)
lst = listify(lst)
if (len(lst) == 1):
return lst[0]
elif (len(lst) > 1):
if (len(indexes) == 1):
return lst[indexes[0]]
else:
return [lst[idx] for idx in indexes]
else:
return None
feat_table = self.part_dep_df
main_feat_idx = listify(main_feat_idx)
feat_table_copy = feat_table.copy()
func = functools.partial(get_xth_el, indexes=main_feat_idx)
feat_table_copy['value'] = feat_table_copy['value'].apply(func)
feat_table_copy.drop(columns='feature', inplace=True)
return feat_table_copy.query(f'times > {show_min}')
def plot_part_dep(self, fields, limit=20, asc=False):
"""
Plots partial dependency plot for sublist of connected `fields`
        `fields` must be a sublist of the `fields` given for the initialization calculation
"""
def prepare_colors(df_pd: pd.DataFrame):
heat_min = df_pd['times'].min()
heat_max = df_pd['times'].max()
dif = heat_max - heat_min
colors = [((times - heat_min) / (dif), (times - heat_min) / (4 * dif), 0.75) for times in df_pd['times']]
return colors
df = self.part_dep_df.query(f"feature == '{self._list_to_key(fields)}'")
dep_var = self.dep_var
df_copy = df.copy()
df_copy['feature'] = df_copy['feature'].str.slice(0, 45)
df_copy = df_copy.sort_values(by=dep_var, ascending=asc)[:limit].sort_values(by=dep_var, ascending=not (asc))
colors = prepare_colors(df_pd=df_copy)
ax = df_copy.plot.barh(x="value", y=dep_var, sort_columns=True, figsize=(10, 10),
color=colors, title=self._list_to_key(fields))
ax.set_ylabel(fields)
if (self.is_biclassification):
txt = f"According to probability of {self._get_dep_var()} is '{learn.dls.vocab[0]}'"
ax.annotate(txt, (0,0), (0, -30),
xycoords='axes fraction', textcoords='offset points',
va='top')
for (p, t) in zip(ax.patches, df_copy['times']):
ax.annotate(f'{p.get_width():.4f}', ((p.get_width() * 1.005), p.get_y() * 1.005))
ax.annotate(f'{int(t)}', ((p.get_width() * .45), p.get_y() + 0.1), color='white', weight='bold') | 41.449649 | 117 | 0.598339 | [
"Apache-2.0"
] | floleuerer/fastinference | fastinference/tabular/pd.py | 17,699 | Python |
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.throttling import ScopedRateThrottle
from rest_framework.views import APIView
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST
from rest_framework.response import Response
from rest_framework import viewsets, mixins
from rest_framework.pagination import LimitOffsetPagination
from .models import JudgeStatus, CaseStatus
from .serializers import JudgeStatusSerializer, CaseStatusSerializer, JudgeStatusCodeSerializer
from .permission import ManagerOnly, UserRatingOnly, NoContestOnly
from contest.models import ContestInfo
from contest.serializers import ContestInfoSerializer
import datetime
class JudgeStatusView(viewsets.ModelViewSet):
queryset = JudgeStatus.objects.all().order_by('-id')
serializer_class = JudgeStatusSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('user', 'result', "contest", "problem", "language", "problemtitle")
permission_classes = (ManagerOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
def list(self, request, *args, **kwargs):
self.check_permissions(request)
self.check_throttles(request)
userid = request._request.session.get("user_id")
usertype = request._request.session.get("type")
cid = request._request.GET.get("contest",0)
if cid == "":
cid = 0
contestid = int(cid)
if contestid == 0:
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
        else:  # special handling for a frozen scoreboard
contest = ContestInfo.objects.get(id=contestid)
queryset = self.filter_queryset(self.get_queryset())
newpage = []
for data in queryset:
if usertype != 3 and userid != data.user and contest.lockboard == 1 and contest.lasttime - (data.submittime - contest.begintime).total_seconds() <= contest.locktime * 60:
data.result = -1
newpage.append(data)
page = self.paginate_queryset(newpage)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(newpage, many=True)
return Response(serializer.data)
class JudgeStatusPutView(viewsets.GenericViewSet, mixins.CreateModelMixin):
queryset = JudgeStatus.objects.all()
serializer_class = JudgeStatusCodeSerializer
permission_classes = (UserRatingOnly,)
throttle_scope = "judge"
throttle_classes = [ScopedRateThrottle, ]
class JudgeStatusCodeView(viewsets.GenericViewSet, mixins.RetrieveModelMixin):
queryset = JudgeStatus.objects.all()
serializer_class = JudgeStatusCodeSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('user', 'result', "contest", "problem", "problemtitle")
permission_classes = (NoContestOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
class CaseStatusView(viewsets.ModelViewSet):
queryset = CaseStatus.objects.all()
serializer_class = CaseStatusSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('username', 'problem', "statusid")
permission_classes = (ManagerOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
class ACRankView(viewsets.ModelViewSet):
    queryset = JudgeStatus.objects.filter(submittime__gte=datetime.datetime.now()-datetime.timedelta(days=30),result=0) # Note: this is only a temporary implementation! If the OJ has many users this will cause performance problems!! # There is also a bug here: the filter should not be written in the queryset, because the time is evaluated ahead of time, which makes the result inaccurate
serializer_class = JudgeStatusSerializer
pagination_class = LimitOffsetPagination
filter_backends = (DjangoFilterBackend,)
filter_fields = ('user', 'result', "contest", "problem", "language")
permission_classes = (ManagerOnly,)
throttle_scope = "post"
throttle_classes = [ScopedRateThrottle, ]
class RejudgeAPIView(APIView):
permission_classes = (ManagerOnly,)
def post(self, request, format=None):
data = request.data
contestid = data.get('contestid', "")
problem = data.get('problem', "")
statusid = data.get('statusid', "")
statustype = data.get('statustype', "")
print(contestid, problem, statusid, statustype)
if contestid == 0 or problem == -1:
return Response("bad", status=HTTP_400_BAD_REQUEST)
if contestid != "" and problem != "":
JudgeStatus.objects.filter(contest=contestid).filter(
contestproblem=problem).update(result=-1)
return Response("ok", status=HTTP_200_OK)
if problem != "" and contestid == "":
JudgeStatus.objects.filter(problem=problem).update(result=-1)
return Response("ok", status=HTTP_200_OK)
if statusid != "":
JudgeStatus.objects.filter(id=statusid).update(result=-1)
return Response("ok", status=HTTP_200_OK)
if statustype != "":
JudgeStatus.objects.filter(result=statustype).update(result=-1)
return Response("ok", status=HTTP_200_OK)
return Response("bad", status=HTTP_400_BAD_REQUEST)
| 40.244755 | 197 | 0.692441 | [
"MIT"
] | RAyymask/LPOJ | Backend/judgestatus/views.py | 5,871 | Python |
# Neural Networks
# MLPClassifier is scikit-learn's multi-layer perceptron classifier, used here for classification
from sklearn.neural_network import MLPClassifier
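# hidden_layer_sizes=(5, 2) builds two hidden layers with 5 and 2 units; the
# 'lbfgs' solver is a full-batch quasi-Newton method suited to small datasets,
# and alpha sets the strength of the L2 regularization.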
clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
clf.fit(X_train_clean, y_train)
clf.predict(X_test_clean)
scoreN = clf.score(X_test_clean, y_test)
print(scoreN) | 29.909091 | 90 | 0.808511 | [
"Apache-2.0"
] | daniloorozco/ufc-predictions | models/model_NN.py | 329 | Python |
#!/usr/bin/env python
import unittest
from piprot.piprot import main
class TestRequirementsParser(unittest.TestCase):
def setUp(self):
pass
def test_requirement_exact(self):
with self.assertRaises(SystemExit):
main([open('piprot/test/files/pytz_req.txt')])
if __name__ == '__main__':
unittest.main()
| 19.222222 | 58 | 0.684971 | [
"MIT"
] | 00willo/piprot | piprot/test/test_piprot.py | 346 | Python |
"""common logic for all queries"""
import json
from functools import partial, singledispatch
from operator import itemgetter
import snug
from gentools import (compose, map_yield, map_send, oneyield, reusable,
map_return)
from .load import registry
API_URL = 'https://slack.com/api/'
class ApiError(Exception):
pass
def _parse_content(response):
"""parse the response body as JSON, raise on errors"""
if response.status_code != 200:
raise ApiError(f'unknown error: {response.content.decode()}')
result = json.loads(response.content)
if not result['ok']:
raise ApiError(f'{result["error"]}: {result.get("detail")}')
return result
basic_interaction = compose(map_yield(snug.prefix_adder(API_URL)),
map_send(_parse_content))
"""basic request/response parsing"""
@singledispatch
def _dump_queryparam_value(val):
return str(val)
@_dump_queryparam_value.register(bool)
def _dump_bool_value(val):
return 'true' if val else 'false'
def _dump_params(params):
return {k: _dump_queryparam_value(v) for k, v in params.items()
if v is not None}
def paginated_retrieval(methodname, itemtype):
"""decorator factory for retrieval queries from query params"""
return compose(
reusable,
basic_interaction,
map_yield(partial(_params_as_get, methodname)),
)
def _params_as_get(methodname: str, params: dict) -> snug.Request:
return snug.GET(methodname, params=_dump_params(params))
def json_post(methodname, rtype, key):
"""decorator factory for json POST queries"""
return compose(
reusable,
map_return(registry(rtype), itemgetter(key)),
basic_interaction,
map_yield(partial(_json_as_post, methodname)),
oneyield,
)
def _json_as_post(methodname: str, body: dict) -> snug.Request:
return snug.POST(methodname,
json.dumps({k: v for k, v in body.items()
if v is not None}),
headers={'Content-Type': 'application/json'})
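# Sketch of how a query module might apply these factories (the method name,
# return type, and payload below are illustrative assumptions, not part of this
# module):
#
#     @json_post('chat.postMessage', rtype=Message, key='message')
#     def post_message(channel: str, text: str):
#         return {'channel': channel, 'text': text}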
| 26.987179 | 71 | 0.663183 | [
"MIT"
] | ariebovenberg/snug | examples/slack/query.py | 2,105 | Python |
#!/usr/bin/env python3
# Data schema:
# (start) (12b junk) artist (5* byte) (1b junk) title (col) (1b junk) date and time (col) (1b junk) url (urldur) duration (col) (1b junk) thumbnail url (end)
keybytes = {
"row_start": "80 09 80 00 80", # row start
"col": "5F 10", # column delimeter
"urldur": "58", # url/duration delimeter
"urldur2": "D8",
"urldur3": "D2",
"row_end": "D8 00 0A 00 2A 00 2B 00 2C 00 2D 00 2E 00 2F 00 30 00 31 00 32 00" # row end
}
# convert hex to bytes
for k, v in keybytes.items():
keybytes[k] = bytearray.fromhex(v)
def get_urls_from_playlist(filename):
with open(filename, "rb") as f:
content = f.read()
for row in content.split(keybytes["row_start"])[1:]:
try:
row = row.split(keybytes["row_end"])[0] # cut off everything after the row end
columns = row.split(keybytes["col"])
for col in columns:
if "http" in str(col):
# cut off junk bytes
url = col.split(keybytes["urldur"])[0].split(keybytes["urldur2"])[0].split(keybytes["urldur3"])[0]
url = url[1:].decode("utf-8")
yield url
except Exception as e:
pass
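if __name__ == "__main__":
    # Minimal command-line usage: print every URL found in the playlist file
    # given as the first argument.
    import sys
    for found_url in get_urls_from_playlist(sys.argv[1]):
        print(found_url)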
| 37.970588 | 157 | 0.543765 | [
"MIT"
] | MikeWent/applepd_bot | playlist_parser.py | 1,291 | Python |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers just related to the detection neural network.
"""
from __future__ import print_function
from .layer_function_generator import generate_layer_fn
from .layer_function_generator import autodoc, templatedoc
from ..layer_helper import LayerHelper
from ..framework import Variable
from .loss import softmax_with_cross_entropy
from . import tensor
from . import nn
from . import ops
from ... import compat as cpt
from ..data_feeder import check_variable_and_dtype, check_type, check_dtype
import math
import six
import numpy as np
from functools import reduce
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
__all__ = [
'prior_box',
'density_prior_box',
'multi_box_head',
'bipartite_match',
'target_assign',
'detection_output',
'ssd_loss',
'rpn_target_assign',
'retinanet_target_assign',
'sigmoid_focal_loss',
'anchor_generator',
'roi_perspective_transform',
'generate_proposal_labels',
'generate_proposals',
'generate_mask_labels',
'iou_similarity',
'box_coder',
'polygon_box_transform',
'yolov3_loss',
'yolo_box',
'box_clip',
'multiclass_nms',
'locality_aware_nms',
'matrix_nms',
'retinanet_detection_output',
'distribute_fpn_proposals',
'box_decoder_and_assign',
'collect_fpn_proposals',
]
def retinanet_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
gt_labels,
is_crowd,
im_info,
num_classes=1,
positive_overlap=0.5,
negative_overlap=0.4):
"""
**Target Assign Layer for the detector RetinaNet.**
This OP finds out positive and negative samples from all anchors
for training the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ ,
and assigns target labels for classification along with target locations for
regression to each sample, then takes out the part belonging to positive and
negative samples from category prediction( :attr:`cls_logits`) and location
prediction( :attr:`bbox_pred`) which belong to all anchors.
The searching principles for positive and negative samples are as followed:
1. Anchors are assigned to ground-truth boxes when it has the highest IoU
overlap with a ground-truth box.
2. Anchors are assigned to ground-truth boxes when it has an IoU overlap
higher than :attr:`positive_overlap` with any ground-truth box.
3. Anchors are assigned to background when its IoU overlap is lower than
:attr:`negative_overlap` for all ground-truth boxes.
4. Anchors which do not meet the above conditions do not participate in
the training process.
Retinanet predicts a :math:`C`-vector for classification and a 4-vector for box
regression for each anchor, hence the target label for each positive(or negative)
sample is a :math:`C`-vector and the target locations for each positive sample
is a 4-vector. As for a positive sample, if the category of its assigned
ground-truth box is class :math:`i`, the corresponding entry in its length
:math:`C` label vector is set to 1 and all other entries is set to 0, its box
regression targets are computed as the offset between itself and its assigned
ground-truth box. As for a negative sample, all entries in its length :math:`C`
label vector are set to 0 and box regression targets are omitted because
negative samples do not participate in the training process of location
regression.
After the assignment, the part belonging to positive and negative samples is
taken out from category prediction( :attr:`cls_logits` ), and the part
belonging to positive samples is taken out from location
prediction( :attr:`bbox_pred` ).
Args:
bbox_pred(Variable): A 3-D Tensor with shape :math:`[N, M, 4]` represents
the predicted locations of all anchors. :math:`N` is the batch size( the
number of images in a mini-batch), :math:`M` is the number of all anchors
of one image, and each anchor has 4 coordinate values. The data type of
:attr:`bbox_pred` is float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape :math:`[N, M, C]` represents
the predicted categories of all anchors. :math:`N` is the batch size,
:math:`M` is the number of all anchors of one image, and :math:`C` is
the number of categories (**Notice: excluding background**). The data type
of :attr:`cls_logits` is float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape :math:`[M, 4]` represents
the locations of all anchors. :math:`M` is the number of all anchors of
one image, each anchor is represented as :math:`[xmin, ymin, xmax, ymax]`,
:math:`[xmin, ymin]` is the left top coordinate of the anchor box,
:math:`[xmax, ymax]` is the right bottom coordinate of the anchor box.
The data type of :attr:`anchor_box` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_box`.
anchor_var(Variable): A 2-D Tensor with shape :math:`[M,4]` represents the expanded
factors of anchor locations used in loss function. :math:`M` is number of
all anchors of one image, each anchor possesses a 4-vector expanded factor.
The data type of :attr:`anchor_var` is float32 or float64. Please refer
to the OP :ref:`api_fluid_layers_anchor_generator`
for the generation of :attr:`anchor_var`.
gt_boxes(Variable): A 1-level 2-D LoDTensor with shape :math:`[G, 4]` represents
locations of all ground-truth boxes. :math:`G` is the total number of
all ground-truth boxes in a mini-batch, and each ground-truth box has 4
coordinate values. The data type of :attr:`gt_boxes` is float32 or
float64.
gt_labels(variable): A 1-level 2-D LoDTensor with shape :math:`[G, 1]` represents
categories of all ground-truth boxes, and the values are in the range of
:math:`[1, C]`. :math:`G` is the total number of all ground-truth boxes
in a mini-batch, and each ground-truth box has one category. The data type
of :attr:`gt_labels` is int32.
is_crowd(Variable): A 1-level 1-D LoDTensor with shape :math:`[G]` which
indicates whether a ground-truth box is a crowd. If the value is 1, the
corresponding box is a crowd, it is ignored during training. :math:`G` is
the total number of all ground-truth boxes in a mini-batch. The data type
of :attr:`is_crowd` is int32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
num_classes(int32): The number of categories for classification, the default
value is 1.
positive_overlap(float32): Minimum overlap required between an anchor
and ground-truth box for the anchor to be a positive sample, the default
value is 0.5.
negative_overlap(float32): Maximum overlap allowed between an anchor
and ground-truth box for the anchor to be a negative sample, the default
value is 0.4. :attr:`negative_overlap` should be less than or equal to
:attr:`positive_overlap`, if not, the actual value of
:attr:`positive_overlap` is :attr:`negative_overlap`.
Returns:
A tuple with 6 Variables:
**predict_scores** (Variable): A 2-D Tensor with shape :math:`[F+B, C]` represents
category prediction belonging to positive and negative samples. :math:`F`
is the number of positive samples in a mini-batch, :math:`B` is the number
of negative samples, and :math:`C` is the number of categories
(**Notice: excluding background**). The data type of :attr:`predict_scores`
is float32 or float64.
**predict_location** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
location prediction belonging to positive samples. :math:`F` is the number
of positive samples. :math:`F` is the number of positive samples, and each
sample has 4 coordinate values. The data type of :attr:`predict_location`
is float32 or float64.
**target_label** (Variable): A 2-D Tensor with shape :math:`[F+B, 1]` represents
target labels for classification belonging to positive and negative
samples. :math:`F` is the number of positive samples, :math:`B` is the
number of negative, and each sample has one target category. The data type
of :attr:`target_label` is int32.
**target_bbox** (Variable): A 2-D Tensor with shape :math:`[F, 4]` represents
target locations for box regression belonging to positive samples.
:math:`F` is the number of positive samples, and each sample has 4
coordinate values. The data type of :attr:`target_bbox` is float32 or
float64.
**bbox_inside_weight** (Variable): A 2-D Tensor with shape :math:`[F, 4]`
represents whether a positive sample is fake positive, if a positive
sample is false positive, the corresponding entries in
:attr:`bbox_inside_weight` are set 0, otherwise 1. :math:`F` is the number
of total positive samples in a mini-batch, and each sample has 4
coordinate values. The data type of :attr:`bbox_inside_weight` is float32
or float64.
**fg_num** (Variable): A 2-D Tensor with shape :math:`[N, 1]` represents the number
of positive samples. :math:`N` is the batch size. **Notice: The number
of positive samples is used as the denominator of later loss function,
to avoid the condition that the denominator is zero, this OP has added 1
to the actual number of positive samples of each image.** The data type of
:attr:`fg_num` is int32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[1, 100, 4],
dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[1, 100, 10],
dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[100, 4],
dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[100, 4],
dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[10, 4],
dtype='float32')
gt_labels = fluid.data(name='gt_labels', shape=[10, 1],
dtype='int32')
is_crowd = fluid.data(name='is_crowd', shape=[1],
dtype='int32')
im_info = fluid.data(name='im_info', shape=[1, 3],
dtype='float32')
score_pred, loc_pred, score_target, loc_target, bbox_inside_weight, fg_num = \\
fluid.layers.retinanet_target_assign(bbox_pred, cls_logits, anchor_box,
anchor_var, gt_boxes, gt_labels, is_crowd, im_info, 10)
"""
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'retinanet_target_assign')
check_variable_and_dtype(gt_labels, 'gt_labels', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'retinanet_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_target_assign')
helper = LayerHelper('retinanet_target_assign', **locals())
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
fg_num = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="retinanet_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'GtLabels': gt_labels,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight,
'ForegroundNumber': fg_num
},
attrs={
'positive_overlap': positive_overlap,
'negative_overlap': negative_overlap
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
fg_num.stop_gradient = True
cls_logits = nn.reshape(x=cls_logits, shape=(-1, num_classes))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight, fg_num
def rpn_target_assign(bbox_pred,
cls_logits,
anchor_box,
anchor_var,
gt_boxes,
is_crowd,
im_info,
rpn_batch_size_per_im=256,
rpn_straddle_thresh=0.0,
rpn_fg_fraction=0.5,
rpn_positive_overlap=0.7,
rpn_negative_overlap=0.3,
use_random=True):
"""
**Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection.**
Given the Intersection-over-Union (IoU) overlap between anchors and
ground-truth boxes, this layer assigns classification and regression
targets to each anchor; these targets are used to train the RPN. The
classification target is a binary class label (being an object or not).
Following the Faster-RCNN paper, positive labels are assigned to two
kinds of anchors: (i) the anchor/anchors with the highest IoU overlap
with a ground-truth box, or (ii) an anchor that has an IoU overlap
higher than rpn_positive_overlap (0.7) with any ground-truth box. Note
that a single ground-truth box may assign positive labels to multiple
anchors. An anchor is labeled negative when its IoU ratio is lower than
rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
neither positive nor negative do not contribute to the training objective.
The regression targets are the encoded ground-truth boxes associated with
the positive anchors.
Args:
bbox_pred(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax]. The data type can be float32 or float64.
cls_logits(Variable): A 3-D Tensor with shape [N, M, 1] represents the
predicted confidence predictions. N is the batch size, 1 is the
foreground/background sigmoid score, M is the number of bounding boxes.
The data type can be float32 or float64.
anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax],
[xmin, ymin] is the left top coordinate of the anchor box,
if the input is image feature map, they are close to the origin
of the coordinate system. [xmax, ymax] is the right bottom
coordinate of the anchor box. The data type can be float32 or float64.
anchor_var(Variable): A 2-D Tensor with shape [M,4] holds expanded
variances of anchors. The data type can be float32 or float64.
gt_boxes (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input. The data type can be float32 or float64.
is_crowd (Variable): A 1-D LoDTensor which indicates whether a ground-truth box is a crowd.
The data type must be int32.
im_info (Variable): A 2-D LoDTensor with shape [N, 3]. N is the batch size,
3 is the height, width and scale.
rpn_batch_size_per_im(int): Total number of RPN examples per image.
The data type must be int32.
rpn_straddle_thresh(float): Remove RPN anchors that go outside the image
by straddle_thresh pixels. The data type must be float32.
rpn_fg_fraction(float): Target fraction of RoI minibatch that is labeled
foreground (i.e. class > 0), 0-th class is background. The data type must be float32.
rpn_positive_overlap(float): Minimum overlap required between an anchor
and ground-truth box for the (anchor, gt box) pair to be a positive
example. The data type must be float32.
rpn_negative_overlap(float): Maximum overlap allowed between an anchor
and ground-truth box for the (anchor, gt box) pair to be a negative
example. The data type must be float32.
Returns:
tuple:
A tuple(predicted_scores, predicted_location, target_label,
target_bbox, bbox_inside_weight) is returned. The predicted_scores
and predicted_location are the predicted results of the RPN.
The target_label and target_bbox are the corresponding ground truth.
The predicted_location is a 2D Tensor with shape
[F, 4], and the shape of target_bbox is the same as the shape of
the predicted_location, where F is the number of foreground
anchors. The predicted_scores is a 2D Tensor with shape
[F + B, 1], and the shape of target_label is the same as the shape
of the predicted_scores, where B is the number of background
anchors; F and B depend on the inputs of this operator.
Bbox_inside_weight represents whether the predicted location is fake_fg
or not and its shape is [F, 4].
Examples:
.. code-block:: python
import paddle.fluid as fluid
bbox_pred = fluid.data(name='bbox_pred', shape=[None, 4], dtype='float32')
cls_logits = fluid.data(name='cls_logits', shape=[None, 1], dtype='float32')
anchor_box = fluid.data(name='anchor_box', shape=[None, 4], dtype='float32')
anchor_var = fluid.data(name='anchor_var', shape=[None, 4], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None], dtype='int32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
loc, score, loc_target, score_target, inside_weight = fluid.layers.rpn_target_assign(
bbox_pred, cls_logits, anchor_box, anchor_var, gt_boxes, is_crowd, im_info)
"""
helper = LayerHelper('rpn_target_assign', **locals())
check_variable_and_dtype(bbox_pred, 'bbox_pred', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(cls_logits, 'cls_logits', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_box, 'anchor_box', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(anchor_var, 'anchor_var', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(gt_boxes, 'gt_boxes', ['float32', 'float64'],
'rpn_target_assign')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'rpn_target_assign')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'rpn_target_assign')
# Assign target label to anchors
loc_index = helper.create_variable_for_type_inference(dtype='int32')
score_index = helper.create_variable_for_type_inference(dtype='int32')
target_label = helper.create_variable_for_type_inference(dtype='int32')
target_bbox = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
bbox_inside_weight = helper.create_variable_for_type_inference(
dtype=anchor_box.dtype)
helper.append_op(
type="rpn_target_assign",
inputs={
'Anchor': anchor_box,
'GtBoxes': gt_boxes,
'IsCrowd': is_crowd,
'ImInfo': im_info
},
outputs={
'LocationIndex': loc_index,
'ScoreIndex': score_index,
'TargetLabel': target_label,
'TargetBBox': target_bbox,
'BBoxInsideWeight': bbox_inside_weight
},
attrs={
'rpn_batch_size_per_im': rpn_batch_size_per_im,
'rpn_straddle_thresh': rpn_straddle_thresh,
'rpn_positive_overlap': rpn_positive_overlap,
'rpn_negative_overlap': rpn_negative_overlap,
'rpn_fg_fraction': rpn_fg_fraction,
'use_random': use_random
})
loc_index.stop_gradient = True
score_index.stop_gradient = True
target_label.stop_gradient = True
target_bbox.stop_gradient = True
bbox_inside_weight.stop_gradient = True
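# Flatten the predictions to 2-D and gather the scores/locations picked by
# the target-assign op, so only sampled anchors contribute to the RPN loss.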
cls_logits = nn.reshape(x=cls_logits, shape=(-1, 1))
bbox_pred = nn.reshape(x=bbox_pred, shape=(-1, 4))
predicted_cls_logits = nn.gather(cls_logits, score_index)
predicted_bbox_pred = nn.gather(bbox_pred, loc_index)
return predicted_cls_logits, predicted_bbox_pred, target_label, target_bbox, bbox_inside_weight
def sigmoid_focal_loss(x, label, fg_num, gamma=2.0, alpha=0.25):
"""
:alias_main: paddle.nn.functional.sigmoid_focal_loss
:alias: paddle.nn.functional.sigmoid_focal_loss,paddle.nn.functional.loss.sigmoid_focal_loss
:old_api: paddle.fluid.layers.sigmoid_focal_loss
**Sigmoid Focal Loss Operator.**
`Focal Loss <https://arxiv.org/abs/1708.02002>`_ is used to address the foreground-background
class imbalance that exists in the training phase of many computer vision tasks. This OP computes
the sigmoid value for each element in the input tensor :attr:`x`, after which focal loss is
measured between the sigmoid value and target label.
The focal loss is given as follows:
.. math::
\\mathop{loss_{i,\\,j}}\\limits_{i\\in\\mathbb{[0,\\,N-1]},\\,j\\in\\mathbb{[0,\\,C-1]}}=\\left\\{
\\begin{array}{rcl}
- \\frac{1}{fg\_num} * \\alpha * {(1 - \\sigma(x_{i,\\,j}))}^{\\gamma} * \\log(\\sigma(x_{i,\\,j})) & & {(j + 1) = label_{i,\\,0}} \\\\
- \\frac{1}{fg\_num} * (1 - \\alpha) * {\\sigma(x_{i,\\,j})}^{\\gamma} * \\log(1 - \\sigma(x_{i,\\,j})) & & {(j + 1) != label_{i,\\,0}}
\\end{array} \\right.
We know that
.. math::
\\sigma(x_j) = \\frac{1}{1 + \\exp(-x_j)}
Args:
x(Variable): A 2-D tensor with shape :math:`[N, C]` represents the predicted categories of
all samples. :math:`N` is the number of all samples responsible for optimization in
a mini-batch. For example, samples are anchor boxes for object detection and :math:`N`
is the total number of positive and negative samples in a mini-batch; samples are images
for image classification and :math:`N` is the number of images in a mini-batch. :math:`C`
is the number of classes (**Notice: excluding background**). The data type of :attr:`x` is
float32 or float64.
label(Variable): A 2-D tensor with shape :math:`[N, 1]` represents the target labels for
classification. :math:`N` is the number of all samples responsible for optimization in a
mini-batch, each sample has one target category. The values for positive samples are in the
range of :math:`[1, C]`, and the values for negative samples are 0. The data type of :attr:`label`
is int32.
fg_num(Variable): A 1-D tensor with shape [1] represents the number of positive samples in a
mini-batch, which should be obtained before this OP. The data type of :attr:`fg_num` is int32.
gamma(int|float): Hyper-parameter to balance the easy and hard examples. Default value is
set to 2.0.
alpha(int|float): Hyper-parameter to balance the positive and negative example. Default value
is set to 0.25.
Returns:
Variable(the data type is float32 or float64):
A 2-D tensor with shape :math:`[N, C]`, which is the focal loss of each element in the input
tensor :attr:`x`.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
num_classes = 10 # exclude background
image_width = 16
image_height = 16
batch_size = 32
max_iter = 20
def gen_train_data():
x_data = np.random.uniform(0, 255, (batch_size, 3, image_height,
image_width)).astype('float64')
label_data = np.random.randint(0, num_classes,
(batch_size, 1)).astype('int32')
return {"x": x_data, "label": label_data}
def get_focal_loss(pred, label, fg_num, num_classes):
pred = fluid.layers.reshape(pred, [-1, num_classes])
label = fluid.layers.reshape(label, [-1, 1])
label.stop_gradient = True
loss = fluid.layers.sigmoid_focal_loss(
pred, label, fg_num, gamma=2.0, alpha=0.25)
loss = fluid.layers.reduce_sum(loss)
return loss
def build_model(mode='train'):
x = fluid.data(name="x", shape=[-1, 3, -1, -1], dtype='float64')
output = fluid.layers.pool2d(input=x, pool_type='avg', global_pooling=True)
output = fluid.layers.fc(
input=output,
size=num_classes,
# Notice: size is set to be the number of target classes (excluding background)
# because sigmoid activation will be done in the sigmoid_focal_loss op.
act=None)
if mode == 'train':
label = fluid.data(name="label", shape=[-1, 1], dtype='int32')
# Obtain the fg_num needed by the sigmoid_focal_loss op:
# 0 in label represents background, >=1 in label represents foreground;
# find the elements in label which are greater than or equal to 1, then
# compute the number of these elements.
data = fluid.layers.fill_constant(shape=[1], value=1, dtype='int32')
fg_label = fluid.layers.greater_equal(label, data)
fg_label = fluid.layers.cast(fg_label, dtype='int32')
fg_num = fluid.layers.reduce_sum(fg_label)
fg_num.stop_gradient = True
avg_loss = get_focal_loss(output, label, fg_num, num_classes)
return avg_loss
else:
# During evaluating or testing phase,
# output of the final fc layer should be connected to a sigmoid layer.
pred = fluid.layers.sigmoid(output)
return pred
loss = build_model('train')
moment_optimizer = fluid.optimizer.MomentumOptimizer(
learning_rate=0.001, momentum=0.9)
moment_optimizer.minimize(loss)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(max_iter):
outs = exe.run(feed=gen_train_data(), fetch_list=[loss.name])
print(outs)
"""
check_variable_and_dtype(x, 'x', ['float32', 'float64'],
'sigmoid_focal_loss')
check_variable_and_dtype(label, 'label', ['int32'], 'sigmoid_focal_loss')
check_variable_and_dtype(fg_num, 'fg_num', ['int32'], 'sigmoid_focal_loss')
helper = LayerHelper("sigmoid_focal_loss", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="sigmoid_focal_loss",
inputs={"X": x,
"Label": label,
"FgNum": fg_num},
attrs={"gamma": gamma,
'alpha': alpha},
outputs={"Out": out})
return out
def detection_output(loc,
scores,
prior_box,
prior_box_var,
background_label=0,
nms_threshold=0.3,
nms_top_k=400,
keep_top_k=200,
score_threshold=0.01,
nms_eta=1.0,
return_index=False):
"""
:alias_main: paddle.nn.functional.detection_output
:alias: paddle.nn.functional.detection_output,paddle.nn.functional.vision.detection_output
:old_api: paddle.fluid.layers.detection_output
Given the regression locations, classification confidences and prior boxes,
calculate the detection outputs by performing following steps:
1. Decode input bounding box predictions according to the prior boxes and
regression locations.
2. Get the final detection results by applying multi-class non maximum
suppression (NMS).
Please note, this operation doesn't clip the final output bounding boxes
to the image window.
Args:
loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes. Data type should be
float32 or float64. N is the batch size,
and each bounding box has four coordinate values and the layout
is [xmin, ymin, xmax, ymax].
scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
predicted confidence predictions. Data type should be float32
or float64. N is the batch size, C is the
class number, M is number of bounding boxes.
prior_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
each box is represented as [xmin, ymin, xmax, ymax]. Data type
should be float32 or float64.
prior_box_var(Variable): A 2-D Tensor with shape [M, 4] holds M group
of variance. Data type should be float32 or float64.
background_label(int): The index of background label,
the background label will be ignored. If set to -1, then all
categories will be considered. Default: 0.
nms_threshold(float): The threshold to be used in NMS. Default: 0.3.
nms_top_k(int): Maximum number of detections to be kept according
to the confidences after filtering detections based on
score_threshold and before NMS. Default: 400.
keep_top_k(int): Number of total bboxes to be kept per image after
NMS step. -1 means keeping all bboxes after NMS step. Default: 200.
score_threshold(float): Threshold to filter out bounding boxes with
low confidence score. If not provided, consider all boxes.
Default: 0.01.
nms_eta(float): The parameter for adaptive NMS. It works only when the
value is less than 1.0. Default: 1.0.
return_index(bool): Whether return selected index. Default: False
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, a tuple with one Variable(Out) is returned.
Out (Variable): The detection output is a LoDTensor with shape [No, 6].
Data type is the same as input (loc). Each row has six values:
[label, confidence, xmin, ymin, xmax, ymax]. `No` is
the total number of detections in this mini-batch. For each instance,
the offsets in the first dimension are called LoD; the offset number is
N + 1, where N is the batch size. The i-th image has `LoD[i + 1] - LoD[i]`
detected results; if it is 0, the i-th image has no detected results.
Index (Variable): Only returned when return_index is True. A 2-D LoDTensor
with shape [No, 1] represents the selected indices, which are integers.
The indices are absolute values across batches. No is the same number
as in Out. If the index is used to gather other attributes such as age,
one needs to reshape the input (N, M, 1) to (N * M, 1) first, where
N is the batch size and M is the number of boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(name='prior_box', shape=[10, 4], dtype='float32')
pbv = fluid.data(name='prior_box_var', shape=[10, 4], dtype='float32')
loc = fluid.data(name='target_box', shape=[2, 21, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[2, 21, 10], dtype='float32')
nmsed_outs, index = fluid.layers.detection_output(scores=scores,
loc=loc,
prior_box=pb,
prior_box_var=pbv,
return_index=True)
"""
helper = LayerHelper("detection_output", **locals())
decoded_box = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=loc,
code_type='decode_center_size')
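# Turn class logits into probabilities and transpose the scores to [N, C, M],
# the layout expected by the multiclass NMS op.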
scores = nn.softmax(input=scores)
scores = nn.transpose(scores, perm=[0, 2, 1])
scores.stop_gradient = True
nmsed_outs = helper.create_variable_for_type_inference(
dtype=decoded_box.dtype)
if return_index:
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="multiclass_nms2",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs,
'Index': index},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
index.stop_gradient = True
else:
helper.append_op(
type="multiclass_nms",
inputs={'Scores': scores,
'BBoxes': decoded_box},
outputs={'Out': nmsed_outs},
attrs={
'background_label': 0,
'nms_threshold': nms_threshold,
'nms_top_k': nms_top_k,
'keep_top_k': keep_top_k,
'score_threshold': score_threshold,
'nms_eta': 1.0,
})
nmsed_outs.stop_gradient = True
if return_index:
return nmsed_outs, index
return nmsed_outs
@templatedoc()
def iou_similarity(x, y, box_normalized=True, name=None):
"""
:alias_main: paddle.nn.functional.iou_similarity
:alias: paddle.nn.functional.iou_similarity,paddle.nn.functional.loss.iou_similarity
:old_api: paddle.fluid.layers.iou_similarity
${comment}
Args:
x (Variable): ${x_comment}. The data type is float32 or float64.
y (Variable): ${y_comment}. The data type is float32 or float64.
box_normalized(bool): Whether to treat the prior box as a normalized box.
Set True by default.
Returns:
Variable: ${out_comment}. The data type is the same as x.
Examples:
.. code-block:: python
import numpy as np
import paddle.fluid as fluid
use_gpu = False
place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
x = fluid.data(name='x', shape=[None, 4], dtype='float32')
y = fluid.data(name='y', shape=[None, 4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)
exe.run(fluid.default_startup_program())
test_program = fluid.default_main_program().clone(for_test=True)
[out_iou] = exe.run(test_program,
fetch_list=iou,
feed={'x': np.array([[0.5, 0.5, 2.0, 2.0],
[0., 0., 1.0, 1.0]]).astype('float32'),
'y': np.array([[1.0, 1.0, 2.5, 2.5]]).astype('float32')})
# out_iou is [[0.2857143],
# [0. ]] with shape: [2, 1]
"""
helper = LayerHelper("iou_similarity", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="iou_similarity",
inputs={"X": x,
"Y": y},
attrs={"box_normalized": box_normalized},
outputs={"Out": out})
return out
@templatedoc()
def box_coder(prior_box,
prior_box_var,
target_box,
code_type="encode_center_size",
box_normalized=True,
name=None,
axis=0):
"""
:alias_main: paddle.nn.functional.box_coder
:alias: paddle.nn.functional.box_coder,paddle.nn.functional.vision.box_coder
:old_api: paddle.fluid.layers.box_coder
**Box Coder Layer**
Encode/Decode the target bounding box with the priorbox information.
The Encoding schema described below:
.. math::
ox = (tx - px) / pw / pxv
oy = (ty - py) / ph / pyv
ow = \log(\abs(tw / pw)) / pwv
oh = \log(\abs(th / ph)) / phv
The Decoding schema described below:
.. math::
ox = (pw * pxv * tx + px) - tw / 2
oy = (ph * pyv * ty + py) - th / 2
ow = \exp(pwv * tw) * pw + tw / 2
oh = \exp(phv * th) * ph + th / 2
where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates,
width and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote
the priorbox's (anchor) center coordinates, width and height. `pxv`,
`pyv`, `pwv`, `phv` denote the variance of the priorbox and `ox`, `oy`,
`ow`, `oh` denote the encoded/decoded coordinates, width and height.
During Box Decoding, two modes for broadcast are supported. Say target
box has shape [N, M, 4], and the shape of prior box can be [N, 4] or
[M, 4]. Then prior box will broadcast to target box along the
assigned axis.
Args:
prior_box(Variable): Box list prior_box is a 2-D Tensor with shape
[M, 4] holds M boxes and data type is float32 or float64. Each box
is represented as [xmin, ymin, xmax, ymax], [xmin, ymin] is the
left top coordinate of the anchor box, if the input is image feature
map, they are close to the origin of the coordinate system.
[xmax, ymax] is the right bottom coordinate of the anchor box.
prior_box_var(List|Variable|None): prior_box_var supports three types
of input. One is a Variable with shape [M, 4] which holds M groups and
whose data type is float32 or float64. The second is a list of
4 elements shared by all boxes, with data type float32 or float64.
The third is None, in which case the variance is not involved in the calculation.
target_box(Variable): This input can be a 2-D LoDTensor with shape
[N, 4] when code_type is 'encode_center_size'. This input also can
be a 3-D Tensor with shape [N, M, 4] when code_type is
'decode_center_size'. Each box is represented as
[xmin, ymin, xmax, ymax]. The data type is float32 or float64.
This tensor can contain LoD information to represent a batch of inputs.
code_type(str): The code type used with the target box. It can be
`encode_center_size` or `decode_center_size`. `encode_center_size`
by default.
box_normalized(bool): Whether to treat the prior box as a normalized box.
Set True by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
axis(int): Which axis in PriorBox to broadcast for box decode,
for example, if axis is 0 and TargetBox has shape [N, M, 4] and
PriorBox has shape [M, 4], then PriorBox will broadcast to [N, M, 4]
for decoding. It is only valid when code type is
`decode_center_size`. Set 0 by default.
Returns:
Variable:
output_box(Variable): When code_type is 'encode_center_size', the
output tensor of box_coder_op with shape [N, M, 4] representing the
result of N target boxes encoded with M Prior boxes and variances.
When code_type is 'decode_center_size', N represents the batch size
and M represents the number of decoded boxes.
Examples:
.. code-block:: python
import paddle.fluid as fluid
# For encode
prior_box_encode = fluid.data(name='prior_box_encode',
shape=[512, 4],
dtype='float32')
target_box_encode = fluid.data(name='target_box_encode',
shape=[81, 4],
dtype='float32')
output_encode = fluid.layers.box_coder(prior_box=prior_box_encode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_encode,
code_type="encode_center_size")
# For decode
prior_box_decode = fluid.data(name='prior_box_decode',
shape=[512, 4],
dtype='float32')
target_box_decode = fluid.data(name='target_box_decode',
shape=[512, 81, 4],
dtype='float32')
output_decode = fluid.layers.box_coder(prior_box=prior_box_decode,
prior_box_var=[0.1,0.1,0.2,0.2],
target_box=target_box_decode,
code_type="decode_center_size",
box_normalized=False,
axis=1)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_coder')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_coder')
helper = LayerHelper("box_coder", **locals())
output_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
inputs = {"PriorBox": prior_box, "TargetBox": target_box}
attrs = {
"code_type": code_type,
"box_normalized": box_normalized,
"axis": axis
}
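# prior_box_var may be given either as a Variable holding per-box variances
# or as a 4-element Python list shared by all boxes.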
if isinstance(prior_box_var, Variable):
inputs['PriorBoxVar'] = prior_box_var
elif isinstance(prior_box_var, list):
attrs['variance'] = prior_box_var
else:
raise TypeError("Input variance of box_coder must be Variable or lisz")
helper.append_op(
type="box_coder",
inputs=inputs,
attrs=attrs,
outputs={"OutputBox": output_box})
return output_box
@templatedoc()
def polygon_box_transform(input, name=None):
"""
${comment}
Args:
input(Variable): The input with shape [batch_size, geometry_channels, height, width].
A Tensor with type float32, float64.
name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
Generally, no setting is required. Default: None.
Returns:
Variable: The output with the same shape as input. A Tensor with type float32, float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.data(name='input', shape=[4, 10, 5, 5], dtype='float32')
out = fluid.layers.polygon_box_transform(input)
"""
check_variable_and_dtype(input, "input", ['float32', 'float64'],
'polygon_box_transform')
helper = LayerHelper("polygon_box_transform", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="polygon_box_transform",
inputs={"Input": input},
attrs={},
outputs={"Output": output})
return output
@templatedoc(op_type="yolov3_loss")
def yolov3_loss(x,
gt_box,
gt_label,
anchors,
anchor_mask,
class_num,
ignore_thresh,
downsample_ratio,
gt_score=None,
use_label_smooth=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolov3_loss
:alias: paddle.nn.functional.yolov3_loss,paddle.nn.functional.vision.yolov3_loss
:old_api: paddle.fluid.layers.yolov3_loss
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
gt_box (Variable): ground-truth boxes, should be in shape of [N, B, 4],
in the third dimension, x, y, w, h should be stored.
x, y is the center coordinate of boxes, w, h are the
width and height, and x, y, w, h should be divided by
the input image height to scale to [0, 1].
N is the batch number and B is the max box number in
an image. The data type is float32 or float64.
gt_label (Variable): class id of ground truth boxes, should be in shape
of [N, B].The data type is int32.
anchors (list|tuple): ${anchors_comment}
anchor_mask (list|tuple): ${anchor_mask_comment}
class_num (int): ${class_num_comment}
ignore_thresh (float): ${ignore_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
gt_score (Variable): mixup score of ground truth boxes, should be in shape
of [N, B]. Default None.
use_label_smooth (bool): ${use_label_smooth_comment}
scale_x_y (float): ${scale_x_y_comment}
Returns:
Variable: A 1-D tensor with shape [N], the value of yolov3 loss
Raises:
TypeError: Input x of yolov3_loss must be Variable
TypeError: Input gtbox of yolov3_loss must be Variable
TypeError: Input gtlabel of yolov3_loss must be Variable
TypeError: Input gtscore of yolov3_loss must be None or Variable
TypeError: Attr anchors of yolov3_loss must be list or tuple
TypeError: Attr class_num of yolov3_loss must be an integer
TypeError: Attr ignore_thresh of yolov3_loss must be a float number
TypeError: Attr use_label_smooth of yolov3_loss must be a bool value
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
gt_box = fluid.data(name='gt_box', shape=[None, 6, 4], dtype='float32')
gt_label = fluid.data(name='gt_label', shape=[None, 6], dtype='int32')
gt_score = fluid.data(name='gt_score', shape=[None, 6], dtype='float32')
anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326]
anchor_mask = [0, 1, 2]
loss = fluid.layers.yolov3_loss(x=x, gt_box=gt_box, gt_label=gt_label,
gt_score=gt_score, anchors=anchors,
anchor_mask=anchor_mask, class_num=80,
ignore_thresh=0.7, downsample_ratio=32)
"""
helper = LayerHelper('yolov3_loss', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolov3_loss must be Variable")
if not isinstance(gt_box, Variable):
raise TypeError("Input gtbox of yolov3_loss must be Variable")
if not isinstance(gt_label, Variable):
raise TypeError("Input gtlabel of yolov3_loss must be Variable")
if gt_score is not None and not isinstance(gt_score, Variable):
raise TypeError("Input gtscore of yolov3_loss must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolov3_loss must be list or tuple")
if not isinstance(anchor_mask, list) and not isinstance(anchor_mask, tuple):
raise TypeError("Attr anchor_mask of yolov3_loss must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolov3_loss must be an integer")
if not isinstance(ignore_thresh, float):
raise TypeError(
"Attr ignore_thresh of yolov3_loss must be a float number")
if not isinstance(use_label_smooth, bool):
raise TypeError(
"Attr use_label_smooth of yolov3_loss must be a bool value")
loss = helper.create_variable_for_type_inference(dtype=x.dtype)
objectness_mask = helper.create_variable_for_type_inference(dtype='int32')
gt_match_mask = helper.create_variable_for_type_inference(dtype='int32')
inputs = {
"X": x,
"GTBox": gt_box,
"GTLabel": gt_label,
}
if gt_score is not None:
inputs["GTScore"] = gt_score
attrs = {
"anchors": anchors,
"anchor_mask": anchor_mask,
"class_num": class_num,
"ignore_thresh": ignore_thresh,
"downsample_ratio": downsample_ratio,
"use_label_smooth": use_label_smooth,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolov3_loss',
inputs=inputs,
outputs={
'Loss': loss,
'ObjectnessMask': objectness_mask,
'GTMatchMask': gt_match_mask
},
attrs=attrs)
return loss
@templatedoc(op_type="yolo_box")
def yolo_box(x,
img_size,
anchors,
class_num,
conf_thresh,
downsample_ratio,
clip_bbox=True,
name=None,
scale_x_y=1.):
"""
:alias_main: paddle.nn.functional.yolo_box
:alias: paddle.nn.functional.yolo_box,paddle.nn.functional.vision.yolo_box
:old_api: paddle.fluid.layers.yolo_box
${comment}
Args:
x (Variable): ${x_comment} The data type is float32 or float64.
img_size (Variable): ${img_size_comment} The data type is int32.
anchors (list|tuple): ${anchors_comment}
class_num (int): ${class_num_comment}
conf_thresh (float): ${conf_thresh_comment}
downsample_ratio (int): ${downsample_ratio_comment}
clip_bbox (bool): ${clip_bbox_comment}
scale_x_y (float): ${scale_x_y_comment}
name (string): The default value is None. Normally there is no need
for user to set this property. For more information,
please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple of two Variables: a 3-D tensor with shape [N, M, 4], the coordinates of boxes,
and a 3-D tensor with shape [N, M, :attr:`class_num`], the classification
scores of boxes.
Raises:
TypeError: Input x of yolo_box must be Variable
TypeError: Attr anchors of yolo box must be list or tuple
TypeError: Attr class_num of yolo box must be an integer
TypeError: Attr conf_thresh of yolo box must be a float number
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[None, 255, 13, 13], dtype='float32')
img_size = fluid.data(name='img_size',shape=[None, 2],dtype='int64')
anchors = [10, 13, 16, 30, 33, 23]
boxes,scores = fluid.layers.yolo_box(x=x, img_size=img_size, class_num=80, anchors=anchors,
conf_thresh=0.01, downsample_ratio=32)
"""
helper = LayerHelper('yolo_box', **locals())
if not isinstance(x, Variable):
raise TypeError("Input x of yolo_box must be Variable")
if not isinstance(img_size, Variable):
raise TypeError("Input img_size of yolo_box must be Variable")
if not isinstance(anchors, list) and not isinstance(anchors, tuple):
raise TypeError("Attr anchors of yolo_box must be list or tuple")
if not isinstance(class_num, int):
raise TypeError("Attr class_num of yolo_box must be an integer")
if not isinstance(conf_thresh, float):
raise TypeError("Attr ignore_thresh of yolo_box must be a float number")
boxes = helper.create_variable_for_type_inference(dtype=x.dtype)
scores = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = {
"anchors": anchors,
"class_num": class_num,
"conf_thresh": conf_thresh,
"downsample_ratio": downsample_ratio,
"clip_bbox": clip_bbox,
"scale_x_y": scale_x_y,
}
helper.append_op(
type='yolo_box',
inputs={
"X": x,
"ImgSize": img_size,
},
outputs={
'Boxes': boxes,
'Scores': scores,
},
attrs=attrs)
return boxes, scores
@templatedoc()
def detection_map(detect_res,
label,
class_num,
background_label=0,
overlap_threshold=0.3,
evaluate_difficult=True,
has_state=None,
input_states=None,
out_states=None,
ap_version='integral'):
"""
${comment}
Args:
detect_res: ${detect_res_comment}
label: ${label_comment}
class_num: ${class_num_comment}
background_label: ${background_label_comment}
overlap_threshold: ${overlap_threshold_comment}
evaluate_difficult: ${evaluate_difficult_comment}
has_state: ${has_state_comment}
input_states: (tuple|None) If not None, It contains 3 elements:
(1) pos_count ${pos_count_comment}.
(2) true_pos ${true_pos_comment}.
(3) false_pos ${false_pos_comment}.
out_states: (tuple|None) If not None, it contains 3 elements.
(1) accum_pos_count ${accum_pos_count_comment}.
(2) accum_true_pos ${accum_true_pos_comment}.
(3) accum_false_pos ${accum_false_pos_comment}.
ap_version: ${ap_type_comment}
Returns:
${map_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
from paddle.fluid.layers import detection
detect_res = fluid.data(
name='detect_res',
shape=[10, 6],
dtype='float32')
label = fluid.data(
name='label',
shape=[10, 6],
dtype='float32')
map_out = detection.detection_map(detect_res, label, 21)
"""
helper = LayerHelper("detection_map", **locals())
def __create_var(type):
return helper.create_variable_for_type_inference(dtype=type)
map_out = __create_var('float32')
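# Reuse caller-provided accumulator states when given; otherwise create fresh
# variables so the op can accumulate detection statistics across batches.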
accum_pos_count_out = out_states[
0] if out_states is not None else __create_var('int32')
accum_true_pos_out = out_states[
1] if out_states is not None else __create_var('float32')
accum_false_pos_out = out_states[
2] if out_states is not None else __create_var('float32')
pos_count = input_states[0] if input_states is not None else None
true_pos = input_states[1] if input_states is not None else None
false_pos = input_states[2] if input_states is not None else None
helper.append_op(
type="detection_map",
inputs={
'Label': label,
'DetectRes': detect_res,
'HasState': has_state,
'PosCount': pos_count,
'TruePos': true_pos,
'FalsePos': false_pos
},
outputs={
'MAP': map_out,
'AccumPosCount': accum_pos_count_out,
'AccumTruePos': accum_true_pos_out,
'AccumFalsePos': accum_false_pos_out
},
attrs={
'overlap_threshold': overlap_threshold,
'evaluate_difficult': evaluate_difficult,
'ap_type': ap_version,
'class_num': class_num,
})
return map_out
def bipartite_match(dist_matrix,
match_type=None,
dist_threshold=None,
name=None):
"""
:alias_main: paddle.nn.functional.bipartite_match
:alias: paddle.nn.functional.bipartite_match,paddle.nn.functional.vision.bipartite_match
:old_api: paddle.fluid.layers.bipartite_match
This operator implements a greedy bipartite matching algorithm, which is
used to obtain the matching with the maximum distance based on the input
distance matrix. For input 2D matrix, the bipartite matching algorithm can
find the matched column for each row (matched means the largest distance),
and it can also find the matched row for each column. This operator only
calculates matched indices from column to row. For each instance,
the number of matched indices is the column number of the input distance
matrix. **The OP only supports CPU**.
There are two outputs, matched indices and distance.
In short, this algorithm greedily matches the best (maximum distance)
row entity to each column entity, and the matched indices are not duplicated
in each row of ColToRowMatchIndices. If a column entity does not match
any row entity, -1 is set in ColToRowMatchIndices.
NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor.
If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size.
If Tensor, the height of ColToRowMatchIndices is 1.
NOTE: This API is a very low level API. It is used by :code:`ssd_loss`
layer. Please consider to use :code:`ssd_loss` instead.
Args:
dist_matrix(Variable): This input is a 2-D LoDTensor with shape
[K, M]. The data type is float32 or float64. It is pair-wise
distance matrix between the entities represented by each row and
each column. For example, assumed one entity is A with shape [K],
another entity is B with shape [M]. The dist_matrix[i][j] is the
distance between A[i] and B[j]. The bigger the distance is, the
better matching the pairs are. NOTE: This tensor can contain LoD
information to represent a batch of inputs. One instance of this
batch can contain different numbers of entities.
match_type(str, optional): The type of matching method, should be
'bipartite' or 'per_prediction'. None ('bipartite') by default.
dist_threshold(float32, optional): If `match_type` is 'per_prediction',
this threshold is to determine the extra matching bboxes based
on the maximum distance, 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
matched_indices(Variable): A 2-D Tensor with shape [N, M]. The data
type is int32. N is the batch size. If match_indices[i][j] is -1, it
means B[j] does not match any entity in i-th instance.
Otherwise, it means B[j] is matched to row
match_indices[i][j] in i-th instance. The row number of
i-th instance is saved in match_indices[i][j].
matched_distance(Variable): A 2-D Tensor with shape [N, M]. The data
type is float32. N is batch size. If match_indices[i][j] is -1,
match_distance[i][j] is also -1.0. Otherwise, assumed
match_distance[i][j] = d, and the row offsets of each instance
are called LoD. Then match_distance[i][j] =
dist_matrix[d+LoD[i]][j].
Examples:
>>> import paddle.fluid as fluid
>>> x = fluid.data(name='x', shape=[None, 4], dtype='float32')
>>> y = fluid.data(name='y', shape=[None, 4], dtype='float32')
>>> iou = fluid.layers.iou_similarity(x=x, y=y)
>>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou)
"""
helper = LayerHelper('bipartite_match', **locals())
match_indices = helper.create_variable_for_type_inference(dtype='int32')
match_distance = helper.create_variable_for_type_inference(
dtype=dist_matrix.dtype)
helper.append_op(
type='bipartite_match',
inputs={'DistMat': dist_matrix},
attrs={
'match_type': match_type,
'dist_threshold': dist_threshold,
},
outputs={
'ColToRowMatchIndices': match_indices,
'ColToRowMatchDist': match_distance
})
return match_indices, match_distance
def target_assign(input,
matched_indices,
negative_indices=None,
mismatch_value=None,
name=None):
"""
:alias_main: paddle.nn.functional.target_assign
:alias: paddle.nn.functional.target_assign,paddle.nn.functional.extension.target_assign
:old_api: paddle.fluid.layers.target_assign
Given the target bounding boxes or labels, this operator assigns
classification and regression targets to each prediction, as well as
weights for each prediction. The weights are used to specify which
predictions do not contribute to the training loss.
For each instance, the outputs `out` and `out_weight` are assigned based on
`match_indices` and `negative_indices`.
Assuming that the row offset for each instance in `input` is called lod,
this operator assigns classification/regression targets by performing the
following steps:
1. Assigning all outputs based on `match_indices`:
.. code-block:: text
If id = match_indices[i][j] > 0,
out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K]
out_weight[i][j] = 1.
Otherwise,
out[i][j][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][j] = 0.
2. Assigning outputs based on `neg_indices` if `neg_indices` is provided:
Assuming that the i-th instance in `neg_indices` is called `neg_indice`,
for the i-th instance:
.. code-block:: text
for id in neg_indice:
out[i][id][0 : K] = {mismatch_value, mismatch_value, ...}
out_weight[i][id] = 1.0
Args:
input (Variable): This input is a 3D LoDTensor with shape [M, P, K].
Data type should be int32 or float32.
matched_indices (Variable): The input matched indices
is 2D Tenosr<int32> with shape [N, P], If MatchIndices[i][j] is -1,
the j-th entity of column is not matched to any entity of row in
i-th instance.
negative_indices (Variable, optional): The input negative example indices
are an optional input with shape [Neg, 1] and int32 type, where Neg is
the total number of negative example indices.
mismatch_value (float32, optional): Fill this value to the mismatched
location.
name (string): The default value is None. Normally there is no need for
user to set this property. For more information, please refer
to :ref:`api_guide_Name`.
Returns:
tuple: A tuple(out, out_weight) is returned.
out (Variable): a 3D Tensor with shape [N, P, K] and the same data type
as `input`; N and P are the same as they are in `matched_indices`,
and K is the same as in `input`.
out_weight (Variable): the weight for output with the shape of [N, P, 1].
Data type is float32.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(
name='x',
shape=[4, 20, 4],
dtype='float',
lod_level=1)
matched_id = fluid.data(
name='indices',
shape=[8, 20],
dtype='int32')
trg, trg_weight = fluid.layers.target_assign(
x,
matched_id,
mismatch_value=0)
"""
helper = LayerHelper('target_assign', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
out_weight = helper.create_variable_for_type_inference(dtype='float32')
helper.append_op(
type='target_assign',
inputs={
'X': input,
'MatchIndices': matched_indices,
'NegIndices': negative_indices
},
outputs={'Out': out,
'OutWeight': out_weight},
attrs={'mismatch_value': mismatch_value})
return out, out_weight
def ssd_loss(location,
confidence,
gt_box,
gt_label,
prior_box,
prior_box_var=None,
background_label=0,
overlap_threshold=0.5,
neg_pos_ratio=3.0,
neg_overlap=0.5,
loc_loss_weight=1.0,
conf_loss_weight=1.0,
match_type='per_prediction',
mining_type='max_negative',
normalize=True,
sample_size=None):
"""
:alias_main: paddle.nn.functional.ssd_loss
:alias: paddle.nn.functional.ssd_loss,paddle.nn.functional.loss.ssd_loss
:old_api: paddle.fluid.layers.ssd_loss
**Multi-box loss layer for object detection algorithm of SSD**
This layer is to compute detection loss for SSD given the location offset
predictions, confidence predictions, prior boxes and ground-truth bounding
boxes and labels, and the type of hard example mining. The returned loss
is a weighted sum of the localization loss (or regression loss) and
confidence loss (or classification loss) by performing the following steps:
1. Find matched bounding box by bipartite matching algorithm.
1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
1.2 Compute matched bounding box by bipartite matching algorithm.
2. Compute confidence for mining hard examples
2.1. Get the target label based on matched indices.
2.2. Compute confidence loss.
3. Apply hard example mining to get the negative example indices and update
the matched indices.
4. Assign classification and regression targets
4.1. Encoded bbox according to the prior boxes.
4.2. Assign regression targets.
4.3. Assign classification targets.
5. Compute the overall objective loss.
5.1 Compute confidence loss.
5.2 Compute localization loss.
5.3 Compute the overall weighted loss.
Args:
location (Variable): The location predictions are a 3D Tensor with
shape [N, Np, 4], N is the batch size, Np is total number of
predictions for each instance. 4 is the number of coordinate values,
the layout is [xmin, ymin, xmax, ymax].The data type is float32 or
float64.
confidence (Variable): The confidence predictions are a 3D Tensor
with shape [N, Np, C], N and Np are the same as they are in
`location`, C is the class number.The data type is float32 or
float64.
gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
bboxes of mini-batch input.The data type is float32 or float64.
gt_label (Variable): The ground-truth labels are a 2D LoDTensor
with shape [Ng, 1].Ng is the total number of ground-truth bboxes of
mini-batch input, 1 is the number of class. The data type is float32
or float64.
prior_box (Variable): The prior boxes are a 2D Tensor with shape [Np, 4].
Np and 4 are the same as they are in `location`. The data type is
float32 or float64.
prior_box_var (Variable): The variance of prior boxes are a 2D Tensor
with shape [Np, 4]. Np and 4 are the same as they are in `prior_box`
background_label (int): The index of background label, 0 by default.
overlap_threshold (float): If match_type is 'per_prediction', use
'overlap_threshold' to determine the extra matching bboxes when finding \
matched boxes. 0.5 by default.
neg_pos_ratio (float): The ratio of the negative boxes to the positive
boxes, used only when mining_type is 'max_negative', 3.0 by default.
neg_overlap (float): The negative overlap upper bound for the unmatched
predictions. Use only when mining_type is 'max_negative',
0.5 by default.
loc_loss_weight (float): Weight for localization loss, 1.0 by default.
conf_loss_weight (float): Weight for confidence loss, 1.0 by default.
match_type (str): The type of matching method during training, should
be 'bipartite' or 'per_prediction', 'per_prediction' by default.
mining_type (str): The hard example mining type, should be 'hard_example'
or 'max_negative', now only support `max_negative`.
normalize (bool): Whether to normalize the SSD loss by the total number
of output locations, True by default.
sample_size (int): The max sample size of negative box, used only when
mining_type is 'hard_example'.
Returns:
Variable(Tensor): The weighted sum of the localization loss and confidence loss, \
with shape [N * Np, 1]; N and Np are the same as they are in
`location`. The data type is float32 or float64.
Raises:
ValueError: If mining_type is 'hard_example'; currently only the \
`max_negative` mining type is supported.
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box',
shape=[10, 4],
dtype='float32')
pbv = fluid.data(
name='prior_box_var',
shape=[10, 4],
dtype='float32')
loc = fluid.data(name='target_box', shape=[10, 4], dtype='float32')
scores = fluid.data(name='scores', shape=[10, 21], dtype='float32')
gt_box = fluid.data(
name='gt_box', shape=[4], lod_level=1, dtype='float32')
gt_label = fluid.data(
name='gt_label', shape=[1], lod_level=1, dtype='float32')
loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
"""
helper = LayerHelper('ssd_loss', **locals())
if mining_type != 'max_negative':
raise ValueError("Only support mining_type == max_negative now.")
num, num_prior, num_class = confidence.shape
conf_shape = nn.shape(confidence)
def __reshape_to_2d(var):
return nn.flatten(x=var, axis=2)
# 1. Find matched bounding box by prior box.
# 1.1 Compute IOU similarity between ground-truth boxes and prior boxes.
iou = iou_similarity(x=gt_box, y=prior_box)
# 1.2 Compute matched bounding box by bipartite matching algorithm.
matched_indices, matched_dist = bipartite_match(iou, match_type,
overlap_threshold)
# 2. Compute confidence for mining hard examples
# 2.1. Get the target label based on matched indices
gt_label = nn.reshape(
x=gt_label, shape=(len(gt_label.shape) - 1) * (0, ) + (-1, 1))
gt_label.stop_gradient = True
target_label, _ = target_assign(
gt_label, matched_indices, mismatch_value=background_label)
# 2.2. Compute confidence loss.
# Reshape confidence to 2D tensor.
confidence = __reshape_to_2d(confidence)
target_label = tensor.cast(x=target_label, dtype='int64')
target_label = __reshape_to_2d(target_label)
target_label.stop_gradient = True
conf_loss = softmax_with_cross_entropy(confidence, target_label)
# 3. Mining hard examples
actual_shape = nn.slice(conf_shape, axes=[0], starts=[0], ends=[2])
actual_shape.stop_gradient = True
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
conf_loss = nn.reshape(
x=conf_loss, shape=(-1, 0), actual_shape=actual_shape)
conf_loss.stop_gradient = True
neg_indices = helper.create_variable_for_type_inference(dtype='int32')
dtype = matched_indices.dtype
updated_matched_indices = helper.create_variable_for_type_inference(
dtype=dtype)
helper.append_op(
type='mine_hard_examples',
inputs={
'ClsLoss': conf_loss,
'LocLoss': None,
'MatchIndices': matched_indices,
'MatchDist': matched_dist,
},
outputs={
'NegIndices': neg_indices,
'UpdatedMatchIndices': updated_matched_indices
},
attrs={
'neg_pos_ratio': neg_pos_ratio,
'neg_dist_threshold': neg_overlap,
'mining_type': mining_type,
'sample_size': sample_size,
})
# 4. Assign classification and regression targets
# 4.1. Encoded bbox according to the prior boxes.
encoded_bbox = box_coder(
prior_box=prior_box,
prior_box_var=prior_box_var,
target_box=gt_box,
code_type='encode_center_size')
# 4.2. Assign regression targets
target_bbox, target_loc_weight = target_assign(
encoded_bbox, updated_matched_indices, mismatch_value=background_label)
# 4.3. Assign classification targets
target_label, target_conf_weight = target_assign(
gt_label,
updated_matched_indices,
negative_indices=neg_indices,
mismatch_value=background_label)
# 5. Compute loss.
# 5.1 Compute confidence loss.
target_label = __reshape_to_2d(target_label)
target_label = tensor.cast(x=target_label, dtype='int64')
conf_loss = softmax_with_cross_entropy(confidence, target_label)
target_conf_weight = __reshape_to_2d(target_conf_weight)
conf_loss = conf_loss * target_conf_weight
# the target_label and target_conf_weight do not have gradient.
target_label.stop_gradient = True
target_conf_weight.stop_gradient = True
# 5.2 Compute regression loss.
location = __reshape_to_2d(location)
target_bbox = __reshape_to_2d(target_bbox)
loc_loss = nn.smooth_l1(location, target_bbox)
target_loc_weight = __reshape_to_2d(target_loc_weight)
loc_loss = loc_loss * target_loc_weight
# the target_bbox and target_loc_weight do not have gradient.
target_bbox.stop_gradient = True
target_loc_weight.stop_gradient = True
# 5.3 Compute overall weighted loss.
loss = conf_loss_weight * conf_loss + loc_loss_weight * loc_loss
# reshape to [N, Np], N is the batch size and Np is the prior box number.
# shape=(-1, 0) is set for compile-time, the correct shape is set by
# actual_shape in runtime.
loss = nn.reshape(x=loss, shape=(-1, 0), actual_shape=actual_shape)
loss = nn.reduce_sum(loss, dim=1, keep_dim=True)
if normalize:
normalizer = nn.reduce_sum(target_loc_weight)
loss = loss / normalizer
return loss
def prior_box(input,
image,
min_sizes,
max_sizes=None,
aspect_ratios=[1.],
variance=[0.1, 0.1, 0.2, 0.2],
flip=False,
clip=False,
steps=[0.0, 0.0],
offset=0.5,
name=None,
min_max_aspect_ratios_order=False):
"""
:alias_main: paddle.nn.functional.prior_box
:alias: paddle.nn.functional.prior_box,paddle.nn.functional.vision.prior_box
:old_api: paddle.fluid.layers.prior_box
This op generates prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
Each position of the input produces N prior boxes, where N is determined by
the count of min_sizes, max_sizes and aspect_ratios. The size of each
box lies in the interval (min_size, max_size), and the boxes are generated in
sequence according to the aspect_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64.
min_sizes(list|tuple|float): the min sizes of generated prior boxes.
max_sizes(list|tuple|None): the max sizes of generated prior boxes.
Default: None.
aspect_ratios(list|tuple|float): the aspect ratios of generated
prior boxes. Default: [1.].
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default:False.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
steps(list|tuple): Prior box steps across width and height. If
step[0] equals 0.0 or step[1] equals 0.0, the prior box step across the
height or width of the input will be calculated automatically.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
min_max_aspect_ratios_order(bool): If set True, the output prior box is
in order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the weight order of the
convolution layer that follows it, but does not affect the final
detection results. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variable (boxes, variances)
boxes(Variable): the output prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
variances(Variable): the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4].
H is the height of input, W is the width of input,
num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(fluid.default_main_program(),
feed={"input":input_data,"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (6, 9, 1, 4)
# print(var_out.shape)
# (6, 9, 1, 4)
# imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.prior_box(
input=input,
image=image,
min_sizes=[100.],
clip=True,
flip=True)
# print(box.shape)
# [6L, 9L, 1L, 4L]
# print(var.shape)
# [6L, 9L, 1L, 4L]
"""
helper = LayerHelper("prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(
input, 'input', ['uint8', 'int8', 'float32', 'float64'], 'prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
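# Wrap scalar min_sizes / aspect_ratios into lists so the op attributes are
# always list-typed, and require steps to be a (step_width, step_height) pair.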
if not _is_list_or_tuple_(min_sizes):
min_sizes = [min_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple '
'with length 2, (step_width, step_height).')
min_sizes = list(map(float, min_sizes))
aspect_ratios = list(map(float, aspect_ratios))
steps = list(map(float, steps))
attrs = {
'min_sizes': min_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'flip': flip,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'min_max_aspect_ratios_order': min_max_aspect_ratios_order
}
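# max_sizes is optional; only forward it to the op when a positive value is given.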
if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0:
if not _is_list_or_tuple_(max_sizes):
max_sizes = [max_sizes]
attrs['max_sizes'] = max_sizes
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def density_prior_box(input,
image,
densities=None,
fixed_sizes=None,
fixed_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
clip=False,
steps=[0.0, 0.0],
offset=0.5,
flatten_to_2d=False,
name=None):
"""
:alias_main: paddle.nn.functional.density_prior_box
:alias: paddle.nn.functional.density_prior_box,paddle.nn.functional.vision.density_prior_box
:old_api: paddle.fluid.layers.density_prior_box
This op generates density prior boxes for the SSD (Single Shot MultiBox Detector)
algorithm. Each position of the input produces N prior boxes, where N is
determined by the count of densities, fixed_sizes and fixed_ratios.
Boxes centered at grid points around each input position are generated by
this operator; the grid points are determined by densities, and the count of
density prior boxes is determined by fixed_sizes and fixed_ratios.
Note that the number of fixed_sizes must equal the number of densities.
For densities_i in densities:
.. math::
N\_density\_prior\_box = SUM(N\_fixed\_ratios * densities\_i^2)
N_density_prior_box is the number of density_prior_box and N_fixed_ratios is the number of fixed_ratios.
Parameters:
input(Variable): 4-D tensor(NCHW), the data type should be float32 or float64.
image(Variable): 4-D tensor(NCHW), the input image data of PriorBoxOp,
the data type should be float32 or float64; the layout is NCHW.
densities(list|tuple|None): The densities of generated density prior
boxes, this attribute should be a list or tuple of integers.
Default: None.
fixed_sizes(list|tuple|None): The fixed sizes of generated density
prior boxes; this attribute should be a list or tuple of the same
length as :attr:`densities`. Default: None.
fixed_ratios(list|tuple|None): The fixed ratios of generated density
prior boxes, if this attribute is not set and :attr:`densities`
and :attr:`fixed_sizes` are set, :attr:`aspect_ratios` will be used
to generate density prior boxes.
variance(list|tuple): The variances to be encoded in density prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
clip(bool): Whether to clip out of boundary boxes. Default: False.
steps(list|tuple): Prior boxes step across width and height. If
steps[0] equals 0.0 or steps[1] equals 0.0, the density prior boxes step across
width or height of the input will be automatically calculated.
Default: [0., 0.]
offset(float): Prior boxes center offset. Default: 0.5
flatten_to_2d(bool): Whether to flatten output prior boxes and variance
to 2D shape, the second dim is 4. Default: False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tuple: A tuple with two Variables (boxes, variances)
boxes: the output density prior boxes of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
variances: the expanded variances of PriorBox.
4-D tensor, the layout is [H, W, num_priors, 4] when flatten_to_2d is False.
2-D tensor, the layout is [H * W * num_priors, 4] when flatten_to_2d is True.
H is the height of input, W is the width of input, and num_priors is the total box count of each position of input.
Examples:
.. code-block:: python
#declarative mode
import paddle.fluid as fluid
import numpy as np
input = fluid.data(name="input", shape=[None,3,6,9])
image = fluid.data(name="image", shape=[None,3,9,12])
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True,
flatten_to_2d=True)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
# prepare a batch of data
input_data = np.random.rand(1,3,6,9).astype("float32")
image_data = np.random.rand(1,3,9,12).astype("float32")
box_out, var_out = exe.run(
fluid.default_main_program(),
feed={"input":input_data,
"image":image_data},
fetch_list=[box,var],
return_numpy=True)
# print(box_out.shape)
# (1134, 4)
# print(var_out.shape)
# (1134, 4)
#imperative mode
import paddle.fluid.dygraph as dg
with dg.guard(place) as g:
input = dg.to_variable(input_data)
image = dg.to_variable(image_data)
box, var = fluid.layers.density_prior_box(
input=input,
image=image,
densities=[4, 2, 1],
fixed_sizes=[32.0, 64.0, 128.0],
fixed_ratios=[1.],
clip=True)
# print(box.shape)
# [6L, 9L, 21L, 4L]
# print(var.shape)
# [6L, 9L, 21L, 4L]
"""
helper = LayerHelper("density_prior_box", **locals())
dtype = helper.input_dtype()
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'density_prior_box')
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
check_type(densities, 'densities', (list, tuple), 'density_prior_box')
check_type(fixed_sizes, 'fixed_sizes', (list, tuple), 'density_prior_box')
check_type(fixed_ratios, 'fixed_ratios', (list, tuple), 'density_prior_box')
if len(densities) != len(fixed_sizes):
raise ValueError('densities and fixed_sizes length should be equal.')
if not (_is_list_or_tuple_(steps) and len(steps) == 2):
raise ValueError('steps should be a list or tuple '
'with length 2, (step_width, step_height).')
densities = list(map(int, densities))
fixed_sizes = list(map(float, fixed_sizes))
fixed_ratios = list(map(float, fixed_ratios))
steps = list(map(float, steps))
attrs = {
'variances': variance,
'clip': clip,
'step_w': steps[0],
'step_h': steps[1],
'offset': offset,
'densities': densities,
'fixed_sizes': fixed_sizes,
'fixed_ratios': fixed_ratios,
'flatten_to_2d': flatten_to_2d,
}
box = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="density_prior_box",
inputs={"Input": input,
"Image": image},
outputs={"Boxes": box,
"Variances": var},
attrs=attrs, )
box.stop_gradient = True
var.stop_gradient = True
return box, var
def multi_box_head(inputs,
image,
base_size,
num_classes,
aspect_ratios,
min_ratio=None,
max_ratio=None,
min_sizes=None,
max_sizes=None,
steps=None,
step_w=None,
step_h=None,
offset=0.5,
variance=[0.1, 0.1, 0.2, 0.2],
flip=True,
clip=False,
kernel_size=1,
pad=0,
stride=1,
name=None,
min_max_aspect_ratios_order=False):
"""
:api_attr: Static Graph
Based on the SSD (Single Shot MultiBox Detector) algorithm, generate prior boxes,
regression locations and classification confidences on multiple input feature
maps, then output the concatenated results. For details of this algorithm,
please refer to Section 2.2 of the SSD paper `SSD: Single Shot MultiBox Detector
<https://arxiv.org/abs/1512.02325>`_ .
Args:
inputs (list(Variable)|tuple(Variable)): The list of input variables,
the format of all Variables are 4-D Tensor, layout is NCHW.
Data type should be float32 or float64.
image (Variable): The input image, layout is NCHW. Data type should be
the same as inputs.
base_size(int): the base_size is the input image size. When len(inputs) > 2
and `min_size` and `max_size` are None, the `min_size` and `max_size`
are calculated by `base_size`, `min_ratio` and `max_ratio`. The
formula is as follows:
.. code-block:: text
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
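Working through this formula, with base_size=300, min_ratio=20, max_ratio=90 and six inputs
(as in Example 1 below), step is 17, giving min_sizes = [30.0, 60.0, 111.0, 162.0, 213.0, 264.0]
and max_sizes = [60.0, 111.0, 162.0, 213.0, 264.0, 315.0].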
num_classes(int): The number of classes.
aspect_ratios(list(float) | tuple(float)): the aspect ratios of generated
prior boxes. The length of input and aspect_ratios must be equal.
min_ratio(int): the min ratio of generated prior boxes.
max_ratio(int): the max ratio of generated prior boxes.
min_sizes(list|tuple|None): If `len(inputs) <= 2`,
min_sizes must be set up, and the length of min_sizes
should equal the length of inputs. Default: None.
max_sizes(list|tuple|None): If `len(inputs) <= 2`,
max_sizes must be set up, and the length of max_sizes
should equal the length of inputs. Default: None.
steps(list|tuple): If step_w and step_h are the same,
step_w and step_h can be replaced by steps.
step_w(list|tuple): Prior boxes step
across width. If step_w[i] == 0.0, the prior boxes step
across width of the inputs[i] will be automatically
calculated. Default: None.
step_h(list|tuple): Prior boxes step across height. If
step_h[i] == 0.0, the prior boxes step across height of
the inputs[i] will be automatically calculated. Default: None.
offset(float): Prior boxes center offset. Default: 0.5
variance(list|tuple): the variances to be encoded in prior boxes.
Default:[0.1, 0.1, 0.2, 0.2].
flip(bool): Whether to flip aspect ratios. Default: True.
clip(bool): Whether to clip out-of-boundary boxes. Default: False.
kernel_size(int): The kernel size of conv2d. Default: 1.
pad(int|list|tuple): The padding of conv2d. Default:0.
stride(int|list|tuple): The stride of conv2d. Default: 1.
name(str): The default value is None. Normally there is no need
for user to set this property. For more information, please
refer to :ref:`api_guide_Name`.
min_max_aspect_ratios_order(bool): If set to True, the output prior boxes are
in the order of [min, max, aspect_ratios], which is consistent with
Caffe. Please note, this order affects the order of the weights of the
following convolution layer, but does not affect the final
detection results. Default: False.
Returns:
tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances)
mbox_loc (Variable): The predicted boxes' location of the inputs. The
layout is [N, num_priors, 4], where N is batch size, ``num_priors``
is the number of prior boxes. Data type is the same as input.
mbox_conf (Variable): The predicted boxes' confidence of the inputs.
The layout is [N, num_priors, C], where ``N`` and ``num_priors``
has the same meaning as above. C is the number of Classes.
Data type is the same as input.
boxes (Variable): the output prior boxes. The layout is [num_priors, 4].
The meaning of num_priors is the same as above.
Data type is the same as input.
variances (Variable): the expanded variances for prior boxes.
The layout is [num_priors, 4]. Data type is the same as input.
Examples 1: set min_ratio and max_ratio:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_ratio=20,
max_ratio=90,
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
Examples 2: set min_sizes and max_sizes:
.. code-block:: python
import paddle.fluid as fluid
images = fluid.data(name='data', shape=[None, 3, 300, 300], dtype='float32')
conv1 = fluid.data(name='conv1', shape=[None, 512, 19, 19], dtype='float32')
conv2 = fluid.data(name='conv2', shape=[None, 1024, 10, 10], dtype='float32')
conv3 = fluid.data(name='conv3', shape=[None, 512, 5, 5], dtype='float32')
conv4 = fluid.data(name='conv4', shape=[None, 256, 3, 3], dtype='float32')
conv5 = fluid.data(name='conv5', shape=[None, 256, 2, 2], dtype='float32')
conv6 = fluid.data(name='conv6', shape=[None, 128, 1, 1], dtype='float32')
mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head(
inputs=[conv1, conv2, conv3, conv4, conv5, conv6],
image=images,
num_classes=21,
min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0],
max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
base_size=300,
offset=0.5,
flip=True,
clip=True)
"""
def _reshape_with_axis_(input, axis=1):
out = nn.flatten(x=input, axis=axis)
return out
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
def _is_list_or_tuple_and_equal(data, length, err_info):
if not (_is_list_or_tuple_(data) and len(data) == length):
raise ValueError(err_info)
if not _is_list_or_tuple_(inputs):
raise ValueError('inputs should be a list or tuple.')
num_layer = len(inputs)
if num_layer <= 2:
assert min_sizes is not None and max_sizes is not None
assert len(min_sizes) == num_layer and len(max_sizes) == num_layer
elif min_sizes is None and max_sizes is None:
min_sizes = []
max_sizes = []
step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2)))
for ratio in six.moves.range(min_ratio, max_ratio + 1, step):
min_sizes.append(base_size * ratio / 100.)
max_sizes.append(base_size * (ratio + step) / 100.)
min_sizes = [base_size * .10] + min_sizes
max_sizes = [base_size * .20] + max_sizes
if aspect_ratios:
_is_list_or_tuple_and_equal(
aspect_ratios, num_layer,
'aspect_ratios should be list or tuple, and the length of inputs '
'and aspect_ratios should be the same.')
if step_h is not None:
_is_list_or_tuple_and_equal(
step_h, num_layer,
'step_h should be list or tuple, and the length of inputs and '
'step_h should be the same.')
if step_w is not None:
_is_list_or_tuple_and_equal(
step_w, num_layer,
'step_w should be list or tuple, and the length of inputs and '
'step_w should be the same.')
if steps is not None:
_is_list_or_tuple_and_equal(
steps, num_layer,
'steps should be list or tuple, and the length of inputs and '
'steps should be the same.')
step_w = steps
step_h = steps
mbox_locs = []
mbox_confs = []
box_results = []
var_results = []
for i, input in enumerate(inputs):
min_size = min_sizes[i]
max_size = max_sizes[i]
if not _is_list_or_tuple_(min_size):
min_size = [min_size]
if not _is_list_or_tuple_(max_size):
max_size = [max_size]
aspect_ratio = []
if aspect_ratios is not None:
aspect_ratio = aspect_ratios[i]
if not _is_list_or_tuple_(aspect_ratio):
aspect_ratio = [aspect_ratio]
step = [step_w[i] if step_w else 0.0, step_h[i] if step_h else 0.0]
box, var = prior_box(input, image, min_size, max_size, aspect_ratio,
variance, flip, clip, step, offset, None,
min_max_aspect_ratios_order)
box_results.append(box)
var_results.append(var)
num_boxes = box.shape[2]
# get loc
num_loc_output = num_boxes * 4
mbox_loc = nn.conv2d(
input=input,
num_filters=num_loc_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
mbox_loc = nn.transpose(mbox_loc, perm=[0, 2, 3, 1])
mbox_loc_flatten = nn.flatten(mbox_loc, axis=1)
mbox_locs.append(mbox_loc_flatten)
# get conf
num_conf_output = num_boxes * num_classes
conf_loc = nn.conv2d(
input=input,
num_filters=num_conf_output,
filter_size=kernel_size,
padding=pad,
stride=stride)
conf_loc = nn.transpose(conf_loc, perm=[0, 2, 3, 1])
conf_loc_flatten = nn.flatten(conf_loc, axis=1)
mbox_confs.append(conf_loc_flatten)
if len(box_results) == 1:
box = box_results[0]
var = var_results[0]
mbox_locs_concat = mbox_locs[0]
mbox_confs_concat = mbox_confs[0]
else:
reshaped_boxes = []
reshaped_vars = []
for i in range(len(box_results)):
reshaped_boxes.append(_reshape_with_axis_(box_results[i], axis=3))
reshaped_vars.append(_reshape_with_axis_(var_results[i], axis=3))
box = tensor.concat(reshaped_boxes)
var = tensor.concat(reshaped_vars)
mbox_locs_concat = tensor.concat(mbox_locs, axis=1)
mbox_locs_concat = nn.reshape(mbox_locs_concat, shape=[0, -1, 4])
mbox_confs_concat = tensor.concat(mbox_confs, axis=1)
mbox_confs_concat = nn.reshape(
mbox_confs_concat, shape=[0, -1, num_classes])
box.stop_gradient = True
var.stop_gradient = True
return mbox_locs_concat, mbox_confs_concat, box, var
def anchor_generator(input,
anchor_sizes=None,
aspect_ratios=None,
variance=[0.1, 0.1, 0.2, 0.2],
stride=None,
offset=0.5,
name=None):
"""
:alias_main: paddle.nn.functional.anchor_generator
:alias: paddle.nn.functional.anchor_generator,paddle.nn.functional.vision.anchor_generator
:old_api: paddle.fluid.layers.anchor_generator
**Anchor generator operator**
Generate anchors for Faster RCNN algorithm.
Each position of the input produces N anchors, where N =
size(anchor_sizes) * size(aspect_ratios). The generated anchors are
ordered by looping over aspect_ratios first, then anchor_sizes.
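For example, with anchor_sizes=[64, 128, 256, 512] and aspect_ratios=[0.5, 1.0, 2.0]
as in the example below, N = 4 * 3 = 12 anchors are generated at every position of the input feature map.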
Args:
input(Variable): 4-D Tensor with shape [N,C,H,W]. The input feature map.
anchor_sizes(float32|list|tuple, optional): The anchor sizes of generated
anchors, given in absolute pixels e.g. [64., 128., 256., 512.].
For instance, the anchor size of 64 means the area of this anchor
equals 64**2. None by default.
aspect_ratios(float32|list|tuple, optional): The height / width ratios
of generated anchors, e.g. [0.5, 1.0, 2.0]. None by default.
variance(list|tuple, optional): The variances to be used in box
regression deltas. The data type is float32, [0.1, 0.1, 0.2, 0.2] by
default.
stride(list|tuple, optional): The anchors stride across width and height.
The data type is float32. e.g. [16.0, 16.0]. None by default.
offset(float32, optional): Prior boxes center offset. 0.5 by default.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and None
by default.
Returns:
Tuple:
Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4].
H is the height of input, W is the width of input,
num_anchors is the box count of each position.
Each anchor is in (xmin, ymin, xmax, ymax) format and is unnormalized.
Variances(Variable): The expanded variances of anchors
with a layout of [H, W, num_priors, 4].
H is the height of input, W is the width of input, and
num_anchors is the box count of each position.
Each variance is in (xcenter, ycenter, w, h) format.
Examples:
.. code-block:: python
import paddle.fluid as fluid
conv1 = fluid.data(name='conv1', shape=[None, 48, 16, 16], dtype='float32')
anchor, var = fluid.layers.anchor_generator(
input=conv1,
anchor_sizes=[64, 128, 256, 512],
aspect_ratios=[0.5, 1.0, 2.0],
variance=[0.1, 0.1, 0.2, 0.2],
stride=[16.0, 16.0],
offset=0.5)
"""
helper = LayerHelper("anchor_generator", **locals())
dtype = helper.input_dtype()
def _is_list_or_tuple_(data):
return (isinstance(data, list) or isinstance(data, tuple))
if not _is_list_or_tuple_(anchor_sizes):
anchor_sizes = [anchor_sizes]
if not _is_list_or_tuple_(aspect_ratios):
aspect_ratios = [aspect_ratios]
if not (_is_list_or_tuple_(stride) and len(stride) == 2):
raise ValueError('stride should be a list or tuple '
'with length 2, (stride_width, stride_height).')
anchor_sizes = list(map(float, anchor_sizes))
aspect_ratios = list(map(float, aspect_ratios))
stride = list(map(float, stride))
attrs = {
'anchor_sizes': anchor_sizes,
'aspect_ratios': aspect_ratios,
'variances': variance,
'stride': stride,
'offset': offset
}
anchor = helper.create_variable_for_type_inference(dtype)
var = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="anchor_generator",
inputs={"Input": input},
outputs={"Anchors": anchor,
"Variances": var},
attrs=attrs, )
anchor.stop_gradient = True
var.stop_gradient = True
return anchor, var
def roi_perspective_transform(input,
rois,
transformed_height,
transformed_width,
spatial_scale=1.0,
name=None):
"""
**The** `rois` **of this op should be a LoDTensor.**
ROI perspective transform op applies a perspective transform to map each roi into a
rectangular region. A perspective transform is a type of transformation in linear algebra.
Parameters:
input (Variable): 4-D Tensor, input of ROIPerspectiveTransformOp. The format of
input tensor is NCHW. Where N is batch size, C is the
number of input channels, H is the height of the feature,
and W is the width of the feature. The data type is float32.
rois (Variable): 2-D LoDTensor, ROIs (Regions of Interest) to be transformed.
It should be a 2-D LoDTensor of shape (num_rois, 8). Given as
[[x1, y1, x2, y2, x3, y3, x4, y4], ...], (x1, y1) is the
top left coordinates, and (x2, y2) is the top right
coordinates, and (x3, y3) is the bottom right coordinates,
and (x4, y4) is the bottom left coordinates. The data type is the
same as `input`
transformed_height (int): The height of transformed output.
transformed_width (int): The width of transformed output.
spatial_scale (float): Spatial scale factor to scale ROI coords. Default: 1.0
name(str, optional): The default value is None.
Normally there is no need for user to set this property.
For more information, please refer to :ref:`api_guide_Name`
Returns:
A tuple with three Variables. (out, mask, transform_matrix)
out: The output of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, channels, transformed_h, transformed_w). The data type is the same as `input`
mask: The mask of ROIPerspectiveTransformOp which is a 4-D tensor with shape
(num_rois, 1, transformed_h, transformed_w). The data type is int32
transform_matrix: The transform matrix of ROIPerspectiveTransformOp which is
a 2-D tensor with shape (num_rois, 9). The data type is the same as `input`
Return Type:
tuple
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.data(name='x', shape=[100, 256, 28, 28], dtype='float32')
rois = fluid.data(name='rois', shape=[None, 8], lod_level=1, dtype='float32')
out, mask, transform_matrix = fluid.layers.roi_perspective_transform(x, rois, 7, 7, 1.0)
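# Given the shapes above, out has shape (num_rois, 256, 7, 7),
# mask has shape (num_rois, 1, 7, 7) and transform_matrix has shape (num_rois, 9).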
"""
check_variable_and_dtype(input, 'input', ['float32'],
'roi_perspective_transform')
check_variable_and_dtype(rois, 'rois', ['float32'],
'roi_perspective_transform')
check_type(transformed_height, 'transformed_height', int,
'roi_perspective_transform')
check_type(transformed_width, 'transformed_width', int,
'roi_perspective_transform')
check_type(spatial_scale, 'spatial_scale', float,
'roi_perspective_transform')
helper = LayerHelper('roi_perspective_transform', **locals())
dtype = helper.input_dtype()
out = helper.create_variable_for_type_inference(dtype)
mask = helper.create_variable_for_type_inference(dtype="int32")
transform_matrix = helper.create_variable_for_type_inference(dtype)
out2in_idx = helper.create_variable_for_type_inference(dtype="int32")
out2in_w = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type="roi_perspective_transform",
inputs={"X": input,
"ROIs": rois},
outputs={
"Out": out,
"Out2InIdx": out2in_idx,
"Out2InWeights": out2in_w,
"Mask": mask,
"TransformMatrix": transform_matrix
},
attrs={
"transformed_height": transformed_height,
"transformed_width": transformed_width,
"spatial_scale": spatial_scale
})
return out, mask, transform_matrix
def generate_proposal_labels(rpn_rois,
gt_classes,
is_crowd,
gt_boxes,
im_info,
batch_size_per_im=256,
fg_fraction=0.25,
fg_thresh=0.25,
bg_thresh_hi=0.5,
bg_thresh_lo=0.0,
bbox_reg_weights=[0.1, 0.1, 0.2, 0.2],
class_nums=None,
use_random=True,
is_cls_agnostic=False,
is_cascade_rcnn=False):
"""
:alias_main: paddle.nn.functional.generate_proposal_labels
:alias: paddle.nn.functional.generate_proposal_labels,paddle.nn.functional.vision.generate_proposal_labels
:old_api: paddle.fluid.layers.generate_proposal_labels
**Generate Proposal Labels of Faster-RCNN**
Given the GenerateProposalOp output bounding boxes and the groundtruth, this operator
samples foreground and background boxes and computes loss targets.
RpnRois are the output boxes of RPN processed by generate_proposal_op. These boxes
are combined with the groundtruth boxes and sampled according to batch_size_per_im and fg_fraction.
If an instance has a groundtruth overlap greater than fg_thresh, it is considered a foreground sample.
If an instance has a groundtruth overlap greater than bg_thresh_lo and lower than bg_thresh_hi,
it is considered a background sample.
After all foreground and background boxes are chosen (the so-called RoIs),
random sampling is applied to make sure
the number of foreground boxes is no more than batch_size_per_im * fg_fraction.
For each box in RoIs, we assign the classification target (class label) and regression target (box label) to it.
Finally, BboxInsideWeights and BboxOutsideWeights are used to specify whether a box contributes to the training loss.
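For example, with the default batch_size_per_im=256 and fg_fraction=0.25, at most 256 * 0.25 = 64 foreground RoIs are sampled per image.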
Args:
rpn_rois(Variable): A 2-D LoDTensor with shape [N, 4]. N is the number of the GenerateProposalOp's output, each element is a bounding box with [xmin, ymin, xmax, ymax] format. The data type can be float32 or float64.
gt_classes(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a class label of groundtruth. The data type must be int32.
is_crowd(Variable): A 2-D LoDTensor with shape [M, 1]. M is the number of groundtruth, each element is a flag indicates whether a groundtruth is crowd. The data type must be int32.
gt_boxes(Variable): A 2-D LoDTensor with shape [M, 4]. M is the number of groundtruth, each element is a bounding box with [xmin, ymin, xmax, ymax] format.
im_info(Variable): A 2-D LoDTensor with shape [B, 3]. B is the number of input images, each element consists of im_height, im_width, im_scale.
batch_size_per_im(int): Batch size of rois per images. The data type must be int32.
fg_fraction(float): Foreground fraction in total batch_size_per_im. The data type must be float32.
fg_thresh(float): Overlap threshold which is used to choose foreground samples. The data type must be float32.
bg_thresh_hi(float): Overlap threshold upper bound which is used to choose background samples. The data type must be float32.
bg_thresh_lo(float): Overlap threshold lower bound which is used to choose background samples. The data type must be float32.
bbox_reg_weights(list|tuple): Box regression weights. The data type must be float32.
class_nums(int): Class number. The data type must be int32.
use_random(bool): Use random sampling to choose foreground and background boxes.
is_cls_agnostic(bool): Whether bbox regression is class-agnostic, i.e. it only distinguishes foreground and background boxes.
is_cascade_rcnn(bool): If set to True, boxes crossing the image boundary will be filtered out.
Returns:
tuple:
A tuple with format ``(rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights)``.
- **rois**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4]``. The data type is the same as ``rpn_rois``.
- **labels_int32**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 1]``. The data type must be int32.
- **bbox_targets**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The regression targets of all RoIs. The data type is the same as ``rpn_rois``.
- **bbox_inside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of foreground boxes' regression loss. The data type is the same as ``rpn_rois``.
- **bbox_outside_weights**: 2-D LoDTensor with shape ``[batch_size_per_im * batch_size, 4 * class_num]``. The weights of regression loss. The data type is the same as ``rpn_rois``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
rpn_rois = fluid.data(name='rpn_rois', shape=[None, 4], dtype='float32')
gt_classes = fluid.data(name='gt_classes', shape=[None, 1], dtype='float32')
is_crowd = fluid.data(name='is_crowd', shape=[None, 1], dtype='float32')
gt_boxes = fluid.data(name='gt_boxes', shape=[None, 4], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
rois, labels, bbox, inside_weights, outside_weights = fluid.layers.generate_proposal_labels(
rpn_rois, gt_classes, is_crowd, gt_boxes, im_info,
class_nums=10)
"""
helper = LayerHelper('generate_proposal_labels', **locals())
check_variable_and_dtype(rpn_rois, 'rpn_rois', ['float32', 'float64'],
'generate_proposal_labels')
check_variable_and_dtype(gt_classes, 'gt_classes', ['int32'],
'generate_proposal_labels')
check_variable_and_dtype(is_crowd, 'is_crowd', ['int32'],
'generate_proposal_labels')
rois = helper.create_variable_for_type_inference(dtype=rpn_rois.dtype)
labels_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
bbox_targets = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_inside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
bbox_outside_weights = helper.create_variable_for_type_inference(
dtype=rpn_rois.dtype)
helper.append_op(
type="generate_proposal_labels",
inputs={
'RpnRois': rpn_rois,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtBoxes': gt_boxes,
'ImInfo': im_info
},
outputs={
'Rois': rois,
'LabelsInt32': labels_int32,
'BboxTargets': bbox_targets,
'BboxInsideWeights': bbox_inside_weights,
'BboxOutsideWeights': bbox_outside_weights
},
attrs={
'batch_size_per_im': batch_size_per_im,
'fg_fraction': fg_fraction,
'fg_thresh': fg_thresh,
'bg_thresh_hi': bg_thresh_hi,
'bg_thresh_lo': bg_thresh_lo,
'bbox_reg_weights': bbox_reg_weights,
'class_nums': class_nums,
'use_random': use_random,
'is_cls_agnostic': is_cls_agnostic,
'is_cascade_rcnn': is_cascade_rcnn
})
rois.stop_gradient = True
labels_int32.stop_gradient = True
bbox_targets.stop_gradient = True
bbox_inside_weights.stop_gradient = True
bbox_outside_weights.stop_gradient = True
return rois, labels_int32, bbox_targets, bbox_inside_weights, bbox_outside_weights
def generate_mask_labels(im_info, gt_classes, is_crowd, gt_segms, rois,
labels_int32, num_classes, resolution):
"""
:alias_main: paddle.nn.functional.generate_mask_labels
:alias: paddle.nn.functional.generate_mask_labels,paddle.nn.functional.vision.generate_mask_labels
:old_api: paddle.fluid.layers.generate_mask_labels
**Generate Mask Labels for Mask-RCNN**
Given the RoIs and their corresponding labels, this operator samples
foreground RoIs. For each foreground RoI, the mask branch produces
a :math:`K \\times M^{2}` dimensional target, which encodes K binary
masks of resolution M x M, one for each of the K classes.
These mask targets are used to compute the loss of the mask branch.
Please note the data format of the ground-truth segmentation, assuming the
segmentations are as follows. The first instance has two gt objects.
The second instance has one gt object, and this object has two gt segmentations.
.. code-block:: python
#[
# [[[229.14, 370.9, 229.14, 370.9, ...]],
# [[343.7, 139.85, 349.01, 138.46, ...]]], # 0-th instance
# [[[500.0, 390.62, ...],[115.48, 187.86, ...]]] # 1-th instance
#]
batch_masks = []
for semgs in batch_semgs:
gt_masks = []
for semg in semgs:
gt_segm = []
for polys in semg:
gt_segm.append(np.array(polys).reshape(-1, 2))
gt_masks.append(gt_segm)
batch_masks.append(gt_masks)
place = fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=feeds)
feeder.feed(batch_masks)
Args:
im_info (Variable): A 2-D Tensor with shape [N, 3] and float32
data type. N is the batch size, each element is
[height, width, scale] of image. Image scale is
target_size / original_size, target_size is the size after resize,
original_size is the original image size.
gt_classes (Variable): A 2-D LoDTensor with shape [M, 1]. Data type
should be int. M is the total number of ground-truth, each
element is a class label.
is_crowd (Variable): A 2-D LoDTensor with same shape and same data type
as gt_classes, each element is a flag indicating whether a
groundtruth is crowd.
gt_segms (Variable): This input is a 2-D LoDTensor with shape [S, 2] and
float32 data type, its LoD level is 3.
Usually users do not need to understand LoD,
but they should return the correct data format in the reader.
The LoD[0] represents the ground-truth objects number of
each instance. LoD[1] represents the segmentation counts of each
object. LoD[2] represents the polygons number of each segmentation.
S is the total number of polygon coordinate points. Each element is
a (x, y) coordinate point.
rois (Variable): A 2-D LoDTensor with shape [R, 4] and float32 data
type. R is the total number of RoIs, each element is a bounding
box with (xmin, ymin, xmax, ymax) format in the range of original image.
labels_int32 (Variable): A 2-D LoDTensor in shape of [R, 1] with type
of int32. R is the same as it in `rois`. Each element represents
a class label of a RoI.
num_classes (int): Class number.
resolution (int): Resolution of mask predictions.
Returns:
mask_rois (Variable): A 2D LoDTensor with shape [P, 4] and same data
type as `rois`. P is the total number of sampled RoIs. Each element
is a bounding box with [xmin, ymin, xmax, ymax] format in range of
original image size.
mask_rois_has_mask_int32 (Variable): A 2D LoDTensor with shape [P, 1]
and int data type, each element represents the output mask RoI
index with regard to input RoIs.
mask_int32 (Variable): A 2D LoDTensor with shape [P, K * M * M] and int
data type, K is the classes number and M is the resolution of mask
predictions. Each element represents the binary mask targets.
Examples:
.. code-block:: python
import paddle.fluid as fluid
im_info = fluid.data(name="im_info", shape=[None, 3],
dtype="float32")
gt_classes = fluid.data(name="gt_classes", shape=[None, 1],
dtype="float32", lod_level=1)
is_crowd = fluid.data(name="is_crowd", shape=[None, 1],
dtype="float32", lod_level=1)
gt_masks = fluid.data(name="gt_masks", shape=[None, 2],
dtype="float32", lod_level=3)
# rois, roi_labels can be the output of
# fluid.layers.generate_proposal_labels.
rois = fluid.data(name="rois", shape=[None, 4],
dtype="float32", lod_level=1)
roi_labels = fluid.data(name="roi_labels", shape=[None, 1],
dtype="int32", lod_level=1)
mask_rois, mask_index, mask_int32 = fluid.layers.generate_mask_labels(
im_info=im_info,
gt_classes=gt_classes,
is_crowd=is_crowd,
gt_segms=gt_masks,
rois=rois,
labels_int32=roi_labels,
num_classes=81,
resolution=14)
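# With num_classes=81 and resolution=14, mask_int32 has shape [P, 81 * 14 * 14].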
"""
helper = LayerHelper('generate_mask_labels', **locals())
mask_rois = helper.create_variable_for_type_inference(dtype=rois.dtype)
roi_has_mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
mask_int32 = helper.create_variable_for_type_inference(
dtype=gt_classes.dtype)
helper.append_op(
type="generate_mask_labels",
inputs={
'ImInfo': im_info,
'GtClasses': gt_classes,
'IsCrowd': is_crowd,
'GtSegms': gt_segms,
'Rois': rois,
'LabelsInt32': labels_int32
},
outputs={
'MaskRois': mask_rois,
'RoiHasMaskInt32': roi_has_mask_int32,
'MaskInt32': mask_int32
},
attrs={'num_classes': num_classes,
'resolution': resolution})
mask_rois.stop_gradient = True
roi_has_mask_int32.stop_gradient = True
mask_int32.stop_gradient = True
return mask_rois, roi_has_mask_int32, mask_int32
def generate_proposals(scores,
bbox_deltas,
im_info,
anchors,
variances,
pre_nms_top_n=6000,
post_nms_top_n=1000,
nms_thresh=0.5,
min_size=0.1,
eta=1.0,
name=None,
return_rois_num=False):
"""
:alias_main: paddle.nn.functional.generate_proposals
:alias: paddle.nn.functional.generate_proposals,paddle.nn.functional.vision.generate_proposals
:old_api: paddle.fluid.layers.generate_proposals
**Generate proposals for Faster-RCNN**
This operation proposes RoIs according to each box's probability
of being a foreground object, where the boxes are computed from the anchors.
bbox_deltas and scores are the outputs of RPN. The final proposals
can be used to train the detection net.
For generating proposals, this operation performs the following steps:
1. Transpose and resize scores and bbox_deltas to size
(H*W*A, 1) and (H*W*A, 4).
2. Calculate box locations as proposal candidates.
3. Clip boxes to the image.
4. Remove predicted boxes with small area.
5. Apply NMS to get the final proposals as output.
Args:
scores(Variable): A 4-D Tensor with shape [N, A, H, W] represents
the probability for each box to be an object.
N is batch size, A is number of anchors, H and W are height and
width of the feature map. The data type must be float32.
bbox_deltas(Variable): A 4-D Tensor with shape [N, 4*A, H, W]
represents the difference between predicted box location and
anchor location. The data type must be float32.
im_info(Variable): A 2-D Tensor with shape [N, 3] represents origin
image information for N batch. Height and width are the input sizes
and scale is the ratio of network input size and original size.
The data type can be float32 or float64.
anchors(Variable): A 4-D Tensor represents the anchors with a layout
of [H, W, A, 4]. H and W are height and width of the feature map,
num_anchors is the box count of each position. Each anchor is
in (xmin, ymin, xmax, ymax) format and is unnormalized. The data type must be float32.
variances(Variable): A 4-D Tensor. The expanded variances of anchors with a layout of
[H, W, num_priors, 4]. Each variance is in
(xcenter, ycenter, w, h) format. The data type must be float32.
pre_nms_top_n(float): Number of total bboxes to be kept per
image before NMS. The data type must be float32. `6000` by default.
post_nms_top_n(float): Number of total bboxes to be kept per
image after NMS. The data type must be float32. `1000` by default.
nms_thresh(float): Threshold in NMS. The data type must be float32. `0.5` by default.
min_size(float): Remove predicted boxes with either height or
width < min_size. The data type must be float32. `0.1` by default.
eta(float): Apply in adaptive NMS, if adaptive `threshold > 0.5`,
`adaptive_threshold = adaptive_threshold * eta` in each iteration.
return_rois_num(bool): When set to True, a 1-D Tensor with shape [N, ] is also returned, containing the
number of RoIs of each image in the batch, where N is the number of images. For example, the values [4, 5]
mean that the first image has 4 RoIs and the second image has 5 RoIs. It is only used in RCNN models.
'False' by default.
Returns:
tuple:
A tuple with format ``(rpn_rois, rpn_roi_probs)``.
- **rpn_rois**: The generated RoIs. 2-D Tensor with shape ``[N, 4]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
- **rpn_roi_probs**: The scores of generated RoIs. 2-D Tensor with shape ``[N, 1]`` while ``N`` is the number of RoIs. The data type is the same as ``scores``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
scores = fluid.data(name='scores', shape=[None, 4, 5, 5], dtype='float32')
bbox_deltas = fluid.data(name='bbox_deltas', shape=[None, 16, 5, 5], dtype='float32')
im_info = fluid.data(name='im_info', shape=[None, 3], dtype='float32')
anchors = fluid.data(name='anchors', shape=[None, 5, 4, 4], dtype='float32')
variances = fluid.data(name='variances', shape=[None, 5, 10, 4], dtype='float32')
rois, roi_probs = fluid.layers.generate_proposals(scores, bbox_deltas,
im_info, anchors, variances)
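# With A=4 anchors and a 5x5 feature map as above, each image has
# H*W*A = 100 candidate boxes scored before clipping and NMS.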
"""
helper = LayerHelper('generate_proposals', **locals())
check_variable_and_dtype(scores, 'scores', ['float32'],
'generate_proposals')
check_variable_and_dtype(bbox_deltas, 'bbox_deltas', ['float32'],
'generate_proposals')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'generate_proposals')
check_variable_and_dtype(anchors, 'anchors', ['float32'],
'generate_proposals')
check_variable_and_dtype(variances, 'variances', ['float32'],
'generate_proposals')
rpn_rois = helper.create_variable_for_type_inference(
dtype=bbox_deltas.dtype)
rpn_roi_probs = helper.create_variable_for_type_inference(
dtype=scores.dtype)
rpn_rois_lod = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type="generate_proposals",
inputs={
'Scores': scores,
'BboxDeltas': bbox_deltas,
'ImInfo': im_info,
'Anchors': anchors,
'Variances': variances
},
attrs={
'pre_nms_topN': pre_nms_top_n,
'post_nms_topN': post_nms_top_n,
'nms_thresh': nms_thresh,
'min_size': min_size,
'eta': eta
},
outputs={
'RpnRois': rpn_rois,
'RpnRoiProbs': rpn_roi_probs,
'RpnRoisLod': rpn_rois_lod
})
rpn_rois.stop_gradient = True
rpn_roi_probs.stop_gradient = True
rpn_rois_lod.stop_gradient = True
if return_rois_num:
return rpn_rois, rpn_roi_probs, rpn_rois_lod
else:
return rpn_rois, rpn_roi_probs
def box_clip(input, im_info, name=None):
"""
:alias_main: paddle.nn.functional.box_clip
:alias: paddle.nn.functional.box_clip,paddle.nn.functional.vision.box_clip
:old_api: paddle.fluid.layers.box_clip
Clip the boxes into the size given by im_info.
For each input box, the formula is given as follows:
.. code-block:: text
xmin = max(min(xmin, im_w - 1), 0)
ymin = max(min(ymin, im_h - 1), 0)
xmax = max(min(xmax, im_w - 1), 0)
ymax = max(min(ymax, im_h - 1), 0)
where im_w and im_h are computed from im_info:
.. code-block:: text
im_h = round(height / scale)
im_w = round(width / scale)
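For example, with im_info = [40., 60., 1.] (height 40, width 60, scale 1), a box [-5., 10., 70., 35.] is clipped to [0., 10., 59., 35.].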
Args:
input(Variable): The input Tensor with shape :math:`[N_1, N_2, ..., N_k, 4]`,
the last dimension is 4 and data type is float32 or float64.
im_info(Variable): The 2-D Tensor with shape [N, 3] with layout
(height, width, scale) representing the information of image.
Height and width are the input sizes and scale is the ratio of network input
size and original size. The data type is float32 or float64.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
output(Variable): The clipped tensor with data type float32 or float64.
The shape is same as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(
name='boxes', shape=[None, 8, 4], dtype='float32', lod_level=1)
im_info = fluid.data(name='im_info', shape=[-1 ,3])
out = fluid.layers.box_clip(
input=boxes, im_info=im_info)
"""
check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'box_clip')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'box_clip')
helper = LayerHelper("box_clip", **locals())
output = helper.create_variable_for_type_inference(dtype=input.dtype)
inputs = {"Input": input, "ImInfo": im_info}
helper.append_op(type="box_clip", inputs=inputs, outputs={"Output": output})
return output
def retinanet_detection_output(bboxes,
scores,
anchors,
im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.3,
nms_eta=1.0):
"""
**Detection Output Layer for the detector RetinaNet.**
In the detector `RetinaNet <https://arxiv.org/abs/1708.02002>`_ , many
`FPN <https://arxiv.org/abs/1612.03144>`_ levels output the category
and location predictions; this OP gets the detection results by
performing the following steps:
1. For each FPN level, decode box predictions according to the anchor
boxes from at most :attr:`nms_top_k` top-scoring predictions after
thresholding detector confidence at :attr:`score_threshold`.
2. Merge top predictions from all levels and apply multi-class non
maximum suppression (NMS) on them to get the final detections.
Args:
bboxes(List): A list of Tensors from multiple FPN levels represents
the location prediction for all anchor boxes. Each element is
a 3-D Tensor with shape :math:`[N, Mi, 4]`, :math:`N` is the
batch size, :math:`Mi` is the number of bounding boxes from
:math:`i`-th FPN level and each bounding box has four coordinate
values and the layout is [xmin, ymin, xmax, ymax]. The data type
of each element is float32 or float64.
scores(List): A list of Tensors from multiple FPN levels represents
the category prediction for all anchor boxes. Each element is a
3-D Tensor with shape :math:`[N, Mi, C]`, :math:`N` is the batch
size, :math:`C` is the class number (**excluding background**),
:math:`Mi` is the number of bounding boxes from :math:`i`-th FPN
level. The data type of each element is float32 or float64.
anchors(List): A list of Tensors from multiple FPN levels represents
the locations of all anchor boxes. Each element is a 2-D Tensor
with shape :math:`[Mi, 4]`, :math:`Mi` is the number of bounding
boxes from :math:`i`-th FPN level, and each bounding box has four
coordinate values and the layout is [xmin, ymin, xmax, ymax].
The data type of each element is float32 or float64.
im_info(Variable): A 2-D Tensor with shape :math:`[N, 3]` represents the size
information of input images. :math:`N` is the batch size, the size
information of each image is a 3-vector which are the height and width
of the network input along with the factor scaling the origin image to
the network input. The data type of :attr:`im_info` is float32.
score_threshold(float): Threshold to filter out bounding boxes
with a confidence score before NMS, default value is set to 0.05.
nms_top_k(int): Maximum number of detections per FPN layer to be
kept according to the confidences before NMS, default value is set to
1000.
keep_top_k(int): Number of total bounding boxes to be kept per image after
NMS step. Default value is set to 100, -1 means keeping all bounding
boxes after NMS step.
nms_threshold(float): The Intersection-over-Union(IoU) threshold used to
filter out boxes in NMS.
nms_eta(float): The parameter for adjusting :attr:`nms_threshold` in NMS.
Default value is set to 1., which represents the value of
:attr:`nms_threshold` keep the same in NMS. If :attr:`nms_eta` is set
to be lower than 1. and the value of :attr:`nms_threshold` is set to
be higher than 0.5, everytime a bounding box is filtered out,
the adjustment for :attr:`nms_threshold` like :attr:`nms_threshold`
= :attr:`nms_threshold` * :attr:`nms_eta` will not be stopped until
the actual value of :attr:`nms_threshold` is lower than or equal to
0.5.
**Notice**: In some cases where the image sizes are very small, it's possible
that there is no detection if :attr:`score_threshold` is used at all
levels. Hence, this OP does not filter out anchors from the highest FPN level
before NMS. The last element in :attr:`bboxes`, :attr:`scores` and
:attr:`anchors` is required to be from the highest FPN level.
Returns:
Variable(The data type is float32 or float64):
The detection output is a 1-level LoDTensor with shape :math:`[No, 6]`.
Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
:math:`No` is the total number of detections in this mini-batch.
The :math:`i`-th image has `LoD[i + 1] - LoD[i]` detected
results, if `LoD[i + 1] - LoD[i]` is 0, the :math:`i`-th image
has no detected results. If all images have no detected results,
LoD will be set to 0, and the output tensor is empty (None).
Examples:
.. code-block:: python
import paddle.fluid as fluid
bboxes_low = fluid.data(
name='bboxes_low', shape=[1, 44, 4], dtype='float32')
bboxes_high = fluid.data(
name='bboxes_high', shape=[1, 11, 4], dtype='float32')
scores_low = fluid.data(
name='scores_low', shape=[1, 44, 10], dtype='float32')
scores_high = fluid.data(
name='scores_high', shape=[1, 11, 10], dtype='float32')
anchors_low = fluid.data(
name='anchors_low', shape=[44, 4], dtype='float32')
anchors_high = fluid.data(
name='anchors_high', shape=[11, 4], dtype='float32')
im_info = fluid.data(
name="im_info", shape=[1, 3], dtype='float32')
nmsed_outs = fluid.layers.retinanet_detection_output(
bboxes=[bboxes_low, bboxes_high],
scores=[scores_low, scores_high],
anchors=[anchors_low, anchors_high],
im_info=im_info,
score_threshold=0.05,
nms_top_k=1000,
keep_top_k=100,
nms_threshold=0.45,
nms_eta=1.0)
"""
check_type(bboxes, 'bboxes', (list), 'retinanet_detection_output')
for i, bbox in enumerate(bboxes):
check_variable_and_dtype(bbox, 'bbox{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(scores, 'scores', (list), 'retinanet_detection_output')
for i, score in enumerate(scores):
check_variable_and_dtype(score, 'score{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_type(anchors, 'anchors', (list), 'retinanet_detection_output')
for i, anchor in enumerate(anchors):
check_variable_and_dtype(anchor, 'anchor{}'.format(i),
['float32', 'float64'],
'retinanet_detection_output')
check_variable_and_dtype(im_info, 'im_info', ['float32', 'float64'],
'retinanet_detection_output')
helper = LayerHelper('retinanet_detection_output', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('scores'))
helper.append_op(
type="retinanet_detection_output",
inputs={
'BBoxes': bboxes,
'Scores': scores,
'Anchors': anchors,
'ImInfo': im_info
},
attrs={
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'keep_top_k': keep_top_k,
'nms_eta': nms_eta,
},
outputs={'Out': output})
output.stop_gradient = True
return output
def multiclass_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=0,
name=None):
"""
:alias_main: paddle.nn.functional.multiclass_nms
:alias: paddle.nn.functional.multiclass_nms,paddle.nn.functional.extension.multiclass_nms
:old_api: paddle.fluid.layers.multiclass_nms
**Multiclass NMS**
This operator is to do multi-class non maximum suppression (NMS) on
boxes and scores.
In the NMS step, this operator greedily selects a subset of detection bounding
boxes with scores larger than score_threshold, if this threshold is provided,
and then selects the boxes with the largest nms_top_k confidence scores if nms_top_k
is larger than -1. Then this operator prunes away boxes that have a high IoU
(intersection over union) overlap with already selected boxes, using adaptive-threshold
NMS based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k boxes in total are kept
per image if keep_top_k is larger than -1.
See below for an example:
.. code-block:: text
if:
box1.data = (2.0, 3.0, 7.0, 5.0) format is (xmin, ymin, xmax, ymax)
box1.scores = (0.7, 0.2, 0.4) which is (label0.score=0.7, label1.score=0.2, label2.score=0.4)
box2.data = (3.0, 4.0, 8.0, 5.0)
box2.score = (0.3, 0.3, 0.1)
nms_threshold = 0.3
background_label = 0
score_threshold = 0
Then:
iou = 4/11 > 0.3
out.data = [[1, 0.3, 3.0, 4.0, 8.0, 5.0],
[2, 0.4, 2.0, 3.0, 7.0, 5.0]]
Out format is (label, confidence, xmin, ymin, xmax, ymax)
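Here the IoU of the two boxes is 4/11 (intersection 4 * 1 = 4, union 10 + 5 - 4 = 11), which exceeds nms_threshold, so for each remaining class only the higher-scoring box survives: box2 (score 0.3) for label 1 and box1 (score 0.4) for label 2; label 0 is ignored as the background label.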
Args:
bboxes (Variable): Two types of bboxes are supported:
1. (Tensor) A 3-D Tensor with shape
[N, M, K], where K is 4, 8, 16, 24 or 32, represents the
predicted locations of M bounding boxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax] when the box size equals 4.
The data type is float32 or float64.
2. (LoDTensor) A 3-D Tensor with shape [M, C, 4]
M is the number of bounding boxes, C is the
class number. The data type is float32 or float64.
scores (Variable): Two types of scores are supported:
1. (Tensor) A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
number of bounding boxes. For each category there
are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension
of BBoxes. The data type is float32 or float64.
2. (LoDTensor) A 2-D LoDTensor with shape [M, C].
M is the number of bbox, C is the class number.
In this case, input BBoxes should be the second
case with shape [M, C, 4]. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the multiclass nms op. Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.multiclass_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'multiclass_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'multiclass_nms')
check_type(score_threshold, 'score_threshold', float, 'multiclass_nms')
check_type(nms_top_k, 'nms_top_k', int, 'multiclass_nms')
check_type(keep_top_k, 'keep_top_k', int, 'multiclass_nms')
check_type(nms_threshold, 'nms_threshold', float, 'multiclass_nms')
check_type(normalized, 'normalized', bool, 'multiclass_nms')
check_type(nms_eta, 'nms_eta', float, 'multiclass_nms')
check_type(background_label, 'background_label', int, 'multiclass_nms')
helper = LayerHelper('multiclass_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
helper.append_op(
type="multiclass_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def locality_aware_nms(bboxes,
scores,
score_threshold,
nms_top_k,
keep_top_k,
nms_threshold=0.3,
normalized=True,
nms_eta=1.,
background_label=-1,
name=None):
"""
**Local Aware NMS**
`Local Aware NMS <https://arxiv.org/abs/1704.03155>`_ performs locality-aware non maximum
suppression (LANMS) on boxes and scores.
Firstly, this operator merges boxes and scores according to their IoU
(intersection over union). In the NMS step, this operator greedily selects a
subset of detection bounding boxes with scores larger than score_threshold,
if this threshold is provided, and then selects the boxes with the largest nms_top_k
confidence scores if nms_top_k is larger than -1. Then this operator prunes away boxes
that have a high IoU overlap with already selected boxes, using adaptive-threshold NMS
based on the parameters nms_threshold and nms_eta.
After the NMS step, at most keep_top_k boxes in total are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4 or 8 16 24 32]
represents the predicted locations of M bounding
bboxes, N is the batch size. Each bounding box
has four coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M] represents the
predicted confidence predictions. N is the batch
size, C is the class number, M is number of bounding
boxes. Now only support 1 class. For each category
there are total M scores which corresponding M bounding
boxes. Please note, M is equal to the 2nd dimension of
BBoxes. The data type is float32 or float64.
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: -1
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score. If not provided,
consider all boxes.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
nms_threshold (float): The threshold to be used in NMS. Default: 0.3
nms_eta (float): The threshold to be used in NMS. Default: 1.0
normalized (bool): Whether detections are normalized. Default: True
name(str): Name of the locality aware nms op, please refer to :ref:`api_guide_Name` .
Default: None.
Returns:
Variable: A 2-D LoDTensor with shape [No, 6] represents the detections.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
or A 2-D LoDTensor with shape [No, 10] represents the detections.
Each row has 10 values:
[label, confidence, x1, y1, x2, y2, x3, y3, x4, y4]. No is the
total number of detections. If there is no detected boxes for all
images, lod will be set to {1} and Out only contains one value
which is -1.
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1}). The data type is float32 or float64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None, 81, 8],
dtype='float32')
scores = fluid.data(name='scores', shape=[None, 1, 81],
dtype='float32')
out = fluid.layers.locality_aware_nms(bboxes=boxes,
scores=scores,
score_threshold=0.5,
nms_top_k=400,
nms_threshold=0.3,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'bboxes', ['float32', 'float64'],
'locality_aware_nms')
check_variable_and_dtype(scores, 'scores', ['float32', 'float64'],
'locality_aware_nms')
check_type(background_label, 'background_label', int, 'locality_aware_nms')
check_type(score_threshold, 'score_threshold', float, 'locality_aware_nms')
check_type(nms_top_k, 'nms_top_k', int, 'locality_aware_nms')
check_type(nms_eta, 'nms_eta', float, 'locality_aware_nms')
check_type(nms_threshold, 'nms_threshold', float, 'locality_aware_nms')
check_type(keep_top_k, 'keep_top_k', int, 'locality_aware_nms')
check_type(normalized, 'normalized', bool, 'locality_aware_nms')
shape = scores.shape
assert len(shape) == 3, "dim size of scores must be 3"
assert shape[
1] == 1, "locality_aware_nms only support one class, Tensor score shape must be [N, 1, M]"
helper = LayerHelper('locality_aware_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
out = {'Out': output}
helper.append_op(
type="locality_aware_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'nms_top_k': nms_top_k,
'nms_threshold': nms_threshold,
'nms_eta': nms_eta,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output})
output.stop_gradient = True
return output
def matrix_nms(bboxes,
scores,
score_threshold,
post_threshold,
nms_top_k,
keep_top_k,
use_gaussian=False,
gaussian_sigma=2.,
background_label=0,
normalized=True,
return_index=False,
name=None):
"""
**Matrix NMS**
This operator does matrix non maximum suppression (NMS).
    First selects a subset of candidate bounding boxes that have higher scores
    than score_threshold (if provided), then the top k candidates are selected if
    nms_top_k is larger than -1. Scores of the remaining candidates are then
    decayed according to the Matrix NMS scheme.
    After the NMS step, at most keep_top_k bboxes in total are kept
per image if keep_top_k is larger than -1.
Args:
bboxes (Variable): A 3-D Tensor with shape [N, M, 4] represents the
predicted locations of M bounding bboxes,
N is the batch size. Each bounding box has four
coordinate values and the layout is
[xmin, ymin, xmax, ymax], when box size equals to 4.
The data type is float32 or float64.
scores (Variable): A 3-D Tensor with shape [N, C, M]
represents the predicted confidence predictions.
N is the batch size, C is the class number, M is
                           the number of bounding boxes. For each category there
                           are a total of M scores, each corresponding to one of
                           the M bounding boxes. Please note, M is equal to the 2nd
                           dimension of BBoxes. The data type is float32 or float64.
score_threshold (float): Threshold to filter out bounding boxes with
low confidence score.
post_threshold (float): Threshold to filter out bounding boxes with
low confidence score AFTER decaying.
nms_top_k (int): Maximum number of detections to be kept according to
the confidences after the filtering detections based
on score_threshold.
keep_top_k (int): Number of total bboxes to be kept per image after NMS
step. -1 means keeping all bboxes after NMS step.
use_gaussian (bool): Use Gaussian as the decay function. Default: False
gaussian_sigma (float): Sigma for Gaussian decay function. Default: 2.0
background_label (int): The index of background label, the background
label will be ignored. If set to -1, then all
categories will be considered. Default: 0
normalized (bool): Whether detections are normalized. Default: True
return_index(bool): Whether return selected index. Default: False
name(str): Name of the matrix nms op. Default: None.
Returns:
A tuple with two Variables: (Out, Index) if return_index is True,
otherwise, one Variable(Out) is returned.
Out (Variable): A 2-D LoDTensor with shape [No, 6] containing the
detection results.
Each row has 6 values: [label, confidence, xmin, ymin, xmax, ymax]
(After version 1.3, when no boxes detected, the lod is changed
from {0} to {1})
Index (Variable): A 2-D LoDTensor with shape [No, 1] containing the
selected indices, which are absolute values cross batches.
Examples:
.. code-block:: python
import paddle.fluid as fluid
boxes = fluid.data(name='bboxes', shape=[None,81, 4],
dtype='float32', lod_level=1)
scores = fluid.data(name='scores', shape=[None,81],
dtype='float32', lod_level=1)
out = fluid.layers.matrix_nms(bboxes=boxes,
scores=scores,
background_label=0,
score_threshold=0.5,
post_threshold=0.1,
nms_top_k=400,
keep_top_k=200,
normalized=False)
"""
check_variable_and_dtype(bboxes, 'BBoxes', ['float32', 'float64'],
'matrix_nms')
check_variable_and_dtype(scores, 'Scores', ['float32', 'float64'],
'matrix_nms')
check_type(score_threshold, 'score_threshold', float, 'matrix_nms')
check_type(post_threshold, 'post_threshold', float, 'matrix_nms')
    check_type(nms_top_k, 'nms_top_k', int, 'matrix_nms')
check_type(keep_top_k, 'keep_top_k', int, 'matrix_nms')
check_type(normalized, 'normalized', bool, 'matrix_nms')
check_type(use_gaussian, 'use_gaussian', bool, 'matrix_nms')
check_type(gaussian_sigma, 'gaussian_sigma', float, 'matrix_nms')
check_type(background_label, 'background_label', int, 'matrix_nms')
helper = LayerHelper('matrix_nms', **locals())
output = helper.create_variable_for_type_inference(dtype=bboxes.dtype)
index = helper.create_variable_for_type_inference(dtype='int')
helper.append_op(
type="matrix_nms",
inputs={'BBoxes': bboxes,
'Scores': scores},
attrs={
'background_label': background_label,
'score_threshold': score_threshold,
'post_threshold': post_threshold,
'nms_top_k': nms_top_k,
'gaussian_sigma': gaussian_sigma,
'use_gaussian': use_gaussian,
'keep_top_k': keep_top_k,
'normalized': normalized
},
outputs={'Out': output,
'Index': index})
output.stop_gradient = True
if return_index:
return output, index
else:
return output
def distribute_fpn_proposals(fpn_rois,
min_level,
max_level,
refer_level,
refer_scale,
name=None):
"""
:alias_main: paddle.nn.functional.distribute_fpn_proposals
:alias: paddle.nn.functional.distribute_fpn_proposals,paddle.nn.functional.vision.distribute_fpn_proposals
:old_api: paddle.fluid.layers.distribute_fpn_proposals
**This op only takes LoDTensor as input.** In Feature Pyramid Networks
(FPN) models, it is needed to distribute all proposals into different FPN
level, with respect to scale of the proposals, the referring scale and the
referring level. Besides, to restore the order of proposals, we return an
array which indicates the original index of rois in current proposals.
To compute FPN level for each roi, the formula is given as follows:
.. math::
        roi\_scale &= \sqrt{BBoxArea(fpn\_roi)} \\\\
        level &= floor(\log(\\frac{roi\_scale}{refer\_scale}) + refer\_level)
where BBoxArea is a function to compute the area of each roi.
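    For example, with refer_level=4 and refer_scale=224, a square roi of side
    224 maps to level 4 and one of side 112 maps to level 3 (these particular
    values hold whether the logarithm is taken as natural or base 2).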
Args:
fpn_rois(Variable): 2-D Tensor with shape [N, 4] and data type is
float32 or float64. The input fpn_rois.
min_level(int32): The lowest level of FPN layer where the proposals come
from.
max_level(int32): The highest level of FPN layer where the proposals
come from.
refer_level(int32): The referring level of FPN layer with specified scale.
refer_scale(int32): The referring scale of FPN layer with specified level.
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
multi_rois(List) : A list of 2-D LoDTensor with shape [M, 4]
            and data type float32 or float64. The length is
max_level-min_level+1. The proposals in each FPN level.
restore_ind(Variable): A 2-D Tensor with shape [N, 1], N is
the number of total rois. The data type is int32. It is
used to restore the order of fpn_rois.
Examples:
.. code-block:: python
import paddle.fluid as fluid
fpn_rois = fluid.data(
name='data', shape=[None, 4], dtype='float32', lod_level=1)
multi_rois, restore_ind = fluid.layers.distribute_fpn_proposals(
fpn_rois=fpn_rois,
min_level=2,
max_level=5,
refer_level=4,
refer_scale=224)
"""
check_variable_and_dtype(fpn_rois, 'fpn_rois', ['float32', 'float64'],
'distribute_fpn_proposals')
helper = LayerHelper('distribute_fpn_proposals', **locals())
dtype = helper.input_dtype('fpn_rois')
num_lvl = max_level - min_level + 1
multi_rois = [
helper.create_variable_for_type_inference(dtype) for i in range(num_lvl)
]
restore_ind = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='distribute_fpn_proposals',
inputs={'FpnRois': fpn_rois},
outputs={'MultiFpnRois': multi_rois,
'RestoreIndex': restore_ind},
attrs={
'min_level': min_level,
'max_level': max_level,
'refer_level': refer_level,
'refer_scale': refer_scale
})
return multi_rois, restore_ind
@templatedoc()
def box_decoder_and_assign(prior_box,
prior_box_var,
target_box,
box_score,
box_clip,
name=None):
"""
:alias_main: paddle.nn.functional.box_decoder_and_assign
:alias: paddle.nn.functional.box_decoder_and_assign,paddle.nn.functional.vision.box_decoder_and_assign
:old_api: paddle.fluid.layers.box_decoder_and_assign
${comment}
Args:
prior_box(${prior_box_type}): ${prior_box_comment}
prior_box_var(${prior_box_var_type}): ${prior_box_var_comment}
target_box(${target_box_type}): ${target_box_comment}
box_score(${box_score_type}): ${box_score_comment}
box_clip(${box_clip_type}): ${box_clip_comment}
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Tuple:
decode_box(${decode_box_type}): ${decode_box_comment}
output_assign_box(${output_assign_box_type}): ${output_assign_box_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
pb = fluid.data(
name='prior_box', shape=[None, 4], dtype='float32')
pbv = fluid.data(
name='prior_box_var', shape=[4], dtype='float32')
loc = fluid.data(
name='target_box', shape=[None, 4*81], dtype='float32')
scores = fluid.data(
name='scores', shape=[None, 81], dtype='float32')
decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
pb, pbv, loc, scores, 4.135)
"""
check_variable_and_dtype(prior_box, 'prior_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(target_box, 'target_box', ['float32', 'float64'],
'box_decoder_and_assign')
check_variable_and_dtype(box_score, 'box_score', ['float32', 'float64'],
'box_decoder_and_assign')
helper = LayerHelper("box_decoder_and_assign", **locals())
decoded_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
output_assign_box = helper.create_variable_for_type_inference(
dtype=prior_box.dtype)
helper.append_op(
type="box_decoder_and_assign",
inputs={
"PriorBox": prior_box,
"PriorBoxVar": prior_box_var,
"TargetBox": target_box,
"BoxScore": box_score
},
attrs={"box_clip": box_clip},
outputs={
"DecodeBox": decoded_box,
"OutputAssignBox": output_assign_box
})
return decoded_box, output_assign_box
def collect_fpn_proposals(multi_rois,
multi_scores,
min_level,
max_level,
post_nms_top_n,
name=None):
"""
:alias_main: paddle.nn.functional.collect_fpn_proposals
:alias: paddle.nn.functional.collect_fpn_proposals,paddle.nn.functional.vision.collect_fpn_proposals
:old_api: paddle.fluid.layers.collect_fpn_proposals
**This OP only supports LoDTensor as input**. Concat multi-level RoIs
(Region of Interest) and select N RoIs with respect to multi_scores.
This operation performs the following steps:
    1. Choose num_level RoIs and scores as input: num_level = max_level - min_level + 1
2. Concat multi-level RoIs and scores
3. Sort scores and select post_nms_top_n scores
4. Gather RoIs by selected indices from scores
5. Re-sort RoIs by corresponding batch_id
Args:
multi_rois(list): List of RoIs to collect. Element in list is 2-D
LoDTensor with shape [N, 4] and data type is float32 or float64,
N is the number of RoIs.
multi_scores(list): List of scores of RoIs to collect. Element in list
is 2-D LoDTensor with shape [N, 1] and data type is float32 or
float64, N is the number of RoIs.
min_level(int): The lowest level of FPN layer to collect
max_level(int): The highest level of FPN layer to collect
post_nms_top_n(int): The number of selected RoIs
name(str, optional): For detailed information, please refer
to :ref:`api_guide_Name`. Usually name is no need to set and
None by default.
Returns:
Variable:
fpn_rois(Variable): 2-D LoDTensor with shape [N, 4] and data type is
float32 or float64. Selected RoIs.
Examples:
.. code-block:: python
import paddle.fluid as fluid
multi_rois = []
multi_scores = []
for i in range(4):
multi_rois.append(fluid.data(
name='roi_'+str(i), shape=[None, 4], dtype='float32', lod_level=1))
for i in range(4):
multi_scores.append(fluid.data(
name='score_'+str(i), shape=[None, 1], dtype='float32', lod_level=1))
fpn_rois = fluid.layers.collect_fpn_proposals(
multi_rois=multi_rois,
multi_scores=multi_scores,
min_level=2,
max_level=5,
post_nms_top_n=2000)
"""
check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
check_type(multi_scores, 'multi_scores', list, 'collect_fpn_proposals')
helper = LayerHelper('collect_fpn_proposals', **locals())
dtype = helper.input_dtype('multi_rois')
check_dtype(dtype, 'multi_rois', ['float32', 'float64'],
'collect_fpn_proposals')
num_lvl = max_level - min_level + 1
input_rois = multi_rois[:num_lvl]
input_scores = multi_scores[:num_lvl]
output_rois = helper.create_variable_for_type_inference(dtype)
output_rois.stop_gradient = True
helper.append_op(
type='collect_fpn_proposals',
inputs={
'MultiLevelRois': input_rois,
'MultiLevelScores': input_scores
},
outputs={'FpnRois': output_rois},
attrs={'post_nms_topN': post_nms_top_n})
return output_rois
| 44.869744 | 224 | 0.610142 | [
"Apache-2.0"
] | 92lqllearning/Paddle | python/paddle/fluid/layers/detection.py | 174,992 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Charts about the national vaccines data.
@author: riccardomaldini
"""
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from data_extractors.vaccines_regions import benchmark_dict, marche_df
from data_extractors.vaccines_italy import italy_df
from data_extractors.area_names import area_names_dict
from matplotlib.dates import MonthLocator
import utils
def adm_doses_italy(save_image=False, show=False):
"""
Administration data about Italy.
"""
# plt.stackplot(data['data_somministrazione'], data['prima_dose'],data['seconda_dose'],
# labels=['Prime dosi', 'Seconde dosi'])
plt.bar(italy_df['data_somministrazione'], italy_df['prima_dose'], label='Prime dosi')
plt.bar(italy_df['data_somministrazione'], italy_df['seconda_dose'], bottom=italy_df['prima_dose'],
label='Seconde dosi')
plt.title("Somministrazioni giornaliere Italia,\ncon distinzione prima dose/richiamo\n")
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/dosi_italia.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
def adm_doses_marche(save_image=False, show=False):
"""
    Administration data about Marche.
"""
plt.bar(marche_df['data_somministrazione'], marche_df['prima_dose'], label='Prime dosi')
plt.bar(marche_df['data_somministrazione'], marche_df['seconda_dose'], bottom=marche_df['prima_dose'],
label='Seconde dosi')
plt.title("Somministrazioni giornaliere Marche,\ncon distinzione prima dose/richiamo\n")
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/dosi_marche.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
def regional_doses(save_image=False, show=False):
"""
    Comparison of the doses administered in the benchmark regions
"""
for area_code, region_data in benchmark_dict.items():
rolling_avg_adm = region_data['totale_per_100000_ab'].rolling(7, center=True).mean()
plt.plot(region_data['data_somministrazione'], rolling_avg_adm, label=area_names_dict[area_code])
rolling_avg_adm = italy_df['totale_per_100000_ab'].rolling(7, center=True).mean()
plt.plot(italy_df['data_somministrazione'], rolling_avg_adm, alpha=0.5, linestyle=':',
label="Italia")
plt.title('Andamento delle somministrazioni giornaliere\nper 100.000 abitanti, confronto tra le regioni del benchmark\n')
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/dosi_per_regioni.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
def immunes_percentage(save_image=False, show=False):
"""
    Plots the share of the population that has received the second dose, for each benchmark region.
"""
for area_code, region_data in benchmark_dict.items():
plt.plot(region_data['data_somministrazione'], region_data['seconda_dose_totale_storico_su_pop'],
label=area_names_dict[area_code])
plt.plot(italy_df['data_somministrazione'], italy_df['seconda_dose_totale_storico_su_pop'], alpha=0.5, linestyle=':',
label="Italia")
plt.title('Percentuale popolazione immunizzata,\nconfronto tra le regioni del benchmark\n')
plt.gca().yaxis.set_major_formatter(mtick.PercentFormatter(xmax=1))
plt.gca().xaxis.set_major_locator(MonthLocator())
plt.gca().xaxis.set_minor_locator(MonthLocator(bymonthday=15))
plt.gca().xaxis.set_major_formatter(utils.std_date_formatter)
plt.gca().xaxis.set_minor_formatter(utils.std_date_formatter)
plt.gcf().autofmt_xdate(which='both')
plt.grid(True, which='both', axis='both')
plt.legend(loc='upper left')
if save_image:
plt.savefig('./charts/vaccines/immunizzati.png', dpi=300, transparent=True, bbox_inches='tight')
if show:
plt.show()
plt.close()
| 38.165414 | 125 | 0.718676 | [
"MIT"
] | maldins46/CovidAnalysis | chart-generation/charts/vaccines.py | 5,076 | Python |
#!/usr/bin/env python
"""Create two randomly generated matrices, of the specified sizes and write them
to JSON files.
"""
import json
import numpy as np
def read(path):
with open(path, 'rb') as f:
matrix = np.fromfile(f, dtype=np.float32)
return matrix
def write(path, matrix):
with open(path, 'wb') as f:
        f.write(matrix.astype(np.float32).tobytes())
return matrix
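# Minimal round-trip sketch (illustrative file name, not part of the original script):
#   m = np.arange(6, dtype=np.float32)
#   write('example.bin', m)
#   assert np.array_equal(read('example.bin'), m)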
| 16.608696 | 80 | 0.704188 | [
"MIT"
] | Zeta36/mushroom-detector-kerasjs | android/platforms/android/assets/www/web/node_modules/weblas/test/data/binary_matrix.py | 382 | Python |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import parl
from parl.remote.master import Master
from parl.remote.worker import Worker
import time
import threading
from parl.remote.client import disconnect
from parl.remote import exceptions
import timeout_decorator
import subprocess
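# parl.remote_class turns Actor into a remote actor whose instances run in the
# cluster's worker processes rather than in the local process.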
@parl.remote_class
class Actor(object):
def __init__(self, arg1=None, arg2=None):
self.arg1 = arg1
self.arg2 = arg2
def get_arg1(self):
return self.arg1
def get_arg2(self):
return self.arg2
def set_arg1(self, value):
self.arg1 = value
def set_arg2(self, value):
self.arg2 = value
def get_unable_serialize_object(self):
return UnableSerializeObject()
def add_one(self, value):
value += 1
return value
def add(self, x, y):
time.sleep(3)
return x + y
def will_raise_exception_func(self):
x = 1 / 0
class TestCluster(unittest.TestCase):
def tearDown(self):
disconnect()
#time.sleep(20)
#command = ("pkill -f remote/job.py")
#subprocess.call([command], shell=True)
def test_actor_exception(self):
master = Master(port=1235)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1235', 1)
self.assertEqual(1, master.cpu_num)
parl.connect('localhost:1235')
with self.assertRaises(exceptions.RemoteError):
actor = Actor(abcd='a bug')
actor2 = Actor()
self.assertEqual(actor2.add_one(1), 2)
self.assertEqual(0, master.cpu_num)
master.exit()
worker1.exit()
@timeout_decorator.timeout(seconds=300)
    def test_actor_method_exception(self):
master = Master(port=1236)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1236', 1)
self.assertEqual(1, master.cpu_num)
parl.connect('localhost:1236')
actor = Actor()
try:
actor.will_raise_exception_func()
except:
pass
actor2 = Actor()
time.sleep(30)
self.assertEqual(actor2.add_one(1), 2)
self.assertEqual(0, master.cpu_num)
del actor
del actor2
worker1.exit()
master.exit()
def test_reset_actor(self):
# start the master
master = Master(port=1237)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1237', 4)
parl.connect('localhost:1237')
for i in range(10):
actor = Actor()
ret = actor.add_one(1)
self.assertEqual(ret, 2)
del actor
time.sleep(20)
self.assertEqual(master.cpu_num, 4)
worker1.exit()
master.exit()
def test_add_worker(self):
master = Master(port=1234)
th = threading.Thread(target=master.run)
th.start()
time.sleep(1)
worker1 = Worker('localhost:1234', 4)
self.assertEqual(master.cpu_num, 4)
worker2 = Worker('localhost:1234', 4)
self.assertEqual(master.cpu_num, 8)
worker2.exit()
time.sleep(30)
self.assertEqual(master.cpu_num, 4)
master.exit()
worker1.exit()
if __name__ == '__main__':
unittest.main()
| 26.898649 | 74 | 0.621954 | [
"Apache-2.0"
] | GOnion/PARL | parl/remote/tests/cluster_test.py | 3,981 | Python |
'''
language: python
Description:
version: beta
Author: xiaoshuyui
Date: 2020-07-10 10:33:39
LastEditors: xiaoshuyui
LastEditTime: 2021-01-05 10:21:49
'''
import glob
import os
from tqdm import tqdm
from convertmask.utils.methods import getMultiShapes
from convertmask.utils.methods.logger import logger
def getJsons(imgPath, maskPath, savePath, yamlPath=''):
"""
imgPath: origin image path \n
maskPath : mask image path \n
savePath : json file save path \n
>>> getJsons(path-to-your-imgs,path-to-your-maskimgs,path-to-your-jsonfiles)
"""
logger.info("currently, only *.jpg supported")
if os.path.isfile(imgPath):
getMultiShapes.getMultiShapes(imgPath, maskPath, savePath, yamlPath)
elif os.path.isdir(imgPath):
oriImgs = glob.glob(imgPath + os.sep + '*.jpg')
maskImgs = glob.glob(maskPath + os.sep + '*.jpg')
for i in tqdm(oriImgs):
i_mask = i.replace(imgPath, maskPath)
if os.path.exists(i_mask):
# print(i)
getMultiShapes.getMultiShapes(i, i_mask, savePath, yamlPath)
else:
logger.warning('corresponding mask image not found!')
continue
else:
logger.error('input error. got [{},{},{},{}]. file maybe missing.'.format(
imgPath, maskPath, savePath, yamlPath))
logger.info('Done! See here. {}'.format(savePath))
def getXmls(imgPath, maskPath, savePath):
logger.info("currently, only *.jpg supported")
if os.path.isfile(imgPath):
getMultiShapes.getMultiObjs_voc(imgPath, maskPath, savePath)
elif os.path.isdir(imgPath):
oriImgs = glob.glob(imgPath + os.sep + '*.jpg')
maskImgs = glob.glob(maskPath + os.sep + '*.jpg')
for i in tqdm(oriImgs):
i_mask = i.replace(imgPath, maskPath)
# print(i)
if os.path.exists(i_mask):
getMultiShapes.getMultiObjs_voc(i, i_mask, savePath)
else:
logger.warning('corresponding mask image not found!')
continue
else:
logger.error('input error. got [{},{},{}]. file maybe missing.'.format(
imgPath, maskPath, savePath))
logger.info('Done! See here. {}'.format(savePath))
| 31.694444 | 82 | 0.620508 | [
"Apache-2.0"
] | guchengxi1994/mask2json | convertmask/utils/mask2json_script.py | 2,282 | Python |
from collections import namedtuple
Vote = namedtuple('Vote', 'user post vote')
def create_vote(vote_dict, cutoff):
"""
changes the vote to the [-1, 1] range
"""
modified_vote = 1 if float(vote_dict['vote']) > cutoff else -1
return Vote(
user=str(vote_dict['user']),
post=str(vote_dict['post']),
vote=modified_vote
)
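# Example (illustrative values): a raw vote strictly above the cutoff maps to +1,
# anything at or below it maps to -1.
#   create_vote({'user': 1, 'post': 2, 'vote': 0.8}, cutoff=0.5)
#   -> Vote(user='1', post='2', vote=1)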
| 22.9375 | 66 | 0.621253 | [
"MIT"
] | bubblegumsoldier/kiwi | kiwi-content/kiwi/TransferTypes.py | 367 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from nose.plugins.attrib import attr
import hgvs.dataproviders.uta
import hgvs.location
import hgvs.parser
from hgvs.exceptions import HGVSError
from hgvs.transcriptmapper import TranscriptMapper
@attr(tags=["quick"])
class Test_transcriptmapper(unittest.TestCase):
ref = 'GRCh37.p10'
def setUp(self):
self.hdp = hgvs.dataproviders.uta.connect()
def test_transcriptmapper_failures(self):
self.assertRaises(HGVSError, TranscriptMapper, self.hdp, tx_ac='bogus', alt_ac='NM_033089.6', alt_aln_method='splign')
self.assertRaises(HGVSError, TranscriptMapper, self.hdp, tx_ac='NM_033089.6', alt_ac='bogus', alt_aln_method='splign')
self.assertRaises(HGVSError, TranscriptMapper, self.hdp, tx_ac='NM_000051.3', alt_ac='NC_000011.9', alt_aln_method='bogus')
def test_transcriptmapper_TranscriptMapper_LCE3C_uncertain(self):
"""Use NM_178434.2 tests to test mapping with uncertain positions"""
tx_ac = 'NM_178434.2'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
{'g': parser.parse_g_interval('(152573138)'), 'r': parser.parse_r_interval('(1)'), 'c': parser.parse_c_interval('(-70)')},
{'g': parser.parse_g_interval('(152573138_152573139)'), 'r': parser.parse_r_interval('(1_2)'), 'c': parser.parse_c_interval('(-70_-69)')},
# ? is not yet supported
# {'g': parser.parse_g_interval('(?_152573139)'), 'r': parser.parse_r_interval('(?_2)'), 'c': parser.parse_c_interval('(?_-69)')},
# {'g': parser.parse_g_interval('(152573138_?)'), 'r': parser.parse_r_interval('(1_?)'), 'c': parser.parse_c_interval('(-70_?)')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_LCE3C(self):
"""NM_178434.2: LCE3C single exon, strand = +1, all coordinate input/output are in HGVS"""
tx_ac = 'NM_178434.2'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
# 5'
{'g': parser.parse_g_interval('152573138'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-70')},
{'g': parser.parse_g_interval('152573140'), 'r': parser.parse_r_interval('3'), 'c': parser.parse_c_interval('-68')},
# cds
{'g': parser.parse_g_interval('152573207'), 'r': parser.parse_r_interval('70'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('152573208'), 'r': parser.parse_r_interval('71'), 'c': parser.parse_c_interval('1')},
# 3'
{'g': parser.parse_g_interval('152573492'), 'r': parser.parse_r_interval('355'), 'c': parser.parse_c_interval('285')},
{'g': parser.parse_g_interval('152573493'), 'r': parser.parse_r_interval('356'), 'c': parser.parse_c_interval('*1')},
{'g': parser.parse_g_interval('152573560'), 'r': parser.parse_r_interval('423'), 'c': parser.parse_c_interval('*68')},
{'g': parser.parse_g_interval('152573562'), 'r': parser.parse_r_interval('425'), 'c': parser.parse_c_interval('*70')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_HIST3H2A(self):
"""NM_033445.2: LCE3C single exon, strand = -1, all coordinate input/output are in HGVS"""
tx_ac = 'NM_033445.2'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
# 3'
{'g': parser.parse_g_interval('228645560'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-42')},
{'g': parser.parse_g_interval('228645558'), 'r': parser.parse_r_interval('3'), 'c': parser.parse_c_interval('-40')},
# cds
{'g': parser.parse_g_interval('228645519'), 'r': parser.parse_r_interval('42'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('228645518'), 'r': parser.parse_r_interval('43'), 'c': parser.parse_c_interval('1')},
# 5'
{'g': parser.parse_g_interval('228645126'), 'r': parser.parse_r_interval('435'), 'c': parser.parse_c_interval('393')},
{'g': parser.parse_g_interval('228645125'), 'r': parser.parse_r_interval('436'), 'c': parser.parse_c_interval('*1')},
{'g': parser.parse_g_interval('228645124'), 'r': parser.parse_r_interval('437'), 'c': parser.parse_c_interval('*2')},
{'g': parser.parse_g_interval('228645065'), 'r': parser.parse_r_interval('496'), 'c': parser.parse_c_interval('*61')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_LCE2B(self):
"""NM_014357.4: LCE2B, two exons, strand = +1, all coordinate input/output are in HGVS"""
tx_ac = 'NM_014357.4'
alt_ac = 'NC_000001.10'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
# 5'
{'g': parser.parse_g_interval('152658599'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-54')},
{'g': parser.parse_g_interval('152658601'), 'r': parser.parse_r_interval('3'), 'c': parser.parse_c_interval('-52')},
# cds
{'g': parser.parse_g_interval('152659319'), 'r': parser.parse_r_interval('54'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('152659320'), 'r': parser.parse_r_interval('55'), 'c': parser.parse_c_interval('1')},
# around end of exon 1
{'g': parser.parse_g_interval('152658632'), 'r': parser.parse_r_interval('34'), 'c': parser.parse_c_interval('-21')},
{'g': parser.parse_g_interval('152658633'), 'r': parser.parse_r_interval('34+1'), 'c': parser.parse_c_interval('-21+1')},
# span
{'g': parser.parse_g_interval('152658633_152659299'), 'r': parser.parse_r_interval('34+1_35-1'), 'c': parser.parse_c_interval('-21+1_-20-1')},
# around beginning of exon 2
{'g': parser.parse_g_interval('152659300'), 'r': parser.parse_r_interval('35'), 'c': parser.parse_c_interval('-20')},
{'g': parser.parse_g_interval('152659299'), 'r': parser.parse_r_interval('35-1'), 'c': parser.parse_c_interval('-20-1')},
# around end of exon 2
{'g': parser.parse_g_interval('152659652'), 'r': parser.parse_r_interval('387'), 'c': parser.parse_c_interval('333')},
{'g': parser.parse_g_interval('152659653'), 'r': parser.parse_r_interval('388'), 'c': parser.parse_c_interval('*1')},
# span
{'g': parser.parse_g_interval('152659651_152659654'), 'r': parser.parse_r_interval('386_389'), 'c': parser.parse_c_interval('332_*2')},
# 3'
{'g': parser.parse_g_interval('152659877'), 'r': parser.parse_r_interval('612'), 'c': parser.parse_c_interval('*225')},
]
self.run_cases(tm, test_cases)
def test_transcriptmapper_TranscriptMapper_PTH2(self):
"""NM_178449.3: PTH2, two exons, strand = -1, all coordinate input/output are in HGVS"""
tx_ac = 'NM_178449.3'
alt_ac = 'NC_000019.9'
tm = TranscriptMapper(self.hdp, tx_ac, alt_ac, alt_aln_method='splign')
parser = hgvs.parser.Parser()
test_cases = [
# 3'
{'g': parser.parse_g_interval('49926698'), 'r': parser.parse_r_interval('1'), 'c': parser.parse_c_interval('-102')},
# cds
{'g': parser.parse_g_interval('49926597'), 'r': parser.parse_r_interval('102'), 'c': parser.parse_c_interval('-1')},
{'g': parser.parse_g_interval('49926596'), 'r': parser.parse_r_interval('103'), 'c': parser.parse_c_interval('1')},
# around end of exon 1
{'g': parser.parse_g_interval('49926469'), 'r': parser.parse_r_interval('230'), 'c': parser.parse_c_interval('128')},
{'g': parser.parse_g_interval('49926468'), 'r': parser.parse_r_interval('230+1'), 'c': parser.parse_c_interval('128+1')},
# span
{'g': parser.parse_g_interval('49925901_49926467'), 'r': parser.parse_r_interval('230+2_231-2'), 'c': parser.parse_c_interval('128+2_129-2')},
# around beginning of exon 2
{'g': parser.parse_g_interval('49925900'), 'r': parser.parse_r_interval('231-1'), 'c': parser.parse_c_interval('129-1')},
{'g': parser.parse_g_interval('49925899'), 'r': parser.parse_r_interval('231'), 'c': parser.parse_c_interval('129')},
# around end of exon 2
{'g': parser.parse_g_interval('49925725'), 'r': parser.parse_r_interval('405'), 'c': parser.parse_c_interval('303')},
{'g': parser.parse_g_interval('49925724'), 'r': parser.parse_r_interval('406'), 'c': parser.parse_c_interval('*1')},
{'g': parser.parse_g_interval('49925671'), 'r': parser.parse_r_interval('459'), 'c': parser.parse_c_interval('*54')},
]
self.run_cases(tm, test_cases)
def run_cases(self, tm, test_cases):
for test_case in test_cases:
            self.assertEqual(tm.g_to_r(test_case['g']), test_case['r'])
            self.assertEqual(tm.r_to_g(test_case['r']), test_case['g'])
            self.assertEqual(tm.r_to_c(test_case['r']), test_case['c'])
            self.assertEqual(tm.c_to_r(test_case['c']), test_case['r'])
            self.assertEqual(tm.g_to_c(test_case['g']), test_case['c'])
            self.assertEqual(tm.c_to_g(test_case['c']), test_case['g'])
if __name__ == '__main__':
unittest.main()
# TODO: Reintegrate older tests, especially those with indels
### harder tests ###
#def test_transcriptmapper_TranscriptMapper_1_ZCCHC3(self):
# """
# reece=> select * from uta.tx_info where ac='NM_033089.6';
# gene | strand | ac | cds_start_i | cds_end_i | descr | summary
# --------+--------+-------------+-------------+-----------+---------------------------------------+---------
# ZCCHC3 | 1 | NM_033089.6 | 24 | 1236 | zinc finger, CCHC domain containing 3 |
#
# reece=> select * from uta.tx_exons where ac='NM_033089.6';
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar |
# -------------+-----+------+-----------+---------+------------+-----------+---------+-------------+------------------------
# NM_033089.6 | 1 | 1 | 0 | 2759 | GRCh37.p10 | 278203 | 280965 | 484M3D2275M | GGAGGATGCTGGGAAGGAGGTAA
# """
# # http://tinyurl.com/mattx8u
# #
# # Around the deletion
# # http://tinyurl.com/jwt3txg
# # 687 690
# # C | C G G | C
# # \___ ___/
# # C | C
# # 484
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
# ac = 'NM_033089.6'
# tm = TranscriptMapper(self.hdp, ac, self.ref)
# cds = 24 + 1 # hgvs
# # gs, ge = genomic start/end; rs,re = rna start/end; cs, ce = cdna start/end; so, eo = start offset/end offset
# test_cases = [
# {'gs': 278204, 'ge': 278204, 'rs': 1, 're': 1, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 1-cds, 'ce': 1-cds},
# {'gs': 278214, 'ge': 278214, 'rs': 11, 're': 11, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 11-cds, 'ce': 11-cds},
# {'gs': 278204, 'ge': 278214, 'rs': 1, 're': 11, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 1-cds, 'ce': 11-cds},
#
# # around cds (cds can't be zero)
# {'gs': 278227, 'ge': 278227, 'rs': 24, 're': 24, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 24-cds, 'ce': 24-cds},
#
# # beyond cds add 1 due to hgvs
# {'gs': 278228, 'ge': 278228, 'rs': 25, 're': 25, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 25-cds+1, 'ce': 25-cds+1},
# {'gs': 278229, 'ge': 278229, 'rs': 26, 're': 26, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 26-cds+1, 'ce': 26-cds+1},
# {'gs': 280966, 'ge': 280966, 'rs': 2760, 're': 2760, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 2760-cds+1, 'ce': 2760-cds+1},
# {'gs': 278687, 'ge': 278687, 'rs': 484, 're': 484, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 484-cds+1, 'ce': 484-cds+1},
# {'gs': 278687, 'ge': 278688, 'rs': 484, 're': 485, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 484-cds+1, 'ce': 485-cds+1},
# {'gs': 278688, 'ge':278691, 'rs': 485, 're': 485, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 485-cds+1, 'ce': 485-cds+1},
#
# # around cds_start (24) and cds_end (1236), mindful of *coding* del (3D)
# {'gs': 278204+24, 'ge': 278204+1236, 'rs': 25, 're': 1237-3, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 25-cds+1, 'ce': 1237-cds-3+1},
# {'gs': 280956, 'ge': 280966, 'rs': 2750, 're': 2760, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 2750-cds+1, 'ce': 2760-cds+1},
# ]
# self.run_cases(tm, test_cases)
#
#def test_transcriptmapper_TranscriptMapper_2_MCL1(self):
# """
# reece=> select * from uta.tx_info where ac='NM_182763.2';
# gene | strand | ac | cds_start_i | cds_end_i | descr |
# ------+--------+-------------+-------------+-----------+-------------------------------------------------+----------------
# MCL1 | -1 | NM_182763.2 | 208 | 1024 | myeloid cell leukemia sequence 1 (BCL2-related) | This gene encod
#
# reece=> select * from uta.tx_exons where ac='NM_182763.2';
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar |
# -------------+-----+------+-----------+---------+------------+-----------+-----------+--------------+---------------------
# NM_182763.2 | 1 | 1b | 0 | 896 | GRCh37.p10 | 150551318 | 150552214 | 896M |
# NM_182763.2 | 2 | 3 | 896 | 3841 | GRCh37.p10 | 150547026 | 150549967 | 1077M4I1864M | GATGGGTTTGTGGAGTTCTT
# """
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
#
# ac = 'NM_182763.2'
# tm = TranscriptMapper(self.hdp, ac, self.ref)
# cds = 208 + 1 # hgvs
# test_cases = [
# {'gs': 150552215, 'ge': 150552215, 'rs': 1, 're': 1, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 1-cds, 'ce': 1-cds},
# {'gs': 150552214, 'ge': 150552214, 'rs': 2, 're': 2, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 2-cds, 'ce': 2-cds},
#
# # beyond cds add 1 due to hgvs
# {'gs': 150552007, 'ge': 150552007, 'rs': 209, 're': 209, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 209-cds+1, 'ce': 209-cds+1},
# {'gs': 150547027, 'ge': 150547027, 'rs': 3842, 're': 3842, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 3842-cds+1, 'ce': 3842-cds+1},
#
# #{'gs': 150549968, 'ge': 150549968, 'rs': 897, 're': 897, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
# {'gs': 150551318, 'ge': 150551318, 'rs': 897, 're': 897, 'so': 1, 'eo': 1, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
# {'gs': 150551318, 'ge': 150551319, 'rs': 897, 're': 897, 'so': 1, 'eo': 0, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
# {'gs': 150551317, 'ge': 150551318, 'rs': 897, 're': 897, 'so': 2, 'eo': 1, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
# {'gs': 150549968, 'ge': 150549969, 'rs': 897, 're': 897, 'so': 0, 'eo': -1, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
# {'gs': 150549969, 'ge': 150549970, 'rs': 897, 're': 897, 'so': -1, 'eo': -2, 'd': hgvs.location.SEQ_START , 'cs': 897-cds+1, 'ce': 897-cds+1},
#
# # exon 2, 4nt insertion ~ r.2760
# # See http://tinyurl.com/mwegybw
# # The coords of this indel via NW alignment differ from those at NCBI, but are the same canonicalized
# # variant. Nothing to do about that short of running Splign ourselves. Test a few examples.
# {'gs': 150548892, 'ge': 150548892, 'rs': 1973, 're': 1973, 'so': 0, 'eo':0, 'd': hgvs.location.SEQ_START , 'cs': 1973-cds+1, 'ce': 1973-cds+1},
# #? {'gs': 150548891, 'ge': 150548892, 'rs': 1972, 're': 1973, 'so': 0, 'eo':0, 'd': hgvs.location.SEQ_START , 'cs': 1972-cds+1, 'ce': 1973-cds+1},
# {'gs': 150548890, 'ge': 150548892, 'rs': 1973, 're': 1979, 'so': 0, 'eo':0, 'd': hgvs.location.SEQ_START , 'cs': 1973-cds+1, 'ce': 1979-cds+1},
# ]
# self.run_cases(tm, test_cases)
#
# ## exon 2, 4nt insertion ~ r.2760
# ## See http://tinyurl.com/mwegybw
# ## The coords of this indel via NW alignment differ from those at
# ## NCBI, but are the same canonicalized variant. Nothing to do
# ## about that short of running Splign ourselves.
# #self.assertEqual(tm.r_to_g(1972, 1972), (150548891, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1973), (150548890, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1974), (150548890, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1975), (150548890, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1976), (150548890, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1977), (150548890, 150548891))
# #self.assertEqual(tm.r_to_g(1972, 1978), (150548889, 150548891))
# #
# #self.assertEqual(tm.g_to_r(150548891, 150548891), (1972, 1972, 0, 0))
# #self.assertEqual(tm.g_to_r(150548890, 150548891), (1972, 1973, 0, 0))
# #self.assertEqual(tm.g_to_r(150548889, 150548891), (1972, 1978, 0, 0))
# #
# ## around cds_start (208) and cds_end (1024), mindful of *non-coding* ins (4I)
# ## i.e., we *don't* need to account for the 4nt insertion here
# #self.assertEquals(tm.r_to_c(208, 1024), (0, 1024 - 208, 0, 0))
# #self.assertEquals(tm.c_to_r(0, 1024 - 208), (208, 1024, 0, 0))
# #self.assertEquals(tm.g_to_c(150552214 - 208, 150552214 - 208), (0, 0, 0, 0))
# #self.assertEquals(tm.c_to_g(0, 0), (150552214 - 208, 150552214 - 208))
# ## cds_end is in 2nd exon
# #self.assertEquals(tm.g_to_c(150549967 - (1024 - 896), 150549967 - (1024 - 896)), (1024 - 208, 1024 - 208, 0, 0))
# #self.assertEquals(tm.c_to_g(1024 - 208, 1024 - 208), (150549967 - (1024 - 896), 150549967 - (1024 - 896)))
#
#
#def test_transcriptmapper_TranscriptMapper_3_IFI27L1(self):
# """
# #reece=> select * from uta.tx_info where ac='NM_145249.2';
# # gene | chr | strand | ac | cds_start_i | cds_end_i | descr | summary
# #---------+-----+--------+-------------+-------------+-----------+-----------------------------------------------+---------
# # IFI27L1 | 14 | 1 | NM_145249.2 | 254 | 569 | interferon, alpha-inducible protein 27-like 1 |
# #(1 row)
# # reece=>select * from uta.tx_exons where ac = 'NM_145249.2';
# #
# # ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | g_cigar | g_seq_a | t_seq_a
# # -------------+-----+------+-----------+---------+------------+-----------+----------+---------+---------+---------
# # NM_145249.2 | 1 | 1 | 0 | 157 | GRCh37.p10 | 94547638 | 94547795 | 157M | |
# # NM_145249.2 | 2 | 2a | 157 | 282 | GRCh37.p10 | 94563186 | 94563311 | 125M | |
# # NM_145249.2 | 3 | 3 | 282 | 315 | GRCh37.p10 | 94567084 | 94567117 | 33M | |
# # NM_145249.2 | 4 | 4 | 315 | 477 | GRCh37.p10 | 94568159 | 94568321 | 162M | |
# # NM_145249.2 | 5 | 5 | 477 | 715 | GRCh37.p10 | 94568822 | 94569060 | 238M | |
# """
#
# ### Add one to g., r., and c. because we are returning hgvs coordinates ###
#
# ac = 'NM_145249.2'
# tm = TranscriptMapper(self.hdp, ac, self.ref)
# cds = 254 + 1 # hgvs
# test_cases = [
# #{'gs': 94547639, 'ge': 94547639, 'rs': 1, 're': 1, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 1-cds, 'ce': 1-cds},
# #{'gs': 94547796, 'ge': 94547796, 'rs': 158, 're': 158, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 158-cds, 'ce': 158-cds},
# #{'gs': 94563185, 'ge': 94563185, 'rs': 159, 're': 159, 'so': -2, 'eo': -2, 'd': hgvs.location.SEQ_START, 'cs': 159-cds, 'ce': 159-cds},
#
# # beyond cds add 1 due to hgvs
# #{'gs': 94567118, 'ge': 94567120, 'rs': 316, 're': 316, 'so': 0, 'eo': 2, 'd': hgvs.location.SEQ_START, 'cs': 316-cds+1, 'ce': 316-cds+1},
# {'gs': 94567115, 'ge': 94567118, 'rs': 313, 're': 316, 'so': 0, 'eo': 0, 'd': hgvs.location.SEQ_START, 'cs': 313-cds+1, 'ce': 316-cds+1},
#
# # intron in the middle between exon 1 and 2
# #{'gs': 94555500, 'ge': 94555501, 'rs': 157, 're': 158, 'so': 7686, 'eo': -7685, 'd': hgvs.location.SEQ_START, 'cs': 157-cds+1, 'ce': 158-cds+1},
# #{'gs': 94555481, 'ge': 94555501, 'rs': 157, 're': 158, 'so': 7686, 'eo': -7685, 'd': hgvs.location.SEQ_START, 'cs': 157-cds+1, 'ce': 158-cds+1},
# ]
# self.run_cases(tm, test_cases)
### ANOTHER POSSIBLE TEST CASE ###
# reece=> select * from uta.tx_info where ac = 'NM_145171.3';
# gene | strand | ac | cds_start_i | cds_end_i | descr | summary
# -------+--------+-------------+-------------+-----------+-----------------------------+-----------------------------------
# GPHB5 | -1 | NM_145171.3 | 57 | 450 | glycoprotein hormone beta 5 | GPHB5 is a cystine knot-forming...
#
# reece=> select * from uta.tx_exons where ac = 'NM_145171.3' order by g_start_i;
# ac | ord | name | t_start_i | t_end_i | ref | g_start_i | g_end_i | cigar | g_seq_a
# -------------+-----+------+-----------+---------+------------+-----------+----------+-----------+-------------------------
# NM_145171.3 | 3 | 3 | 261 | 543 | GRCh37.p10 | 63779548 | 63779830 | 282M |
# NM_145171.3 | 2 | 2 | 56 | 261 | GRCh37.p10 | 63784360 | 63784564 | 156M1I48M | CATGAAGCTGGCATTCCTCTT...
# NM_145171.3 | 1 | 1 | 0 | 56 | GRCh37.p10 | 63785537 | 63785593 | 56M |
# def test_transcriptmapper_TranscriptMapper_GPHB5(self):
# ac = 'NM_145171.3'
# tm = TranscriptMapper(self.hdp,ac,self.ref)
# pass
## <LICENSE>
## Copyright 2014 HGVS Contributors (https://bitbucket.org/hgvs/hgvs)
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
## </LICENSE>
| 68.365714 | 159 | 0.546974 | [
"Apache-2.0"
] | jmuhlich/hgvs | tests/test_hgvs_transcriptmapper.py | 23,928 | Python |
import trollius
from trollius import From
from pprint import pprint
import pygazebo.msg.raysensor_pb2
@trollius.coroutine
def publish_loop():
manager = yield From(pygazebo.connect())
def callback(data):
ray = pygazebo.msg.raysensor_pb2.RaySensor()
msg = ray.FromString(data)
subscriber = manager.subscribe(
'/gazebo/default/turtlebot/rack/laser/scan',
'gazebo.msgs.RaySensor',
callback)
yield From(subscriber.wait_for_connection())
while True:
yield From(trollius.sleep(1.00))
if __name__ == "__main__":
loop = trollius.get_event_loop()
loop.run_until_complete(publish_loop())
| 23.571429 | 52 | 0.69697 | [
"Apache-2.0"
] | masayoshi-nakamura/CognitiveArchitectureLecture | examples/pygazebo_sample/ray_sensor.py | 660 | Python |
from django.test import TestCase
from django.contrib.sites.models import Site
from django.utils import unittest
from django.conf import settings
from .factories import GalleryFactory, PhotoFactory
class SitesTest(TestCase):
urls = 'photologue.tests.test_urls'
def setUp(self):
"""
Create two example sites that we can use to test what gets displayed
where.
"""
super(SitesTest, self).setUp()
self.site1, created1 = Site.objects.get_or_create(
domain="example.com", name="example.com")
self.site2, created2 = Site.objects.get_or_create(
domain="example.org", name="example.org")
with self.settings(PHOTOLOGUE_MULTISITE=True):
# Be explicit about linking Galleries/Photos to Sites."""
self.gallery1 = GalleryFactory(slug='test-gallery', sites=[self.site1])
self.gallery2 = GalleryFactory(slug='not-on-site-gallery')
self.photo1 = PhotoFactory(slug='test-photo', sites=[self.site1])
self.photo2 = PhotoFactory(slug='not-on-site-photo')
self.gallery1.photos.add(self.photo1, self.photo2)
# I'd like to use factory_boy's mute_signal decorator but that
# will only available once factory_boy 2.4 is released. So long
# we'll have to remove the site association manually
self.photo2.sites.clear()
def tearDown(self):
super(SitesTest, self).tearDown()
self.gallery1.delete()
self.gallery2.delete()
self.photo1.delete()
self.photo2.delete()
def test_basics(self):
""" See if objects were added automatically (by the factory) to the current site. """
self.assertEqual(list(self.gallery1.sites.all()), [self.site1])
self.assertEqual(list(self.photo1.sites.all()), [self.site1])
def test_auto_add_sites(self):
"""
Objects should not be automatically associated with a particular site when
``PHOTOLOGUE_MULTISITE`` is ``True``.
"""
with self.settings(PHOTOLOGUE_MULTISITE=False):
gallery = GalleryFactory()
photo = PhotoFactory()
self.assertEqual(list(gallery.sites.all()), [self.site1])
self.assertEqual(list(photo.sites.all()), [self.site1])
photo.delete()
with self.settings(PHOTOLOGUE_MULTISITE=True):
gallery = GalleryFactory()
photo = PhotoFactory()
self.assertEqual(list(gallery.sites.all()), [])
self.assertEqual(list(photo.sites.all()), [])
photo.delete()
def test_gallery_list(self):
response = self.client.get('/ptests/gallerylist/')
self.assertEqual(list(response.context['object_list']), [self.gallery1])
def test_gallery_detail(self):
response = self.client.get('/ptests/gallery/test-gallery/')
self.assertEqual(response.context['object'], self.gallery1)
response = self.client.get('/ptests/gallery/not-on-site-gallery/')
self.assertEqual(response.status_code, 404)
def test_photo_list(self):
response = self.client.get('/ptests/photolist/')
self.assertEqual(list(response.context['object_list']), [self.photo1])
def test_photo_detail(self):
response = self.client.get('/ptests/photo/test-photo/')
self.assertEqual(response.context['object'], self.photo1)
response = self.client.get('/ptests/photo/not-on-site-photo/')
self.assertEqual(response.status_code, 404)
def test_photo_archive(self):
response = self.client.get('/ptests/photo/')
self.assertEqual(list(response.context['object_list']), [self.photo1])
def test_photos_in_gallery(self):
"""
Only those photos are supposed to be shown in a gallery that are
also associated with the current site.
"""
response = self.client.get('/ptests/gallery/test-gallery/')
self.assertEqual(list(response.context['object'].public()), [self.photo1])
@unittest.skipUnless('django.contrib.sitemaps' in settings.INSTALLED_APPS,
'Sitemaps not installed in this project, nothing to test.')
def test_sitemap(self):
"""A sitemap should only show objects associated with the current site."""
response = self.client.get('/sitemap.xml')
# Check photos.
self.assertContains(response,
'<url><loc>http://example.com/ptests/photo/test-photo/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
self.assertNotContains(response,
'<url><loc>http://example.com/ptests/photo/not-on-site-photo/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
# Check galleries.
self.assertContains(response,
'<url><loc>http://example.com/ptests/gallery/test-gallery/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
self.assertNotContains(response,
'<url><loc>http://example.com/ptests/gallery/not-on-site-gallery/</loc>'
'<lastmod>2011-12-23</lastmod></url>')
def test_orphaned_photos(self):
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo2])
self.gallery2.photos.add(self.photo2)
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo2])
self.gallery1.sites.clear()
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo1, self.photo2])
self.photo1.sites.clear()
self.photo2.sites.clear()
self.assertEqual(list(self.gallery1.orphaned_photos()), [self.photo1, self.photo2])
| 41.071429 | 103 | 0.630261 | [
"BSD-3-Clause"
] | elena/django-photologue | photologue/tests/test_sites.py | 5,750 | Python |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Test the decompose pass"""
from sympy import pi
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.transpiler.passes import Decompose
from qiskit.converters import circuit_to_dag
from qiskit.extensions.standard import HGate
from qiskit.extensions.standard import ToffoliGate
from qiskit.test import QiskitTestCase
class TestDecompose(QiskitTestCase):
"""Tests the decompose pass."""
def test_basic(self):
"""Test decompose a single H into u2.
"""
qr = QuantumRegister(1, 'qr')
circuit = QuantumCircuit(qr)
circuit.h(qr[0])
dag = circuit_to_dag(circuit)
pass_ = Decompose(HGate)
after_dag = pass_.run(dag)
op_nodes = after_dag.op_nodes()
self.assertEqual(len(op_nodes), 1)
self.assertEqual(op_nodes[0].name, 'u2')
def test_decompose_only_h(self):
"""Test to decompose a single H, without the rest
"""
qr = QuantumRegister(2, 'qr')
circuit = QuantumCircuit(qr)
circuit.h(qr[0])
circuit.cx(qr[0], qr[1])
dag = circuit_to_dag(circuit)
pass_ = Decompose(HGate)
after_dag = pass_.run(dag)
op_nodes = after_dag.op_nodes()
self.assertEqual(len(op_nodes), 2)
for node in op_nodes:
self.assertIn(node.name, ['cx', 'u2'])
def test_decompose_toffoli(self):
"""Test decompose CCX.
"""
qr1 = QuantumRegister(2, 'qr1')
qr2 = QuantumRegister(1, 'qr2')
circuit = QuantumCircuit(qr1, qr2)
circuit.ccx(qr1[0], qr1[1], qr2[0])
dag = circuit_to_dag(circuit)
pass_ = Decompose(ToffoliGate)
after_dag = pass_.run(dag)
op_nodes = after_dag.op_nodes()
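        # The textbook CCX decomposition uses 6 CX, 2 H and 7 T/Tdg gates, i.e. 15 ops.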
self.assertEqual(len(op_nodes), 15)
for node in op_nodes:
self.assertIn(node.name, ['h', 't', 'tdg', 'cx'])
def test_decompose_conditional(self):
"""Test decompose a 1-qubit gates with a conditional.
"""
qr = QuantumRegister(1, 'qr')
cr = ClassicalRegister(1, 'cr')
circuit = QuantumCircuit(qr, cr)
circuit.h(qr).c_if(cr, 1)
circuit.x(qr).c_if(cr, 1)
dag = circuit_to_dag(circuit)
pass_ = Decompose(HGate)
after_dag = pass_.run(dag)
ref_circuit = QuantumCircuit(qr, cr)
ref_circuit.u2(0, pi, qr[0]).c_if(cr, 1)
ref_circuit.x(qr).c_if(cr, 1)
ref_dag = circuit_to_dag(ref_circuit)
self.assertEqual(after_dag, ref_dag)
| 32.452381 | 77 | 0.624725 | [
"Apache-2.0"
] | dominik-steenken/qiskit-terra | test/python/transpiler/test_decompose.py | 2,726 | Python |
"""
ASGI config for animeDjangoApp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'animeDjangoApp.settings')
application = get_asgi_application()
| 23.823529 | 78 | 0.792593 | [
"MIT"
] | peteryouu/animeDjango | animeDjangoApp/asgi.py | 405 | Python |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: service_method_same_name.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='service_method_same_name.proto',
package='',
syntax='proto3',
serialized_options=_b('Z\030service_method_same_name'),
serialized_pb=_b('\n\x1eservice_method_same_name.proto\"\x05\n\x03Msg2\x1c\n\x04\x45\x63ho\x12\x14\n\x04\x45\x63ho\x12\x04.Msg\x1a\x04.Msg\"\x00\x42\x1aZ\x18service_method_same_nameb\x06proto3')
)
_MSG = _descriptor.Descriptor(
name='Msg',
full_name='Msg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=39,
)
DESCRIPTOR.message_types_by_name['Msg'] = _MSG
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Msg = _reflection.GeneratedProtocolMessageType('Msg', (_message.Message,), dict(
DESCRIPTOR = _MSG,
__module__ = 'service_method_same_name_pb2'
# @@protoc_insertion_point(class_scope:Msg)
))
_sym_db.RegisterMessage(Msg)
DESCRIPTOR._options = None
_ECHO = _descriptor.ServiceDescriptor(
name='Echo',
full_name='Echo',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=41,
serialized_end=69,
methods=[
_descriptor.MethodDescriptor(
name='Echo',
full_name='Echo.Echo',
index=0,
containing_service=None,
input_type=_MSG,
output_type=_MSG,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_ECHO)
DESCRIPTOR.services_by_name['Echo'] = _ECHO
# @@protoc_insertion_point(module_scope)
| 23.602273 | 196 | 0.755416 | [
"ECL-2.0",
"Apache-2.0"
] | guide-century/twirp | internal/twirptest/service_method_same_name/service_method_same_name_pb2.py | 2,077 | Python |
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from datetime import timedelta
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2(iwreobf4b(-=h_p=^!obgxdgn3_*s!17=_3wc4dun9_y^q+c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'backend.core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_URL = "/api/v1/signin"
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": timedelta(minutes=60),
"REFRESH_TOKEN_LIFETIME": timedelta(days=2),
}
CORS_ORIGIN_WHITELIST = ["http://localhost:3000", "http://127.0.0.1:3000"]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static/")
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": ["rest_framework_simplejwt.authentication.JWTAuthentication"],
"DEFAULT_RENDERER_CLASSES": ["rest_framework.renderers.JSONRenderer"],
"TEST_REQUEST_DEFAULT_FORMAT": "json",
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.DjangoModelPermissions",),
}
| 27.1 | 100 | 0.707433 | [
"MIT"
] | jesusmaherrera/django-nuxtjs | backend/settings.py | 3,794 | Python |
import os
import wget
import tarfile
import argparse
import subprocess
from utils import create_manifest
from tqdm import tqdm
import shutil
parser = argparse.ArgumentParser(description='Processes and downloads LibriSpeech dataset.')
parser.add_argument("--target-dir", default='LibriSpeech_dataset/', type=str, help="Directory to store the dataset.")
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--files-to-use', default="train-clean-100.tar.gz,"
"train-clean-360.tar.gz,train-other-500.tar.gz,"
"dev-clean.tar.gz,dev-other.tar.gz,"
"test-clean.tar.gz,test-other.tar.gz", type=str,
help='list of file names to download')
parser.add_argument('--min-duration', default=1, type=int,
help='Prunes training samples shorter than the min duration (given in seconds, default 1)')
parser.add_argument('--max-duration', default=15, type=int,
help='Prunes training samples longer than the max duration (given in seconds, default 15)')
parser.add_argument('--remove-tarballs', action = 'store_true')
args = parser.parse_args()
LIBRI_SPEECH_URLS = {
"train": ["http://www.openslr.org/resources/12/train-clean-100.tar.gz",
"http://www.openslr.org/resources/12/train-clean-360.tar.gz",
"http://www.openslr.org/resources/12/train-other-500.tar.gz"],
"val": ["http://www.openslr.org/resources/12/dev-clean.tar.gz",
"http://www.openslr.org/resources/12/dev-other.tar.gz"],
"test_clean": ["http://www.openslr.org/resources/12/test-clean.tar.gz"],
"test_other": ["http://www.openslr.org/resources/12/test-other.tar.gz"]
}
def _preprocess_transcript(phrase):
return phrase.strip().upper()
def _process_file(wav_dir, txt_dir, base_filename, root_dir):
full_recording_path = os.path.join(root_dir, base_filename)
assert os.path.exists(full_recording_path) and os.path.exists(root_dir)
wav_recording_path = os.path.join(wav_dir, base_filename.replace(".flac", ".wav"))
subprocess.call(["sox {} -r {} -b 16 -c 1 {}".format(full_recording_path, str(args.sample_rate),
wav_recording_path)], shell=True)
# process transcript
txt_transcript_path = os.path.join(txt_dir, base_filename.replace(".flac", ".txt"))
transcript_file = os.path.join(root_dir, "-".join(base_filename.split('-')[:-1]) + ".trans.txt")
assert os.path.exists(transcript_file), "Transcript file {} does not exist.".format(transcript_file)
transcriptions = open(transcript_file).read().strip().split("\n")
transcriptions = {t.split()[0].split("-")[-1]: " ".join(t.split()[1:]) for t in transcriptions}
with open(txt_transcript_path, "w") as f:
key = base_filename.replace(".flac", "").split("-")[-1]
assert key in transcriptions, "{} is not in the transcriptions".format(key)
f.write(_preprocess_transcript(transcriptions[key]))
f.flush()
def main():
target_dl_dir = args.target_dir
if not os.path.exists(target_dl_dir):
os.makedirs(target_dl_dir)
files_to_dl = args.files_to_use.strip().split(',')
for split_type, lst_libri_urls in LIBRI_SPEECH_URLS.items():
split_dir = os.path.join(target_dl_dir, split_type)
if not os.path.exists(split_dir):
os.makedirs(split_dir)
split_wav_dir = os.path.join(split_dir, "wav")
if not os.path.exists(split_wav_dir):
os.makedirs(split_wav_dir)
split_txt_dir = os.path.join(split_dir, "txt")
if not os.path.exists(split_txt_dir):
os.makedirs(split_txt_dir)
extracted_dir = os.path.join(split_dir, "LibriSpeech")
if os.path.exists(extracted_dir):
shutil.rmtree(extracted_dir)
for url in lst_libri_urls:
# check if we want to dl this file
dl_flag = False
for f in files_to_dl:
if url.find(f) != -1:
dl_flag = True
if not dl_flag:
print("Skipping url: {}".format(url))
continue
filename = url.split("/")[-1]
target_filename = os.path.join(split_dir, filename)
if not os.path.exists(target_filename):
wget.download(url, split_dir)
print("Unpacking {}...".format(filename))
tar = tarfile.open(target_filename)
tar.extractall(split_dir)
tar.close()
if args.remove_tarballs:
os.remove(target_filename)
print("Converting flac files to wav and extracting transcripts...")
assert os.path.exists(extracted_dir), "Archive {} was not properly uncompressed.".format(filename)
for root, subdirs, files in tqdm(os.walk(extracted_dir)):
for f in files:
if f.find(".flac") != -1:
_process_file(wav_dir=split_wav_dir, txt_dir=split_txt_dir,
base_filename=f, root_dir=root)
print("Finished {}".format(url))
shutil.rmtree(extracted_dir)
if split_type == 'train': # Prune to min/max duration
create_manifest(split_dir, 'libri_' + split_type + '_manifest.csv', args.min_duration, args.max_duration)
else:
create_manifest(split_dir, 'libri_' + split_type + '_manifest.csv')
if __name__ == "__main__":
main()
| 49.236842 | 117 | 0.623018 | [
"MIT"
] | vadimkantorov/deepspeech.pytorch | data/librispeech.py | 5,613 | Python |
#!/usr/bin/python3
import datetime
import inquirer
import requests
import re
import csv
import os
import json
repositories = [
"beagle",
"beagle-web-react",
"beagle-web-core",
"beagle-web-angular",
"charlescd",
"charlescd-docs",
"horusec",
"horusec-engine-docs",
"ritchie-cli",
"ritchie-formulas",
"ritchie-formulas-demo"
]
def run(token):
insights = []
authorization = f"token {token}"
headers = {
"Accept": "application/vnd.github.v3+json",
"Authorization" : authorization,
}
for repository in repositories:
repo_url = f"https://api.github.com/repos/ZupIT/{repository}"
print(f"🐙 Getting insights for ZupIT's \033[36m{repository}\033[0m repository.")
traffic = requests.get(
url = repo_url + "/traffic/views",
headers = headers,
).json()
clones = requests.get(
url = repo_url + "/traffic/clones",
headers = headers,
).json()
contributors = requests.get(
url = repo_url + "/contributors",
headers = headers,
).json()
repo_stats = requests.get(
url = repo_url,
headers = headers,
).json()
try:
clones = clones["count"]
except (IndexError, KeyError) :
clones = "-"
try:
forks = repo_stats["forks_count"]
except (IndexError, KeyError):
forks = "-"
try:
stars = repo_stats["stargazers_count"]
except (IndexError, KeyError):
stars = "-"
try:
watchers = repo_stats["subscribers_count"]
except (IndexError, KeyError):
watchers = "-"
try:
views = traffic["count"]
except (IndexError, KeyError):
views = "-"
try:
uniques = traffic["uniques"]
except (IndexError, KeyError):
uniques = "-"
insights.append(
{
"repo": repository,
"views": views,
"uniques": uniques,
"clones": clones,
"contributors": len(contributors),
"contributors_list": contributors,
"forks": forks,
"stars": stars,
"watchers": watchers,
}
)
create_csv_file(insights)
def get_repositories(url, headers):
result = []
r = requests.get(
url = url,
headers = headers
)
if "next" in r.links :
result += get_repositories(r.links["next"]["url"], headers)
for data in r.json():
result.append(data["name"])
return result
def create_csv_file(insights):
current_date = datetime.datetime.now()
current_date_format = current_date.strftime("%m-%d-%Y-%Hh%M")
current_date_format_string = str(current_date_format)
filename = "zup-insights-" + current_date_format_string + ".csv"
file = open(filename, 'w+', newline ='')
with file:
header = ["Repository", "Views (14d)", "Uniques (14d)", "Clones (14d)", "Contributors", "Forks", "Stars", "Watchers"]
writer = csv.DictWriter(file, fieldnames = header)
writer.writeheader()
file = open(filename, 'a+', newline ='')
with file:
for insight in insights:
data = [[insight["repo"], insight["views"], insight["uniques"], insight["clones"], insight["contributors"], insight["forks"], insight["stars"], insight["watchers"]]]
write = csv.writer(file)
write.writerows(data)
print(f"\n\033[1m✅ Successfully generated \033[4m{filename}\033[0m\033[1m file for ZupIT's repositories\033[0m")
| 26.43662 | 177 | 0.546883 | [
"Apache-2.0"
] | GuillaumeFalourd/formulas-insights | github/get/zup-insights/src/formula/formula.py | 3,759 | Python |
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2017, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements the Check Constraint Module."""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases as database
from flask import render_template, make_response, request, jsonify
from flask_babel import gettext as _
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.tables.constraints.type \
import ConstraintRegistry
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
class CheckConstraintModule(CollectionNodeModule):
"""
class CheckConstraintModule(CollectionNodeModule):
This class represents The Check Constraint Module.
Methods:
-------
* __init__(*args, **kwargs)
- Initialize the Check Constraint Module.
* get_nodes(gid, sid, did, scid)
- Generate the Check Constraint collection node.
* node_inode(gid, sid, did, scid)
- Returns Check Constraint node as leaf node.
* script_load()
- Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
NODE_TYPE = 'check_constraints'
COLLECTION_LABEL = _("Check Constraints")
def __init__(self, *args, **kwargs):
super(CheckConstraintModule, self).__init__(*args, **kwargs)
self.min_ver = None
self.max_ver = None
def get_nodes(self, gid, sid, did, scid, doid):
"""
Generate the Check Constraint collection node.
"""
yield self.generate_browser_collection_node(doid)
@property
def node_inode(self):
"""
Returns Check Constraint node as leaf node.
"""
return False
@property
def script_load(self):
"""
Load the module script for the Check Constraint, when any of the
Check node is initialized.
"""
return database.DatabaseModule.NODE_TYPE
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
return [
render_template(
"check_constraint/css/check_constraint.css",
node_type=self.node_type
)
]
blueprint = CheckConstraintModule(__name__)
class CheckConstraintView(PGChildNodeView):
"""
class CheckConstraintView(PGChildNodeView):
This class inherits PGChildNodeView to get the different routes for
the module.
The class is responsible to Create, Read, Update and Delete operations for
the Check Constraint.
Methods:
-------
* module_js():
- Load JS file (check-constraints.js) for this module.
* check_precondition(f):
- Works as a decorator.
- Checks database connection status.
- Attach connection object and template path.
* list(gid, sid, did, scid, doid):
- List the Check Constraints.
* nodes(gid, sid, did, scid):
- Returns all the Check Constraints to generate Nodes in the browser.
* properties(gid, sid, did, scid, doid):
- Returns the Check Constraint properties.
* create(gid, sid, did, scid):
- Creates a new Check Constraint object.
* update(gid, sid, did, scid, doid):
- Updates the Check Constraint object.
* delete(gid, sid, did, scid, doid):
- Drops the Check Constraint object.
* sql(gid, sid, did, scid, doid=None):
- Returns the SQL for the Check Constraint object.
* msql(gid, sid, did, scid, doid=None):
- Returns the modified SQL.
* get_sql(gid, sid, data, scid, tid=None):
      - Generates the SQL statements to create/update the Check Constraint
        object.
* dependents(gid, sid, did, scid, tid, cid):
- Returns the dependents for the Check Constraint object.
* dependencies(gid, sid, did, scid, tid, cid):
- Returns the dependencies for the Check Constraint object.
* validate_check_constraint(gid, sid, did, scid, tid, cid):
- Validate check constraint.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'cid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'delete': [{'delete': 'delete'}],
'children': [{'get': 'children'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'validate': [{'get': 'validate_check_constraint'}],
})
def module_js(self):
"""
Load JS file (check_constraint.js) for this module.
"""
return make_response(
render_template(
"check_constraint/js/check_constraint.js",
_=_
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
Works as a decorator.
Checks database connection status.
Attach connection object and template path.
"""
@wraps(f)
def wrap(*args, **kwargs):
self = args[0]
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=kwargs['tid'])
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
return f(*args, **kwargs)
return wrap
def end_transaction(self):
"""
End database transaction.
Returns:
"""
SQL = "END;"
self.conn.execute_scalar(SQL)
@check_precondition
def list(self, gid, sid, did, scid, tid, cid=None):
"""
List the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Id
"""
try:
res = self.get_node_list(gid, sid, did, scid, tid, cid)
return ajax_response(
response=res,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_node_list(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all check constraints
nodes within that collection as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
          cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
SQL = render_template("/".join([self.template_path, 'properties.sql']),
tid=tid)
status, res = self.conn.execute_dict(SQL)
return res['rows']
@check_precondition
def node(self, gid, sid, did, scid, tid, cid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
cid=cid)
status, rset = self.conn.execute_2darray(SQL)
if len(rset['rows']) == 0:
return gone(_("""Could not find the check constraint."""))
if "convalidated" in rset['rows'][0] and rset['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon=icon,
valid=valid
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
Returns all the Check Constraints.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check constraint Id.
"""
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return make_json_response(
data=res,
status=200
)
def get_nodes(self, gid, sid, did, scid, tid, cid=None):
"""
This function returns all event check constraint as a list.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
driver = get_driver(PG_DEFAULT_DRIVER)
self.manager = driver.connection_manager(sid)
self.conn = self.manager.connection(did=did)
self.qtIdent = driver.qtIdent
# Set the template path for the SQL scripts
self.template_path = 'check_constraint/sql/#{0}#'.format(self.manager.version)
SQL = render_template("/".join([self.template_path,
'get_parent.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
self.schema = rset['rows'][0]['schema']
self.table = rset['rows'][0]['table']
res = []
SQL = render_template("/".join([self.template_path,
'nodes.sql']),
tid=tid)
status, rset = self.conn.execute_2darray(SQL)
for row in rset['rows']:
if "convalidated" in row and row["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon=icon,
valid=valid
))
return res
@check_precondition
def properties(self, gid, sid, did, scid, tid, cid):
"""
Returns the Check Constraints property.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
          tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
return ajax_response(
response=data,
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid, cid=None):
"""
This function will create a primary key.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
scid: Schema ID
tid: Table ID
cid: Check constraint ID
Returns:
"""
required_args = ['consrc']
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for k, v in data.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except (ValueError, TypeError, KeyError):
data[k] = v
for arg in required_args:
if arg not in data or data[arg] == '':
return make_json_response(
status=400,
success=0,
errormsg=_(
"Could not find the required parameter (%s)." % arg
)
)
data['schema'] = self.schema
data['table'] = self.table
try:
if 'name' not in data or data['name'] == "":
SQL = "BEGIN;"
# Start transaction.
status, res = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
# The below SQL will execute CREATE DDL only
SQL = render_template(
"/".join([self.template_path, 'create.sql']),
data=data
)
status, msg = self.conn.execute_scalar(SQL)
if not status:
self.end_transaction()
return internal_server_error(errormsg=msg)
if 'name' not in data or data['name'] == "":
sql = render_template(
"/".join([self.template_path,
'get_oid_with_transaction.sql'],
),
tid=tid)
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
self.end_transaction()
data['name'] = res['rows'][0]['name']
else:
sql = render_template("/".join([self.template_path, 'get_oid.sql']),
tid=tid,
name=data['name'])
status, res = self.conn.execute_dict(sql)
if not status:
self.end_transaction()
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = "icon-check_constraints_bad"
valid = False
else:
icon = "icon-check_constraints"
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
res['rows'][0]['oid'],
tid,
data['name'],
icon=icon,
valid=valid
)
)
except Exception as e:
self.end_transaction()
return make_json_response(
status=400,
success=0,
errormsg=e
)
@check_precondition
def delete(self, gid, sid, did, scid, tid, cid):
"""
Drops the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
          tid: Table Id
cid: Check Constraint Id
"""
try:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if not res['rows']:
return make_json_response(
success=0,
errormsg=_(
'Error: Object not found.'
),
info=_(
'The specified check constraint could not be found.\n'
)
)
data = res['rows'][0]
SQL = render_template("/".join([self.template_path,
'delete.sql']),
data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check Constraint dropped."),
data={
'id': tid,
'scid': scid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, cid):
"""
Updates the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
data['schema'] = self.schema
data['table'] = self.table
SQL, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not SQL:
return name
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
sql = render_template("/".join([self.template_path, 'get_name.sql']),
cid=cid)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
if "convalidated" in res['rows'][0] and res['rows'][0]["convalidated"]:
icon = 'icon-check_constraints_bad'
valid = False
else:
icon = 'icon-check_constraints'
valid = True
return jsonify(
node=self.blueprint.generate_browser_node(
cid,
tid,
name,
icon=icon,
valid=valid
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def sql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the SQL for the Check Constraint object.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
_("Could not find the object on the server.")
)
data = res['rows'][0]
data['schema'] = self.schema
data['table'] = self.table
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
sql_header = u"-- Constraint: {0}\n\n-- ".format(data['name'])
sql_header += render_template(
"/".join([self.template_path, 'delete.sql']),
data=data)
sql_header += "\n"
SQL = sql_header + SQL
return ajax_response(response=SQL)
@check_precondition
def msql(self, gid, sid, did, scid, tid, cid=None):
"""
Returns the modified SQL.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
Check Constraint object in json format.
"""
data = {}
for k, v in request.args.items():
try:
data[k] = json.loads(v, encoding='utf-8')
except ValueError:
data[k] = v
data['schema'] = self.schema
data['table'] = self.table
try:
sql, name = self.get_sql(gid, sid, data, scid, tid, cid)
if not sql:
return name
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
except Exception as e:
return internal_server_error(errormsg=str(e))
def get_sql(self, gid, sid, data, scid, tid, cid=None):
"""
Generates the SQL statements to create/update the Check Constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
if cid is not None:
SQL = render_template("/".join([self.template_path,
'properties.sql']),
tid=tid, cid=cid)
status, res = self.conn.execute_dict(SQL)
if not status:
return False, internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return False, gone(
_("Could not find the object on the server.")
)
old_data = res['rows'][0]
required_args = ['name']
for arg in required_args:
if arg not in data:
data[arg] = old_data[arg]
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data, conn=self.conn
)
else:
required_args = ['consrc']
for arg in required_args:
if arg not in data:
return _('-- definition incomplete')
elif isinstance(data[arg], list) and len(data[arg]) < 1:
return _('-- definition incomplete')
SQL = render_template("/".join([self.template_path,
'create.sql']),
data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, cid):
"""
This function get the dependents and return ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependents_result = self.get_dependents(self.conn, cid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, cid):
"""
This function get the dependencies and return ajax response
for the Check Constraint node.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
"""
dependencies_result = self.get_dependencies(self.conn, cid)
return ajax_response(
response=dependencies_result,
status=200
)
@check_precondition
def validate_check_constraint(self, gid, sid, did, scid, tid, cid):
"""
Validate check constraint.
Args:
gid: Server Group Id
sid: Server Id
did: Database Id
scid: Schema Id
tid: Table Id
cid: Check Constraint Id
Returns:
"""
data = {}
try:
data['schema'] = self.schema
data['table'] = self.table
sql = render_template("/".join([self.template_path, 'get_name.sql']), cid=cid)
status, res = self.conn.execute_scalar(sql)
if not status:
return internal_server_error(errormsg=res)
data['name'] = res
sql = render_template("/".join([self.template_path, 'validate.sql']), data=data)
status, res = self.conn.execute_dict(sql)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=_("Check constraint updated."),
data={
'id': cid,
'tid': tid,
'scid': scid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
constraint = ConstraintRegistry(
'check_constraint', CheckConstraintModule, CheckConstraintView
)
CheckConstraintView.register_node_view(blueprint)
| 30.934569 | 92 | 0.501251 | [
"MIT"
] | jhkuang11/UniTrade | code/venv/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tables/constraints/check_constraint/__init__.py | 28,367 | Python |
import json
import subprocess
import ipaddress
import pytest
@pytest.fixture
def add_host():
def _inner(hostname, rack, rank, appliance):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
# First use of the fixture adds backend-0-0
_inner('backend-0-0', '0', '0', 'backend')
# Then return the inner function, so we can call it inside the test
# to get more hosts added
return _inner
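# Hypothetical example (not part of the original suite) of how a test would
# consume the factory fixture above: backend-0-0 already exists by the time the
# test body runs, and the returned inner function adds further hosts on demand.
#
#     def test_two_backends(host, add_host):
#         add_host('backend-0-1', '0', '1', 'backend')
#         result = host.run('stack list host backend-0-1 output-format=json')
#         assert result.rc == 0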
@pytest.fixture
def add_host_with_interface():
def _inner(hostname, rack, rank, appliance, interface):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack add host interface {hostname} interface={interface}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy interface')
_inner('backend-0-0', '0', '0', 'backend', 'eth0')
return _inner
@pytest.fixture
def add_ib_switch():
def _inner(hostname, rack, rank, appliance, make, model, sw_type):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack set host attr {hostname} attr=component.make value={make}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set make')
cmd = f'stack set host attr {hostname} attr=component.model value={model}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set model')
cmd = f'stack set host attr {hostname} attr=switch_type value={sw_type}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set switch type')
_inner('switch-0-0', '0', '0', 'switch', 'Mellanox', 'm7800', 'infiniband')
return _inner
@pytest.fixture
def add_ib_switch_partition():
def _inner(switch_name, partition_name, options):
cmd = f'stack add switch partition {switch_name} name={partition_name} '
if options is not None:
cmd += f'options={options}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy switch partition')
_inner('switch-0-0', 'Default', '')
return _inner
@pytest.fixture
def add_switch():
def _inner(hostname, rack, rank, appliance, make, model):
cmd = f'stack add host {hostname} rack={rack} rank={rank} appliance={appliance}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to add a dummy host')
cmd = f'stack set host attr {hostname} attr=component.make value={make}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set make')
cmd = f'stack set host attr {hostname} attr=component.model value={model}'
result = subprocess.run(cmd.split())
if result.returncode != 0:
pytest.fail('unable to set model')
_inner('switch-0-0', '0', '0', 'switch', 'fake', 'unrl')
return _inner
@pytest.fixture
def add_appliance(host):
def _inner(name):
result = host.run(f'stack add appliance {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy appliance "{name}"')
# First use of the fixture adds appliance "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more appliances added
return _inner
@pytest.fixture
def add_box(host):
def _inner(name):
result = host.run(f'stack add box {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy box "{name}"')
# First use of the fixture adds box "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more boxes added
return _inner
@pytest.fixture
def add_cart(host):
def _inner(name):
result = host.run(f'stack add cart {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy cart "{name}"')
# First use of the fixture adds cart "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more carts added
return _inner
@pytest.fixture
def add_environment(host):
def _inner(name):
result = host.run(f'stack add environment {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy environment "{name}"')
# First use of the fixture adds environment "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more environments added
return _inner
@pytest.fixture
def add_group(host):
def _inner(name):
result = host.run(f'stack add group {name}')
if result.rc != 0:
pytest.fail(f'unable to add dummy group "{name}"')
# First use of the fixture adds group "test"
_inner('test')
# Then return the inner function, so we can call it inside the test
# to get more groups added
return _inner
@pytest.fixture
def add_network(host):
"""Adds a network to the stacki db. For historical reasons the first test network this creates is pxe=False."""
def _inner(name, address, pxe = False):
result = host.run(
f'stack add network {name} address={address} mask=255.255.255.0 pxe={pxe}'
)
if result.rc != 0:
pytest.fail(f'unable to add dummy network "{name}"')
# First use of the fixture adds network "test"
_inner('test', '192.168.0.0')
# Then return the inner function, so we can call it inside the test
# to get more networks added
return _inner
@pytest.fixture
def add_host_with_net(host, add_host_with_interface, add_network):
"""Adds a host with a network. The first network this adds defaults to pxe=True."""
def _inner(hostname, rack, rank, appliance, interface, ip, network, address, pxe):
# Add the host with an interface.
add_host_with_interface(hostname = hostname, rack = rack, rank = rank, appliance = appliance, interface = interface)
# Add the network.
add_network(name = network, address = address, pxe = pxe)
# Associate it to the interface.
result = host.run(f"stack set host interface network {hostname} network={network} interface={interface}")
assert result.rc == 0
# Set the interface IP
result = host.run(f"stack set host interface ip {hostname} ip={ip} network={network}")
assert result.rc == 0
# Add it to the frontend, because a lot of things in stacki expect backends to share networks with
# frontends.
result = host.run("stack list host interface a:frontend output-format=json")
assert result.rc == 0
# Try to figure out if the frontend has an interface on this network already.
interface_on_network = False
for frontend_interface in json.loads(result.stdout):
if frontend_interface["network"] == network:
interface_on_network = True
break
if interface_on_network:
return
# Need to add an interface to the frontend on this network. Make sure we choose the next latest
# interface name so we don't clash with other interface names.
latest_interface = max(frontend_interface["interface"] for frontend_interface in json.loads(result.stdout))
# This should be a string, so we tokenize it into characters
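        # e.g. a latest_interface of "eth1" becomes ['e', 't', 'h', '1'] and then "eth2"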
new_interface = list(latest_interface)
new_interface[-1] = str(int(new_interface[-1]) + 1)
new_interface = "".join(new_interface)
result = host.run(f"stack add host interface a:frontend interface={new_interface} network={network} ip={ipaddress.ip_address(ip) + 1}")
assert result.rc == 0
# First use of the add_host_with_interface fixture adds backend-0-0 with interface eth0.
# The first use of add_network adds a network called test, but that's not PXE so we don't want to use it.
# So the first call of this fixture needs to remove the test network, recreate it as a PXE network, and
# associate the network with the host's interface.
result = host.run(f"stack remove network test")
assert result.rc == 0
add_network(name = "test", address = "192.168.0.0", pxe = True)
result = host.run(f"stack set host interface network backend-0-0 network=test interface=eth0 ip=192.168.0.3")
assert result.rc == 0
# Add a frontend interface on the network.
result = host.run(f"stack add host interface a:frontend interface=eth2 network=test ip=192.168.0.2")
assert result.rc == 0
return _inner
@pytest.fixture(
params = (
("", "exec=True"),
("", "| bash -x"),
("document=", "exec=True"),
("document=", "| bash -x"),
),
ids = ("stack_load_exec", "stack_load_bash", "stack_load_document_exec", "stack_load_document_bash"),
)
def stack_load(request, host):
"""This fixture is used to run `stack load` on the host during integration tests.
There are 4 essentially equivalent ways of loading and running a dump.json. Using
this test fixture ensures that all 4 are tested. I.E:
stack load dump_file exec=True
stack load document=dump_file exec=True
stack load dump_file | bash -x
stack load document=dump_file | bash -x
"""
param_string, exec_string = request.param
def _load(dump_file, **kwargs):
if "exec" in kwargs:
raise ValueError("Cannot pass exec param to this fixture. It handles it for you.")
if "document" in kwargs:
raise ValueError("Cannot pass document param to this fixture. It handles it for you.")
kwargs_string = " ".join(f"{key}={value}" for key, value in kwargs.items())
return host.run(f"stack load {param_string}{dump_file} {exec_string} {kwargs_string}")
return _load
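# Hypothetical usage of the fixture above (the dump file name is illustrative
# only): a single test body is re-run for each of the four parametrized
# load/exec variants.
#
#     def test_load_dump(stack_load):
#         result = stack_load('dump.json')
#         assert result.rc == 0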
@pytest.fixture
def fake_local_firmware_file(tmp_path_factory):
"""Creates a fake local firmware file and returns a pathlib.Path object that points to it."""
# Add a fake piece of firmware.
fake_firmware_file = tmp_path_factory.mktemp("fake_firmware") / "foo.img"
fake_firmware_file.write_text("foofakefirmware")
return fake_firmware_file
| 33.694158 | 137 | 0.715553 | [
"BSD-3-Clause"
] | anooprajendra/stacki | test-framework/test-suites/integration/tests/fixtures/add_data.py | 9,805 | Python |
from setuptools import setup, find_packages
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='nameko-couchbase',
version='0.1.5',
description='Nameko dependency for Couchbase',
url='https://github.com/geoffjukes/nameko-couchbase',
author='Geoff Jukes',
license="Apache License, Version 2.0",
classifiers=[
"Programming Language :: Python",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Intended Audience :: Developers",
],
keywords='nameko dependency couchbase',
py_modules=['nameko_couchbase'],
install_requires=['couchbase==2.5.9'],
) | 33.814815 | 71 | 0.638554 | [
"Apache-2.0"
] | geoffjukes/nameko-couchbase | setup.py | 913 | Python |
import pytest
def test_terms():
from pycldf.terms import TERMS
assert 'alignment' in TERMS.properties
with pytest.raises(ValueError):
TERMS.is_cldf_uri('http://cldf.clld.org/404')
assert not TERMS.is_cldf_uri('http://example.org')
assert TERMS.is_cldf_uri('http://cldf.clld.org/v1.0/terms.rdf#source')
assert len(TERMS.properties) + len(TERMS.classes) == len(TERMS)
assert len(TERMS.modules) + len(TERMS.components) == len(TERMS.classes)
assert 'LanguageTable' in TERMS.components
assert 'LanguageTable' not in TERMS.modules
assert 'Wordlist' in TERMS.modules
| 29.285714 | 75 | 0.710569 | [
"Apache-2.0"
] | SimonGreenhill/pycldf | tests/test_terms.py | 615 | Python |
#!/usr/bin/python
# Copyright (c) 2018-2019 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
# The Government's rights to use, modify, reproduce, release, perform, display,
# or disclose this software are subject to the terms of the Apache License as
# provided in Contract No. 8F-30005.
# Any reproduction of computer software, computer software documentation, or
# portions thereof marked with this legend must also reproduce the markings.
"""
This script runs the rdb tests. From the command line the tests are run with:
server:
orterun -N 1 --report-uri /tmp/urifile -x LD_LIBRARY_PATH
daos_server -o <builddir>/utils/config/examples/daos_server_rdb_tests.yml
start -d ./ -t 1 -m vos,rdb,rsvc,mgmt,rdbt
client:
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt init
--group=daos_server --uuid <uuid>
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt test --update
--group=daos_server
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt test
--group=daos_server
orterun --ompi-server file:/tmp/urifile <debug_cmds> -np 1 rdbt fini
--group=daos_server
Where debug_cmds = -x D_LOG_MASK=DEBUG,RPC=ERR,MEM=ERR -x DD_SUBSYS=all
-x DD_MASK=all
This script automates the process.
"""
import subprocess
import os
import sys
import time
import signal
import shlex
build_root = os.path.join(sys.path[0], "../../../")
sys.path.insert(0, os.path.join(build_root, "utils/sl"))
from build_info import BuildInfo
from env_modules import load_mpi
from distutils.spawn import find_executable
urifile = "/tmp/urifile"
pid_file = "/tmp/" + str(os.getpid()) + "_output"
# To avoid repetition of parts of the oretrun command.
client_prefix = ""
client_suffix = ""
# In case orterun has quit but the daos_server is still running, save the PID.
#daos_server = None
class ServerFailedToStart(Exception):
pass
class ServerTimedOut(Exception):
pass
def set_logfile(config, logfile):
    """Point the log_file entry in the server config at the given log file."""
    with open(config, "r") as f:
        contents = f.read()
    contents = contents.replace(" log_file: /tmp/server.log",
                                " log_file: {}".format(logfile))
    with open(config, "w") as f:
        f.write(contents)
def start_server(binfo, orterun):
"""
Start the DAOS server with an orterun command as a child process. We use
subprocess.Popen since it returns control to the calling process and
provides access to the polling feature.
"""
config_file = os.path.join(build_root, "utils", "config", "examples",
"daos_server_unittests.yml")
log_file = os.path.join(binfo.get("PREFIX"),
"TESTING",
"daos-rdb-test.log")
set_logfile(config_file, log_file) # set D_LOG_FILE through config file
print("Starting DAOS server\n")
cmd = orterun
cmd += " -N 1 --report-uri {} ".format(urifile)
cmd += "-x LD_LIBRARY_PATH "
cmd += binfo.get("PREFIX") + "/bin/daos_server "
cmd += "--debug --config {} ".format(config_file)
cmd += "start -d ./ -t 1 -m vos,rdb,rsvc,mgmt,rdbt -i --recreate-superblocks "
print("Running command:\n{}".format(cmd))
sys.stdout.flush()
try:
p = subprocess.Popen(shlex.split(cmd))
return p
except Exception as e:
raise ServerFailedToStart("Server failed to start:\n{}".format(e))
def run_client(segment_type):
"""
There are four client segments to be run, init, update, test, and fini.
The command line varies slightly for each and in some cases there is a
tail after the suffix.
"""
tail = ""
if segment_type == "init":
        uuid = subprocess.check_output(['uuidgen']).decode().strip()
tail = " --uuid {}".format(uuid)
elif segment_type == "update":
segment_type = "test --update"
cmd = client_prefix + segment_type + client_suffix + tail
print("Running command:\n{}".format(cmd))
rc = os.system(cmd)
if rc:
raise Exception("command {} failed with return code {}\n".format(
cmd, rc))
return 0
def pid_info(output_line):
"""
Take a line of 'ps -o pid,comm' output and return the PID number and name.
The line looks something like:
9108 orterun
or
10183 daos_server
Need both items. Return a tuple (name, pid)
Note: there could be leading spaces on the pid.
"""
info = output_line.lstrip().split()
try:
return info[1], info[0]
except Exception as e:
print("Unable to retrieve PID info from {}".format(output_line))
return "", None
def find_child(parent_pid, child_name):
"""
Given a PID and a process name, see if this PID has any children with the
specified name. If is does, return the child PID. If not, return None.
ps -o pid,comm --no-headers --ppid <pid> gives output that looks like this:
41108 orterun
41519 ps
"""
child_pid = None
cmd = ['ps', '-o', 'pid,comm', '--no-headers', '--ppid', str(parent_pid)]
try:
res = subprocess.check_output(cmd)
except subprocess.CalledProcessError:
# parent_pid has no children
return None
except Exception as e:
print("ps command failed with: {}".format(e))
return None
# Get rid of the trailing blank line from subprocess.check_output
res = [s for s in res.splitlines() if s]
for line in res:
try:
current_name, current_pid = pid_info(line)
except Exception as e:
print("Unable to extract pid and process name from {}".format(
line))
continue
if current_pid is None:
return None
if current_name.startswith(child_name):
# This is the droid, uh, child we're looking for
return current_pid
child_pid = find_child(current_pid, child_name)
if child_pid is not None:
return child_pid
return child_pid
def daos_server_pid():
"""
Find the pid for the daos_server. Start drilling down from the parent
(current) process until we get output where one line contains
"daos_io_server" or "daos_server".
"""
parent_pid = os.getpid()
return find_child(parent_pid, "daos_")
def cleanup(daos_server):
""" Perform cleanup operations. Shut down the DAOS server by killing the
child processes that have been created. If the daos_server process is
killed, so are the processes for daos_io_server and orterun (theoretically).
It has been observed on occasion to go zombie until orterun itself is
killed.
"""
# Get PID of the daos server
cmd = "{} signal.SIGKILL".format(daos_server)
try:
os.kill(int(daos_server), signal.SIGKILL)
print("Shut down DAOS server with os.kill({} signal.SIGKILL)".format(
daos_server))
except Exception as e:
if daos_server is None:
print("No PID was found for the DAOS server")
elif "No such process" in e:
print("The daos_server process is no longer available"
" and could not be killed.")
else:
print("Unable to shut down DAOS server: {}".format(e))
if __name__ == "__main__":
"""
Start a DAOS server and then run the four stages of the client.
"""
print("Running rdb tests")
rc = 0
    binfo = BuildInfo(os.path.join(build_root, ".build_vars.json"))
debug_cmds = "-x D_LOG_MASK=DEBUG,RPC=ERR,MEM=ERR " + \
"-x DD_SUBSYS=all -x DD_MASK=all"
load_mpi('openmpi')
orterun = find_executable('orterun')
if orterun is None:
raise ServerFailedToStart("No orterun installed")
try:
# Server operations
p = start_server(binfo, orterun)
counter = 0
daos_server = daos_server_pid()
while daos_server is None:
if counter >= 120:
raise ServerTimedOut("No DAOS server process detected before "\
"timeout")
counter += 1
time.sleep(1)
daos_server = daos_server_pid()
# Give daos_io_server some time to get ready.
time.sleep(10)
print("DAOS server started")
# Client operations
        client_prefix = "{} --ompi-server " \
                        "file:{} {} --np 1 rdbt ".format(
                            orterun, urifile, debug_cmds)
client_suffix = " --group=daos_server"
# orterun is called for the client four times: init, update, test,
# and fini
client_segments = ['init', 'update', 'test', 'fini']
try:
for segment in client_segments:
run_client(segment)
print("SUCCESS\nrbd tests PASSED")
except Exception as e:
print("rbd tests FAILED")
print("{}".format(e))
rc = 1
except ServerFailedToStart as e:
print("ServerFailedToStart: {}".format(e.message))
print("FAIL")
rc = 1
except ServerTimedOut as e:
print("ServerTimedOut: {}".format(e))
print("FAIL")
rc = 1
finally:
# Shut down the DAOS server when we are finished.
try:
if not p or p.poll() is not None:
# If the server is dead, somthing went very wrong
print("The server is unexpectedly absent.")
print("FAIL")
rc = 1
except NameError:
rc = 1
try:
cleanup(daos_server)
except NameError:
# The daos_server was never defined.
rc = 1
sys.exit(rc)
| 34.383871 | 82 | 0.638334 | [
"Apache-2.0"
] | marcelarosalesj/daos | src/rdb/tests/rdb_test_runner.py | 10,659 | Python |
"""
Self-supervised learning samplers.
"""
# Authors: Hubert Banville <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from . import RecordingSampler
class RelativePositioningSampler(RecordingSampler):
"""Sample examples for the relative positioning task from [Banville2020]_.
Sample examples as tuples of two window indices, with a label indicating
whether the windows are close or far, as defined by tau_pos and tau_neg.
Parameters
----------
metadata : pd.DataFrame
See RecordingSampler.
tau_pos : int
Size of the positive context, in samples. A positive pair contains two
windows x1 and x2 which are separated by at most `tau_pos` samples.
tau_neg : int
Size of the negative context, in samples. A negative pair contains two
windows x1 and x2 which are separated by at least `tau_neg` samples and
at most `tau_max` samples. Ignored if `same_rec_neg` is False.
n_examples : int
Number of pairs to extract.
tau_max : int | None
See `tau_neg`.
same_rec_neg : bool
If True, sample negative pairs from within the same recording. If
False, sample negative pairs from two different recordings.
random_state : None | np.RandomState | int
Random state.
References
----------
.. [Banville2020] Banville, H., Chehab, O., Hyvärinen, A., Engemann, D. A.,
& Gramfort, A. (2020). Uncovering the structure of clinical EEG
signals with self-supervised learning.
arXiv preprint arXiv:2007.16104.
"""
def __init__(self, metadata, tau_pos, tau_neg, n_examples, tau_max=None,
same_rec_neg=True, random_state=None):
super().__init__(metadata, random_state=random_state)
self.tau_pos = tau_pos
self.tau_neg = tau_neg
self.tau_max = np.inf if tau_max is None else tau_max
self.n_examples = n_examples
self.same_rec_neg = same_rec_neg
if not same_rec_neg and self.n_recordings < 2:
raise ValueError('More than one recording must be available when '
'using across-recording negative sampling.')
def _sample_pair(self):
"""Sample a pair of two windows.
"""
# Sample first window
win_ind1, rec_ind1 = self.sample_window()
ts1 = self.metadata.iloc[win_ind1]['i_start_in_trial']
ts = self.info.iloc[rec_ind1]['i_start_in_trial']
# Decide whether the pair will be positive or negative
pair_type = self.rng.binomial(1, 0.5)
win_ind2 = None
if pair_type == 0: # Negative example
if self.same_rec_neg:
mask = (
((ts <= ts1 - self.tau_neg) & (ts >= ts1 - self.tau_max)) |
((ts >= ts1 + self.tau_neg) & (ts <= ts1 + self.tau_max))
)
else:
rec_ind2 = rec_ind1
while rec_ind2 == rec_ind1:
win_ind2, rec_ind2 = self.sample_window()
elif pair_type == 1: # Positive example
mask = (ts >= ts1 - self.tau_pos) & (ts <= ts1 + self.tau_pos)
if win_ind2 is None:
mask[ts == ts1] = False # same window cannot be sampled twice
if sum(mask) == 0:
raise NotImplementedError
win_ind2 = self.rng.choice(self.info.iloc[rec_ind1]['index'][mask])
return win_ind1, win_ind2, float(pair_type)
def presample(self):
"""Presample examples.
Once presampled, the examples are the same from one epoch to another.
"""
self.examples = [self._sample_pair() for _ in range(self.n_examples)]
return self
def __iter__(self):
"""Iterate over pairs.
Yields
------
(int): position of the first window in the dataset.
(int): position of the second window in the dataset.
(float): 0 for negative pair, 1 for positive pair.
"""
for i in range(self.n_examples):
if hasattr(self, 'examples'):
yield self.examples[i]
else:
yield self._sample_pair()
def __len__(self):
return self.n_examples
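# Minimal usage sketch (illustrative only; ``windows_dataset`` and the tau /
# n_examples values below are assumptions, not defined in this module):
#
#     from braindecode.samplers import RelativePositioningSampler
#
#     sampler = RelativePositioningSampler(
#         windows_dataset.get_metadata(), tau_pos=2000, tau_neg=10000,
#         n_examples=250, same_rec_neg=True, random_state=87)
#     for win_ind1, win_ind2, pair_label in sampler:
#         ...  # pair_label is 1.0 for a positive pair, 0.0 for a negative pair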
| 35.97479 | 79 | 0.605466 | [
"BSD-3-Clause"
] | Div12345/braindecode | braindecode/samplers/ssl.py | 4,282 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# (C)Eduardo Ribeiro - 1600820
class Contract:
id = 0
school_code = 0
school_name = ""
n_contract = 0
n_hours_per_week = 0
contract_end_date = ""
application_deadline = ""
recruitment_group = ""
county = ""
district = ""
class_project = ""
qualifications = ""
def __init__(
self,
id,
school_code,
school_name,
n_contract,
n_hours_per_week,
contract_end_date,
application_deadline,
recruitment_group,
county,
district,
class_project,
qualifications,
):
self.id = id
self.school_code = school_code
self.school_name = school_name
self.n_contract = n_contract
self.n_hours_per_week = n_hours_per_week
self.contract_end_date = contract_end_date
self.application_deadline = application_deadline
self.recruitment_group = recruitment_group
self.county = county
self.district = district
self.class_project = class_project
self.qualifications = qualifications
| 24.125 | 56 | 0.611399 | [
"MIT"
] | ejgr-mtsiw/pw-html-parser | sigrhe_contract.py | 1,158 | Python |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_q_mx#x+8x13+_m=0*vp(!di0evkomq0*!@z7^-l+7!2izak14'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG',1)))
ALLOWED_HOSTS = [
'ec2-15-188-8-39.eu-west-3.compute.amazonaws.com',
'127.0.0.1'
]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
STATIC_ROOT = 'static/' | 25.761538 | 91 | 0.697522 | [
"MIT"
] | achrefabdennebi/profiles-rest-api | profiles_project/settings.py | 3,349 | Python |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class CbtfArgonavis(CMakePackage):
"""CBTF Argo Navis project contains the CUDA collector and supporting
       libraries that were developed as a result of a DOE SBIR grant.
"""
homepage = "http://sourceforge.net/p/cbtf/wiki/Home/"
git = "https://github.com/OpenSpeedShop/cbtf-argonavis.git"
version('develop', branch='master')
version('1.9.3', branch='1.9.3')
version('1.9.2', branch='1.9.2')
version('1.9.1.2', branch='1.9.1.2')
version('1.9.1.1', branch='1.9.1.1')
version('1.9.1.0', branch='1.9.1.0')
variant('cti', default=False,
description="Build MRNet with the CTI startup option")
variant('crayfe', default=False,
description="build only the FE tool using the runtime_dir \
to point to target build.")
variant('runtime', default=False,
description="build only the runtime libraries and collectors.")
variant('build_type', default='None', values=('None'),
description='CMake build type')
depends_on("[email protected]:", type='build')
# To specify ^[email protected] on the command line spack
    # apparently needs/wants this dependency explicitly here
# even though it is referenced downstream
depends_on("elf", type="link")
# For boost
depends_on("[email protected]:1.69.0")
# For MRNet
depends_on("[email protected]:+cti", when='@develop+cti', type=('build', 'link', 'run'))
depends_on("[email protected]:+lwthreads", when='@develop~cti', type=('build', 'link', 'run'))
depends_on("[email protected]+cti", when='@1.9.1.0:9999+cti', type=('build', 'link', 'run'))
depends_on("[email protected]+lwthreads", when='@1.9.1.0:9999~cti', type=('build', 'link', 'run'))
# For CBTF
depends_on("cbtf@develop", when='@develop', type=('build', 'link', 'run'))
depends_on("[email protected]:9999", when='@1.9.1.0:9999', type=('build', 'link', 'run'))
# For CBTF with cti
depends_on("cbtf@develop+cti", when='@develop+cti', type=('build', 'link', 'run'))
depends_on("[email protected]:9999+cti", when='@1.9.1.0:9999+cti', type=('build', 'link', 'run'))
# For CBTF with runtime
depends_on("cbtf@develop+runtime", when='@develop+runtime', type=('build', 'link', 'run'))
depends_on("[email protected]:9999+runtime", when='@1.9.1.0:9999+runtime', type=('build', 'link', 'run'))
# For libmonitor
depends_on("[email protected]+krellpatch", type=('build', 'link', 'run'))
# For PAPI
depends_on("[email protected]:", type=('build', 'link', 'run'))
# For CBTF-KRELL
depends_on("cbtf-krell@develop", when='@develop', type=('build', 'link', 'run'))
depends_on("[email protected]:9999", when='@1.9.1.0:9999', type=('build', 'link', 'run'))
depends_on('cbtf-krell@develop+cti', when='@develop+cti', type=('build', 'link', 'run'))
depends_on('[email protected]:9999+cti', when='@1.9.1.0:9999+cti', type=('build', 'link', 'run'))
depends_on('cbtf-krell@develop+runtime', when='@develop+runtime', type=('build', 'link', 'run'))
depends_on('[email protected]:9999+runtime', when='@1.9.1.0:9999+runtime', type=('build', 'link', 'run'))
# For CUDA
depends_on("cuda")
parallel = False
build_directory = 'build_cbtf_argonavis'
def cmake_args(self):
spec = self.spec
compile_flags = "-O2 -g"
cmake_args = [
'-DCMAKE_CXX_FLAGS=%s' % compile_flags,
'-DCMAKE_C_FLAGS=%s' % compile_flags,
'-DCUDA_DIR=%s' % spec['cuda'].prefix,
'-DCUDA_INSTALL_PATH=%s' % spec['cuda'].prefix,
'-DCUDA_TOOLKIT_ROOT_DIR=%s' % spec['cuda'].prefix,
'-DCUPTI_DIR=%s' % spec['cuda'].prefix.extras.CUPTI,
'-DCUPTI_ROOT=%s' % spec['cuda'].prefix.extras.CUPTI,
'-DPAPI_ROOT=%s' % spec['papi'].prefix,
'-DCBTF_DIR=%s' % spec['cbtf'].prefix,
'-DCBTF_KRELL_DIR=%s' % spec['cbtf-krell'].prefix,
'-DBOOST_ROOT=%s' % spec['boost'].prefix,
'-DBoost_DIR=%s' % spec['boost'].prefix,
'-DBOOST_LIBRARYDIR=%s' % spec['boost'].prefix.lib,
'-DMRNET_DIR=%s' % spec['mrnet'].prefix,
'-DLIBMONITOR_DIR=%s' % spec['libmonitor'].prefix,
'-DBoost_NO_SYSTEM_PATHS=ON']
return cmake_args
def setup_run_environment(self, env):
"""Set up the compile and runtime environments for a package."""
env.prepend_path(
'LD_LIBRARY_PATH',
self.spec['cuda'].prefix + '/extras/CUPTI/lib64')
def setup_build_environment(self, env):
"""Set up the compile and runtime environments for a package."""
env.prepend_path(
'LD_LIBRARY_PATH',
self.spec['cuda'].prefix + '/extras/CUPTI/lib64')
| 41.280992 | 110 | 0.603003 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | CreRecombinase/spack | var/spack/repos/builtin/packages/cbtf-argonavis/package.py | 4,995 | Python |
"""SentencePiece based word tokenizer module"""
from pathlib import Path
from typing import List
import sentencepiece as spm
from urduhack.stop_words import STOP_WORDS
def _is_token(pieces: list, special_symbol: str = "▁") -> List[str]:
"""
Check for stopwords and actual words in word pieces
Args:
pieces (list): word pieces returned by sentencepiece model
special_symbol (str): spm prefix special symbol for space
Returns:
List of decoded words
"""
decoded = []
for piece in pieces:
if special_symbol not in piece:
if piece in STOP_WORDS or len(piece) > 3:
piece = special_symbol + piece
decoded.append(piece)
else:
decoded.append(piece)
else:
decoded.append(piece)
return decoded
def _load_model(model_path: str) -> spm.SentencePieceProcessor:
"""
Loads pre_trained keras model and vocab file
Args:
model_path (str): Path to the spm model file
Returns:
spm model class instance
"""
spm_model = spm.SentencePieceProcessor()
spm_model.Load(model_file=model_path)
return spm_model
def _is_model_available(model_path: str) -> None:
"""
Check if the models file exist.
Args:
model_path (str): path to the tokenizer model file
Raises:
FileNotFoundError: If model_path does not exist
Returns: None
"""
if not Path(model_path).exists():
_error = "Word tokenizer Model not found!" \
"Please run 'urduhack download' in terminal." \
"Doc: https://urduhack.readthedocs.io/en/stable/installation.html#downloading-models"
raise FileNotFoundError(_error)
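# Illustrative usage sketch: a minimal way the helpers above could be combined
# to word-tokenize a sentence, assuming a trained SentencePiece word model at
# the hypothetical path below. EncodeAsPieces() is the standard sentencepiece
# call that produces the pieces which _is_token() post-processes.
if __name__ == "__main__":
    _example_model = "models/word_tokenizer.model"  # hypothetical path
    _is_model_available(_example_model)  # raises FileNotFoundError if missing
    _spm_model = _load_model(_example_model)
    _pieces = _spm_model.EncodeAsPieces("sample sentence")  # placeholder input
    print(_is_token(_pieces))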
| 28.290323 | 102 | 0.641391 | ["MIT"] | cinfotech94/urduhackk | urduhack/tokenization/wtk.py | 1,756 | Python |
"""command line interface for mutation_origin"""
import os
import time
import pickle
from collections import defaultdict
import click
from tqdm import tqdm
import pandas
from numpy import log
from numpy.random import seed as np_seed
from scitrack import CachingLogger
from sklearn.model_selection import train_test_split
from mutation_origin.opt import (_seed, _feature_dim, _enu_path,
_germline_path, _output_path, _flank_size,
_train_size, _enu_ratio,
_numreps, _label_col, _proximal, _usegc,
_training_path, _c_values, _penalty_options,
_n_jobs, _classifier_path, _data_path,
_predictions_path, _alpha_options,
_overwrite, _verbose, _class_prior,
_strategy, _score)
from mutation_origin.preprocess import data_to_numeric
from mutation_origin.encoder import (get_scaler, inverse_transform_response,
transform_response)
from mutation_origin.classify import (logistic_regression, one_class_svm,
predict_origin, naive_bayes, xgboost)
from mutation_origin.util import (dump_json, load_predictions,
get_basename, get_classifier_label,
get_enu_germline_sizes, iter_indices,
load_classifier, open_)
from mutation_origin.postprocess import measure_performance
__author__ = "Gavin Huttley"
__copyright__ = "Copyright 2014, Gavin Huttley"
__credits__ = ["Yicheng Zhu", "Cheng Soon Ong", "Gavin Huttley"]
__license__ = "BSD"
__version__ = "0.3"
__maintainer__ = "Gavin Huttley"
__email__ = "[email protected]"
__status__ = "Development"
LOGGER = CachingLogger()
@click.group()
def main():
"""mutori -- for building and applying classifiers of mutation origin"""
pass
@main.command()
@_seed
@_enu_path
@_germline_path
@_output_path
@_train_size
@_enu_ratio
@_numreps
@_overwrite
def sample_data(enu_path, germline_path, output_path, seed,
train_size,
enu_ratio, numreps, overwrite):
"""creates train/test sample data"""
if seed is None:
seed = int(time.time())
LOGGER.log_args()
LOGGER.log_versions(['sklearn', 'numpy'])
# set the random number seed
np_seed(seed)
start_time = time.time()
os.makedirs(output_path, exist_ok=True)
logfile_path = os.path.join(output_path, "logs/data_sampling.log")
if os.path.exists(logfile_path) and not overwrite:
click.secho(f"Exists: {logfile_path}! use overwrite to force.",
fg='red')
return
LOGGER.log_file_path = logfile_path
LOGGER.input_file(enu_path)
LOGGER.input_file(germline_path)
enu = pandas.read_csv(enu_path, sep="\t", header=0)
germline = pandas.read_csv(germline_path, sep="\t", header=0)
train_size = train_size // 2
test_size = train_size
train_enu_ratio, test_enu_ratio = enu_ratio
enu_train_size, germ_train_size = get_enu_germline_sizes(train_size,
train_enu_ratio)
enu_test_size, germ_test_size = get_enu_germline_sizes(test_size,
test_enu_ratio)
assert min(enu_train_size, germ_train_size,
enu_test_size, germ_test_size) > 0
if (2 * train_size > enu.shape[0] or
2 * train_size > germline.shape[0]):
print(f"ENU data set size: {enu.shape[0]}")
print(f"Germline data set size: {germline.shape[0]}")
print(f"Train set size: {train_size}")
raise ValueError("2 x train size exceeds"
" size of training data source(s)")
for rep in range(numreps):
test_outpath = os.path.join(output_path, f"test-{rep}.tsv.gz")
train_outpath = os.path.join(output_path, f"train-{rep}.tsv.gz")
enu_training, enu_testing = train_test_split(
enu,
test_size=enu_test_size,
train_size=enu_train_size)
germ_training, germ_testing = train_test_split(
germline,
test_size=germ_test_size,
train_size=germ_train_size)
if any(map(lambda x: x.shape[0] == 0,
[enu_training, enu_testing, germ_training, germ_testing])):
raise RuntimeError("screw up in creating test/train set")
# concat the data frames
testing = pandas.concat([enu_testing, germ_testing])
training = pandas.concat([enu_training, germ_training])
# write out, separately, the ENU and Germline data for train and test
testing.to_csv(test_outpath, index=False,
sep="\t", compression='gzip')
training.to_csv(train_outpath, index=False,
sep="\t", compression='gzip')
LOGGER.output_file(test_outpath)
LOGGER.output_file(train_outpath)
duration = time.time() - start_time
LOGGER.log_message("%.2f" % (duration / 60.),
label="run duration (minutes)")
LOGGER.shutdown()
@main.command()
@_training_path
@_output_path
@_label_col
@_seed
@_score
@_flank_size
@_feature_dim
@_proximal
@_usegc
@_c_values
@_penalty_options
@_n_jobs
@_overwrite
@_verbose
def lr_train(training_path, output_path, label_col, seed, scoring,
flank_size, feature_dim, proximal,
usegc, c_values, penalty_options, n_jobs, overwrite, verbose):
"""logistic regression training, validation, dumps optimal model"""
if not seed:
seed = int(time.time())
np_seed(seed)
LOGGER.log_args()
LOGGER.log_versions(['sklearn', 'numpy'])
os.makedirs(output_path, exist_ok=True)
basename = get_basename(training_path)
outpath = os.path.join(output_path, f"{basename}-classifier-lr.pkl.gz")
if os.path.exists(outpath) and not overwrite:
if verbose > 1:
click.secho(f"Skipping. {outpath} exists. "
"use overwrite to force.",
fg='green')
return
logfile_path = os.path.join(output_path,
f"logs/{basename}-training-lr.log")
LOGGER.log_file_path = logfile_path
LOGGER.input_file(training_path)
start_time = time.time()
_, resp, feat, n_dims, names = data_to_numeric(training_path,
label_col, flank_size,
feature_dim, proximal,
usegc)
if usegc:
# we need to scale the data
scaler = get_scaler(feat)
feat = scaler.transform(feat)
classifier = logistic_regression(feat, resp, seed, scoring,
c_values,
penalty_options.split(","), n_jobs)
betas = dict(zip(names, classifier.best_estimator_.coef_.tolist()[0]))
result = dict(classifier=classifier.best_estimator_, betas=betas,
scoring=scoring)
result['feature_params'] = dict(feature_dim=feature_dim,
flank_size=flank_size, proximal=proximal,
usegc=usegc)
if usegc:
result['scaler'] = scaler
with open(outpath, 'wb') as clf_file:
pickle.dump(result, clf_file)
LOGGER.output_file(outpath)
duration = time.time() - start_time
LOGGER.log_message("%.2f" % (duration / 60.),
label="run duration (minutes)")
LOGGER.shutdown()
@main.command()
@_training_path
@_output_path
@_label_col
@_seed
@_score
@_flank_size
@_feature_dim
@_proximal
@_usegc
@_alpha_options
@_class_prior
@_n_jobs
@_overwrite
@_verbose
def nb_train(training_path, output_path, label_col, seed, scoring,
flank_size, feature_dim, proximal,
usegc, alpha_options, class_prior, n_jobs, overwrite, verbose):
"""Naive Bayes training, validation, dumps optimal model"""
if not seed:
seed = int(time.time())
np_seed(seed)
LOGGER.log_args()
LOGGER.log_versions(['sklearn', 'numpy'])
os.makedirs(output_path, exist_ok=True)
basename = get_basename(training_path)
outpath = os.path.join(output_path, f"{basename}-classifier-nb.pkl.gz")
logfile_path = os.path.join(output_path,
f"logs/{basename}-training-nb.log")
if os.path.exists(outpath) and not overwrite:
if verbose > 1:
click.secho(f"Skipping. {outpath} exists. "
"use overwrite to force.",
fg='green')
return
LOGGER.log_file_path = logfile_path
LOGGER.input_file(training_path)
start_time = time.time()
if class_prior is not None:
class_labels = list(class_prior)
encoded = transform_response(class_labels)
ordered = sorted(zip(encoded, class_labels))
class_prior = [class_prior[l] for _, l in ordered]
_, resp, feat, n_dims, names = data_to_numeric(training_path,
label_col, flank_size,
feature_dim, proximal,
usegc)
if usegc:
# we need to scale the data
scaler = get_scaler(feat)
feat = scaler.transform(feat)
classifier = naive_bayes(feat, resp, seed, alpha_options, scoring,
class_prior=class_prior, n_jobs=n_jobs)
betas = dict(zip(names, classifier.best_estimator_.coef_.tolist()[0]))
result = dict(classifier=classifier.best_estimator_, betas=betas,
scoring=scoring)
result['feature_params'] = dict(feature_dim=feature_dim,
flank_size=flank_size, proximal=proximal,
usegc=usegc)
if usegc:
result['scaler'] = scaler
with open_(outpath, 'wb') as clf_file:
pickle.dump(result, clf_file)
LOGGER.output_file(outpath)
duration = time.time() - start_time
LOGGER.log_message("%.2f" % (duration / 60.),
label="run duration (minutes)")
LOGGER.shutdown()
@main.command()
@_training_path
@_output_path
@_label_col
@_seed
@_flank_size
@_feature_dim
@_proximal
@_usegc
@_strategy
@_n_jobs
@_overwrite
@_verbose
def xgboost_train(training_path, output_path, label_col, seed,
flank_size, feature_dim, proximal,
usegc, strategy, n_jobs, overwrite, verbose):
"""Naive Bayes training, validation, dumps optimal model"""
if not seed:
seed = int(time.time())
np_seed(seed)
LOGGER.log_args()
LOGGER.log_versions(['sklearn', 'numpy'])
os.makedirs(output_path, exist_ok=True)
basename = get_basename(training_path)
outpath = os.path.join(output_path, f"{basename}-classifier-xgb.pkl.gz")
logfile_path = os.path.join(output_path,
f"logs/{basename}-training-xgb.log")
if os.path.exists(outpath) and not overwrite:
if verbose > 1:
click.secho(f"Skipping. {outpath} exists. "
"use overwrite to force.",
fg='green')
return
LOGGER.log_file_path = logfile_path
LOGGER.input_file(training_path)
start_time = time.time()
_, resp, feat, n_dims, names = data_to_numeric(training_path,
label_col, flank_size,
feature_dim, proximal,
usegc)
    # hack: remap -1 response labels to 0, since XGBoost expects non-negative class labels
resp = [v if v > 0 else 0 for v in resp]
if usegc:
# we need to scale the data
scaler = get_scaler(feat)
feat = scaler.transform(feat)
classifier = xgboost(feat, resp, seed, strategy, n_jobs, verbose)
result = dict(classifier=classifier)
result['feature_params'] = dict(feature_dim=feature_dim,
flank_size=flank_size, proximal=proximal,
usegc=usegc)
if usegc:
result['scaler'] = scaler
with open(outpath, 'wb') as clf_file:
pickle.dump(result, clf_file)
LOGGER.output_file(outpath)
duration = time.time() - start_time
LOGGER.log_message("%.2f" % (duration / 60.),
label="run duration (minutes)")
LOGGER.shutdown()
@main.command()
@_training_path
@_output_path
@_label_col
@_seed
@_flank_size
@_feature_dim
@_proximal
@_usegc
@_overwrite
@_verbose
def ocs_train(training_path, output_path, label_col, seed,
flank_size, feature_dim, proximal, usegc, overwrite, verbose):
"""one-class svm training for outlier detection"""
if seed is None:
seed = int(time.time())
LOGGER.log_args()
LOGGER.log_versions(['sklearn', 'numpy'])
start_time = time.time()
os.makedirs(output_path, exist_ok=True)
basename = get_basename(training_path)
outpath = os.path.join(output_path, f"{basename}-classifier-ocs.pkl.gz")
logfile_path = os.path.join(output_path,
f"logs/{basename}-training-ocs.log")
if os.path.exists(outpath) and not overwrite:
if verbose > 1:
click.secho(f"Skipping. {outpath} exists. "
"use overwrite to force.",
fg='green')
return
LOGGER.log_file_path = logfile_path
LOGGER.input_file(training_path)
start_time = time.time()
_, _, feat, n_dims, names = data_to_numeric(training_path,
label_col, flank_size,
feature_dim, proximal,
usegc=usegc,
one_class='g')
classifier = one_class_svm(feat, seed)
result = dict(classifier=classifier)
result['feature_params'] = dict(feature_dim=feature_dim,
flank_size=flank_size, proximal=proximal,
usegc=usegc)
with open(outpath, 'wb') as clf_file:
pickle.dump(result, clf_file)
LOGGER.output_file(outpath)
duration = time.time() - start_time
LOGGER.log_message("%.2f" % (duration / 60.),
label="run duration (minutes)")
LOGGER.shutdown()
@main.command()
@_classifier_path
@_data_path
@_output_path
@_label_col
@_class_prior
@_overwrite
@_verbose
def predict(classifier_path, data_path, output_path, label_col, class_prior,
overwrite, verbose):
"""predict labels for data"""
LOGGER.log_args()
LOGGER.log_versions(['sklearn', 'numpy'])
classifier, feature_params, scaler = load_classifier(classifier_path)
class_label = get_classifier_label(classifier)
if class_prior is not None and class_label == 'lr':
# https://stats.stackexchange.com/questions/117592/logistic-regression-prior-correction-at-test-time
        # based on the above and on King and Zeng, we adjust the intercept term
        # so that it is incremented by ln(p(1) / p(-1)), where p(1) is the prior
        # probability of the +1 label and p(-1) = 1 - p(1)
class_labels = list(class_prior)
encoded = transform_response(class_labels)
ordered = sorted(zip(encoded, class_labels))
if 'e' in ordered[0]:
adj = log(class_prior['g'] / class_prior['e'])
else:
adj = log(class_prior['e'] / class_prior['g'])
classifier.intercept_ += adj
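        # Illustrative numeric check (hypothetical priors): with
        # class_prior = {'e': 0.25, 'g': 0.75} and 'e' as the first encoded
        # label, adj = ln(0.75 / 0.25) = ln(3) ~= 1.0986, so the intercept is
        # incremented by roughly 1.1.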
basename_class = get_basename(classifier_path)
basename_data = get_basename(data_path)
basename = f"{basename_class}-{basename_data}"
outpath = os.path.join(
output_path,
f"{basename}-predicted-{class_label}.json.gz")
os.makedirs(output_path, exist_ok=True)
logfile_path = os.path.join(output_path,
f"logs/{basename}-predict-{class_label}.log")
if os.path.exists(outpath) and not overwrite:
if verbose > 1:
click.secho(f"Skipping. {outpath} exists. "
"use overwrite to force.",
fg='green')
return
LOGGER.log_file_path = logfile_path
LOGGER.input_file(classifier_path)
LOGGER.input_file(data_path)
start_time = time.time()
# if NB, the score func name is different
if class_label in ("nb", "xgb"):
classifier.decision_function = classifier.predict_proba
fulldata = pandas.read_csv(data_path, sep='\t')
result = {}
result['feature_params'] = feature_params
result['classifier_label'] = class_label
result['classifier_path'] = classifier_path
result['predictions'] = defaultdict(list)
total = fulldata.shape[0] // 2000
pbar = tqdm(iter_indices(
fulldata.shape[0], block_size=2000), ncols=80, total=total)
for indices in pbar:
data = fulldata.iloc[indices]
ids, resp, feat, n_dims, names = data_to_numeric(data,
label_col=label_col,
**feature_params)
if scaler:
feat = scaler.transform(feat)
predictions, scores = predict_origin(classifier, feat)
if class_label in ("nb", "xgb"):
            # each `score' is the probability of belonging to either class
# reduce to just the first class
scores = scores[:, 1].tolist()
elif class_label == 'ocs':
scores = scores[:, 0].tolist()
predictions = inverse_transform_response(predictions)
result['predictions']['varid'].extend(list(ids))
result['predictions']['predicted'].extend(list(predictions))
result['predictions']['scores'].extend(list(scores))
dump_json(outpath, result)
LOGGER.output_file(outpath)
duration = time.time() - start_time
LOGGER.log_message("%.2f" % (duration / 60.),
label="run duration (minutes)")
LOGGER.shutdown()
# def performance -> produces summary stats on trained classifiers
# requires input data and the predicted results
@main.command()
@_data_path
@_predictions_path
@_output_path
@_label_col
@_overwrite
@_verbose
def performance(data_path, predictions_path, output_path, label_col,
overwrite, verbose):
"""produce measures of classifier performance"""
LOGGER.log_args()
LOGGER.log_versions(['sklearn', 'numpy'])
if not (data_path or predictions_path):
click.secho("Need data sets!", fg="red")
exit()
basename_train = get_basename(data_path)
basename_pred = get_basename(predictions_path)
basename = f"{basename_train}-{basename_pred}"
outpath = os.path.join(
output_path,
f"{basename}-performance.json.gz")
logfile_path = os.path.join(output_path,
f"logs/{basename}-performance.log")
if os.path.exists(outpath) and not overwrite:
if verbose > 1:
click.secho(f"Skipping. {outpath} exists. "
"Use overwrite to force.",
fg='green')
return
LOGGER.log_file_path = logfile_path
LOGGER.input_file(data_path)
LOGGER.input_file(predictions_path)
orig = pandas.read_csv(data_path, sep="\t")
predicted, feature_params, classifier_path, label =\
load_predictions(predictions_path)
result = measure_performance(orig, predicted,
label_col)
result["feature_params"] = feature_params
result["classifier_path"] = classifier_path
result["classifier_label"] = label
dump_json(outpath, result)
LOGGER.shutdown()
if __name__ == "__main__":
main()
| 35.40177 | 108 | 0.606739 | ["BSD-3-Clause"] | HuttleyLab/mutationorigin | mutation_origin/cli.py | 20,002 | Python |
"""
My first application
"""
import toga
from toga.style import Pack
from toga.style.pack import COLUMN, ROW
class HelloWorld(toga.App):
def startup(self):
"""
Construct and show the Toga application.
Usually, you would add your application to a main content box.
We then create a main window (with a name matching the app), and
show the main window.
"""
main_box = toga.Box(style=Pack(direction=COLUMN))
name_label = toga.Label(
'Your name: ',
            style=Pack(padding=(0, 5))
)
self.name_input = toga.TextInput(style=Pack(flex=1))
name_box = toga.Box(style=Pack(direction=ROW, padding=5))
name_box.add(name_label)
name_box.add(self.name_input)
button = toga.Button(
'Say Hello!',
on_press=self.say_hello,
style=Pack(padding=5)
)
main_box.add(name_box)
main_box.add(button)
self.main_window = toga.MainWindow(title=self.formal_name)
self.main_window.content = main_box
self.main_window.show()
def say_hello(self, widget):
if self.name_input.value:
name = self.name_input.value
else:
name = 'stranger'
self.main_window.info_dialog(
'Hi there!',
f"Hello, {name}"
)
def main():
return HelloWorld()
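# Illustrative entry point: Briefcase normally launches the app from a
# generated __main__.py, but the same call lets this module run directly
# with `python app.py`.
if __name__ == '__main__':
    main().main_loop()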
| 24.603448 | 72 | 0.580238 | ["BSD-3-Clause"] | The-Heyman/helloworld | src/helloworld/app.py | 1,427 | Python |
import os
turno = input('Qual turno você estuda? ')
if turno[0] == 'm' or turno[0] == 'M':
print('Bom Dia!')
elif turno[0] == 'v' or turno[0] == 'V':
print('Boa Tarde')
elif turno[0] == 'n' or turno[0] == 'N':
print('Boa Noite')
else:
print('Valor Inválido!')
os.system("pause")
| 20.8 | 42 | 0.535256 | ["MIT"] | WilliamSampaio/ExerciciosPython | exerc27/27.py | 314 | Python |
import ray
from copy import deepcopy
from leaderboard.leaderboard_evaluator import LeaderboardEvaluator
from leaderboard.utils.statistics_manager import StatisticsManager
@ray.remote(num_cpus=1./8, num_gpus=1./4, max_restarts=100, max_task_retries=-1)
class ScenarioRunner():
def __init__(self, args, scenario_class, scenario, route, checkpoint='simulation_results.json', town=None, port=1000, tm_port=1002, debug=False):
args = deepcopy(args)
# Inject args
args.scenario_class = scenario_class
args.town = town
args.port = port
args.trafficManagerPort = tm_port
args.scenarios = scenario
args.routes = route
args.debug = debug
args.checkpoint = checkpoint
args.record = ''
self.runner = LeaderboardEvaluator(args, StatisticsManager())
self.args = args
def run(self):
return self.runner.run(self.args)
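# Illustrative usage sketch (argument values are hypothetical). Because the
# class is a Ray actor, it is instantiated with .remote(...) and run() is
# launched asynchronously; ray.get() blocks until the simulation finishes:
#
#   ray.init()
#   runner = ScenarioRunner.remote(
#       args, scenario_class='route_scenario', scenario='scenarios.json',
#       route='routes/route_00.xml', port=2000, tm_port=2002)
#   result = ray.get(runner.run.remote())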
| 34.296296 | 149 | 0.695464 | ["MIT"] | anthonyhu/WorldOnRails | runners/scenario_runner.py | 926 | Python |
from django.apps import AppConfig
class YtvideoConfig(AppConfig):
name = 'ytvideo'
| 14.833333 | 33 | 0.752809 | ["MIT"] | LSM2016/Bilibili- | ytvideo/apps.py | 89 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2015 by Gaik Tamazian
# gaik (dot) tamazian (at) gmail (dot) com
class Error(Exception):
"""
The class describes a basic error that may occur in any of the
Chromosomer-related routines.
"""
pass
class MapError(Error):
"""
The class describes an error that may occur while working with a
fragment __map object.
"""
pass
class AlignmentToMapError(Error):
"""
The class describes an error that may occur while creating a
fragment __map from alignments.
"""
pass
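# Illustrative usage sketch: raising the most specific exception while letting
# callers catch any Chromosomer failure through the shared base class.
if __name__ == "__main__":
    try:
        raise MapError("unexpected fragment in the fragment map")
    except Error as exc:
        print(type(exc).__name__, exc)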
| 19.6 | 68 | 0.653061 | ["MIT"] | gtamazian/Chromosomer | chromosomer/exception.py | 588 | Python |
# coding: utf-8
import os
import numpy as np
import copy
from PyQt5.QtWidgets import (QPushButton, QScrollArea)
from PyQt5.QtCore import QThread, pyqtSignal
from multiprocessing import Process, Manager
from ..malss import MALSS
from .waiting_animation import WaitingAnimation
from .rfpimp import oob_importances
from .learning_curve_base import LearningCurveBase
class LearningCurve(LearningCurveBase):
def __init__(self, parent=None, button_func=None, params=None):
super().__init__(parent, 'LearningCurve', params)
self.button_func = button_func
path = os.path.abspath(os.path.dirname(__file__)) + '/static/'
path1 = path + 'check_curve'
text = self.get_text(path1)
if self.params.lang == 'en':
self.set_paragraph('', text=text)
else:
self.set_paragraph('', text=text)
self.plot_curve(self.params.results['algorithms'])
self.vbox.addStretch()
btn_fs = QPushButton('Try feature selection', self.inner)
btn_fs.setStyleSheet('QPushButton{font: bold; font-size: 15pt; background-color: white;};')
btn_fs.clicked.connect(self.__button_clicked)
self.btn_next = QPushButton('Continue', self.inner)
self.btn_next.setStyleSheet('QPushButton{font: bold; font-size: 15pt; background-color: white;};')
if self.params.lang == 'en':
self.btn_next.clicked.connect(lambda: self.button_func(
'Prediction'))
else:
self.btn_next.clicked.connect(lambda: self.button_func(
'予測'))
self.vbox.addWidget(btn_fs)
self.vbox.addWidget(self.btn_next)
# "parent.parent()" must be modified.
self.wait_ani = WaitingAnimation(parent.parent())
self.wait_ani.hide()
lists = ['task', 'supervised_learning', 'dummy', 'hyperparameter',
'overfitting', 'cross_validation', 'learning_curve',
'bias_variance']
if self.params.lang == 'jp':
lists = [l + '_jp' for l in lists]
else:
lists = [l + '_en' for l in lists]
self.wait_ani.set_lists(lists)
def resizeEvent(self, event):
# To be modified.
self.wait_ani.resize(self.parent().parent().size())
event.accept()
QScrollArea.resizeEvent(self, event)
def __button_clicked(self):
self.__feature_selection()
def __feature_selection(self):
self.mdl_fs = copy.deepcopy(self.params.mdl)
self.thread = FeatureSelectionWorker(self.mdl_fs)
self.thread.finSignal.connect(self.__feature_selected)
self.thread.start()
self.wait_ani.show()
def __feature_selected(self, signalData):
self.wait_ani.hide()
if 'error' in signalData:
self.params.error = signalData['error']
self.button_func('Error')
else:
if len(signalData['mdl'].data.X.columns) < len(self.params.X.columns):
# some features deleted
self.params.X_fs = signalData['mdl'].data.X
self.params.mdl_fs = signalData['mdl']
self.params.algorithms_fs = self.params.mdl_fs.get_algorithms()
if self.params.lang == 'en':
self.button_func('Feature selection')
else:
self.button_func('特徴量選択')
else:
# no features deleted
self.params.not_deleted = True
if self.params.lang == 'en':
self.button_func('Prediction')
else:
self.button_func('予測')
class LearningCurve2(LearningCurveBase):
def __init__(self, parent=None, button_func=None, params=None):
super().__init__(parent, 'LearningCurve 2', params)
self.button_func = button_func
path = os.path.abspath(os.path.dirname(__file__)) + '/static/'
path1 = path + 'learning_curve_2'
text = self.get_text(path1)
if self.params.lang == 'en':
self.set_paragraph('', text=text)
else:
self.set_paragraph('', text=text)
self.plot_curve(self.params.results_fs['algorithms'])
if self.params.lang == 'en':
text = ('Finally, MALSS output analysis results, and you can '
'predict unknown data (if you have).\n'
'Press "Next" to continue.')
self.set_paragraph('', text=text)
else:
text = ('最後に学習結果の出力と,未知データがあればその予測を'
'行いましょう.\nNextを押してください')
self.set_paragraph('', text=text)
self.vbox.addStretch()
self.btn_next = QPushButton('Next', self.inner)
self.btn_next.setStyleSheet('QPushButton{font: bold; font-size: 15pt; background-color: white;};')
if self.params.lang == 'en':
self.btn_next.clicked.connect(lambda: self.button_func(
'Prediction'))
else:
self.btn_next.clicked.connect(lambda: self.button_func(
'予測'))
self.vbox.addWidget(self.btn_next)
class FeatureSelectionWorker(QThread):
finSignal = pyqtSignal(dict)
def __init__(self, mdl):
super().__init__()
self.mdl = mdl
def run(self):
with Manager() as manager:
d = manager.dict()
job = Process(target=FeatureSelectionWorker.sub_job,
args=(self.mdl, d))
job.start()
job.join()
self.finSignal.emit(dict(d))
@staticmethod
def sub_job(mdl, d):
try:
mdl.select_features()
d['mdl'] = mdl
except Exception as e:
import traceback
            d['error'] = traceback.format_exc()
| 34.847953 | 107 | 0.5756 | ["MIT"] | canard0328/malss | malss/app/learning_curve.py | 6,063 | Python |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
_REF = "_ref"
class Output:
_REF = "_ref"
class DeleteHostInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"_ref": {
"type": "string",
"title": "Ref",
"description": "Object Reference of the host to remove",
"order": 1
}
},
"required": [
"_ref"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class DeleteHostOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"_ref": {
"type": "string",
"title": "Ref",
"description": "Object Reference of the removed host",
"order": 1
}
},
"required": [
"_ref"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 16.568966 | 62 | 0.546306 | ["MIT"] | TonyHamil/insightconnect-plugins | infoblox/komand_infoblox/actions/delete_host/schema.py | 961 | Python |
# coding: utf-8
from __future__ import unicode_literals
import os
import re
import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_str,
compat_urllib_parse_unquote,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
int_or_none,
is_html,
js_to_json,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
orderedSet,
parse_duration,
sanitized_Request,
smuggle_url,
unescapeHTML,
unified_timestamp,
unsmuggle_url,
UnsupportedError,
url_or_none,
xpath_attr,
xpath_text,
xpath_with_ns,
)
from .commonprotocols import RtmpIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .nexx import (
NexxIE,
NexxEmbedIE,
)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .drtuber import DrTuberIE
from .redtube import RedTubeIE
from .tube8 import Tube8IE
from .mofosex import MofosexEmbedIE
from .spankwire import SpankwireIE
from .youporn import YouPornIE
from .vimeo import (
VimeoIE,
VHXEmbedIE,
)
from .dailymotion import DailymotionIE
from .dailymail import DailyMailIE
from .onionstudios import OnionStudiosIE
from .viewlift import ViewLiftEmbedIE
from .mtv import MTVServicesEmbeddedIE
from .pladform import PladformIE
from .videomore import VideomoreIE
from .webcaster import WebcasterFeedIE
from .googledrive import GoogleDriveIE
from .jwplatform import JWPlatformIE
from .digiteka import DigitekaIE
from .arkena import ArkenaIE
from .instagram import InstagramIE
from .liveleak import LiveLeakIE
from .threeqsdn import ThreeQSDNIE
from .theplatform import ThePlatformIE
from .kaltura import KalturaIE
from .eagleplatform import EaglePlatformIE
from .facebook import FacebookIE
from .soundcloud import SoundcloudEmbedIE
from .tunein import TuneInBaseIE
from .vbox7 import Vbox7IE
from .dbtv import DBTVIE
from .piksel import PikselIE
from .videa import VideaIE
from .twentymin import TwentyMinutenIE
from .ustream import UstreamIE
from .arte import ArteTVEmbedIE
from .videopress import VideoPressIE
from .rutube import RutubeIE
from .limelight import LimelightBaseIE
from .anvato import AnvatoIE
from .washingtonpost import WashingtonPostIE
from .wistia import WistiaIE
from .mediaset import MediasetIE
from .joj import JojIE
from .megaphone import MegaphoneIE
from .vzaar import VzaarIE
from .channel9 import Channel9IE
from .vshare import VShareIE
from .mediasite import MediasiteIE
from .springboardplatform import SpringboardPlatformIE
from .yapfiles import YapFilesIE
from .vice import ViceIE
from .xfileshare import XFileShareIE
from .cloudflarestream import CloudflareStreamIE
from .peertube import PeerTubeIE
from .teachable import TeachableIE
from .indavideo import IndavideoEmbedIE
from .apa import APAIE
from .foxnews import FoxNewsIE
from .viqeo import ViqeoIE
from .expressen import ExpressenIE
from .zype import ZypeIE
from .odnoklassniki import OdnoklassnikiIE
from .kinja import KinjaEmbedIE
from .gedidigital import GediDigitalIE
from .rcs import RCSEmbedsIE
from .bitchute import BitChuteIE
from .rumble import RumbleEmbedIE
from .arcpublishing import ArcPublishingIE
from .medialaan import MedialaanIE
from .simplecast import SimplecastIE
from .wimtv import WimTVIE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
_TESTS = [
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
# Direct link to media delivered compressed (until Accept-Encoding is *)
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
],
'skip': 'URL invalid',
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented',
r'400.*Bad Request',
],
},
# Direct link with incorrect MIME type
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'http://podcastfeeds.nbcnews.com/nbcnews/video/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'title': 'MSNBC Rachel Maddow (video)',
'description': 're:.*her unique approach to storytelling.*',
},
'playlist': [{
'info_dict': {
'ext': 'mov',
'id': 'pdv_maddow_netcast_mov-12-03-2020-223726',
'title': 'MSNBC Rachel Maddow (video) - 12-03-2020-223726',
'description': 're:.*her unique approach to storytelling.*',
'upload_date': '20201204',
},
}],
},
# RSS feed with item with description and thumbnails
{
'url': 'https://anchor.fm/s/dd00e14/podcast/rss',
'info_dict': {
'id': 'https://anchor.fm/s/dd00e14/podcast/rss',
'title': 're:.*100% Hydrogen.*',
'description': 're:.*In this episode.*',
},
'playlist': [{
'info_dict': {
'ext': 'm4a',
'id': 'c1c879525ce2cb640b344507e682c36d',
'title': 're:Hydrogen!',
'description': 're:.*In this episode we are going.*',
'timestamp': 1567977776,
'upload_date': '20190908',
'duration': 459,
'thumbnail': r're:^https?://.*\.jpg$',
'episode_number': 1,
'season_number': 1,
'age_limit': 0,
},
}],
'params': {
'skip_download': True,
},
},
# RSS feed with enclosures and unsupported link URLs
{
'url': 'http://www.hellointernet.fm/podcast?format=rss',
'info_dict': {
'id': 'http://www.hellointernet.fm/podcast?format=rss',
'description': 'CGP Grey and Brady Haran talk about YouTube, life, work, whatever.',
'title': 'Hello Internet',
},
'playlist_mincount': 100,
},
# SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
# SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from https://www.restudy.dk/video/play/id/1637
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
# MPD from http://dash-mse-test.appspot.com/media.html
{
'url': 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd',
'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53',
'info_dict': {
'id': 'car-20120827-manifest',
'ext': 'mp4',
'title': 'car-20120827-manifest',
'formats': 'mincount:9',
'upload_date': '20130904',
},
'params': {
'format': 'bestvideo',
},
},
# m3u8 served with Content-Type: audio/x-mpegURL; charset=utf-8
{
'url': 'http://once.unicornmedia.com/now/master/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/93677179-2d99-4ef4-9e17-fe70d49abfbf/content.m3u8',
'info_dict': {
'id': 'content',
'ext': 'mp4',
'title': 'content',
'formats': 'mincount:8',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# m3u8 served with Content-Type: text/plain
{
'url': 'http://www.nacentapps.com/m3u8/index.m3u8',
'info_dict': {
'id': 'index',
'ext': 'mp4',
'title': 'index',
'upload_date': '20140720',
'formats': 'mincount:11',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'skip': 'video gone',
},
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': r're:^Chris Ziegler takes a look at the\.*',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
},
{
# redirect in Refresh HTTP header
'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
'info_dict': {
'id': 'pO8h3EaFRdo',
'ext': 'mp4',
'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
'upload_date': '20150917',
'uploader_id': 'brtvofficial',
'uploader': 'Boiler Room',
},
'params': {
'skip_download': False,
},
},
{
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
'id': '13601338388002',
'ext': 'mp4',
'uploader': 'www.hodiho.fr',
'title': 'R\u00e9gis plante sa Jeep',
}
},
# bandcamp page with custom domain
{
'add_ie': ['Bandcamp'],
'url': 'http://bronyrock.com/track/the-pony-mash',
'info_dict': {
'id': '3235767654',
'ext': 'mp3',
'title': 'The Pony Mash',
'uploader': 'M_Pallante',
},
'skip': 'There is a limit of 200 free downloads / month for the test song',
},
{
# embedded brightcove video
# it also tests brightcove videos that need to set the 'Referer'
# in the http requests
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
'ext': 'mp4',
'title': 'Le cours de bourse : l’analyse technique',
'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
'uploader': 'BFM BUSINESS',
},
'params': {
'skip_download': True,
},
},
{
# embedded with itemprop embedURL and video id spelled as `idVideo`
            'add_ie': ['BrightcoveLegacy'],
'url': 'http://bfmbusiness.bfmtv.com/mediaplayer/chroniques/olivier-delamarche/',
'info_dict': {
'id': '5255628253001',
'ext': 'mp4',
'title': 'md5:37c519b1128915607601e75a87995fc0',
'description': 'md5:37f7f888b434bb8f8cc8dbd4f7a4cf26',
'uploader': 'BFM BUSINESS',
'uploader_id': '876450612001',
'timestamp': 1482255315,
'upload_date': '20161220',
},
'params': {
'skip_download': True,
},
},
{
# https://github.com/ytdl-org/youtube-dl/issues/2253
'url': 'http://bcove.me/i6nfkrc3',
'md5': '0ba9446db037002366bab3b3eb30c88c',
'info_dict': {
'id': '3101154703001',
'ext': 'mp4',
'title': 'Still no power',
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
'add_ie': ['BrightcoveLegacy'],
'skip': 'video gone',
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
'md5': 'fb973ecf6e4a78a67453647444222983',
'info_dict': {
'id': '3414141473001',
'ext': 'mp4',
'title': 'Видео. Удаление Дзагоева (ЦСКА)',
'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
'uploader': 'Championat',
},
},
{
# https://github.com/ytdl-org/youtube-dl/issues/3541
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
{
# Brightcove video in <iframe>
'url': 'http://www.un.org/chinese/News/story.asp?NewsID=27724',
'md5': '36d74ef5e37c8b4a2ce92880d208b968',
'info_dict': {
'id': '5360463607001',
'ext': 'mp4',
'title': '叙利亚失明儿童在废墟上演唱《心跳》 呼吁获得正常童年生活',
'description': '联合国儿童基金会中东和北非区域大使、作曲家扎德·迪拉尼(Zade Dirani)在3月15日叙利亚冲突爆发7周年纪念日之际发布了为叙利亚谱写的歌曲《心跳》(HEARTBEAT),为受到六年冲突影响的叙利亚儿童发出强烈呐喊,呼吁世界做出共同努力,使叙利亚儿童重新获得享有正常童年生活的权利。',
'uploader': 'United Nations',
'uploader_id': '1362235914001',
'timestamp': 1489593889,
'upload_date': '20170315',
},
'add_ie': ['BrightcoveLegacy'],
},
{
# Brightcove with alternative playerID key
'url': 'http://www.nature.com/nmeth/journal/v9/n7/fig_tab/nmeth.2062_SV1.html',
'info_dict': {
'id': 'nmeth.2062_SV1',
'title': 'Simultaneous multiview imaging of the Drosophila syncytial blastoderm : Quantitative high-speed imaging of entire developing embryos with simultaneous multiview light-sheet microscopy : Nature Methods : Nature Research',
},
'playlist': [{
'info_dict': {
'id': '2228375078001',
'ext': 'mp4',
'title': 'nmeth.2062-sv1',
'description': 'nmeth.2062-sv1',
'timestamp': 1363357591,
'upload_date': '20130315',
'uploader': 'Nature Publishing Group',
'uploader_id': '1964492299001',
},
}],
},
{
# Brightcove with UUID in videoPlayer
'url': 'http://www8.hp.com/cn/zh/home.html',
'info_dict': {
'id': '5255815316001',
'ext': 'mp4',
'title': 'Sprocket Video - China',
'description': 'Sprocket Video - China',
'uploader': 'HP-Video Gallery',
'timestamp': 1482263210,
'upload_date': '20161220',
'uploader_id': '1107601872001',
},
'params': {
'skip_download': True, # m3u8 download
},
'skip': 'video rotates...weekly?',
},
{
# Brightcove:new type [2].
'url': 'http://www.delawaresportszone.com/video-st-thomas-more-earns-first-trip-to-basketball-semis',
'md5': '2b35148fcf48da41c9fb4591650784f3',
'info_dict': {
'id': '5348741021001',
'ext': 'mp4',
'upload_date': '20170306',
'uploader_id': '4191638492001',
'timestamp': 1488769918,
'title': 'VIDEO: St. Thomas More earns first trip to basketball semis',
},
},
{
# Alternative brightcove <video> attributes
'url': 'http://www.programme-tv.net/videos/extraits/81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche/',
'info_dict': {
'id': '81095-guillaume-canet-evoque-les-rumeurs-d-infidelite-de-marion-cotillard-avec-brad-pitt-dans-vivement-dimanche',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche, Extraits : toutes les vidéos avec Télé-Loisirs",
},
'playlist': [{
'md5': '732d22ba3d33f2f3fc253c39f8f36523',
'info_dict': {
'id': '5311302538001',
'ext': 'mp4',
'title': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche",
'description': "Guillaume Canet évoque les rumeurs d'infidélité de Marion Cotillard avec Brad Pitt dans Vivement Dimanche (France 2, 5 février 2017)",
'timestamp': 1486321708,
'upload_date': '20170205',
'uploader_id': '800000640001',
},
'only_matching': True,
}],
},
{
# Brightcove with UUID in videoPlayer
'url': 'http://www8.hp.com/cn/zh/home.html',
'info_dict': {
'id': '5255815316001',
'ext': 'mp4',
'title': 'Sprocket Video - China',
'description': 'Sprocket Video - China',
'uploader': 'HP-Video Gallery',
'timestamp': 1482263210,
'upload_date': '20161220',
'uploader_id': '1107601872001',
},
'params': {
'skip_download': True, # m3u8 download
},
},
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'md5': '166dd577b433b4d4ebfee10b0824d8ff',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
{
# ooyala video embedded with http://player.ooyala.com/iframe.js
'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
'info_dict': {
'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
'duration': 135.427,
},
'params': {
'skip_download': True,
},
'skip': 'movie expired',
},
# ooyala video embedded with http://player.ooyala.com/static/v4/production/latest/core.min.js
{
'url': 'http://wnep.com/2017/07/22/steampunk-fest-comes-to-honesdale/',
'info_dict': {
'id': 'lwYWYxYzE6V5uJMjNGyKtwwiw9ZJD7t2',
'ext': 'mp4',
'title': 'Steampunk Fest Comes to Honesdale',
'duration': 43.276,
},
'params': {
'skip_download': True,
}
},
# embed.ly video
{
'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
'info_dict': {
'id': '9ODmcdjQcHQ',
'ext': 'mp4',
'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
'upload_date': '20140225',
'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
'uploader': 'Tested',
'uploader_id': 'testedcom',
},
# No need to test YoutubeIE here
'params': {
'skip_download': True,
},
},
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
# HEAD requests lead to endless 301, while GET is OK
'expected_warnings': ['301'],
},
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
'info_dict': {
'id': '776940',
'ext': 'mp4',
'title': 'Охотское море стало целиком российским',
'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# TVC embed
{
'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
'info_dict': {
'id': '55304',
'ext': 'mp4',
'title': 'Дошкольное воспитание',
},
},
# SportBox embed
{
'url': 'http://www.vestifinance.ru/articles/25753',
'info_dict': {
'id': '25753',
'title': 'Прямые трансляции с Форума-выставки "Госзаказ-2013"',
},
'playlist': [{
'info_dict': {
'id': '370908',
'title': 'Госзаказ. День 3',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370905',
'title': 'Госзаказ. День 2',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370902',
'title': 'Госзаказ. День 1',
'ext': 'mp4',
}
}],
'params': {
# m3u8 download
'skip_download': True,
},
},
# Myvi.ru embed
{
'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1',
'info_dict': {
'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e',
'ext': 'mp4',
'title': 'Ужастики, русский трейлер (2015)',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 153,
}
},
# XHamster embed
{
'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8',
'info_dict': {
'id': 'showthread',
'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )',
},
'playlist_mincount': 7,
# This forum does not allow <iframe> syntaxes anymore
# Now HTML tags are displayed as-is
'skip': 'No videos on this page',
},
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '1969',
'ext': 'mp4',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# nowvideo embed hidden behind percent encoding
{
'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
'md5': '2baf4ddd70f697d94b1c18cf796d5107',
'info_dict': {
'id': '06e53103ca9aa',
'ext': 'flv',
'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
'description': 'No description',
},
},
# arte embed
{
'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
'md5': '7653032cbb25bf6c80d80f217055fa43',
'info_dict': {
'id': '048195-004_PLUS7-F',
'ext': 'flv',
'title': 'X:enius',
'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
'upload_date': '20140320',
},
'params': {
'skip_download': 'Requires rtmpdump'
},
'skip': 'video gone',
},
# francetv embed
{
'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
'info_dict': {
'id': 'EV_30231',
'ext': 'mp4',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'expected_warnings': [
'Forbidden'
]
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'md5': 'ba0dfe966fa007657bd1443ee672db0f',
'info_dict': {
'id': '53501be369702d3275860000',
'ext': 'mp4',
'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
}
},
# Dailymotion embed
{
'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
'md5': '441aeeb82eb72c422c7f14ec533999cd',
'info_dict': {
'id': 'k2mm4bCdJ6CQ2i7c8o2',
'ext': 'mp4',
'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
'description': 'md5:faf028e48a461b8b7fad38f1e104b119',
'uploader': 'Spi0n',
'uploader_id': 'xgditw',
'upload_date': '20140425',
'timestamp': 1398441542,
},
'add_ie': ['Dailymotion'],
},
# DailyMail embed
{
'url': 'http://www.bumm.sk/krimi/2017/07/05/biztonsagi-kamera-buktatta-le-az-agg-ferfit-utlegelo-apolot',
'info_dict': {
'id': '1495629',
'ext': 'mp4',
'title': 'Care worker punches elderly dementia patient in head 11 times',
'description': 'md5:3a743dee84e57e48ec68bf67113199a5',
},
'add_ie': ['DailyMail'],
'params': {
'skip_download': True,
},
},
# YouTube embed
{
'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
'info_dict': {
'id': 'FXRb4ykk4S0',
'ext': 'mp4',
'title': 'The NBL Auction 2014',
'uploader': 'BADMINTON England',
'uploader_id': 'BADMINTONEvents',
'upload_date': '20140603',
'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
}
},
# MTVServices embed
{
'url': 'http://www.vulture.com/2016/06/new-key-peele-sketches-released.html',
'md5': 'ca1aef97695ef2c1d6973256a57e5252',
'info_dict': {
'id': '769f7ec0-0692-4d62-9b45-0d88074bffc1',
'ext': 'mp4',
'title': 'Key and Peele|October 10, 2012|2|203|Liam Neesons - Uncensored',
'description': 'Two valets share their love for movie star Liam Neesons.',
'timestamp': 1349922600,
'upload_date': '20121011',
},
},
# YouTube embed via <data-embed-url="">
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
}
},
# YouTube <object> embed
{
'url': 'http://www.improbable.com/2017/04/03/untrained-modern-youths-and-ancient-masters-in-selfie-portraits/',
'md5': '516718101ec834f74318df76259fb3cc',
'info_dict': {
'id': 'msN87y-iEx0',
'ext': 'webm',
'title': 'Feynman: Mirrors FUN TO IMAGINE 6',
'upload_date': '20080526',
'description': 'md5:0ffc78ea3f01b2e2c247d5f8d1d3c18d',
'uploader': 'Christopher Sykes',
'uploader_id': 'ChristopherJSykes',
},
'add_ie': ['Youtube'],
},
# Camtasia studio
{
'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
'playlist': [{
'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
'ext': 'flv',
'duration': 2235.90,
}
}, {
'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
'ext': 'flv',
'duration': 2235.93,
}
}],
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
},
# Flowplayer
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
# Multiple brightcove videos
# https://github.com/ytdl-org/youtube-dl/issues/2283
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
# MLB embed
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://study.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '1953f3a698ab51cfc948ed3992a0b7ff',
'info_dict': {
'id': '6e2wtrbdaf',
'ext': 'mov',
'title': 'paywall_north-american-exploration-failed-colonies-of-spain-france-england',
'description': 'a Paywall Videos video from Remilon',
'duration': 644.072,
'uploader': 'study.com',
'timestamp': 1459678540,
'upload_date': '20160403',
'filesize': 24687186,
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1',
'description': 'a Martin Fowler video from ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
'timestamp': 1401832161,
'upload_date': '20140603',
},
},
# Wistia standard embed (async)
{
'url': 'https://www.getdrip.com/university/brennan-dunn-drip-workshop/',
'info_dict': {
'id': '807fafadvk',
'ext': 'mp4',
'title': 'Drip Brennan Dunn Workshop',
'description': 'a JV Webinars video from getdrip-1',
'duration': 4986.95,
'timestamp': 1463607249,
'upload_date': '20160518',
},
'params': {
'skip_download': True,
}
},
# Soundcloud embed
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
},
# Soundcloud multiple embeds
{
'url': 'http://www.guitarplayer.com/lessons/1014/legato-workout-one-hour-to-more-fluid-performance---tab/52809',
'info_dict': {
'id': '52809',
'title': 'Guitar Essentials: Legato Workout—One-Hour to Fluid Performance | TAB + AUDIO',
},
'playlist_mincount': 7,
},
# TuneIn station embed
{
'url': 'http://radiocnrv.com/promouvoir-radio-cnrv/',
'info_dict': {
'id': '204146',
'ext': 'mp3',
'title': 'CNRV',
'location': 'Paris, France',
'is_live': True,
},
'params': {
# Live stream
'skip_download': True,
},
},
# Livestream embed
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
# Another Livestream embed, without 'new.' in URL
{
'url': 'https://www.freespeech.org/',
'info_dict': {
'id': '123537347',
'ext': 'mp4',
'title': 're:^FSTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# Live stream
'skip_download': True,
},
},
# LazyYT
{
'url': 'https://skiplagged.com/',
'info_dict': {
'id': 'skiplagged',
'title': 'Skiplagged: The smart way to find cheap flights',
},
'playlist_mincount': 1,
'add_ie': ['Youtube'],
},
# Cinchcast embed
{
'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
'info_dict': {
'id': '7141703',
'ext': 'mp3',
'upload_date': '20141126',
'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
}
},
# Cinerama player
{
'url': 'http://www.abc.net.au/7.30/content/2015/s4164797.htm',
'info_dict': {
'id': '730m_DandD_1901_512k',
'ext': 'mp4',
'uploader': 'www.abc.net.au',
'title': 'Game of Thrones with dice - Dungeons and Dragons fantasy role-playing game gets new life - 19/01/2015',
}
},
# embedded viddler video
{
'url': 'http://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597',
'info_dict': {
'id': '4d03aad9',
'ext': 'mp4',
'uploader': 'deadspin',
'title': 'WALL-TO-GORTAT',
'timestamp': 1422285291,
'upload_date': '20150126',
},
'add_ie': ['Viddler'],
},
# Libsyn embed
{
'url': 'http://thedailyshow.cc.com/podcast/episodetwelve',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
'skip': 'All The Daily Show URLs now redirect to http://www.cc.com/shows/',
},
# jwplayer YouTube
{
'url': 'http://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:8078af856dca76edc42910b61273dbbf',
'uploader_id': 'NationalArchives08',
'title': 'Webinar: Using Discovery, The National Archives’ online catalogue',
},
},
# jwplayer rtmp
{
'url': 'http://www.suffolk.edu/sjc/live.php',
'info_dict': {
'id': 'live',
'ext': 'flv',
'title': 'Massachusetts Supreme Judicial Court Oral Arguments',
'uploader': 'www.suffolk.edu',
},
'params': {
'skip_download': True,
},
'skip': 'Only has video a few mornings per month, see http://www.suffolk.edu/sjc/',
},
# Complex jwplayer
{
'url': 'http://www.indiedb.com/games/king-machine/videos',
'info_dict': {
'id': 'videos',
'ext': 'mp4',
'title': 'king machine trailer 1',
'description': 'Browse King Machine videos & audio for sweet media. Your eyes will thank you.',
'thumbnail': r're:^https?://.*\.jpg$',
},
},
{
# JWPlayer config passed as variable
'url': 'http://www.txxx.com/videos/3326530/ariele/',
'info_dict': {
'id': '3326530_hq',
'ext': 'mp4',
'title': 'ARIELE | Tube Cup',
'uploader': 'www.txxx.com',
'age_limit': 18,
},
'params': {
'skip_download': True,
}
},
{
# JWPlatform iframe
'url': 'https://www.mediaite.com/tv/dem-senator-claims-gary-cohn-faked-a-bad-connection-during-trump-call-to-get-him-off-the-phone/',
'md5': 'ca00a040364b5b439230e7ebfd02c4e9',
'info_dict': {
'id': 'O0c5JcKT',
'ext': 'mp4',
'upload_date': '20171122',
'timestamp': 1511366290,
'title': 'Dem Senator Claims Gary Cohn Faked a Bad Connection During Trump Call to Get Him Off the Phone',
},
'add_ie': [JWPlatformIE.ie_key()],
},
{
# Video.js embed, multiple formats
'url': 'http://ortcam.com/solidworks-урок-6-настройка-чертежа_33f9b7351.html',
'info_dict': {
'id': 'yygqldloqIk',
'ext': 'mp4',
'title': 'SolidWorks. Урок 6 Настройка чертежа',
'description': 'md5:baf95267792646afdbf030e4d06b2ab3',
'upload_date': '20130314',
'uploader': 'PROстое3D',
'uploader_id': 'PROstoe3D',
},
'params': {
'skip_download': True,
},
},
{
# Video.js embed, single format
'url': 'https://www.vooplayer.com/v3/watch/watch.php?v=NzgwNTg=',
'info_dict': {
'id': 'watch',
'ext': 'mp4',
'title': 'Step 1 - Good Foundation',
'description': 'md5:d1e7ff33a29fc3eb1673d6c270d344f4',
},
'params': {
'skip_download': True,
},
},
# rtl.nl embed
{
'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen',
'playlist_mincount': 5,
'info_dict': {
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed (different embed code)
{
'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014',
'info_dict': {
'id': '1_a52wc67y',
'ext': 'flv',
'upload_date': '20150127',
'uploader_id': 'PremierMedia',
'timestamp': int,
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
# Kaltura embed with single quotes
{
'url': 'http://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY',
'info_dict': {
'id': '0_izeg5utt',
'ext': 'mp4',
'title': '35871',
'timestamp': 1355743100,
'upload_date': '20121217',
'uploader_id': '[email protected]',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura embedded via quoted entry_id
'url': 'https://www.oreilly.com/ideas/my-cloud-makes-pretty-pictures',
'info_dict': {
'id': '0_utuok90b',
'ext': 'mp4',
'title': '06_matthew_brender_raj_dutt',
'timestamp': 1466638791,
'upload_date': '20160622',
},
'add_ie': ['Kaltura'],
'expected_warnings': [
'Could not send HEAD request'
],
'params': {
'skip_download': True,
}
},
{
# Kaltura embedded, some fileExt broken (#11480)
'url': 'http://www.cornell.edu/video/nima-arkani-hamed-standard-models-of-particle-physics',
'info_dict': {
'id': '1_sgtvehim',
'ext': 'mp4',
'title': 'Our "Standard Models" of particle physics and cosmology',
'description': 'md5:67ea74807b8c4fea92a6f38d6d323861',
'timestamp': 1321158993,
'upload_date': '20111113',
'uploader_id': 'kps1',
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed
'url': 'http://www.gsd.harvard.edu/event/i-m-pei-a-centennial-celebration/',
'md5': 'ae5ace8eb09dc1a35d03b579a9c2cc44',
'info_dict': {
'id': '0_f2cfbpwy',
'ext': 'mp4',
'title': 'I. M. Pei: A Centennial Celebration',
'description': 'md5:1db8f40c69edc46ca180ba30c567f37c',
'upload_date': '20170403',
'uploader_id': 'batchUser',
'timestamp': 1491232186,
},
'add_ie': ['Kaltura'],
},
{
# Kaltura iframe embed, more sophisticated
'url': 'http://www.cns.nyu.edu/~eero/math-tools/Videos/lecture-05sep2017.html',
'info_dict': {
'id': '1_9gzouybz',
'ext': 'mp4',
'title': 'lecture-05sep2017',
'description': 'md5:40f347d91fd4ba047e511c5321064b49',
'upload_date': '20170913',
'uploader_id': 'eps2',
'timestamp': 1505340777,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
{
# meta twitter:player
'url': 'http://thechive.com/2017/12/08/all-i-want-for-christmas-is-more-twerk/',
'info_dict': {
'id': '0_01b42zps',
'ext': 'mp4',
'title': 'Main Twerk (Video)',
'upload_date': '20171208',
'uploader_id': '[email protected]',
'timestamp': 1512713057,
},
'params': {
'skip_download': True,
},
'add_ie': ['Kaltura'],
},
# referrer protected EaglePlatform embed
{
'url': 'https://tvrain.ru/lite/teleshow/kak_vse_nachinalos/namin-418921/',
'info_dict': {
'id': '582306',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 3382,
'view_count': int,
},
'params': {
'skip_download': True,
},
},
# ClipYou (EaglePlatform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
'params': {
'skip_download': True,
},
'skip': 'This video is unavailable.',
},
# Pladform embed
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
'skip': 'HTTP Error 404: Not Found',
},
# Playwire embed
{
'url': 'http://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
'info_dict': {
'id': '3519514',
'ext': 'mp4',
'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
'thumbnail': r're:^https?://.*\.png$',
'duration': 45.115,
},
},
# 5min embed
{
'url': 'http://techcrunch.com/video/facebook-creates-on-this-day-crunch-report/518726732/',
'md5': '4c6f127a30736b59b3e2c19234ee2bf7',
'info_dict': {
'id': '518726732',
'ext': 'mp4',
'title': 'Facebook Creates "On This Day" | Crunch Report',
'description': 'Amazon updates Fire TV line, Tesla\'s Model X spotted in the wild',
'timestamp': 1427237531,
'uploader': 'Crunch Report',
'upload_date': '20150324',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# Crooks and Liars embed
{
'url': 'http://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
'info_dict': {
'id': '8RUoRhRi',
'ext': 'mp4',
'title': "Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!",
'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
'timestamp': 1428207000,
'upload_date': '20150405',
'uploader': 'Heather',
},
},
# Crooks and Liars external embed
{
'url': 'http://theothermccain.com/2010/02/02/video-proves-that-bill-kristol-has-been-watching-glenn-beck/comment-page-1/',
'info_dict': {
'id': 'MTE3MjUtMzQ2MzA',
'ext': 'mp4',
'title': 'md5:5e3662a81a4014d24c250d76d41a08d5',
'description': 'md5:9b8e9542d6c3c5de42d6451b7d780cec',
'timestamp': 1265032391,
'upload_date': '20100201',
'uploader': 'Heather',
},
},
# NBC Sports vplayer embed
{
'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a',
'info_dict': {
'id': 'ln7x1qSThw4k',
'ext': 'flv',
'title': "PFT Live: New leader in the 'new-look' defense",
'description': 'md5:65a19b4bbfb3b0c0c5768bed1dfad74e',
'uploader': 'NBCU-SPORTS',
'upload_date': '20140107',
'timestamp': 1389118457,
},
'skip': 'Invalid Page URL',
},
# NBC News embed
{
'url': 'http://www.vulture.com/2016/06/letterman-couldnt-care-less-about-late-night.html',
'md5': '1aa589c675898ae6d37a17913cf68d66',
'info_dict': {
'id': 'x_dtl_oa_LettermanliftPR_160608',
'ext': 'mp4',
'title': 'David Letterman: A Preview',
'description': 'A preview of Tom Brokaw\'s interview with David Letterman as part of the On Assignment series powered by Dateline. Airs Sunday June 12 at 7/6c.',
'upload_date': '20160609',
'timestamp': 1465431544,
'uploader': 'NBCU-NEWS',
},
},
# UDN embed
{
'url': 'https://video.udn.com/news/300346',
'md5': 'fd2060e988c326991037b9aff9df21a6',
'info_dict': {
'id': '300346',
'ext': 'mp4',
'title': '中一中男師變性 全校師生力挺',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
},
'expected_warnings': ['Failed to parse JSON Expecting value'],
},
# Brightcove URL in single quotes
{
'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
'md5': '4ae374f1f8b91c889c4b9203c8c752af',
'info_dict': {
'id': '4255764656001',
'ext': 'mp4',
'title': 'SN Presents: Russell Martin, World Citizen',
'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
'uploader': 'Rogers Sportsnet',
'uploader_id': '1704050871',
'upload_date': '20150525',
'timestamp': 1432570283,
},
},
# Kinja embed
{
'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537',
'info_dict': {
'id': '106351',
'ext': 'mp4',
'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You',
'description': 'Migrated from OnionStudios',
'thumbnail': r're:^https?://.*\.jpe?g$',
'uploader': 'clickhole',
'upload_date': '20150527',
'timestamp': 1432744860,
}
},
# SnagFilms embed
{
'url': 'http://whilewewatch.blogspot.ru/2012/06/whilewewatch-whilewewatch-gripping.html',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
},
# AdobeTVVideo embed
{
'url': 'https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
},
# BrightcoveInPageEmbed embed
{
'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/',
'info_dict': {
'id': '4238694884001',
'ext': 'flv',
'title': 'Tabletop: Dread, Last Thoughts',
'description': 'Tabletop: Dread, Last Thoughts',
'duration': 51690,
},
},
# Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions'
# This video can't be played in browsers if Flash is disabled and the UA is set to iPhone, which is actually a false alarm
{
'url': 'https://dl.dropboxusercontent.com/u/29092637/interview.html',
'info_dict': {
'id': '4785848093001',
'ext': 'mp4',
'title': 'The Cardinal Pell Interview',
'description': 'Sky News Contributor Andrew Bolt interviews George Pell in Rome, following the Cardinal\'s evidence before the Royal Commission into Child Abuse. ',
'uploader': 'GlobeCast Australia - GlobeStream',
'uploader_id': '2733773828001',
'upload_date': '20160304',
'timestamp': 1457083087,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
{
# Brightcove embed with whitespace around attribute names
'url': 'http://www.stack.com/video/3167554373001/learn-to-hit-open-three-pointers-with-damian-lillard-s-baseline-drift-drill',
'info_dict': {
'id': '3167554373001',
'ext': 'mp4',
'title': "Learn to Hit Open Three-Pointers With Damian Lillard's Baseline Drift Drill",
'description': 'md5:57bacb0e0f29349de4972bfda3191713',
'uploader_id': '1079349493',
'upload_date': '20140207',
'timestamp': 1391810548,
},
'params': {
'skip_download': True,
},
},
# Another form of arte.tv embed
{
'url': 'http://www.tv-replay.fr/redirection/09-04-16/arte-reportage-arte-11508975.html',
'md5': '850bfe45417ddf221288c88a0cffe2e2',
'info_dict': {
'id': '030273-562_PLUS7-F',
'ext': 'mp4',
'title': 'ARTE Reportage - Nulle part, en France',
'description': 'md5:e3a0e8868ed7303ed509b9e3af2b870d',
'upload_date': '20160409',
},
},
# LiveLeak embed
{
'url': 'http://www.wykop.pl/link/3088787/',
'md5': '7619da8c820e835bef21a1efa2a0fc71',
'info_dict': {
'id': '874_1459135191',
'ext': 'mp4',
'title': 'Man shows poor quality of new apartment building',
'description': 'The wall is like a sand pile.',
'uploader': 'Lake8737',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Another LiveLeak embed pattern (#13336)
{
'url': 'https://milo.yiannopoulos.net/2017/06/concealed-carry-robbery/',
'info_dict': {
'id': '2eb_1496309988',
'ext': 'mp4',
'title': 'Thief robs place where everyone was armed',
'description': 'md5:694d73ee79e535953cf2488562288eee',
'uploader': 'brazilwtf',
},
'add_ie': [LiveLeakIE.ie_key()],
},
# Duplicated embedded video URLs
{
'url': 'http://www.hudl.com/athlete/2538180/highlights/149298443',
'info_dict': {
'id': '149298443_480_16c25b74_2',
'ext': 'mp4',
'title': 'vs. Blue Orange Spring Game',
'uploader': 'www.hudl.com',
},
},
# twitter:player:stream embed
{
'url': 'http://www.rtl.be/info/video/589263.aspx?CategoryID=288',
'info_dict': {
'id': 'master',
'ext': 'mp4',
'title': 'Une nouvelle espèce de dinosaure découverte en Argentine',
'uploader': 'www.rtl.be',
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# twitter:player embed
{
'url': 'http://www.theatlantic.com/video/index/484130/what-do-black-holes-sound-like/',
'md5': 'a3e0df96369831de324f0778e126653c',
'info_dict': {
'id': '4909620399001',
'ext': 'mp4',
'title': 'What Do Black Holes Sound Like?',
'description': 'what do black holes sound like',
'upload_date': '20160524',
'uploader_id': '29913724001',
'timestamp': 1464107587,
'uploader': 'TheAtlantic',
},
'add_ie': ['BrightcoveLegacy'],
},
# Facebook <iframe> embed
{
'url': 'https://www.hostblogger.de/blog/archives/6181-Auto-jagt-Betonmischer.html',
'md5': 'fbcde74f534176ecb015849146dd3aee',
'info_dict': {
'id': '599637780109885',
'ext': 'mp4',
'title': 'Facebook video #599637780109885',
},
},
# Facebook <iframe> embed, plugin video
{
'url': 'http://5pillarsuk.com/2017/06/07/tariq-ramadan-disagrees-with-pr-exercise-by-imams-refusing-funeral-prayers-for-london-attackers/',
'info_dict': {
'id': '1754168231264132',
'ext': 'mp4',
'title': 'About the Imams and Religious leaders refusing to perform funeral prayers for...',
'uploader': 'Tariq Ramadan (official)',
'timestamp': 1496758379,
'upload_date': '20170606',
},
'params': {
'skip_download': True,
},
},
# Facebook API embed
{
'url': 'http://www.lothype.com/blue-stars-2016-preview-standstill-full-show/',
'md5': 'a47372ee61b39a7b90287094d447d94e',
'info_dict': {
'id': '10153467542406923',
'ext': 'mp4',
'title': 'Facebook video #10153467542406923',
},
},
# Wordpress "YouTube Video Importer" plugin
{
'url': 'http://www.lothype.com/blue-devils-drumline-stanford-lot-2016/',
'md5': 'd16797741b560b485194eddda8121b48',
'info_dict': {
'id': 'HNTXWDXV9Is',
'ext': 'mp4',
'title': 'Blue Devils Drumline Stanford lot 2016',
'upload_date': '20160627',
'uploader_id': 'GENOCIDE8GENERAL10',
'uploader': 'cylus cyrus',
},
},
{
# video stored on custom kaltura server
'url': 'http://www.expansion.com/multimedia/videos.html?media=EQcM30NHIPv',
'md5': '537617d06e64dfed891fa1593c4b30cc',
'info_dict': {
'id': '0_1iotm5bh',
'ext': 'mp4',
'title': 'Elecciones británicas: 5 lecciones para Rajoy',
'description': 'md5:435a89d68b9760b92ce67ed227055f16',
'uploader_id': '[email protected]',
'upload_date': '20150429',
'timestamp': 1430303472,
},
'add_ie': ['Kaltura'],
},
{
# multiple kaltura embeds, nsfw
'url': 'https://www.quartier-rouge.be/prive/femmes/kamila-avec-video-jaime-sadomie.html',
'info_dict': {
'id': 'kamila-avec-video-jaime-sadomie',
'title': "Kamila avec vídeo “J'aime sadomie”",
},
'playlist_count': 8,
},
{
# Non-standard Vimeo embed
'url': 'https://openclassrooms.com/courses/understanding-the-web',
'md5': '64d86f1c7d369afd9a78b38cbb88d80a',
'info_dict': {
'id': '148867247',
'ext': 'mp4',
'title': 'Understanding the web - Teaser',
'description': 'This is "Understanding the web - Teaser" by openclassrooms on Vimeo, the home for high quality videos and the people who love them.',
'upload_date': '20151214',
'uploader': 'OpenClassrooms',
'uploader_id': 'openclassrooms',
},
'add_ie': ['Vimeo'],
},
{
# generic vimeo embed that requires original URL passed as Referer
'url': 'http://racing4everyone.eu/2016/07/30/formula-1-2016-round12-germany/',
'only_matching': True,
},
{
'url': 'https://support.arkena.com/display/PLAY/Ways+to+embed+your+video',
'md5': 'b96f2f71b359a8ecd05ce4e1daa72365',
'info_dict': {
'id': 'b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe',
'ext': 'mp4',
'title': 'Big Buck Bunny',
'description': 'Royalty free test video',
'timestamp': 1432816365,
'upload_date': '20150528',
'is_live': False,
},
'params': {
'skip_download': True,
},
'add_ie': [ArkenaIE.ie_key()],
},
{
'url': 'http://nova.bg/news/view/2016/08/16/156543/%D0%BD%D0%B0-%D0%BA%D0%BE%D1%81%D1%8A%D0%BC-%D0%BE%D1%82-%D0%B2%D0%B7%D1%80%D0%B8%D0%B2-%D0%BE%D1%82%D1%86%D0%B5%D0%BF%D0%B8%D1%85%D0%B0-%D1%86%D1%8F%D0%BB-%D0%BA%D0%B2%D0%B0%D1%80%D1%82%D0%B0%D0%BB-%D0%B7%D0%B0%D1%80%D0%B0%D0%B4%D0%B8-%D0%B8%D0%B7%D1%82%D0%B8%D1%87%D0%B0%D0%BD%D0%B5-%D0%BD%D0%B0-%D0%B3%D0%B0%D0%B7-%D0%B2-%D0%BF%D0%BB%D0%BE%D0%B2%D0%B4%D0%B8%D0%B2/',
'info_dict': {
'id': '1c7141f46c',
'ext': 'mp4',
'title': 'НА КОСЪМ ОТ ВЗРИВ: Изтичане на газ на бензиностанция в Пловдив',
},
'params': {
'skip_download': True,
},
'add_ie': [Vbox7IE.ie_key()],
},
{
# DBTV embeds
'url': 'http://www.dagbladet.no/2016/02/23/nyheter/nordlys/ski/troms/ver/43254897/',
'info_dict': {
'id': '43254897',
'title': 'Etter ett års planlegging, klaffet endelig alt: - Jeg måtte ta en liten dans',
},
'playlist_mincount': 3,
},
{
# Videa embeds
'url': 'http://forum.dvdtalk.com/movie-talk/623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style.html',
'info_dict': {
'id': '623756-deleted-magic-star-wars-ot-deleted-alt-scenes-docu-style',
'title': 'Deleted Magic - Star Wars: OT Deleted / Alt. Scenes Docu. Style - DVD Talk Forum',
},
'playlist_mincount': 2,
},
{
# 20 minuten embed
'url': 'http://www.20min.ch/schweiz/news/story/So-kommen-Sie-bei-Eis-und-Schnee-sicher-an-27032552',
'info_dict': {
'id': '523629',
'ext': 'mp4',
'title': 'So kommen Sie bei Eis und Schnee sicher an',
'description': 'md5:117c212f64b25e3d95747e5276863f7d',
},
'params': {
'skip_download': True,
},
'add_ie': [TwentyMinutenIE.ie_key()],
},
{
# VideoPress embed
'url': 'https://en.support.wordpress.com/videopress/',
'info_dict': {
'id': 'OcobLTqC',
'ext': 'm4v',
'title': 'IMG_5786',
'timestamp': 1435711927,
'upload_date': '20150701',
},
'params': {
'skip_download': True,
},
'add_ie': [VideoPressIE.ie_key()],
},
{
# Rutube embed
'url': 'http://magazzino.friday.ru/videos/vipuski/kazan-2',
'info_dict': {
'id': '9b3d5bee0a8740bf70dfd29d3ea43541',
'ext': 'flv',
'title': 'Магаззино: Казань 2',
'description': 'md5:99bccdfac2269f0e8fdbc4bbc9db184a',
'uploader': 'Магаззино',
'upload_date': '20170228',
'uploader_id': '996642',
},
'params': {
'skip_download': True,
},
'add_ie': [RutubeIE.ie_key()],
},
{
# ThePlatform embedded with whitespaces in URLs
'url': 'http://www.golfchannel.com/topics/shows/golftalkcentral.htm',
'only_matching': True,
},
{
# Senate ISVP iframe https
'url': 'https://www.hsgac.senate.gov/hearings/canadas-fast-track-refugee-plan-unanswered-questions-and-implications-for-us-national-security',
'md5': 'fb8c70b0b515e5037981a2492099aab8',
'info_dict': {
'id': 'govtaff020316',
'ext': 'mp4',
'title': 'Integrated Senate Video Player',
},
'add_ie': [SenateISVPIE.ie_key()],
},
{
# Limelight embeds (1 channel embed + 4 media embeds)
'url': 'http://www.sedona.com/FacilitatorTraining2017',
'info_dict': {
'id': 'FacilitatorTraining2017',
'title': 'Facilitator Training 2017',
},
'playlist_mincount': 5,
},
{
# Limelight embed (LimelightPlayerUtil.embed)
'url': 'https://tv5.ca/videos?v=xuu8qowr291ri',
'info_dict': {
'id': '95d035dc5c8a401588e9c0e6bd1e9c92',
'ext': 'mp4',
'title': '07448641',
'timestamp': 1499890639,
'upload_date': '20170712',
},
'params': {
'skip_download': True,
},
'add_ie': ['LimelightMedia'],
},
{
'url': 'http://kron4.com/2017/04/28/standoff-with-walnut-creek-murder-suspect-ends-with-arrest/',
'info_dict': {
'id': 'standoff-with-walnut-creek-murder-suspect-ends-with-arrest',
'title': 'Standoff with Walnut Creek murder suspect ends',
'description': 'md5:3ccc48a60fc9441eeccfc9c469ebf788',
},
'playlist_mincount': 4,
},
{
# WashingtonPost embed
'url': 'http://www.vanityfair.com/hollywood/2017/04/donald-trump-tv-pitches',
'info_dict': {
'id': '8caf6e88-d0ec-11e5-90d3-34c2c42653ac',
'ext': 'mp4',
'title': "No one has seen the drama series based on Trump's life \u2014 until now",
'description': 'Donald Trump wanted a weekly TV drama based on his life. It never aired. But The Washington Post recently obtained a scene from the pilot script — and enlisted actors.',
'timestamp': 1455216756,
'uploader': 'The Washington Post',
'upload_date': '20160211',
},
'add_ie': [WashingtonPostIE.ie_key()],
},
{
# Mediaset embed
'url': 'http://www.tgcom24.mediaset.it/politica/serracchiani-voglio-vivere-in-una-societa-aperta-reazioni-sproporzionate-_3071354-201702a.shtml',
'info_dict': {
'id': '720642',
'ext': 'mp4',
'title': 'Serracchiani: "Voglio vivere in una società aperta, con tutela del patto di fiducia"',
},
'params': {
'skip_download': True,
},
'add_ie': [MediasetIE.ie_key()],
},
{
# JOJ.sk embeds
'url': 'https://www.noviny.sk/slovensko/238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'info_dict': {
'id': '238543-slovenskom-sa-prehnala-vlna-silnych-burok',
'title': 'Slovenskom sa prehnala vlna silných búrok',
},
'playlist_mincount': 5,
'add_ie': [JojIE.ie_key()],
},
{
# AMP embed (see https://www.ampproject.org/docs/reference/components/amp-video)
'url': 'https://tvrain.ru/amp/418921/',
'md5': 'cc00413936695987e8de148b67d14f1d',
'info_dict': {
'id': '418921',
'ext': 'mp4',
'title': 'Стас Намин: «Мы нарушили девственность Кремля»',
},
},
{
# vzaar embed
'url': 'http://help.vzaar.com/article/165-embedding-video',
'md5': '7e3919d9d2620b89e3e00bec7fe8c9d4',
'info_dict': {
'id': '8707641',
'ext': 'mp4',
'title': 'Building A Business Online: Principal Chairs Q & A',
},
},
{
# multiple HTML5 videos on one page
'url': 'https://www.paragon-software.com/home/rk-free/keyscenarios.html',
'info_dict': {
'id': 'keyscenarios',
'title': 'Rescue Kit 14 Free Edition - Getting started',
},
'playlist_count': 4,
},
{
# vshare embed
'url': 'https://youtube-dl-demo.neocities.org/vshare.html',
'md5': '17b39f55b5497ae8b59f5fbce8e35886',
'info_dict': {
'id': '0f64ce6',
'title': 'vl14062007715967',
'ext': 'mp4',
}
},
{
'url': 'http://www.heidelberg-laureate-forum.org/blog/video/lecture-friday-september-23-2016-sir-c-antony-r-hoare/',
'md5': 'aecd089f55b1cb5a59032cb049d3a356',
'info_dict': {
'id': '90227f51a80c4d8f86c345a7fa62bd9a1d',
'ext': 'mp4',
'title': 'Lecture: Friday, September 23, 2016 - Sir Tony Hoare',
'description': 'md5:5a51db84a62def7b7054df2ade403c6c',
'timestamp': 1474354800,
'upload_date': '20160920',
}
},
{
'url': 'http://www.kidzworld.com/article/30935-trolls-the-beat-goes-on-interview-skylar-astin-and-amanda-leighton',
'info_dict': {
'id': '1731611',
'ext': 'mp4',
'title': 'Official Trailer | TROLLS: THE BEAT GOES ON!',
'description': 'md5:eb5f23826a027ba95277d105f248b825',
'timestamp': 1516100691,
'upload_date': '20180116',
},
'params': {
'skip_download': True,
},
'add_ie': [SpringboardPlatformIE.ie_key()],
},
{
'url': 'https://www.yapfiles.ru/show/1872528/690b05d3054d2dbe1e69523aa21bb3b1.mp4.html',
'info_dict': {
'id': 'vMDE4NzI1Mjgt690b',
'ext': 'mp4',
'title': 'Котята',
},
'add_ie': [YapFilesIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# CloudflareStream embed
'url': 'https://www.cloudflare.com/products/cloudflare-stream/',
'info_dict': {
'id': '31c9291ab41fac05471db4e73aa11717',
'ext': 'mp4',
'title': '31c9291ab41fac05471db4e73aa11717',
},
'add_ie': [CloudflareStreamIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# PeerTube embed
'url': 'https://joinpeertube.org/fr/home/',
'info_dict': {
'id': 'home',
'title': 'Reprenez le contrôle de vos vidéos ! #JoinPeertube',
},
'playlist_count': 2,
},
{
# Indavideo embed
'url': 'https://streetkitchen.hu/receptek/igy_kell_otthon_hamburgert_sutni/',
'info_dict': {
'id': '1693903',
'ext': 'mp4',
'title': 'Így kell otthon hamburgert sütni',
'description': 'md5:f5a730ecf900a5c852e1e00540bbb0f7',
'timestamp': 1426330212,
'upload_date': '20150314',
'uploader': 'StreetKitchen',
'uploader_id': '546363',
},
'add_ie': [IndavideoEmbedIE.ie_key()],
'params': {
'skip_download': True,
},
},
{
# APA embed via JWPlatform embed
'url': 'http://www.vol.at/blue-man-group/5593454',
'info_dict': {
'id': 'jjv85FdZ',
'ext': 'mp4',
'title': '"Blau ist mysteriös": Die Blue Man Group im Interview',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 254,
'timestamp': 1519211149,
'upload_date': '20180221',
},
'params': {
'skip_download': True,
},
},
{
'url': 'http://share-videos.se/auto/video/83645793?uid=13',
'md5': 'b68d276de422ab07ee1d49388103f457',
'info_dict': {
'id': '83645793',
'title': 'Lock up and get excited',
'ext': 'mp4'
},
'skip': 'TODO: fix nested playlists processing in tests',
},
{
# Viqeo embeds
'url': 'https://viqeo.tv/',
'info_dict': {
'id': 'viqeo',
'title': 'All-new video platform',
},
'playlist_count': 6,
},
{
# Squarespace video embed, 2019-08-28
'url': 'http://ootboxford.com',
'info_dict': {
'id': 'Tc7b_JGdZfw',
'title': 'Out of the Blue, at Childish Things 10',
'ext': 'mp4',
'description': 'md5:a83d0026666cf5ee970f8bd1cfd69c7f',
'uploader_id': 'helendouglashouse',
'uploader': 'Helen & Douglas House',
'upload_date': '20140328',
},
'params': {
'skip_download': True,
},
},
# {
# # Zype embed
# 'url': 'https://www.cookscountry.com/episode/554-smoky-barbecue-favorites',
# 'info_dict': {
# 'id': '5b400b834b32992a310622b9',
# 'ext': 'mp4',
# 'title': 'Smoky Barbecue Favorites',
# 'thumbnail': r're:^https?://.*\.jpe?g',
# 'description': 'md5:5ff01e76316bd8d46508af26dc86023b',
# 'upload_date': '20170909',
# 'timestamp': 1504915200,
# },
# 'add_ie': [ZypeIE.ie_key()],
# 'params': {
# 'skip_download': True,
# },
# },
{
# videojs embed
'url': 'https://video.sibnet.ru/shell.php?videoid=3422904',
'info_dict': {
'id': 'shell',
'ext': 'mp4',
'title': 'Доставщик пиццы спросил разрешения сыграть на фортепиано',
'description': 'md5:89209cdc587dab1e4a090453dbaa2cb1',
'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
'skip_download': True,
},
'expected_warnings': ['Failed to download MPD manifest'],
},
{
# DailyMotion embed with DM.player
'url': 'https://www.beinsports.com/us/copa-del-rey/video/the-locker-room-valencia-beat-barca-in-copa/1203804',
'info_dict': {
'id': 'k6aKkGHd9FJs4mtJN39',
'ext': 'mp4',
'title': 'The Locker Room: Valencia Beat Barca In Copa del Rey Final',
'description': 'This video is private.',
'uploader_id': 'x1jf30l',
'uploader': 'beIN SPORTS USA',
'upload_date': '20190528',
'timestamp': 1559062971,
},
'params': {
'skip_download': True,
},
},
# {
# # TODO: find another test
# # http://schema.org/VideoObject
# 'url': 'https://flipagram.com/f/nyvTSJMKId',
# 'md5': '888dcf08b7ea671381f00fab74692755',
# 'info_dict': {
# 'id': 'nyvTSJMKId',
# 'ext': 'mp4',
# 'title': 'Flipagram by sjuria101 featuring Midnight Memories by One Direction',
# 'description': '#love for cats.',
# 'timestamp': 1461244995,
# 'upload_date': '20160421',
# },
# 'params': {
# 'force_generic_extractor': True,
# },
# },
{
# VHX Embed
'url': 'https://demo.vhx.tv/category-c/videos/file-example-mp4-480-1-5mg-copy',
'info_dict': {
'id': '858208',
'ext': 'mp4',
'title': 'Untitled',
'uploader_id': 'user80538407',
'uploader': 'OTT Videos',
},
},
{
# ArcPublishing PoWa video player
'url': 'https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/',
'md5': 'b03b2fac8680e1e5a7cc81a5c27e71b3',
'info_dict': {
'id': '8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
'ext': 'mp4',
'title': 'Senate candidates wave to voters on Anchorage streets',
'description': 'md5:91f51a6511f090617353dc720318b20e',
'timestamp': 1604378735,
'upload_date': '20201103',
'duration': 1581,
},
},
{
# MyChannels SDK embed
# https://www.24kitchen.nl/populair/deskundige-dit-waarom-sommigen-gevoelig-zijn-voor-voedselallergieen
'url': 'https://www.demorgen.be/nieuws/burgemeester-rotterdam-richt-zich-in-videoboodschap-tot-relschoppers-voelt-het-goed~b0bcfd741/',
'md5': '90c0699c37006ef18e198c032d81739c',
'info_dict': {
'id': '194165',
'ext': 'mp4',
'title': 'Burgemeester Aboutaleb spreekt relschoppers toe',
'timestamp': 1611740340,
'upload_date': '20210127',
'duration': 159,
},
},
{
# Simplecast player embed
'url': 'https://www.bio.org/podcast',
'info_dict': {
'id': 'podcast',
'title': 'I AM BIO Podcast | BIO',
},
'playlist_mincount': 52,
},
{
# WimTv embed player
'url': 'http://www.msmotor.tv/wearefmi-pt-2-2021/',
'info_dict': {
'id': 'wearefmi-pt-2-2021',
'title': '#WEAREFMI – PT.2 – 2021 – MsMotorTV',
},
'playlist_count': 1,
},
]
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
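# Turn an RSS/podcast feed into a playlist: each <item> with an enclosure URL (or <link>)
# becomes a url_transparent entry, enriched with iTunes metadata (duration, episode,
# season, explicit flag) where the feed provides it.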
def _extract_rss(self, url, video_id, doc):
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
NS_MAP = {
'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
}
entries = []
for it in doc.findall('./channel/item'):
next_url = None
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break
if not next_url:
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
continue
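# Helper to read an optional <itunes:*> child element of the current feed item.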
def itunes(key):
return xpath_text(
it, xpath_with_ns('./itunes:%s' % key, NS_MAP),
default=None)
duration = itunes('duration')
explicit = (itunes('explicit') or '').lower()
if explicit in ('true', 'yes'):
age_limit = 18
elif explicit in ('false', 'no'):
age_limit = 0
else:
age_limit = None
entries.append({
'_type': 'url_transparent',
'url': next_url,
'title': it.find('title').text,
'description': xpath_text(it, 'description', default=None),
'timestamp': unified_timestamp(
xpath_text(it, 'pubDate', default=None)),
'duration': int_or_none(duration) or parse_duration(duration),
'thumbnail': url_or_none(xpath_attr(it, xpath_with_ns('./itunes:image', NS_MAP), 'href')),
'episode': itunes('title'),
'episode_number': int_or_none(itunes('episode')),
'season_number': int_or_none(itunes('season')),
'age_limit': age_limit,
})
return {
'_type': 'playlist',
'id': url,
'title': playlist_title,
'description': playlist_desc,
'entries': entries,
}
def _extract_camtasia(self, url, video_id, webpage):
""" Returns None if no camtasia video can be found. """
camtasia_cfg = self._search_regex(
r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
webpage, 'camtasia configuration file', default=None)
if camtasia_cfg is None:
return None
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
errnote='Failed to download camtasia configuration')
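# The fileset in the Camtasia config lists the recorded streams (e.g. the main video
# and a picture-in-picture track); each <uri> becomes its own playlist entry.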
fileset_node = camtasia_cfg.find('./playlist/array/fileset')
entries = []
for n in fileset_node.getchildren():
url_n = n.find('./uri')
if url_n is None:
continue
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': '%s - %s' % (title, n.tag),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
return {
'_type': 'playlist',
'entries': entries,
'title': title,
}
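# Extraction strategy: resolve redirects via a HEAD request, check whether the URL points
# directly at media or at a manifest (M3U8/RSS/SMIL/XSPF/MPD/ISM/f4m), try Camtasia
# projects, and finally scan the downloaded webpage for the many supported embedded players.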
def _real_extract(self, url):
if url.startswith('//'):
return self.url_result(self.http_scheme() + url)
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self.get_param('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if re.match(r'^[^\s/]+\.[^\s/]+/', url):
self.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
if re.match(r'^(?:url|URL)$', url):
raise ExtractorError(
'Invalid URL: %r . Call yt-dlp like this: yt-dlp -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
if default_search in ('error', 'fixup_error'):
raise ExtractorError(
'%r is not a valid URL. '
'Set --default-search "ytsearch" (or run yt-dlp "ytsearch:%s" ) to search YouTube'
% (url, url), expected=True)
else:
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)
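# Smuggled data can carry a forced video id or a 'to_generic' flag indicating that
# another extractor intentionally handed the URL over to the generic extractor.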
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = self._generic_id(url)
self.to_screen('%s: Requesting header' % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
full_response = None
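# If the HEAD request failed, fall back to a full GET and reuse its response for the checks below.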
if head_response is False:
request = sanitized_Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
'id': video_id,
'title': self._generic_title(url),
'timestamp': unified_timestamp(head_response.headers.get('Last-Modified'))
}
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '').lower()
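# Content-Types that indicate a direct media link: audio/*, video/*, and application/ogg
# or application/(vnd.apple.|x-)?mpegurl (HLS manifests).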
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
if m:
format_id = compat_str(m.group('format_id'))
subtitles = {}
if format_id.endswith('mpegurl'):
formats, subtitles = self._extract_m3u8_formats_and_subtitles(url, video_id, 'mp4')
elif format_id == 'f4m':
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [{
'format_id': format_id,
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}]
info_dict['direct'] = True
self._sort_formats(formats)
info_dict['formats'] = formats
info_dict['subtitles'] = subtitles
return info_dict
if not self.get_param('test', False) and not is_intentional:
force = self.get_param('force_generic_extractor', False)
self.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
request = sanitized_Request(url)
# Some webservers may serve compressed content of rather big size (e.g. gzipped flac),
# making it impossible to download only a chunk of the file (yet we only need 512kB to
# test whether it's HTML or not). With the yt-dlp default Accept-Encoding header,
# that would always result in downloading the whole file, which is not desirable.
# Therefore, for the extraction pass we have to override Accept-Encoding to '*' in order
# to accept raw bytes and be able to download only a chunk.
# It may be better to solve this by checking Content-Type for application/octet-stream
# after the HEAD request finishes, but it is not clear we can rely on that.
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b'#EXTM3U'):
info_dict['formats'] = self._extract_m3u8_formats(url, video_id, 'mp4')
self._sort_formats(info_dict['formats'])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self.report_warning(
'URL could be a direct video link, returning it as such.')
info_dict.update({
'direct': True,
'url': url,
})
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes)
if '<title>DPG Media Privacy Gate</title>' in webpage:
webpage = self._download_webpage(url, video_id)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file, an XSPF playlist or an MPD manifest?
try:
try:
doc = compat_etree_fromstring(webpage)
except compat_xml_parse_error:
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif doc.tag == 'SmoothStreamingMedia':
info_dict['formats'], info_dict['subtitles'] = self._parse_ism_formats_and_subtitles(doc, url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil['formats'])
return smil
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(
self._parse_xspf(
doc, video_id, xspf_url=url,
xspf_base_url=full_response.geturl()),
video_id)
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
info_dict['formats'], info_dict['subtitles'] = self._parse_mpd_formats_and_subtitles(
doc,
mpd_base_url=full_response.geturl().rpartition('/')[0],
mpd_url=url)
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$', doc.tag):
info_dict['formats'] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict['formats'])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
# Sometimes the embedded video player is hidden behind percent encoding
# (e.g. https://github.com/ytdl-org/youtube-dl/issues/2448).
# Unescaping the whole page would allow handling those cases in a generic way.
# FIXME: unescaping the whole page may break URLs, so it is commented out for now.
# There should probably be a second run of the generic extractor on the unescaped webpage.
# webpage = compat_urllib_parse_unquote(webpage)
# Unescape Squarespace embeds so they can be detected by the generic extractor,
# see https://github.com/ytdl-org/youtube-dl/issues/21294
webpage = re.sub(
r'<div[^>]+class=[^>]*?\bsqs-video-wrapper\b[^>]*>',
lambda x: unescapeHTML(x.group(0)), webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# Use the domain name as the video uploader
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
info_dict.update({
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'age_limit': age_limit,
})
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
'ie_key': 'BrightcoveLegacy'
} for bc_url in bc_urls]
return {
'_type': 'playlist',
'title': video_title,
'id': video_id,
'entries': entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(self, webpage)
if bc_urls:
return self.playlist_from_matches(
bc_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'referrer': url}),
ie='BrightcoveNew')
# Look for Nexx embeds
nexx_urls = NexxIE._extract_urls(webpage)
if nexx_urls:
return self.playlist_from_matches(nexx_urls, video_id, video_title, ie=NexxIE.ie_key())
# Look for Nexx iFrame embeds
nexx_embed_urls = NexxEmbedIE._extract_urls(webpage)
if nexx_embed_urls:
return self.playlist_from_matches(nexx_embed_urls, video_id, video_title, ie=NexxEmbedIE.ie_key())
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return self.playlist_from_matches(tp_urls, video_id, video_title, ie='ThePlatform')
arc_urls = ArcPublishingIE._extract_urls(webpage)
if arc_urls:
return self.playlist_from_matches(arc_urls, video_id, video_title, ie=ArcPublishingIE.ie_key())
mychannels_urls = MedialaanIE._extract_urls(webpage)
if mychannels_urls:
return self.playlist_from_matches(
mychannels_urls, video_id, video_title, ie=MedialaanIE.ie_key())
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:(?:www|static)\.)?rtl\.nl/(?:system/videoplayer/[^"]+(?:video_)?)?embed[^"]+)"',
webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='RtlNl')
vimeo_urls = VimeoIE._extract_urls(url, webpage)
if vimeo_urls:
return self.playlist_from_matches(vimeo_urls, video_id, video_title, ie=VimeoIE.ie_key())
vhx_url = VHXEmbedIE._extract_url(webpage)
if vhx_url:
return self.url_result(vhx_url, VHXEmbedIE.ie_key())
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage, 'vid.me embed', default=None)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, 'Vidme')
# Invidious Instances
# https://github.com/yt-dlp/yt-dlp/issues/195
# https://github.com/iv-org/invidious/pull/1730
youtube_url = self._search_regex(
r'<link rel="alternate" href="(https://www\.youtube\.com/watch\?v=[0-9A-Za-z_-]{11})"',
webpage, 'youtube link', default=None)
if youtube_url:
return self.url_result(youtube_url, YoutubeIE.ie_key())
# Look for YouTube embeds
youtube_urls = YoutubeIE._extract_urls(webpage)
if youtube_urls:
return self.playlist_from_matches(
youtube_urls, video_id, video_title, ie=YoutubeIE.ie_key())
matches = DailymotionIE._extract_urls(webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title)
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return self.playlist_from_matches(
playlists, video_id, video_title, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for DailyMail embeds
dailymail_urls = DailyMailIE._extract_urls(webpage)
if dailymail_urls:
return self.playlist_from_matches(
dailymail_urls, video_id, video_title, ie=DailyMailIE.ie_key())
# Look for Teachable embeds, must be before Wistia
teachable_url = TeachableIE._extract_url(webpage, url)
if teachable_url:
return self.url_result(teachable_url)
# Look for embedded Wistia player
wistia_urls = WistiaIE._extract_urls(webpage)
if wistia_urls:
playlist = self.playlist_from_matches(wistia_urls, video_id, video_title, ie=WistiaIE.ie_key())
for entry in playlist['entries']:
entry.update({
'_type': 'url_transparent',
'uploader': video_uploader,
})
return playlist
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, 'SVT')
# Look for Bandcamp pages with custom domain
mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage)
or re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'OO\.Player\.create\.apply\(\s*OO\.Player\s*,\s*op\(\s*\[\s*[\'"][^\'"]*[\'"]\s*,\s*[\'"](?P<ec>.{32})[\'"]', webpage)
or re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage)
or re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
embed_token = self._search_regex(
r'embedToken[\'"]?\s*:\s*[\'"]([^\'"]+)',
webpage, 'ooyala embed token', default=None)
return OoyalaIE._build_url_result(smuggle_url(
mobj.group('ec'), {
'domain': url,
'embed_token': embed_token,
}))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return self.playlist_from_matches(
embeds, video_id, video_title,
getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Aparat')
# Look for MPORA videos
mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Mpora')
# Look for embedded Facebook player
facebook_urls = FacebookIE._extract_urls(webpage)
if facebook_urls:
return self.playlist_from_matches(facebook_urls, video_id, video_title)
# Look for embedded VK player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded Odnoklassniki player
odnoklassniki_url = OdnoklassnikiIE._extract_url(webpage)
if odnoklassniki_url:
return self.url_result(odnoklassniki_url, OdnoklassnikiIE.ie_key())
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ivi')
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'HuffPost')
# Look for embed.ly
mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
return self.playlist_from_matches(
matches, video_id, video_title, getter=unescapeHTML, ie='FunnyOrDie')
# Look for Simplecast embeds
simplecast_urls = SimplecastIE._extract_urls(webpage)
if simplecast_urls:
return self.playlist_from_matches(
simplecast_urls, video_id, video_title)
# Look for BBC iPlayer embed
matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage)
if matches:
return self.playlist_from_matches(matches, video_id, video_title, ie='BBCCoUk')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, 'TVC')
# Look for embedded SportBox player
sportbox_urls = SportBoxIE._extract_urls(webpage)
if sportbox_urls:
return self.playlist_from_matches(sportbox_urls, video_id, video_title, ie=SportBoxIE.ie_key())
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return self.playlist_from_matches(xhamster_urls, video_id, video_title, ie='XHamsterEmbed')
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return self.playlist_from_matches(tnaflix_urls, video_id, video_title, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded PornHub player
pornhub_urls = PornHubIE._extract_urls(webpage)
if pornhub_urls:
return self.playlist_from_matches(pornhub_urls, video_id, video_title, ie=PornHubIE.ie_key())
# Look for embedded DrTuber player
drtuber_urls = DrTuberIE._extract_urls(webpage)
if drtuber_urls:
return self.playlist_from_matches(drtuber_urls, video_id, video_title, ie=DrTuberIE.ie_key())
# Look for embedded RedTube player
redtube_urls = RedTubeIE._extract_urls(webpage)
if redtube_urls:
return self.playlist_from_matches(redtube_urls, video_id, video_title, ie=RedTubeIE.ie_key())
# Look for embedded Tube8 player
tube8_urls = Tube8IE._extract_urls(webpage)
if tube8_urls:
return self.playlist_from_matches(tube8_urls, video_id, video_title, ie=Tube8IE.ie_key())
# Look for embedded Mofosex player
mofosex_urls = MofosexEmbedIE._extract_urls(webpage)
if mofosex_urls:
return self.playlist_from_matches(mofosex_urls, video_id, video_title, ie=MofosexEmbedIE.ie_key())
# Look for embedded Spankwire player
spankwire_urls = SpankwireIE._extract_urls(webpage)
if spankwire_urls:
return self.playlist_from_matches(spankwire_urls, video_id, video_title, ie=SpankwireIE.ie_key())
# Look for embedded YouPorn player
youporn_urls = YouPornIE._extract_urls(webpage)
if youporn_urls:
return self.playlist_from_matches(youporn_urls, video_id, video_title, ie=YouPornIE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Tvigle')
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'TED')
# Look for embedded Ustream videos
ustream_url = UstreamIE._extract_url(webpage)
if ustream_url:
return self.url_result(ustream_url, UstreamIE.ie_key())
# Look for embedded arte.tv player
arte_urls = ArteTVEmbedIE._extract_urls(webpage)
if arte_urls:
return self.playlist_from_matches(arte_urls, video_id, video_title)
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
soundcloud_urls = SoundcloudEmbedIE._extract_urls(webpage)
if soundcloud_urls:
return self.playlist_from_matches(soundcloud_urls, video_id, video_title, getter=unescapeHTML)
# Look for tunein player
tunein_urls = TuneInBaseIE._extract_urls(webpage)
if tunein_urls:
return self.playlist_from_matches(tunein_urls, video_id, video_title)
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Yahoo')
# Look for embedded sbs.com.au player
mobj = re.search(
r'''(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Cinchcast')
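# Look for embedded MLB player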
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m\.mlb\.com/video/[^"\']+)',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
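# Look for Conde Nast embeds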
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
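# Look for Livestream embeds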
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
kaltura_urls = KalturaIE._extract_urls(webpage)
if kaltura_urls:
return self.playlist_from_matches(
kaltura_urls, video_id, video_title,
getter=lambda x: smuggle_url(x, {'source_url': url}),
ie=KalturaIE.ie_key())
# Look for EaglePlatform embeds
eagleplatform_url = EaglePlatformIE._extract_url(webpage)
if eagleplatform_url:
return self.url_result(smuggle_url(eagleplatform_url, {'referrer': url}), EaglePlatformIE.ie_key())
# Look for ClipYou (uses EaglePlatform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
if mobj is not None:
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Webcaster embeds
webcaster_url = WebcasterFeedIE._extract_url(self, webpage)
if webcaster_url:
return self.url_result(webcaster_url, ie=WebcasterFeedIE.ie_key())
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?', webpage)
if mobj is not None:
return self.url_result('5min:%s' % mobj.group('id'), 'FiveMin')
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Look for NBC News embeds
nbc_news_embed_url = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//www\.nbcnews\.com/widget/video-embed/[^"\']+)\1', webpage)
if nbc_news_embed_url:
return self.url_result(nbc_news_embed_url.group('url'), 'NBCNews')
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, 'GoogleDrive')
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?:https?:)?(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, 'SenateISVP')
# Look for Kinja embeds
kinja_embed_urls = KinjaEmbedIE._extract_urls(webpage, url)
if kinja_embed_urls:
return self.playlist_from_matches(
kinja_embed_urls, video_id, video_title)
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_urls = JWPlatformIE._extract_urls(webpage)
if jwplatform_urls:
return self.playlist_from_matches(jwplatform_urls, video_id, video_title, ie=JWPlatformIE.ie_key())
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(self._proto_relative_url(digiteka_url), DigitekaIE.ie_key())
# Look for Arkena embeds
arkena_url = ArkenaIE._extract_url(webpage)
if arkena_url:
return self.url_result(arkena_url, ArkenaIE.ie_key())
# Look for Piksel embeds
piksel_url = PikselIE._extract_url(webpage)
if piksel_url:
return self.url_result(piksel_url, PikselIE.ie_key())
# Look for Limelight embeds
limelight_urls = LimelightBaseIE._extract_urls(webpage, url)
if limelight_urls:
return self.playlist_result(
limelight_urls, video_id, video_title, video_description)
# Look for Anvato embeds
anvato_urls = AnvatoIE._extract_urls(self, webpage, video_id)
if anvato_urls:
return self.playlist_result(
anvato_urls, video_id, video_title, video_description)
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))),
'AdobeTVVideo')
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), 'Vine')
# Look for VODPlatform embeds
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:(?:www\.)?vod-platform\.net|embed\.kwikmotion\.com)/[eE]mbed/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group('url'))), 'VODPlatform')
# Look for Mangomolo embeds
mobj = re.search(
r'''(?x)<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//
(?:
admin\.mangomolo\.com/analytics/index\.php/customers/embed|
player\.mangomolo\.com/v1
)/
(?:
video\?.*?\bid=(?P<video_id>\d+)|
(?:index|live)\?.*?\bchannelid=(?P<channel_id>(?:[A-Za-z0-9+/=]|%2B|%2F|%3D)+)
).+?)\1''', webpage)
if mobj is not None:
info = {
'_type': 'url_transparent',
'url': self._proto_relative_url(unescapeHTML(mobj.group('url'))),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
video_id = mobj.group('video_id')
if video_id:
info.update({
'ie_key': 'MangomoloVideo',
'id': video_id,
})
else:
info.update({
'ie_key': 'MangomoloLive',
'id': mobj.group('channel_id'),
})
return info
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key())
# Look for LiveLeak embeds
liveleak_urls = LiveLeakIE._extract_urls(webpage)
if liveleak_urls:
return self.playlist_from_matches(liveleak_urls, video_id, video_title)
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
'_type': 'url_transparent',
'ie_key': ThreeQSDNIE.ie_key(),
'url': self._proto_relative_url(threeqsdn_url),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
# Look for VBOX7 embeds
vbox7_url = Vbox7IE._extract_url(webpage)
if vbox7_url:
return self.url_result(vbox7_url, Vbox7IE.ie_key())
# Look for DBTV embeds
dbtv_urls = DBTVIE._extract_urls(webpage)
if dbtv_urls:
return self.playlist_from_matches(dbtv_urls, video_id, video_title, ie=DBTVIE.ie_key())
# Look for Videa embeds
videa_urls = VideaIE._extract_urls(webpage)
if videa_urls:
return self.playlist_from_matches(videa_urls, video_id, video_title, ie=VideaIE.ie_key())
# Look for 20 minuten embeds
twentymin_urls = TwentyMinutenIE._extract_urls(webpage)
if twentymin_urls:
return self.playlist_from_matches(
twentymin_urls, video_id, video_title, ie=TwentyMinutenIE.ie_key())
# Look for VideoPress embeds
videopress_urls = VideoPressIE._extract_urls(webpage)
if videopress_urls:
return self.playlist_from_matches(
videopress_urls, video_id, video_title, ie=VideoPressIE.ie_key())
# Look for Rutube embeds
rutube_urls = RutubeIE._extract_urls(webpage)
if rutube_urls:
return self.playlist_from_matches(
rutube_urls, video_id, video_title, ie=RutubeIE.ie_key())
# Look for WashingtonPost embeds
wapo_urls = WashingtonPostIE._extract_urls(webpage)
if wapo_urls:
return self.playlist_from_matches(
wapo_urls, video_id, video_title, ie=WashingtonPostIE.ie_key())
# Look for Mediaset embeds
mediaset_urls = MediasetIE._extract_urls(self, webpage)
if mediaset_urls:
return self.playlist_from_matches(
mediaset_urls, video_id, video_title, ie=MediasetIE.ie_key())
# Look for JOJ.sk embeds
joj_urls = JojIE._extract_urls(webpage)
if joj_urls:
return self.playlist_from_matches(
joj_urls, video_id, video_title, ie=JojIE.ie_key())
# Look for megaphone.fm embeds
mpfn_urls = MegaphoneIE._extract_urls(webpage)
if mpfn_urls:
return self.playlist_from_matches(
mpfn_urls, video_id, video_title, ie=MegaphoneIE.ie_key())
# Look for vzaar embeds
vzaar_urls = VzaarIE._extract_urls(webpage)
if vzaar_urls:
return self.playlist_from_matches(
vzaar_urls, video_id, video_title, ie=VzaarIE.ie_key())
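# Look for Channel 9 embeds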
channel9_urls = Channel9IE._extract_urls(webpage)
if channel9_urls:
return self.playlist_from_matches(
channel9_urls, video_id, video_title, ie=Channel9IE.ie_key())
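# Look for VShare embeds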
vshare_urls = VShareIE._extract_urls(webpage)
if vshare_urls:
return self.playlist_from_matches(
vshare_urls, video_id, video_title, ie=VShareIE.ie_key())
# Look for Mediasite embeds
mediasite_urls = MediasiteIE._extract_urls(webpage)
if mediasite_urls:
entries = [
self.url_result(smuggle_url(
compat_urlparse.urljoin(url, mediasite_url),
{'UrlReferrer': url}), ie=MediasiteIE.ie_key())
for mediasite_url in mediasite_urls]
return self.playlist_result(entries, video_id, video_title)
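# Look for Springboard Platform embeds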
springboardplatform_urls = SpringboardPlatformIE._extract_urls(webpage)
if springboardplatform_urls:
return self.playlist_from_matches(
springboardplatform_urls, video_id, video_title,
ie=SpringboardPlatformIE.ie_key())
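# Look for YapFiles embeds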
yapfiles_urls = YapFilesIE._extract_urls(webpage)
if yapfiles_urls:
return self.playlist_from_matches(
yapfiles_urls, video_id, video_title, ie=YapFilesIE.ie_key())
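# Look for Vice embeds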
vice_urls = ViceIE._extract_urls(webpage)
if vice_urls:
return self.playlist_from_matches(
vice_urls, video_id, video_title, ie=ViceIE.ie_key())
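# Look for XFileShare embeds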
xfileshare_urls = XFileShareIE._extract_urls(webpage)
if xfileshare_urls:
return self.playlist_from_matches(
xfileshare_urls, video_id, video_title, ie=XFileShareIE.ie_key())
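# Look for Cloudflare Stream embeds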
cloudflarestream_urls = CloudflareStreamIE._extract_urls(webpage)
if cloudflarestream_urls:
return self.playlist_from_matches(
cloudflarestream_urls, video_id, video_title, ie=CloudflareStreamIE.ie_key())
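# Look for PeerTube embeds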
peertube_urls = PeerTubeIE._extract_urls(webpage, url)
if peertube_urls:
return self.playlist_from_matches(
peertube_urls, video_id, video_title, ie=PeerTubeIE.ie_key())
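# Look for Indavideo embeds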
indavideo_urls = IndavideoEmbedIE._extract_urls(webpage)
if indavideo_urls:
return self.playlist_from_matches(
indavideo_urls, video_id, video_title, ie=IndavideoEmbedIE.ie_key())
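# Look for APA embeds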
apa_urls = APAIE._extract_urls(webpage)
if apa_urls:
return self.playlist_from_matches(
apa_urls, video_id, video_title, ie=APAIE.ie_key())
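# Look for Fox News embeds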
foxnews_urls = FoxNewsIE._extract_urls(webpage)
if foxnews_urls:
return self.playlist_from_matches(
foxnews_urls, video_id, video_title, ie=FoxNewsIE.ie_key())
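# Look for share-videos.se embeds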
sharevideos_urls = [sharevideos_mobj.group('url') for sharevideos_mobj in re.finditer(
r'<iframe[^>]+?\bsrc\s*=\s*(["\'])(?P<url>(?:https?:)?//embed\.share-videos\.se/auto/embed/\d+\?.*?\buid=\d+.*?)\1',
webpage)]
if sharevideos_urls:
return self.playlist_from_matches(
sharevideos_urls, video_id, video_title)
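# Look for Viqeo embeds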
viqeo_urls = ViqeoIE._extract_urls(webpage)
if viqeo_urls:
return self.playlist_from_matches(
viqeo_urls, video_id, video_title, ie=ViqeoIE.ie_key())
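# Look for Expressen embeds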
expressen_urls = ExpressenIE._extract_urls(webpage)
if expressen_urls:
return self.playlist_from_matches(
expressen_urls, video_id, video_title, ie=ExpressenIE.ie_key())
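# Look for Zype embeds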
zype_urls = ZypeIE._extract_urls(webpage)
if zype_urls:
return self.playlist_from_matches(
zype_urls, video_id, video_title, ie=ZypeIE.ie_key())
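# Look for Gedi Digital embeds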
gedi_urls = GediDigitalIE._extract_urls(webpage)
if gedi_urls:
return self.playlist_from_matches(
gedi_urls, video_id, video_title, ie=GediDigitalIE.ie_key())
# Look for RCS media group embeds
rcs_urls = RCSEmbedsIE._extract_urls(webpage)
if rcs_urls:
return self.playlist_from_matches(
rcs_urls, video_id, video_title, ie=RCSEmbedsIE.ie_key())
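# Look for WimTV embeds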
wimtv_urls = WimTVIE._extract_urls(webpage)
if wimtv_urls:
return self.playlist_from_matches(
wimtv_urls, video_id, video_title, ie=WimTVIE.ie_key())
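# Look for BitChute embeds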
bitchute_urls = BitChuteIE._extract_urls(webpage)
if bitchute_urls:
return self.playlist_from_matches(
bitchute_urls, video_id, video_title, ie=BitChuteIE.ie_key())
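# Look for Rumble embeds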
rumble_urls = RumbleEmbedIE._extract_urls(webpage)
if len(rumble_urls) == 1:
return self.url_result(rumble_urls[0], RumbleEmbedIE.ie_key())
if rumble_urls:
return self.playlist_from_matches(
rumble_urls, video_id, video_title, ie=RumbleEmbedIE.ie_key())
# Look for HTML5 media
entries = self._parse_html5_media_entries(url, webpage, video_id, m3u8_id='hls')
if entries:
if len(entries) == 1:
entries[0].update({
'id': video_id,
'title': video_title,
})
else:
for num, entry in enumerate(entries, start=1):
entry.update({
'id': '%s-%s' % (video_id, num),
'title': '%s (%d)' % (video_title, num),
})
for entry in entries:
self._sort_formats(entry['formats'])
return self.playlist_result(entries, video_id, video_title)
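# Look for JWPlayer configuration data embedded in the page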
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
if jwplayer_data:
try:
info = self._parse_jwplayer_data(
jwplayer_data, video_id, require_title=False, base_url=url)
return merge_dicts(info, info_dict)
except ExtractorError:
# See https://github.com/ytdl-org/youtube-dl/pull/16735
pass
# Video.js embed
mobj = re.search(
r'(?s)\bvideojs\s*\(.+?\.src\s*\(\s*((?:\[.+?\]|{.+?}))\s*\)\s*;',
webpage)
if mobj is not None:
sources = self._parse_json(
mobj.group(1), video_id, transform_source=js_to_json,
fatal=False) or []
if not isinstance(sources, list):
sources = [sources]
formats = []
for source in sources:
src = source.get('src')
if not src or not isinstance(src, compat_str):
continue
src = compat_urlparse.urljoin(url, src)
src_type = source.get('type')
if isinstance(src_type, compat_str):
src_type = src_type.lower()
ext = determine_ext(src).lower()
if src_type == 'video/youtube':
return self.url_result(src, YoutubeIE.ie_key())
if src_type == 'application/dash+xml' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src, video_id, mpd_id='dash', fatal=False))
elif src_type == 'application/x-mpegurl' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='hls', fatal=False))
else:
formats.append({
'url': src,
'ext': (mimetype2ext(src_type)
or ext if ext in KNOWN_EXTENSIONS else 'mp4'),
})
if formats:
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
# Looking for http://schema.org/VideoObject
json_ld = self._search_json_ld(
webpage, video_id, default={}, expected_type='VideoObject')
if json_ld.get('url'):
return merge_dicts(json_ld, info_dict)
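# Helpers for the last-resort regex searches below: accept URLs that the
# YouTube or RTMP extractors can handle, or direct links whose extension
# looks like a playable media file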
def check_video(vurl):
if YoutubeIE.suitable(vurl):
return True
if RtmpIE.suitable(vurl):
return True
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml', 'js', 'xml')
def filter_video(urls):
return list(filter(check_video, urls))
# Start with something easy: JW Player in SWFObject
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
# Cinerama player
found = re.findall(
r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
# twitter:player:stream should be checked before twitter:player since
# it is expected to contain a raw stream (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
# We look for Open Graph info:
# We have to match any number of spaces between elements; some sites try to align them (e.g. statigr.am)
m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
# We only look in og:video if the MIME type is a video; don't try if it's a Flash player:
if m_video_type is not None:
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage)
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
# In python 2 response HTTP headers are bytestrings
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
if new_url != url:
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
else:
found = None
if not found:
# twitter:player is an https URL to an iframe player that may or may not
# be supported by yt-dlp, so it is checked last (see
# https://dev.twitter.com/cards/types/player#On_twitter.com_via_desktop_browser)
embed_url = self._html_search_meta('twitter:player', webpage, default=None)
if embed_url and embed_url != url:
return self.url_result(embed_url)
if not found:
raise UnsupportedError(url)
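# Build an info dict for every distinct media URL found by the fallback searches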
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace('\\/', '/')
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, 'Youtube'))
continue
# Strip the file extension so the basename can serve as the video id
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
'id': video_id,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
}
if RtmpIE.suitable(video_url):
entry_info_dict.update({
'_type': 'url_transparent',
'ie_key': RtmpIE.ie_key(),
'url': video_url,
})
entries.append(entry_info_dict)
continue
ext = determine_ext(video_url)
if ext == 'smil':
entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id)
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
elif ext == 'm3u8':
entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
elif ext == 'mpd':
entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
elif ext == 'f4m':
entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id)
elif re.search(r'(?i)\.(?:ism|smil)/manifest', video_url) and video_url != url:
# Just matching .ism/manifest is not enough to be reliably sure
# whether it's actually an ISM manifest or some other streaming
# manifest since there are various streaming URL formats
# possible (see [1]) as well as some other shenanigans like
# .smil/manifest URLs that actually serve an ISM (see [2]) and
# so on.
# Thus the most reasonable way to solve this is to delegate
# to generic extractor in order to look into the contents of
# the manifest itself.
# 1. https://azure.microsoft.com/en-us/documentation/articles/media-services-deliver-content-overview/#streaming-url-formats
# 2. https://svs.itworkscdn.net/lbcivod/smil:itwfcdn/lbci/170976.smil/Manifest
entry_info_dict = self.url_result(
smuggle_url(video_url, {'to_generic': True}),
GenericIE.ie_key())
else:
entry_info_dict['url'] = video_url
if entry_info_dict.get('formats'):
self._sort_formats(entry_info_dict['formats'])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get('title') is not None:
e['title'] = '%s (%d)' % (e['title'], num)
return {
'_type': 'playlist',
'entries': entries,
}
"Unlicense"
] | king-millez/yt-dlp | yt_dlp/extractor/generic.py | 149,939 | Python |