length '2'",
"'a' is longer than maximum length '0'"
]}
]
)
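# The tests below exercise the aws_lambda_decorators extraction API: each test
# builds a nested event dictionary, declares Parameter objects (a path, a source
# argument name, optional validators and defaults), and asserts either the value
# passed into the wrapped handler or the aggregated 400 error response.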
def test_group_errors_true_returns_ok(self):
path = "/a/b"
dictionary = {
"a": {
"b": "hello"
}
}
@extract([Parameter(path, "event", validators=[Mandatory])], True)
def handler(event, context, b=None): # noqa
return b
response = handler(dictionary, None)
self.assertEqual("hello", response)
def test_mandatory_parameter_with_default_returns_error_on_empty(self):
event = {
"var": ""
}
@extract([
Parameter("/var", "event", validators=[Mandatory], default="hello")
])
def handler(event, context, var=None): # noqa: pylint - unused-argument
return {}
response = handler(event, None)
self.assertEqual(response["statusCode"], 400)
self.assertEqual("{\"message\": [{\"var\": [\"Missing mandatory value\"]}]}", response["body"])
def test_group_errors_true_on_extract_from_event_returns_ok(self):
path = "/a/b"
dictionary = {
"a": {
"b": "hello"
}
}
@extract_from_event([Parameter(path, validators=[Mandatory])], True)
def handler(event, context, b=None): # noqa
return b
response = handler(dictionary, None)
self.assertEqual("hello", response)
def test_group_errors_true_on_extract_from_context_returns_ok(self):
path = "/a/b"
dictionary = {
"a": {
"b": "hello"
}
}
@extract_from_context([Parameter(path, validators=[Mandatory])], True)
def handler(event, context, b=None): # noqa
return b
response = handler(None, dictionary)
self.assertEqual("hello", response)
@patch("aws_lambda_decorators.decorators.LOGGER")
def test_can_output_custom_error_message_on_validation_failure(self, mock_logger):
path_1 = "/a/b/c"
path_2 = "/a/b/d"
path_3 = "/a/b/e"
path_4 = "/a/b/f"
path_5 = "/a/b/g"
dictionary = {
"a": {
"b": {
"e": 23,
"f": 15,
"g": "a"
}
}
}
schema = Schema(
{
"g": int
}
)
@extract([
Parameter(path_1, "event", validators=[Mandatory("Missing c")], var_name="c"),
Parameter(path_2, "event", validators=[Mandatory("Missing d")]),
Parameter(path_3, "event", validators=[Minimum(30, "Bad e value {value}, should be at least {condition}")]),
Parameter(path_4, "event", validators=[Maximum(10, "Bad f")]),
Parameter(path_5, "event", validators=[
RegexValidator(r"[0-9]+", "Bad g regex 1"),
RegexValidator(r"[1][0-9]+", "Bad g regex 2"),
SchemaValidator(schema, "Bad g schema"),
MinLength(2, "Bad g min length"),
MaxLength(0, "Bad g max length")
])
], True)
def handler(event, context, c=None, d=None): # noqa: pylint - unused-argument
return {}
response = handler(dictionary, None)
self.assertEqual(400, response["statusCode"])
self.assertEqual(
"{\"message\": [{\"c\": [\"Missing c\"]}, "
"{\"d\": [\"Missing d\"]}, "
"{\"e\": [\"Bad e value 23, should be at least 30\"]}, "
"{\"f\": [\"Bad f\"]}, "
"{\"g\": [\"Bad g regex 1\", "
"\"Bad g regex 2\", "
"\"Bad g schema\", "
"\"Bad g min length\", "
"\"Bad g max length\""
"]}]}",
response["body"])
mock_logger.error.assert_called_once_with(
"Error validating parameters. Errors: %s",
[
{"c": ["Missing c"]},
{"d": ["Missing d"]},
{"e": ["Bad e value 23, should be at least 30"]},
{"f": ["Bad f"]},
{"g": [
"Bad g regex 1",
"Bad g regex 2",
"Bad g schema",
"Bad g min length",
"Bad g max length"
]}
]
)
def test_extract_returns_400_on_missing_mandatory_key_with_regex(self):
path = "/a/b/c"
dictionary = {
"a": {
"b": {
}
}
}
@extract([Parameter(path, "event", validators=[Mandatory, RegexValidator("[0-9]+")])], group_errors=True)
def handler(event, context, c=None): # noqa
return {}
response = handler(dictionary, None)
self.assertEqual(400, response["statusCode"])
self.assertEqual("{\"message\": [{\"c\": [\"Missing mandatory value\"]}]}", response["body"])
def test_extract_nulls_are_returned(self):
path = "/a/b"
dictionary = {
"a": {
}
}
@extract([Parameter(path, "event", default=None)], allow_none_defaults=True)
def handler(event, context, **kwargs): # noqa
return kwargs["b"]
response = handler(dictionary, None)
self.assertEqual(None, response)
def test_extract_nulls_raises_exception_when_extracted_from_kwargs_if_allow_none_defaults_is_false(self):
path = "/a/b"
dictionary = {
"a": {
}
}
@extract([Parameter(path, "event", default=None)], allow_none_defaults=False)
def handler(event, context, **kwargs): # noqa
return kwargs["b"]
with self.assertRaises(KeyError):
handler(dictionary, None)
def test_extract_nulls_preserve_signature_defaults(self):
path = "/a/b"
dictionary = {
"a": {
}
}
@extract([Parameter(path, "event")])
def handler(event, context, b="Hello"): # noqa
return b
response = handler(dictionary, None)
self.assertEqual("Hello", response)
def test_extract_nulls_default_on_decorator_takes_precedence(self):
path = "/a/b"
dictionary = {
"a": {
}
}
@extract([Parameter(path, "event", default="bye")])
def handler(event, context, b="Hello"): # noqa
return b
response = handler(dictionary, None)
self.assertEqual("bye", response)
@patch("aws_lambda_decorators.decorators.LOGGER")
def test_extract_returns_400_on_invalid_bool_type(self, mock_logger):
path = "/a/b/c"
dictionary = {
"a": {
"b": {
"c": 1
}
}
}
@extract([Parameter(path, "event", [Type(bool)])])
def handler(event, context, c=None): # noqa
return {}
response = handler(dictionary, None)
self.assertEqual(400, response["statusCode"])
self.assertEqual("{\"message\": [{\"c\": [\"\'1\' is not of type \'bool'\"]}]}", response["body"])
mock_logger.error.assert_called_once_with(
"Error validating parameters. Errors: %s",
[{"c": ["'1' is not of type 'bool'"]}]
)
@patch("aws_lambda_decorators.decorators.LOGGER")
def test_extract_returns_400_on_invalid_float_type(self, mock_logger):
path = "/a/b/c"
dictionary = {
"a": {
"b": {
"c": 1
}
}
}
@extract([Parameter(path, "event", [Type(float)])])
def handler(event, context, c=None): # noqa
return {}
response = handler(dictionary, None)
self.assertEqual(400, response["statusCode"])
self.assertEqual("{\"message\": [{\"c\": [\"\'1\' is not of type \'float'\"]}]}", response["body"])
mock_logger.error.assert_called_once_with(
"Error validating parameters. Errors: %s",
[{"c": ["'1' is not of type 'float'"]}]
)
def test_type_validator_returns_true_when_none_is_passed_in(self):
path = "/a/b/c"
dictionary = {
"a": {
"b": {
"c": None
}
}
}
@extract([Parameter(path, "event", [Type(float)])])
def handler(event, context, c=None): # noqa
return c
response = handler(dictionary, None)
self.assertEqual(None, response)
def test_extract_succeeds_with_valid_type_validation(self):
path = "/a/b/c"
dictionary = {
"a": {
"b": {
"c": 1
}
}
}
@extract([Parameter(path, "event", [Type(int)])])
def handler(event, context, c=None): # noqa
return c
response = handler(dictionary, None)
self.assertEqual(1, response)
@patch("aws_lambda_decorators.decorators.LOGGER")
def test_extract_returns_400_on_value_not_in_list(self, mock_logger):
path = "/a/b/c"
dictionary = {
"a": {
"b": {
"c": "Hello"
}
}
}
@extract([Parameter(path, "event", [EnumValidator("bye", "test", "another")])])
def handler(event, context, c=None): # noqa
return {}
response = handler(dictionary, None)
self.assertEqual(400, response["statusCode"])
self.assertEqual(
"{\"message\": [{\"c\": [\"\'Hello\' is not in list \'(\'bye\', \'test\', \'another\')'\"]}]}",
response["body"])
mock_logger.error.assert_called_once_with(
"Error validating parameters. Errors: %s",
[{"c": ["'Hello' is not in list '('bye', 'test', 'another')'"]}]
)
def test_extract_succeeds_with_valid_enum_validation(self):
path = "/a/b/c"
dictionary = {
"a": {
"b": {
"c": 123
}
}
}
@extract([Parameter(path, "event", [EnumValidator("Hello", 123)])])
def handler(event, context, c=None): # noqa
return c
response = handler(dictionary, None)
self.assertEqual(123, response)
def test_enum_validator_returns_true_when_none_is_passed_in(self):
path = "/a/b/c"
dictionary = {
"a": {
"b": {
"c": None
}
}
}
@extract([Parameter(path, "event", [EnumValidator("Test", "another")])])
def handler(event, context, c=None): # noqa
return c
response = handler(dictionary, None)
self.assertEqual(None, response)
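# Path annotations: the [json] suffix in "body[json]/optional/value" tells the
# extractor to JSON-decode that element before traversing the remaining path,
# so the default is applied when the decoded body lacks the requested key.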
def test_extract_from_event_missing_parameter_path(self):
event = {
"body": "{}"
}
@extract_from_event(parameters=[Parameter(path="body[json]/optional/value", default="Hello")])
def handler(event, context, **kwargs): # noqa
return {
"statusCode": HTTPStatus.OK,
"body": json.dumps(kwargs)
}
expected_body = json.dumps({
"value": "Hello"
})
response = handler(event, None)
self.assertEqual(HTTPStatus.OK, response["statusCode"])
self.assertEqual(expected_body, response["body"])
def test_extract_non_empty_parameter(self):
event = {
"value": 20
}
@extract([Parameter("/value", "event", validators=[NonEmpty])])
def handler(event, value=None): # noqa: pylint - unused-argument
return value
response = handler(event)
self.assertEqual(20, response)
def test_extract_missing_non_empty_parameter(self):
event = {
"a": 20
}
@extract([Parameter("/b", "event", validators=[NonEmpty])])
def handler(event, b=None): # noqa: pylint - unused-argument
return b
response = handler(event)
self.assertEqual(None, response)
@patch("aws_lambda_decorators.decorators.LOGGER")
def test_extract_non_empty_parameter_that_is_empty(self, mock_logger):
event = {
"a": {}
}
@extract([Parameter("/a", "event", validators=[NonEmpty])])
def handler(event, a=None): # noqa: pylint - unused-argument
return {}
response = handler(event, None)
self.assertEqual(400, response["statusCode"])
self.assertEqual(
"{\"message\": [{\"a\": [\"Value is empty\"]}]}",
response["body"])
mock_logger.error.assert_called_once_with(
"Error validating parameters. Errors: %s",
[{"a": ["Value is empty"]}]
)
@patch("aws_lambda_decorators.decorators.LOGGER")
def test_extract_non_empty_parameter_that_is_empty_with_custom_message(self, mock_logger):
event = {
"a": {}
}
@extract([Parameter("/a", "event", validators=[NonEmpty("The value was empty")])])
def handler(event, a=None): # noqa: pylint - unused-argument
return {}
response = handler(event, None)
self.assertEqual(400, response["statusCode"])
self.assertEqual(
"{\"message\": [{\"a\": [\"The value was empty\"]}]}",
response["body"])
mock_logger.error.assert_called_once_with(
"Error validating parameters. Errors: %s",
[{"a": ["The value was empty"]}]
)
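# DateValidator takes a strptime-style format string; a value that cannot be
# parsed with that format (e.g. hour 35 below) produces a 400 response, and an
# empty/None value is considered valid.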
def test_extract_date_parameter(self):
event = {
"a": "2001-01-01 00:00:00"
}
@extract([Parameter("/a", "event", validators=[DateValidator("%Y-%m-%d %H:%M:%S")])])
def handler(event, a=None): # noqa: pylint - unused-argument
return a
response = handler(event)
self.assertEqual("2001-01-01 00:00:00", response)
@patch("aws_lambda_decorators.decorators.LOGGER")
def test_extract_date_parameter_fails_on_invalid_date(self, mock_logger):
event = {
"a": "2001-01-01 35:00:00"
}
@extract([Parameter("/a", "event", validators=[DateValidator("%Y-%m-%d %H:%M:%S")])])
def handler(event, a=None): # noqa: pylint - unused-argument
return {}
response = handler(event, None)
self.assertEqual(400, response["statusCode"])
self.assertEqual("{\"message\": [{\"a\": [\"'2001-01-01 35:00:00' is not a '%Y-%m-%d %H:%M:%S' date\"]}]}",
response["body"])
mock_logger.error.assert_called_once_with(
"Error validating parameters. Errors: %s",
[{"a": ["'2001-01-01 35:00:00' is not a '%Y-%m-%d %H:%M:%S' date"]}]
)
@patch("aws_lambda_decorators.decorators.LOGGER")
def test_extract_date_parameter_fails_with_custom_error(self, mock_logger):
event = {
"a": "2001-01-01 35:00:00"
}
@extract([Parameter("/a", "event", validators=[DateValidator("%Y-%m-%d %H:%M:%S", "Not a valid date!")])])
def handler(event, a=None): # noqa: pylint - unused-argument
return {}
response = handler(event, None)
self.assertEqual(400, response["statusCode"])
self.assertEqual("{\"message\": [{\"a\": [\"Not a valid date!\"]}]}", response["body"])
mock_logger.error.assert_called_once_with(
"Error validating parameters. Errors: %s",
[{"a": ["Not a valid date!"]}]
)
def test_extract_date_parameter_valid_on_empty(self):
event = {
"a": None
}
@extract([Parameter("/a", "event", validators=[DateValidator("%Y-%m-%d %H:%M:%S")])])
def handler(event, a=None): # noqa: pylint - unused-argument
return a
response = handler(event)
self.assertEqual(None, response)
def test_extract_currency_parameter(self):
event = {
"a": "GBP"
}
@extract([Parameter("/a", "event", [CurrencyValidator])])
def handler(event, a=None): # noqa: pylint - unused-argument
return a
response = handler(event)
self.assertEqual("GBP", response)
def test_currency_validator_returns_true_when_none_is_passed_in(self):
path = "/a/b/c"
dictionary = {
"a": {
"b": {
"c": None
}
}
}
@extract([Parameter(path, "event", [CurrencyValidator])])
def handler(event, c=None): # noqa
return c
response = handler(dictionary, None)
self.assertEqual(None, response)
def test_currency_validator_returns_false_when_invalid_code_passed_in(self):
event = {
"a": "GBT"
}
@extract([Parameter("/a", "event", [CurrencyValidator])])
def handler(event, a=None): # noqa: pylint - unused-argument
return {}
response = handler(event)
self.assertEqual(400, response["statusCode"])
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
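# Note on the deserialization pattern used by these generated model classes:
# _deserialize copies known fields out of the params dict and then warns (via
# warnings.warn) about any leftover keys that do not map to a declared attribute.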
class CreateSegmentationTaskResponse(AbstractModel):
"""CreateSegmentationTask返回参数结构体
"""
def __init__(self):
r"""
:param TaskID: 任务标识ID,可以用与追溯任务状态,查看任务结果
:type TaskID: str
:param EstimatedProcessingTime: 预估处理时间,单位为秒
:type EstimatedProcessingTime: float
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TaskID = None
self.EstimatedProcessingTime = None
self.RequestId = None
def _deserialize(self, params):
self.TaskID = params.get("TaskID")
self.EstimatedProcessingTime = params.get("EstimatedProcessingTime")
self.RequestId = params.get("RequestId")
class CreateTraceRequest(AbstractModel):
"""CreateTrace请求参数结构体
"""
def __init__(self):
r"""
:param PersonId: 人员ID。
:type PersonId: str
:param Trace: 人体轨迹信息。
:type Trace: :class:`tencentcloud.bda.v20200324.models.Trace`
"""
self.PersonId = None
self.Trace = None
def _deserialize(self, params):
self.PersonId = params.get("PersonId")
if params.get("Trace") is not None:
self.Trace = Trace()
self.Trace._deserialize(params.get("Trace"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateTraceResponse(AbstractModel):
"""CreateTrace返回参数结构体
"""
def __init__(self):
r"""
:param TraceId: 人员轨迹唯一标识。
:type TraceId: str
:param BodyModelVersion: 人体识别所用的算法模型版本。
:type BodyModelVersion: str
:param InputRetCode: 输入的人体轨迹图片中的合法性校验结果。
只有为0时结果才有意义。
-1001: 输入图片不合法。-1002: 输入图片不能构成轨迹。
:type InputRetCode: int
:param InputRetCodeDetails: 输入的人体轨迹图片中的合法性校验结果详情。
-1101:图片无效,-1102:url不合法。-1103:图片过大。-1104:图片下载失败。-1105:图片解码失败。-1109:图片分辨率过高。-2023:轨迹中有非同人图片。-2024: 轨迹提取失败。-2025: 人体检测失败。
:type InputRetCodeDetails: list of int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TraceId = None
self.BodyModelVersion = None
self.InputRetCode = None
self.InputRetCodeDetails = None
self.RequestId = None
def _deserialize(self, params):
self.TraceId = params.get("TraceId")
self.BodyModelVersion = params.get("BodyModelVersion")
self.InputRetCode = params.get("InputRetCode")
self.InputRetCodeDetails = params.get("InputRetCodeDetails")
self.RequestId = params.get("RequestId")
class DeleteGroupRequest(AbstractModel):
"""DeleteGroup请求参数结构体
"""
def __init__(self):
r"""
:param GroupId: 人体库ID。
:type GroupId: str
"""
self.GroupId = None
def _deserialize(self, params):
self.GroupId = params.get("GroupId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeleteGroupResponse(AbstractModel):
"""DeleteGroup返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeletePersonRequest(AbstractModel):
"""DeletePerson请求参数结构体
"""
def __init__(self):
r"""
:param PersonId: 人员ID。
:type PersonId: str
"""
self.PersonId = None
def _deserialize(self, params):
self.PersonId = params.get("PersonId")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeletePersonResponse(AbstractModel):
"""DeletePerson返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeSegmentationTaskRequest(AbstractModel):
"""DescribeSegmentationTask请求参数结构体
"""
def __init__(self):
r"""
:param TaskID: 在提交分割任务成功时返回的任务标识ID。
:type TaskID: str
"""
self.TaskID = None
def _deserialize(self, params):
self.TaskID = params.get("TaskID")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeSegmentationTaskResponse(AbstractModel):
"""DescribeSegmentationTask返回参数结构体
"""
def __init__(self):
r"""
:param TaskStatus: 当前任务状态:
QUEUING 排队中
PROCESSING 处理中
FINISHED 处理完成
:type TaskStatus: str
:param ResultVideoUrl: 分割后视频URL, 存储于腾讯云COS
注意:此字段可能返回 null,表示取不到有效值。
:type ResultVideoUrl: str
:param ResultVideoMD5: 分割后视频MD5,用于校验
注意:此字段可能返回 null,表示取不到有效值。
:type ResultVideoMD5: str
:param VideoBasicInformation: 视频基本信息
注意:此字段可能返回 null,表示取不到有效值。
:type VideoBasicInformation: :class:`tencentcloud.bda.v20200324.models.VideoBasicInformation`
:param ErrorMsg: 分割任务错误信息
注意:此字段可能返回 null,表示取不到有效值。
:type ErrorMsg: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TaskStatus = None
self.ResultVideoUrl = None
self.ResultVideoMD5 = None
self.VideoBasicInformation = None
self.ErrorMsg = None
self.RequestId = None
def _deserialize(self, params):
self.TaskStatus = params.get("TaskStatus")
self.ResultVideoUrl = params.get("ResultVideoUrl")
self.ResultVideoMD5 = params.get("ResultVideoMD5")
if params.get("VideoBasicInformation") is not None:
self.VideoBasicInformation = VideoBasicInformation()
self.VideoBasicInformation._deserialize(params.get("VideoBasicInformation"))
self.ErrorMsg = params.get("ErrorMsg")
self.RequestId = params.get("RequestId")
class DetectBodyJointsRequest(AbstractModel):
"""DetectBodyJoints请求参数结构体
"""
def __init__(self):
r"""
:param Image: 图片 base64 数据,base64 编码后大小不可超过5M。
支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。
:type Image: str
:param Url: 图片的 Url 。对应图片 base64 编码后大小不可超过5M。
Url、Image必须提供一个,如果都提供,只使用 Url。
图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。
非腾讯云存储的Url速度和稳定性可能受一定影响。
支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。
:type Url: str
"""
self.Image = None
self.Url = None
def _deserialize(self, params):
self.Image = params.get("Image")
self.Url = params.get("Url")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DetectBodyJointsResponse(AbstractModel):
"""DetectBodyJoints返回参数结构体
"""
def __init__(self):
r"""
:param BodyJointsResults: 图中检测出的人体框和人体关键点, 包含14个人体关键点的坐标,建议根据人体框置信度筛选出合格的人体;
:type BodyJointsResults: list of BodyJointsResult
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.BodyJointsResults = None
self.RequestId = None
def _deserialize(self, params):
if params.get("BodyJointsResults") is not None:
self.BodyJointsResults = []
for item in params.get("BodyJointsResults"):
obj = BodyJointsResult()
obj._deserialize(item)
self.BodyJointsResults.append(obj)
self.RequestId = params.get("RequestId")
class DetectBodyRequest(AbstractModel):
"""DetectBody请求参数结构体
"""
def __init__(self):
r"""
:param Image: 人体图片 Base64 数据。
图片 base64 编码后大小不可超过5M。
图片分辨率不得超过 1920 * 1080 。
支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。
:type Image: str
:param MaxBodyNum: 最多检测的人体数目,默认值为1(仅检测图片中面积最大的那个人体); 最大值10 ,检测图片中面积最大的10个人体。
:type MaxBodyNum: int
:param Url: 人体图片 Url 。
Url、Image必须提供一个,如果都提供,只使用 Url。
图片 base64 编码后大小不可超过5M。
图片分辨率不得超过 1920 * 1080 。
图片存储于腾讯云的Url可保障更高下载速度和稳定性,建议图片存储于腾讯云。
非腾讯云存储的Url速度和稳定性可能受一定影响。
支持PNG、JPG、JPEG、BMP,不支持 GIF 图片。
:type Url: str
:param AttributesOptions: 是否返回年龄、性别、朝向等属性。
可选项有 Age、Bag、Gender、UpperBodyCloth、LowerBodyCloth、Orientation。
如果此参数为空则为不需要返回。
需要将属性组成一个用逗号分隔的字符串,属性之间的顺序没有要求。
关于各属性的详细描述,参见下文出参。
最多返回面积最大的 5 个人体属性信息,超过 5 个人体(第 6 个及以后的人体)的 BodyAttributesInfo 不具备参考意义。
:type AttributesOptions: :class:`tencentcloud.bda.v20200324.models.AttributesOptions`
"""
self.Image = None
self.MaxBodyNum = None
self.Url = None
self.AttributesOptions = None
def _deserialize(self, params):
self.Image = params.get("Image")
self.MaxBodyNum = params.get("MaxBodyNum")
self.Url = params.get("Url")
if params.get("AttributesOptions") is not None:
self.AttributesOptions = AttributesOptions()
self.AttributesOptions._deserialize(params.get("AttributesOptions"))
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DetectBodyResponse(AbstractModel):
"""DetectBody返回参数结构体
"""
def __init__(self):
r"""
:param BodyDetectResults: 图中检测出来的人体框。
:type BodyDetectResults: list of BodyDetectResult
:param BodyModelVersion: 人体识别所用的算法模型版本。
:type BodyModelVersion: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.BodyDetectResults = None
self.BodyModelVersion = None
self.RequestId = None
def _deserialize(self, params):
if params.get("BodyDetectResults") is not None:
self.BodyDetectResults = []
for item in params.get("BodyDetectResults"):
obj = BodyDetectResult()
obj._deserialize(item)
self.BodyDetectResults.append(obj)
self.BodyModelVersion = params.get("BodyModelVersion")
self.RequestId = params.get("RequestId")
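# Hedged usage sketch of these models with the Tencent Cloud Python SDK. The
# client wiring below follows the general tencentcloud-sdk-python pattern and
# is an assumption for illustration; credentials, region and URL are placeholders:
#
#   from tencentcloud.common import credential
#   from tencentcloud.bda.v20200324 import bda_client, models
#
#   cred = credential.Credential("SecretId", "SecretKey")
#   client = bda_client.BdaClient(cred, "ap-guangzhou")
#   req = models.DetectBodyRequest()
#   req.Url = "https://example.com/person.jpg"
#   resp = client.DetectBody(req)   # resp is a DetectBodyResponse as defined above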
class Gender(AbstractModel):
"""人体性别信息。
AttributesType 不含 Gender 或检测超过 5 个人体时,此参数仍返回,但不具备参考意义。
"""
def __init__(self):
r"""
:param Type: 性别信息,返回值为以下集合中的一个 {男性, 女性}
:type Type: str
:param Probability: Type识别概率值,[0.0,1.0],代表判断正确的概率。如0.8则代表有Type值有80%概率正确。
:type Probability: float
"""
self.Type = None
self.Probability = None
def _deserialize(self, params):
self.Type = params.get("Type")
self.Probability = params.get("Probability")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class GetGroupListRequest(AbstractModel):
"""GetGroupList请求参数结构体
"""
def __init__(self):
r"""
:param Offset: 起始序号,默认值为0。
:type Offset: int
:param Limit: 返回数量,默认值为10,最大值为1000。
:type Limit: int
"""
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class GetGroupListResponse(AbstractModel):
"""GetGroupList返回参数结构体
"""
def __init__(self):
r"""
:param GroupInfos: 返回的人体库信息。
:type GroupInfos: list of GroupInfo
:param GroupNum: 人体库总数量。
:type GroupNum: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.GroupInfos = None
self.GroupNum = None
self.RequestId = None
def _deserialize(self, params):
if params.get("GroupInfos") is not None:
self.GroupInfos = []
for item in params.get("GroupInfos"):
obj = GroupInfo()
obj._deserialize(item)
self.GroupInfos.append(obj)
self.GroupNum = params.get("GroupNum")
self.RequestId = params.get("RequestId")
class GetPersonListRequest(AbstractModel):
"""GetPersonList请求参数结构体
"""
def __init__(self):
r"""
:param GroupId: 人体库ID。
:type GroupId: str
:param Offset: 起始序号,默认值为0。
:type Offset: int
:param Limit: 返回数量,默认值为10,最大值为1000。
:type Limit: int
"""
self.GroupId = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.GroupId = params.get("GroupId")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class GetPersonListResponse(AbstractModel):
"""GetPersonList返回参数结构体
"""
def __init__(self):
r"""
:param PersonInfos: 返回的人员信息。
:type PersonInfos: list of PersonInfo
:param PersonNum: 该人体库的人员数量。
:type PersonNum: int
:param BodyModelVersion: 人体识别所用的算法模型版本。
:type BodyModelVersion: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.PersonInfos = None
self.PersonNum = None
self.BodyModelVersion = None
self.RequestId = None
def _deserialize(self, params):
if params.get("PersonInfos") is not None:
self.PersonInfos = []
for item in params.get("PersonInfos"):
obj = PersonInfo()
obj._deserialize(item)
self.PersonInfos.append(obj)
self.PersonNum = params.get("PersonNum")
self.BodyModelVersion = params.get("BodyModelVersion")
self.RequestId = params.get("RequestId")
class GetSummaryInfoRequest(AbstractModel):
"""GetSummaryInfo请求参数结构体
"""
class GetSummaryInfoResponse(AbstractModel):
"""GetSummaryInfo返回参数结构体
"""
def __init__(self):
r"""
:param GroupCount: 人体库总数量。
:type GroupCount: int
:param PersonCount: 人员总数量
:type PersonCount: int
:param TraceCount: 人员轨迹总数量
:type TraceCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.GroupCount = None
self.PersonCount = None
self.TraceCount = None
self.RequestId = None
def _deserialize(self, params):
self.GroupCount = params.get("GroupCount")
self.PersonCount = params.get("PersonCount")
self.TraceCount = params.get("TraceCount")
self.RequestId = params.get("RequestId")
class GroupInfo(AbstractModel):
"""返回的人员库信息。
"""
def __init__(self):
r"""
:param GroupName: 人体库名称。
:type GroupName: str
:param GroupId: 人体库ID。
:type GroupId: str
:param Tag: 人体库信息备注。
:type Tag: str
:param BodyModelVersion: 人体识别所用的算法模型版本。
:type BodyModelVersion: str
:param CreationTimestamp: Group的创建时间和日期 CreationTimestamp。CreationTimestamp 的值是自 Unix 纪元时间到Group创建时间的毫秒数。
Unix 纪元时间是 1970 年 1 月 1 日星期四,协调世界时 (UTC) 。
:type CreationTimestamp: int
"""
self.GroupName = None
self.GroupId = None
self.Tag = None
self.BodyModelVersion = None
self.CreationTimestamp = None
def _deserialize(self, params):
self.GroupName = params.get("GroupName")
self.GroupId = params.get("GroupId")
self.Tag = params.get("Tag")
self.BodyModelVersion = params.get("BodyModelVersion")
self.CreationTimestamp = params.get("CreationTimestamp")
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature analysis functionality.
"""
import logging
from math import log
import numbers
import random
import _registries
import _tokenizer
import _transforms
import apache_beam as beam
from apache_beam.typehints import Dict
from apache_beam.typehints import Tuple
from apache_beam.typehints import Union
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
import numpy as np
from google.cloud.ml.util import _dataflow as dfutil
class _ExtractValues(beam.DoFn):
"""Extract values from all feature columns."""
def __init__(self, sorted_feature_columns):
self._sorted_feature_columns = sorted_feature_columns
# TODO(user): Remove the context param and try catch after sdk update
def start_bundle(self, context=None):
self._extractors = [
_registries.analyzer_registry.get_analyzer(column).extract_value
for column in self._sorted_feature_columns
]
def process(self, element):
try:
element = element.element
except AttributeError:
pass
try:
instance = element
yield [
extract_value(instance, column_index)
for column_index, extract_value in enumerate(self._extractors)
]
except Exception as ex: # pylint: disable=broad-except
try:
yield beam.pvalue.TaggedOutput('errors', (ex, element))
except AttributeError:
yield beam.pvalue.SideOutputValue('errors', (ex, element))
class AnalyzeData(beam.PTransform):
"""A PTransform to analyze feature data to create metadata for preprocessing.
The input to this PTransform is a PCollection representing the source dataset,
with each element of the collection being a dictionary, the keys of which
correspond to the columns referenced in the feature spec provided when
constructing this transform.
"""
def __init__(self,
features,
input_format=None,
format_metadata=None,
error_threshold=0,
return_bad_elements=False):
"""Construct an AnalyzeData PTransform.
Args:
features: A list of Features for the data.
input_format: Optional, whether the input was csv or json.
format_metadata: Optional, arguments to store in the metadata for the
input_format.
error_threshold: How many errors are allowed before the job fails.
return_bad_elements: Should elements with errors be returned as a side
output. Defaults to False.
"""
super(AnalyzeData, self).__init__('Analyze Data')
self._features = features
self._format = input_format
self._format_metadata = format_metadata
self._error_threshold = error_threshold
self._return_bad_elements = return_bad_elements
self._sorted_feature_columns = _transforms.sorted_columns_from_features(
self._features)
# TODO(b/33677990): Remove apply method.
def apply(self, data):
return self.expand(data)
def expand(self, data):
"""Analyzes each of the columns in the feature spec to generate metadata.
Args:
data: The input PCollection.
Returns:
Just the metadata if return_bad_elements is False, otherwise a tuple of
the metadata and the bad elements side output.
"""
rows, errors = data | 'Extract Columns' >> beam.ParDo(
_ExtractValues(self._sorted_feature_columns)).with_outputs(
'errors', main='rows')
_ = data | dfutil.CountPCollection('ml-analyze-input')
_ = errors | dfutil.CountPCollection('ml-analyze-errors')
_ = (errors, data) | dfutil.CheckErrorThreshold(self._error_threshold)
analysis_list = []
combine_fn_analyzers = {}
for ix, column in enumerate(self._sorted_feature_columns):
analyzer = _registries.analyzer_registry.get_analyzer(column)
if isinstance(analyzer, CombineFnColumnAnalyzer):
combine_fn_analyzers[ix] = analyzer
else:
values = rows | 'extract_%s' % column.name >> beam.Map(
lambda row, ix=ix: row[ix])
analysis_list.append(values | analyzer)
if combine_fn_analyzers:
analysis_list.append(rows | 'Analyze CombineFn Features' >>
_MultiColumnAnalyzer(combine_fn_analyzers))
columns = analysis_list | beam.Flatten() | beam.combiners.ToDict()
metadata = columns | 'Generate Metadata' >> beam.Map(self._create_metadata)
if self._return_bad_elements:
return metadata, errors
else:
return metadata
def _get_version(self):
# Version numbers are stored in the top level package.
# Which we can't import at the top as it would be a circular reference.
import google.cloud.ml as ml # pylint: disable=g-import-not-at-top
return ml.__version__
def _create_metadata(self, columns):
features = {}
stats = {}
metadata = {
'sdk_version': self._get_version(),
'columns': columns,
'features': features,
'stats': stats,
}
if self._format:
metadata['format'] = self._format
if self._format_metadata:
metadata[self._format] = self._format_metadata
for feature in self._features:
feature_size = 0
feature_type = 'dense'
feature_dtype = 'int64'
feature_cols = []
for feature_column in feature.columns:
column_name = feature_column.name
column = columns.get(column_name, None)
if not column:
logging.warning('%s not processed because it has no metadata',
column_name)
continue
value_type = column['type']
if value_type == 'target' and hasattr(feature_column, 'scenario'):
column['scenario'] = feature_column.scenario
transformer = _registries.transformation_registry.get_transformer(
column)
if transformer.dtype != 'int64':
# If we're combining an int with anything else, the "other" dtype
# takes precedence. For numeric columns, this will be 'float' and for
# anything else, this will likely be 'bytes'
# TODO(user). Some unexpected behaviour could result from the
# assignment of dtypes here (i.e. in the loop) with respect to
# incompatible types getting combined mistakenly. At the time of writing,
# b/32318252 has been opened to track refactoring this logic so that
# it is clearer to the reader.
feature_dtype = transformer.dtype
if transformer.feature_type == 'sparse':
# If we're combining dense transforms with sparse transforms, the
# resulting feature will be sparse.
# TODO(user): Consider having an enum for 'sparse' and 'dense'
feature_type = 'sparse'
feature_size += transformer.feature_size
if value_type == 'key':
stats['instances'] = column['count']
elif value_type == 'target':
if 'vocab' in column:
stats['labels'] = len(column['vocab'])
if 'mean' in column:
stats['mean'] = column['mean']
feature_cols.append(column_name)
features[feature.name] = {
'name': feature.name,
'size': feature_size,
'type': feature_type,
'dtype': feature_dtype,
'columns': feature_cols
}
return metadata
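# The metadata produced above is a plain dict: the top-level keys are
# 'sdk_version', 'columns', 'features' and 'stats' (plus 'format' and the
# format's own metadata when provided), with one entry per feature describing
# its size, type ('dense' or 'sparse'), dtype and source columns.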
class _MultiColumnAnalyzer(beam.PTransform):
def __init__(self, analyzers):
self._analyzers = analyzers
# TODO(b/33677990): Remove apply method.
def apply(self, rows):
return self.expand(rows)
def expand(self, rows):
value_indices, value_analyzers = zip(*self._analyzers.items())
assert all(
isinstance(analyzer, CombineFnColumnAnalyzer)
for analyzer in value_analyzers)
return (
rows
| 'Extract' >> beam.Map(lambda row: [row[ix] for ix in value_indices])
| 'Combine' >> beam.CombineGlobally(beam.combiners.TupleCombineFn(
*[a.combine_fn for a in value_analyzers])).without_defaults()
| 'PairWithName' >> beam.FlatMap(lambda combined_values: [ # pylint: disable=g-long-lambda
(a.column_name, a.combined_value_to_dict(c))
for a, c in zip(value_analyzers, combined_values)]))
class ColumnAnalyzer(beam.PTransform):
"""Base class for column analyzers.
"""
def __init__(self, column):
super(ColumnAnalyzer, self).__init__('Analyze ' + column.name)
self._column = column
def extract_value(self, instance, index):
"""Extracts the column value from an element (represented as a dict).
By default, extracts the value by column name, returning None if it does
not exist.
May be overridden to compute this value and/or throw an error if the
column value is not valid.
Args:
instance: The input instance to extract from.
index: The index for the feature column being analyzed.
Returns:
The column from this instance.
"""
return instance[index]
def _get_column_metadata(self):
"""Returns a dictionary of the needed metadata.
Sets name, type and transforms args if there are any.
Returns:
A dictionary of the needed metadata.
"""
column_metadata = {'name': self._column.name}
if self._column.default is not None:
column_metadata['default'] = self._column.default
if self._column.value_type:
column_metadata['type'] = self._column.value_type
transform_name = self._column._transform # pylint: disable=protected-access
if transform_name:
column_metadata['transform'] = transform_name
if transform_name and self._column.transform_args:
column_metadata[transform_name] = self._column.transform_args
return column_metadata
class IdentityColumnAnalyzer(ColumnAnalyzer):
"""This is the default analyzer, and only generates simple metatada.
Disregards the values and returns a PCollection with a single entry. A tuple
in the same format as all the other metadata.
"""
# TODO(b/33677990): Remove apply method.
def apply(self, values):
return self.expand(values)
def expand(self, values):
return ['empty'] | 'Identity Metadata' >> beam.Map(
self._ret_val) # run once
def _ret_val(self, _):
return (self._column.name, self._get_column_metadata())
class CombineFnColumnAnalyzer(ColumnAnalyzer):
"""Analyzes columns using a CombineFn.
Subclasses MUST NOT override the apply method, as an alternative
(cross-feature) PTransform may be used instead.
"""
def __init__(self, column, combine_fn, output_name='combined_value'):
assert self.apply.im_func is CombineFnColumnAnalyzer.apply.im_func, (
'Subclass %s of CombineFnColumnAnalyzer must not overload apply.' %
type(self))
super(CombineFnColumnAnalyzer, self).__init__(column)
self._combine_fn = combine_fn
self._output_name = output_name
@property
def combine_fn(self):
return self._combine_fn
@property
def column_name(self):
return self._column.name
# TODO(b/33677990): Remove apply method.
def apply(self, values):
return self.expand(values)
def expand(self, values):
return (
values
| beam.CombineGlobally(self._combine_fn).without_defaults()
|
beam.Map(lambda c: (self.column_name, self.combined_value_to_dict(c))))
def combined_value_to_dict(self, aggregate):
return dict(self._get_column_metadata(), **{self._output_name: aggregate})
@_registries.register_analyzer('key')
class IdColumnAnalyzer(CombineFnColumnAnalyzer):
"""Analyzes id columns to produce a count of instances.
"""
def __init__(self, column):
super(IdColumnAnalyzer, self).__init__(column,
beam.combiners.CountCombineFn(),
'count')
def combined_value_to_dict(self, count):
return {'name': self._column.name, 'type': 'key', 'count': count}
@_registries.register_analyzer('numeric')
@with_input_types(Union[int, long, float])
@with_output_types(Tuple[str, Dict[Union[str, unicode], float]])
class NumericColumnAnalyzer(CombineFnColumnAnalyzer):
"""Analyzes numeric columns to produce a min/max/mean statistics.
"""
def __init__(self, column):
super(NumericColumnAnalyzer, self).__init__(
column, self.MinMeanMax(getattr(column, 'log_base', None)))
def extract_value(self, instance, index):
value = instance[index]
if value is not None and not isinstance(value, numbers.Number):
return float(value)
else:
return value
def combined_value_to_dict(self, combined_value):
return dict(self._get_column_metadata(), **combined_value)
class MinMeanMax(beam.core.CombineFn):
"""Aggregator to combine values within a numeric column.
"""
def __init__(self, log_base=None):
self._log_base = log_base
def create_accumulator(self):
return (float('+inf'), float('-inf'), 0, 0)
def add_input(self, stats, element):
if element is None:
return stats
(min_value, max_value, total, count) = stats
# pandas/tseries/tests/test_daterange.py
from datetime import datetime
from pandas.compat import range
import nose
import numpy as np
from pandas.core.index import Index
from pandas.tseries.index import DatetimeIndex
from pandas import Timestamp
from pandas.tseries.offsets import generate_range
from pandas.tseries.index import cdate_range, bdate_range, date_range
from pandas.core import common as com
import pandas.core.datetools as datetools
from pandas.util.testing import assertRaisesRegexp
import pandas.util.testing as tm
def eq_gen_range(kwargs, expected):
rng = generate_range(**kwargs)
assert (np.array_equal(list(rng), expected))
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestGenRangeGeneration(tm.TestCase):
def test_generate(self):
rng1 = list(generate_range(START, END, offset=datetools.bday))
rng2 = list(generate_range(START, END, time_rule='B'))
self.assertEqual(rng1, rng2)
def test_generate_cday(self):
rng1 = list(generate_range(START, END, offset=datetools.cday))
rng2 = list(generate_range(START, END, time_rule='C'))
self.assertEqual(rng1, rng2)
def test_1(self):
eq_gen_range(dict(start=datetime(2009, 3, 25), periods=2),
[datetime(2009, 3, 25), datetime(2009, 3, 26)])
def test_2(self):
eq_gen_range(dict(start=datetime(2008, 1, 1),
end=datetime(2008, 1, 3)),
[datetime(2008, 1, 1),
datetime(2008, 1, 2),
datetime(2008, 1, 3)])
def test_3(self):
eq_gen_range(dict(start=datetime(2008, 1, 5),
end=datetime(2008, 1, 6)),
[])
def test_precision_finer_than_offset(self):
# GH 9907
result1 = DatetimeIndex(start='2015-04-15 00:00:03',
end='2016-04-22 00:00:00', freq='Q')
result2 = DatetimeIndex(start='2015-04-15 00:00:03',
end='2015-06-22 00:00:04', freq='W')
expected1_list = ['2015-06-30 00:00:03', '2015-09-30 00:00:03',
'2015-12-31 00:00:03', '2016-03-31 00:00:03']
expected2_list = ['2015-04-19 00:00:03', '2015-04-26 00:00:03',
'2015-05-03 00:00:03', '2015-05-10 00:00:03',
'2015-05-17 00:00:03', '2015-05-24 00:00:03',
'2015-05-31 00:00:03', '2015-06-07 00:00:03',
'2015-06-14 00:00:03', '2015-06-21 00:00:03']
expected1 = DatetimeIndex(expected1_list, dtype='datetime64[ns]',
freq='Q-DEC', tz=None)
expected2 = DatetimeIndex(expected2_list, dtype='datetime64[ns]',
freq='W-SUN', tz=None)
self.assert_index_equal(result1, expected1)
self.assert_index_equal(result2, expected2)
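# When the start timestamp carries finer precision than the frequency (here a
# 3-second offset against quarterly and weekly frequencies), the generated
# stamps keep that sub-frequency component, as the expected lists above show.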
class TestDateRange(tm.TestCase):
def setUp(self):
self.rng = bdate_range(START, END)
def test_constructor(self):
bdate_range(START, END, freq=datetools.bday)
bdate_range(START, periods=20, freq=datetools.bday)
bdate_range(end=START, periods=20, freq=datetools.bday)
self.assertRaises(ValueError, date_range, '2011-1-1', '2012-1-1', 'B')
self.assertRaises(ValueError, bdate_range, '2011-1-1', '2012-1-1', 'B')
def test_naive_aware_conflicts(self):
naive = bdate_range(START, END, freq=datetools.bday, tz=None)
aware = bdate_range(START, END, freq=datetools.bday,
tz="Asia/Hong_Kong")
assertRaisesRegexp(TypeError, "tz-naive.*tz-aware", naive.join, aware)
assertRaisesRegexp(TypeError, "tz-naive.*tz-aware", aware.join, naive)
def test_cached_range(self):
DatetimeIndex._cached_range(START, END, offset=datetools.bday)
DatetimeIndex._cached_range(START, periods=20,
offset=datetools.bday)
DatetimeIndex._cached_range(end=START, periods=20,
offset=datetools.bday)
assertRaisesRegexp(TypeError, "offset", DatetimeIndex._cached_range,
START, END)
assertRaisesRegexp(TypeError, "specify period",
DatetimeIndex._cached_range, START,
offset=datetools.bday)
assertRaisesRegexp(TypeError, "specify period",
DatetimeIndex._cached_range, end=END,
offset=datetools.bday)
assertRaisesRegexp(TypeError, "start or end",
DatetimeIndex._cached_range, periods=20,
offset=datetools.bday)
def test_cached_range_bug(self):
rng = date_range('2010-09-01 05:00:00', periods=50,
freq=datetools.DateOffset(hours=6))
self.assertEqual(len(rng), 50)
self.assertEqual(rng[0], datetime(2010, 9, 1, 5))
def test_timezone_comparaison_bug(self):
start = Timestamp('20130220 10:00', tz='US/Eastern')
try:
date_range(start, periods=2, tz='US/Eastern')
except AssertionError:
self.fail()
def test_timezone_comparaison_assert(self):
start = Timestamp('20130220 10:00', tz='US/Eastern')
self.assertRaises(AssertionError, date_range, start, periods=2,
tz='Europe/Berlin')
def test_comparison(self):
d = self.rng[10]
comp = self.rng > d
self.assertTrue(comp[11])
self.assertFalse(comp[9])
def test_copy(self):
cp = self.rng.copy()
repr(cp)
self.assert_index_equal(cp, self.rng)
def test_repr(self):
# only really care that it works
repr(self.rng)
def test_getitem(self):
smaller = self.rng[:5]
exp = DatetimeIndex(self.rng.view(np.ndarray)[:5])
self.assert_index_equal(smaller, exp)
self.assertEqual(smaller.offset, self.rng.offset)
sliced = self.rng[::5]
self.assertEqual(sliced.offset, datetools.bday * 5)
fancy_indexed = self.rng[[4, 3, 2, 1, 0]]
self.assertEqual(len(fancy_indexed), 5)
tm.assertIsInstance(fancy_indexed, DatetimeIndex)
self.assertIsNone(fancy_indexed.freq)
# 32-bit vs. 64-bit platforms
self.assertEqual(self.rng[4], self.rng[np.int_(4)])
def test_getitem_matplotlib_hackaround(self):
values = self.rng[:, None]
expected = self.rng.values[:, None]
self.assert_numpy_array_equal(values, expected)
def test_shift(self):
shifted = self.rng.shift(5)
self.assertEqual(shifted[0], self.rng[5])
self.assertEqual(shifted.offset, self.rng.offset)
shifted = self.rng.shift(-5)
self.assertEqual(shifted[5], self.rng[0])
self.assertEqual(shifted.offset, self.rng.offset)
shifted = self.rng.shift(0)
self.assertEqual(shifted[0], self.rng[0])
self.assertEqual(shifted.offset, self.rng.offset)
rng = date_range(START, END, freq=datetools.bmonthEnd)
shifted = rng.shift(1, freq=datetools.bday)
self.assertEqual(shifted[0], rng[0] + datetools.bday)
def test_pickle_unpickle(self):
unpickled = self.round_trip_pickle(self.rng)
self.assertIsNotNone(unpickled.offset)
def test_union(self):
# overlapping
left = self.rng[:10]
right = self.rng[5:10]
the_union = left.union(right)
tm.assertIsInstance(the_union, DatetimeIndex)
# non-overlapping, gap in middle
left = self.rng[:5]
right = self.rng[10:]
the_union = left.union(right)
tm.assertIsInstance(the_union, Index)
# non-overlapping, no gap
left = self.rng[:5]
right = self.rng[5:10]
the_union = left.union(right)
tm.assertIsInstance(the_union, DatetimeIndex)
# order does not matter
tm.assert_index_equal(right.union(left), the_union)
# overlapping, but different offset
rng = date_range(START, END, freq=datetools.bmonthEnd)
the_union = self.rng.union(rng)
tm.assertIsInstance(the_union, DatetimeIndex)
def test_outer_join(self):
# should just behave as union
# overlapping
left = self.rng[:10]
right = self.rng[5:10]
the_join = left.join(right, how='outer')
tm.assertIsInstance(the_join, DatetimeIndex)
# non-overlapping, gap in middle
left = self.rng[:5]
right = self.rng[10:]
the_join = left.join(right, how='outer')
tm.assertIsInstance(the_join, DatetimeIndex)
self.assertIsNone(the_join.freq)
# non-overlapping, no gap
left = self.rng[:5]
right = self.rng[5:10]
the_join = left.join(right, how='outer')
tm.assertIsInstance(the_join, DatetimeIndex)
# overlapping, but different offset
rng = date_range(START, END, freq=datetools.bmonthEnd)
the_join = self.rng.join(rng, how='outer')
tm.assertIsInstance(the_join, DatetimeIndex)
self.assertIsNone(the_join.freq)
def test_union_not_cacheable(self):
rng = date_range('1/1/2000', periods=50, freq=datetools.Minute())
rng1 = rng[10:]
rng2 = rng[:25]
the_union = rng1.union(rng2)
self.assert_index_equal(the_union, rng)
rng1 = rng[10:]
rng2 = rng[15:35]
the_union = rng1.union(rng2)
expected = rng[10:]
self.assert_index_equal(the_union, expected)
def test_intersection(self):
rng = date_range('1/1/2000', periods=50, freq=datetools.Minute())
rng1 = rng[10:]
rng2 = rng[:25]
the_int = rng1.intersection(rng2)
expected = rng[10:25]
self.assert_index_equal(the_int, expected)
tm.assertIsInstance(the_int, DatetimeIndex)
self.assertEqual(the_int.offset, rng.offset)
the_int = rng1.intersection(rng2.view(DatetimeIndex))
self.assert_index_equal(the_int, expected)
# non-overlapping
the_int = rng[:10].intersection(rng[10:])
expected = DatetimeIndex([])
self.assert_index_equal(the_int, expected)
def test_intersection_bug(self):
# GH #771
a = bdate_range('11/30/2011', '12/31/2011')
b = bdate_range('12/10/2011', '12/20/2011')
result = a.intersection(b)
self.assert_index_equal(result, b)
def test_summary(self):
self.rng.summary()
self.rng[2:2].summary()
def test_summary_pytz(self):
tm._skip_if_no_pytz()
import pytz
bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc).summary()
def test_summary_dateutil(self):
tm._skip_if_no_dateutil()
import dateutil
bdate_range('1/1/2005', '1/1/2009', tz=dateutil.tz.tzutc()).summary()
def test_misc(self):
end = datetime(2009, 5, 13)
dr = bdate_range(end=end, periods=20)
firstDate = end - 19 * datetools.bday
assert len(dr) == 20
assert dr[0] == firstDate
assert dr[-1] == end
def test_date_parse_failure(self):
badly_formed_date = '2007/100/1'
self.assertRaises(ValueError, Timestamp, badly_formed_date)
self.assertRaises(ValueError, bdate_range, start=badly_formed_date,
periods=10)
self.assertRaises(ValueError, bdate_range, end=badly_formed_date,
periods=10)
self.assertRaises(ValueError, bdate_range, badly_formed_date,
badly_formed_date)
def test_equals(self):
self.assertFalse(self.rng.equals(list(self.rng)))
def test_identical(self):
t1 = self.rng.copy()
t2 = self.rng.copy()
self.assertTrue(t1.identical(t2))
# name
t1 = t1.rename('foo')
self.assertTrue(t1.equals(t2))
self.assertFalse(t1.identical(t2))
t2 = t2.rename('foo')
self.assertTrue(t1.identical(t2))
# freq
t2v = Index(t2.values)
self.assertTrue(t1.equals(t2v))
self.assertFalse(t1.identical(t2v))
def test_daterange_bug_456(self):
# GH #456
rng1 = bdate_range('12/5/2011', '12/5/2011')
rng2 = bdate_range('12/2/2011', '12/5/2011')
rng2.offset = datetools.BDay()
result = rng1.union(rng2)
tm.assertIsInstance(result, DatetimeIndex)
def test_error_with_zero_monthends(self):
self.assertRaises(ValueError, date_range, '1/1/2000', '1/1/2001',
freq=datetools.MonthEnd(0))
def test_range_bug(self):
# GH #770
offset = datetools.DateOffset(months=3)
result = date_range("2011-1-1", "2012-1-31", freq=offset)
start = datetime(2011, 1, 1)
exp_values = [start + i * offset for i in range(5)]
tm.assert_index_equal(result, DatetimeIndex(exp_values))
def test_range_tz_pytz(self):
# GH 2906
tm._skip_if_no_pytz()
from pytz import timezone
tz = timezone('US/Eastern')
start = tz.localize(datetime(2011, 1, 1))
end = tz.localize(datetime(2011, 1, 3))
dr = date_range(start=start, periods=3)
self.assertEqual(dr.tz.zone, tz.zone)
self.assertEqual(dr[0], start)
self.assertEqual(dr[2], end)
dr = date_range(end=end, periods=3)
self.assertEqual(dr.tz.zone, tz.zone)
self.assertEqual(dr[0], start)
self.assertEqual(dr[2], end)
dr = date_range(start=start, end=end)
self.assertEqual(dr.tz.zone, tz.zone)
self.assertEqual(dr[0], start)
self.assertEqual(dr[2], end)
def test_range_tz_dst_straddle_pytz(self):
tm._skip_if_no_pytz()
from pytz import timezone
tz = timezone('US/Eastern')
dates = [(tz.localize(datetime(2014, 3, 6)),
tz.localize(datetime(2014, 3, 12))),
(tz.localize(datetime(2013, 11, 1)),
tz.localize(datetime(2013, 11, 6)))]
for (start, end) in dates:
dr = date_range(start, end, freq='D')
self.assertEqual(dr[0], start)
self.assertEqual(dr[-1], end)
self.assertEqual(np.all(dr.hour == 0), True)
dr = date_range(start, end, freq='D', tz='US/Eastern')
self.assertEqual(dr[0], start)
self.assertEqual(dr[-1], end)
self.assertEqual(np.all(dr.hour == 0), True)
dr = date_range(start.replace(tzinfo=None), end.replace(
tzinfo=None), freq='D', tz='US/Eastern')
self.assertEqual(dr[0], start)
self.assertEqual(dr[-1], end)
self.assertEqual(np.all(dr.hour == 0), True)
def test_range_tz_dateutil(self):
# GH 2906
tm._skip_if_no_dateutil()
# Use maybe_get_tz to fix filename in tz under dateutil.
from pandas.tslib import maybe_get_tz
tz = lambda x: maybe_get_tz('dateutil/' + x)
start = datetime(2011, 1, 1, tzinfo=tz('US/Eastern'))
end = datetime(2011, 1, 3, tzinfo=tz('US/Eastern'))
dr = date_range(start=start, periods=3)
self.assertTrue(dr.tz == tz('US/Eastern'))
self.assertTrue(dr[0] == start)
self.assertTrue(dr[2] == end)
dr = date_range(end=end, periods=3)
self.assertTrue(dr.tz == tz('US/Eastern'))
self.assertTrue(dr[0] == start)
self.assertTrue(dr[2] == end)
dr = date_range(start=start, end=end)
self.assertTrue(dr.tz == tz('US/Eastern'))
self.assertTrue(dr[0] == start)
self.assertTrue(dr[2] == end)
def test_month_range_union_tz_pytz(self):
tm._skip_if_no_pytz()
from pytz import timezone
tz = timezone('US/Eastern')
early_start = datetime(2011, 1, 1)
early_end = datetime(2011, 3, 1)
late_start = datetime(2011, 3, 1)
late_end = datetime(2011, 5, 1)
early_dr = date_range(start=early_start, end=early_end, tz=tz,
freq=datetools.monthEnd)
late_dr = date_range(start=late_start, end=late_end, tz=tz,
freq=datetools.monthEnd)
early_dr.union(late_dr)
def test_month_range_union_tz_dateutil(self):
tm._skip_if_windows_python_3()
tm._skip_if_no_dateutil()
from pandas.tslib import _dateutil_gettz as timezone
tz = timezone('US/Eastern')
early_start = datetime(2011, 1, 1)
early_end = datetime(2011, 3, 1)
late_start = datetime(2011, 3, 1)
late_end = datetime(2011, 5, 1)
early_dr = date_range(start=early_start, end=early_end, tz=tz,
freq=datetools.monthEnd)
late_dr = date_range(start=late_start, end=late_end, tz=tz,
freq=datetools.monthEnd)
early_dr.union(late_dr)
def test_range_closed(self):
begin = datetime(2011, 1, 1)
end = datetime(2014, 1, 1)
for freq in ["3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
self.assert_index_equal(expected_left, left)
self.assert_index_equal(expected_right, right)
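# closed=None keeps both endpoints when they fall on the frequency grid;
# closed="left" drops a boundary-aligned end stamp and closed="right" drops a
# boundary-aligned start stamp, which is what the expected_left/expected_right
# adjustments above encode.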
def test_range_closed_with_tz_aware_start_end(self):
# GH12409
begin = Timestamp('2011/1/1', tz='US/Eastern')
end = Timestamp('2014/1/1', tz='US/Eastern')
for freq in ["3D", "2M", "7W", "3H", "A"]:
closed = date_range(begin, end, closed=None, freq=freq)
left = date_range(begin, end, closed="left", freq=freq)
right = date_range(begin, end, closed="right", freq=freq)
expected_left = left
expected_right = right
if end == closed[-1]:
expected_left = closed[:-1]
if begin == closed[0]:
expected_right = closed[1:]
self.assert_index_equal(expected_left, left)
self.assert_index_equal(expected_right, right)
# test with default frequency, UTC
begin = Timestamp('2011/1/1', tz='UTC')
end = Timestamp('2014/1/1', tz='UTC')
intervals = ['left', 'right', None]
for i in intervals:
result = date_range(start=begin, end=end, closed=i)
self.assertEqual(result[0], begin)
self.assertEqual(result[-1], end)
def test_range_closed_boundary(self):
def test_affine_transform_opencv(self, xp, dtype):
a = testing.shaped_random((100, 100), xp, dtype)
matrix = testing.shaped_random((2, 3), xp, dtype)
if xp == cupy:
return cupyx.scipy.ndimage.affine_transform(a, matrix, order=1,
mode='opencv')
else:
return cv2.warpAffine(a, matrix, (a.shape[1], a.shape[0]))
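# mode='opencv' (a CuPy extension to the SciPy-compatible ndimage API) requests
# OpenCV-compatible behaviour, so the CuPy result of affine_transform is checked
# directly against cv2.warpAffine applied with the same 2x3 affine matrix.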
@testing.parameterize(*(
testing.product({
'angle': [-10, 1000],
'axes': [(1, 0)],
'reshape': [False, True],
'output': [None, numpy.float64, 'empty'],
'order': [0, 1],
'mode': legacy_modes,
'cval': [1.0],
'prefilter': [True],
}) + testing.product({
'angle': [-15],
'axes': [(1, 0)],
'reshape': [False],
'output': [None],
'order': [0, 1, 3],
'mode': legacy_modes + scipy16_modes,
'cval': [1.0],
'prefilter': [True],
})
))
@testing.gpu
@testing.with_requires('scipy')
class TestRotate:
_multiprocess_can_split = True
def _rotate(self, xp, scp, a):
_conditional_scipy_version_skip(self.mode, self.order)
rotate = scp.ndimage.rotate
if self.output == 'empty':
output = rotate(a, self.angle, self.axes,
self.reshape, None, self.order,
self.mode, self.cval, self.prefilter)
return_value = rotate(a, self.angle, self.axes,
self.reshape, output, self.order,
self.mode, self.cval, self.prefilter)
assert return_value is None or return_value is output
return output
else:
return rotate(a, self.angle, self.axes,
self.reshape, self.output, self.order,
self.mode, self.cval, self.prefilter)
@testing.for_float_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_rotate_float(self, xp, scp, dtype):
a = testing.shaped_random((10, 10), xp, dtype)
return self._rotate(xp, scp, a)
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
@testing.with_requires('scipy>=1.6.0')
def test_rotate_complex_float(self, xp, scp, dtype):
if self.output == numpy.float64:
self.output = numpy.complex128
a = testing.shaped_random((10, 10), xp, dtype)
return self._rotate(xp, scp, a)
@testing.for_float_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_rotate_fortran_order(self, xp, scp, dtype):
a = testing.shaped_random((10, 10), xp, dtype)
a = xp.asfortranarray(a)
return self._rotate(xp, scp, a)
def _hip_skip_invalid_condition(self):
if runtime.is_hip:
if (self.angle in [-10, 1000]
and self.mode in ['constant', 'nearest', 'mirror']
and self.output == numpy.float64
and self.reshape):
pytest.xfail('ROCm/HIP may have a bug')
if (self.angle == -15
and self.mode in [
'nearest', 'grid-wrap', 'reflect', 'grid-mirror']
and self.order == 3):
pytest.xfail('ROCm/HIP may have a bug')
@testing.for_int_dtypes(no_bool=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_rotate_int(self, xp, scp, dtype):
self._hip_skip_invalid_condition()
if numpy.lib.NumpyVersion(scipy.__version__) < '1.0.0':
if dtype in (numpy.dtype('l'), numpy.dtype('q')):
dtype = numpy.int64
elif dtype in (numpy.dtype('L'), numpy.dtype('Q')):
dtype = numpy.uint64
a = testing.shaped_random((10, 10), xp, dtype)
out = self._rotate(xp, scp, a)
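# Values that land exactly on .5 after interpolation can round differently
# between SciPy and CuPy for integer outputs, so positions whose float result
# has a fractional part of ~0.5 are zeroed out before the comparison below.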
float_out = self._rotate(xp, scp, a.astype(xp.float64)) % 1
half = xp.full_like(float_out, 0.5)
out[xp.isclose(float_out, half, atol=1e-5)] = 0
return out
@testing.gpu
# Scipy older than 1.3.0 raises IndexError instead of ValueError
@testing.with_requires('scipy>=1.3.0')
class TestRotateExceptions:
def test_rotate_invalid_plane(self):
ndimage_modules = (scipy.ndimage, cupyx.scipy.ndimage)
for (xp, ndi) in zip((numpy, cupy), ndimage_modules):
x = xp.ones((8, 8, 8))
angle = 15
with pytest.raises(ValueError):
ndi.rotate(x, angle, [0, x.ndim])
with pytest.raises(ValueError):
ndi.rotate(x, angle, [-(x.ndim + 1), 1])
@testing.parameterize(
{'axes': (-1, -2)},
{'axes': (0, 1)},
{'axes': (2, 0)},
{'axes': (-2, 2)},
)
@testing.gpu
@testing.with_requires('scipy')
class TestRotateAxes:
_multiprocess_can_split = True
@testing.for_float_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_rotate_axes(self, xp, scp, dtype):
a = testing.shaped_random((10, 10, 10), xp, dtype)
rotate = scp.ndimage.rotate
return rotate(a, 1, self.axes, order=1)
@testing.gpu
@testing.with_requires('opencv-python')
class TestRotateOpenCV:
_multiprocess_can_split = True
@testing.for_float_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(atol=0.3)
def test_rotate_opencv(self, xp, dtype):
a = testing.shaped_random((100, 100), xp, dtype)
if xp == cupy:
return cupyx.scipy.ndimage.rotate(a, 10, reshape=False,
order=1, mode='opencv')
else:
matrix = cv2.getRotationMatrix2D((49.5, 49.5), 10, 1)
return cv2.warpAffine(a, matrix, (a.shape[1], a.shape[0]))
@testing.parameterize(*(
testing.product({
'shift': [0.1, -10, (5, -5)],
'output': [None, numpy.float64, 'empty'],
'order': [0, 1, 3],
'mode': legacy_modes + scipy16_modes,
'cval': [1.0],
'prefilter': [True],
}) + testing.product({
'shift': [0.1, ],
'output': [None, numpy.float64, 'empty'],
'order': [0, 1, 3],
'mode': ['constant', ],
'cval': [cupy.nan, cupy.inf, -cupy.inf],
'prefilter': [True],
})
))
@testing.gpu
@testing.with_requires('scipy')
class TestShift:
_multiprocess_can_split = True
def _shift(self, xp, scp, a):
shift = scp.ndimage.shift
_conditional_scipy_version_skip(self.mode, self.order)
if self.output == 'empty':
output = xp.empty_like(a)
return_value = shift(a, self.shift, output, self.order,
self.mode, self.cval, self.prefilter)
assert return_value is None or return_value is output
return output
else:
return shift(a, self.shift, self.output, self.order,
self.mode, self.cval, self.prefilter)
@testing.for_float_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_shift_float(self, xp, scp, dtype):
a = testing.shaped_random((100, 100), xp, dtype)
return self._shift(xp, scp, a)
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
@testing.with_requires('scipy>=1.6.0')
def test_shift_complex_float(self, xp, scp, dtype):
if self.output == numpy.float64:
self.output = numpy.complex128
a = testing.shaped_random((100, 100), xp, dtype)
return self._shift(xp, scp, a)
@testing.for_float_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_shift_fortran_order(self, xp, scp, dtype):
a = testing.shaped_random((100, 100), xp, dtype)
a = xp.asfortranarray(a)
return self._shift(xp, scp, a)
def _hip_skip_invalid_condition(self):
if (runtime.is_hip
and self.cval == 1.0
and self.order == 3
and self.output in [None, 'empty']
and self.shift == 0.1):
pytest.xfail('ROCm/HIP may have a bug')
@testing.for_int_dtypes(no_bool=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_shift_int(self, xp, scp, dtype):
self._hip_skip_invalid_condition()
if self.mode == 'constant' and not xp.isfinite(self.cval):
if self.output is None or self.output == 'empty':
# Non-finite cval with integer output array is not supported
# CuPy exception is tested in TestInterpolationInvalidCval
return xp.asarray([])
if numpy.lib.NumpyVersion(scipy.__version__) < '1.0.0':
if dtype in (numpy.dtype('l'), numpy.dtype('q')):
dtype = numpy.int64
elif dtype in (numpy.dtype('L'), numpy.dtype('Q')):
dtype = numpy.uint64
a = testing.shaped_random((100, 100), xp, dtype)
out = self._shift(xp, scp, a)
float_out = self._shift(xp, scp, a.astype(xp.float64)) % 1
half = xp.full_like(float_out, 0.5)
out[xp.isclose(float_out, half, atol=1e-5)] = 0
return out
# non-finite cval with integer valued output is not allowed for CuPy
@testing.parameterize(*testing.product({
'output': [None, numpy.float64, numpy.int32, 'empty'],
'order': [0, 1],
'mode': ['constant', 'nearest'],
'cval': [cupy.nan, cupy.inf, -cupy.inf],
}))
@testing.gpu
class TestInterpolationInvalidCval:
def _prep_output(self, a):
if self.output == 'empty':
return cupy.zeros_like(a)
return self.output
@testing.for_int_dtypes(no_bool=True)
def test_shift(self, dtype):
a = cupy.ones((32,), dtype=dtype)
shift = cupyx.scipy.ndimage.shift
output = self._prep_output(a)
if _util._is_integer_output(output, a) and self.mode == 'constant':
with pytest.raises(NotImplementedError):
shift(a, 1, output=output, order=self.order, mode=self.mode,
cval=self.cval)
else:
shift(a, 1, output=output, order=self.order, mode=self.mode,
cval=self.cval)
@testing.for_int_dtypes(no_bool=True)
def test_zoom(self, dtype):
a = cupy.ones((32,), dtype=dtype)
zoom = cupyx.scipy.ndimage.zoom
output = self._prep_output(a)
if _util._is_integer_output(output, a) and self.mode == 'constant':
with pytest.raises(NotImplementedError):
# zoom of 1.0 to keep same shape
zoom(a, 1, output=output, order=self.order, mode=self.mode,
cval=self.cval)
else:
zoom(a, 1, output=output, order=self.order, mode=self.mode,
cval=self.cval)
@testing.for_int_dtypes(no_bool=True)
def test_rotate(self, dtype):
a = cupy.ones((16, 16), dtype=dtype)
rotate = cupyx.scipy.ndimage.rotate
output = self._prep_output(a)
if _util._is_integer_output(output, a) and self.mode == 'constant':
with pytest.raises(NotImplementedError):
# rotate by 0 to keep same shape
rotate(a, 0, output=output, order=self.order, mode=self.mode,
cval=self.cval)
else:
rotate(a, 0, output=output, order=self.order, mode=self.mode,
cval=self.cval)
@testing.for_int_dtypes(no_bool=True)
def test_affine(self, dtype):
a = cupy.ones((16, 16), dtype=dtype)
affine = cupy.eye(2)
affine_transform = cupyx.scipy.ndimage.affine_transform
output = self._prep_output(a)
if _util._is_integer_output(output, a) and self.mode == 'constant':
with pytest.raises(NotImplementedError):
affine_transform(a, affine, output=output, order=self.order,
mode=self.mode, cval=self.cval)
else:
affine_transform(a, affine, output=output, order=self.order,
mode=self.mode, cval=self.cval)
@testing.for_int_dtypes(no_bool=True)
def test_map_coordinates(self, dtype):
a = cupy.ones((32,), dtype=dtype)
coords = cupy.arange(32)[cupy.newaxis, :] + 2.5
map_coordinates = cupyx.scipy.ndimage.map_coordinates
output = self._prep_output(a)
if _util._is_integer_output(output, a) and self.mode == 'constant':
with pytest.raises(NotImplementedError):
map_coordinates(a, coords, output=output, order=self.order,
mode=self.mode, cval=self.cval)
else:
map_coordinates(a, coords, output=output, order=self.order,
mode=self.mode, cval=self.cval)
@testing.gpu
@testing.with_requires('opencv-python')
class TestShiftOpenCV:
_multiprocess_can_split = True
@testing.for_float_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(atol=0.2)
def test_shift_opencv(self, xp, dtype):
a = testing.shaped_random((100, 100), xp, dtype)
shift = testing.shaped_random((2,), xp, dtype)
if xp == cupy:
return cupyx.scipy.ndimage.shift(a, shift, order=1,
mode='opencv')
else:
matrix = numpy.array([[1, 0, shift[1]], [0, 1, shift[0]]])
return cv2.warpAffine(a, matrix, (a.shape[1], a.shape[0]))
@testing.parameterize(*testing.product({
'zoom': [0.1, 10, (0.1, 10)],
'output': [None, numpy.float64, 'empty'],
'order': [0, 1],
'mode': legacy_modes,
'cval': [1.0],
'prefilter': [True],
}))
@testing.gpu
@testing.with_requires('scipy')
class TestZoom:
_multiprocess_can_split = True
def _zoom(self, xp, scp, a):
_conditional_scipy_version_skip(self.mode, self.order)
zoom = scp.ndimage.zoom
if self.output == 'empty':
output = zoom(a, self.zoom, None, self.order,
self.mode, self.cval, self.prefilter)
return_value = zoom(a, self.zoom, output, self.order,
self.mode, self.cval, self.prefilter)
assert return_value is None or return_value is output
return output
else:
return zoom(a, self.zoom, self.output, self.order,
self.mode, self.cval, self.prefilter)
@testing.for_float_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_zoom_float(self, xp, scp, dtype):
a = testing.shaped_random((100, 100), xp, dtype)
return self._zoom(xp, scp, a)
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
@testing.with_requires('scipy>=1.6.0')
def test_zoom_complex_float(self, xp, scp, dtype):
if self.output == numpy.float64:
self.output = numpy.complex128
a = testing.shaped_random((100, 100), xp, dtype)
return self._zoom(xp, scp, a)
@testing.for_float_dtypes(no_float16=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_zoom_fortran_order(self, xp, scp, dtype):
a = testing.shaped_random((100, 100), xp, dtype)
a = xp.asfortranarray(a)
return self._zoom(xp, scp, a)
@testing.for_int_dtypes(no_bool=True)
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def test_zoom_int(self, xp, scp, dtype):
if numpy.lib.NumpyVersion(scipy.__version__) < '1.0.0':
if dtype in (numpy.dtype('l'), numpy.dtype('q')):
dtype = numpy.int64
elif dtype in (numpy.dtype('L'), numpy.dtype('Q')):
dtype = numpy.uint64
a = testing.shaped_random((100, 100), xp, dtype)
out = self._zoom(xp, scp, a)
float_out = self._zoom(xp, scp, a.astype(xp.float64)) % 1
half = xp.full_like(float_out, 0.5)
out[xp.isclose(float_out, half, atol=1e-5)] = 0
return out
@testing.parameterize(*testing.product({
'shape': [(2, 3), (4, 4)],
'zoom': [(1, 1), (3, 5), (8, 2), (8, 8)],
'mode': ['nearest', 'reflect', 'mirror', 'grid-wrap', 'grid-constant'],
}))
@testing.gpu
class TestZoomOrder0IntegerGrid():
def test_zoom_grid_by_int_order0(self):
# When grid_mode is True, order 0 zoom should be the same as
# replication via a Kronecker product. The only exceptions to this are
# the non-grid modes 'constant' and 'wrap'.
size = numpy.prod(self.shape)
x = cupy.arange(size, dtype=float).reshape(self.shape)
testing.assert_array_almost_equal(
cupyx.scipy.ndimage.zoom(
x, self.zoom, order=0, mode=self.mode, grid_mode=True
),
cupy.kron(x, cupy.ones(self.zoom)),
)
@testing.parameterize(*testing.product({
'shape': [(5, 5, 2)],
'zoom': [(2, 2, 0.5)], # selected to give output.shape[-1]
# Repository: jeremyko/let-me-test
#-*- coding: utf-8 -*-
#202007 kojh create
import sys
import os
import glob
import datetime
import subprocess
import traceback
import configparser
import shutil
import logging
from tspec_cmd_impl import *
from module_core import *
_g_runner_self = None # XXX
#///////////////////////////////////////////////////////////////////////////////
class PkgTestRunner:
logger = None
logger_summary = None
__args = None
__ini_config = None
__group_dirs = [] # name only
__group_dirs_full = [] # full path
__test_dirs_per_group = []
__test_dirs_per_group_full = []
__total_test_cnt = 0
__total_tspec_cnt = 0
__failed_tests = []
__succeeded_tests = []
__failed_test_cnt = 0
__succeeded_test_cnt = 0
temp_internal_use_only_dir = None
temp_internal_use_only_dir_remote = None
__log_level = None
#DB ----------------
ora_conn_str = None
mysql_host = None
mysql_user = None
mysql_passwd = None
mysql_db_name= None
# ----------------
pkg_dir = None
package_id = None
package_name = None
system_name = None
service_name = None
xml_cfg_path = None
xml_db_path = None
input_path = None
work_path = None
stat_path = None
output_path = None
pfnm_userid = None
pfnm_passwd = None
cli_name = None
db_type = None
log_base_path= None
cur_ctx_test_path= None
is_xml_config_changed = False
start_all_prc_per_tspec = 'Y'
ini_config_path_full = None
simul_name = None
cur_indent = None
simul_gsn_binding_ip = None
simul_gsn_binding_port = None
simul_gtp_ip = None
simul_gtp_port = None
simul_dm_base_path = None # XXX diameter simul only XXX
#simul_tps = 0
pid_save = []
info_repo = {}
cleanup_cli_cmds = [] # CLI commands to run after each tspec ends and the xml db / xml config have been restored
change_xml_dbs = {} # table_name:is_changed
dm_sim_cfg_backup_files = [] # dm simul only. client.list, tas01.ini ...
PKG_CFG_NAME ='per_pkg.ini'
TSPEC_FILE_EXT ='.tspec'
TSPEC_DIR_NAME = 'tspecs'
GROUP_DIR_PREFIX ='group_'
xml_data_string =''
ems_ip = None
ems_id = None
ems_passwd = None
ems_xml_cfg_path = None
ems_is_xml_config_changed = False
ems_cli_cmd = None
ems_package_name = None
ems_system_name = None
ems_service_name = None
ems_policy_path = None
start_timestamp = None
test_result_dir = None
#==================================================================
def __init__(self,logger,logger_summary,timestamp,args):
self.__ini_config = configparser.ConfigParser()
self.pkg_dir = args.pkg_dir
self.logger = logger
self.logger_summary = logger_summary
self.start_timestamp = timestamp
self.test_result_dir = os.path.join(self.pkg_dir, "pkg_test_result")
self.test_result_dir = os.path.join(self.test_result_dir, "{}_RESULT_DATA".format(timestamp))
self.__args = args
self.pkg_dir = os.path.abspath(self.pkg_dir) + os.sep
global _g_runner_self
_g_runner_self = self
self.reset_variables()
#==================================================================
def reset_variables(self):
self.cur_ctx_test_path = None
self.__group_dirs = []
self.__group_dirs_full = []
self.__test_dirs_per_group = []
self.__test_dirs_per_group_full = []
self.__total_test_cnt = 0
self.__total_tspec_cnt = 0
self.__failed_tests = []
self.__succeeded_tests = []
self.__failed_test_cnt = 0
self.__succeeded_test_cnt = 0
self.__failed_tspec_cnt = 0
self.__succeeded_tspec_cnt = 0
self.pid_save = []
self.info_repo = {}
self.cleanup_cli_cmds = []
self.change_xml_dbs = {}
self.dm_sim_cfg_backup_files =[]
self.cur_indent = ' '
#==================================================================
def run_teardown_if_any(self):
self.logger.info("\n\n")
setup_path = os.path.join(self.pkg_dir,'pkg_teardown.tspec')
if os.path.isfile(setup_path):
self.logger.info(">> run pkg common tspec (teardown.tspec)")
execfile(setup_path)
#==================================================================
def display_run_result(self):
if self.__total_tspec_cnt == 0:
self.logger.error("invalid run command. check cmd arguments")
return
self.logger.info(" ")
self.logger.info(" ")
self.logger.info("---------------------------------------------------------")
msg ="TOTAL {} TSPEC".format(self.__total_tspec_cnt)
self.logger.info (msg)
self.logger_summary.info (msg)
msg =" {} OK".format(self.__succeeded_tspec_cnt)
self.logger.info (msg)
self.logger_summary.info (msg)
for succeeded in self.__succeeded_tests :
msg = " succeeded : {}".format(succeeded)
self.logger.info (msg)
self.logger_summary.info(msg)
if(self.__failed_test_cnt >0 ):
msg =" {} FAILED".format(self.__failed_tspec_cnt)
self.logger.error (msg)
self.logger_summary.error(msg)
for failed in self.__failed_tests :
msg = " failed : {}".format(failed)
self.logger.error (msg)
self.logger_summary.error(msg)
#==================================================================
# run test
#==================================================================
def run_test(self):
self.logger.info("pkg dir [{}]".format(self.pkg_dir))
self.read_pkg_ini()
self.reset_variables()
#create temporary work directory
if(os.path.isdir(self.temp_internal_use_only_dir)==False):
self.logger.debug("create internal use only dir : {}".
format(self.temp_internal_use_only_dir))
os.mkdir(self.temp_internal_use_only_dir)
#create result directory
self.__result_base_dir = self.pkg_dir + 'pkg_test_result'
if(os.path.isdir(self.__result_base_dir)==False):
self.logger.debug("create result dir : {}".
format(self.__result_base_dir))
os.mkdir(self.__result_base_dir)
self.set_log_level()
self.get_groups ()
if(self.__group_dirs):
#-------------------------------------------
# XXX : run setup.tspec if any.
setup_path = os.path.join(self.pkg_dir,'pkg_setup.tspec')
if os.path.isfile(setup_path):
self.logger.info("\n\n")
self.logger.info(">> run pkg common tspec (setup.tspec)")
execfile(setup_path)
#-------------------------------------------
self.logger.info("run groups : {}".format(self.__group_dirs))
self.run_groups()
else:
err_msg ="invalid pkg dir or usage {}".format(self.pkg_dir)
self.logger.error(err_msg)
self.logger_summary.error(err_msg)
return False
return True
#==================================================================
def strip_all_members(self):
self.xml_cfg_path = self.xml_cfg_path.strip()
self.xml_db_path = self.xml_db_path .strip()
self.package_id = self.package_id.strip()
self.package_name = self.package_name.strip()
self.system_name = self.system_name .strip()
self.service_name = self.service_name.strip()
self.cli_name = self.cli_name .strip()
self.log_base_path= self.log_base_path.strip()
self.pfnm_userid = self.pfnm_userid.strip()
self.pfnm_passwd = self.pfnm_passwd.strip()
self.db_type = self.db_type.strip()
self.start_all_prc_per_tspec = self.start_all_prc_per_tspec.strip()
self.input_path = self.input_path.strip()
self.work_path = self.work_path.strip()
self.output_path = self.output_path.strip()
self.stat_path = self.stat_path.strip()
self.ems_xml_cfg_path = self.ems_xml_cfg_path.strip()
self.ems_policy_path = self.ems_policy_path.strip()
if(self.simul_dm_base_path):
self.simul_dm_base_path = self.simul_dm_base_path.strip()
#==================================================================
def remove_tailing_os_sep (self):
# XXX Path values in the config must not end with '/'. If one was configured
# that way by mistake, strip the trailing '/'.
"""
'/LOG/' --> '/LOG'
"""
if self.xml_cfg_path.endswith(os.sep):
self.xml_cfg_path = self.xml_cfg_path[:-1]
if self.xml_db_path.endswith(os.sep):
self.xml_db_path = self.xml_db_path[:-1]
if self.log_base_path.endswith(os.sep):
self.log_base_path = self.log_base_path[:-1]
if(self.simul_dm_base_path):
if self.simul_dm_base_path.endswith(os.sep):
self.simul_dm_base_path = self.simul_dm_base_path[:-1]
if self.input_path.endswith(os.sep):
self.input_path = self.input_path[:-1]
if self.work_path.endswith(os.sep):
self.work_path = self.work_path[:-1]
if self.output_path.endswith(os.sep):
self.output_path = self.output_path[:-1]
if self.stat_path.endswith(os.sep):
self.stat_path = self.stat_path[:-1]
if self.ems_xml_cfg_path.endswith(os.sep):
self.ems_xml_cfg_path = self.ems_xml_cfg_path[:-1]
if self.ems_policy_path.endswith(os.sep):
self.ems_policy_path = self.ems_policy_path[:-1]
#==================================================================
def read_pkg_ini(self):
# read per_pkg.ini, pkg_dir ends with os.sep
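# A rough sketch of the per_pkg.ini layout this method expects, pieced together
# from the option names read below. Section and option names come from this
# code; the values are placeholder examples, not settings from the repository.
#
#   [COMMON]
#   PACKAGE_ID = PKG01
#   PACKAGE_NAME = SAMPLE_PKG
#   SYSTEM_NAME = SYS01
#   SERVICE_NAME = SVC01
#   CONFIG_PATH = /CG/CONFIG
#   XML_DB_PATH = /CG/XML_DB
#   INPUT_PATH = /CG/INPUT
#   WORK_PATH = /CG/WORK
#   OUTPUT_PATH = /CG/OUTPUT
#   STAT_PATH = /CG/STAT
#   CLI_NAME = sample_cli
#   START_ALL_PRC_PER_TSPEC = Y
#
#   [LOG]
#   LOG_LEVEL = INFO
#   LOG_BASE_PATH = /CG/LOG
#
#   [PFNM]
#   USER = pfnm_user
#   PASSWD = pfnm_passwd
#
#   [DB]
#   DB_TYPE = MYSQL
#   MYSQL_HOST = 127.0.0.1
#   MYSQL_USER = db_user
#   MYSQL_PASSWD = db_passwd
#   MYSQL_DB_NAME = sample_db
#
#   [EMS]
#   IP = 127.0.0.1
#   ID = ems_admin
#   PASSWD = ems_passwd
#   CONFIG_PATH = /EMS/CONFIG
#   CLI_NAME = ems_cli
#   PACKAGE_NAME = SAMPLE_PKG
#   SYSTEM_NAME = SYS01
#   SERVICE_NAME = SVC01
#   POLICY_PATH = /EMS/POLICY
#
#   Optional sections read when present: [SIMUL], [GSN_CONFIG], [GTP_CONFIG].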
self.ini_config_path_full = self.pkg_dir+self.PKG_CFG_NAME
if (os.path.isfile(self.ini_config_path_full) == False):
err_msg ="invalid cfg path {}".format(self.ini_config_path_full)
self.logger.error(err_msg)
return False
self.__ini_config.read(self.ini_config_path_full)
self.package_id = self.__ini_config['COMMON']['PACKAGE_ID']
self.package_name = self.__ini_config['COMMON']['PACKAGE_NAME']
self.system_name = self.__ini_config['COMMON']['SYSTEM_NAME']
self.service_name = self.__ini_config['COMMON']['SERVICE_NAME']
self.xml_cfg_path = self.__ini_config['COMMON']['CONFIG_PATH']
self.xml_db_path = self.__ini_config['COMMON']['XML_DB_PATH']
self.input_path = self.__ini_config['COMMON']['INPUT_PATH']
self.work_path = self.__ini_config['COMMON']['WORK_PATH']
self.output_path = self.__ini_config['COMMON']['OUTPUT_PATH']
self.stat_path = self.__ini_config['COMMON']['STAT_PATH']
self.cli_name = self.__ini_config['COMMON']['CLI_NAME']
self.start_all_prc_per_tspec = self.__ini_config['COMMON']['START_ALL_PRC_PER_TSPEC']
self.__log_level = self.__ini_config['LOG']['LOG_LEVEL']
self.log_base_path= self.__ini_config['LOG']['LOG_BASE_PATH']
self.pfnm_userid = self.__ini_config['PFNM']['USER']
self.pfnm_passwd = self.__ini_config['PFNM']['PASSWD']
self.db_type = self.__ini_config['DB']['DB_TYPE']
if(self.__ini_config.has_option('DB', 'ORA_CONN')):
self.ora_conn_str = self.__ini_config['DB']['ORA_CONN']
if(self.__ini_config.has_option('DB', 'MYSQL_HOST')):
self.mysql_host = self.__ini_config['DB']['MYSQL_HOST']
self.mysql_user = self.__ini_config['DB']['MYSQL_USER']
self.mysql_passwd = self.__ini_config['DB']['MYSQL_PASSWD']
self.mysql_db_name= self.__ini_config['DB']['MYSQL_DB_NAME']
if(self.db_type =='ORACLE' and self.ora_conn_str ==None):
err_msg ="oracle connection string not set"
self.logger.error(err_msg)
return False
if self.db_type =='MYSQL' :
if self.mysql_host ==None or self.mysql_user ==None or \
self.mysql_passwd ==None or self.mysql_db_name ==None :
err_msg ="mysql db info not all set"
self.logger.error(err_msg)
return False
if(self.__ini_config.has_option('SIMUL', 'SIMUL_NAME')):
self.simul_name = self.__ini_config['SIMUL']['SIMUL_NAME']
if(self.__ini_config.has_option('GSN_CONFIG', 'GSN_BINDING_IP')):
# XXX gtp only !
# Must exist in T_GTP_NODE_INFO; GTP only accepts traffic from predefined GSNs.
# When GTP receives a message it looks up the sender's ip and port.
self.simul_gsn_binding_ip = self.__ini_config['GSN_CONFIG']['GSN_BINDING_IP']
self.simul_gsn_binding_port = self.__ini_config['GSN_CONFIG']['GSN_BINDING_PORT']
# GTP info : ip and port that the GTP process binds to.
# Must exist in T_GTP_DEF; this is the server-side info used when GTP starts.
self.simul_gtp_ip = self.__ini_config['GTP_CONFIG']['GTP_IP']
self.simul_gtp_port = self.__ini_config['GTP_CONFIG']['GTP_PORT']
#self.simul_tps = self.__ini_config['TPS_CONFIG']['SEND_PER_SECOND']
# Not needed here: the simulator reads the setting directly and handles TPS itself.
if(self.__ini_config.has_option('SIMUL', 'SUMUL_DM_BASE_PATH')):
# diameter simul only !!
# XXX : diameter simul use relative path !!!!
# /CG/OFCS_SIM/IMS_SIM/config --> client.list
# /CG/OFCS_SIM/IMS_SIM/raw --> tas01.ini
self.simul_dm_base_path = self.__ini_config['SIMUL']['SUMUL_DM_BASE_PATH']
if(self.__ini_config.has_option('EMS', 'IP')==False):
self.logger.error("- EMS CONFIG NOT EXISTS")
return False
self.ems_ip = self.__ini_config['EMS']['IP']
self.ems_id = self.__ini_config['EMS']['ID']
self.ems_passwd = self.__ini_config['EMS']['PASSWD']
self.ems_xml_cfg_path = self.__ini_config['EMS']['CONFIG_PATH']
self.ems_cli_name = self.__ini_config['EMS']['CLI_NAME']
self.ems_package_name = self.__ini_config['EMS']['PACKAGE_NAME']
self.ems_system_name = self.__ini_config['EMS']['SYSTEM_NAME']
self.ems_service_name = self.__ini_config['EMS']['SERVICE_NAME']
self.ems_policy_path = self.__ini_config['EMS']['POLICY_PATH']
self.strip_all_members()
self.remove_tailing_os_sep()
self.logger.info("- xml_cfg_path [{}]".format(self.xml_cfg_path))
self.logger.info("- xml_db_path [{}]".format(self.xml_db_path))
self.logger.info("- input_path [{}]".format(self.input_path))
self.logger.info("- work_path [{}]".format(self.work_path))
self.logger.info("- stat_path [{}]".format(self.stat_path))
self.logger.info("- output_path [{}]".format(self.output_path))
self.logger.info("- package_id [{}]".format(self.package_id))
self.logger.info("- package_name [{}]".format(self.package_name))
self.logger.info("- system_name [{}]".format(self.system_name ))
self.logger.info("- service_name [{}]".format(self.service_name ))
self.logger.info("- cli_name [{}]".format(self.cli_name ))
self.logger.info("- log_level [{}]".format(self.__log_level ))
self.logger.info("- log_base_path[{}]".format(self.log_base_path ))
self.logger.info("- db_type [{}]".format(self.db_type ))
if self.db_type =='ORACLE' :
self.logger.info("- conn str [{}]".format(self.ora_conn_str))
if self.db_type =='MYSQL' :
self.logger.info("- host [{}]".format(self.mysql_host ))
self.logger.info("- user [{}]".format(self.mysql_user ))
#self.logger.info("- passwd [{}]".format(self.mysql_passwd ))
self.logger.info("- db name [{}]".format(self.mysql_db_name))
self.logger.info("- simul_name [{}]".format(self.simul_name ))
self.logger.info("- simul_gsn_binding_ip [{}]".format(self.simul_gsn_binding_ip ))
self.logger.info("- simul_gsn_binding_port[{}]".format(self.simul_gsn_binding_port ))
self.logger.info("- simul_gtp_ip [{}]".format(self.simul_gtp_ip ))
self.logger.info("- simul_gtp_ip [{}]".format(self.simul_gtp_port ))
#self.logger.info("- simul_tps [{}]".format(self.simul_tps ))
self.logger.info("- start_all_prc_per_tspec [{}]".format(self.start_all_prc_per_tspec ))
self.temp_internal_use_only_dir = self.pkg_dir + 'do_not_delete_internal_use'
self.logger.debug("- internal_use_only_dir [{}]".
format(self.temp_internal_use_only_dir))
if(self.simul_dm_base_path):
self.logger.info("- simul_dm_base_path [{}]".format(self.simul_dm_base_path ))
self.temp_internal_use_only_dir_remote = self.temp_internal_use_only_dir + '/remote'
self.logger.info("- test result dir [{}]".format(self.test_result_dir ))
#==================================================================
def set_log_level(self):
#print ("log level is {}".format(self.__log_level))
if(self.__log_level == 'DEBUG'):
self.logger.setLevel(logging.DEBUG)
elif(self.__log_level == 'INFO'):
self.logger.setLevel(logging.INFO)
elif(self.__log_level == 'WARNING'):
self.logger.setLevel(logging.WARNING)
elif(self.__log_level == 'ERROR'):
self.logger.setLevel(logging.ERROR)
elif(self.__log_level == 'CRITICAL'):
self.logger.setLevel(logging.CRITICAL)
else:
self.logger.setLevel(logging.INFO)
#==================================================================
#remove temp directory : self.temp_internal_use_only_dir
def clean_all(self):
self.logger.debug("clean all temporary files")
if(os.path.isdir(self.temp_internal_use_only_dir)):
self.logger.debug("remove internal use only dir : {}".
format(self.temp_internal_use_only_dir))
shutil.rmtree(self.temp_internal_use_only_dir) #XXX dangerous
return True
#==================================================================
def get_groups(self):
grp_dirs = os.listdir(self.pkg_dir)
grp_dirs.sort()
self.__group_dirs = []
self.__group_dirs_full = []
for grp_dir_name in grp_dirs :
if(grp_dir_name.startswith(self.GROUP_DIR_PREFIX)) :
# this is group directory
if(self.__args.group is not None):
#TODO dup check
for grp_arg in self.__args.group:
if(grp_dir_name == grp_arg):
self.logger.info("[run specific group] {}".format(grp_dir_name))
self.__group_dirs_full.append(self.pkg_dir+grp_dir_name)
self.__group_dirs.append(grp_dir_name)
else:
self.__group_dirs_full.append(self.pkg_dir+grp_dir_name)
# pkg_dir ends with os.sep
self.__group_dirs.append(grp_dir_name)
#==================================================================
def run_groups(self):
index =0
for grp_dir_name in self.__group_dirs:
self.ss7i64in_9 = QtWidgets.QPushButton(self.groupBox_28)
self.ss7i64in_9.setObjectName("ss7i64in_9")
self.gridLayout_70.addWidget(self.ss7i64in_9, 9, 1, 1, 1)
self.ss7i64in_10 = QtWidgets.QPushButton(self.groupBox_28)
self.ss7i64in_10.setObjectName("ss7i64in_10")
self.gridLayout_70.addWidget(self.ss7i64in_10, 10, 1, 1, 1)
self.ss7i64in_11 = QtWidgets.QPushButton(self.groupBox_28)
self.ss7i64in_11.setObjectName("ss7i64in_11")
self.gridLayout_70.addWidget(self.ss7i64in_11, 11, 1, 1, 1)
self.gridLayout_68.addWidget(self.groupBox_28, 0, 0, 1, 1)
self.groupBox_30 = QtWidgets.QGroupBox(self.groupBox_32)
self.groupBox_30.setObjectName("groupBox_30")
self.gridLayout_72 = QtWidgets.QGridLayout(self.groupBox_30)
self.gridLayout_72.setContentsMargins(8, 8, 8, 8)
self.gridLayout_72.setSpacing(5)
self.gridLayout_72.setObjectName("gridLayout_72")
self.label_485 = QtWidgets.QLabel(self.groupBox_30)
self.label_485.setObjectName("label_485")
self.gridLayout_72.addWidget(self.label_485, 0, 0, 1, 1)
self.label_513 = QtWidgets.QLabel(self.groupBox_30)
self.label_513.setObjectName("label_513")
self.gridLayout_72.addWidget(self.label_513, 1, 0, 1, 1)
self.label_514 = QtWidgets.QLabel(self.groupBox_30)
self.label_514.setObjectName("label_514")
self.gridLayout_72.addWidget(self.label_514, 2, 0, 1, 1)
self.label_519 = QtWidgets.QLabel(self.groupBox_30)
self.label_519.setObjectName("label_519")
self.gridLayout_72.addWidget(self.label_519, 3, 0, 1, 1)
self.label_517 = QtWidgets.QLabel(self.groupBox_30)
self.label_517.setObjectName("label_517")
self.gridLayout_72.addWidget(self.label_517, 4, 0, 1, 1)
self.label_515 = QtWidgets.QLabel(self.groupBox_30)
self.label_515.setObjectName("label_515")
self.gridLayout_72.addWidget(self.label_515, 5, 0, 1, 1)
self.label_516 = QtWidgets.QLabel(self.groupBox_30)
self.label_516.setObjectName("label_516")
self.gridLayout_72.addWidget(self.label_516, 6, 0, 1, 1)
self.label_511 = QtWidgets.QLabel(self.groupBox_30)
self.label_511.setObjectName("label_511")
self.gridLayout_72.addWidget(self.label_511, 7, 0, 1, 1)
self.label_520 = QtWidgets.QLabel(self.groupBox_30)
self.label_520.setObjectName("label_520")
self.gridLayout_72.addWidget(self.label_520, 8, 0, 1, 1)
self.label_512 = QtWidgets.QLabel(self.groupBox_30)
self.label_512.setObjectName("label_512")
self.gridLayout_72.addWidget(self.label_512, 9, 0, 1, 1)
self.label_518 = QtWidgets.QLabel(self.groupBox_30)
self.label_518.setObjectName("label_518")
self.gridLayout_72.addWidget(self.label_518, 10, 0, 1, 1)
self.label_510 = QtWidgets.QLabel(self.groupBox_30)
self.label_510.setObjectName("label_510")
self.gridLayout_72.addWidget(self.label_510, 11, 0, 1, 1)
self.ss7i64out_0 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_0.setObjectName("ss7i64out_0")
self.gridLayout_72.addWidget(self.ss7i64out_0, 0, 1, 1, 1)
self.ss7i64out_1 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_1.setObjectName("ss7i64out_1")
self.gridLayout_72.addWidget(self.ss7i64out_1, 1, 1, 1, 1)
self.ss7i64out_2 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_2.setObjectName("ss7i64out_2")
self.gridLayout_72.addWidget(self.ss7i64out_2, 2, 1, 1, 1)
self.ss7i64out_3 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_3.setObjectName("ss7i64out_3")
self.gridLayout_72.addWidget(self.ss7i64out_3, 3, 1, 1, 1)
self.ss7i64out_4 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_4.setObjectName("ss7i64out_4")
self.gridLayout_72.addWidget(self.ss7i64out_4, 4, 1, 1, 1)
self.ss7i64out_5 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_5.setObjectName("ss7i64out_5")
self.gridLayout_72.addWidget(self.ss7i64out_5, 5, 1, 1, 1)
self.ss7i64out_6 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_6.setObjectName("ss7i64out_6")
self.gridLayout_72.addWidget(self.ss7i64out_6, 6, 1, 1, 1)
self.ss7i64out_7 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_7.setObjectName("ss7i64out_7")
self.gridLayout_72.addWidget(self.ss7i64out_7, 7, 1, 1, 1)
self.ss7i64out_8 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_8.setObjectName("ss7i64out_8")
self.gridLayout_72.addWidget(self.ss7i64out_8, 8, 1, 1, 1)
self.ss7i64out_9 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_9.setObjectName("ss7i64out_9")
self.gridLayout_72.addWidget(self.ss7i64out_9, 9, 1, 1, 1)
self.ss7i64out_10 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_10.setObjectName("ss7i64out_10")
self.gridLayout_72.addWidget(self.ss7i64out_10, 10, 1, 1, 1)
self.ss7i64out_11 = QtWidgets.QPushButton(self.groupBox_30)
self.ss7i64out_11.setObjectName("ss7i64out_11")
self.gridLayout_72.addWidget(self.ss7i64out_11, 11, 1, 1, 1)
self.gridLayout_68.addWidget(self.groupBox_30, 0, 2, 1, 1)
spacerItem3 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_68.addItem(spacerItem3, 1, 0, 1, 1)
self.gridLayout_69.addWidget(self.groupBox_32, 0, 0, 1, 1)
self.smartSerialSW.addWidget(self.ss7i64)
self.ss7i69 = QtWidgets.QWidget()
self.ss7i69.setObjectName("ss7i69")
self.gridLayout_74 = QtWidgets.QGridLayout(self.ss7i69)
self.gridLayout_74.setContentsMargins(8, 8, 8, 8)
self.gridLayout_74.setSpacing(5)
self.gridLayout_74.setObjectName("gridLayout_74")
self.groupBox_33 = QtWidgets.QGroupBox(self.ss7i69)
self.groupBox_33.setObjectName("groupBox_33")
self.gridLayout_84 = QtWidgets.QGridLayout(self.groupBox_33)
self.gridLayout_84.setContentsMargins(8, 8, 8, 8)
self.gridLayout_84.setSpacing(5)
self.gridLayout_84.setObjectName("gridLayout_84")
self.groupBox_40 = QtWidgets.QGroupBox(self.groupBox_33)
self.groupBox_40.setObjectName("groupBox_40")
self.gridLayout_81 = QtWidgets.QGridLayout(self.groupBox_40)
self.gridLayout_81.setContentsMargins(8, 8, 8, 8)
self.gridLayout_81.setSpacing(5)
self.gridLayout_81.setObjectName("gridLayout_81")
self.label_546 = QtWidgets.QLabel(self.groupBox_40)
self.label_546.setObjectName("label_546")
self.gridLayout_81.addWidget(self.label_546, 2, 0, 1, 1)
self.label_550 = QtWidgets.QLabel(self.groupBox_40)
self.label_550.setObjectName("label_550")
self.gridLayout_81.addWidget(self.label_550, 12, 0, 1, 1)
self.label_548 = QtWidgets.QLabel(self.groupBox_40)
self.label_548.setObjectName("label_548")
self.gridLayout_81.addWidget(self.label_548, 1, 0, 1, 1)
self.label_547 = QtWidgets.QLabel(self.groupBox_40)
self.label_547.setObjectName("label_547")
self.gridLayout_81.addWidget(self.label_547, 6, 0, 1, 1)
self.label_556 = QtWidgets.QLabel(self.groupBox_40)
self.label_556.setObjectName("label_556")
self.gridLayout_81.addWidget(self.label_556, 3, 0, 1, 1)
self.label_413 = QtWidgets.QLabel(self.groupBox_40)
self.label_413.setObjectName("label_413")
self.gridLayout_81.addWidget(self.label_413, 0, 0, 1, 1)
self.label_551 = QtWidgets.QLabel(self.groupBox_40)
self.label_551.setObjectName("label_551")
self.gridLayout_81.addWidget(self.label_551, 7, 0, 1, 1)
self.label_554 = QtWidgets.QLabel(self.groupBox_40)
self.label_554.setObjectName("label_554")
self.gridLayout_81.addWidget(self.label_554, 9, 0, 1, 1)
self.label_555 = QtWidgets.QLabel(self.groupBox_40)
self.label_555.setObjectName("label_555")
self.gridLayout_81.addWidget(self.label_555, 5, 0, 1, 1)
self.label_549 = QtWidgets.QLabel(self.groupBox_40)
self.label_549.setObjectName("label_549")
self.gridLayout_81.addWidget(self.label_549, 4, 0, 1, 1)
self.label_552 = QtWidgets.QLabel(self.groupBox_40)
self.label_552.setObjectName("label_552")
self.gridLayout_81.addWidget(self.label_552, 10, 0, 1, 1)
self.label_553 = QtWidgets.QLabel(self.groupBox_40)
self.label_553.setObjectName("label_553")
self.gridLayout_81.addWidget(self.label_553, 11, 0, 1, 1)
self.ss7i69in_0 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_0.setObjectName("ss7i69in_0")
self.gridLayout_81.addWidget(self.ss7i69in_0, 0, 1, 1, 1)
self.ss7i69in_1 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_1.setObjectName("ss7i69in_1")
self.gridLayout_81.addWidget(self.ss7i69in_1, 1, 1, 1, 1)
self.ss7i69in_2 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_2.setObjectName("ss7i69in_2")
self.gridLayout_81.addWidget(self.ss7i69in_2, 2, 1, 1, 1)
self.ss7i69in_3 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_3.setObjectName("ss7i69in_3")
self.gridLayout_81.addWidget(self.ss7i69in_3, 3, 1, 1, 1)
self.ss7i69in_4 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_4.setObjectName("ss7i69in_4")
self.gridLayout_81.addWidget(self.ss7i69in_4, 4, 1, 1, 1)
self.ss7i69in_5 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_5.setObjectName("ss7i69in_5")
self.gridLayout_81.addWidget(self.ss7i69in_5, 5, 1, 1, 1)
self.ss7i69in_6 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_6.setObjectName("ss7i69in_6")
self.gridLayout_81.addWidget(self.ss7i69in_6, 6, 1, 1, 1)
self.ss7i69in_7 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_7.setObjectName("ss7i69in_7")
self.gridLayout_81.addWidget(self.ss7i69in_7, 7, 1, 1, 1)
self.ss7i69in_8 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_8.setObjectName("ss7i69in_8")
self.gridLayout_81.addWidget(self.ss7i69in_8, 9, 1, 1, 1)
self.ss7i69in_9 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_9.setObjectName("ss7i69in_9")
self.gridLayout_81.addWidget(self.ss7i69in_9, 10, 1, 1, 1)
self.ss7i69in_10 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_10.setObjectName("ss7i69in_10")
self.gridLayout_81.addWidget(self.ss7i69in_10, 11, 1, 1, 1)
self.ss7i69in_11 = QtWidgets.QPushButton(self.groupBox_40)
self.ss7i69in_11.setObjectName("ss7i69in_11")
self.gridLayout_81.addWidget(self.ss7i69in_11, 12, 1, 1, 1)
self.gridLayout_84.addWidget(self.groupBox_40, 0, 0, 1, 1)
self.groupBox_42 = QtWidgets.QGroupBox(self.groupBox_33)
self.groupBox_42.setObjectName("groupBox_42")
self.gridLayout_83 = QtWidgets.QGridLayout(self.groupBox_42)
self.gridLayout_83.setContentsMargins(8, 8, 8, 8)
self.gridLayout_83.setSpacing(5)
self.gridLayout_83.setObjectName("gridLayout_83")
self.label_571 = QtWidgets.QLabel(self.groupBox_42)
self.label_571.setObjectName("label_571")
self.gridLayout_83.addWidget(self.label_571, 9, 0, 1, 1)
self.label_572 = QtWidgets.QLabel(self.groupBox_42)
self.label_572.setObjectName("label_572")
self.gridLayout_83.addWidget(self.label_572, 3, 0, 1, 1)
self.label_570 = QtWidgets.QLabel(self.groupBox_42)
self.label_570.setObjectName("label_570")
self.gridLayout_83.addWidget(self.label_570, 11, 0, 1, 1)
self.label_569 = QtWidgets.QLabel(self.groupBox_42)
self.label_569.setObjectName("label_569")
self.gridLayout_83.addWidget(self.label_569, 2, 0, 1, 1)
self.label_576 = QtWidgets.QLabel(self.groupBox_42)
self.label_576.setObjectName("label_576")
self.gridLayout_83.addWidget(self.label_576, 5, 0, 1, 1)
self.label_575 = QtWidgets.QLabel(self.groupBox_42)
self.label_575.setObjectName("label_575")
self.gridLayout_83.addWidget(self.label_575, 10, 0, 1, 1)
self.label_574 = QtWidgets.QLabel(self.groupBox_42)
self.label_574.setObjectName("label_574")
self.gridLayout_83.addWidget(self.label_574, 8, 0, 1, 1)
self.label_577 = QtWidgets.QLabel(self.groupBox_42)
self.label_577.setObjectName("label_577")
self.gridLayout_83.addWidget(self.label_577, 4, 0, 1, 1)
self.label_578 = QtWidgets.QLabel(self.groupBox_42)
self.label_578.setObjectName("label_578")
self.gridLayout_83.addWidget(self.label_578, 7, 0, 1, 1)
self.label_579 = QtWidgets.QLabel(self.groupBox_42)
self.label_579.setObjectName("label_579")
self.gridLayout_83.addWidget(self.label_579, 0, 0, 1, 1)
self.label_581 = QtWidgets.QLabel(self.groupBox_42)
self.label_581.setObjectName("label_581")
self.gridLayout_83.addWidget(self.label_581, 6, 0, 1, 1)
self.label_580 = QtWidgets.QLabel(self.groupBox_42)
self.label_580.setObjectName("label_580")
self.gridLayout_83.addWidget(self.label_580, 1, 0, 1, 1)
self.ss7i69in_12 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_12.setObjectName("ss7i69in_12")
self.gridLayout_83.addWidget(self.ss7i69in_12, 0, 1, 1, 1)
self.ss7i69in_13 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_13.setObjectName("ss7i69in_13")
self.gridLayout_83.addWidget(self.ss7i69in_13, 1, 1, 1, 1)
self.ss7i69in_14 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_14.setObjectName("ss7i69in_14")
self.gridLayout_83.addWidget(self.ss7i69in_14, 2, 1, 1, 1)
self.ss7i69in_15 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_15.setObjectName("ss7i69in_15")
self.gridLayout_83.addWidget(self.ss7i69in_15, 3, 1, 1, 1)
self.ss7i69in_16 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_16.setObjectName("ss7i69in_16")
self.gridLayout_83.addWidget(self.ss7i69in_16, 4, 1, 1, 1)
self.ss7i69in_17 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_17.setObjectName("ss7i69in_17")
self.gridLayout_83.addWidget(self.ss7i69in_17, 5, 1, 1, 1)
self.ss7i69in_18 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_18.setObjectName("ss7i69in_18")
self.gridLayout_83.addWidget(self.ss7i69in_18, 6, 1, 1, 1)
self.ss7i69in_19 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_19.setObjectName("ss7i69in_19")
self.gridLayout_83.addWidget(self.ss7i69in_19, 7, 1, 1, 1)
self.ss7i69in_20 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_20.setObjectName("ss7i69in_20")
self.gridLayout_83.addWidget(self.ss7i69in_20, 8, 1, 1, 1)
self.ss7i69in_21 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_21.setObjectName("ss7i69in_21")
self.gridLayout_83.addWidget(self.ss7i69in_21, 9, 1, 1, 1)
self.ss7i69in_22 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_22.setObjectName("ss7i69in_22")
self.gridLayout_83.addWidget(self.ss7i69in_22, 10, 1, 1, 1)
self.ss7i69in_23 = QtWidgets.QPushButton(self.groupBox_42)
self.ss7i69in_23.setObjectName("ss7i69in_23")
self.gridLayout_83.addWidget(self.ss7i69in_23, 11, 1, 1, 1)
self.gridLayout_84.addWidget(self.groupBox_42, 0, 1, 1, 1)
self.groupBox_39 = QtWidgets.QGroupBox(self.groupBox_33)
self.groupBox_39.setObjectName("groupBox_39")
self.gridLayout_80 = QtWidgets.QGridLayout(self.groupBox_39)
self.gridLayout_80.setContentsMargins(8, 8, 8, 8)
self.gridLayout_80.setSpacing(5)
self.gridLayout_80.setObjectName("gridLayout_80")
self.label_535 = QtWidgets.QLabel(self.groupBox_39)
self.label_535.setObjectName("label_535")
self.gridLayout_80.addWidget(self.label_535, 9, 0, 1, 1)
self.label_540 = QtWidgets.QLabel(self.groupBox_39)
self.label_540.setObjectName("label_540")
self.gridLayout_80.addWidget(self.label_540, 11, 0, 1, 1)
self.label_534 = QtWidgets.QLabel(self.groupBox_39)
self.label_534.setObjectName("label_534")
self.gridLayout_80.addWidget(self.label_534, 8, 0, 1, 1)
self.label_533 = QtWidgets.QLabel(self.groupBox_39)
self.label_533.setObjectName("label_533")
self.gridLayout_80.addWidget(self.label_533, 7, 0, 1, 1)
self.label_545 = QtWidgets.QLabel(self.groupBox_39)
self.label_545.setObjectName("label_545")
self.gridLayout_80.addWidget(self.label_545, 0, 0, 1, 1)
self.label_537 = QtWidgets.QLabel(self.groupBox_39)
self.label_537.setObjectName("label_537")
self.gridLayout_80.addWidget(self.label_537, 1, 0, 1, 1)
self.label_544 = QtWidgets.QLabel(self.groupBox_39)
self.label_544.setObjectName("label_544")
self.gridLayout_80.addWidget(self.label_544, 6, 0, 1, 1)
self.label_543 = QtWidgets.QLabel(self.groupBox_39)
self.label_543.setObjectName("label_543")
self.gridLayout_80.addWidget(self.label_543, 5, 0, 1, 1)
self.label_542 = QtWidgets.QLabel(self.groupBox_39)
self.label_542.setObjectName("label_542")
self.gridLayout_80.addWidget(self.label_542, 4, 0, 1, 1)
self.label_536 = QtWidgets.QLabel(self.groupBox_39)
self.label_536.setObjectName("label_536")
self.gridLayout_80.addWidget(self.label_536, 2, 0, 1, 1)
self.label_539 = QtWidgets.QLabel(self.groupBox_39)
self.label_539.setObjectName("label_539")
self.gridLayout_80.addWidget(self.label_539, 10, 0, 1, 1)
self.label_538 = QtWidgets.QLabel(self.groupBox_39)
self.label_538.setObjectName("label_538")
self.gridLayout_80.addWidget(self.label_538, 3, 0, 1, 1)
self.ss7i69out_12 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_12.setObjectName("ss7i69out_12")
self.gridLayout_80.addWidget(self.ss7i69out_12, 0, 1, 1, 1)
self.ss7i69out_13 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_13.setObjectName("ss7i69out_13")
self.gridLayout_80.addWidget(self.ss7i69out_13, 1, 1, 1, 1)
self.ss7i69out_14 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_14.setObjectName("ss7i69out_14")
self.gridLayout_80.addWidget(self.ss7i69out_14, 2, 1, 1, 1)
self.ss7i69out_15 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_15.setObjectName("ss7i69out_15")
self.gridLayout_80.addWidget(self.ss7i69out_15, 3, 1, 1, 1)
self.ss7i69out_16 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_16.setObjectName("ss7i69out_16")
self.gridLayout_80.addWidget(self.ss7i69out_16, 4, 1, 1, 1)
self.ss7i69out_17 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_17.setObjectName("ss7i69out_17")
self.gridLayout_80.addWidget(self.ss7i69out_17, 5, 1, 1, 1)
self.ss7i69out_18 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_18.setObjectName("ss7i69out_18")
self.gridLayout_80.addWidget(self.ss7i69out_18, 6, 1, 1, 1)
self.ss7i69out_19 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_19.setObjectName("ss7i69out_19")
self.gridLayout_80.addWidget(self.ss7i69out_19, 7, 1, 1, 1)
self.ss7i69out_20 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_20.setObjectName("ss7i69out_20")
self.gridLayout_80.addWidget(self.ss7i69out_20, 8, 1, 1, 1)
self.ss7i69out_21 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_21.setObjectName("ss7i69out_21")
self.gridLayout_80.addWidget(self.ss7i69out_21, 9, 1, 1, 1)
self.ss7i69out_22 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_22.setObjectName("ss7i69out_22")
self.gridLayout_80.addWidget(self.ss7i69out_22, 10, 1, 1, 1)
self.ss7i69out_23 = QtWidgets.QPushButton(self.groupBox_39)
self.ss7i69out_23.setObjectName("ss7i69out_23")
self.gridLayout_80.addWidget(self.ss7i69out_23, 11, 1, 1, 1)
self.gridLayout_84.addWidget(self.groupBox_39, 0, 3, 1, 1)
self.groupBox_41 = QtWidgets.QGroupBox(self.groupBox_33)
self.groupBox_41.setObjectName("groupBox_41")
self.gridLayout_82 = QtWidgets.QGridLayout(self.groupBox_41)
self.gridLayout_82.setContentsMargins(8, 8, 8, 8)
self.gridLayout_82.setSpacing(5)
self.gridLayout_82.setObjectName("gridLayout_82")
self.label_557 = QtWidgets.QLabel(self.groupBox_41)
self.label_557.setObjectName("label_557")
self.gridLayout_82.addWidget(self.label_557, 0, 0, 1, 1)
self.label_558 = QtWidgets.QLabel(self.groupBox_41)
self.label_558.setObjectName("label_558")
self.gridLayout_82.addWidget(self.label_558, 1, 0, 1, 1)
self.label_559 = QtWidgets.QLabel(self.groupBox_41)
self.label_559.setObjectName("label_559")
self.gridLayout_82.addWidget(self.label_559, 2, 0, 1, 1)
self.label_560 = QtWidgets.QLabel(self.groupBox_41)
self.label_560.setObjectName("label_560")
self.gridLayout_82.addWidget(self.label_560, 3, 0, 1, 1)
self.label_561 = QtWidgets.QLabel(self.groupBox_41)
self.label_561.setObjectName("label_561")
self.gridLayout_82.addWidget(self.label_561, 4, 0, 1, 1)
self.label_562 = QtWidgets.QLabel(self.groupBox_41)
self.label_562.setObjectName("label_562")
self.gridLayout_82.addWidget(self.label_562, 5, 0, 1, 1)
self.label_563 = QtWidgets.QLabel(self.groupBox_41)
self.label_563.setObjectName("label_563")
self.gridLayout_82.addWidget(self.label_563, 6, 0, 1, 1)
self.label_564 = QtWidgets.QLabel(self.groupBox_41)
self.label_564.setObjectName("label_564")
self.gridLayout_82.addWidget(self.label_564, 7, 0, 1, 1)
self.label_565 = QtWidgets.QLabel(self.groupBox_41)
self.label_565.setObjectName("label_565")
self.gridLayout_82.addWidget(self.label_565, 8, 0, 1, 1)
self.label_566 = QtWidgets.QLabel(self.groupBox_41)
self.label_566.setObjectName("label_566")
self.gridLayout_82.addWidget(self.label_566, 9, 0, 1, 1)
self.label_567 = QtWidgets.QLabel(self.groupBox_41)
self.label_567.setObjectName("label_567")
self.gridLayout_82.addWidget(self.label_567, 10, 0, 1, 1)
self.label_568 = QtWidgets.QLabel(self.groupBox_41)
self.label_568.setObjectName("label_568")
self.gridLayout_82.addWidget(self.label_568, 11, 0, 1, 1)
self.ss7i69out_0 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_0.setObjectName("ss7i69out_0")
self.gridLayout_82.addWidget(self.ss7i69out_0, 0, 1, 1, 1)
self.ss7i69out_1 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_1.setObjectName("ss7i69out_1")
self.gridLayout_82.addWidget(self.ss7i69out_1, 1, 1, 1, 1)
self.ss7i69out_2 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_2.setObjectName("ss7i69out_2")
self.gridLayout_82.addWidget(self.ss7i69out_2, 2, 1, 1, 1)
self.ss7i69out_3 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_3.setObjectName("ss7i69out_3")
self.gridLayout_82.addWidget(self.ss7i69out_3, 3, 1, 1, 1)
self.ss7i69out_4 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_4.setObjectName("ss7i69out_4")
self.gridLayout_82.addWidget(self.ss7i69out_4, 4, 1, 1, 1)
self.ss7i69out_5 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_5.setObjectName("ss7i69out_5")
self.gridLayout_82.addWidget(self.ss7i69out_5, 5, 1, 1, 1)
self.ss7i69out_6 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_6.setObjectName("ss7i69out_6")
self.gridLayout_82.addWidget(self.ss7i69out_6, 6, 1, 1, 1)
self.ss7i69out_7 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_7.setObjectName("ss7i69out_7")
self.gridLayout_82.addWidget(self.ss7i69out_7, 7, 1, 1, 1)
self.ss7i69out_8 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_8.setObjectName("ss7i69out_8")
self.gridLayout_82.addWidget(self.ss7i69out_8, 8, 1, 1, 1)
self.ss7i69out_9 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_9.setObjectName("ss7i69out_9")
self.gridLayout_82.addWidget(self.ss7i69out_9, 9, 1, 1, 1)
self.ss7i69out_10 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_10.setObjectName("ss7i69out_10")
self.gridLayout_82.addWidget(self.ss7i69out_10, 10, 1, 1, 1)
self.ss7i69out_11 = QtWidgets.QPushButton(self.groupBox_41)
self.ss7i69out_11.setObjectName("ss7i69out_11")
self.gridLayout_82.addWidget(self.ss7i69out_11, 11, 1, 1, 1)
self.gridLayout_84.addWidget(self.groupBox_41, 0, 2, 1, 1)
spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.gridLayout_84.addItem(spacerItem4, 1, 0, 1, 1)
self.gridLayout_74.addWidget(self.groupBox_33, 0, 0, 1, 1)
self.smartSerialSW.addWidget(self.ss7i69)
self.ss7i70 = QtWidgets.QWidget()
self.ss7i70.setObjectName("ss7i70")
self.gridLayout_75 = QtWidgets.QGridLayout(self.ss7i70)
self.gridLayout_75.setContentsMargins(8, 8, 8, 8)
self.gridLayout_75.setSpacing(5)
self.gridLayout_75.setObjectName("gridLayout_75")
self.groupBox_34 = QtWidgets.QGroupBox(self.ss7i70)
self.groupBox_34.setObjectName("groupBox_34")
self.gridLayout_89 = QtWidgets.QGridLayout(self.groupBox_34)
# Repository: TuxML/tuxml
"""Management of the compiler
:author: <NAME>, <NAME>, <NAME>, <NAME>
:version: 2
"""
# @file compiler.py
import re
import subprocess
import shutil
import time
import os
import bz2
import threading
from compilation.logger import *
import compilation.settings as settings
## Compiler
# @author <NAME>
# @version 1
# @brief Compiler object is a wrapper to do a compilation.
# @details Compiler object handles everything related to a compilation.
# To use it, create it, call the method run once and you can retrieve all the
# compilation results with is_successful and get_compilation_dictionary.
class Compiler:
"""Wrapper for the compilation. Handle everything related to compiling
the kernel. To use the compiler class, create a ``Compiler``
instance then run it with the ``run`` method. The method
``is_successful`` will tell if the compilation was made
successfully and more info about compilation can be retrieve with
the method ``get_compilation_dictionary``.
:param logger: Log (output) manager
:type logger: `Logger <logger.html>`_
:param package_manager: package manager to use
:type package_manager: `PackageManager <package_manager.html>`_
:param nb_core: number of cores to use for the compilation
:type nb_core: int
:param kernel_path: path to the Linux kernel directory
:type kernel_path: str
:param kernel_version: version of the Linux kernel
:type kernel_version: str
:param tiny: tiny configuration of the Linux kernel
:type tiny: bool
:param config_file: path to the configuration file (``.config``)
:type config_file: str
:param compiler_exec: compiler to use
:type compiler_exec: str
"""
def __init__(self, logger, package_manager, nb_core, kernel_path,
kernel_version, tiny=False, config_file=None,
compiler_exec='gcc'):
"""Constructor method
"""
assert(logger is not None)
assert(package_manager is not None)
if config_file is not None:
assert(not tiny)
self.__logger = logger
self.__nb_core = nb_core
self.__kernel_path = kernel_path
self.__package_manager = package_manager
self.__kernel_version = kernel_version
self.__tiny = tiny
self.__config_file = config_file
self.__compiler_exec = compiler_exec
# Variables results
self.__compilation_success = False
self.__compilation_time = 0
self.__kernel_size = -1
self.__kernel_compressed_size = ""
self.__result_dictionary = {}
# Presetting of __kernel_compressed_size
for compression in settings.KERNEL_COMPRESSION_TYPE:
for typ in ["-bzImage", "-vmlinux", ""]:
self.__kernel_compressed_size = "{}{}{} : -1 , ".format(
self.__kernel_compressed_size,
compression,
typ
)
self.__kernel_compressed_size = self.__kernel_compressed_size[:-3]
## run
# @author <NAME>
# @version 1
# @brief Call it once to do the whole compilation process.
# @details Thread like method.
def run(self):
"""Generates a configuration, launch compilation and retrieve data
about the process.
"""
self.__linux_config_generator(self.__tiny, self.__config_file)
self.__do_a_compilation()
if self.__compilation_success:
self.__kernel_size = self.__retrieve_kernel_size(
"{}/vmlinux".format(self.__kernel_path))
self.__get_compressed_kernel_size()
self.__set_result_dictionary()
## __do_a_compilation
# @author <NAME>, <NAME>
# @version 2
# @brief Run a compilation, with autofix and timer.
def __do_a_compilation(self):
"""Run a compilation, with autofix and timer.
.. note:: *Autofix* fixes automatically dependencies of tools
used for the compilation
"""
start_compilation_timer = time.time()
install_time_cpt = 0
self.__logger.reset_stdout_pipe()
self.__logger.reset_stderr_pipe()
self.__compilation_success = True
while self.__compilation_success and not self.__compile(start_compilation_timer):
start_installation_timer = time.time()
success, missing_files, missing_package = self.__log_analyser()
retry = success\
and self.__package_manager\
.fix_missing_dependencies(missing_files,
missing_package)
stop_installation_timer = time.time()
install_time_cpt += \
stop_installation_timer - start_installation_timer
if retry:
self.__logger.timed_print_output("Restarting compilation",
color=COLOR_SUCCESS)
self.__logger.reset_stdout_pipe()
self.__logger.reset_stderr_pipe()
else:
self.__compilation_success = False
end_compilation_timer = time.time()
self.__compilation_time = \
end_compilation_timer - start_compilation_timer - install_time_cpt
# Logging compilation result
if self.is_successful():
self.__logger.timed_print_output(
"Successfully compiled in {} (installation_time = {})".format(
time.strftime(
"%H:%M:%S",
time.gmtime(self.__compilation_time)),
time.strftime(
"%H:%M:%S",
time.gmtime(install_time_cpt)),
),
color=COLOR_SUCCESS
)
else:
self.__logger.timed_print_output(
"Unable to compile in {} (installation_time = {})".format(
time.strftime(
"%H:%M:%S",
time.gmtime(self.__compilation_time)),
time.strftime(
"%H:%M:%S",
time.gmtime(install_time_cpt)),
),
color=COLOR_ERROR
)
## __linux_config_generator
# @author <NAME>, <NAME>
# @version 2
# @brief Generate .config in the kernel folder, in order to compile with it.
def __linux_config_generator(self, tiny, specific_config):
"""Generates .config in the kernel directory. (Calls tinyconfig or
randconfig)
:param tiny: set to True if you want a tiny Linux
configuration. False otherwise.
:type tiny: bool
:param specific_config: path to a .config
:type specific_config: str
"""
if specific_config is not None:
self.__logger.timed_print_output("Using specific KCONFIG file.")
shutil.copyfile(
specific_config, "{}/.config".format(self.__kernel_path))
elif tiny:
self.__logger.timed_print_output(
"Tiny config with preset values:")
with open(settings.CONFIG_PRESET_FILE, 'r') as preset_list:
self.__logger.print_output(preset_list.read())
subprocess.run(
args="KCONFIG_ALLCONFIG={} make CC={} HOSTCC={} -C {} tinyconfig -j{}"
.format(
settings.CONFIG_PRESET_FILE,
self.__compiler_exec,
self.__compiler_exec,
self.__kernel_path,
self.__nb_core
),
shell=True,
stdout=subprocess.DEVNULL,
stderr=self.__logger.get_stderr_pipe(),
check=True
)
else:
self.__logger.print_output(
"Random config based on the following preset values:")
with open(settings.CONFIG_PRESET_FILE, 'r') as preset_list:
self.__logger.print_output(preset_list.read())
subprocess.run(
args="KCONFIG_ALLCONFIG={} make CC={} HOSTCC={} -C {} randconfig -j{}"
.format(
settings.CONFIG_PRESET_FILE,
self.__compiler_exec,
self.__compiler_exec,
self.__kernel_path,
self.__nb_core
),
shell=True,
stdout=subprocess.DEVNULL,
stderr=self.__logger.get_stderr_pipe(),
check=True
)
## __log_output
# @author <NAME>
# @version 1
# @brief Print logs in a file with the time
# Print the logs from pipe_to_read in the log file pipe_to_write prefixed
# with the time at which the compilation started (start_compilation_timer)
def __log_output(self,
pipe_to_read,
pipe_to_write,
start_compilation_timer):
for line in iter(pipe_to_read.readline, ""):
now = time.time() - start_compilation_timer
now_f = time.strftime("[%H:%M:%S] ", time.gmtime(now))
print(now_f + line, end="", file=pipe_to_write, flush=True)
## __compile
# @author <NAME>, <NAME>, <NAME>
# @version 3
# @brief Run a compilation and return whether it is successful or not.
# @details The main difference here is that this method does not try to fix
# anything if the compilation fails. It just calls make and returns whether
# the make was successful or not.
# The compilation logs are logged in the stdout.log file prefixed with the
# time in the format:
# [00:00:00] make: Entering directory '/TuxML/linux-4.13.3'
# [00:00:00] scripts/kconfig/conf --silentoldconfig Kconfig
# [00:00:01] SYSTBL arch/x86/entry/syscalls/../../include/generated/asm/syscalls_32.h
# [00:00:01] SYSHDR arch/x86/entry/syscalls/../../include/generated/asm/unistd_32_ia32.h
    def __compile(self, start_compilation_timer=None):
        """Compile a Linux kernel and return the status of the compilation
        (success or not). This method does not try to fix anything if the
        compilation fails. It just calls make and returns whether the make is
        successful or not.
        The compilation logs are printed on the output and logged in the
        stdout.log file prefixed with the time.
        :param start_compilation_timer: Time at which the method was called
        (float). Defaults to the time the method is entered.
        :return: status of the compilation: True if successful, False
        otherwise
        :rtype: bool
        """
        # A time.time() default in the signature would be evaluated only once,
        # at function definition time, so the fallback is resolved here instead.
        if start_compilation_timer is None:
            start_compilation_timer = time.time()
self.__logger.timed_print_output("Compilation in progress")
popen = subprocess.Popen(
[
"make",
"CC={}".format(self.__compiler_exec),
"HOSTCC={}".format(self.__compiler_exec),
"-C",
self.__kernel_path,
"-j{}".format(self.__nb_core)
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True
)
# Logging of stdout is made in another thread while logging of stderr is
# made in this thread
tout = threading.Thread(target=self.__log_output,
args=(popen.stdout,
self.__logger.get_stdout_pipe(),
start_compilation_timer))
        tout.daemon = True
tout.start()
for line in iter(popen.stderr.readline, ""):
now = time.time() - start_compilation_timer
now_f = time.strftime("[%H:%M:%S] ", time.gmtime(now))
print(
now_f + line,
end="",
file=self.__logger.get_stderr_pipe(),
flush=True
)
failure = popen.wait()
popen.stdout.close()
popen.stderr.close()
tout.join()
if not failure:
self.__logger.timed_print_output(
"Compilation successful.",
color=COLOR_SUCCESS)
return True
else:
self.__logger.timed_print_output(
"Compilation failed, exit status : {}.".format(failure),
color=COLOR_ERROR)
return False
    ## __log_analyser
# @author <NAME>, <NAME>
# @version 2
# @brief Analyse settings.STDERR_FILE
# @return (status, missing_files, missing_packages)
def __log_analyser(self):
"""Analyses ``settings.STDERR_FILE`` (log file).
:return: Tuple like so: ``(status, missing_files,
missing_packages)``
:rtype: tuple
"""
self.__logger.timed_print_output(
"Analysing {}".format(settings.STDERR_FILE))
files, packages = list(), list()
with open(settings.STDERR_FILE, 'r') as err_logs:
for line in err_logs:
if re.search("fatal error", line):
# case "file.c:48:19: fatal error: <file.h>: No such file or directory"
files.append(line.split(":")[4].strip())
elif re.search("Command not found", line):
# case "make[4]: <command> : command not found"
packages.append(line.split(":")[1].strip())
elif re.search("not found", line):
if len(line.split(":")) == 4:
# case "/bin/sh: 1: <command>: not found"
packages.append(line.split(":")[2].strip())
else:
# ./scripts/gcc-plugin.sh: 11: ./scripts/gcc-plugin.sh: <package>: not found
packages.append(line.split(":")[3].strip())
success = len(files) > 0 or len(packages) > 0
if success:
self.__logger.timed_print_output(
"Missing file(s)/package(s) found.", color=COLOR_SUCCESS)
else:
self.__logger.timed_print_output(
"Unable to find the missing package(s).", color=COLOR_ERROR)
return success, files, packages
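    # Illustrative stderr lines (hypothetical file/package names) showing what
    # the branches above extract:
    #   "mm/memory.c:48:19: fatal error: linux/foo.h: No such file or directory"
    #       -> files receives "linux/foo.h" (5th ':'-separated field)
    #   "/bin/sh: 1: flex: not found"
    #       -> packages receives "flex" (3rd ':'-separated field)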
## is_successful
# @author <NAME>
# @version 1
def is_successful(self):
"""Predicate on the status of the previous compilation
:return: either the compilation was successful or not
:rtype: bool
"""
return self.__compilation_success
## get_compilation_dictionary
# @author <NAME>
# @version 1
# @brief Return a dictionary containing all the data about compilation.
# @details All the key represent each field (minus cid) of the bdd.
def get_compilation_dictionary(self):
"""Gives all the data about the previous compilation.
Each key of the dictionary represents each field of the
database (minus cid).
:return: info about the compilation
:rtype: dictionary
"""
return self.__result_dictionary
## __retrieve_kernel_size
# @author <NAME>
# @version 1
# @brief Retrieve the kernel size
    # @details Check if the path exists; if yes, return its size. If not, return
    # -1. Note that this method is named as it is because it is only used to
    # retrieve the size of a kernel, but it could have been named __retrieve_size.
@staticmethod
def __retrieve_kernel_size(compiled_kernel_path):
"""Retrieve kernel | |
<filename>Packs/Rapid7_InsightIDR/Integrations/Rapid7_InsightIDR/Rapid7_InsightIDR.py
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
import dateparser
import json
import urllib3
from datetime import datetime, timedelta
from typing import Dict, Tuple
from requests import Response
# Disable insecure warnings
urllib3.disable_warnings()
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
INVESTIGATIONS_FIELDS = ['title', 'id', 'status', 'created_time', 'source', 'assignee', 'alerts']
THREATS_FIELDS = ['name', 'note', 'indicator_count', 'published']
LOGS_FIELDS = ['name', 'id']
EVENTS_FIELDS = ['log_id', 'message', 'timestamp']
class Client(BaseClient):
"""Client for Rapid7 InsightIDR REST API."""
def __init__(self, base_url: str, headers: dict, verify: bool, proxy: bool):
super().__init__(base_url=base_url, verify=verify, proxy=proxy, headers=headers)
def list_investigations(self, params: dict) -> dict:
return self._http_request(method='GET',
url_suffix='idr/v1/investigations',
params=params)
def bulk_close_investigations(self, body: dict) -> dict:
return self._http_request(method='POST',
url_suffix='idr/v1/investigations/bulk_close',
headers=self._headers,
json_data=body)
def assign_user(self, investigation_id: str, body: dict) -> dict:
return self._http_request(method='PUT',
url_suffix=f'idr/v1/investigations/{investigation_id}/assignee',
headers=self._headers,
json_data=body)
def set_status(self, investigation_id: str, status: str) -> dict:
return self._http_request(method='PUT',
url_suffix=f'idr/v1/investigations/{investigation_id}'
f'/status/{status}',
headers=self._headers)
def add_threat_indicators(self, key: str, body: dict) -> dict:
return self._http_request(method='POST',
url_suffix=f'idr/v1/customthreats/key/{key}/indicators/add',
headers=self._headers,
params={"format": "json"},
json_data=body)
def replace_threat_indicators(self, key: str, body: dict) -> dict:
return self._http_request(method='POST',
url_suffix=f'idr/v1/customthreats/key/{key}/indicators/replace',
headers=self._headers,
params={"format": "json"},
json_data=body)
def list_logs(self) -> dict:
return self._http_request(method='GET',
url_suffix='log_search/management/logs',
headers=self._headers)
def list_log_sets(self) -> dict:
return self._http_request(method='GET',
url_suffix='log_search/management/logsets',
headers=self._headers)
def download_logs(self, log_ids: str, params: dict) -> Response:
headers = self._headers.copy()
headers['Accept-Encoding'] = ''
return self._http_request(method='GET',
url_suffix=f'log_search/download/logs/{log_ids}',
headers=headers,
params=params,
resp_type='response')
def query_log(self, log_id: str, params: dict) -> Response:
return self._http_request(method='GET',
url_suffix=f'log_search/query/logs/{log_id}',
headers=self._headers,
params=params,
resp_type='response')
def query_log_set(self, log_set_id: str, params: dict) -> Response:
return self._http_request(method='GET',
url_suffix=f'log_search/query/logsets/{log_set_id}',
headers=self._headers,
params=params,
resp_type='response')
def query_log_callback(self, url: str) -> dict:
return self._http_request(method='GET',
url_suffix='',
full_url=url,
headers=self._headers)
def validate(self) -> Response:
"""
Validate API using list-investigations method.
Returns:
response(Response): API response from InsightIDR
"""
params = {'size': 1}
return self._http_request(method='GET',
url_suffix='idr/v1/investigations',
params=params,
resp_type='response')
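# Illustrative sketch (not part of the original integration): a Client is built
# from the instance parameters before any command runs. The region URL and
# header name below are assumptions for the example only.
#   client = Client(base_url='https://us.api.insight.rapid7.com/',
#                   headers={'X-Api-Key': api_key, 'content-type': 'application/json'},
#                   verify=True, proxy=False)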
def insight_idr_list_investigations_command(client: Client, statuses: str = None,
time_range: str = None,
start_time: str = None, end_time: str = None,
index: int = 0, page_size: int = 20) -> CommandResults:
"""
List investigations according to received parameters.
Args:
client(Client): Rapid7 client
statuses(str): An optional comma separated set of investigation statuses
time_range(str): An optional relative time range in a readable format.
start_time(str): An optional ISO formatted timestamp
end_time(str): An optional ISO formatted timestamp
index(int): The optional 0 based index of the page to retrieve
page_size(int): The optional size of the page to retrieve
Returns:
CommandResults with raw_response, readable_output and outputs.
"""
# start_time and end_time can come in "last 1 day" format, so we parse it
if time_range:
start_time, end_time = parse_date_range(time_range, date_format=DATE_FORMAT)
params = {
'statuses': statuses,
'start_time': start_time,
'end_time': end_time,
'index': index,
'size': page_size
}
results = client.list_investigations(remove_empty_elements(params))
data_for_output = results.get('data', [])
readable_output = tableToMarkdown('Requested Investigations',
data_for_output,
headers=INVESTIGATIONS_FIELDS,
removeNull=True)
command_results = CommandResults(
outputs_prefix='Rapid7InsightIDR.Investigation',
outputs_key_field='id',
raw_response=results,
outputs=data_for_output,
readable_output=readable_output
)
return command_results
def insight_idr_get_investigation_command(client: Client, investigation_id: str) -> CommandResults:
"""
    Get a single investigation by its ID.
Args:
client(Client): Rapid7 client
investigation_id(str): Investigation ID
Returns:
CommandResults with raw_response, readable_output and outputs.
"""
results = client.list_investigations({})
data_for_output = results.get('data', [])
investigation_data = {}
for investigation in data_for_output:
if investigation.get('id') == investigation_id:
investigation_data = investigation
if not investigation_data:
return CommandResults(raw_response=None)
readable_output = tableToMarkdown(f'Investigation Information (id: {investigation_id})',
investigation_data,
headers=INVESTIGATIONS_FIELDS,
removeNull=True)
command_results = CommandResults(
outputs_prefix='Rapid7InsightIDR.Investigation',
outputs_key_field='id',
raw_response=investigation_data,
outputs=investigation_data,
readable_output=readable_output
)
return command_results
def insight_idr_close_investigations_command(client: Client, start_time: str, end_time: str,
source: str, max_investigations_to_close: int = None,
alert_type: str = None) -> CommandResults:
"""
Close investigations by start_time, end_time and source.
Args:
client(Client): Rapid7 client
start_time(str): An ISO formatted timestamp.
end_time(str): An ISO formatted timestamp.
source(str): The name of an investigation source
max_investigations_to_close(int): An optional maximum number of alerts to close
alert_type(str): The category of alerts that should be closed
Returns:
CommandResults with raw_response, readable_output and outputs.
"""
body = {
'from': start_time,
'to': end_time,
'source': source,
'max_investigations_to_close': max_investigations_to_close,
'alert_type': alert_type
}
results = client.bulk_close_investigations(remove_empty_elements(body))
ids = {
'id': results.get('ids')
}
data_for_outputs = []
for current_id in results.get('ids', []):
data_for_outputs.append({
'id': current_id,
'status': 'CLOSED'
})
readable_output = tableToMarkdown('Closed Investigations IDs', ids, headers=['id'],
removeNull=True)
command_results = CommandResults(
outputs_prefix='Rapid7InsightIDR.Investigation',
outputs_key_field='id',
raw_response=results,
outputs=data_for_outputs,
readable_output=readable_output
)
return command_results
def insight_idr_assign_user_command(client: Client, investigation_id: str,
user_email_address: str):
"""
Assigning user, by email, to investigation or investigations.
Args:
client(Client): Rapid7 client
investigation_id(str): Investigation IDs, One or XSOAR list (str separated by commas)
user_email_address(str): The email address of the user to assign
Returns:
CommandResults with raw_response, readable_output and outputs.
"""
results = []
data_for_readable_output = []
for investigation in argToList(investigation_id):
body = {
'user_email_address': user_email_address
}
result = client.assign_user(investigation, body)
results.append(result)
data_for_readable_output.append(result)
time.sleep(0.01)
readable_output = tableToMarkdown(f'Investigation Information (id: {investigation_id})',
data_for_readable_output,
headers=INVESTIGATIONS_FIELDS,
removeNull=True)
command_results = CommandResults(
outputs_prefix='Rapid7InsightIDR.Investigation',
outputs_key_field='id',
raw_response=results,
outputs=data_for_readable_output,
readable_output=readable_output
)
return command_results
def insight_idr_set_status_command(client: Client, investigation_id: str, status: str):
"""
Change the status of investigation or investigations to OPEN/CLOSED.
Args:
client(Client): Rapid7 client
investigation_id(str): Investigation IDs, One or XSOAR list (str separated by commas)
status(str): The new status for the investigation (open/closed)
Returns:
CommandResults with raw_response, readable_output and outputs.
"""
results = []
data_for_readable_output = []
for investigation in argToList(investigation_id):
result = client.set_status(investigation, status)
results.append(result)
data_for_readable_output.append(result)
time.sleep(0.01)
readable_output = tableToMarkdown(f'Investigation Information (id: {investigation_id})',
data_for_readable_output,
headers=INVESTIGATIONS_FIELDS,
removeNull=True)
command_results = CommandResults(
outputs_prefix='Rapid7InsightIDR.Investigation',
outputs_key_field='id',
raw_response=results,
outputs=data_for_readable_output,
readable_output=readable_output
)
return command_results
def insight_idr_add_threat_indicators_command(client: Client, key: str,
ip_addresses: str = None,
hashes: str = None,
domain_names: str = None,
url: str = None) -> CommandResults:
"""
Adding threat indicators to threat (or threats) by key.
Args:
client(Client): Rapid7 client
key(str): Threat key (Threat IDs), One or XSOAR list (str separated by commas)
ip_addresses(str): IPs addresses, One or XSOAR list (str separated by commas)
hashes(str): Hashes, One or XSOAR list (str separated by commas)
domain_names(str): Domain names, One or XSOAR list (str separated by commas)
url(str): URLs, One or XSOAR list (str separated by commas)
Returns:
CommandResults with raw_response, readable_output and outputs.
"""
body = {
'ips': argToList(ip_addresses),
'hashes': argToList(hashes),
'domain_names': argToList(domain_names),
'urls': argToList(url)
}
body = remove_empty_elements(body)
results = []
data_for_readable_output = []
for threat in argToList(key):
result = client.add_threat_indicators(threat, body)
results.append(result)
data_for_readable_output.append(result.get('threat'))
time.sleep(0.01)
readable_output = tableToMarkdown(f'Threat Information (key: {key})', data_for_readable_output,
headers=THREATS_FIELDS, removeNull=True)
command_results = CommandResults(
outputs_prefix='Rapid7InsightIDR.Threat',
outputs_key_field='name',
raw_response=results,
outputs=data_for_readable_output,
readable_output=readable_output
)
return command_results
def insight_idr_replace_threat_indicators_command(client: Client, key: str,
ip_addresses: str = None, hashes: str = None,
domain_names: str = None,
url: str = None) -> CommandResults:
"""
Replace threat indicators to threat (or threats) by key.
Args:
client(Client): Rapid7 Client
key(str): Threat key (threat ID), One or XSOAR list (str separated by commas)
ip_addresses(str/List[str]): IPs addresses, One or XSOAR list (str separated by commas)
hashes(str/List[str]): hashes, One or XSOAR list (str separated by commas)
        domain_names(str/List[str]): Domain names, one or XSOAR list (str separated by commas)
url(str/List[str]): URLs, One or XSOAR list (str separated by commas)
Returns:
CommandResults with raw_response, readable_output and outputs.
"""
body = {
'ips': argToList(ip_addresses),
'hashes': argToList(hashes),
'domain_names': argToList(domain_names),
'urls': argToList(url)
}
body = remove_empty_elements(body)
results = []
data_for_readable_output = []
for threat in argToList(key):
result = client.replace_threat_indicators(threat, body)
results.append(result)
data_for_readable_output.append(result.get('threat'))
time.sleep(0.01)
readable_output = tableToMarkdown(f'Threat Information (key: {key})', data_for_readable_output,
headers=THREATS_FIELDS, removeNull=True)
command_results = CommandResults(
outputs_prefix='Rapid7InsightIDR.Threat',
outputs_key_field='name',
raw_response=results,
outputs=data_for_readable_output,
readable_output=readable_output
)
return command_results
def insight_idr_list_logs_command(client: Client) -> CommandResults:
"""
List all logs.
Args:
client(Client): Rapid7 Client
Returns:
CommandResults with raw_response, readable_output and outputs.
"""
results = client.list_logs()
logs = results.get('logs', {})
data_for_readable_output = []
for log in logs:
data_for_readable_output.append(log)
readable_output = tableToMarkdown('List Logs', data_for_readable_output, headers=LOGS_FIELDS,
removeNull=True)
command_results = CommandResults(
outputs_prefix='Rapid7InsightIDR.Log',
outputs_key_field='id',
raw_response=results,
outputs=data_for_readable_output,
readable_output=readable_output
)
return command_results
def insight_idr_list_log_sets_command(client: Client) -> CommandResults:
"""
List all log sets.
Args:
client(Client): Rapid7 Client
Returns:
CommandResults with raw_response, readable_output and outputs.
"""
results = client.list_log_sets()
logs = results.get('logsets', {})
data_for_readable_output = []
for log in logs:
data_for_readable_output.append(log)
readable_output = tableToMarkdown('List Log Sets', data_for_readable_output,
headers=LOGS_FIELDS, removeNull=True)
command_results = CommandResults(
outputs_prefix='Rapid7InsightIDR.LogSet',
outputs_key_field='id',
raw_response=results,
outputs=data_for_readable_output,
readable_output=readable_output
)
return command_results
def insight_idr_download_logs_command(client: Client, log_ids: str, time_range: str = None,
start_time: str = None, end_time: str = None,
query: str = None, limit: str = None):
"""
Download logs to .log file based on time and query (query - optional)
Args:
client(Client): Rapid7 Client
log_ids(str): Log ids to be downloaded
        time_range(str): human time format 'last 4 days' (can be hours, days, months, years)
start_time(str): UNIX timestamp in milliseconds
end_time(str): UNIX timestamp in milliseconds
query(str): LEQL query
limit(int): max number of logs to download
Returns:
CommandResults with raw_response, readable_output | |
msg = "At line {0} column {1}: {2}".format(lineno, colno, msg)
raise Exception(msg)
err = StringIO()
lineno = ctx.line(ctx.pos) + 1
colno = ctx.col(ctx.pos) + 1
msg = "At line {0} column {1}:"
print(msg.format(lineno, colno, ctx.lines), file=err)
for parsers, msg in ctx.errors:
names = " -> ".join([p.name for p in parsers if p.name])
v = data[ctx.pos] or "EOF"
print(names, file=err)
print(" {0} Got {1!r}.".format(msg, v), file=err)
err.seek(0)
raise Exception(err.read())
def __repr__(self):
return self.name or self.__class__.__name__
class AnyChar(Parser):
def process(self, pos, data, ctx):
c = data[pos]
if c is not None:
return (pos + 1, c)
msg = "Expected any character."
ctx.set(pos, msg)
raise Exception(msg)
class Char(Parser):
"""
Char matches a single character.
.. code-block:: python
a = Char("a") # parses a single "a"
val = a("a") # produces an "a" from the data.
val = a("b") # raises an exception
"""
def __init__(self, char):
super(Char, self).__init__()
self.char = char
def process(self, pos, data, ctx):
if data[pos] == self.char:
return (pos + 1, self.char)
msg = "Expected {0}.".format(self.char)
ctx.set(pos, msg)
raise Exception(msg)
def __repr__(self):
if self.name is None:
return "Char({0})".format(self.char)
return self.name
class InSet(Parser):
"""
InSet matches any single character from a set.
.. code-block:: python
vowel = InSet("aeiou") # or InSet(set("aeiou"))
val = vowel("a") # okay
val = vowel("e") # okay
val = vowel("i") # okay
val = vowel("o") # okay
val = vowel("u") # okay
val = vowel("y") # raises an exception
"""
def __init__(self, s, name=None):
super(InSet, self).__init__()
self.values = set(s)
self.name = name
def process(self, pos, data, ctx):
c = data[pos]
if c in self.values:
return (pos + 1, c)
msg = "Expected {0}.".format(self)
ctx.set(pos, msg)
raise Exception(msg)
def __repr__(self):
if self.name is None:
return "InSet({0!r})".format(sorted(self.values))
return super(InSet, self).__repr__()
class String(Parser):
"""
Match one or more characters in a set. Matching is greedy.
.. code-block:: python
vowels = String("aeiou")
val = vowels("a") # returns "a"
val = vowels("u") # returns "u"
val = vowels("aaeiouuoui") # returns "aaeiouuoui"
val = vowels("uoiea") # returns "uoiea"
val = vowels("oouieaaea") # returns "oouieaaea"
val = vowels("ga") # raises an exception
"""
def __init__(self, chars, echars=None, min_length=1):
super(String, self).__init__()
self.chars = set(chars)
self.echars = set(echars) if echars else set()
self.min_length = min_length
def process(self, pos, data, ctx):
results = []
p = data[pos]
old = pos
while p in self.chars or p == "\\":
if p == "\\" and data[pos + 1] in self.echars:
results.append(data[pos + 1])
pos += 2
elif p in self.chars:
results.append(p)
pos += 1
else:
break
p = data[pos]
if len(results) < self.min_length:
msg = "Expected {0} of {1}.".format(self.min_length, sorted(self.chars))
ctx.set(old, msg)
raise Exception(msg)
return pos, "".join(results)
class Literal(Parser):
"""
Match a literal string. The ``value`` keyword lets you return a python
value instead of the matched input. The ``ignore_case`` keyword makes the
match case insensitive.
.. code-block:: python
lit = Literal("true")
val = lit("true") # returns "true"
val = lit("True") # raises an exception
val = lit("one") # raises an exception
lit = Literal("true", ignore_case=True)
val = lit("true") # returns "true"
val = lit("TRUE") # returns "TRUE"
val = lit("one") # raises an exception
t = Literal("true", value=True)
f = Literal("false", value=False)
val = t("true") # returns the boolean True
val = t("True") # raises an exception
val = f("false") # returns the boolean False
val = f("False") # raises and exception
t = Literal("true", value=True, ignore_case=True)
f = Literal("false", value=False, ignore_case=True)
val = t("true") # returns the boolean True
val = t("True") # returns the boolean True
val = f("false") # returns the boolean False
val = f("False") # returns the boolean False
"""
_NULL = object()
def __init__(self, chars, value=_NULL, ignore_case=False):
super(Literal, self).__init__()
self.chars = chars if not ignore_case else chars.lower()
self.value = value
self.ignore_case = ignore_case
self.name = "Literal{0!r}".format(self.chars)
def process(self, pos, data, ctx):
old = pos
if not self.ignore_case:
for c in self.chars:
if data[pos] == c:
pos += 1
else:
msg = "Expected {0!r}.".format(self.chars)
ctx.set(old, msg)
raise Exception(msg)
return pos, (self.chars if self.value is self._NULL else self.value)
else:
result = []
for c in self.chars:
if data[pos].lower() == c:
result.append(data[pos])
pos += 1
else:
msg = "Expected case insensitive {0!r}.".format(self.chars)
ctx.set(old, msg)
raise Exception(msg)
return pos, ("".join(result) if self.value is self._NULL else self.value)
class Wrapper(Parser):
"""
Parser that wraps another parser. This can be used to prevent sequences and
choices from accidentally accumulating other parsers when used in multiple
parts of a grammar.
"""
def __init__(self, parser):
super(Wrapper, self).__init__()
self.add_child(parser)
def process(self, pos, data, ctx):
return self.children[0].process(pos, data, ctx)
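# A minimal sketch (not from the original source) of why Wrapper exists: if a
# shared sub-parser is wrapped, later uses of "+" build a new Sequence around
# the wrapper instead of mutating the shared definition in place.
#   ab = Char("a") + Char("b")
#   term = Wrapper(ab)
#   abc = term + Char("c")   # `ab` itself is left unchanged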
class Mark(object):
"""
An object created by :py:class:`PosMarker` to capture a value at a position
in the input. Marks can give more context to a value transformed by mapped
functions.
"""
def __init__(self, lineno, col, value):
self.lineno = lineno
self.col = col
self.value = value
class PosMarker(Wrapper):
"""
Save the line number and column of a subparser by wrapping it in a
PosMarker. The value of the parser that handled the input as well as the
initial input position will be returned as a :py:class:`Mark`.
"""
def process(self, pos, data, ctx):
lineno = ctx.line(pos) + 1
col = ctx.col(pos) + 1
pos, result = super(PosMarker, self).process(pos, data, ctx)
return pos, Mark(lineno, col, result)
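# Minimal usage sketch (not from the original source), following the call
# convention shown in the Char/String docstrings above:
#   number = PosMarker(String("0123456789"))
#   mark = number("42")            # returns a Mark
#   mark.lineno, mark.col          # -> 1, 1 for input starting at the origin
#   mark.value                     # -> "42"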
class Sequence(Parser):
"""
A Sequence requires all of its children to succeed. It returns a list of
the values they matched.
Additional uses of ``+`` on the parser will cause it to accumulate parsers
onto itself instead of creating new Sequences. This has the desirable
effect of causing sequence results to be represented as flat lists instead
of trees, but it can also have unintended consequences if a sequence is
used in multiple parts of a grammar as the initial element of another
sequence. Use a :py:class:`Wrapper` to prevent that from happening.
.. code-block :: python
a = Char("a") # parses a single "a"
b = Char("b") # parses a single "b"
c = Char("c") # parses a single "c"
ab = a + b # parses a single "a" followed by a single "b"
# (a + b) creates a "Sequence" object. Using `ab`
# as an element in a later sequence would modify
# its original definition.
abc = a + b + c # parses "abc"
# (a + b) creates a "Sequence" object to which c
# is appended
val = ab("ab") # produces a list ["a", "b"]
val = ab("a") # raises an exception
val = ab("b") # raises an exception
val = ab("ac") # raises an exception
val = ab("cb") # raises an exception
val = abc("abc") # produces ["a", "b", "c"]
"""
def __init__(self, children):
super(Sequence, self).__init__()
self.set_children(children)
def __add__(self, other):
return self.add_child(other)
def process(self, pos, data, ctx):
results = []
for p in self.children:
pos, res = p.process(pos, data, ctx)
results.append(res)
return pos, results
class Choice(Parser):
"""
A Choice requires at least one of its children to succeed, and it returns
the value of the one that matched. Alternatives in a choice are tried left
    to right, so they have a definite priority. This is a feature of PEGs over
context free grammars.
Additional uses of ``|`` on the parser will cause it to accumulate parsers
onto itself instead of creating new Choices. This has the desirable effect
of increasing efficiency, but it can also have unintended consequences if a
choice is used in multiple parts of a grammar as the initial element of
another choice. Use a :py:class:`Wrapper` to prevent that from happening.
.. code-block:: python
abc = a | b | c # alternation or choice.
val = abc("a") # parses a single "a"
val = abc("b") # parses a single "b"
val = abc("c") # parses a single "c"
val = abc("d") # raises an exception
"""
def __init__(self, children):
super(Choice, self).__init__()
self.set_children(children)
def __or__(self, other):
return self.add_child(other)
def process(self, pos, data, ctx):
for c in self.children:
try:
return c.process(pos, data, | |
<gh_stars>0
# Copyright 2021 University College London. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configuration."""
import copy
import dataclasses
import importlib.util
import pathlib
from typing import List, Union, Optional
import keras_tuner as kt
import tensorflow as tf
from keras_declarative import hyperparams
from keras_declarative import util
# NOTE: This file is more easily read from the bottom up, as the more generic
# configuration elements are at the bottom and become more specific towards the
# top.
@dataclasses.dataclass
class DlexDataSplitConfig(hyperparams.Config):
"""Data split configuration (DLEX).
Attributes:
train: The training split. Can be an integer (e.g. 50) to use a fixed
number of examples, or a percentage (e.g. 50%) to use a fixed percentage
of the total number of examples.
val: The validation split. Can be an integer (e.g. 50) to use a fixed
number of examples, or a percentage (e.g. 50%) to use a fixed percentage
of the total number of examples.
test: The test split. Can be an integer (e.g. 50) to use a fixed number of
examples, or a percentage (e.g. 50%) to use a fixed percentage of the
total number of examples.
"""
train: int = 0
val: int = 0
test: int = 0
mode: str = 'random'
@dataclasses.dataclass
class TfdsDataSplitConfig(hyperparams.Config):
"""Data split configuration (TFDS).
Attributes:
train: A TFDS split. See https://www.tensorflow.org/datasets/splits.
val: A TFDS split. See https://www.tensorflow.org/datasets/splits.
test: A TFDS split. See https://www.tensorflow.org/datasets/splits.
"""
train: str = None
val: str = None
test: str = None
@dataclasses.dataclass
class DlexDataSourceConfig(hyperparams.Config):
"""DLEX data source configuration.
Attributes:
path: Path to the directory containing the DLEX files.
prefix: The prefix of the DLEX files.
split: The split configuration.
"""
path: str = None
prefix: str = None
split: DlexDataSplitConfig = DlexDataSplitConfig()
@dataclasses.dataclass
class TfdsDataSourceConfig(hyperparams.Config):
"""TFDS data source configuration.
Attributes:
name: The name of the TFDS dataset.
version: The version of the TFDS dataset.
split: The split configuration.
data_dir: The TFDS data directory.
"""
name: str = None
version: str = None
split: TfdsDataSplitConfig = TfdsDataSplitConfig()
data_dir: str = None
@dataclasses.dataclass
class DataSourceConfig(hyperparams.OneOfConfig):
"""Data source configuration.
Attributes:
type: The type of data source.
dlex: The DLEX data source configuration.
tfds: The TFDS data source configuration.
"""
type: str = None
dlex: DlexDataSourceConfig = DlexDataSourceConfig()
tfds: TfdsDataSourceConfig = TfdsDataSourceConfig()
@dataclasses.dataclass
class TensorSpecConfig(hyperparams.Config):
"""Tensor specification configuration."""
name: Optional[Union[str, int]] = None
shape: List[int] = None
dtype: str = 'float32'
@dataclasses.dataclass
class DataSpecsConfig(hyperparams.Config):
"""Specs configuration."""
train: List[TensorSpecConfig] = dataclasses.field(default_factory=list)
val: List[TensorSpecConfig] = dataclasses.field(default_factory=list)
test: List[TensorSpecConfig] = dataclasses.field(default_factory=list)
class ObjectConfig(hyperparams.ParamsDict):
"""Object configuration."""
@dataclasses.dataclass
class ApplyTransformConfig(hyperparams.Config):
"""Apply transform configuration."""
transformation_func: ObjectConfig = ObjectConfig()
@dataclasses.dataclass
class BatchTransformConfig(hyperparams.Config):
"""Batch transform configuration."""
batch_size: int = None
drop_remainder: bool = False
num_parallel_calls: Optional[int] = None
deterministic: Optional[bool] = None
@dataclasses.dataclass
class CacheTransformConfig(hyperparams.Config):
"""Cache transform configuration."""
filename: str = ''
@dataclasses.dataclass
class FilterTransformConfig(hyperparams.Config):
"""Shuffle transform configuration."""
predicate: ObjectConfig = ObjectConfig()
@dataclasses.dataclass
class FlatMapTransformConfig(hyperparams.Config):
"""Flat map transform configuration."""
map_func: ObjectConfig = ObjectConfig()
@dataclasses.dataclass
class MapTransformConfig(hyperparams.Config):
"""Map transform configuration."""
map_func: ObjectConfig = ObjectConfig()
num_parallel_calls: Optional[int] = None
deterministic: Optional[bool] = None
component: Optional[Union[int, str]] = None
output: Optional[Union[int, str]] = None
@dataclasses.dataclass
class PrefetchTransformConfig(hyperparams.Config):
"""Prefetch transform configuration."""
buffer_size: int = None
@dataclasses.dataclass
class RepeatTransformConfig(hyperparams.Config):
"""Repeat transform configuration."""
count: int = None
@dataclasses.dataclass
class ShuffleTransformConfig(hyperparams.Config):
"""Shuffle transform configuration."""
buffer_size: int = None
seed: Optional[int] = None
reshuffle_each_iteration: Optional[bool] = None
@dataclasses.dataclass
class DataTransformConfig(hyperparams.OneOfConfig):
"""Data transform configuration."""
type: str = None
apply: ApplyTransformConfig = ApplyTransformConfig()
batch: BatchTransformConfig = BatchTransformConfig()
cache: CacheTransformConfig = CacheTransformConfig()
filter: FilterTransformConfig = FilterTransformConfig()
flat_map: FlatMapTransformConfig = FlatMapTransformConfig()
map: MapTransformConfig = MapTransformConfig()
prefetch: PrefetchTransformConfig = PrefetchTransformConfig()
repeat: RepeatTransformConfig = RepeatTransformConfig()
shuffle: ShuffleTransformConfig = ShuffleTransformConfig()
@dataclasses.dataclass
class DataOptionsConfig(hyperparams.Config):
"""Data options configuration."""
shuffle_training_only: bool = True
max_intra_op_parallelism: int = None
private_threadpool_size: int = None
@dataclasses.dataclass
class DataTransformsConfig(hyperparams.Config):
"""Data transforms configuration."""
train: List[DataTransformConfig] = dataclasses.field(default_factory=list) # pylint: disable=line-too-long
val: List[DataTransformConfig] = dataclasses.field(default_factory=list) # pylint: disable=line-too-long
test: List[DataTransformConfig] = dataclasses.field(default_factory=list) # pylint: disable=line-too-long
@dataclasses.dataclass
class DataConfig(hyperparams.Config):
"""Data configuration."""
sources: List[DataSourceConfig] = dataclasses.field(default_factory=list)
specs: DataSpecsConfig = DataSpecsConfig()
transforms: DataTransformsConfig = DataTransformsConfig()
options: DataOptionsConfig = DataOptionsConfig()
@dataclasses.dataclass
class ExperimentConfig(hyperparams.Config):
"""Experiment configuration.
Attributes:
name: The name of this experiment. Defaults to
`<config_filename>_<datetime>`.
path: The path to this experiment. Results will be saved in a new directory
`path/name`. Defaults to current working directory.
seed: A global seed to be used for all random number generators.
"""
name: Optional[str] = None
path: Optional[str] = None
seed: Optional[int] = None
@dataclasses.dataclass
class AppConfig(hyperparams.Config):
"""App configuration."""
name: str = None
config: ObjectConfig = ObjectConfig()
preprocess_input: bool = True
decode_predictions: bool = True
@dataclasses.dataclass
class ModelConfig(hyperparams.Config):
"""Model configuration.
Attributes:
type: A `str`. The type of model. One of `'app'`, `'layers'` or `'model'`.
app: An `AppConfig`. The app configuration.
network: A `str` or `ObjectConfig` defining a `tf.keras.layers.Layer` or a
list thereof, implementing a sequential network architecture.
input_spec: A list of `TensorSpecConfig` defining the model input
specification. If not specified, we will attempt to infer the input
specification from the training dataset.
path: A `str`. Path to an existing model. Defaults to `None`. If not `None`,
loads this model ignoring the remaining arguments.
weights: A `str`. Path to model weights.
"""
type: str = None
app: AppConfig = AppConfig()
network: List[ObjectConfig] = ObjectConfig()
input_spec: List[TensorSpecConfig] = dataclasses.field(default_factory=list)
path: str = None
weights: str = None
@dataclasses.dataclass
class TrainingConfig(hyperparams.Config):
"""Training configuration.
See `tf.keras.Model.compile` and `tf.keras.Model.fit` for more information
about these attributes.
Attributes:
optimizer: A `str` or `ObjectConfig` defining a
`tf.keras.optimizers.Optimizer`.
loss: A nested structure of `str` or `ObjectConfig` defining one or more
`tf.keras.losses.Loss`.
metrics: A nested structure of `str` or `ObjectConfig` defining a list of
`tf.keras.metrics.Metric`.
loss_weights: A list or dict of `float` scalars to weight the different loss
functions.
weighted_metrics: A list of `str` or `ObjectConfig` defining a list of
`tf.keras.metrics.Metric`.
run_eagerly: A `bool`. If true, run the model eagerly, without creating a
graph.
steps_per_execution: An `int`. The number of batches to run during each
graph call.
epochs: An `int`. The number of epochs to train the model.
verbose: An `int`. The verbosity mode.
callbacks: A list of `str` or `ObjectConfig` defining a list of
`tf.keras.callbacks.Callback`.
use_default_callbacks: A `bool`. If true, a `ModelCheckpoint` callback and a
`TensorBoard` callback will be added automatically, without the need to
specify them explicitly.
"""
optimizer: ObjectConfig = ObjectConfig()
loss: List[ObjectConfig] = dataclasses.field(default_factory=list)
metrics: List[ObjectConfig] = dataclasses.field(default_factory=list)
loss_weights: List[float] = dataclasses.field(default_factory=list)
weighted_metrics: List[ObjectConfig] = dataclasses.field(default_factory=list)
run_eagerly: bool = None
steps_per_execution: int = None
epochs: int = 1
verbose: Union[str, int] = 1
callbacks: List[ObjectConfig] = dataclasses.field(default_factory=list)
use_default_callbacks: bool = True
@dataclasses.dataclass
class PredictConfig(hyperparams.Config):
"""Prediction configuration.
Attributes:
datasets: A string or list of strings with the datasets to obtain and store
predictions for. Can include the strings `'train'`, `'val'` and `'test'`.
evaluate: A `bool`. Whether to evaluate the model using the specified
datasets.
save_images: A `bool`. If true, saves processed images of the predictions.
3D images are saved as GIF files.
"""
datasets: List[str] = 'test'
evaluate: bool = True
save_images: bool = False
@dataclasses.dataclass
class TuningConfig(hyperparams.Config):
"""Tuning configuration.
Attributes:
    tuner: An `ObjectConfig` defining the tuner configuration. For a list of
valid tuners and their configurations, see
https://keras.io/api/keras_tuner/tuners/.
"""
tuner: ObjectConfig = ObjectConfig()
@dataclasses.dataclass
class DistributeConfig(hyperparams.Config):
"""Distribute configuration.
  Attributes:
strategy: An `ObjectConfig` defining the distribute strategy configuration.
"""
strategy: ObjectConfig = ObjectConfig()
@dataclasses.dataclass
class TrainWorkflowConfig(hyperparams.Config):
"""Train model workflow configuration.
Attributes:
experiment: An `ExperimentConfig`. General experiment configuration.
data: A `DataConfig`. The dataset/s configuration.
model: A `ModelConfig`. The model configuration.
training: A `TrainingConfig`. The training configuration.
predict: A `PredictConfig`. The prediction configuration.
tuning: A `TuningConfig`. The tuning configuration.
distribute: A `DistributeConfig`. The distribute configuration.
"""
experiment: ExperimentConfig = ExperimentConfig()
data: DataConfig = DataConfig()
model: ModelConfig = ModelConfig()
training: TrainingConfig = TrainingConfig()
predict: PredictConfig = PredictConfig()
tuning: TuningConfig = TuningConfig()
distribute: DistributeConfig = DistributeConfig()
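# Illustrative sketch (not part of the original module): the nested configs are
# plain dataclasses, so a workflow configuration can be assembled and adjusted
# programmatically. The values below are hypothetical.
#   cfg = TrainWorkflowConfig()
#   cfg.experiment.name = 'example_experiment'
#   cfg.training.epochs = 10
#   cfg.predict.datasets = ['test']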
@dataclasses.dataclass
class TestWorkflowConfig(hyperparams.Config):
"""Test model workflow configuration.
Attributes:
experiment: An `ExperimentConfig`. General experiment configuration.
data: A `DataConfig`. The dataset/s configuration.
model: A `ModelConfig`. The model configuration.
predict: A `PredictConfig`. The | |
# imports to make app work correctly
from flask import Flask, render_template, flash, redirect, url_for, session, request, logging , send_from_directory, send_file
from flask_mysqldb import MySQL
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from passlib.hash import sha256_crypt
from functools import wraps
from flask_mail import Mail, Message
from shutil import rmtree
# from PIL import Image
# from io import BytesIO
# from flask.helpers import flash
import MySQLdb
import os
import random
import string
# import uuid
from werkzeug.utils import secure_filename
# from flask_uploads import UploadSet, configure_uploads, IMAGES
# connect to database in localhost to create database and their tables
database = MySQLdb.connect("localhost", "OSAMA", "OSAMA")
cursor = database.cursor()
# cursor.execute("DROP DATABASE IF EXISTS osama_blog;")
cursor.execute("CREATE DATABASE IF NOT EXISTS osama_blog DEFAULT CHARSET UTF8")
database.select_db('osama_blog')
# cursor.execute("DROP TABLE IF EXISTS users;")
# cursor.execute("DROP TABLE IF EXISTS articles;")
# cursor.execute("DROP TABLE IF EXISTS short_link;")
# cursor.execute("DROP TABLE IF EXISTS about_me;")
cursor.execute("CREATE TABLE IF NOT EXISTS users(\
id INT(11) AUTO_INCREMENT PRIMARY KEY,\
name VARCHAR(100) NOT NULL,\
email VARCHAR(100) NOT NULL,\
username VARCHAR(100) NOT NULL,\
password VARCHAR(100) NOT NULL,\
files TEXT NOT NULL,\
register_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP );")
cursor.execute("CREATE TABLE IF NOT EXISTS articles(\
id INT(11) AUTO_INCREMENT PRIMARY KEY,\
category VARCHAR(100) NOT NULL,\
title VARCHAR(255) NOT NULL,\
author VARCHAR(100) NOT NULL,\
body TEXT NOT NULL,\
files TEXT NOT NULL,\
written_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP);")
cursor.execute("CREATE TABLE IF NOT EXISTS short_link(\
id INT(11) AUTO_INCREMENT PRIMARY KEY,\
original_link varchar(255) UNIQUE NOT NULL,\
short_link TEXT NOT NULL,\
process_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP );")
cursor.execute("CREATE TABLE IF NOT EXISTS about_me(picture VARCHAR(100) PRIMARY KEY);")
database.close()
app = Flask(__name__)
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_sqlalchemy import SQLAlchemy
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://OSAMA:OSAMA@localhost/osama_blog2'
db = SQLAlchemy(app)
admin = Admin(app)
# application configuration
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
# app.config['MYSQL_PASSWORD'] = '<PASSWORD>'
app.config['MYSQL_DB'] = 'osama_blog'
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
mysql = MySQL(app)
# application configuration to send email with gmail
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 587
app.config['MAIL_USERNAME'] = '<EMAIL>'
app.config['MAIL_PASSWORD'] = '<PASSWORD>'
app.config['MAIL_USE_TLS'] = True
app.config['MAIL_USE_SSL'] = False
mail = Mail(app)
# website icon
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'),
'i.ico', mimetype='image/vnd.microsoft.icon')
# home page
@app.route('/')
def home_page():
return render_template('home.html')
# url shortener form
@app.route('/result', methods=['GET', 'POST'])
def result():
cur = mysql.connection.cursor()
r = request.form['url']
    if r is None or r == "" or r == " ":
return redirect(url_for('home_page'))
cur.execute("SELECT original_link FROM short_link WHERE original_link=%s", [r])
sl = cur.fetchone()
if r in str(sl):
cur.execute("SELECT * FROM short_link WHERE original_link=%s", [r])
sl2 = cur.fetchone()
return render_template('url_exists.html', sl2=sl2)
else:
# def random_string(string_length=5):
# return str(uuid.uuid4())[0:string_length]
def random_string():
return ("".join([random.choice(string.ascii_letters + string.digits) for i in range(6)]))
cur.execute("INSERT INTO short_link(original_link, short_link)\
VALUES(%s, %s)", ([r], [random_string()]))
mysql.connection.commit()
res = cur.execute("SELECT * FROM short_link WHERE original_link=%s", [r])
short_link = cur.fetchone()
if res > 0:
return render_template('url_result.html', short_link=short_link)
else:
msg = 'No Urls Found!'
return render_template('url_fail.html', msg=msg, short_link=short_link)
cur.close()
return redirect(url_for('home_page'))
# redirect page
@app.route('/redirect/<url_name>')
# def href(url_name):
def redirect_link(url_name):
cur = mysql.connection.cursor()
cur.execute("SELECT short_link FROM short_link WHERE short_link=%s", [url_name])
ff = cur.fetchone()
if url_name in str(ff):
cur.execute("SELECT original_link FROM short_link WHERE short_link=%s", [url_name])
f = cur.fetchone()
for i in f:
return redirect(f[i])
else:
return render_template('url_redirect_fail.html')
# return redirect(url_for(f.original_link))
msg = 'No Urls Found!'
cur.close()
return render_template('url_redirect_fail.html', msg=msg)
# about page
@app.route('/about')
def about():
cur = mysql.connection.cursor()
# cur.execute("INSERT INTO about_me(picture)\
# VALUES(%s)", ['about_me.png'])
# mysql.connection.commit()
cur.execute("SELECT picture FROM about_me")
article = cur.fetchone()
cur.close()
return render_template('about.html', article=article)
# all articles page
@app.route('/articles')
def articles():
cur = mysql.connection.cursor()
result = cur.execute("SELECT * FROM articles")
articles = cur.fetchall()
if result > 0:
return render_template('articles.html', articles=articles)
else:
msg = 'No Articles Found'
return render_template('articles.html', msg=msg)
cur.close()
# redirect to categories.html from articles page form
@app.route('/categories', methods=['post'])
def categories():
if request.method == "POST":
cur = mysql.connection.cursor()
if request.form['categories'] == request.form['categories']:
result = cur.execute("SELECT * FROM articles WHERE category=%s", [request.form['categories']])
articles = cur.fetchall()
cur.close()
if result > 0:
# flash("done!", "success")
return render_template('categories.html', articles=articles)
else:
# flash("done2!", "success")
msg = 'No Articles Found'
return render_template('categories.html', msg=msg)
# elif request.form['categories'] == request.form['categories']:
# result = cur.execute("SELECT * FROM articles WHERE category=%s", [request.form['categories']])
# articles = cur.fetchall()
# cur.close()
# if result > 0:
# flash("done!", "success")
# return render_template('categories.html', articles=articles)
# else:
# flash("done2!", "success")
# msg = 'No Articles Found'
# return render_template('categories.html', msg=msg)
return render_template('categories.html')
# search by category from dashboard page
@app.route('/search_by_categories/<category>', methods=['post', 'get'])
def category(category):
cur = mysql.connection.cursor()
result = cur.execute("SELECT * FROM `osama_blog`.`articles` \
WHERE(CONVERT(`category` USING utf8)\
LIKE %s)", ["%" + category + "%"])
articles = cur.fetchall()
cur.close()
if result > 0:
return render_template('categories.html', articles=articles)
else:
msg = 'No Articles Found'
return render_template('categories.html', msg=msg)
# display article picture from dashboard page
@app.route('/article_picture/<id>/<picture_name>', methods=['post', 'get'])
def article_picture(id, picture_name):
return render_template('article_picture.html', id=id, picture_name=picture_name)
# display article picture from article page
@app.route('/article_picture_inner/<id>/<user_name>/<pic>', methods=['post', 'get'])
def article_picture_inner(id, user_name, pic):
return render_template('article_picture_inner.html', id=id, user_name=user_name, pic=pic)
# display profile picture from dashboard page
@app.route('/profile_picture/<pic>', methods=['post', 'get'])
def profile_picture(pic):
username = session['username']
return render_template('profile_picture.html', pic=pic, username=username)
# show article page
# @app.route('/article/<string:id>/')
# def article(id):
# if 'logged_in' in session:
# user_name = session['username']
# cur = mysql.connection.cursor()
# cur.execute("SELECT * FROM articles WHERE id = {}".format(id))
# article = cur.fetchone()
# return render_template('article.html', article=article, user_name=user_name, id=id)
# cur.close()
# else:
# cur = mysql.connection.cursor()
# cur.execute("SELECT * FROM articles WHERE id = {}".format(id))
# article = cur.fetchone()
# cur.execute("SELECT author FROM articles WHERE id = {}".format(id))
# art = cur.fetchone()
# for user in art:
# session['author'] = art[user]
# return render_template('article.html', article=article, user_name=art[user], id=id)
# return render_template('article.html', article=article, user_name=session['author'], id=id)
@app.route('/article/<string:id>/')
def article(id):
cur = mysql.connection.cursor()
cur.execute("SELECT * FROM articles WHERE id = {}".format(id))
article = cur.fetchone()
cur.execute("SELECT author FROM articles WHERE id = {}".format(id))
art = cur.fetchone()
for user in art:
return render_template('article.html', article=article, user_name=art[user], id=id)
# register form validators
class RegisterForm(Form):
name = StringField('Name', [validators.Length(min=1, max=50)])
email = StringField('Email', [validators.Email("Field must be a valid email address.")])
username = StringField('Username', [validators.Length(min=6, max=100)])
password = PasswordField('Password',
[validators.DataRequired(), validators.Length(min=6, max=100),
validators.EqualTo('confirm', message='Passwords Do Not Match')])
confirm = PasswordField('Confirm Password', [validators.DataRequired()])
# register page
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if request.method == 'POST' and form.validate():
name = form.name.data.lower()
email = form.email.data.lower()
username = form.username.data
        password = sha256_crypt.encrypt(str(form.password.data))
cur = mysql.connection.cursor()
cur.execute("SELECT username FROM users WHERE username = BINARY %s", [username])
res = cur.fetchone()
if username in str(res):
msg = "Username Already Exists"
return render_template('register.html', form=form, msg=msg)
else:
cur.execute("INSERT INTO users(name, email, username, password)\
VALUES(%s, %s, %s, %s)", (name, email, username, password))
mysql.connection.commit()
cur.close()
flash('You Are Now Registered And You Can login!', 'success')
return redirect(url_for('login'))
return render_template('register.html', form=form)
# login page
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
username = request.form['username']
password_candidate = request.form['password']
cur = mysql.connection.cursor()
# collate utf8_bin
result = cur.execute("SELECT * FROM users WHERE username = BINARY %s", [username])
if result > 0:
data = cur.fetchone()
password = data['password']
if sha256_crypt.verify(password_candidate, password):
session['logged_in'] = True
session['username'] = username
session['register_date'] = data['register_date']
flash('Now You Are Logged In ', 'success')
return redirect(url_for('dashboard'))
else:
error = 'Wrong Password!'
return render_template('login.html', error=error)
cur.close()
else:
error = 'Username Can Not Be Found!'
return render_template('login.html', error=error)
return render_template('login.html')
# check if user is still logged in
def is_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session :
return f(*args, **kwargs)
else:
flash('Unauthorized, Please Login', 'danger')
return redirect(url_for('login'))
return wrap
# log out
@app.route('/logout')
@is_logged_in
def logout():
session.clear()
flash('You Are Now Logged Out', 'success')
return redirect(url_for('login'))
# dashboard page
@app.route('/dashboard')
@is_logged_in
def dashboard():
cur = mysql.connection.cursor()
username = session['username']
result = cur.execute("SELECT * FROM articles WHERE author=%s", [username])
articles = cur.fetchall()
cur.execute("SELECT files FROM users WHERE username=%s", [username])
art = cur.fetchone()
if result > 0:
return render_template('dashboard.html', articles=articles, username=username, art=art)
else:
msg = 'No Articles Found'
return render_template('dashboard.html', msg=msg, username=username, art=art)
cur.close()
# add new article validators
class ArticleForm(Form):
title = StringField('Title', [validators.Length(min=1, max=200)])
body = TextAreaField('Body', [validators.Length(min=10)])
# add new article page
@app.route('/add_article', methods=['GET', 'POST'])
@is_logged_in
def add_article():
form = ArticleForm(request.form)
if request.method == 'POST' and form.validate():
title = form.title.data
body = form.body.data
cur = mysql.connection.cursor()
cur.execute("INSERT INTO articles(category, title, body, author) \
VALUES(%s, %s, %s, %s)", ([request.form['categories']], title, body, session['username']))
mysql.connection.commit()
cur.close()
flash('Article Has Been Created Successfully', 'success')
return redirect(url_for('dashboard'))
return render_template('add_article.html', form=form)
# edit article page
@app.route('/edit_article/<string:id>', methods=['GET', 'POST'])
@is_logged_in
def edit_article(id):
session['edit_article_id'] = id
cur = mysql.connection.cursor()
cur.execute("SELECT * FROM articles WHERE id = %s", [id])
article = cur.fetchone()
cur.close()
form = ArticleForm(request.form)
form.title.data = article['title']
form.body.data = article['body']
if request.method == 'POST' and form.validate():
title = request.form['title']
body = request.form['body']
cur = mysql.connection.cursor()
app.logger.info(title)
cur.execute("UPDATE articles SET category=%s, title=%s, body=%s WHERE id=%s", ([request.form['categories']], title, body, id))
mysql.connection.commit()
cur.close()
flash('Article Has Been Updated Successfully', 'success')
return redirect(url_for('dashboard'))
return render_template('edit_article.html', form=form)
# delete article
@app.route('/delete_article/<string:id>', methods=['POST'])
@is_logged_in
def delete_article(id):
cur = mysql.connection.cursor()
| |
items2search:
item_search += '&type={}'.format(item)
for status in stati2search:
item_search += '&status={}'.format(status)
if id_list:
        itemids = re.split(r',|\s+', id_list)
itemids = [id for id in itemids if id]
else:
itemres = ff_utils.search_metadata(item_search, key=connection.ff_keys, page_limit=500)
itemids = [item.get('uuid') for item in itemres]
es_items = ff_utils.get_es_metadata(itemids, key=connection.ff_keys, chunk_size=200, is_generator=True)
for es_item in es_items:
label = es_item.get('embedded').get('display_title')
desc = es_item.get('object').get('description')
lab = es_item.get('embedded').get('lab').get('display_title')
status = es_item.get('properties').get('status', 'in review by lab')
opfs = _get_all_other_processed_files(es_item)
id2links[es_item.get('uuid')] = [li.get('uuid') for li in es_item.get('linked_uuids_embedded')]
id2status[es_item.get('uuid')] = STATUS_LEVEL.get(status)
id2item[es_item.get('uuid')] = {'label': label, 'status': status, 'lab': lab,
'description': desc, 'to_ignore': list(set(opfs))}
mismatches = {}
linked2get = {}
for i, iid in enumerate(itemids):
linkedids = id2links.get(iid)
if not linkedids: # item with no link
continue
istatus = id2status.get(iid)
for lid in linkedids:
lstatus = id2status.get(lid)
if not lstatus: # add to list to get
linked2get.setdefault(lid, []).append(iid)
elif lstatus < istatus: # status mismatch for an item we've seen before
ignore = id2item.get(iid).get('to_ignore')
if ignore is not None and lid in ignore:
continue
else:
mismatches.setdefault(iid, []).append(lid)
if len(linked2get) > MIN_CHUNK_SIZE or i + 1 == len(itemids): # only query es when we have more than a set number of ids (500)
linked2chk = ff_utils.get_es_metadata(list(linked2get.keys()), key=connection.ff_keys,
chunk_size=200, is_generator=True)
for litem in linked2chk:
luuid = litem.get('uuid')
listatus = litem.get('properties').get('status', 'in review by lab')
llabel = litem.get('item_type')
lstatus = STATUS_LEVEL.get(listatus)
# add info to tracking dict
id2status[luuid] = lstatus
id2item[luuid] = {'label': llabel, 'status': listatus}
for lfid in set(linked2get[luuid]):
# check to see if the linked item is something to ignore for that item
ignore = id2item[lfid].get('to_ignore')
if ignore is not None and luuid in ignore:
continue
elif lstatus < id2status[lfid]: # status mismatch so add to report
mismatches.setdefault(lfid, []).append(luuid)
linked2get = {} # reset the linked id dict
if mismatches:
brief_output = {}
full_output = {}
for eid, mids in mismatches.items():
eset = id2item.get(eid)
key = '{} | {} | {} | {}'.format(
eid, eset.get('label'), eset.get('status'), eset.get('description'))
brief_output.setdefault(eset.get('lab'), {}).update({key: len(mids)})
for mid in mids:
mitem = id2item.get(mid)
val = '{} | {} | {}'.format(mid, mitem.get('label'), mitem.get('status'))
full_output.setdefault(eset.get('lab'), {}).setdefault(key, []).append(val)
check.status = 'WARN'
check.summary = "MISMATCHED STATUSES FOUND"
check.description = 'Released or pre-release items have linked items with unreleased status'
check.brief_output = brief_output
check.full_output = full_output
else:
check.status = 'PASS'
check.summary = "NO MISMATCHES FOUND"
check.description = 'all statuses present and correct'
return check
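# Shape sketch of the outputs assembled above (illustrative values only):
#   brief_output = {'Some Lab': {'<uuid> | label | status | description': 2}}
#   full_output  = {'Some Lab': {'<uuid> | label | status | description':
#                                ['<linked uuid> | FileProcessed | in review by lab']}}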
@check_function(id_list=None)
def check_opf_status_mismatch(connection, **kwargs):
'''
Check to make sure that collections of other_processed_files don't have
status mismatches. Specifically, checks that (1) all files in an
other_processed_files collection have the same status; and (2) the status of
the experiment set is at the same status level as, or higher than, the status of
files in the other_processed_files collection (e.g., it flags the case where the
other_processed_files are released while the experiment set is still in review by lab).
'''
check = CheckResult(connection, 'check_opf_status_mismatch')
opf_set = ('search/?type=ExperimentSet&other_processed_files.title%21=No+value&field=status'
'&field=other_processed_files&field=experiments_in_set.other_processed_files')
opf_exp = ('search/?type=ExperimentSet&other_processed_files.title=No+value'
'&experiments_in_set.other_processed_files.title%21=No+value'
'&field=experiments_in_set.other_processed_files&field=status')
opf_set_results = ff_utils.search_metadata(opf_set, key=connection.ff_keys)
opf_exp_results = ff_utils.search_metadata(opf_exp, key=connection.ff_keys)
results = opf_set_results + opf_exp_results
# extract file uuids
files = []
for result in results:
if result.get('other_processed_files'):
for case in result['other_processed_files']:
files.extend([i['uuid'] for i in case['files']])
if case.get('higlass_view_config'):
files.append(case['higlass_view_config'].get('uuid'))
if result.get('experiments_in_set'):
for exp in result['experiments_in_set']:
for case in exp['other_processed_files']:
files.extend([i['uuid'] for i in case['files']])
# get metadata for files, to collect status
resp = ff_utils.get_es_metadata(list(set(files)),
sources=['links.quality_metric', 'object.status', 'uuid'],
key=connection.ff_keys)
opf_status_dict = {item['uuid']: item['object']['status'] for item in resp if item['uuid'] in files}
opf_linked_dict = {
item['uuid']: item.get('links', {}).get('quality_metric', []) for item in resp if item['uuid'] in files
}
quality_metrics = [uuid for item in resp for uuid in item.get('links', {}).get('quality_metric', [])]
qm_resp = ff_utils.get_es_metadata(list(set(quality_metrics)),
sources=['uuid', 'object.status'],
key=connection.ff_keys)
opf_other_dict = {item['uuid']: item['object']['status'] for item in qm_resp if item['uuid'] not in files}
check.full_output = {}
for result in results:
hg_dict = {item['title']: item.get('higlass_view_config', {}).get('uuid')
for item in result.get('other_processed_files', [])}
titles = [item['title'] for item in result.get('other_processed_files', [])]
titles.extend([item['title'] for exp in result.get('experiments_in_set', [])
for item in exp.get('other_processed_files', [])])
titles = list(set(titles))
problem_dict = {}
for title in titles:
file_list = [item for fileset in result.get('other_processed_files', [])
for item in fileset['files'] if fileset['title'] == title]
file_list.extend([item for exp in result.get('experiments_in_set', [])
for fileset in exp['other_processed_files']
for item in fileset['files'] if fileset['title'] == title])
statuses = set([opf_status_dict[f['uuid']] for f in file_list])
if not statuses:
# to account for empty sections that may not yet contain files
pass
elif len(statuses) > 1: # status mismatch in opf collection
scores = set([STATUS_LEVEL.get(status, 0) for status in list(statuses)])
if len(scores) > 1:
problem_dict[title] = {f['@id']: {'status': opf_status_dict[f['uuid']]} for f in file_list}
if hg_dict.get(title):
problem_dict[title][hg_dict[title]] = {'status': opf_status_dict[hg_dict[title]]}
elif hg_dict.get(title) and STATUS_LEVEL[list(statuses)[0]] != STATUS_LEVEL[opf_status_dict[hg_dict[title]]]:
if not (list(statuses)[0] == 'pre-release' and opf_status_dict[hg_dict[title]] == 'released to lab'):
problem_dict[title] = {'files': list(statuses)[0],
'higlass_view_config': opf_status_dict[hg_dict[title]]}
elif STATUS_LEVEL[result['status']] < STATUS_LEVEL[list(statuses)[0]]:
problem_dict[title] = {result['@id']: result['status'], title: list(statuses)[0]}
for f in file_list:
if opf_linked_dict.get(f['uuid']):
for qm in opf_linked_dict[f['uuid']]:
if (STATUS_LEVEL[opf_other_dict[qm]] != STATUS_LEVEL[opf_status_dict[f['uuid']]]):
if title not in problem_dict:
problem_dict[title] = {}
if f['@id'] not in problem_dict[title]:
problem_dict[title][f['@id']] = {}
problem_dict[title][f['@id']]['quality_metric'] = {
'uuid': opf_linked_dict[f['uuid']], 'status': opf_other_dict[qm]
}
if problem_dict:
check.full_output[result['@id']] = problem_dict
if check.full_output:
check.brief_output = list(check.full_output.keys())
check.status = 'WARN'
check.summary = 'Other processed files with status mismatches found'
check.description = ('{} Experiment Sets found with status mismatches in '
'other processed files'.format(len(check.brief_output)))
else:
check.status = "PASS"
check.summary = 'All other processed files have matching statuses'
check.description = 'No Experiment Sets found with status mismatches in other processed files'
return check
@check_function()
def check_validation_errors(connection, **kwargs):
'''
Counts number of items in fourfront with schema validation errors,
returns link to search if found.
'''
check = CheckResult(connection, 'check_validation_errors')
search_url = 'search/?validation_errors.name!=No+value&type=Item'
results = ff_utils.search_metadata(search_url + '&field=@id', key=connection.ff_keys)
if results:
types = {item for result in results for item in result['@type'] if item != 'Item'}
check.status = 'WARN'
check.summary = 'Validation errors found'
check.description = ('{} items found with validation errors, comprising the following '
'item types: {}. \nFor search results see link below.'.format(
len(results), ', '.join(list(types))))
check.ff_link = connection.ff_server + search_url
else:
check.status = 'PASS'
check.summary = 'No validation errors'
check.description = 'No validation errors found.'
return check
def _get_all_other_processed_files(item):
toignore = []
# get directly linked other processed files
for pfinfo in item.get('properties').get('other_processed_files', []):
toignore.extend([pf for pf in pfinfo.get('files', []) if pf is not None])
# toignore.extend([pf['quality_metric'] for pf in pfinfo.get('files', []) if pf and pf.get('quality_metric')])
# qcs = [pf for pf in pfinfo.get('files', []) if pf is not None]
hgv = pfinfo.get('higlass_view_config')
if hgv:
toignore.append(hgv)
# experiment sets can also have linked opfs from experiment
for pfinfo in item['embedded'].get('other_processed_files', []):
toignore.extend([pf['quality_metric']['uuid'] for pf in pfinfo.get('files') if pf and pf.get('quality_metric')])
expts = item.get('embedded').get('experiments_in_set')
if expts is not None:
for exp in expts:
opfs = exp.get('other_processed_files')
if opfs is not None:
for pfinfo in opfs:
toignore.extend([pf.get('uuid') for pf in pfinfo.get('files', []) if pf is not None])
toignore.extend([pf['quality_metric']['uuid'] for pf in pfinfo.get('files', []) if pf and pf.get('quality_metric')])
hgv = pfinfo.get('higlass_view_config')
if hgv:
toignore.append(hgv)
return toignore
@check_function()
def check_bio_feature_organism_name(connection, **kwargs):
'''
Attempts to identify an organism to add to the organism_name field in BioFeature items
checks the linked genes or the genomic regions and then description
'''
check = CheckResult(connection, 'check_bio_feature_organism_name')
check.action = "patch_bio_feature_organism_name"
# create some mappings
organism_search = 'search/?type=Organism'
organisms = ff_utils.search_metadata(organism_search, key=connection.ff_keys)
orgn2name = {o.get('@id'): o.get('name') for o in organisms}
# add special cases
orgn2name['unspecified'] = 'unspecified'
orgn2name['multiple organisms'] = 'multiple organisms'
genome2orgn = {o.get('genome_assembly'): o.get('@id') for o in organisms if 'genome_assembly' in o}
gene_search = 'search/?type=Gene'
genes = ff_utils.search_metadata(gene_search, key=connection.ff_keys)
gene2org = {g.get('@id'): g.get('organism').get('@id') for g in genes}
# get all BioFeatures
biofeat_search = 'search/?type=BioFeature'
biofeatures = ff_utils.search_metadata(biofeat_search, key=connection.ff_keys)
matches = 0
name_trumps_guess = 0
mismatches = 0
to_patch = {}
brief_report = []
to_report = {'name_trumps_guess': {}, 'lost_and_found': {}, 'orphans': {}, 'mismatches': {}}
for biofeat in biofeatures:
linked_orgn_name = None
orgn_name = biofeat.get('organism_name')
biogenes = biofeat.get('relevant_genes')
if biogenes is not None:
borgns = [gene2org.get(g.get('@id')) for g in biogenes if '@id' in g]
linked_orgn_name = _get_orgname_from_atid_list(borgns, orgn2name)
if not linked_orgn_name:  # didn't get it from genes
<filename>cupy/_core/_gufuncs.py<gh_stars>1000+
import re
import numpy
import cupy
import cupy._core._routines_manipulation as _manipulation
from cupy._core._dtype import get_dtype
from cupy._core import internal
# Signature parsing code and dimension accessing have been borrowed
# from dask
# https://github.com/dask/dask/blob/61b578f5a3ad88cbc6a8b9a73ce08c551bd969fa/dask/array/gufunc.py#L12-L55
_DIMENSION_NAME = r'\w+\?*'
_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*,?)?'.format(_DIMENSION_NAME)
_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST)
_INPUT_ARGUMENTS = '(?:{0:}(?:,{0:})*,?)?'.format(_ARGUMENT)
_OUTPUT_ARGUMENTS = '{0:}(?:,{0:})*'.format(
_ARGUMENT
) # Use `'{0:}(?:,{0:})*,?'` if gufunc-
# signature should be allowed for length 1 tuple returns
_SIGNATURE = '^{0:}->{1:}$'.format(_INPUT_ARGUMENTS, _OUTPUT_ARGUMENTS)
def _parse_gufunc_signature(signature):
# The code has been modified from dask to support optional dimensions
if not isinstance(signature, str):
raise TypeError('Signature is not a string')
if signature == '' or signature is None:
raise ValueError('Signature cannot be empty')
signature = signature.replace(' ', '')
if not re.match(_SIGNATURE, signature):
raise ValueError('Not a valid gufunc signature: {}'.format(signature))
in_txt, out_txt = signature.split('->')
ins = [tuple(x.split(',')) if x != '' else ()
for x in in_txt[1:-1].split('),(')]
outs = [tuple(y.split(',')) if y != '' else ()
for y in out_txt[1:-1].split('),(')]
# TODO(ecastill) multiple output support
if len(outs) > 1:
raise ValueError('Currently more than 1 output is not supported')
return ins, outs
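# Illustrative example of the parser above (not part of the original source):
# _parse_gufunc_signature('(i,j),(j,k)->(i,k)') returns
# ([('i', 'j'), ('j', 'k')], [('i', 'k')]), i.e. the input core-dimension
# tuples and the output core-dimension tuples. A scalar signature such as
# '(),()->()' yields empty tuples for every argument.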
def _validate_normalize_axes(
axes, axis, keepdims, input_coredimss, output_coredimss
):
# This code credit goes to Dask
# https://github.com/dask/dask/blob/61b578f5a3ad88cbc6a8b9a73ce08c551bd969fa/dask/array/gufunc.py#L58-L172
nin = len(input_coredimss)
nout = (
1 if not isinstance(output_coredimss, list) else len(output_coredimss)
)
if axes is not None and axis is not None:
raise ValueError(
'Only one of `axis` or `axes` keyword arguments should be given')
if axes and not isinstance(axes, list):
raise ValueError('`axes` has to be of type list')
# output_coredimss = output_coredimss if nout > 1 else [output_coredimss]
filtered_core_dims = list(filter(len, input_coredimss))
nr_outputs_with_coredims = len(
[True for x in output_coredimss if len(x) > 0])
if keepdims:
if nr_outputs_with_coredims > 0:
raise ValueError('`keepdims` can only be used for scalar outputs')
output_coredimss = len(output_coredimss) * [filtered_core_dims[0]]
core_dims = input_coredimss + output_coredimss
if axis is not None:
if not isinstance(axis, int):
raise ValueError('`axis` argument has to be an integer value')
if filtered_core_dims:
cd0 = filtered_core_dims[0]
if len(cd0) != 1:
raise ValueError(
'`axis` can be used only, if one core dimension is present'
)
for cd in filtered_core_dims:
if cd0 != cd:
raise ValueError(
'To use `axis`, all core dimensions have to be equal'
)
# Expand defaults or axis
if axes is None:
if axis is not None:
axes = [(axis,) if cd else tuple() for cd in core_dims]
else:
axes = [tuple(range(-len(icd), 0)) for icd in core_dims]
axes = [(a,) if isinstance(a, int) else a for a in axes]
if (
(nr_outputs_with_coredims == 0)
and (nin != len(axes))
and (nin + nout != len(axes))
) or ((nr_outputs_with_coredims > 0) and (nin + nout != len(axes))):
raise ValueError(
'The number of `axes` entries is not equal the number'
' of input and output arguments')
# Treat outputs
output_axes = axes[nin:]
output_axes = (
output_axes
if output_axes
else [tuple(range(-len(ocd), 0)) for ocd in output_coredimss]
)
input_axes = axes[:nin]
# Assert we have as many axes as output core dimensions
for idx, (iax, icd) in enumerate(zip(input_axes, input_coredimss)):
if len(iax) != len(icd):
raise ValueError(
f'The number of `axes` entries for argument #{idx}'
' is not equal the number of respective input core'
' dimensions in signature')
if not keepdims:
for idx, (oax, ocd) in enumerate(zip(output_axes, output_coredimss)):
if len(oax) != len(ocd):
raise ValueError(
f'The number of `axes` entries for argument #{idx}'
' is not equal the number of respective output core'
' dimensions in signature')
else:
if input_coredimss:
icd0 = input_coredimss[0]
for icd in input_coredimss:
if icd0 != icd:
raise ValueError(
'To use `keepdims`, all core dimensions'
' have to be equal')
iax0 = input_axes[0]
output_axes = [iax0 for _ in output_coredimss]
return input_axes, output_axes
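# Illustrative example (assuming a matmul-like signature): for
# input_coredimss=[('i', 'j'), ('j', 'k')], output_coredimss=[('i', 'k')] and
# axes=None, axis=None, keepdims=False, the defaults expand to
# input_axes=[(-2, -1), (-2, -1)] and output_axes=[(-2, -1)], i.e. the core
# dimensions are taken from the trailing axes of each operand.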
class _OpsRegister:
'''
Holds the ops for each dtype signature, e.g. ('ff->f', func1),
and allows look-ups on them
'''
class _Op:
def __init__(self, in_types, out_types, func):
self.func = func
self.in_types = tuple(numpy.dtype(i) for i in in_types)
self.out_types = tuple(numpy.dtype(o) for o in out_types)
self.sig_str = (''.join(
in_t.char for in_t in self.in_types) + '->' + ''.join(
out_t.char for out_t in self.out_types))
def __init__(self, signatures, default_func, nin, nout, name):
self._default_func = default_func
self._nin = nin
self._nout = nout
self._ops = self._process_signatures(signatures)
self._name = name
def _sig_str_to_tuple(self, sig):
sig = sig.replace(' ', '')
toks = sig.split('->')
if len(toks) != 2:
raise ValueError(f'signature {sig} for dtypes is invalid')
else:
ins, outs = toks
return ins, outs
def _process_signatures(self, signatures):
ops = []
for sig in signatures:
if isinstance(sig, tuple):
sig, op = sig
else:
op = self._default_func
ins, outs = self._sig_str_to_tuple(sig)
# Check the number of inputs and outputs matches the gufunc sig
if len(ins) != self._nin:
raise ValueError(
f'signature {sig} for dtypes is invalid: number of inputs '
'is not consistent with general signature')
if len(outs) != self._nout:
raise ValueError(
f'signature {sig} for dtypes is invalid: number of outputs '
'is not consistent with general signature')
ops.append(_OpsRegister._Op(ins, outs, op))
return ops
def _determine_from_args(self, args, casting):
n = len(args)
in_types = tuple(arg.dtype for arg in args)
for op in self._ops:
op_types = op.in_types
for i in range(n):
it = in_types[i]
ot = op_types[i]
if not numpy.can_cast(it, ot, casting=casting):
break
else:
return op
return None
def _determine_from_dtype(self, dtype):
for op in self._ops:
op_types = op.out_types
for t in op_types:
if t != dtype:
break
else:
return op
return None
def _determine_from_signature(self, signature):
# Let's convert the signature, as it can be a tuple of tuples
# or a string
if isinstance(signature, tuple):
# create a string to do a look-up on the ops
if len(signature) == 1:
raise TypeError(
'The use of a length 1 tuple for the ufunc `signature` is'
' not allowed. Use `dtype` or fill the tuple with'
' `None`s.')
nin = self._nin
nout = self._nout
if len(signature) != (nin + nout):
raise TypeError(
'A type-tuple must be specified of length 1 or 3 for ufunc'
f' {self._name}')
signature = ''.join(
numpy.dtype(t).char for t in signature[:nin]) + '->' + ''.join(
numpy.dtype(t).char for t in signature[nin:nin+nout])
if isinstance(signature, str):
is_out = len(signature) == 1
for op in self._ops:
if is_out:
for t in op.out_types:
if t.char != signature:
break
else:
return op
else:
if op.sig_str == signature:
return op
raise TypeError('No loop matching the specified signature and'
f' casting was found for ufunc {self._name}')
def determine_dtype(self, args, dtype, casting, signature):
ret_dtype = None
func = self._default_func
if signature is not None:
# TODO(ecastill) use an externally provided signature to
# find the typecasting rules
op = self._determine_from_signature(signature)
elif dtype is not None:
if type(dtype) == tuple:
# TODO(ecastill) support dtype tuples
raise RuntimeError('dtype with tuple is not yet supported')
op = self._determine_from_dtype(dtype)
else:
op = self._determine_from_args(args, casting)
if op is None:
# Should we allow op to be none?
if dtype is None:
dtype = args[0].dtype
for arg in args:
ret_dtype = numpy.promote_types(dtype, arg.dtype)
else:
ret_dtype = get_dtype(dtype)
else:
# Convert args to the op specified in_types
n_args = []
for i, (arg, in_type) in enumerate(zip(args, op.in_types)):
if numpy.can_cast(arg.dtype, in_type, casting=casting):
n_args.append(arg.astype(in_type, copy=False))
else:
raise TypeError(
f'cannot cast ufunc {self._name} input {i} from'
f' {arg.dtype} to {dtype} with casting rule'
f' {casting}')
args = n_args
ret_dtype = op.out_types[0]
func = op.func
return args, ret_dtype, func
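# Illustrative sketch of the dtype resolution above (func32/func64 are
# hypothetical callables): with signatures [('ff->f', func32), ('dd->d', func64)]
# registered, float32 inputs and no explicit dtype or signature,
# _determine_from_args picks the first op whose in_types all inputs can be cast
# to under `casting`, so determine_dtype returns the (possibly cast) args,
# float32 as ret_dtype and func32 as the function to call.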
class _GUFunc:
'''
Creates a Generalized Universal Function by wrapping a user
provided function with the signature.
``signature`` determines if the function consumes or produces core
dimensions. The remaining dimensions in given input arrays (``*args``)
are considered loop dimensions and are required to broadcast
naturally against each other.
Args:
func (callable):
Function to call like ``func(*args, **kwargs)`` on input arrays
(``*args``) that returns an array or tuple of arrays. If
multiple arguments with non-matching dimensions are supplied,
this function is expected to vectorize (broadcast) over axes of
positional arguments in the style of NumPy universal functions.
signature (string):
Specifies what core dimensions are consumed and produced by
``func``, following the NumPy generalized ufunc signature
specification.
supports_batched (bool, optional):
If the wrapped function supports passing the complete input
array with the loop and the core | |
ListShares(self, video_id: str, account_id: str='') -> Response
Lists the existing shares for an account.
ShareVideo(self, video_id: str, json_body: Union[str, dict], account_id: str='') -> Response
Shares a video to one or more affiliates.
GetShare(self, video_id: str, affiliate_account_id: str, account_id: str='') -> Response
Gets the share details for a video for a specific affiliate.
UnshareVideo(self, video_id: str, affiliate_account_id: str, account_id: str='') -> Response
Un-shares a video with a specific affiliate.
"""
# base URL for API calls
base_url = 'https://cms.api.brightcove.com/v1/accounts/{account_id}'
def __init__(self, oauth: OAuth, query: str=''):
"""
Args:
oauth (OAuth): OAuth instance to use for the API calls.
query (str, optional): Query string to be used by API calls. Defaults to ''.
"""
super().__init__(oauth=oauth, query=query)
#===========================================
# get who created a video
#===========================================
@staticmethod
def GetCreatedBy(video: dict) -> str:
"""
Gets creator of a video.
Args:
video (dict): Video object.
Returns:
str: name of the creator.
"""
creator = 'Unknown'
if video:
created_by = video.get('created_by')
if created_by:
ctype = created_by.get('type')
if ctype=='api_key':
creator = 'API'
elif ctype=='user':
creator = created_by.get('email')
return creator
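# Illustrative example (hypothetical video object): for
# video = {'created_by': {'type': 'user', 'email': 'editor@example.com'}}
# GetCreatedBy(video) returns 'editor@example.com'; an 'api_key' creator yields
# 'API', and missing creator information yields 'Unknown'.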
#===========================================
# account based video information
#===========================================
#region account based video information
def GetVideoCount(self, search_query: str='', account_id: str='') -> int:
"""
Gets count of videos for the account or a search.
Args:
search_query (str, optional): Search query. Defaults to ''.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
int: Number of videos in account, -1 if an error occurred.
"""
search_query = search_query or self.search_query
url = f'{self.base_url}/videos/count?q={search_query}'.format(account_id=account_id or self.oauth.account_id)
response = self.session.get(url, headers=self.oauth.headers)
if response.status_code == 200:
return int(response.json().get('count'))
return -1
def GetVideos(self, page_size: int=20, page_offset: int=0, search_query: str='', account_id: str='') -> Response:
"""
Gets a page of video objects.
Args:
page_size (int, optional): Number of items to return. Defaults to 20.
page_offset (int, optional): Number of items to skip. Defaults to 0.
search_query (str, optional): Search query. Defaults to ''.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
search_query = search_query or self.search_query
url = f'{self.base_url}/videos?limit={page_size}&offset={page_offset}&sort=created_at&q={search_query}'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url, headers=self.oauth.headers)
def GetLightVideos(self, page_size: int=20, page_offset: int=0, search_query: str='', account_id: str='') -> Response:
"""
Gets a page of video objects with fewer information.
Args:
page_size (int, optional): Number of items to return. Defaults to 20.
page_offset (int, optional): Number of items to skip. Defaults to 0.
search_query (str, optional): Search query. Defaults to ''.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
search_query = search_query or self.search_query
url = f'{self.base_url}/lightvideos?limit={page_size}&offset={page_offset}&sort=created_at&q={search_query}'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url, headers=self.oauth.headers)
#endregion
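# Paging sketch (illustrative; `api` is a hypothetical instance of this class):
#   total = api.GetVideoCount()
#   for offset in range(0, total, 20):
#       page = api.GetVideos(page_size=20, page_offset=offset).json()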
#===========================================
# single video operations
#===========================================
#region video management
def CreateVideo(self, video_title: str='Video Title', json_body: Optional[Union[dict,str]]=None, account_id: str='') -> Response:
"""
Create a new video object in the account.
Note: this does not ingest a video file - use the Dynamic Ingest API for ingestion
Args:
video_title (str, optional): Name/title of the video. Defaults to 'Video Title'.
json_body (Optional[Union[dict,str]], optional): JSON data with metadata. Defaults to None.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/'.format(account_id=account_id or self.oauth.account_id)
json_body = json_body or { "name": video_title }
return self.session.post(url=url, headers=self.oauth.headers, data=self._json_to_string(json_body))
def GetVideo(self, video_id: str, account_id: str='') -> Response:
"""
Gets a video object - you can include up to 10 video IDs separated by commas.
Args:
video_id (str): Video ID(s).
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers)
def DeleteVideo(self, video_id: str, account_id: str='') -> Response:
"""
Deletes one or more videos.
Note that for this operation you can specify a comma-delimited list of video ids to delete
Args:
video_id (str): Video ID(s).
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}'.format(account_id=account_id or self.oauth.account_id)
return self.session.delete(url=url, headers=self.oauth.headers)
def UpdateVideo(self, video_id: str, json_body: Union[str, dict], account_id: str='') -> Response:
"""
Update video metadata - note that this API does not ingest any media files - use the
Dynamic Ingest API for ingestion. Also note that replacing WebVTT text tracks is a
two-step operation - see Add WebVTT Captions for details.
Args:
video_id (str): Video ID.
json_body (Union[str, dict]): JSON data with video metadata.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}'.format(account_id=account_id or self.oauth.account_id)
return self.session.patch(url, headers=self.oauth.headers, data=self._json_to_string(json_body))
def GetVideoSources(self, video_id: str, account_id: str='') -> Response:
"""
Gets an array of sources (renditions) for a video.
Args:
video_id (str): Video ID(s).
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}/sources'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers)
def GetVideoImages(self, video_id: str, account_id: str='') -> Response:
"""
Gets the images for a video.
Args:
video_id (str): Video ID(s).
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}/images'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers)
#endregion
#===========================================
# audio tracks
#===========================================
#region audio tracks
def GetVideoAudioTracks(self, video_id: str, account_id: str='') -> Response:
"""
Gets the audio tracks for a video (Dynamic Delivery only).
Args:
video_id (str): Video ID.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}/audio_tracks'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers)
def GetVideoAudioTrack(self, video_id: str, track_id: str, account_id: str='') -> Response:
"""
Gets one audio track for a video by its ID (Dynamic Delivery only).
Args:
video_id (str): Video ID.
track_id (str): Audio track ID.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}/audio_tracks/{track_id}'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers)
def DeleteVideoAudioTrack(self, video_id: str, track_id: str, account_id: str='') -> Response:
"""
Deletes one audio track for a video by its ID (Dynamic Delivery only).
Args:
video_id (str): Video ID.
track_id (str): Audio track ID.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}/audio_tracks/{track_id}'.format(account_id=account_id or self.oauth.account_id)
return self.session.delete(url=url, headers=self.oauth.headers)
def UpdateVideoAudioTrack(self, video_id: str, track_id: str, json_body: Union[str, dict], account_id: str='') -> Response:
"""
Updates audio track metadata for a video (Dynamic Delivery only).
Args:
video_id (str): Video ID.
track_id (str): Audio track ID.
json_body (Union[str, dict]): JSON data with the audio track information.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}/audio_tracks/{track_id}'.format(account_id=account_id or self.oauth.account_id)
return self.session.patch(url=url, headers=self.oauth.headers, data=self._json_to_string(json_body))
#endregion
#===========================================
# digital master
#===========================================
#region digital master
def GetDigitalMasterInfo(self, video_id: str, account_id: str='') -> Response:
"""
Gets the stored digital master for a video, if any.
Args:
video_id (str): Video ID.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}/digital_master'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers)
def DeleteDigitalMaster(self, video_id: str, account_id: str='') -> Response:
"""
Deletes the archived digital master for a video. Be sure to read Digital Master Delete API
before using this operation to understand the implications.
Args:
video_id (str): Video ID.
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/videos/{video_id}/digital_master'.format(account_id=account_id or self.oauth.account_id)
return self.session.delete(url=url, headers=self.oauth.headers)
#endregion
#===========================================
# custom fields
#===========================================
#region custom fields
def GetVideoFields(self, account_id: str='') -> Response:
"""
Gets a list of video fields from account.
Args:
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/video_fields'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url=url, headers=self.oauth.headers)
def GetCustomFields(self, account_id: str='') -> Response:
"""
Gets a list of custom fields from account.
Args:
account_id (str, optional): Brightcove Account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url | |
-0.045, -0.047, -0.048, -0.049, -0.045, -0.033, -0.016],
[10, -0.043, -0.044, -0.041, -0.041, -0.040, -0.038, -0.034, -0.035, -0.035, -0.029, -0.022, -0.009],
[20, -0.052, -0.034, -0.036, -0.036, -0.035, -0.028, -0.024, -0.023, -0.020, -0.016, -0.010, -0.014],
[30, -0.062, -0.034, -0.027, -0.028, -0.027, -0.027, -0.023, -0.023, -0.019, -0.009, -0.025, -0.010]]).T)
tables['thrust_idle'] = create_table2D(
name='thrust_idle', row_label='alt_ft', col_label='mach',
data=np.array([ # alt, ft
[0, 0, 1.0e4, 2.0e4, 3.0e4, 4.0e4, 5.0e4],
[0, 1060, 670, 890, 1140, 1500, 1860],
[0.2, 635, 425, 690, 1010, 1330, 1700],
[0.4, 60, 25, 345, 755, 1130, 1525],
[0.6, -1020, -710, -300, 350, 910, 1360], # mach
[0.8, -2700, -1900, -1300, -247, 600, 1100],
[1.0, -3600, -1400, -595, -342, -200, 700]]).T)
tables['thrust_mil'] = create_table2D(
name='thrust_mil', row_label='alt_ft', col_label='mach',
data=np.array([ # alt, ft
[0, 0, 1.0e4, 2.0e4, 3.0e4, 4.0e4, 5.0e4],
[0, 12680, 9150, 6200, 3950, 2450, 1400],
[0.2, 12680, 9150, 6313, 4040, 2470, 1400],
[0.4, 12610, 9312, 6610, 4290, 2600, 1560], # mach
[0.6, 12640, 9839, 7090, 4660, 2840, 1660],
[0.8, 12390, 10176, 7750, 5320, 3250, 1930],
[1.0, 11680, 9848, 8050, 6100, 3800, 2310]]).T)
tables['thrust_max'] = create_table2D(
name='thrust_max', row_label='alt_ft', col_label='mach',
data=np.array([ # alt, ft
[0, 0, 1.0e4, 2.0e4, 3.0e4, 4.0e4, 5.0e4],
[0, 20000, 15000, 10800, 7000, 4000, 2500],
[0.2, 21420, 15700, 11225, 7323, 4435, 2600],
[0.4, 22700, 16860, 12250, 8154, 5000, 2835], # mach
[0.6, 24240, 18910, 13760, 9285, 5700, 3215],
[0.8, 26070, 21075, 15975, 11115, 6860, 3950],
[1.0, 28886, 23319, 18300, 13484, 8642, 5057]]).T)
def thrust():
power = ca.MX.sym('power')
alt = ca.MX.sym('alt')
rmach = ca.MX.sym('rmach')
tidl = tables['thrust_idle'](alt, rmach)
tmil = tables['thrust_mil'](alt, rmach)
tmax = tables['thrust_max'](alt, rmach)
thrust = ca.if_else(power < 50,
tidl + (tmil - tidl)*power*0.02,
tmil + (tmax - tmil)*(power - 50)*0.02)
return ca.Function('thrust',
[power, alt, rmach],
[thrust],
['power', 'alt', 'mach'],
['thrust'])
tables['thrust'] = thrust()
def propulsion():
dp = ca.MX.sym('dp')
thtl = ca.MX.sym('thtl')
power = ca.MX.sym('power')
power_cmd = ca.MX.sym('power_cmd')
# reciprocal of time constant
rtau = ca.Function('rtau', [dp], [ca.if_else(dp < 25, 1, ca.if_else(dp > 50, 0.1, 1.9 - 0.036*dp))])
# power command vs. throttle relationship
tgear = ca.Function('tgear', [thtl],
[ca.if_else(thtl < 0.77, 64.94*thtl, 217.38*thtl - 117.38)],
['thtl'], ['pow'])
# rate of change of power
pdot = ca.Function('pdot', [power, power_cmd], [
ca.if_else(power_cmd > 50,
ca.if_else(power > 50, 5*(power_cmd - power), rtau(60 - power)*(60 - power)),
ca.if_else(power > 50, 5*(40 - power), rtau(power_cmd - power)*(power_cmd - power))
)
], ['power', 'power_cmd'], ['pdot'])
tables['tgear'] = tgear
tables['pdot'] = pdot
propulsion()
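# Illustrative check of the throttle gearing defined above: tgear(0.5)
# evaluates to 64.94*0.5 = 32.47 (below the military-power breakpoint of 50),
# while tgear(1.0) gives 217.38 - 117.38 = 100 (full afterburner command).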
def atmosphere():
vt = ca.MX.sym('vt')
alt = ca.MX.sym('alt')
R0 = 2.377e-3
Tfac = 1 - 0.703e-5*alt
T = ca.if_else(alt > 35000, 390, 519*Tfac)
rho = R0*(Tfac**(4.14))
tables['amach'] = ca.Function('amach', [vt, alt], [vt/(ca.sqrt(1.4*1716.3*T))], ['vt', 'alt'], ['amach'])
tables['qbar'] = ca.Function('qbar', [vt, alt], [0.5*rho*vt**2], ['vt', 'alt'], ['qbar'])
tables['ps'] = ca.Function('ps', [alt], [1715*rho*T], ['alt'], ['ps'])
atmosphere()
return tables
tables = build_tables()
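# Usage sketch (illustrative; values rounded): the entries of `tables` are
# CasADi Functions and can be evaluated numerically, e.g.
#   float(tables['tgear'](0.75))          # ~48.7 power command
#   float(tables['amach'](500, 0))        # Mach number at 500 ft/s, sea level
#   float(tables['thrust'](100, 0, 0.4))  # max thrust at sea level, Mach 0.4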
class CasadiDataClass:
"""
A base class for dataclasses with casadi.
"""
def __post_init__(self):
self.__name_to_index = {}
self.__index_to_name = {}
for i, field in enumerate(self.fields()):
self.__name_to_index[field.name] = i
self.__index_to_name[i] = field.name
@classmethod
def fields(cls):
return dataclasses.fields(cls)
def to_casadi(self):
return ca.vertcat(*self.to_tuple())
def to_tuple(self):
return dataclasses.astuple(self)
def to_dict(self):
return dataclasses.asdict(self)
@classmethod
def from_casadi(cls, v):
return cls(*[v[i] for i in range(v.shape[0])])
@classmethod
def sym(cls, name):
v = ca.MX.sym(name, len(cls.fields()))
return cls(*[v[i] for i in range(v.shape[0])])
def name_to_index(self, name):
return self.__name_to_index[name]
def index_to_name(self, index):
return self.__index_to_name[index]
@dataclasses.dataclass
class State(CasadiDataClass):
"""The vehicle state."""
VT: float = 0 # true velocity, (ft/s)
alpha: float = 0 # angle of attack, (rad)
beta: float = 0 # sideslip angle, (rad)
phi: float = 0 # B321 roll angle, (rad)
theta: float = 0 # B321 pitch angle, (rad)
psi: float = 0 # B321 yaw angle, (rad)
P: float = 0 # body roll rate, (rad/s)
Q: float = 0 # body pitch rate, (rad/s)
R: float = 0 # body yaw rate, (rad/s)
p_N: float = 0 # north position, (m)
p_E: float = 0 # east position, (m)
alt: float = 0 # altitude, (m)
power: float = 0 # power, (0-1)
ail_deg: float = 0 # aileron position, (deg)
elv_deg: float = 0 # elevator position, (deg)
rdr_deg: float = 0 # rudder position, (deg)
@dataclasses.dataclass
class StateDot(CasadiDataClass):
"""The derivative of the vehicle state."""
VT_dot: float = 0 # true velocity derivative, (ft/s^2)
alpha_dot: float = 0 # angle of attack rate, (rad/s)
beta_dot: float = 0 # sideslip rate, (rad/s)
phi_dot: float = 0 # B321 roll rate, (rad/s)
theta_dot: float = 0 # B321 pitch rate, (rad/s)
psi_dot: float = 0 # B321 yaw rate, (rad/s)
P_dot: float = 0 # body roll accel, (rad/s^2)
Q_dot: float = 0 # body pitch accel, (rad/s^2)
R_dot: float = 0 # body yaw accel, (rad/s^2)
V_N: float = 0 # north velocity, (m/s)
V_E: float = 0 # east velocity, (m/s)
alt_dot: float = 0 # climb rate, (m/s)
power_dot: float = 0 # power rate, (NA)
ail_rate_dps: float = 0 # aileron rate, (deg/s)
elv_rate_dps: float = 0 # elevator rate, (deg/s)
rdr_rate_dps: float = 0 # rudder rate, (deg/s)
@dataclasses.dataclass
class Control(CasadiDataClass):
"""The control input."""
thtl: float = 0 # throttle (0-1)
ail_cmd_deg: float = 0 # aileron command, (deg)
elv_cmd_deg: float = 0 # elevator command, (deg)
rdr_cmd_deg: float = 0 # rudder command, (deg)
@dataclasses.dataclass
class Parameters(CasadiDataClass):
"""The constant parameters."""
s: float = 300.0 # reference area, ft^2
b: float = 30.0 # wing span, ft
cbar: float = 11.32 # mean chord, ft
xcgr: float = 0.35 # reference cg, %chord
xcg: float = 0.35 # actual cg, %chord
hx: float = 160.0 # engine angular momentum, slug*ft^2/s
g: float = 32.17 # acceleration of gravity, ft/s^2
weight: float = 20490.446 # weight, lbf
axx: float = 9496.0 # moment of inertia about x
ayy: float = 55814.0 # moment of inertia about y
azz: float = 63100.0 # moment of inertia about z
axz: float = 982.0 # xz moment of inertia
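# Illustrative use of the dataclasses above: State.sym('x') returns a State
# whose 16 fields wrap the entries of a single ca.MX symbol, to_casadi()
# stacks the fields back into one column vector, and name_to_index('alpha')
# recovers a field's position for indexing Jacobians or trim vectors.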
def force_moment(x: State, u: Control, p: Parameters):
"""
The function computes the forces and moments acting on the aircraft.
It is important to separate this from the dynamics as the Gazebo
simulator will be used to simulate extra forces and moments
from collision.
"""
# functions
cos = ca.cos
sin = ca.sin
# parameters
weight = p.weight
g = p.g
hx = p.hx
b = p.b
cbar = p.cbar
s = p.s
xcg = p.xcg
xcgr = p.xcgr
# state
VT = x.VT
alpha = x.alpha
beta = x.beta
phi = x.phi
theta = x.theta
P = x.P
Q = x.Q
R = x.R
alt = x.alt
power = x.power
ail_deg = x.ail_deg
elv_deg = x.elv_deg
rdr_deg = x.rdr_deg
# mass properties
mass = weight/g
# air data computer and engine model
amach = tables['amach'](VT, alt)
qbar = tables['qbar'](VT, alt)
thrust = tables['thrust'](power, alt, amach)
# force component buildup
rad2deg = 180/np.pi
alpha_deg = rad2deg*alpha
beta_deg = rad2deg*beta
dail = ail_deg/20.0
drdr = rdr_deg/30.0
cxt = tables['Cx'](alpha_deg, elv_deg)
cyt = tables['Cy'](beta_deg, ail_deg, rdr_deg)
czt = tables['Cz'](alpha_deg, beta_deg, elv_deg)
clt = ca.sign(beta_deg)*tables['Cl'](alpha_deg, beta_deg) \
+ tables['DlDa'](alpha_deg, beta_deg)*dail \
+ tables['DlDr'](alpha_deg, beta_deg)*drdr
cmt = tables['Cm'](alpha_deg, elv_deg)
cnt = ca.sign(beta_deg)*tables['Cn'](alpha_deg, beta_deg) \
+ tables['DnDa'](alpha_deg, beta_deg)*dail \
+ tables['DnDr'](alpha_deg, beta_deg)*drdr
# damping
tvt = 0.5/VT
b2v = b*tvt
cq = cbar*Q*tvt
cxt += cq*tables['CXq'](alpha_deg)
cyt += b2v*(tables['CYr'](alpha_deg)*R + tables['CYp'](alpha_deg)*P)
czt += cq*tables['CZq'](alpha_deg)
clt += b2v*(tables['Clr'](alpha_deg)*R + tables['Clp'](alpha_deg)*P)
cmt += cq*tables['Cmq'](alpha_deg) + czt*(xcgr - xcg)
cnt += b2v*(tables['Cnr'](alpha_deg)*R + tables['Cnp'](alpha_deg)*P) - cyt*(xcgr - xcg)*cbar/b
# get ready for state equations
sth = sin(theta)
cth = cos(theta)
sph = sin(phi)
cph = cos(phi)
qs = qbar*s
qsb = qs*b
rmqs = qs/mass
gcth = g*cth
ay = rmqs*cyt
az = rmqs*czt
qhx = Q*hx
# force
Fx = -mass*g*sth + qs*cxt + thrust
Fy = mass*(gcth*sph + ay)
Fz = mass*(gcth*cph + az)
# moment
Mx = qsb*clt # roll
My = qs*cbar*cmt - R*hx # pitch
Mz = qsb*cnt + qhx # yaw
return ca.vertcat(Fx, Fy, Fz), ca.vertcat(Mx, My, Mz)
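# Reading of the buildup above (illustrative summary): the static coefficient
# tables give cxt/cyt/czt/clt/cmt/cnt, rate-damping terms proportional to
# P, Q, R are added, and the cg offset (xcgr - xcg) shifts the pitch and yaw
# moments; forces are qbar*s times the coefficients plus gravity and thrust,
# while the moments add the engine gyroscopic couplings -R*hx (pitch) and
# +Q*hx (yaw).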
def dynamics(x: State, u: Control, p: Parameters):
"""
| |
1.33500106673234)
m.c12 = Constraint(expr= m.x6 - m.x2509 >= 0.741937344729377)
m.c13 = Constraint(expr= m.x7 - m.x2509 >= 0.916290731874155)
m.c14 = Constraint(expr= m.x2 - m.x2510 >= -0.356674943938732)
m.c15 = Constraint(expr= m.x3 - m.x2510 >= 0.955511445027436)
m.c16 = Constraint(expr= m.x4 - m.x2510 >= 0.470003629245736)
m.c17 = Constraint(expr= m.x5 - m.x2510 >= 1.22377543162212)
m.c18 = Constraint(expr= m.x6 - m.x2510 >= 1.16315080980568)
m.c19 = Constraint(expr= m.x7 - m.x2510 >= 1.06471073699243)
m.c20 = Constraint(expr= m.x2 - m.x2511 >= 1.54756250871601)
m.c21 = Constraint(expr= m.x3 - m.x2511 >= 0.832909122935104)
m.c22 = Constraint(expr= m.x4 - m.x2511 >= 0.470003629245736)
m.c23 = Constraint(expr= m.x5 - m.x2511 >= 0.993251773010283)
m.c24 = Constraint(expr= m.x6 - m.x2511 >= 0.182321556793955)
m.c25 = Constraint(expr= m.x7 - m.x2511 >= 0.916290731874155)
m.c26 = Constraint(expr=m.x8*m.x2512 + m.x633*m.x2518 + m.x1258*m.x2524 + m.x1883*m.x2530 <= 8)
m.c27 = Constraint(expr=m.x9*m.x2512 + m.x634*m.x2518 + m.x1259*m.x2524 + m.x1884*m.x2530 <= 8)
m.c28 = Constraint(expr=m.x10*m.x2512 + m.x635*m.x2518 + m.x1260*m.x2524 + m.x1885*m.x2530 <= 8)
m.c29 = Constraint(expr=m.x11*m.x2512 + m.x636*m.x2518 + m.x1261*m.x2524 + m.x1886*m.x2530 <= 8)
m.c30 = Constraint(expr=m.x12*m.x2512 + m.x637*m.x2518 + m.x1262*m.x2524 + m.x1887*m.x2530 <= 8)
m.c31 = Constraint(expr=m.x13*m.x2512 + m.x638*m.x2518 + m.x1263*m.x2524 + m.x1888*m.x2530 <= 8)
m.c32 = Constraint(expr=m.x14*m.x2512 + m.x639*m.x2518 + m.x1264*m.x2524 + m.x1889*m.x2530 <= 8)
m.c33 = Constraint(expr=m.x15*m.x2512 + m.x640*m.x2518 + m.x1265*m.x2524 + m.x1890*m.x2530 <= 8)
m.c34 = Constraint(expr=m.x16*m.x2512 + m.x641*m.x2518 + m.x1266*m.x2524 + m.x1891*m.x2530 <= 8)
m.c35 = Constraint(expr=m.x17*m.x2512 + m.x642*m.x2518 + m.x1267*m.x2524 + m.x1892*m.x2530 <= 8)
m.c36 = Constraint(expr=m.x18*m.x2512 + m.x643*m.x2518 + m.x1268*m.x2524 + m.x1893*m.x2530 <= 8)
m.c37 = Constraint(expr=m.x19*m.x2512 + m.x644*m.x2518 + m.x1269*m.x2524 + m.x1894*m.x2530 <= 8)
m.c38 = Constraint(expr=m.x20*m.x2512 + m.x645*m.x2518 + m.x1270*m.x2524 + m.x1895*m.x2530 <= 8)
m.c39 = Constraint(expr=m.x21*m.x2512 + m.x646*m.x2518 + m.x1271*m.x2524 + m.x1896*m.x2530 <= 8)
m.c40 = Constraint(expr=m.x22*m.x2512 + m.x647*m.x2518 + m.x1272*m.x2524 + m.x1897*m.x2530 <= 8)
m.c41 = Constraint(expr=m.x23*m.x2512 + m.x648*m.x2518 + m.x1273*m.x2524 + m.x1898*m.x2530 <= 8)
m.c42 = Constraint(expr=m.x24*m.x2512 + m.x649*m.x2518 + m.x1274*m.x2524 + m.x1899*m.x2530 <= 8)
m.c43 = Constraint(expr=m.x25*m.x2512 + m.x650*m.x2518 + m.x1275*m.x2524 + m.x1900*m.x2530 <= 8)
m.c44 = Constraint(expr=m.x26*m.x2512 + m.x651*m.x2518 + m.x1276*m.x2524 + m.x1901*m.x2530 <= 8)
m.c45 = Constraint(expr=m.x27*m.x2512 + m.x652*m.x2518 + m.x1277*m.x2524 + m.x1902*m.x2530 <= 8)
m.c46 = Constraint(expr=m.x28*m.x2512 + m.x653*m.x2518 + m.x1278*m.x2524 + m.x1903*m.x2530 <= 8)
m.c47 = Constraint(expr=m.x29*m.x2512 + m.x654*m.x2518 + m.x1279*m.x2524 + m.x1904*m.x2530 <= 8)
m.c48 = Constraint(expr=m.x30*m.x2512 + m.x655*m.x2518 + m.x1280*m.x2524 + m.x1905*m.x2530 <= 8)
m.c49 = Constraint(expr=m.x31*m.x2512 + m.x656*m.x2518 + m.x1281*m.x2524 + m.x1906*m.x2530 <= 8)
m.c50 = Constraint(expr=m.x32*m.x2512 + m.x657*m.x2518 + m.x1282*m.x2524 + m.x1907*m.x2530 <= 8)
m.c51 = Constraint(expr=m.x33*m.x2512 + m.x658*m.x2518 + m.x1283*m.x2524 + m.x1908*m.x2530 <= 8)
m.c52 = Constraint(expr=m.x34*m.x2512 + m.x659*m.x2518 + m.x1284*m.x2524 + m.x1909*m.x2530 <= 8)
m.c53 = Constraint(expr=m.x35*m.x2512 + m.x660*m.x2518 + m.x1285*m.x2524 + m.x1910*m.x2530 <= 8)
m.c54 = Constraint(expr=m.x36*m.x2512 + m.x661*m.x2518 + m.x1286*m.x2524 + m.x1911*m.x2530 <= 8)
m.c55 = Constraint(expr=m.x37*m.x2512 + m.x662*m.x2518 + m.x1287*m.x2524 + m.x1912*m.x2530 <= 8)
m.c56 = Constraint(expr=m.x38*m.x2512 + m.x663*m.x2518 + m.x1288*m.x2524 + m.x1913*m.x2530 <= 8)
m.c57 = Constraint(expr=m.x39*m.x2512 + m.x664*m.x2518 + m.x1289*m.x2524 + m.x1914*m.x2530 <= 8)
m.c58 = Constraint(expr=m.x40*m.x2512 + m.x665*m.x2518 + m.x1290*m.x2524 + m.x1915*m.x2530 <= 8)
m.c59 = Constraint(expr=m.x41*m.x2512 + m.x666*m.x2518 + m.x1291*m.x2524 + m.x1916*m.x2530 <= 8)
m.c60 = Constraint(expr=m.x42*m.x2512 + m.x667*m.x2518 + m.x1292*m.x2524 + m.x1917*m.x2530 <= 8)
m.c61 = Constraint(expr=m.x43*m.x2512 + m.x668*m.x2518 + m.x1293*m.x2524 + m.x1918*m.x2530 <= 8)
m.c62 = Constraint(expr=m.x44*m.x2512 + m.x669*m.x2518 + m.x1294*m.x2524 + m.x1919*m.x2530 <= 8)
m.c63 = Constraint(expr=m.x45*m.x2512 + m.x670*m.x2518 + m.x1295*m.x2524 + m.x1920*m.x2530 <= 8)
m.c64 = Constraint(expr=m.x46*m.x2512 + m.x671*m.x2518 + m.x1296*m.x2524 + m.x1921*m.x2530 <= 8)
m.c65 = Constraint(expr=m.x47*m.x2512 + m.x672*m.x2518 + m.x1297*m.x2524 + m.x1922*m.x2530 <= 8)
m.c66 = Constraint(expr=m.x48*m.x2512 + m.x673*m.x2518 + m.x1298*m.x2524 + m.x1923*m.x2530 <= 8)
m.c67 = Constraint(expr=m.x49*m.x2512 + m.x674*m.x2518 + m.x1299*m.x2524 + m.x1924*m.x2530 <= 8)
m.c68 = Constraint(expr=m.x50*m.x2512 + m.x675*m.x2518 + m.x1300*m.x2524 + m.x1925*m.x2530 <= 8)
m.c69 = Constraint(expr=m.x51*m.x2512 + m.x676*m.x2518 + m.x1301*m.x2524 + m.x1926*m.x2530 <= 8)
m.c70 = Constraint(expr=m.x52*m.x2512 + m.x677*m.x2518 + m.x1302*m.x2524 + m.x1927*m.x2530 <= 8)
m.c71 = Constraint(expr=m.x53*m.x2512 + m.x678*m.x2518 + m.x1303*m.x2524 + m.x1928*m.x2530 <= 8)
m.c72 = Constraint(expr=m.x54*m.x2512 + m.x679*m.x2518 + m.x1304*m.x2524 + m.x1929*m.x2530 <= 8)
m.c73 = Constraint(expr=m.x55*m.x2512 + m.x680*m.x2518 + m.x1305*m.x2524 + m.x1930*m.x2530 <= 8)
m.c74 = Constraint(expr=m.x56*m.x2512 + m.x681*m.x2518 + m.x1306*m.x2524 + m.x1931*m.x2530 <= 8)
m.c75 = Constraint(expr=m.x57*m.x2512 + m.x682*m.x2518 + m.x1307*m.x2524 + m.x1932*m.x2530 <= 8)
m.c76 = Constraint(expr=m.x58*m.x2512 + m.x683*m.x2518 + m.x1308*m.x2524 + m.x1933*m.x2530 <= 8)
m.c77 = Constraint(expr=m.x59*m.x2512 + m.x684*m.x2518 + m.x1309*m.x2524 + m.x1934*m.x2530 <= 8)
m.c78 = Constraint(expr=m.x60*m.x2512 + m.x685*m.x2518 + m.x1310*m.x2524 + m.x1935*m.x2530 <= 8)
m.c79 = Constraint(expr=m.x61*m.x2512 + m.x686*m.x2518 + m.x1311*m.x2524 + m.x1936*m.x2530 <= 8)
m.c80 = Constraint(expr=m.x62*m.x2512 + m.x687*m.x2518 + m.x1312*m.x2524 + m.x1937*m.x2530 <= 8)
m.c81 = Constraint(expr=m.x63*m.x2512 + m.x688*m.x2518 + m.x1313*m.x2524 + m.x1938*m.x2530 <= 8)
m.c82 = Constraint(expr=m.x64*m.x2512 + m.x689*m.x2518 + m.x1314*m.x2524 + m.x1939*m.x2530 <= 8)
m.c83 = Constraint(expr=m.x65*m.x2512 + m.x690*m.x2518 + m.x1315*m.x2524 + m.x1940*m.x2530 <= 8)
m.c84 = Constraint(expr=m.x66*m.x2512 + m.x691*m.x2518 + m.x1316*m.x2524 + m.x1941*m.x2530 <= 8)
m.c85 = Constraint(expr=m.x67*m.x2512 + m.x692*m.x2518 + m.x1317*m.x2524 + m.x1942*m.x2530 <= 8)
m.c86 = Constraint(expr=m.x68*m.x2512 + m.x693*m.x2518 + m.x1318*m.x2524 + m.x1943*m.x2530 <= 8)
m.c87 = Constraint(expr=m.x69*m.x2512 + m.x694*m.x2518 + m.x1319*m.x2524 + m.x1944*m.x2530 <= 8)
m.c88 = Constraint(expr=m.x70*m.x2512 + m.x695*m.x2518 + m.x1320*m.x2524 + m.x1945*m.x2530 <= 8)
m.c89 = Constraint(expr=m.x71*m.x2512 + m.x696*m.x2518 + m.x1321*m.x2524 + m.x1946*m.x2530 <= 8)
m.c90 = Constraint(expr=m.x72*m.x2512 + m.x697*m.x2518 + m.x1322*m.x2524 + m.x1947*m.x2530 <= 8)
m.c91 = Constraint(expr=m.x73*m.x2512 + m.x698*m.x2518 + m.x1323*m.x2524 + m.x1948*m.x2530 <= 8)
m.c92 = Constraint(expr=m.x74*m.x2512 + m.x699*m.x2518 + m.x1324*m.x2524 + m.x1949*m.x2530 <= 8)
m.c93 = Constraint(expr=m.x75*m.x2512 + m.x700*m.x2518 + m.x1325*m.x2524 + m.x1950*m.x2530 <= 8)
m.c94 = Constraint(expr=m.x76*m.x2512 + m.x701*m.x2518 + m.x1326*m.x2524 + m.x1951*m.x2530 <= 8)
m.c95 = Constraint(expr=m.x77*m.x2512 + m.x702*m.x2518 + m.x1327*m.x2524 + m.x1952*m.x2530 <= 8)
m.c96 = Constraint(expr=m.x78*m.x2512 + m.x703*m.x2518 + m.x1328*m.x2524 + m.x1953*m.x2530 <= 8)
m.c97 = Constraint(expr=m.x79*m.x2512 + m.x704*m.x2518 + m.x1329*m.x2524 + m.x1954*m.x2530 <= 8)
m.c98 = Constraint(expr=m.x80*m.x2512 + m.x705*m.x2518 + m.x1330*m.x2524 + m.x1955*m.x2530 <= 8)
m.c99 = Constraint(expr=m.x81*m.x2512 + m.x706*m.x2518 + m.x1331*m.x2524 + m.x1956*m.x2530 <= 8)
m.c100 = Constraint(expr=m.x82*m.x2512 + m.x707*m.x2518 + m.x1332*m.x2524 + m.x1957*m.x2530 <= 8)
m.c101 = Constraint(expr=m.x83*m.x2512 + m.x708*m.x2518 + m.x1333*m.x2524 + m.x1958*m.x2530 <= 8)
m.c102 = Constraint(expr=m.x84*m.x2512 + m.x709*m.x2518 + m.x1334*m.x2524 + m.x1959*m.x2530 <= 8)
m.c103 = Constraint(expr=m.x85*m.x2512 + m.x710*m.x2518 + m.x1335*m.x2524 + m.x1960*m.x2530 <= 8)
m.c104 = Constraint(expr=m.x86*m.x2512 + m.x711*m.x2518 + m.x1336*m.x2524 + m.x1961*m.x2530 <= 8)
m.c105 = Constraint(expr=m.x87*m.x2512 + m.x712*m.x2518 + m.x1337*m.x2524 + m.x1962*m.x2530 <= 8)
m.c106 = Constraint(expr=m.x88*m.x2512 + m.x713*m.x2518 + m.x1338*m.x2524 + m.x1963*m.x2530 <= 8)
m.c107 = Constraint(expr=m.x89*m.x2512 + m.x714*m.x2518 + m.x1339*m.x2524 + m.x1964*m.x2530 <= 8)
m.c108 = Constraint(expr=m.x90*m.x2512 + m.x715*m.x2518 + m.x1340*m.x2524 + m.x1965*m.x2530 <= 8)
m.c109 = Constraint(expr=m.x91*m.x2512 + m.x716*m.x2518 + m.x1341*m.x2524 + m.x1966*m.x2530 <= 8)
m.c110 = Constraint(expr=m.x92*m.x2512 + m.x717*m.x2518 + m.x1342*m.x2524 + m.x1967*m.x2530 <= 8)
m.c111 = Constraint(expr=m.x93*m.x2512 + m.x718*m.x2518 + m.x1343*m.x2524 + m.x1968*m.x2530 <= 8)
m.c112 = Constraint(expr=m.x94*m.x2512 + m.x719*m.x2518 + m.x1344*m.x2524 + m.x1969*m.x2530 <= 8)
m.c113 = Constraint(expr=m.x95*m.x2512 + m.x720*m.x2518 + m.x1345*m.x2524 + m.x1970*m.x2530 <= 8)
m.c114 = Constraint(expr=m.x96*m.x2512 + m.x721*m.x2518 + m.x1346*m.x2524 + m.x1971*m.x2530 <= 8)
m.c115 = Constraint(expr=m.x97*m.x2512 + m.x722*m.x2518 + m.x1347*m.x2524 + m.x1972*m.x2530 <= 8)
m.c116 = Constraint(expr=m.x98*m.x2512 + m.x723*m.x2518 + m.x1348*m.x2524 + m.x1973*m.x2530 <= 8)
m.c117 = Constraint(expr=m.x99*m.x2512 + m.x724*m.x2518 + m.x1349*m.x2524 + m.x1974*m.x2530 <= 8)
m.c118 = Constraint(expr=m.x100*m.x2512 + m.x725*m.x2518 + m.x1350*m.x2524 + m.x1975*m.x2530 <= 8)
m.c119 = Constraint(expr=m.x101*m.x2512 + m.x726*m.x2518 + m.x1351*m.x2524 + m.x1976*m.x2530 <= 8)
m.c120 = Constraint(expr=m.x102*m.x2512 + m.x727*m.x2518 + m.x1352*m.x2524 + m.x1977*m.x2530 <= 8)
m.c121 = Constraint(expr=m.x103*m.x2512 + m.x728*m.x2518 + m.x1353*m.x2524 + m.x1978*m.x2530 <= 8)
m.c122 = Constraint(expr=m.x104*m.x2512 + m.x729*m.x2518 + m.x1354*m.x2524 + m.x1979*m.x2530 <= 8)
m.c123 = Constraint(expr=m.x105*m.x2512 + m.x730*m.x2518 + m.x1355*m.x2524 + m.x1980*m.x2530 <= 8)
m.c124 = Constraint(expr=m.x106*m.x2512 + m.x731*m.x2518 + m.x1356*m.x2524 + m.x1981*m.x2530 <= 8)
m.c125 = Constraint(expr=m.x107*m.x2512 + m.x732*m.x2518 + m.x1357*m.x2524 + m.x1982*m.x2530 <= 8)
m.c126 = Constraint(expr=m.x108*m.x2512 + m.x733*m.x2518 + m.x1358*m.x2524 + m.x1983*m.x2530 <= 8)
m.c127 = Constraint(expr=m.x109*m.x2512 + m.x734*m.x2518 + m.x1359*m.x2524 + m.x1984*m.x2530 <= 8)
m.c128 = Constraint(expr=m.x110*m.x2512 + m.x735*m.x2518 + m.x1360*m.x2524 + m.x1985*m.x2530 <= 8)
m.c129 = Constraint(expr=m.x111*m.x2512 + m.x736*m.x2518 + m.x1361*m.x2524 + m.x1986*m.x2530 <= 8)
m.c130 = Constraint(expr=m.x112*m.x2512 + m.x737*m.x2518 + m.x1362*m.x2524 + m.x1987*m.x2530 <= 8)
m.c131 = Constraint(expr=m.x113*m.x2512 + m.x738*m.x2518 + m.x1363*m.x2524 + m.x1988*m.x2530 <= 8)
m.c132 = Constraint(expr=m.x114*m.x2512 + m.x739*m.x2518 + m.x1364*m.x2524 + m.x1989*m.x2530 <= 8)
m.c133 = Constraint(expr=m.x115*m.x2512 + m.x740*m.x2518 + m.x1365*m.x2524 + m.x1990*m.x2530 <= 8)
m.c134 = Constraint(expr=m.x116*m.x2512 + m.x741*m.x2518 + m.x1366*m.x2524 + m.x1991*m.x2530 <= 8)
m.c135 = Constraint(expr=m.x117*m.x2512 + m.x742*m.x2518 + m.x1367*m.x2524 + m.x1992*m.x2530 <= 8)
m.c136 = Constraint(expr=m.x118*m.x2512 + m.x743*m.x2518 + m.x1368*m.x2524 + m.x1993*m.x2530 <= 8)
m.c137 = Constraint(expr=m.x119*m.x2512 + m.x744*m.x2518 + m.x1369*m.x2524 + m.x1994*m.x2530 <= 8)
m.c138 = Constraint(expr=m.x120*m.x2512 + m.x745*m.x2518 + m.x1370*m.x2524 + m.x1995*m.x2530 <= 8)
m.c139 = Constraint(expr=m.x121*m.x2512 + m.x746*m.x2518 + m.x1371*m.x2524 + m.x1996*m.x2530 <= 8)
m.c140 = Constraint(expr=m.x122*m.x2512 + m.x747*m.x2518 + m.x1372*m.x2524 + m.x1997*m.x2530 <= 8)
m.c141 = Constraint(expr=m.x123*m.x2512 + m.x748*m.x2518 + m.x1373*m.x2524 + m.x1998*m.x2530 <= 8)
m.c142 = Constraint(expr=m.x124*m.x2512 + m.x749*m.x2518 + m.x1374*m.x2524 + m.x1999*m.x2530 <= 8)
m.c143 = Constraint(expr=m.x125*m.x2512 + m.x750*m.x2518 + m.x1375*m.x2524 + m.x2000*m.x2530 <= 8)
m.c144 = | |
<gh_stars>1-10
'''
A simple implementation of the material design colors in python
- currently only supports 'rgb' format; more will be added soon
Usage:
from material_colors import MaterialColors
Colors = MaterialColors('rgb')
my_red_color = Colors.red()
my_purple_color = Colors.purple(300)
'''
class MaterialColors():
def __init__(self, mode = 'rgb'):
# set the format in which the color gets returned
self.setMode(mode)
# normal colors
self._red = {50: [(255, 235, 238)],
100: [(255, 205, 210)],
200: [(239, 154, 154)],
300: [(239, 154, 154)],
400: [(239, 83, 80)],
500: [(244, 67, 54)],
600: [(229, 57, 53)],
700: [(211, 47, 47)],
800: [(198, 40, 40)],
900: [(183, 28, 28)]}
self._pink = {50: [(252, 228, 236)],
100: [(248, 187, 208)],
200: [(244, 143, 177)],
300: [(240, 98, 146)],
400: [(236, 64, 122)],
500: [(233, 30, 99)],
600: [(216, 27, 96)],
700: [(194, 24, 91)],
800: [(173, 20, 87)],
900: [(136, 14, 79)]}
self._purple = {50: [(243, 229, 245)],
100: [(225, 190, 231)],
200: [(206, 147, 216)],
300: [(186, 104, 200)],
400: [(171, 71, 188)],
500: [(156, 39, 176)],
600: [(142, 36, 170)],
700: [(123, 31, 162)],
800: [(106, 27, 154)],
900: [(74, 20, 140)]}
self._deep_purple = {50: [(237, 231, 246)],
100: [(209, 196, 233)],
200: [(179, 157, 219)],
300: [(149, 117, 205)],
400: [(126, 87, 194)],
500: [(103, 58, 183)],
600: [(94, 53, 177)],
700: [(81, 45, 168)],
800: [(69, 39, 160)],
900: [(49, 27, 146)]}
self._indigo = {50: [(232, 234, 246)],
100: [(197, 202, 233)],
200: [(159, 168, 218)],
300: [(121, 134, 203)],
400: [(92, 107, 192)],
500: [(63, 81, 181)],
600: [(57, 73, 171)],
700: [(48, 63, 159)],
800: [(40, 53, 147)],
900: [(26, 35, 126)]}
self._blue = {50: [(227, 242, 253)],
100: [(187, 222, 251)],
200: [(144, 202, 249)],
300: [(100, 181, 246)],
400: [(66, 165, 245)],
500: [(33, 150, 243)],
600: [(30, 136, 229)],
700: [(25, 118, 210)],
800: [(21, 101, 192)],
900: [(13, 71, 161)]}
self._light_blue = {50: [(225, 245, 254)],
100: [(179, 229, 252)],
200: [(129, 212, 250)],
300: [(129, 212, 250)],
400: [(41, 182, 246)],
500: [(3, 169, 244)],
600: [(3, 155, 229)],
700: [(2, 136, 209)],
800: [(2, 119, 189)],
900: [(1, 87, 155)]}
self._cyan = {50: [(224, 247, 250)],
100: [(178, 235, 242)],
200: [(128, 222, 234)],
300: [(77, 208, 225)],
400: [(38, 198, 218)],
500: [(0, 188, 212)],
600: [(0, 172, 193)],
700: [(0, 151, 167)],
800: [(0, 131, 143)],
900: [(0, 96, 100)]}
self._teal = {50: [(224, 242, 241)],
100: [(178, 223, 219)],
200: [(128, 203, 196)],
300: [(77, 182, 172)],
400: [(38, 166, 154)],
500: [(0, 150, 136)],
600: [(0, 137, 123)],
700: [(0, 121, 107)],
800: [(0, 105, 92)],
900: [(0, 77, 64)]}
self._green = {50: [(232, 245, 233)],
100: [(200, 230, 201)],
200: [(165, 214, 167)],
300: [(129, 199, 132)],
400: [(102, 187, 106)],
500: [(76, 175, 80)],
600: [(67, 160, 71)],
700: [(56, 142, 60)],
800: [(46, 125, 50)],
900: [(27, 94, 32)]}
self._light_green = {50: [(241, 248, 233)],
100: [(220, 237, 200)],
200: [(197, 225, 165)],
300: [(174, 213, 129)],
400: [(156, 204, 101)],
500: [(139, 195, 74)],
600: [(124, 179, 66)],
700: [(104, 159, 56)],
800: [(85, 139, 47)],
900: [(51, 105, 30)]}
self._lime = {50: [(249, 251, 231)],
100: [(240, 244, 195)],
200: [(230, 238, 156)],
300: [(220, 231, 117)],
400: [(212, 225, 87)],
500: [(205, 220, 57)],
600: [(192, 202, 51)],
700: [(175, 180, 43)],
800: [(158, 157, 36)],
900: [(130, 119, 23)]}
self._yellow = {50: [(255, 253, 231)],
100: [(255, 249, 196)],
200: [(255, 245, 157)],
300: [(255, 241, 118)],
400: [(255, 238, 88)],
500: [(255, 235, 59)],
600: [(253, 216, 53)],
700: [(251, 192, 45)],
800: [(249, 168, 37)],
900: [(245, 127, 23)]}
self._amber = {50: [(255, 248, 225)],
100: [(255, 236, 179)],
200: [(255, 224, 130)],
300: [(255, 213, 79)],
400: [(255, 202, 40)],
500: [(255, 193, 7)],
600: [(255, 179, 0)],
700: [(255, 160, 0)],
800: [(255, 143, 0)],
900: [(255, 111, 0)]}
self._orange = {50: [(255, 243, 224)],
100: [(255, 224, 178)],
200: [(255, 204, 128)],
300: [(255, 183, 77)],
400: [(255, 167, 38)],
500: [(255, 152, 0)],
600: [(251, 140, 0)],
700: [(245, 124, 0)],
800: [(239, 108, 0)],
900: [(230, 81, 0)]}
self._deep_orange = {50: [(251, 233, 231)],
100: [(255, 204, 188)],
200: [(255, 171, 145)],
300: [(255, 138, 101)],
400: [(255, 112, 67)],
500: [(255, 87, 34)],
600: [(244, 81, 30)],
700: [(230, 74, 25)],
800: [(216, 67, 21)],
900: [(191, 54, 12)]}
self._brown = {50: [(239, 235, 233)],
100: [(215, 204, 200)],
200: [(188, 170, 164)],
300: [(161, 136, 127)],
400: [(141, 110, 99)],
500: [(121, 85, 72)],
600: [(109, 76, 65)],
700: [(93, 64, 55)],
800: [(78, 52, 46)],
900: [(62, 39, 35)]}
self._grey = {50: [(250, 250, 250)],
100: [(245, 245, 245)],
200: [(238, 238, 238)],
300: [(224, 224, 224)],
400: [(189, 189, 189)],
500: [(158, 158, 158)],
600: [(117, 117, 117)],
700: [(97, 97, 97)],
800: [(66, 66, 66)],
900: [(33, 33, 33)]}
self._blue_grey = {50: [(236, 239, 241)],
100: [(207, 216, 220)],
200: [(176, 190, 197)],
300: [(144, 164, 174)],
400: [(120, 144, 156)],
500: [(96, 125, 139)],
600: [(84, 110, 122)],
700: [(69, 90, 100)],
800: [(55, 71, 79)],
900: [(38, 50, 56)]}
# accent colors
self._accent_red = {100: [(255, 138, 128)],
200: [(255, 82, 82)],
400: [(255, 23, 68)],
700: [(213, 0, 0)]}
self._accent_pink = {100: [(255, 128, 171)],
200: [(255, 64, 129)],
400: [(245, 0, 87)],
700: [(197, 17, 98)]}
self._accent_purple = {100: [(234, 128, 252)],
200: [(224, 64, 251)],
400: [(213, 0, 249)],
700: [(170, 0, 255)]}
self._accent_deep_purple = {100: [(179, 136, 255)],
200: [(124, 77, 255)],
400: [(101, 31, 255)],
700: [(98, 0, 234)]}
self._accent_indigo = {100: [(140, 158, 255)],
200: [(83, 109, 254)],
400: [(61, 90, 254)],
700: [(48, 79, 254)]}
self._accent_blue = {100: [(130, 177, 255)],
200: [(68, 138, 255)],
400: [(41, 121, 255)],
700: [(41, 98, 255)]}
self._accent_light_blue = {100: [(128, 216, 255)],
200: [(64, 196, 255)],
400: [(0, 176, 255)],
700: [(0, 145, 234)]}
self._accent_cyan = {100: [(132, 255, 255)],
200: [(24, 255, 255)],
400: [(0, 229, 255)],
700: [(0, 184, 212)]}
self._accent_teal = {100: [(167, 255, 235)],
200: [(100, 255, 218)],
400: [(29, 233, 182)],
700: [(0, 191, 165)]}
self._accent_green = {100: [(185, 246, 202)],
200: [(105, 240, 174)],
400: [(0, 230, 118)],
700: [(0, 200, 83)]}
self._accent_light_green = {100: [(204, 255, 144)],
200: [(178, 255, 89)],
400: [(118, 255, 3)],
700: [(100, 221, 23)]}
self._accent_lime = {100: [(244, 255, 129)],
200: [(238, 255, 65)],
400: [(198, 255, 0)],
700: [(174, 234, 0)]}
self._accent_yellow = {100: [(255, 253, 231)],
200: [(255, 255, 0)],
400: [(255, 234, 0)],
700: [(255, 214, 0)]}
self._accent_amber = {100: [(255, 229, 127)],
200: [(255, 215, 64)],
400: [(255, 196, 0)],
700: [(255, 171, 0)]}
self._accent_orange = {100: [(255, 209, 128)],
200: [(255, 171, 64)],
400: [(255, 145, 0)],
700: [(255, 109, 0)]}
self._accent_deep_orange = {100: [(255, 158, 128)],
200: [(255, 110, 64)],
400: [(255, 61, 0)],
700: [(221, 44, 0)]}
# normal colors
def red(self, val = 500):
return self._red[val][self.mode]
def pink(self, val = 500):
return self._pink[val][self.mode]
def purple(self, val = 500):
return self._purple[val][self.mode]
def deepPurple(self, val = 500):
return self._deep_purple[val][self.mode]
def indigo(self, val = 500):
return self._indigo[val][self.mode]
def blue(self, val = 500):
return self._blue[val][self.mode]
def lightBlue(self, val = 500):
return self._light_blue[val][self.mode]
def cyan(self, val = 500):
return self._cyan[val][self.mode]
def teal(self, val = 500):
return self._teal[val][self.mode]
def green(self, val = 500):
return self._green[val][self.mode]
def lightGreen(self, val = 500):
return self._light_green[val][self.mode]
def lime(self, val = 500):
return self._lime[val][self.mode]
def yellow(self, val = 500):
return self._yellow[val][self.mode]
def amber(self, val = 500):
return self._amber[val][self.mode]
def orange(self, val = 500):
return self._orange[val][self.mode]
def deepOrange(self, val = 500):
return self._deep_orange[val][self.mode]
def brown(self, val = 500):
return self._brown[val][self.mode]
def grey(self, val = 500):
return self._grey[val][self.mode]
def blueGrey(self, val = 500):
return self._blue_grey[val][self.mode]
# Copyright 2020 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import Counter, abc, defaultdict
import networkx as nx
import numpy as np
from scipy import spatial
from . import layout
def intersection(S_layout, T_layout, **kwargs):
"""
Map each vertex of S to its nearest row/column intersection qubit in T (T must be a D-Wave hardware graph).
Note: This will modify S_layout.
Parameters
----------
S_layout : layout.Layout
A layout for S; i.e. a map from S to R^d.
T_layout : layout.Layout
A layout for T; i.e. a map from T to R^d.
scale_ratio : float (default None)
If None, S_layout is not scaled. Otherwise, S_layout is scaled to scale_ratio*T_layout.scale.
Returns
-------
placement : dict
A mapping from vertices of S (keys) to vertices of T (values).
"""
# Extract the target graph
T = T_layout.G
# Currently only implemented for Chimera and Pegasus graphs
if T.graph.get("family") not in ("chimera", "pegasus"):
raise NotImplementedError(
"This strategy is currently only implemented for Chimera and Pegasus graphs.")
# Bin vertices of S and T into a grid graph G
G = _intersection_binning(S_layout, T)
placement = {}
for _, data in G.nodes(data=True):
for v in data["variables"]:
placement[v] = data["qubits"]
return placement
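# Hedged usage sketch (not part of the original module). The import path and
# the Layout construction are illustrative assumptions; adapt them to however
# this package actually exposes its Layout class.
#
#   import networkx as nx
#   import dwave_networkx as dnx
#   from minorminer import layout
#
#   S = nx.complete_graph(4)
#   T = dnx.chimera_graph(2, data=True)
#   placement = intersection(layout.Layout(S), layout.Layout(T))
#   # placement maps each vertex of S to the set of qubits of T binned at the
#   # same row/column intersection point.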
def _intersection_binning(S_layout, T):
"""
Map the vertices of S to the "intersection graph" of T. This modifies the grid graph G by assigning vertices
from S and T to vertices of G.
Parameters
----------
S_layout : layout.Layout
A layout for S; i.e. a map from S to R^d.
T : networkx.Graph
The target graph to embed S in.
scale_ratio : float (default None)
If None, S_layout is not scaled. Otherwise, S_layout is scaled to scale_ratio*T_layout.scale.
Returns
-------
G : networkx.Graph
A grid graph. Each vertex of G contains data attributes "variables" and "qubits", that is, respectively
vertices of S and T assigned to that vertex.
"""
# Scale the layout so that for each unit-cell edge, we have an integer point.
m, n, t = T.graph["rows"], T.graph["columns"], T.graph["tile"]
# --- Make the "intersection graph" of the dnx_graph
# Grid points correspond to intersection rows and columns of the dnx_graph
G = nx.grid_2d_graph(t*n, t*m)
# Determine the scale for putting things in the positive quadrant
scale = (t*min(n, m) - 1)/2
# Get the row, column mappings for the dnx graph
lattice_mapping = _lookup_intersection_coordinates(T)
# Less efficient, but more readable to initialize all at once
for v in G:
G.nodes[v]["qubits"] = set()
G.nodes[v]["variables"] = set()
# Add qubits (vertices of T) to grid points
for int_point, Q in lattice_mapping.items():
G.nodes[int_point]["qubits"] |= Q
# --- Map the S_layout to the grid
# "Zoom in" on layout_S so that the integer points are better represented
zoom_scale = S_layout.scale*t
if zoom_scale < scale:
S_layout.scale = zoom_scale
else:
S_layout.scale = scale
# Center to the positive orthant
S_layout.center = 2*(scale, )
# Add "variables" (vertices from S) to grid points too
for v, pos in S_layout.items():
grid_point = tuple(int(x) for x in np.round(pos))
G.nodes[grid_point]["variables"].add(v)
return G
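# Worked note (illustration only): for a Chimera target with m = n = 2 and
# t = 4, G above is an 8-by-8 grid of row/column intersections and
# scale = (4*2 - 1)/2 = 3.5.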
def _lookup_intersection_coordinates(G):
"""
For a dwave_networkx graph G, this returns a dictionary mapping the lattice points to sets of vertices of G.
- Chimera: Each lattice point corresponds to the 2 qubits intersecting at that point.
- Pegasus: Each lattice point corresponds to the horizontal and vertical qubits whose spans intersect at that point.
"""
graph_data = G.graph
family = graph_data.get("family")
if family == "chimera":
t = graph_data.get("tile")
intersection_points = defaultdict(set)
if graph_data["labels"] == "coordinate":
for v in G:
_chimera_all_intersection_points(intersection_points, v, t, *v)
elif graph_data["data"]:
for v, d in G.nodes(data=True):
_chimera_all_intersection_points(
intersection_points, v, t, *d["chimera_index"])
else:
raise NotImplementedError("Please pass in a Chimera graph created"
" with an optional parameter 'data=True' or 'coordinates=True'")
return intersection_points
elif family == "pegasus":
offsets = [graph_data['vertical_offsets'],
graph_data['horizontal_offsets']]
intersection_points = defaultdict(set)
if graph_data["labels"] == "coordinate":
for v in G:
_pegasus_all_intersection_points(intersection_points, offsets,
v, *v)
elif graph_data["data"]:
for v, d in G.nodes(data=True):
_pegasus_all_intersection_points(intersection_points, offsets,
v, *d["pegasus_index"])
else:
raise NotImplementedError("Please pass in a Pegasus graph created"
" with an optional parameter 'data=True' or 'coordinates=True'")
return intersection_points
def _chimera_all_intersection_points(intersection_points, v, t, i, j, u, k):
"""
Given a coordinate vertex, v = (i, j, u, k), of a Chimera with tile, t, get all intersection points it is in.
"""
# If you're a row vertex, you go in all grid points of your row intersecting columns in your unit tile
if u == 1:
row = i*t + k
for kk in range(t):
col = j*t + kk
intersection_points[(col, row)].add(v)
# Sameish for a column vertex.
elif u == 0:
col = j*t + k
for kk in range(t):
row = i*t + kk
intersection_points[(col, row)].add(v)
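# Worked example (comments only): with tile size t = 4, the row vertex
# v = (0, 1, 1, 2) gives row = 0*4 + 2 = 2 and columns 4..7, so v is added to
# the intersection points (4, 2), (5, 2), (6, 2) and (7, 2).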
def _pegasus_all_intersection_points(intersection_points, offsets, v, u, w, k, z):
"""
Given a coordinate vertex, v = (u, w, k, z), of a Pegasus graph with offsets
`offsets`, get all intersection points it is in.
"""
# Each horizontal qubit spans twelve grid-points in the row 12w+k
if u == 1:
row = 12*w + k
col_0 = 12*z + offsets[u][k]
for kk in range(12):
intersection_points[(col_0 + kk, row)].add(v)
# Sameish for a column vertex.
elif u == 0:
col = 12*w + k
row_0 = 12*z + offsets[u][k]
for kk in range(12):
intersection_points[(col, row_0 + kk)].add(v)
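# Worked example (comments only; the offset value is hypothetical): if
# offsets[1][3] were 4, the horizontal qubit v = (1, 2, 3, 0) would give
# row = 12*2 + 3 = 27 and col_0 = 12*0 + 4 = 4, so v would be added to the
# twelve intersection points (4, 27) through (15, 27).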
def closest(S_layout, T_layout, subset_size=(1, 1), num_neighbors=1, **kwargs):
"""
Maps vertices of S to the closest vertices of T as given by S_layout and T_layout. i.e. For each vertex u in
S_layout and each vertex v in T_layout, map u to the v with minimum Euclidean distance (||u - v||_2).
Parameters
----------
S_layout : layout.Layout
A layout for S; i.e. a map from S to R^d.
T_layout : layout.Layout
A layout for T; i.e. a map from T to R^d.
subset_size : tuple (default (1, 1))
A lower (subset_size[0]) and upper (subset_size[1]) bound on the size of subsets of T that will be considered
when mapping vertices of S.
num_neighbors : int (default 1)
The number of closest neighbors to query from the KDTree--the neighbor with minimum overlap is chosen.
Increasing this reduces overlap, but increases runtime.
Returns
-------
placement : dict
A mapping from vertices of S (keys) to subsets of vertices of T (values).
"""
# Extract the target graph
T = T_layout.G
# A new layout for subsets of T.
T_subgraph_layout = {}
# Get connected subgraphs to consider mapping to
T_subgraphs = _get_connected_subgraphs(T, subset_size[1])
# Calculate the barycenter (centroid) of each subset
for k in range(subset_size[0], subset_size[1]+1):
if k == 1:
for subgraph in T_subgraphs[k]:
v, = subgraph # Unpack the subgraph of size 1
T_subgraph_layout[subgraph] = T_layout[v]
else:
for subgraph in T_subgraphs[k]:
T_subgraph_layout[subgraph] = np.mean(
np.array([T_layout[v] for v in subgraph]), axis=0)
# Use scipy's KDTree to solve the nearest neighbor problem.
# This requires a few lookup tables
T_subset_lookup = {tuple(p): V for V, p in T_subgraph_layout.items()}
layout_points = [tuple(p) for p in T_subgraph_layout.values()]
overlap_counter = Counter()
try:
tree = spatial.KDTree(layout_points) # This fails for the empty graph
except ValueError:
pass
placement = {}
for u, u_pos in S_layout.items():
distances, v_indices = tree.query([u_pos], num_neighbors)
# KDTree.query returns (num_neighbors, )-shaped arrays if num_neighbors == 1,
# or (1, num_neighbors)-shaped arrays if num_neighbors != 1
if num_neighbors != 1:
v_indices = v_indices[0]
distances = distances[0]
placement[u] = _minimize_overlap(
distances, v_indices, T_subset_lookup, layout_points, overlap_counter)
return placement
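# Hedged usage sketch (not part of the original module; same illustrative
# assumptions as the intersection() sketch above):
#
#   placement = closest(layout.Layout(S), layout.Layout(T),
#                       subset_size=(1, 2), num_neighbors=3)
#   # Each vertex of S is mapped to the size-1 or size-2 subset of T whose
#   # centroid is nearest, preferring less-used qubits among the 3 candidates.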
def _get_connected_subgraphs(G, k, single_set=False):
"""
Finds all connected subgraphs S of G within a given subset_size.
Parameters
----------
G : networkx graph
The graph you want to find all connected subgraphs of.
k : int
An upper bound of the size of the connected subgraphs to find.
:param str id: The unique identifier for the rule.
:param str prefix: The object key prefix identifying one or more objects to which the rule applies.
"""
pulumi.set(__self__, "enabled", enabled)
if abort_incomplete_multipart_upload_days is not None:
pulumi.set(__self__, "abort_incomplete_multipart_upload_days", abort_incomplete_multipart_upload_days)
if expiration is not None:
pulumi.set(__self__, "expiration", expiration)
if id is not None:
pulumi.set(__self__, "id", id)
if noncurrent_version_expiration is not None:
pulumi.set(__self__, "noncurrent_version_expiration", noncurrent_version_expiration)
if prefix is not None:
pulumi.set(__self__, "prefix", prefix)
@property
@pulumi.getter
def enabled(self) -> bool:
"""
Specifies whether the lifecycle rule is active.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="abortIncompleteMultipartUploadDays")
def abort_incomplete_multipart_upload_days(self) -> Optional[int]:
"""
Specifies the number of days after initiating a multipart upload when the multipart upload must be completed.
"""
return pulumi.get(self, "abort_incomplete_multipart_upload_days")
@property
@pulumi.getter
def expiration(self) -> Optional['outputs.ObjectStorageBucketLifecycleRuleExpiration']:
return pulumi.get(self, "expiration")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
The unique identifier for the rule.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="noncurrentVersionExpiration")
def noncurrent_version_expiration(self) -> Optional['outputs.ObjectStorageBucketLifecycleRuleNoncurrentVersionExpiration']:
return pulumi.get(self, "noncurrent_version_expiration")
@property
@pulumi.getter
def prefix(self) -> Optional[str]:
"""
The object key prefix identifying one or more objects to which the rule applies.
"""
return pulumi.get(self, "prefix")
@pulumi.output_type
class ObjectStorageBucketLifecycleRuleExpiration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "expiredObjectDeleteMarker":
suggest = "expired_object_delete_marker"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ObjectStorageBucketLifecycleRuleExpiration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ObjectStorageBucketLifecycleRuleExpiration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ObjectStorageBucketLifecycleRuleExpiration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
date: Optional[str] = None,
days: Optional[int] = None,
expired_object_delete_marker: Optional[bool] = None):
"""
:param str date: Specifies the date after which you want the corresponding action to take effect.
:param int days: Specifies the number of days non-current object versions expire.
:param bool expired_object_delete_marker: On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Linode Object Storage to delete expired object delete markers. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.
"""
if date is not None:
pulumi.set(__self__, "date", date)
if days is not None:
pulumi.set(__self__, "days", days)
if expired_object_delete_marker is not None:
pulumi.set(__self__, "expired_object_delete_marker", expired_object_delete_marker)
@property
@pulumi.getter
def date(self) -> Optional[str]:
"""
Specifies the date after which you want the corresponding action to take effect.
"""
return pulumi.get(self, "date")
@property
@pulumi.getter
def days(self) -> Optional[int]:
"""
Specifies the number of days non-current object versions expire.
"""
return pulumi.get(self, "days")
@property
@pulumi.getter(name="expiredObjectDeleteMarker")
def expired_object_delete_marker(self) -> Optional[bool]:
"""
On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Linode Object Storage to delete expired object delete markers. This cannot be specified with Days or Date in a Lifecycle Expiration Policy.
"""
return pulumi.get(self, "expired_object_delete_marker")
@pulumi.output_type
class ObjectStorageBucketLifecycleRuleNoncurrentVersionExpiration(dict):
def __init__(__self__, *,
days: int):
"""
:param int days: Specifies the number of days non-current object versions expire.
"""
pulumi.set(__self__, "days", days)
@property
@pulumi.getter
def days(self) -> int:
"""
Specifies the number of days non-current object versions expire.
"""
return pulumi.get(self, "days")
@pulumi.output_type
class ObjectStorageKeyBucketAccess(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "bucketName":
suggest = "bucket_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ObjectStorageKeyBucketAccess. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ObjectStorageKeyBucketAccess.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ObjectStorageKeyBucketAccess.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
bucket_name: str,
cluster: str,
permissions: str):
"""
:param str bucket_name: The unique label of the bucket to which the key will grant limited access.
:param str cluster: The Object Storage cluster where a bucket to which the key is granting access is hosted.
:param str permissions: This Limited Access Key’s permissions for the selected bucket. *Changing `permissions` forces the creation of a new Object Storage Key.* (`read_write`, `read_only`)
"""
pulumi.set(__self__, "bucket_name", bucket_name)
pulumi.set(__self__, "cluster", cluster)
pulumi.set(__self__, "permissions", permissions)
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> str:
"""
The unique label of the bucket to which the key will grant limited access.
"""
return pulumi.get(self, "bucket_name")
@property
@pulumi.getter
def cluster(self) -> str:
"""
The Object Storage cluster where a bucket to which the key is granting access is hosted.
"""
return pulumi.get(self, "cluster")
@property
@pulumi.getter
def permissions(self) -> str:
"""
This Limited Access Key’s permissions for the selected bucket. *Changing `permissions` forces the creation of a new Object Storage Key.* (`read_write`, `read_only`)
"""
return pulumi.get(self, "permissions")
@pulumi.output_type
class StackScriptUserDefinedField(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "manyOf":
suggest = "many_of"
elif key == "oneOf":
suggest = "one_of"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StackScriptUserDefinedField. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StackScriptUserDefinedField.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StackScriptUserDefinedField.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
default: Optional[str] = None,
example: Optional[str] = None,
label: Optional[str] = None,
many_of: Optional[str] = None,
name: Optional[str] = None,
one_of: Optional[str] = None):
"""
:param str label: The StackScript's label is for display purposes only.
"""
if default is not None:
pulumi.set(__self__, "default", default)
if example is not None:
pulumi.set(__self__, "example", example)
if label is not None:
pulumi.set(__self__, "label", label)
if many_of is not None:
pulumi.set(__self__, "many_of", many_of)
if name is not None:
pulumi.set(__self__, "name", name)
if one_of is not None:
pulumi.set(__self__, "one_of", one_of)
@property
@pulumi.getter
def default(self) -> Optional[str]:
return pulumi.get(self, "default")
@property
@pulumi.getter
def example(self) -> Optional[str]:
return pulumi.get(self, "example")
@property
@pulumi.getter
def label(self) -> Optional[str]:
"""
The StackScript's label is for display purposes only.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter(name="manyOf")
def many_of(self) -> Optional[str]:
return pulumi.get(self, "many_of")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="oneOf")
def one_of(self) -> Optional[str]:
return pulumi.get(self, "one_of")
@pulumi.output_type
class UserDomainGrant(dict):
def __init__(__self__, *,
id: int,
permissions: str):
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "permissions", permissions)
@property
@pulumi.getter
def id(self) -> int:
return pulumi.get(self, "id")
@property
@pulumi.getter
def permissions(self) -> str:
return pulumi.get(self, "permissions")
@pulumi.output_type
class UserFirewallGrant(dict):
def __init__(__self__, *,
id: int,
permissions: str):
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "permissions", permissions)
@property
@pulumi.getter
def id(self) -> int:
return pulumi.get(self, "id")
@property
@pulumi.getter
def permissions(self) -> str:
return pulumi.get(self, "permissions")
@pulumi.output_type
class UserGlobalGrants(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "accountAccess":
suggest = "account_access"
elif key == "addDomains":
suggest = "add_domains"
elif key == "addFirewalls":
suggest = "add_firewalls"
elif key == "addImages":
suggest = "add_images"
elif key == "addLinodes":
suggest = "add_linodes"
elif key == "addLongview":
suggest = "add_longview"
elif key == "addNodebalancers":
suggest = "add_nodebalancers"
elif key == "addStackscripts":
suggest = "add_stackscripts"
elif key == "addVolumes":
suggest = "add_volumes"
elif key == "cancelAccount":
suggest = "cancel_account"
elif key == "longviewSubscription":
suggest = "longview_subscription"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in UserGlobalGrants. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
UserGlobalGrants.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
UserGlobalGrants.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
account_access: Optional[str] = None,
add_domains: Optional[bool] = None,
add_firewalls: Optional[bool] = None,
add_images: Optional[bool] = None,
add_linodes: Optional[bool] = None,
add_longview: Optional[bool] = None,
add_nodebalancers: Optional[bool] = None,
add_stackscripts: Optional[bool] = None,
add_volumes: Optional[bool] = None,
cancel_account: Optional[bool] = None,
longview_subscription: Optional[bool] = None):
if account_access is not None:
pulumi.set(__self__, "account_access", account_access)
if add_domains is not None:
pulumi.set(__self__, "add_domains", add_domains)
if add_firewalls is not None:
pulumi.set(__self__, "add_firewalls", add_firewalls)
if add_images is not None:
pulumi.set(__self__, "add_images", add_images)
if add_linodes is not None:
pulumi.set(__self__, "add_linodes", add_linodes)
if add_longview is not None:
pulumi.set(__self__, "add_longview", add_longview)
if add_nodebalancers is not None:
pulumi.set(__self__, "add_nodebalancers", add_nodebalancers)
if add_stackscripts is not None:
pulumi.set(__self__, "add_stackscripts", add_stackscripts)
if add_volumes is not None:
pulumi.set(__self__, "add_volumes", add_volumes)
if cancel_account is not None:
pulumi.set(__self__, "cancel_account", cancel_account)
if longview_subscription is not None:
pulumi.set(__self__, "longview_subscription", longview_subscription)
@property
@pulumi.getter(name="accountAccess")
def account_access(self) -> Optional[str]:
return pulumi.get(self, "account_access")
@property
@pulumi.getter(name="addDomains")
def add_domains(self) -> Optional[bool]:
return pulumi.get(self, "add_domains")
@property
@pulumi.getter(name="addFirewalls")
def add_firewalls(self) -> Optional[bool]:
return pulumi.get(self, "add_firewalls")
@property
@pulumi.getter(name="addImages")
def add_images(self) -> Optional[bool]:
return pulumi.get(self, "add_images")
@property
@pulumi.getter(name="addLinodes")
def add_linodes(self) -> Optional[bool]:
return pulumi.get(self, "add_linodes")
import base64
import json
import os
import pathlib
import sys
import time
from test.utils import HERE, SIMPLE_PATH
from urllib.parse import urljoin
import hypothesis
import pytest
import requests
import yaml
from _pytest.main import ExitCode
from hypothesis import HealthCheck, Phase, Verbosity
from schemathesis import Case, DataGenerationMethod, fixups
from schemathesis._compat import metadata
from schemathesis.checks import ALL_CHECKS
from schemathesis.cli import reset_checks
from schemathesis.hooks import unregister_all
from schemathesis.loaders import from_uri
from schemathesis.models import Endpoint
from schemathesis.runner import DEFAULT_CHECKS
from schemathesis.targets import DEFAULT_TARGETS
PHASES = "explicit, reuse, generate, target, shrink"
if metadata.version("hypothesis") < "4.5":
PHASES = "explicit, reuse, generate, shrink"
HEALTH_CHECKS = "data_too_large|filter_too_much|too_slow|return_value|large_base_example|not_a_test_method"
if metadata.version("hypothesis") < "5.0":
HEALTH_CHECKS = (
"data_too_large|filter_too_much|too_slow|return_value|hung_test|large_base_example|not_a_test_method"
)
def test_commands_help(cli):
result = cli.main()
assert result.exit_code == ExitCode.OK, result.stdout
lines = result.stdout.split("\n")
assert lines[11] == " replay Replay requests from a saved cassette."
assert lines[12] == " run Perform schemathesis test."
result_help = cli.main("--help")
result_h = cli.main("-h")
assert result.stdout == result_h.stdout == result_help.stdout
def test_run_subprocess(testdir):
# To verify that CLI entry point is installed properly
result = testdir.run("schemathesis")
assert result.ret == ExitCode.OK
def test_commands_version(cli):
result = cli.main("--version")
assert result.exit_code == ExitCode.OK, result.stdout
assert "version" in result.stdout.split("\n")[0]
@pytest.mark.parametrize(
"args, error",
(
(("run",), "Error: Missing argument 'SCHEMA'."),
(("run", "not-url"), "Error: Invalid SCHEMA, must be a valid URL or file path."),
(("run", SIMPLE_PATH), 'Error: Missing argument, "--base-url" is required for SCHEMA specified by file.'),
(("run", SIMPLE_PATH, "--base-url=test"), "Error: Invalid base URL"),
(("run", SIMPLE_PATH, "--base-url=127.0.0.1:8080"), "Error: Invalid base URL"),
(
("run", "http://127.0.0.1", "--request-timeout=-5"),
"Error: Invalid value for '--request-timeout': -5 is smaller than the minimum valid value 1.",
),
(
("run", "http://127.0.0.1", "--request-timeout=0"),
"Error: Invalid value for '--request-timeout': 0 is smaller than the minimum valid value 1.",
),
(
("run", "http://127.0.0.1", "--method=+"),
"Error: Invalid value for '--method' / '-M': Invalid regex: nothing to repeat at position 0",
),
(
("run", "http://127.0.0.1", "--auth=123"),
"Error: Invalid value for '--auth' / '-a': Should be in KEY:VALUE format. Got: 123",
),
(
("run", "http://127.0.0.1", "--auth=:pass"),
"Error: Invalid value for '--auth' / '-a': Username should not be empty",
),
(
("run", "http://127.0.0.1", "--auth=тест:pass"),
"Error: Invalid value for '--auth' / '-a': Username should be latin-1 encodable",
),
(
("run", "http://127.0.0.1", "--auth=user:тест"),
"Error: Invalid value for '--auth' / '-a': Password should be latin-1 encodable",
),
(
("run", "http://127.0.0.1", "--auth-type=random"),
"Error: Invalid value for '--auth-type' / '-A': invalid choice: random. (choose from basic, digest)",
),
(
("run", "http://127.0.0.1", "--header=123"),
"Error: Invalid value for '--header' / '-H': Should be in KEY:VALUE format. Got: 123",
),
(
("run", "http://127.0.0.1", "--header=:"),
"Error: Invalid value for '--header' / '-H': Header name should not be empty",
),
(
("run", "http://127.0.0.1", "--header= :"),
"Error: Invalid value for '--header' / '-H': Header name should not be empty",
),
(
("run", "http://127.0.0.1", "--hypothesis-phases=explicit,first,second"),
"Error: Invalid value for '--hypothesis-phases': invalid choice(s): first, second. "
f"Choose from {PHASES}",
),
(
("run", "http://127.0.0.1", "--hypothesis-deadline=wrong"),
"Error: Invalid value for '--hypothesis-deadline': wrong is not a valid integer or None",
),
(
("run", "http://127.0.0.1", "--hypothesis-deadline=0"),
"Error: Invalid value for '--hypothesis-deadline': 0 is not in the valid range of 1 to 86399999913600000.",
),
(
("run", "http://127.0.0.1", "--header=тест:test"),
"Error: Invalid value for '--header' / '-H': Header name should be latin-1 encodable",
),
(
("run", "http://127.0.0.1", "--header=test:тест"),
"Error: Invalid value for '--header' / '-H': Header value should be latin-1 encodable",
),
(("run", "//test"), "Error: Invalid SCHEMA, must be a valid URL or file path."),
(
("run", "http://127.0.0.1", "--max-response-time=0"),
"Error: Invalid value for '--max-response-time': 0 is smaller than the minimum valid value 1.",
),
),
)
def test_commands_run_errors(cli, args, error):
# When invalid arguments are passed to CLI
result = cli.main(*args)
# Then an appropriate error should be displayed
assert result.exit_code == ExitCode.INTERRUPTED, result.stdout
assert result.stdout.strip().split("\n")[-1] == error
def test_commands_run_help(cli):
result_help = cli.main("run", "--help")
assert result_help.exit_code == ExitCode.OK, result_help.stdout
assert result_help.stdout.strip().split("\n") == [
"Usage: schemathesis run [OPTIONS] SCHEMA",
"",
" Perform schemathesis test against an API specified by SCHEMA.",
"",
" SCHEMA must be a valid URL or file path pointing to an Open API / Swagger",
" specification.",
"",
"Options:",
" -c, --checks [not_a_server_error|status_code_conformance|"
"content_type_conformance|response_headers_conformance|response_schema_conformance|all]",
" List of checks to run.",
" -D, --data-generation-method [positive]",
" Defines how Schemathesis generates data for",
" tests.",
"",
" --max-response-time INTEGER RANGE",
" A custom check that will fail if the response",
" time is greater than the specified one in",
" milliseconds.",
"",
" -t, --target [response_time|all]",
" Targets for input generation.",
" -x, --exitfirst Exit instantly on first error or failed test.",
" -a, --auth TEXT Server user and password. Example:",
" USER:PASSWORD",
"",
" -A, --auth-type [basic|digest] The authentication mechanism to be used.",
" Defaults to 'basic'.",
"",
" -H, --header TEXT Custom header in a that will be used in all",
" requests to the server. Example:",
r" Authorization: Bearer\ 123",
"",
" -E, --endpoint TEXT Filter schemathesis test by endpoint pattern.",
r" Example: users/\d+",
"",
" -M, --method TEXT Filter schemathesis test by HTTP method.",
" -T, --tag TEXT Filter schemathesis test by schema tag",
" pattern.",
"",
" -O, --operation-id TEXT Filter schemathesis test by operationId",
" pattern.",
"",
" -w, --workers INTEGER RANGE Number of workers to run tests.",
" -b, --base-url TEXT Base URL address of the API, required for",
" SCHEMA if specified by file.",
"",
" --app TEXT WSGI/ASGI application to test.",
" --request-timeout INTEGER RANGE",
" Timeout in milliseconds for network requests",
" during the test run.",
"",
" --request-tls-verify TEXT Controls whether Schemathesis verifies the",
" server's TLS certificate. You can also pass",
" the path to a CA_BUNDLE file for private",
" certs.",
"",
" --validate-schema BOOLEAN Enable or disable validation of input schema.",
" --skip-deprecated-endpoints Skip testing of deprecated endpoints.",
" --junit-xml FILENAME Create junit-xml style report file at given",
" path.",
"",
" --show-errors-tracebacks Show full tracebacks for internal errors.",
" --store-network-log FILENAME Store requests and responses into a file.",
" --fixups [fast_api|all] Install specified compatibility fixups.",
" --stateful [links] Utilize stateful testing capabilities.",
" --stateful-recursion-limit INTEGER RANGE",
" Limit recursion depth for stateful testing.",
" --force-schema-version [20|30] Force Schemathesis to parse the input schema",
" with the specified spec version.",
"",
" --hypothesis-deadline INTEGER RANGE",
" Duration in milliseconds that each individual",
" example with a test is not allowed to exceed.",
"",
" --hypothesis-derandomize Use Hypothesis's deterministic mode.",
" --hypothesis-max-examples INTEGER RANGE",
" Maximum number of generated examples per each",
" method/endpoint combination.",
"",
f" --hypothesis-phases [{PHASES.replace(', ', '|')}]",
" Control which phases should be run.",
" --hypothesis-report-multiple-bugs BOOLEAN",
" Raise only the exception with the smallest",
" minimal example.",
"",
" --hypothesis-seed INTEGER Set a seed to use for all Hypothesis tests.",
f" --hypothesis-suppress-health-check [{HEALTH_CHECKS}]",
" Comma-separated list of health checks to",
" disable.",
"",
" --hypothesis-verbosity [quiet|normal|verbose|debug]",
" Verbosity level of Hypothesis messages.",
" -v, --verbosity Reduce verbosity of error output.",
" -h, --help Show this message and exit.",
]
SCHEMA_URI = "https://example.com/swagger.json"
@pytest.mark.parametrize(
"args, expected",
(
([], {}),
(["--exitfirst"], {"exit_first": True}),
(["--workers=2"], {"workers_num": 2}),
(["--hypothesis-seed=123"], {"seed": 123}),
(
[
"--hypothesis-deadline=1000",
"--hypothesis-derandomize",
"--hypothesis-max-examples=1000",
"--hypothesis-phases=explicit,generate",
"--hypothesis-report-multiple-bugs=0",
"--hypothesis-suppress-health-check=too_slow,filter_too_much",
"--hypothesis-verbosity=normal",
],
{
"hypothesis_options": {
"deadline": 1000,
"derandomize": True,
"max_examples": 1000,
"phases": [Phase.explicit, Phase.generate],
"report_multiple_bugs": False,
"suppress_health_check": [HealthCheck.too_slow, HealthCheck.filter_too_much],
"verbosity": Verbosity.normal,
}
},
),
(["--hypothesis-deadline=None"], {"hypothesis_options": {"deadline": None}}),
(["--max-response-time=10"], {"max_response_time": 10}),
),
)
def test_execute_arguments(cli, mocker, simple_schema, args, expected):
response = requests.Response()
response.status_code = 200
response._content = json.dumps(simple_schema).encode()
mocker.patch("schemathesis.loaders.requests.get", return_value=response)
execute = mocker.patch("schemathesis.runner.execute_from_schema", autospec=True)
result = cli.run(SCHEMA_URI, *args)
expected = {
"app": None,
"base_url": None,
"checks": DEFAULT_CHECKS,
"targets": DEFAULT_TARGETS,
"endpoint": (),
"method": (),
"tag": (),
"operation_id": (),
"schema_uri": SCHEMA_URI,
"validate_schema": True,
"data_generation_methods": [DataGenerationMethod.default()],
"skip_deprecated_endpoints": False,
"force_schema_version": None,
"loader": from_uri,
"hypothesis_options": {},
"workers_num": 1,
"exit_first": False,
"fixups": (),
"stateful": None,
"stateful_recursion_limit": 5,
"auth": None,
"auth_type": None,
"headers": {},
"request_timeout": None,
"request_tls_verify": True,
"store_interactions": False,
"seed": None,
"max_response_time": None,
**expected,
}
assert result.exit_code == ExitCode.OK, result.stdout
assert execute.call_args[1] == expected
@pytest.mark.parametrize(
"args, expected",
(
(["--auth=test:test"], {"auth": ("test", "test"), "auth_type": "basic"}),
(["--auth=test:test", "--auth-type=digest"], {"auth": ("test", "test"), "auth_type": "digest"}),
(["--auth=test:test", "--auth-type=DIGEST"], {"auth": ("test", "test"), "auth_type": "digest"}),
(["--header=Authorization:Bearer 123"], {"headers": {"Authorization": "Bearer 123"}}),
(["--header=Authorization: Bearer 123 "], {"headers": {"Authorization": "Bearer 123 "}}),
(["--method=POST", "--method", "GET"], {"method": ("POST", "GET")}),
(["--method=POST", "--auth=test:test"], {"auth": ("test", "test"), "auth_type": "basic", "method": ("POST",)}),
m.x866*m.b634 <= 0)
m.c868 = Constraint(expr=m.x217*m.x217 - m.x867*m.b634 <= 0)
m.c869 = Constraint(expr=m.x218*m.x218 - m.x868*m.b634 <= 0)
m.c870 = Constraint(expr=m.x219*m.x219 - m.x869*m.b634 <= 0)
m.c871 = Constraint(expr=m.x220*m.x220 - m.x870*m.b634 <= 0)
m.c872 = Constraint(expr=m.x221*m.x221 - m.x871*m.b634 <= 0)
m.c873 = Constraint(expr=m.x222*m.x222 - m.x872*m.b634 <= 0)
m.c874 = Constraint(expr=m.x223*m.x223 - m.x873*m.b634 <= 0)
m.c875 = Constraint(expr=m.x224*m.x224 - m.x874*m.b634 <= 0)
m.c876 = Constraint(expr=m.x225*m.x225 - m.x875*m.b634 <= 0)
m.c877 = Constraint(expr=m.x226*m.x226 - m.x876*m.b635 <= 0)
m.c878 = Constraint(expr=m.x227*m.x227 - m.x877*m.b635 <= 0)
m.c879 = Constraint(expr=m.x228*m.x228 - m.x878*m.b635 <= 0)
m.c880 = Constraint(expr=m.x229*m.x229 - m.x879*m.b635 <= 0)
m.c881 = Constraint(expr=m.x230*m.x230 - m.x880*m.b635 <= 0)
m.c882 = Constraint(expr=m.x231*m.x231 - m.x881*m.b635 <= 0)
m.c883 = Constraint(expr=m.x232*m.x232 - m.x882*m.b635 <= 0)
m.c884 = Constraint(expr=m.x233*m.x233 - m.x883*m.b635 <= 0)
m.c885 = Constraint(expr=m.x234*m.x234 - m.x884*m.b635 <= 0)
m.c886 = Constraint(expr=m.x235*m.x235 - m.x885*m.b635 <= 0)
m.c887 = Constraint(expr=m.x236*m.x236 - m.x886*m.b635 <= 0)
m.c888 = Constraint(expr=m.x237*m.x237 - m.x887*m.b635 <= 0)
m.c889 = Constraint(expr=m.x238*m.x238 - m.x888*m.b635 <= 0)
m.c890 = Constraint(expr=m.x239*m.x239 - m.x889*m.b635 <= 0)
m.c891 = Constraint(expr=m.x240*m.x240 - m.x890*m.b635 <= 0)
m.c892 = Constraint(expr=m.x241*m.x241 - m.x891*m.b635 <= 0)
m.c893 = Constraint(expr=m.x242*m.x242 - m.x892*m.b635 <= 0)
m.c894 = Constraint(expr=m.x243*m.x243 - m.x893*m.b635 <= 0)
m.c895 = Constraint(expr=m.x244*m.x244 - m.x894*m.b635 <= 0)
m.c896 = Constraint(expr=m.x245*m.x245 - m.x895*m.b635 <= 0)
m.c897 = Constraint(expr=m.x246*m.x246 - m.x896*m.b635 <= 0)
m.c898 = Constraint(expr=m.x247*m.x247 - m.x897*m.b635 <= 0)
m.c899 = Constraint(expr=m.x248*m.x248 - m.x898*m.b635 <= 0)
m.c900 = Constraint(expr=m.x249*m.x249 - m.x899*m.b635 <= 0)
m.c901 = Constraint(expr=m.x250*m.x250 - m.x900*m.b635 <= 0)
m.c902 = Constraint(expr=m.x251*m.x251 - m.x901*m.b636 <= 0)
m.c903 = Constraint(expr=m.x252*m.x252 - m.x902*m.b636 <= 0)
m.c904 = Constraint(expr=m.x253*m.x253 - m.x903*m.b636 <= 0)
m.c905 = Constraint(expr=m.x254*m.x254 - m.x904*m.b636 <= 0)
m.c906 = Constraint(expr=m.x255*m.x255 - m.x905*m.b636 <= 0)
m.c907 = Constraint(expr=m.x256*m.x256 - m.x906*m.b636 <= 0)
m.c908 = Constraint(expr=m.x257*m.x257 - m.x907*m.b636 <= 0)
m.c909 = Constraint(expr=m.x258*m.x258 - m.x908*m.b636 <= 0)
m.c910 = Constraint(expr=m.x259*m.x259 - m.x909*m.b636 <= 0)
m.c911 = Constraint(expr=m.x260*m.x260 - m.x910*m.b636 <= 0)
m.c912 = Constraint(expr=m.x261*m.x261 - m.x911*m.b636 <= 0)
m.c913 = Constraint(expr=m.x262*m.x262 - m.x912*m.b636 <= 0)
m.c914 = Constraint(expr=m.x263*m.x263 - m.x913*m.b636 <= 0)
m.c915 = Constraint(expr=m.x264*m.x264 - m.x914*m.b636 <= 0)
m.c916 = Constraint(expr=m.x265*m.x265 - m.x915*m.b636 <= 0)
m.c917 = Constraint(expr=m.x266*m.x266 - m.x916*m.b636 <= 0)
m.c918 = Constraint(expr=m.x267*m.x267 - m.x917*m.b636 <= 0)
m.c919 = Constraint(expr=m.x268*m.x268 - m.x918*m.b636 <= 0)
m.c920 = Constraint(expr=m.x269*m.x269 - m.x919*m.b636 <= 0)
m.c921 = Constraint(expr=m.x270*m.x270 - m.x920*m.b636 <= 0)
m.c922 = Constraint(expr=m.x271*m.x271 - m.x921*m.b636 <= 0)
m.c923 = Constraint(expr=m.x272*m.x272 - m.x922*m.b636 <= 0)
m.c924 = Constraint(expr=m.x273*m.x273 - m.x923*m.b636 <= 0)
m.c925 = Constraint(expr=m.x274*m.x274 - m.x924*m.b636 <= 0)
m.c926 = Constraint(expr=m.x275*m.x275 - m.x925*m.b636 <= 0)
m.c927 = Constraint(expr=m.x276*m.x276 - m.x926*m.b637 <= 0)
m.c928 = Constraint(expr=m.x277*m.x277 - m.x927*m.b637 <= 0)
m.c929 = Constraint(expr=m.x278*m.x278 - m.x928*m.b637 <= 0)
m.c930 = Constraint(expr=m.x279*m.x279 - m.x929*m.b637 <= 0)
m.c931 = Constraint(expr=m.x280*m.x280 - m.x930*m.b637 <= 0)
m.c932 = Constraint(expr=m.x281*m.x281 - m.x931*m.b637 <= 0)
m.c933 = Constraint(expr=m.x282*m.x282 - m.x932*m.b637 <= 0)
m.c934 = Constraint(expr=m.x283*m.x283 - m.x933*m.b637 <= 0)
m.c935 = Constraint(expr=m.x284*m.x284 - m.x934*m.b637 <= 0)
m.c936 = Constraint(expr=m.x285*m.x285 - m.x935*m.b637 <= 0)
m.c937 = Constraint(expr=m.x286*m.x286 - m.x936*m.b637 <= 0)
m.c938 = Constraint(expr=m.x287*m.x287 - m.x937*m.b637 <= 0)
m.c939 = Constraint(expr=m.x288*m.x288 - m.x938*m.b637 <= 0)
m.c940 = Constraint(expr=m.x289*m.x289 - m.x939*m.b637 <= 0)
m.c941 = Constraint(expr=m.x290*m.x290 - m.x940*m.b637 <= 0)
m.c942 = Constraint(expr=m.x291*m.x291 - m.x941*m.b637 <= 0)
m.c943 = Constraint(expr=m.x292*m.x292 - m.x942*m.b637 <= 0)
m.c944 = Constraint(expr=m.x293*m.x293 - m.x943*m.b637 <= 0)
m.c945 = Constraint(expr=m.x294*m.x294 - m.x944*m.b637 <= 0)
m.c946 = Constraint(expr=m.x295*m.x295 - m.x945*m.b637 <= 0)
m.c947 = Constraint(expr=m.x296*m.x296 - m.x946*m.b637 <= 0)
m.c948 = Constraint(expr=m.x297*m.x297 - m.x947*m.b637 <= 0)
m.c949 = Constraint(expr=m.x298*m.x298 - m.x948*m.b637 <= 0)
m.c950 = Constraint(expr=m.x299*m.x299 - m.x949*m.b637 <= 0)
m.c951 = Constraint(expr=m.x300*m.x300 - m.x950*m.b637 <= 0)
m.c952 = Constraint(expr=m.x301*m.x301 - m.x951*m.b638 <= 0)
m.c953 = Constraint(expr=m.x302*m.x302 - m.x952*m.b638 <= 0)
m.c954 = Constraint(expr=m.x303*m.x303 - m.x953*m.b638 <= 0)
m.c955 = Constraint(expr=m.x304*m.x304 - m.x954*m.b638 <= 0)
m.c956 = Constraint(expr=m.x305*m.x305 - m.x955*m.b638 <= 0)
m.c957 = Constraint(expr=m.x306*m.x306 - m.x956*m.b638 <= 0)
m.c958 = Constraint(expr=m.x307*m.x307 - m.x957*m.b638 <= 0)
m.c959 = Constraint(expr=m.x308*m.x308 - m.x958*m.b638 <= 0)
m.c960 = Constraint(expr=m.x309*m.x309 - m.x959*m.b638 <= 0)
m.c961 = Constraint(expr=m.x310*m.x310 - m.x960*m.b638 <= 0)
m.c962 = Constraint(expr=m.x311*m.x311 - m.x961*m.b638 <= 0)
m.c963 = Constraint(expr=m.x312*m.x312 - m.x962*m.b638 <= 0)
m.c964 = Constraint(expr=m.x313*m.x313 - m.x963*m.b638 <= 0)
m.c965 = Constraint(expr=m.x314*m.x314 - m.x964*m.b638 <= 0)
m.c966 = Constraint(expr=m.x315*m.x315 - m.x965*m.b638 <= 0)
m.c967 = Constraint(expr=m.x316*m.x316 - m.x966*m.b638 <= 0)
m.c968 = Constraint(expr=m.x317*m.x317 - m.x967*m.b638 <= 0)
m.c969 = Constraint(expr=m.x318*m.x318 - m.x968*m.b638 <= 0)
m.c970 = Constraint(expr=m.x319*m.x319 - m.x969*m.b638 <= 0)
m.c971 = Constraint(expr=m.x320*m.x320 - m.x970*m.b638 <= 0)
m.c972 = Constraint(expr=m.x321*m.x321 - m.x971*m.b638 <= 0)
m.c973 = Constraint(expr=m.x322*m.x322 - m.x972*m.b638 <= 0)
m.c974 = Constraint(expr=m.x323*m.x323 - m.x973*m.b638 <= 0)
m.c975 = Constraint(expr=m.x324*m.x324 - m.x974*m.b638 <= 0)
m.c976 = Constraint(expr=m.x325*m.x325 - m.x975*m.b638 <= 0)
m.c977 = Constraint(expr=m.x326*m.x326 - m.x976*m.b639 <= 0)
m.c978 = Constraint(expr=m.x327*m.x327 - m.x977*m.b639 <= 0)
m.c979 = Constraint(expr=m.x328*m.x328 - m.x978*m.b639 <= 0)
m.c980 = Constraint(expr=m.x329*m.x329 - m.x979*m.b639 <= 0)
m.c981 = Constraint(expr=m.x330*m.x330 - m.x980*m.b639 <= 0)
m.c982 = Constraint(expr=m.x331*m.x331 - m.x981*m.b639 <= 0)
m.c983 = Constraint(expr=m.x332*m.x332 - m.x982*m.b639 <= 0)
m.c984 = Constraint(expr=m.x333*m.x333 - m.x983*m.b639 <= 0)
m.c985 = Constraint(expr=m.x334*m.x334 - m.x984*m.b639 <= 0)
m.c986 = Constraint(expr=m.x335*m.x335 - m.x985*m.b639 <= 0)
m.c987 = Constraint(expr=m.x336*m.x336 - m.x986*m.b639 <= 0)
m.c988 = Constraint(expr=m.x337*m.x337 - m.x987*m.b639 <= 0)
m.c989 = Constraint(expr=m.x338*m.x338 - m.x988*m.b639 <= 0)
m.c990 = Constraint(expr=m.x339*m.x339 - m.x989*m.b639 <= 0)
m.c991 = Constraint(expr=m.x340*m.x340 - m.x990*m.b639 <= 0)
m.c992 = Constraint(expr=m.x341*m.x341 - m.x991*m.b639 <= 0)
m.c993 = Constraint(expr=m.x342*m.x342 - m.x992*m.b639 <= 0)
m.c994 = Constraint(expr=m.x343*m.x343 - m.x993*m.b639 <= 0)
m.c995 = Constraint(expr=m.x344*m.x344 - m.x994*m.b639 <= 0)
m.c996 = Constraint(expr=m.x345*m.x345 - m.x995*m.b639 <= 0)
m.c997 = Constraint(expr=m.x346*m.x346 - m.x996*m.b639 <= 0)
m.c998 = Constraint(expr=m.x347*m.x347 - m.x997*m.b639 <= 0)
m.c999 = Constraint(expr=m.x348*m.x348 - m.x998*m.b639 <= 0)
m.c1000 = Constraint(expr=m.x349*m.x349 - m.x999*m.b639 <= 0)
m.c1001 = Constraint(expr=m.x350*m.x350 - m.x1000*m.b639 <= 0)
m.c1002 = Constraint(expr=m.x351*m.x351 - m.x1001*m.b640 <= 0)
m.c1003 = Constraint(expr=m.x352*m.x352 - m.x1002*m.b640 <= 0)
m.c1004 = Constraint(expr=m.x353*m.x353 - m.x1003*m.b640 <= 0)
m.c1005 = Constraint(expr=m.x354*m.x354 - m.x1004*m.b640 <= 0)
m.c1006 = Constraint(expr=m.x355*m.x355 - m.x1005*m.b640 <= 0)
m.c1007 = Constraint(expr=m.x356*m.x356 - m.x1006*m.b640 <= 0)
m.c1008 = Constraint(expr=m.x357*m.x357 - m.x1007*m.b640 <= 0)
m.c1009 = Constraint(expr=m.x358*m.x358 - m.x1008*m.b640 <= 0)
m.c1010 = Constraint(expr=m.x359*m.x359 - m.x1009*m.b640 <= 0)
m.c1011 = Constraint(expr=m.x360*m.x360 - m.x1010*m.b640 <= 0)
m.c1012 = Constraint(expr=m.x361*m.x361 - m.x1011*m.b640 <= 0)
m.c1013 = Constraint(expr=m.x362*m.x362 - m.x1012*m.b640 <= 0)
m.c1014 = Constraint(expr=m.x363*m.x363 - m.x1013*m.b640 <= 0)
m.c1015 = Constraint(expr=m.x364*m.x364 - m.x1014*m.b640 <= 0)
m.c1016 = Constraint(expr=m.x365*m.x365 - m.x1015*m.b640 <= 0)
m.c1017 = Constraint(expr=m.x366*m.x366 - m.x1016*m.b640 <= 0)
m.c1018 = Constraint(expr=m.x367*m.x367 - m.x1017*m.b640 <= 0)
m.c1019 = Constraint(expr=m.x368*m.x368 - m.x1018*m.b640 <= 0)
m.c1020 = Constraint(expr=m.x369*m.x369 - m.x1019*m.b640 <= 0)
m.c1021 = Constraint(expr=m.x370*m.x370 - m.x1020*m.b640 <= 0)
m.c1022 = Constraint(expr=m.x371*m.x371 - m.x1021*m.b640 <= 0)
m.c1023 = Constraint(expr=m.x372*m.x372 - m.x1022*m.b640 <= 0)
m.c1024 = Constraint(expr=m.x373*m.x373 - m.x1023*m.b640 <= 0)
m.c1025 = Constraint(expr=m.x374*m.x374 - m.x1024*m.b640 <= 0)
m.c1026 = Constraint(expr=m.x375*m.x375 - m.x1025*m.b640 <= 0)
m.c1027 = Constraint(expr=m.x376*m.x376 - m.x1026*m.b641 <= 0)
m.c1028 = Constraint(expr=m.x377*m.x377 - m.x1027*m.b641 <= 0)
m.c1029 = Constraint(expr=m.x378*m.x378 - m.x1028*m.b641 <= 0)
m.c1030 = Constraint(expr=m.x379*m.x379 - m.x1029*m.b641 <= 0)
m.c1031 = Constraint(expr=m.x380*m.x380 - m.x1030*m.b641 <= 0)
m.c1032 = Constraint(expr=m.x381*m.x381 - m.x1031*m.b641 <= 0)
m.c1033 = Constraint(expr=m.x382*m.x382 - m.x1032*m.b641 <= 0)
m.c1034 = Constraint(expr=m.x383*m.x383 - m.x1033*m.b641 <= 0)
m.c1035 = Constraint(expr=m.x384*m.x384 - m.x1034*m.b641 <= 0)
m.c1036 = Constraint(expr=m.x385*m.x385 - m.x1035*m.b641 <= 0)
m.c1037 = Constraint(expr=m.x386*m.x386 - m.x1036*m.b641 <= 0)
m.c1038 = Constraint(expr=m.x387*m.x387 - m.x1037*m.b641 <= 0)
m.c1039 = Constraint(expr=m.x388*m.x388 - m.x1038*m.b641 <= 0)
m.c1040 = Constraint(expr=m.x389*m.x389 - m.x1039*m.b641 <= 0)
m.c1041 = Constraint(expr=m.x390*m.x390 - m.x1040*m.b641 <= 0)
m.c1042 = Constraint(expr=m.x391*m.x391 - m.x1041*m.b641 <= 0)
m.c1043 = Constraint(expr=m.x392*m.x392 - m.x1042*m.b641 <= 0)
m.c1044 = Constraint(expr=m.x393*m.x393 - m.x1043*m.b641 <= 0)
m.c1045 = Constraint(expr=m.x394*m.x394 - m.x1044*m.b641 <= 0)
m.c1046 = Constraint(expr=m.x395*m.x395 - m.x1045*m.b641 <= 0)
m.c1047 = Constraint(expr=m.x396*m.x396 - m.x1046*m.b641 <= 0)
m.c1048 = Constraint(expr=m.x397*m.x397 - m.x1047*m.b641 <= 0)
m.c1049 = Constraint(expr=m.x398*m.x398 - m.x1048*m.b641 <= 0)
m.c1050 = Constraint(expr=m.x399*m.x399 - m.x1049*m.b641 <= 0)
m.c1051 = Constraint(expr=m.x400*m.x400 - m.x1050*m.b641 <= 0)
m.c1052 = Constraint(expr=m.x401*m.x401 - m.x1051*m.b642 <= 0)
m.c1053 = Constraint(expr=m.x402*m.x402 - m.x1052*m.b642 <= 0)
m.c1054 = Constraint(expr=m.x403*m.x403 - m.x1053*m.b642 <= 0)
m.c1055 = Constraint(expr=m.x404*m.x404 - m.x1054*m.b642 <= 0)
m.c1056 = Constraint(expr=m.x405*m.x405 - m.x1055*m.b642 <= 0)
m.c1057 = Constraint(expr=m.x406*m.x406 - m.x1056*m.b642 <= 0)
m.c1058 = Constraint(expr=m.x407*m.x407 - m.x1057*m.b642 <= 0)
m.c1059 = Constraint(expr=m.x408*m.x408 - m.x1058*m.b642 <= 0)
m.c1060 = Constraint(expr=m.x409*m.x409 - m.x1059*m.b642 <= 0)
m.c1061 = Constraint(expr=m.x410*m.x410 - m.x1060*m.b642 <= 0)
m.c1062 = Constraint(expr=m.x411*m.x411 - m.x1061*m.b642 <= 0)
m.c1063 = Constraint(expr=m.x412*m.x412 - m.x1062*m.b642 <= 0)
m.c1064 = Constraint(expr=m.x413*m.x413 - m.x1063*m.b642 <= 0)
m.c1065 = Constraint(expr=m.x414*m.x414 - m.x1064*m.b642 <= 0)
m.c1066 = Constraint(expr=m.x415*m.x415 - m.x1065*m.b642 <= 0)
m.c1067 = Constraint(expr=m.x416*m.x416 - m.x1066*m.b642 <= 0)
m.c1068 = Constraint(expr=m.x417*m.x417 - m.x1067*m.b642 <= 0)
m.c1069 = Constraint(expr=m.x418*m.x418 - m.x1068*m.b642 <= 0)
m.c1070 = Constraint(expr=m.x419*m.x419 - m.x1069*m.b642 <= 0)
m.c1071 = Constraint(expr=m.x420*m.x420 - m.x1070*m.b642 <= 0)
m.c1072 = Constraint(expr=m.x421*m.x421 - m.x1071*m.b642 <= 0)
m.c1073 = Constraint(expr=m.x422*m.x422 - m.x1072*m.b642 <= 0)
m.c1074 = Constraint(expr=m.x423*m.x423 - m.x1073*m.b642 <= 0)
m.c1075 = Constraint(expr=m.x424*m.x424 - m.x1074*m.b642 <= 0)
m.c1076 = Constraint(expr=m.x425*m.x425 - m.x1075*m.b642 <= 0)
m.c1077 = Constraint(expr=m.x426*m.x426 - m.x1076*m.b643 <= 0)
m.c1078 = Constraint(expr=m.x427*m.x427 - m.x1077*m.b643 <= 0)
m.c1079 = Constraint(expr=m.x428*m.x428 - m.x1078*m.b643 <= 0)
m.c1080 = Constraint(expr=m.x429*m.x429 - m.x1079*m.b643 <= 0)
# teek/_widgets/base.py
import collections.abc
import contextlib
import functools
import keyword
import operator
import re
import teek
from teek._tcl_calls import counts, from_tcl, make_thread_safe
from teek._structures import ConfigDict, CgetConfigureConfigDict, after_quit
_widgets = {}
_class_bindings = {}
after_quit.connect(_widgets.clear)
after_quit.connect(_class_bindings.clear)
# like what you would expect to get for combining @classmethod and @property,
# but doesn't do any magic with assigning, only getting
class _ClassProperty:
def __init__(self, getter):
assert isinstance(getter.__name__, str)
self._getter = getter
def __get__(self, instance_or_none, claas):
if instance_or_none is None:
return self._getter(claas)
attribute = self._getter.__name__
classname = claas.__name__
raise AttributeError(
"the %s attribute must be used like %s.%s, "
"not like some_%s_instance.%s"
% (attribute, classname, attribute,
classname.lower(), attribute))
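# Illustrative sketch (comments only) of how this descriptor behaves:
#
#   class Example:
#       @_ClassProperty
#       def tk_class_name(cls):
#           return 'Example'
#
#   Example.tk_class_name      # returns 'Example'
#   Example().tk_class_name    # raises AttributeError with a usage hint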
class StateSet(collections.abc.MutableSet):
def __init__(self, widget):
self._widget = widget
def __repr__(self):
# yes, this uses [] even though it behaves like a set, that's the best
# thing i thought of
return '<state set: %r>' % (list(self),)
def __iter__(self):
return iter(self._widget._call([str], self._widget, 'state'))
def __len__(self):
return len(self._widget._call([str], self._widget, 'state'))
def __contains__(self, state):
return self._widget._call(bool, self._widget, 'instate', state)
def add(self, state):
self._widget._call(None, self._widget, 'state', state)
def discard(self, state):
self._widget._call(None, self._widget, 'state', '!' + state)
class GridRowOrColumnConfig(ConfigDict):
def __init__(self, configure_method):
super().__init__()
self._types.update({
'minsize': teek.ScreenDistance,
'weight': float,
'uniform': str,
'pad': teek.ScreenDistance,
})
self._configure = configure_method
def _set(self, option, value):
self._configure(None, '-' + option, value)
def _get(self, option):
return self._configure(self._types.get(option, str), '-' + option)
def _list_options(self):
return (key.lstrip('-') for key in self._configure({}).keys())
class GridRowOrColumn:
def __init__(self, widget, row_or_column, number):
super().__init__()
self._widget = widget
self._row_or_column = row_or_column
self._number = number
self.config = GridRowOrColumnConfig(self._configure)
def __repr__(self):
return (
"<grid %s %d: has a config attribute and a get_slaves() method>"
% (self._row_or_column, self._number))
def __eq__(self, other):
if not isinstance(other, GridRowOrColumn):
return NotImplemented
return (self._widget == other._widget and
self._row_or_column == other._row_or_column and
self._number == other._number)
def __hash__(self):
return hash((self._widget, self._row_or_column, self._number))
def _configure(self, returntype, *args):
return self._widget._call(
returntype, 'grid', self._row_or_column + 'configure',
self._widget, self._number, *args)
def get_slaves(self):
return self._widget._call(
[Widget], 'grid', 'slaves', self._widget,
'-' + self._row_or_column, self._number)
# make things more tkinter-user-friendly
def _tkinter_hint(good, bad):
def dont_use_this(self, *args, **kwargs):
raise TypeError("use %s, not %s" % (good, bad))
return dont_use_this
class Widget:
"""This is a base class for all widgets.
All widgets inherit from this class, and they have all the attributes
and methods documented here.
Don't create instances of ``Widget`` yourself like ``Widget(...)``; use one
of the classes documented below instead. However, you can use ``Widget``
with :func:`isinstance`; e.g. ``isinstance(thingy, teek.Widget)`` returns
``True`` if ``thingy`` is a teek widget.
.. attribute:: config
A dict-like object that represents the widget's options.
>>> window = teek.Window()
>>> label = teek.Label(window, text='Hello World')
>>> label.config
<a config object, behaves like a dict>
>>> label.config['text']
'Hello World'
>>> label.config['text'] = 'New Text'
>>> label.config['text']
'New Text'
>>> label.config.update({'text': 'Even newer text'})
>>> label.config['text']
'Even newer text'
>>> import pprint
>>> pprint.pprint(dict(label.config)) # prints everything nicely \
# doctest: +ELLIPSIS
{...,
'text': 'Even newer text',
...}
.. attribute:: state
Represents the Ttk state of the widget. The state object behaves like a
:class:`set` of strings. For example, ``widget.state.add('disabled')``
makes a widget look like it's grayed out, and
``widget.state.remove('disabled')`` undoes that. See ``STATES`` in
:man:`ttk_intro(3tk)` for more details about states.
.. note::
Only Ttk widgets have states, and this attribute is set to None for
non-Ttk widgets. If you don't know what Ttk is, you should read
about it in :ref:`the teek tutorial <tcl-tk-tkinter-teek>`.
Most teek widgets are ttk widgets, but some aren't, and that's
mentioned in the documentation of those widgets.
.. attribute:: tk_class_name
Tk's class name of the widget class, as a string.
This is a class attribute, but it can be accessed from instances as
well:
>>> text = teek.Text(teek.Window())
>>> text.tk_class_name
'Text'
>>> teek.Text.tk_class_name
'Text'
Note that Tk's class names are sometimes different from the names of
Python classes, and this attribute can also be None in some special
cases.
>>> teek.Label.tk_class_name
'TLabel'
>>> class AsdLabel(teek.Label):
... pass
...
>>> AsdLabel.tk_class_name
'TLabel'
>>> print(teek.Window.tk_class_name)
None
>>> print(teek.Widget.tk_class_name)
None
.. attribute:: command_list
A list of command strings from :func:`.create_command`.
Append a command to this if you want the command to be deleted with
:func:`.delete_command` when the widget is destroyed (with e.g.
:meth:`.destroy`).
"""
_widget_name = None
tk_class_name = None
@make_thread_safe
def __init__(self, parent, **kwargs):
if type(self)._widget_name is None:
raise TypeError("cannot create instances of %s directly, "
"use one of its subclasses instead"
% type(self).__name__)
if parent is None:
parentpath = ''
else:
parentpath = parent.to_tcl()
self.parent = parent
# yes, it must be lowercase
safe_class_name = re.sub(r'\W', '_', type(self).__name__).lower()
# use some_widget.to_tcl() to access the _widget_path
self._widget_path = '%s.%s%d' % (
parentpath, safe_class_name, next(counts[safe_class_name]))
# TODO: some config options can only be given when the widget is
# created, add support for them
self._call(None, type(self)._widget_name, self.to_tcl())
_widgets[self.to_tcl()] = self
self.config = CgetConfigureConfigDict(
lambda returntype, *args: self._call(returntype, self, *args))
self._init_config() # subclasses should override this and use super
# support kwargs like from_=1, because from=1 is invalid syntax
for invalid_syntax in keyword.kwlist:
if invalid_syntax + '_' in kwargs:
kwargs[invalid_syntax] = kwargs.pop(invalid_syntax + '_')
self.config.update(kwargs)
# command strings that are deleted when the widget is destroyed
self.command_list = []
self.bindings = BindingDict( # BindingDict is defined below
lambda returntype, *args: self._call(returntype, 'bind', self, *args),
self.command_list)
self.bind = self.bindings._convenience_bind
if type(self)._widget_name.startswith('ttk::'):
self.state = StateSet(self)
else:
self.state = None
def _init_config(self):
# width and height aren't here because they are integers for some
# widgets and ScreenDistances for others... and sometimes the manual
# pages don't say which, so i have checked them by hand
self.config._types.update({
# ttk_widget(3tk)
'class': str,
'cursor': str,
'style': str,
# options(3tk)
'activebackground': teek.Color,
'activeborderwidth': teek.ScreenDistance,
'activeforeground': teek.Color,
'anchor': str,
'background': teek.Color,
'bg': teek.Color,
#'bitmap': ???,
'borderwidth': teek.ScreenDistance,
'bd': teek.ScreenDistance,
'cursor': str,
'compound': str,
'disabledforeground': teek.Color,
'exportselection': bool,
'font': teek.Font,
'foreground': teek.Color,
'fg': teek.Color,
'highlightbackground': teek.Color,
'highlightcolor': teek.Color,
'highlightthickness': str,
'insertbackground': teek.Color,
'insertborderwidth': teek.ScreenDistance,
'insertofftime': int,
'insertontime': int,
'insertwidth': teek.ScreenDistance,
'jump': bool,
'justify': str,
'orient': str,
'padx': teek.ScreenDistance,
'pady': teek.ScreenDistance,
'relief': str,
'repeatdelay': int,
'repeatinterval': int,
'selectbackground': teek.Color,
'selectborderwidth': teek.ScreenDistance,
'selectforeground': teek.Color,
'setgrid': bool,
'text': str,
'troughcolor': teek.Color,
'wraplength': teek.ScreenDistance,
# these options are in both man pages
'textvariable': teek.StringVar,
'underline': int,
'image': teek.Image,
# 'xscrollcommand' and 'yscrollcommand' are done below
'takefocus': str, # this one is harder to do right than you think
# other stuff that many things seem to have
'padding': teek.ScreenDistance,
'state': str,
})
for option_name in ('xscrollcommand', 'yscrollcommand'):
self.config._special[option_name] = functools.partial(
self._create_scroll_callback, option_name)
@classmethod
@make_thread_safe
def from_tcl(cls, path_string):
"""Creates a widget from a Tcl path name.
In Tcl, widgets are represented as commands, and doing something to the
widget invokes the command. Use this method if you know the Tcl command
and you would like to have a widget object instead.
This method raises :exc:`TypeError` if it's called from a different
``Widget`` subclass than what the type of the ``path_string`` widget
is:
>>> window = teek.Window()
>>> teek.Button.from_tcl(teek.Label(window).to_tcl()) \
# doctest: +ELLIPSIS
Traceback (most recent call last):
...
TypeError: '...' is a Label, not a Button
"""
if path_string == '.':
# this kind of sucks, i might make a _RootWindow class later
return None
result = _widgets[path_string]
if not isinstance(result, cls):
raise TypeError("%r is a %s, not a %s" % (
path_string, type(result).__name__, cls.__name__))
return result
def to_tcl(self):
"""Returns the widget's Tcl command name. See :meth:`from_tcl`."""
return self._widget_path
def __repr__(self):
class_name = type(self).__name__
if getattr(teek, class_name, None) is type(self):
result = 'teek.%s widget' % class_name
else:
result = '{0.__module__}.{0.__name__} widget'.format(type(self))
if not self.winfo_exists():
# _repr_parts() doesn't need to work with destroyed widgets
return '<destroyed %s>' % result
parts = self._repr_parts()
if parts:
result += ': ' + ', '.join(parts)
return '<' + result + '>'
def _repr_parts(self):
        # overridden in subclasses
return []
def _create_scroll_callback(self, option_name):
result = teek.Callback()
command_string = teek.create_command(result.run, [float, float])
self.command_list.append(command_string)
self._call(None, self, 'configure', '-' + option_name, command_string)
return result
__getitem__ = _tkinter_hint("widget.config['option']", "widget['option']")
__setitem__ = _tkinter_hint("widget.config['option']", "widget['option']")
cget = _tkinter_hint("widget.config['option']", "widget.cget('option')")
configure = _tkinter_hint("widget.config['option'] = value",
"widget.configure(option=value)")
# like _tcl_calls.tcl_call, but with better error handling
@make_thread_safe
def _call(self, *args, | |
# lib/devops/supervisor.py
import fcntl
import os
import random
import select
import signal
import sys
import time
import traceback
from devops.file_and_dir_helpers import *
from angel.util.pidfile import *
from devops.unix_helpers import set_proc_title
from angel.stats.disk_stats import disk_stats_get_usage_for_path
from devops.process_helpers import *
import angel.settings
# This function is similar to Python's subprocess module, with some tweaks and customizations.
# Like subprocess, it forks a child process and waits for it to exit; unlike subprocess, it re-starts the child on exit and never returns.
# Our supervisor handles shutdown conditions, calling a stop_func when the supervisor process receives SIGTERM.
# We also handle log rotation, rolling over stdout/stderr when the supervisor process receives SIGWINCH.
# Most other signals are propagated to the child process -- that is, sending the supervisor process SIGHUP will
# be passed through to the child process.
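# A hedged usage sketch (all names below are hypothetical, not defined in this module): a launcher
# that wants a supervised, auto-restarting daemon might call this roughly as
#
#   supervisor_manage_process(config, 'webapp', '/var/run/webapp-supervisor.pid',
#                             'svcuser', 'svcgroup', 'webapp',
#                             restart_daemon_on_exit=True, process_oom_adjustment=0,
#                             init_func=None, exec_func=start_webapp, stop_func=stop_webapp)
#
# where start_webapp() runs the actual server and stop_webapp(child_pid) asks it to shut down.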
def supervisor_manage_process(config, name, pid_filename_for_daemon, run_as_user, run_as_group, log_basepath,
restart_daemon_on_exit, process_oom_adjustment, init_func, exec_func, stop_func):
''' Creates and manages a child process, running given functions.
- If init_func is defined, it is called in the child process first. If it returns a non-zero status, then supervisor will exit.
- exec_func is then called. If restart_daemon_on_exit is True, exec_func is restarted whenever it exits.
- If stop_func is defined, it is called when this managing process receives a SIGTERM.
- pid_filename_for_daemon is used by this manager process to update status info and track that the manager should be running.
- process_oom_adjustment is a value, typically between -15 and 0, that indicates to the Linux kernel how "important" the process is.
This function never returns.
'''
# Create supervisor logger:
supervisor_logfile_path = launcher_get_logpath(config, log_basepath, 'supervisor')
if 0 != create_dirs_if_needed(os.path.dirname(supervisor_logfile_path), owner_user=run_as_user, owner_group=run_as_group):
print >>sys.stderr, "Supervisor error: unable to create log dirs."
os._exit(0) # Never return
try:
supervisor_logger = SupervisorLogger(open(supervisor_logfile_path, 'a', buffering=0))
except Exception as e:
print >>sys.stderr, "Supervisor error: unable to create supervisor log (%s: %s)." % (supervisor_logfile_path, e)
os._exit(0) # Never return
# Send SIGTERM to the supervisor daemon to tell it to quit the child process and exit.
# Send SIGWINCH to the supervisor daemon to tell it to rotate logs.
# Any other trappable_signal is sent to the child process to do any service-defined logic as necessary.
trappable_signals = (signal.SIGINT, signal.SIGWINCH, signal.SIGHUP, signal.SIGTERM, signal.SIGUSR1, signal.SIGUSR2, signal.SIGQUIT)
global supervisor_daemon_exit_requested
supervisor_daemon_exit_requested = False
global run_init_instead_of_exec
run_init_instead_of_exec = False
set_proc_title('supervisor[%s]: starting' % name)
# Always run supervisor with kernel out-of-memory flags set to hold off on killing us.
# This is reset back up to 0 in the child process (or whatever process_oom_adjustment is set to).
set_process_oom_factor(-15)
supervisor_pid = os.getpid()
child_pid = None
daemon_start_time = int(time.time())
last_start_time = None
start_count = 0
continous_restarts = 0
min_delay_between_continous_restarts = 5
max_delay_between_continous_restarts = 30
restart_delay_jitter = 60 # If we hit max_delay, we'll re-try at some interval between (max_delay - jitter) and (max_delay)
# Define a function that waits for a child pid to exit OR for us to receive a signal:
def _supervisor_daemon_waitpid(pid):
if pid is None or pid < 2:
supervisor_logger.warn("Supervisor[%s]: can't wait on invalid pid %s." % (name, pid))
return -1
try:
# To-do: periodically wake up and check that pid_filename_for_daemon contains our pid, or exit
(wait_pid, wait_exitcode) = os.waitpid(pid, 0)
return (wait_exitcode >> 8) % 256
except OSError:
            return -2 # waitpid will throw an OSError when our supervisor receives a kill signal (i.e. SIGTERM to tell us to exit); our code below will loop and re-call this.
return -3
# Define a function that receives a signal and passes it through to our child process:
def _supervisor_daemon_signal_passthru(signum, frame):
if child_pid is None or child_pid < 2:
# This can happen if the supervised child was *just* killed, or isn't running yet (during a re-spawn).
supervisor_logger.warn("Supervisor: invalid pid %s found during kill -%s of process %s" % (child_pid, signum, name))
return
try:
supervisor_logger.info("_supervisor_daemon_signal_passthru: kill -%s %s" % (signum, child_pid))
os.kill(child_pid, signum)
except Exception as e:
supervisor_logger.error("Supervisor %s[%s/%s managing %s]: unable to send signal %s to pid %s: %s" % (name, supervisor_pid, os.getpid(), child_pid, signum, child_pid, e))
# Define a function that receives a signal and rotates logs:
def _supervisor_daemon_rotate_logs(signum, frame):
supervisor_logger.info("Supervisor %s[%s/%s managing %s]: rotate logs not implemented yet; log_basepath=%s" % (name, supervisor_pid, os.getpid(), child_pid, log_basepath))
# Define a function that receives a signal and cleanly shuts down the server:
def _supervisor_daemon_quit(signum, frame):
# Flag that quit has been requested:
global supervisor_daemon_exit_requested
supervisor_daemon_exit_requested = True
if child_pid is None or child_pid < 2:
# This can happen if the supervised child was *just* killed, or isn't running yet (during a re-spawn).
supervisor_logger.warn("Supervisor: invalid pid %s found during kill -%s of process %s" % (child_pid, signum, name))
return
# Check if we're still in an init phase (can't call stop_func on something that hasn't actually started):
global run_init_instead_of_exec
if run_init_instead_of_exec:
# if we're currently invoking a custom init function, then we need to send the supervisor process the kill signal directly so it exits
return _supervisor_daemon_signal_passthru(signum, frame)
# Run stop function if given, otherwise pass along given kill signal to child process:
if stop_func is not None:
try:
import threading
supervisor_logger.info("Supervisor %s[%s/%s managing %s]: quit request received (sig %s in thread %s); calling stop function" % (name, supervisor_pid, os.getpid(), child_pid, signum, threading.currentThread().name))
ret_val = stop_func(child_pid)
supervisor_logger.info("Supervisor %s[%s/%s managing %s]: quit request received (sig %s in thread %s); stop function done (%s)" % (name, supervisor_pid, os.getpid(), child_pid, signum, threading.currentThread().name, ret_val))
return
except Exception:
supervisor_logger.error("Supervisor %s[%s/%s managing %s]: error in stop function: %s" % (name, supervisor_pid, os.getpid(), child_pid, traceback.format_exc(sys.exc_info()[2])))
else:
supervisor_logger.warn("Supervisor %s[%s/%s managing %s]: no stop function given" % (name, supervisor_pid, os.getpid(), child_pid))
return _supervisor_daemon_signal_passthru(signum, frame)
def _install_signal_functions():
signal.signal(signal.SIGWINCH, _supervisor_daemon_rotate_logs)
signal.signal(signal.SIGTERM, _supervisor_daemon_quit)
for sig in trappable_signals:
if sig not in (signal.SIGWINCH, signal.SIGTERM):
signal.signal(sig, _supervisor_daemon_signal_passthru)
def _remove_signal_functions():
for sig in trappable_signals:
signal.signal(sig, signal.SIG_DFL)
def _sleep_without_signal_functions(duration):
# Because there are cases where *we* need to be interrupted:
_remove_signal_functions()
time.sleep(duration)
_install_signal_functions()
# Install signal functions:
_install_signal_functions()
# chdir() to /, to avoid potentially holding a mountpoint open:
os.chdir('/')
# Reset umask:
os.umask(022)
# Redirect STDOUT/STDERR:
# (Redirects run as separate threads in our supervisor process -- don't move these to the child process; os.exec will wipe them out.)
os.setsid()
stdout_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, ''), run_as_user=run_as_user, run_as_group=run_as_group)
stderr_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, 'error'), run_as_user=run_as_user, run_as_group=run_as_group)
supervisor_redirector = SupervisorStreamRedirector(supervisor_logger, launcher_get_logpath(config, log_basepath, 'supervisor'), run_as_user=run_as_user, run_as_group=run_as_group)
stdout_redirector.startRedirectThread(sys.stdout)
stderr_redirector.startRedirectThread(sys.stderr)
supervisor_redirector.startRedirectThread(supervisor_logger.logger_fd)
# Close STDIN:
sys.stdin.close()
os.close(0)
new_stdin = open(os.devnull, 'r', 0) # So FD 0 isn't available
#new_stdin = open(os.devnull, 'r', 0)
#try:
# os.dup2(new_stdin.fileno(), sys.stdin.fileno())
#except ValueError:
# print >>sys.stderr, "Can't set up STDIN, was it closed on us?"
# Loop until shutdown requested, handling signals and logs and making sure that our server remains running:
while not supervisor_daemon_exit_requested:
if not is_pid_in_pidfile_our_pid(pid_filename_for_daemon):
supervisor_logger.warn("Supervisor[%s/%s]: Warning: invalid pid %s in lock file %s. Re-checking..." % (supervisor_pid, os.getpid(), get_pid_from_pidfile(pid_filename_for_daemon), pid_filename_for_daemon))
try:
time.sleep(0.5)
except:
pass
if not is_pid_in_pidfile_our_pid(pid_filename_for_daemon):
supervisor_logger.error("Supervisor[%s/%s]: FATAL: invalid pid %s in lock file %s. Exiting now." % (supervisor_pid, os.getpid(), get_pid_from_pidfile(pid_filename_for_daemon), pid_filename_for_daemon))
sys.stdout.flush()
sys.stderr.flush()
time.sleep(0.5) # Need to sleep so that logger threads can write out above stderr message. Gross, but it works.
os._exit(1)
lockfile_pid = get_pid_from_pidfile(pid_filename_for_daemon)
if lockfile_pid is None or supervisor_pid != lockfile_pid:
supervisor_logger.error("Supervisor[%s/%s]: FATAL: lock file %s not owned by current process! (pid is %s) Exiting now." % (supervisor_pid, os.getpid(), pid_filename_for_daemon, lockfile_pid))
os._exit(1)
one_time_run = False
run_init_instead_of_exec = False
if start_count == 0 and init_func is not None:
run_init_instead_of_exec = True
if not restart_daemon_on_exit:
# This is a clever trick: we might want to run a command in the background one-time (i.e. priming a service).
# By passing restart_daemon_on_exit as false from way up above us in the callstack,
# we can use our run logic inside the supervisor process and let it exit cleanly.
# This works by reading one_time_run after we've started and flipping supervisor_daemon_exit_requested to True.
one_time_run = True
try:
log_disk_stats = disk_stats_get_usage_for_path(config['LOG_DIR'])
data_disk_stats = disk_stats_get_usage_for_path(config['DATA_DIR'])
run_disk_stats = disk_stats_get_usage_for_path(config['RUN_DIR'])
if log_disk_stats is not None and data_disk_stats is not None and run_disk_stats is not None:
# Only do this check when we | |
instrumentId
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef12345', # optional, [a-z0-9]{1,32}
# 'order_id': id,
}
clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method += 'ClientOid'
request['client_oid'] = clientOid
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, 'type')
response = await getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# {
# "client_oid":"oktspot70",
# "created_at":"2019-03-15T02:52:56.000Z",
# "filled_notional":"3.8886",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2482659399697408",
# "order_type":"0",
# "price":"3927.3",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-15T02:52:56.000Z",
# "type":"limit"
# }
#
# futures, swap
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T02:46:38.000Z",
# "filled_qty":"10",
# "fee":"-0.0080819",
# "order_id":"2510946213248000",
# "price":"3.712",
# "price_avg":"3.712",
# "status":"2",
# "state": "2",
# "type":"2",
# "contract_val":"10",
# "leverage":"10",
# "client_oid":"", # missing in swap orders
# "pnl":"0", # missing in swap orders
# "order_type":"0"
# }
#
return self.parse_order(response)
async def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersByState requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrder requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
request = {
'instrument_id': market['id'],
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
'state': state,
}
method = type + 'GetOrders'
if market['futures'] or market['swap']:
method += 'InstrumentId'
query = self.omit(params, 'type')
response = await getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# [
# # in fact, self documented API response does not correspond
# # to their actual API response for spot markets
# # OKEX v3 API returns a plain array of orders(see below)
# [
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# },
# ],
# {
# "before":"2500723297813504",
# "after":"2500650881647616"
# }
# ]
#
# futures, swap
#
# {
# "result":true, # missing in swap orders
# "order_info": [
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10",
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567",
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap orders
# "client_oid":"",
# "pnl":"1.09510794", # missing in swap orders
# "order_type":"0"
# },
# ]
# }
#
orders = None
if market['swap'] or market['futures']:
orders = self.safe_value(response, 'order_info', [])
else:
orders = response
responseLength = len(response)
if responseLength < 1:
return []
# in fact, self documented API response does not correspond
# to their actual API response for spot markets
# OKEX v3 API returns a plain array of orders
if responseLength > 1:
before = self.safe_value(response[1], 'before')
if before is not None:
orders = response[0]
return self.parse_orders(orders, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return await self.fetch_orders_by_state('6', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return await self.fetch_orders_by_state('7', symbol, since, limit, params)
def parse_deposit_addresses(self, addresses):
result = {}
for i in range(0, len(addresses)):
address = self.parse_deposit_address(addresses[i])
code = address['currency']
result[code] = address
return result
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# tag: 'abcde12345', # will be missing if the token does not require a deposit tag
# payment_id: 'abcde12345', # will not be returned if the token does not require a payment_id
# # can_deposit: 1, # 0 or 1, documented but missing
# # can_withdraw: 1, # 0 or 1, documented but missing
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string_2(depositAddress, 'tag', 'payment_id')
tag = self.safe_string(depositAddress, 'memo', tag)
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.accountGetDepositAddress(self.extend(request, params))
#
# [
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# }
# ]
#
addresses = self.parse_deposit_addresses(response)
address = self.safe_value(addresses, code)
if address is None:
raise InvalidAddress(self.id + ' fetchDepositAddress cannot return nonexistent addresses, you should create withdrawal addresses with the exchange website first')
return address
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
if tag:
address = address + ':' + tag
fee = self.safe_string(params, 'fee')
if fee is None:
raise ArgumentsRequired(self.id + " withdraw() requires a `fee` string parameter, network transaction fee must be ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set '0'. Withdrawing to external digital asset address requires network transaction fee.")
request = {
'currency': currency['id'],
'to_address': address,
'destination': '4', # 2 = OKCoin International, 3 = OKEx 4 = others
'amount': self.number_to_string(amount),
'fee': fee, # String. Network transaction fee ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set as 0. Withdrawal to external digital asset address requires network transaction fee.
}
if 'password' in params:
request['trade_pwd'] = params['password']
elif 'trade_pwd' in params:
request['trade_pwd'] = params['trade_pwd']
elif self.password:
request['trade_pwd'] = self.password
query = self.omit(params, ['fee', 'password', 'trade_pwd'])
if not ('trade_pwd' in request):
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = await self.accountPostWithdrawal(self.extend(request, query))
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
return {
'info': response,
'id': self.safe_string(response, 'withdrawal_id'),
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
method = 'accountGetDepositHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
method = 'accountGetWithdrawalHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def parse_transaction_status(self, status):
#
# deposit statuses
#
# {
# '0': 'waiting for confirmation',
# '1': 'confirmation account',
# '2': 'recharge success'
# }
#
# withdrawal statues
#
# {
# '-3': 'pending cancel',
# '-2': 'cancelled',
# '-1': 'failed',
# '0': 'pending',
# '1': 'sending',
# '2': 'sent',
# '3': 'email confirmation',
# '4': 'manual confirmation',
# '5': 'awaiting identity confirmation'
# }
#
statuses = {
'-3': 'pending',
'-2': 'canceled',
'-1': 'failed',
'0': 'pending',
'1': 'pending',
'2': 'ok',
'3': 'pending',
'4': 'pending',
'5': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
# fetchWithdrawals
#
# {
# amount: "4.72100000",
# withdrawal_id: "1729116",
# fee: "0.01000000eth",
# txid: "0xf653125bbf090bcfe4b5e8e7b8f586a9d87aa7de94598702758c0802b…",
# currency: "ETH",
# from: "7147338839",
# to: "0x26a3CB49578F07000575405a57888681249c35Fd",
# timestamp: "2018-08-17T07:03:42.000Z",
# status: "2"
# }
#
# fetchDeposits
#
# {
# "amount": "4.19511659",
# "txid": "14c9a8c925647cdb7e5b2937ea9aefe2b29b2c273150ad3f44b3b8a4635ed437",
# "currency": "XMR",
# "from": "",
# "to": "48PjH3ksv1fiXniKvKvyH5UtFs5WhfS2Vf7U3TwzdRJtCc7HJWvCQe56dRahyhQyTAViXZ8Nzk4gQg6o4BJBMUoxNy8y8g7",
# "deposit_id": 11571659, <-- we can use self
# "timestamp": "2019-10-01T14:54:19.000Z",
# "status": "2"
# }
#
type = None
id = None
address = None
withdrawalId = self.safe_string(transaction, | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VirtualMachineArgs', 'VirtualMachine']
@pulumi.input_type
class VirtualMachineArgs:
def __init__(__self__, *,
sql_license_type: pulumi.Input[str],
virtual_machine_id: pulumi.Input[str],
auto_backup: Optional[pulumi.Input['VirtualMachineAutoBackupArgs']] = None,
auto_patching: Optional[pulumi.Input['VirtualMachineAutoPatchingArgs']] = None,
key_vault_credential: Optional[pulumi.Input['VirtualMachineKeyVaultCredentialArgs']] = None,
r_services_enabled: Optional[pulumi.Input[bool]] = None,
sql_connectivity_port: Optional[pulumi.Input[int]] = None,
sql_connectivity_type: Optional[pulumi.Input[str]] = None,
sql_connectivity_update_password: Optional[pulumi.Input[str]] = None,
sql_connectivity_update_username: Optional[pulumi.Input[str]] = None,
storage_configuration: Optional[pulumi.Input['VirtualMachineStorageConfigurationArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a VirtualMachine resource.
:param pulumi.Input[str] sql_license_type: The SQL Server license type. Possible values are `AHUB` (Azure Hybrid Benefit) and `PAYG` (Pay-As-You-Go). Changing this forces a new resource to be created.
:param pulumi.Input[str] virtual_machine_id: The ID of the Virtual Machine. Changing this forces a new resource to be created.
:param pulumi.Input['VirtualMachineAutoBackupArgs'] auto_backup: An `auto_backup` block as defined below. This block can be added to an existing resource, but removing this block forces a new resource to be created.
:param pulumi.Input['VirtualMachineAutoPatchingArgs'] auto_patching: An `auto_patching` block as defined below.
:param pulumi.Input['VirtualMachineKeyVaultCredentialArgs'] key_vault_credential: (Optional) An `key_vault_credential` block as defined below.
:param pulumi.Input[bool] r_services_enabled: Should R Services be enabled?
:param pulumi.Input[int] sql_connectivity_port: The SQL Server port. Defaults to `1433`.
:param pulumi.Input[str] sql_connectivity_type: The connectivity type used for this SQL Server. Defaults to `PRIVATE`.
:param pulumi.Input[str] sql_connectivity_update_password: The SQL Server sysadmin login password.
:param pulumi.Input[str] sql_connectivity_update_username: The SQL Server sysadmin login to create.
:param pulumi.Input['VirtualMachineStorageConfigurationArgs'] storage_configuration: An `storage_configuration` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "sql_license_type", sql_license_type)
pulumi.set(__self__, "virtual_machine_id", virtual_machine_id)
if auto_backup is not None:
pulumi.set(__self__, "auto_backup", auto_backup)
if auto_patching is not None:
pulumi.set(__self__, "auto_patching", auto_patching)
if key_vault_credential is not None:
pulumi.set(__self__, "key_vault_credential", key_vault_credential)
if r_services_enabled is not None:
pulumi.set(__self__, "r_services_enabled", r_services_enabled)
if sql_connectivity_port is not None:
pulumi.set(__self__, "sql_connectivity_port", sql_connectivity_port)
if sql_connectivity_type is not None:
pulumi.set(__self__, "sql_connectivity_type", sql_connectivity_type)
if sql_connectivity_update_password is not None:
pulumi.set(__self__, "sql_connectivity_update_password", sql_connectivity_update_password)
if sql_connectivity_update_username is not None:
pulumi.set(__self__, "sql_connectivity_update_username", sql_connectivity_update_username)
if storage_configuration is not None:
pulumi.set(__self__, "storage_configuration", storage_configuration)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="sqlLicenseType")
def sql_license_type(self) -> pulumi.Input[str]:
"""
The SQL Server license type. Possible values are `AHUB` (Azure Hybrid Benefit) and `PAYG` (Pay-As-You-Go). Changing this forces a new resource to be created.
"""
return pulumi.get(self, "sql_license_type")
@sql_license_type.setter
def sql_license_type(self, value: pulumi.Input[str]):
pulumi.set(self, "sql_license_type", value)
@property
@pulumi.getter(name="virtualMachineId")
def virtual_machine_id(self) -> pulumi.Input[str]:
"""
The ID of the Virtual Machine. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "virtual_machine_id")
@virtual_machine_id.setter
def virtual_machine_id(self, value: pulumi.Input[str]):
pulumi.set(self, "virtual_machine_id", value)
@property
@pulumi.getter(name="autoBackup")
def auto_backup(self) -> Optional[pulumi.Input['VirtualMachineAutoBackupArgs']]:
"""
An `auto_backup` block as defined below. This block can be added to an existing resource, but removing this block forces a new resource to be created.
"""
return pulumi.get(self, "auto_backup")
@auto_backup.setter
def auto_backup(self, value: Optional[pulumi.Input['VirtualMachineAutoBackupArgs']]):
pulumi.set(self, "auto_backup", value)
@property
@pulumi.getter(name="autoPatching")
def auto_patching(self) -> Optional[pulumi.Input['VirtualMachineAutoPatchingArgs']]:
"""
An `auto_patching` block as defined below.
"""
return pulumi.get(self, "auto_patching")
@auto_patching.setter
def auto_patching(self, value: Optional[pulumi.Input['VirtualMachineAutoPatchingArgs']]):
pulumi.set(self, "auto_patching", value)
@property
@pulumi.getter(name="keyVaultCredential")
def key_vault_credential(self) -> Optional[pulumi.Input['VirtualMachineKeyVaultCredentialArgs']]:
"""
(Optional) An `key_vault_credential` block as defined below.
"""
return pulumi.get(self, "key_vault_credential")
@key_vault_credential.setter
def key_vault_credential(self, value: Optional[pulumi.Input['VirtualMachineKeyVaultCredentialArgs']]):
pulumi.set(self, "key_vault_credential", value)
@property
@pulumi.getter(name="rServicesEnabled")
def r_services_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Should R Services be enabled?
"""
return pulumi.get(self, "r_services_enabled")
@r_services_enabled.setter
def r_services_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "r_services_enabled", value)
@property
@pulumi.getter(name="sqlConnectivityPort")
def sql_connectivity_port(self) -> Optional[pulumi.Input[int]]:
"""
The SQL Server port. Defaults to `1433`.
"""
return pulumi.get(self, "sql_connectivity_port")
@sql_connectivity_port.setter
def sql_connectivity_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "sql_connectivity_port", value)
@property
@pulumi.getter(name="sqlConnectivityType")
def sql_connectivity_type(self) -> Optional[pulumi.Input[str]]:
"""
The connectivity type used for this SQL Server. Defaults to `PRIVATE`.
"""
return pulumi.get(self, "sql_connectivity_type")
@sql_connectivity_type.setter
def sql_connectivity_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_connectivity_type", value)
@property
@pulumi.getter(name="sqlConnectivityUpdatePassword")
def sql_connectivity_update_password(self) -> Optional[pulumi.Input[str]]:
"""
The SQL Server sysadmin login password.
"""
return pulumi.get(self, "sql_connectivity_update_password")
@sql_connectivity_update_password.setter
def sql_connectivity_update_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_connectivity_update_password", value)
@property
@pulumi.getter(name="sqlConnectivityUpdateUsername")
def sql_connectivity_update_username(self) -> Optional[pulumi.Input[str]]:
"""
The SQL Server sysadmin login to create.
"""
return pulumi.get(self, "sql_connectivity_update_username")
@sql_connectivity_update_username.setter
def sql_connectivity_update_username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_connectivity_update_username", value)
@property
@pulumi.getter(name="storageConfiguration")
def storage_configuration(self) -> Optional[pulumi.Input['VirtualMachineStorageConfigurationArgs']]:
"""
An `storage_configuration` block as defined below.
"""
return pulumi.get(self, "storage_configuration")
@storage_configuration.setter
def storage_configuration(self, value: Optional[pulumi.Input['VirtualMachineStorageConfigurationArgs']]):
pulumi.set(self, "storage_configuration", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _VirtualMachineState:
def __init__(__self__, *,
auto_backup: Optional[pulumi.Input['VirtualMachineAutoBackupArgs']] = None,
auto_patching: Optional[pulumi.Input['VirtualMachineAutoPatchingArgs']] = None,
key_vault_credential: Optional[pulumi.Input['VirtualMachineKeyVaultCredentialArgs']] = None,
r_services_enabled: Optional[pulumi.Input[bool]] = None,
sql_connectivity_port: Optional[pulumi.Input[int]] = None,
sql_connectivity_type: Optional[pulumi.Input[str]] = None,
sql_connectivity_update_password: Optional[pulumi.Input[str]] = None,
sql_connectivity_update_username: Optional[pulumi.Input[str]] = None,
sql_license_type: Optional[pulumi.Input[str]] = None,
storage_configuration: Optional[pulumi.Input['VirtualMachineStorageConfigurationArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_machine_id: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering VirtualMachine resources.
:param pulumi.Input['VirtualMachineAutoBackupArgs'] auto_backup: An `auto_backup` block as defined below. This block can be added to an existing resource, but removing this block forces a new resource to be created.
:param pulumi.Input['VirtualMachineAutoPatchingArgs'] auto_patching: An `auto_patching` block as defined below.
:param pulumi.Input['VirtualMachineKeyVaultCredentialArgs'] key_vault_credential: (Optional) An `key_vault_credential` block as defined below.
:param pulumi.Input[bool] r_services_enabled: Should R Services be enabled?
:param pulumi.Input[int] sql_connectivity_port: The SQL Server port. Defaults to `1433`.
:param pulumi.Input[str] sql_connectivity_type: The connectivity type used for this SQL Server. Defaults to `PRIVATE`.
:param pulumi.Input[str] sql_connectivity_update_password: The SQL Server sysadmin login password.
:param pulumi.Input[str] sql_connectivity_update_username: The SQL Server sysadmin login to create.
:param pulumi.Input[str] sql_license_type: The SQL Server license type. Possible values are `AHUB` (Azure Hybrid Benefit) and `PAYG` (Pay-As-You-Go). Changing this forces a new resource to be created.
:param pulumi.Input['VirtualMachineStorageConfigurationArgs'] storage_configuration: An `storage_configuration` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] virtual_machine_id: The ID of the Virtual Machine. Changing this forces a new resource to be created.
"""
if auto_backup is not None:
pulumi.set(__self__, "auto_backup", auto_backup)
if auto_patching is not None:
pulumi.set(__self__, "auto_patching", auto_patching)
if key_vault_credential is not None:
pulumi.set(__self__, "key_vault_credential", key_vault_credential)
if r_services_enabled is not None:
pulumi.set(__self__, "r_services_enabled", r_services_enabled)
if sql_connectivity_port is not None:
pulumi.set(__self__, "sql_connectivity_port", sql_connectivity_port)
if sql_connectivity_type is not None:
pulumi.set(__self__, "sql_connectivity_type", sql_connectivity_type)
if sql_connectivity_update_password is not None:
pulumi.set(__self__, "sql_connectivity_update_password", <PASSWORD>_connectivity_update_password)
if sql_connectivity_update_username is not None:
pulumi.set(__self__, "sql_connectivity_update_username", sql_connectivity_update_username)
if sql_license_type is not None:
pulumi.set(__self__, "sql_license_type", sql_license_type)
if storage_configuration is not None:
pulumi.set(__self__, "storage_configuration", storage_configuration)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_machine_id is not None:
pulumi.set(__self__, "virtual_machine_id", virtual_machine_id)
@property
@pulumi.getter(name="autoBackup")
def auto_backup(self) -> Optional[pulumi.Input['VirtualMachineAutoBackupArgs']]:
"""
An `auto_backup` block as defined below. This block can be added to an existing resource, but removing this block forces a new resource to be created.
"""
return pulumi.get(self, "auto_backup")
@auto_backup.setter
def auto_backup(self, value: Optional[pulumi.Input['VirtualMachineAutoBackupArgs']]):
pulumi.set(self, "auto_backup", value)
@property
@pulumi.getter(name="autoPatching")
def auto_patching(self) -> Optional[pulumi.Input['VirtualMachineAutoPatchingArgs']]:
"""
An `auto_patching` block as defined below.
"""
return pulumi.get(self, "auto_patching")
@auto_patching.setter
def auto_patching(self, value: Optional[pulumi.Input['VirtualMachineAutoPatchingArgs']]):
pulumi.set(self, "auto_patching", value)
@property
@pulumi.getter(name="keyVaultCredential")
def key_vault_credential(self) -> Optional[pulumi.Input['VirtualMachineKeyVaultCredentialArgs']]:
"""
(Optional) An `key_vault_credential` block as defined below.
"""
return pulumi.get(self, "key_vault_credential")
@key_vault_credential.setter
def key_vault_credential(self, value: Optional[pulumi.Input['VirtualMachineKeyVaultCredentialArgs']]):
pulumi.set(self, "key_vault_credential", value)
@property
@pulumi.getter(name="rServicesEnabled")
def r_services_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Should R Services be enabled?
"""
return pulumi.get(self, "r_services_enabled")
@r_services_enabled.setter
def r_services_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "r_services_enabled", value)
@property
@pulumi.getter(name="sqlConnectivityPort")
def sql_connectivity_port(self) -> Optional[pulumi.Input[int]]:
"""
The SQL Server port. Defaults to `1433`.
"""
return pulumi.get(self, "sql_connectivity_port")
@sql_connectivity_port.setter
def sql_connectivity_port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "sql_connectivity_port", value)
@property
@pulumi.getter(name="sqlConnectivityType")
def sql_connectivity_type(self) -> Optional[pulumi.Input[str]]:
"""
The connectivity type used for this SQL Server. Defaults to `PRIVATE`.
"""
return pulumi.get(self, "sql_connectivity_type")
@sql_connectivity_type.setter
def sql_connectivity_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_connectivity_type", value)
@property
@pulumi.getter(name="sqlConnectivityUpdatePassword")
def sql_connectivity_update_password(self) -> Optional[pulumi.Input[str]]:
"""
The SQL Server sysadmin login password.
"""
return pulumi.get(self, "sql_connectivity_update_password")
@sql_connectivity_update_password.setter
def sql_connectivity_update_password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_connectivity_update_password", value)
@property
@pulumi.getter(name="sqlConnectivityUpdateUsername")
def sql_connectivity_update_username(self) -> Optional[pulumi.Input[str]]:
"""
The SQL Server sysadmin login to create.
"""
return pulumi.get(self, "sql_connectivity_update_username")
@sql_connectivity_update_username.setter
def sql_connectivity_update_username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "sql_connectivity_update_username", value)
| |
# coding: utf-8
# # Two-dimensional flows: vorticity and stream function method
# v3.80, 14 April 2018, by <NAME>
#
# $\newcommand{\V}[1]{\vec{\boldsymbol{#1}}}$
# $\newcommand{\I}[1]{\widehat{\boldsymbol{\mathrm{#1}}}}$
# $\newcommand{\pd}[2]{\frac{\partial#1}{\partial#2}}$
# $\newcommand{\pdt}[1]{\frac{\partial#1}{\partial t}}$
# $\newcommand{\ddt}[1]{\frac{\D#1}{\D t}}$
# $\newcommand{\D}{\mathrm{d}}$
# $\newcommand{\Ii}{\I{\imath}}$
# $\newcommand{\Ij}{\I{\jmath}}$
# $\newcommand{\Ik}{\I{k}}$
# $\newcommand{\VU}{\V{U}}$
# $\newcommand{\del}{\boldsymbol{\nabla}}$
# $\newcommand{\dt}{\cdot}$
# $\newcommand{\x}{\times}$
# $\newcommand{\dv}{\del\cdot}$
# $\newcommand{\curl}{\del\times}$
# $\newcommand{\lapl}{\nabla^2}$
# In[1]:
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
from IPython.display import display,clear_output
import time as Time
import math, os
import numpy as np
import scipy.fftpack
import matplotlib
matplotlib.rcParams.update({'font.size': 22})
from IPython.core.display import HTML
import urllib.request
# In[2]:
HTML(urllib.request.urlopen('http://metrprof.xyz/metr4323.css').read().decode())
#HTML( open('metr4323.css').read() ) #or use this, if you have downloaded metr4233.css to your computer
# ### Example simulation
#
# Here is my animation of an [unstable ring of vorticity](http://12characters.net/explore/vortringi.html),
# (in this notebook, experiment `nexp=15`) computed in a `1025x1025` domain. On my computer with an I3 processor, the wall-clock time was about 90 minutes for the simulation.
# ### The model
#
# The equations for two-dimensional, inviscid, incompressible flow of constant density:
#
# $$
# \pdt{u} + u \pd{u}{x} + v \pd{u}{y}= - \pd{P}{x}
# $$
#
# $$
# \pdt{v} + u \pd{v}{x} + v \pd{v}{y}= - \pd{P}{y}
# $$
#
# $$
# \delta \equiv \pd{u}{x} + \pd{v}{y} = 0
# $$
#
# where $P$ denotes pressure divided by density: $P \equiv p/\rho$. We will refer to $P$ as "pressure".
#
# Are the equations dimensionless? We make no claims yet. Symbolically, the dimensionless equations and dimensional equations will look the same.
#
# In contrast to the shallow-water equations, we don't see the height of the free surface $h(x,y,t)$ anymore, we just see pressure $P(x,y,t)$. Also we see that the divergence of velocity is required to be zero. In shallow-water theory, we saw that convergence raises $h$. Here $h$ is required to be a constant: a rigid surface exists at both the top and bottom boundary.
#
# Peculiarly, we don't see any equation that prognosticates $P(x,y,t)$. We can think of this rigid-lid model as having a free surface with very large $g$. Any small deflection of $h$ upward would cause a big increase in pressure that would cause divergence to bring down $h$ very quickly.
#
# As a consequence, gravity waves would have a very large speed.
# We don't seek to explicitly model these gravity waves. That would require a very small time step in the numerical simulations. We only want to model their effect on maintaining $\delta = 0$, or very close to $0$.
#
# Recall the definition of vertical vorticity:
# $$
# \zeta \equiv \pd{v}{x} - \pd{u}{y}
# $$
#
# You can show rather easily, from the top 3 equations, that vorticity $\zeta$ is
# (theoretically) conserved when $\pd{u}{x} + \pd{v}{y} = 0$:
# $$
# \ddt{\zeta} =0
# $$
#
# In our previous notebook, with a flexible upper lid, we did the more complicated derivation for conservation of potential vorticity:
# $$
# \ddt{}\left(\frac{ \zeta + f}{h} \right) =0
# $$
# Obviously, from that derivation, taking the case where $f$ and $h$ are constant, we recover $\ddt{} \zeta =0$
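#
# (A direct route, without going through potential vorticity: take $\pd{}{x}$ of the $v$-equation
# minus $\pd{}{y}$ of the $u$-equation; the pressure terms cancel, and the remaining terms regroup
# into $\ddt{\zeta} + \zeta\,\delta = 0$, which reduces to $\ddt{\zeta}=0$ when $\delta=0$.)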
# ### Let's assume pressure has done its job
#
# If $\delta=0$, there must exist a *stream function* $\psi(x,y)$ consistent with $\VU = u\Ii + v\Ij$:
#
# $$
# v = \pd{\psi}{x}
# $$
# and
# $$
# u = - \pd{\psi}{y}
# $$
#
# With $(u,v)$ given this way, we are assured that:
# $$
# \delta \equiv \pd{u}{x} + \pd{v}{y} =
# -\pd{^2\psi}{ x \partial y} + \pd{^2 \psi}{y \partial x} = 0
# $$
#
# Streamlines are curves on which $\psi$ is constant. You can easily show $\VU \dt \del \psi =0$, which means $\VU$ is tangent to streamlines.
#
# You can also show that $U^2 = |\del\psi|^2$, which means the flow speed is inversely proportional to the spacing between streamlines.
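#
# For instance, the first of these claims follows in one line from the definitions above:
# $$
# \VU \dt \del \psi = u\pd{\psi}{x} + v\pd{\psi}{y}
# = -\pd{\psi}{y}\pd{\psi}{x} + \pd{\psi}{x}\pd{\psi}{y} = 0
# $$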
# ### Solving for $\psi$, u and v from $\zeta$
#
# If we are given $\psi$, we can easily determine $u$,$v$ and then $\zeta$, using derivatives, or their equivalent for a gridded model.
#
# Going the other way, meaning given $\zeta$ and then determining $\psi$, is more difficult. But if we can do that, it would be very useful.
#
# We first recognize how $\zeta$ is related to $\psi$:
# $$
# \zeta \equiv \pd{v}{x} - \pd{u}{y} = \pd{^2\psi}{x^2} + \pd{^2\psi}{y^2} = \lapl \psi
# $$
#
# We can forecast $\zeta$ forward in time for one time-step with $\ddt{\zeta}=0$ or:
#
# $$
# \pdt{\zeta} = - u \pd{\zeta}{x} - v\pd{\zeta}{y}
# $$
#
# We then solve $\lapl \psi = \zeta$ for $\psi$, and then solve for the updated $u$ and $v$.
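#
# Once $\psi$ is known on the grid, the updated $u$ and $v$ follow from centered differences. A minimal
# sketch (not the notebook's own implementation, which appears under "Functions" below), assuming `psi`
# is a 2-D array with rows indexed by $y$ and columns by $x$, and uniform spacings `dx`, `dy`:

def velocity_from_psi_sketch(psi, dx, dy):
    # interior points only; the boundary rows/columns are left as zero here
    u = np.zeros_like(psi)
    v = np.zeros_like(psi)
    u[1:-1, :] = -(psi[2:, :] - psi[:-2, :]) / (2*dy)   # u = -dpsi/dy
    v[:, 1:-1] = (psi[:, 2:] - psi[:, :-2]) / (2*dx)    # v = +dpsi/dx
    return u, v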
# ### The Poisson equation
#
# $\lapl$ is called the *Laplacian*. Finding a function $\psi$ such that its Laplacian $\lapl\psi$ satisfies a specified function is called *solving the Poisson equation*:
#
# $$
# \lapl \psi = \zeta
# $$
# Others might say that $\psi$ is the *inverse Laplacian* of $\zeta$.
# ### The Fourier method for solving the Poisson equation
#
# The model domain will be $0\le x\le L$ and $0 \le y \le W$.
# In our simulations, $\zeta=0$ on the boundaries initially, and so remains $0$.
# So $\zeta(x,y)$ can be written as a Fourier series using sinusoids:
#
# $$
# \zeta(x,y) = \sum_{k,m} A_{k,m} \sin(kx)\sin(my)
# $$
# where
# $$
# k = K\frac{\pi}{L} \qquad m = M\frac{\pi}{W}
# $$
# and $K$ and $M$ are integers: $K=1,2,3 \ldots K_{max}$ and $M=1,2,3,\ldots M_{max}$.
#
# For an `Nx` by `Ny` grid, we will need $K_{max}=$ `Nx` -2 and $M_{max}=$ `Ny` -2. There will be as many
# Fourier modes as there are internal grid points. For example, 1000 interior grid points of $\zeta$ are completely
# specified by 1000 unique values of $A_{k,m}$.
#
# Using "the most important numerical algorithm of our lifetime",
# the [Fast Fourier Transform](https://en.wikipedia.org/wiki/Fast_Fourier_transform) or FFT,
# we can quickly find the coefficients $A_{k,m}$ given the gridded $\zeta(x,y)$.
#
# If we seek
# $$
# \psi(x,y) = \sum_{k,m} B_{k,m} \sin(kx)\sin(my)
# $$
# you might think that since
# $$
# \lapl \psi(x,y) = \lapl \sum_{k,m} B_{k,m} \sin(kx)\sin(my) = \sum_{k,m} B_{k,m} \lapl \sin(kx)\sin(my)=- \sum_{k,m} B_{k,m} (k^2+m^2) \sin(kx)\sin(my)
# $$
# $$
# =\zeta(x,y) = \sum_{k,m} A_{k,m} \sin(kx)\sin(my)
# $$
# we could simply take the coefficients $A_{k,m}$ found from the FFT of $\zeta$ and set
# $$
# B_{k,m} = \frac{-1}{k^2+m^2}A_{k,m}
# $$
# The sum for $\psi$ (the so-called inverse Fourier transform) would then provide the answer for $\psi(x,y)$.
#
#
# But it might be a bit more difficult than that for a grid-point model. If we are trying to enforce:
#
#
# $$
# \lapl \psi(x,y) = \frac{1}{\Delta x^2} \left[ \psi(x-\Delta x, y) -2 \psi(x, y) + \psi(x+\Delta x, y) \right]
# +\frac{1}{\Delta y^2} \left[ \psi(x, y-\Delta y) -2 \psi(x, y) + \psi(x,y+\Delta y) \right]
# $$
#
# $$
# \lapl \psi = \lapl \sum_{k,m} B_{k,m} \sin(kx)\sin(my) = \sum_{k,m} B_{k,m} \lapl \sin(kx)\sin(my)
# $$
# $$
# =\sum_{k,m}
# \left\{ \frac{2}{\Delta x^2}\left[ \cos(k\Delta x) -1 \right]
# +\frac{2}{\Delta y^2}\left[ \cos(m\Delta y) -1 \right] \right\}
# B_{k,m} \sin(kx)\sin(my)
# $$
#
# So
# $$
# B_{k,m} = \frac{1}{ \frac{2}{\Delta x^2}\left[ \cos(k\Delta x) -1 \right]
# +\frac{2}{\Delta y^2}\left[ \cos(m\Delta y) -1 \right] } A_{k,m}
# $$
#
# Note that for a sufficiently small $k$ and $m$, the "discrete" form above simplifies to the "calculus" form
# $$
# B_{k,m} = \frac{-1}{k^2+m^2}A_{k,m}
# $$
# We will explore in the tasks whether the discrete form or the calculus form provides any significant difference in a simulation.
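#
# As a concrete illustration, here is a minimal sketch of the whole inversion using double sine
# transforms (this is *not* the notebook's own solver, which is defined under "Functions" below).
# It assumes $\zeta = 0$ on all four boundaries, uses the "discrete" denominator above, and relies
# on the standard DST-I convention of `scipy.fftpack.dst`, which is its own inverse up to a factor
# $2(N+1)$ per axis.

def poisson_dst_sketch(zeta, dx, dy):
    zi = zeta[1:-1, 1:-1]                       # interior points; zeta is zero on the boundary
    Nyi, Nxi = zi.shape
    # forward DST-I along both axes gives the coefficients A_{k,m}
    A = scipy.fftpack.dst(scipy.fftpack.dst(zi, type=1, axis=0), type=1, axis=1)
    kdx = np.pi*np.arange(1, Nxi+1)/(Nxi+1)     # k*dx for K = 1 .. Nx-2
    mdy = np.pi*np.arange(1, Nyi+1)/(Nyi+1)     # m*dy for M = 1 .. Ny-2
    # eigenvalues of the discrete (5-point) Laplacian, i.e. the "discrete" denominator derived above
    denom = (2.*(np.cos(kdx)[np.newaxis, :] - 1.)/dx**2
             + 2.*(np.cos(mdy)[:, np.newaxis] - 1.)/dy**2)
    B = A/denom
    # invert with DST-I again and undo the 2*(N+1) scaling on each axis
    psi = np.zeros_like(zeta)
    psi[1:-1, 1:-1] = scipy.fftpack.dst(scipy.fftpack.dst(B, type=1, axis=0),
                                        type=1, axis=1)/(4.*(Nxi+1)*(Nyi+1))
    return psi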
# #### But where did that "discrete" form come from?
#
# I will do simpler a demonstration. If $f(x) = \sin(kx)$, then the calculus second derivative is $f''(x) = -k^2\sin(kx)$.
#
# The finite difference approximation is
#
# $$
# \pd{^2f}{x^2} =
# \frac{1}{\Delta x^2} \left[ f(x-\Delta x) -2 f(x) + f(x+\Delta x) \right]
# $$
# $$
# = \frac{1}{\Delta x^2} \left[ \sin(kx-k\Delta x) -2 \sin(kx) + \sin(kx+k\Delta x) \right]
# $$
# $$
# = \frac{1}{\Delta x^2} \left[ \sin(kx)\cos(k\Delta x)- \cos(kx)\sin(k\Delta x) -2 \sin(kx)
# + \sin(kx)\cos(k\Delta x) + \cos(kx)\sin(k\Delta x) \right]
# $$
# $$
# = \frac{1}{\Delta x^2} \left[ 2\cos(k\Delta x) -2 \right] \sin(kx)
# $$
# <hr/>
# ## Functions:
# In[3]:
# Expands the margins of a matplotlib axis,
# and so prevents arrows on boundaries from being clipped.
def stop_clipping(ax,marg=.02): # default is 2% increase
l,r,b,t = ax.axis()
dx,dy = r-l, t-b
ax.axis([l-marg*dx, r+marg*dx, b-marg*dy, t+marg*dy])
# In[4]:
# dqdt requires a | |
import locale
import logging
from django.db import connection
from django.core.exceptions import FieldError
from django.db.models import Q, Count, Min, Max
import lfs.catalog.models
from lfs.catalog.settings import CONFIGURABLE_PRODUCT
from lfs.catalog.settings import PRODUCT_WITH_VARIANTS
from lfs.catalog.settings import PROPERTY_VALUE_TYPE_FILTER
from lfs.catalog.settings import STANDARD_PRODUCT
from lfs.manufacturer.models import Manufacturer
logger = logging.getLogger(__name__)
# TODO: Add unit test
def get_current_top_category(request, obj):
"""
Returns the current top category of a product.
"""
if obj.__class__.__name__.lower() == "product":
category = obj.get_current_category(request)
else:
category = obj
if category is None:
return category
while category.parent is not None:
category = category.parent
return category
def get_price_filters(category, product_filter, price_filter, manufacturer_filter):
""" Creates price filter based on the min and max prices of the category's
products
"""
# If a price filter is set we return just this.
if price_filter:
return {
"show_reset": True,
"min": locale.format("%.2f", price_filter["min"]),
"max": locale.format("%.2f", price_filter["max"]),
"disabled": False,
}
# Base are the filtered products
products = get_filtered_products_for_category(category, product_filter, price_filter, None, manufacturer_filter)
if not products:
return []
all_products = lfs.catalog.models.Product.objects.filter(Q(pk__in=products) | (Q(parent__in=products) & Q(active=True)))
res = all_products.aggregate(min_price=Min('effective_price'), max_price=Max('effective_price'))
pmin, pmax = res['min_price'], res['max_price']
    disabled = pmin is None or pmax is None
try:
pmin = locale.format("%.2f", pmin)
except TypeError:
pmin = 0.0
try:
pmax = locale.format("%.2f", pmax)
except TypeError:
pmax = 0.0
return {
"show_reset": False,
"min": pmin,
"max": pmax,
"disabled": disabled,
}
def get_manufacturer_filters(category, product_filter, price_filter, manufacturer_filter):
"""Creates manufacturer filter links based on the manufacturers bound to the products in category
"""
# Base are the filtered products
products = get_filtered_products_for_category(category, product_filter, price_filter, None, None)
if not products:
return []
all_products = lfs.catalog.models.Product.objects.filter(Q(pk__in=products) | (Q(parent__in=products) & Q(active=True)))
# And their parents
# product_ids = []
# for product in products:
# if product.parent:
# product_ids.append(product.parent_id)
# else:
# product_ids.append(product.pk)
out = {"show_reset": False}
if manufacturer_filter:
out = {
"show_reset": True
}
else:
manufacturer_filter = []
qs = Manufacturer.objects.filter(products__in=all_products).annotate(products_count=Count('products'))
out['items'] = [{'obj': obj, 'selected': obj.pk in manufacturer_filter} for obj in qs]
return out
def get_product_filters(category, product_filter, price_filter, manufacturer_filter, sorting):
"""Returns the next product filters based on products which are in the given
category and within the result set of the current filters.
"""
mapping_manager = MappingCache()
properties_mapping = get_property_mapping()
options_mapping = get_option_mapping()
property_ids = _get_property_ids()
product_ids = _get_product_ids(category)
set_filters = dict(product_filter)
# Number Fields
number_fields_dict = {}
if property_ids and product_ids:
cursor = connection.cursor()
cursor.execute("""SELECT property_group_id, property_id, min(value_as_float), max(value_as_float)
FROM catalog_productpropertyvalue
WHERE type=%s
AND product_id IN (%s)
AND property_id IN (%s)
GROUP BY property_group_id, property_id""" % (PROPERTY_VALUE_TYPE_FILTER, product_ids, property_ids))
for row in cursor.fetchall():
property_group_id = row[0]
property_id = row[1]
prop = properties_mapping[property_id]
if prop.is_select_field or prop.is_text_field or not prop.filterable:
continue
# cache property groups for later use
property_group = mapping_manager.get(lfs.catalog.models.PropertyGroup, property_group_id)
key = '{0}_{1}'.format(property_group_id, property_id)
if key in product_filter.get("number-filter", {}):
pmin, pmax = product_filter.get("number-filter").get(key)['value'][0:2]
show_reset = True
else:
pmin, pmax = row[2:4]
show_reset = False
try:
pmin = locale.format("%.2f", float(pmin))
except TypeError:
pmin = 0.0
try:
pmax = locale.format("%.2f", float(pmax))
except TypeError:
pmax = 0.0
property_group_dict = number_fields_dict.setdefault(property_group_id, {'property_group': property_group,
'items': []})
property_group_dict['items'].append({
"id": property_id,
"property_group_id": property_group_id,
"position": prop.position,
"object": prop,
"name": prop.name,
"title": prop.title,
"unit": prop.unit,
"show_reset": show_reset,
"show_quantity": True,
"items": {"min": pmin, "max": pmax}
})
# convert to list ordered by property group name
number_fields = number_fields_dict.values()
number_fields = sorted(number_fields, key=lambda a: a["property_group"].name)
for pg in number_fields:
pg['items'] = sorted(pg['items'], key=lambda a: a['name'])
# Select Fields & Text Fields
select_fields_dict = {}
if property_ids and product_ids:
cursor = connection.cursor()
cursor.execute("""SELECT property_group_id, property_id, value
FROM catalog_productpropertyvalue
WHERE type=%s
AND product_id IN (%s)
AND property_id IN (%s)
GROUP BY property_group_id, property_id, value""" % (PROPERTY_VALUE_TYPE_FILTER, product_ids, property_ids))
for row in cursor.fetchall():
property_group_id = row[0]
property_id = row[1]
value = row[2]
prop = properties_mapping[property_id]
if prop.is_number_field or not prop.filterable:
continue
# use property group cache
property_group = mapping_manager.get(lfs.catalog.models.PropertyGroup, property_group_id)
property_group_dict = select_fields_dict.setdefault(property_group_id, {'property_group': property_group,
'properties': {}})
properties = property_group_dict['properties']
if prop.is_select_field:
name = options_mapping[value].name
position = options_mapping[value].position
else:
name = value
position = 10
if name == value and name == '':
continue
# initialize list of property values
properties.setdefault(property_id, [])
properties[property_id].append({
"id": property_id,
"property_group_id": property_group_id,
"value": value,
"name": name,
"title": prop.title,
"position": position,
"show_quantity": True,
})
# Creates the filters to count the existing products per property option,
# which is used within the filter portlet
new_product_filter = {}
if product_filter.get("number-filter"):
new_product_filter["number-filter"] = product_filter["number-filter"]
for property_group_id, property_group_dict in select_fields_dict.items():
properties = property_group_dict['properties']
for property_id, options in properties.items():
key = '{0}_{1}'.format(property_group_id, property_id)
for option in options:
# The option in question is used at any rate
new_product_filter["select-filter"] = {key: {'property_id': property_id,
'property_group_id': property_group_id,
'value': option["value"]}}
# All checked options of all other properties is also used
for f0, f1 in product_filter.get("select-filter", {}).items():
print f0, f1, key
if f0 != key:
new_product_filter["select-filter"][f0] = f1
# Tests if the option is checked
if (f0 == key) and (option["value"] in f1['value'].split("|")):
option["checked"] = True
option["quantity"] = len(get_filtered_products_for_category(category, new_product_filter, price_filter, None))
# Transform the property groups and properties inside into lists to be able to iterate over these in template
property_groups_list = select_fields_dict.values()
for property_group_dict in property_groups_list:
properties = property_group_dict['properties']
property_group_id = property_group_dict['property_group'].pk
result = []
# Transform the group properties into a list of dicts
for property_id, items in properties.items():
prop = properties_mapping[property_id]
items.sort(lambda a, b: cmp(a["position"], b["position"]))
# Move items with zero quantity to the end of the list
for x in range(0, len(items)):
if items[x]["quantity"] == 0:
items.insert(len(items), items.pop(x))
result.append({
"id": property_id,
"property_group_id": property_group_id,
"position": prop.position,
"unit": prop.unit,
"show_reset": '%s_%s' % (property_group_id, property_id) in set_filters.get('select-filter', {}).keys(),
"name": prop.name,
"title": prop.title,
"items": items,
})
result = sorted(result, key=lambda a: a["position"])
property_group_dict['properties'] = result
property_groups_list = sorted(property_groups_list, key=lambda a: a['property_group'].name)
return {
"select_fields": property_groups_list,
"number_fields": number_fields,
}
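# Illustrative sketch (editor's addition, not part of the original module) of the
# template context returned above; the concrete ids, names and values are made up:
#
#   {
#       "select_fields": [
#           {"property_group": <PropertyGroup pk=3>,
#            "properties": [
#                {"id": 7, "property_group_id": 3, "position": 1, "unit": "",
#                 "show_reset": False, "name": "color", "title": "Color",
#                 "items": [{"id": 7, "property_group_id": 3, "value": "42",
#                            "name": "red", "title": "Color", "position": 1,
#                            "show_quantity": True, "checked": True,
#                            "quantity": 12}]}]},
#       ],
#       "number_fields": [...],  # built earlier in this function (not shown here)
#   }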
def _get_property_ids():
property_ids = lfs.catalog.models.ProductPropertyValue.objects.distinct().values_list('property_id', flat=True)
return ", ".join(map(str, property_ids))
def _get_product_ids(category):
products = category.get_all_products() if category.show_all_products else category.get_products()
if not products:
return ''
all_products = lfs.catalog.models.Product.objects.filter(Q(pk__in=products) | (Q(parent__in=products) & Q(active=True)))
product_ids = all_products.values_list('id', flat=True)
return ", ".join(map(str, product_ids))
# TODO: Implement this as a method of Category
def get_filtered_products_for_category(category, filters, price_filter, sorting, manufacturers_filter=None):
"""Returns products for given categories and current filters sorted by
current sorting.
"""
from lfs.catalog.models import Product, ProductPropertyValue
if filters:
if category.show_all_products:
products = category.get_all_products()
else:
products = category.get_products()
# All variants of category products
all_variants = Product.objects.filter(parent__in=products)
# Generate filter
filters_query = Q()
for filter_dict in filters.get("select-filter", {}).values():
property_group_id = filter_dict['property_group_id']
property_id = filter_dict['property_id']
value = filter_dict['value']
q_options = Q()
for option in value.split("|"):
q_options |= Q(value=option)
q = Q(property_group_id=property_group_id, property_id=property_id) & q_options
filters_query |= q
for key, values_dict in filters.get("number-filter", {}).items():
values = values_dict['value']
property_id = values_dict['property_id']
property_group_id = values_dict['property_group_id']
q = Q(property_group_id=property_group_id, property_id=property_id, value_as_float__range=(values[0], values[1]))
filters_query |= q
# The idea behind the SQL query generated below is: if for every filter
# (property=value) there exists a "product property value" for a product id,
# then the product matches.
#
# Example ValuesListQuerySet built by statements below is:
#
# ProductPropertyValue.objects.filter(Q(property_id=1, value='1') | Q(property_id=2, value='1'),
# product__in=products,
# type=PROPERTY_VALUE_TYPE_FILTER) \
# .values('product_id') \
# .annotate(cnt=Count('id')).filter(cnt=2).values_list('product_id', flat=True)
#
# it evaluates to:
#
# SELECT "catalog_productpropertyvalue"."product_id"
# FROM "catalog_productpropertyvalue"
# WHERE ((
# ("catalog_productpropertyvalue"."value" = 1 AND "catalog_productpropertyvalue"."property_id" = 1 )
# OR
# ("catalog_productpropertyvalue"."value" = 1 AND "catalog_productpropertyvalue"."property_id" = 2 )
# )
# AND "catalog_productpropertyvalue"."type" = 0
# AND "catalog_productpropertyvalue"."product_id" IN (SELECT U0."id"
# FROM "catalog_product" U0
# WHERE U0."name" LIKE %da% ESCAPE '\' ))
# GROUP BY "catalog_productpropertyvalue"."product_id"
# HAVING COUNT("catalog_productpropertyvalue"."id") = 2
length = len(filters.get("select-filter", {}).items()) + len(filters.get("number-filter", {}).items())
# PRODUCTS - get all products with matching filters.
matching_product_ids = ProductPropertyValue.objects.filter(product__in=products,
type=PROPERTY_VALUE_TYPE_FILTER)
if filters_query is not None:
matching_product_ids = matching_product_ids.filter(filters_query)
matching_product_ids = matching_product_ids.values('product_id').annotate(cnt=Count('id')) \
.filter(cnt=length).values_list('product_id', flat=True)
# VARIANTS - get matching variants and then their parents, as we're interested
# in products with variants, not the variants themselves
matching_variant_ids = ProductPropertyValue.objects.filter(product__in=all_variants,
type=PROPERTY_VALUE_TYPE_FILTER)
if filters_query is not None:
matching_variant_ids = matching_variant_ids.filter(filters_query)
matching_variant_ids = matching_variant_ids.values('product_id').annotate(cnt=Count('id')) \
.filter(cnt=length).values_list('product_id', flat=True)
variant_products = Product.objects.filter(pk__in=matching_variant_ids)
# Merge results
products = Product.objects.filter(Q(pk__in=matching_product_ids) |
Q(pk__in=variant_products.values_list('parent_id', flat=True))).distinct()
else:
categories = [category]
if category.show_all_products:
categories.extend(category.get_all_children())
products = lfs.catalog.models.Product.objects.filter(
active=True,
categories__in=categories,
sub_type__in=[STANDARD_PRODUCT, PRODUCT_WITH_VARIANTS, CONFIGURABLE_PRODUCT]).distinct()
# TODO: It might be more effective to move price filters directly into if/else clause
APIEndpointUser = None
tempSubscription = classes.Subscription() # parent=targetUser.key leads to error down the stream
tempSubscription.url = url
success, endpoint = tempSubscription.rawSubscription().APIEndpoint()
if not success:
responses["response"] = endpoint
responses[
"explanation"
] = "Error while retrieving subscription’s associated API Endpoint object. See `response` field for details."
if log:
log.response = responses
log.put()
return
# ID by user, but source user doesn't hold the subscription
if sourceUser and not sourceUser.subscriptionByURL(url):
responses["response"] = "unknownSubscriptionForUser"
responses["explanation"] = "Source user does not hold this subscription"
if log:
log.response = responses
log.put()
return
# ID by API endpoint, but subscription's canonical url
# does not point to same API endpoint
elif endpoint and sourceAPIEndpoint and endpoint != sourceAPIEndpoint:
responses["response"] = "invalidSourceAPIEndpoint"
responses[
"explanation"
] = "Subscription’s canonical URL does not point at API Endpoint yielded by `APIKey` field"
if log:
log.response = responses
log.put()
return
elif sourceUser and targetUser and sourceUser == targetUser:
responses["response"] = "sourceAndTargetIdentical"
responses["explanation"] = "Source and target users are identical"
if log:
log.response = responses
log.put()
return
elif APIEndpointUser and targetUser and APIEndpointUser == targetUser:
responses["response"] = "sourceAndTargetIdentical"
responses["explanation"] = "Source and target users are identical"
if log:
log.response = responses
log.put()
return
else:
assert endpoint
assert targetUser
assert sourceUser or sourceAPIEndpoint
oldSubscriptions = targetUser.subscriptions()
oldURLs = [x.url for x in oldSubscriptions if (x is not None and x.key is not None)]
# Save new subscription
if url in oldURLs:
print("ALERT: targetUser already holds subscription")
if url not in oldURLs:
subscription = classes.Subscription(parent=targetUser.key)
subscription.url = url
subscription.type = "invitation"
subscription.confirmed = False
if sourceUser:
subscription.invitedByUserKey = sourceUser.key
elif sourceAPIEndpoint:
subscription.invitedByAPIEndpointKey = sourceAPIEndpoint.key
subscription.putnow()
# TODO
# It's not clear why the below block is necessary.
# However, without it, invitation revocations work only on the second
# attempt, which is the weirdest thing ever. I pulled my hair out for this.
# Before adding this block (taken from downloadUserSubscriptions()),
# revoking an invitation would only work on first attempt after the
# target user has reloaded their user account after the invitation was sent.
success, endpoint = subscription.rawSubscription().APIEndpoint()
endpoint.updateJSON()
if success:
endpointCommand = typeworld.api.EndpointResponse()
if not endpoint.endpointCommand:
success, message = endpoint.updateJSON(force=True)
if success:
endpointCommand.loadDict(endpoint.endpointCommand)
success, message = subscription.sendInvitationEmail()
if not success:
responses["response"] = message
responses[
"explanation"
] = "Error while sending invitation notification email. See `response` field for details."
if log:
log.response = responses
log.put()
return
if targetUser.stripeSubscriptionReceivesService("world.type.professional_user_plan"):
success, message = targetUser.announceChange(g.form._get("sourceAnonymousAppID"))
if not success:
responses["response"] = message
responses[
"explanation"
] = "Error while announcing invitation to target user. See `response` field for details."
if log:
log.response = responses
log.put()
return
if log:
log.response = responses
log.put()
def revokeSubscriptionInvitation(responses):
url = urllib.parse.unquote(g.form._get("subscriptionURL"))
if g.form._get("APIKey"):
sourceAPIEndpoint = classes.APIEndpoint.query(classes.APIEndpoint.APIKey == g.form._get("APIKey")).get(
read_consistency=ndb.STRONG
)
if sourceAPIEndpoint:
log = classes.APILog(parent=sourceAPIEndpoint.key)
log.command = "revokeSubscriptionInvitation"
log.incoming = dict(g.form)
else:
log = None
else:
sourceAPIEndpoint = None
log = None
success, message = typeworld.client.urlIsValid(url)
if not success:
responses["response"] = "invalidSubscriptionURL"
responses["explanation"] = f"The `subscriptionURL` is of an invalid format: {message}"
if log:
log.response = responses
log.put()
return
if g.form._get("APIKey") and not sourceAPIEndpoint:
responses["response"] = "invalidSourceAPIEndpoint"
responses["explanation"] = "The `APIKey` does not yield a valid API Endpoint"
if log:
log.response = responses
log.put()
return
if g.form._get("targetUserEmail"):
targetUser = classes.User.query(classes.User.email == g.form._get("targetUserEmail")).get(
read_consistency=ndb.STRONG
)
else:
targetUser = None
if not targetUser:
responses["response"] = "unknownTargetEmail"
responses["explanation"] = "The `targetUserEmail` does not yield a valid user account"
if log:
log.response = responses
log.put()
return
if g.form._get("sourceUserEmail"):
sourceUser = classes.User.query(classes.User.email == g.form._get("sourceUserEmail")).get(
read_consistency=ndb.STRONG
)
else:
sourceUser = None
if g.form._get("APIKey"):
sourceAPIEndpoint = classes.APIEndpoint.query(classes.APIEndpoint.APIKey == g.form._get("APIKey")).get(
read_consistency=ndb.STRONG
)
else:
sourceAPIEndpoint = None
# sourceUser = User.query(User.email == g.form._get('sourceUserEmail'))
# .get(read_consistency=ndb.STRONG)
# sourceAPIEndpoint = APIEndpoint.query(APIEndpoint.APIKey == g.form._get('APIKey'))
# .get(read_consistency=ndb.STRONG)
if not sourceUser and not sourceAPIEndpoint:
responses["response"] = "invalidSource"
responses["explanation"] = "A valid source could not be identified either by `sourceUserEmail` or by `APIKey`"
if log:
log.response = responses
log.put()
return
subscription = targetUser.subscriptionByURL(url, subscriptions=targetUser.subscriptionInvitations())
# TODO:
# not sure if this is good.
# Should inviters hold the subscription or not when they revoke an invitation?
# Subsequently, will invitees need to be revoked when a user deletes a subscription?
# ID by user, but source user doesn't hold the subscription
# elif sourceUser.exists and not sourceUser.subscriptionByURL(url):
# responses = {'response': 'invalidSource'}
# if subscription:
# ID by API endpoint, but subscription's canonical url does
# not point to same API endpoint
if subscription:
success, endpoint = subscription.rawSubscription().APIEndpoint()
if sourceAPIEndpoint and subscription and endpoint and (endpoint != sourceAPIEndpoint):
responses["response"] = "invalidSourceAPIEndpoint"
responses[
"explanation"
] = "Subscription’s canonical URL does not point at API Endpoint yielded by `APIKey` field"
if log:
log.response = responses
log.put()
return
if not subscription:
responses["response"] = "unknownSubscription"
responses["explanation"] = "Subscription does not exist"
if log:
log.response = responses
log.put()
return
targetUserHoldsSubscription = subscription.key.parent() == targetUser.key
invitedBySourceUser = sourceUser and subscription.invitedByUserKey == sourceUser.key
invitedByAPIEndpoint = sourceAPIEndpoint and subscription.invitedByAPIEndpointKey == sourceAPIEndpoint.key
if not targetUserHoldsSubscription:
responses["response"] = "unknownSubscription"
responses["explanation"] = "Target user does not hold this subscription"
if log:
log.response = responses
log.put()
return
if not invitedBySourceUser and not invitedByAPIEndpoint:
responses["response"] = "unknownSubscription"
responses["explanation"] = "Sender of invitation is unclear"
if log:
log.response = responses
log.put()
return
if sourceUser:
subscription.invitationRevokedByUserKey = sourceUser.key
elif sourceAPIEndpoint:
subscription.invitationRevokedByAPIEndpointKey = sourceAPIEndpoint.key
success, message = subscription.sendRevokedEmail()
if not success:
responses["response"] = message
responses["explanation"] = "Error while sending invitation revocation email. See `response` field for details."
if log:
log.response = responses
log.put()
return
subscription.key.delete()
if targetUser.stripeSubscriptionReceivesService("world.type.professional_user_plan"):
success, message = targetUser.announceChange(g.form._get("sourceAnonymousAppID"))
if not success:
responses["response"] = message
responses[
"explanation"
] = "Error while announcing invitation revocation to target user. See `response` field for details."
if log:
log.response = responses
log.put()
return
if log:
log.response = responses
log.put()
def acceptInvitations(responses):
if not g.form._get("anonymousUserID"):
responses["response"] = "userUnknown"
return
k = ndb.Key(urlsafe=g.form._get("anonymousUserID").encode())
if not k.id() or not k.get(read_consistency=ndb.STRONG):
responses["response"] = "userUnknown"
return
user = k.get(read_consistency=ndb.STRONG)
if g.form._get("secretKey") != user.secretKey:
responses["response"] = "secretKeyInvalid"
return
appInstance = classes.AppInstance.get_by_id(
g.form._get("anonymousAppID"), parent=user.key, read_consistency=ndb.STRONG
)
if not appInstance.key.id():
responses["response"] = "appInstanceUnknown"
return
else:
# Add new subscriptions
keys = [ndb.Key(urlsafe=x.encode()) for x in g.form._get("subscriptionIDs").split(",") if x and x != "empty"]
for subscription in user.subscriptions():
if subscription.key in keys:
subscription.confirmed = True
subscription.invitationAcceptedTime = helpers.now()
subscription.putnow()
success, message = subscription.sendAcceptedEmail()
if not success:
responses["response"] = "sendAcceptedEmail(): %s" % message
return
downloadUserSubscriptions(responses)
if user.stripeSubscriptionReceivesService("world.type.professional_user_plan"):
success, message = user.announceChange(g.form._get("sourceAnonymousAppID"))
if not success:
responses["response"] = message
return
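# Editor's sketch (assumed shape, derived from the g.form reads above): the form
# payload handled by acceptInvitations()/declineInvitations() looks roughly like
#
#   {
#       "anonymousUserID": "<urlsafe ndb key of the classes.User entity>",
#       "secretKey": "<must equal user.secretKey>",
#       "anonymousAppID": "<AppInstance id, a child entity of the user key>",
#       "subscriptionIDs": "<urlsafe key>,<urlsafe key>",   # or "empty"
#       "sourceAnonymousAppID": "<app instance excluded from announceChange()>",
#   }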
def declineInvitations(responses):
if not g.form._get("anonymousUserID"):
responses["response"] = "userUnknown"
return
k = ndb.Key(urlsafe=g.form._get("anonymousUserID").encode())
if not k.id() or not k.get(read_consistency=ndb.STRONG):
responses["response"] = "userUnknown"
return
user = k.get(read_consistency=ndb.STRONG)
if g.form._get("secretKey") != user.secretKey:
responses["response"] = "secretKeyInvalid"
appInstance = classes.AppInstance.get_by_id(
g.form._get("anonymousAppID"), parent=user.key, read_consistency=ndb.STRONG
)
if not appInstance.key.id():
responses["response"] = "appInstanceUnknown"
return
else:
# Delete subscriptions
keys = [ndb.Key(urlsafe=x.encode()) for x in g.form._get("subscriptionIDs").split(",") if x and x != "empty"]
for subscription in user.subscriptions():
if subscription.key in keys:
subscription.key.delete()
success, message = subscription.sendDeletedEmail()
if not success:
responses["response"] = "sendDeletedEmail(): %s" % message
return
downloadUserSubscriptions(responses)
if user.stripeSubscriptionReceivesService("world.type.professional_user_plan"):
success, message = user.announceChange(g.form._get("sourceAnonymousAppID"))
if not success:
responses["response"] = message
return
def userAppInstances(responses):
if not g.form._get("anonymousUserID"):
responses["response"] = "userUnknown"
return
k = ndb.Key(urlsafe=g.form._get("anonymousUserID").encode())
if not k.id() or not k.get(read_consistency=ndb.STRONG):
responses["response"] = "userUnknown"
return
user = k.get(read_consistency=ndb.STRONG)
if g.form._get("secretKey") != user.secretKey:
responses["response"] = "secretKeyInvalid"
return
appInstance = classes.AppInstance.get_by_id(
g.form._get("anonymousAppID"), parent=user.key, read_consistency=ndb.STRONG
)
if not appInstance.key.id():
responses["response"] = "appInstanceUnknown"
return
if appInstance.revoked:
responses["response"] = "appInstanceRevoked"
return
responses["appInstances"] = []
instances = user.appInstances()
for instance in instances:
i = {}
i["anonymousAppID"] = instance.key.id()
for key in (
"machineModelIdentifier",
"machineHumanReadableName",
"machineSpecsDescription",
"machineOSVersion",
"machineNodeName",
"revoked",
):
i[key] = getattr(instance, key)
for key in ("lastUsed", "revokedTime"):
if getattr(instance, key):
i[key] = int(getattr(instance, key).timestamp())
else:
i[key] = ""
i["VM"] = instance.isVM()
i["image"] = instance.machineImage()
responses["appInstances"].append(i)
def revokeAppInstance(responses):
if not g.form._get("anonymousUserID"):
responses["response"] = "userUnknown"
return
k = ndb.Key(urlsafe=g.form._get("anonymousUserID").encode())
if not k.id() or not k.get(read_consistency=ndb.STRONG):
responses["response"] = "userUnknown"
return
user = k.get(read_consistency=ndb.STRONG)
if g.form._get("secretKey") != user.secretKey:
responses["response"] = "secretKeyInvalid"
appInstance = classes.AppInstance.get_by_id(
g.form._get("anonymousAppID"), parent=user.key, read_consistency=ndb.STRONG
)
if not appInstance.key.id():
responses["response"] = "appInstanceUnknown"
return
success, message = appInstance.revoke("revoked")
if success:
success, message = user.announceChange(g.form._get("sourceAnonymousAppID"))
if not success:
responses["response"] = "Announce | |
<filename>atoMEC/check_inputs.py
"""
The check_inputs module checks the validity of all user-defined inputs.
If inputs are invalid, InputError exceptions are raised. It also assigns
appropriate default inputs where none are supplied.
Classes
-------
* :class:`Atom` : Check the inputs from the :class:`atoMEC.Atom` object.
* :class:`ISModel` : Check the inputs from the :obj:`atoMEC.models.ISModel` class.
* :class:`EnergyCalcs` : Check the inputs from the\
:func:`atoMEC.models.ISModel.CalcEnergy` function.
* :class:`InputError` : Exit atoMEC and print relevant input error message.
* :class:`InputWarning` : Warn if inputs are considered outside of typical ranges.
"""
# standard python packages
import sys
# external packages
from mendeleev import element
import sqlalchemy.orm.exc as ele_chk
import numpy as np
from math import pi
# internal packages
from . import unitconv
from . import xc
from . import config
# define some custom types
intc = (int, np.integer) # unifying type for integers
class Atom:
"""Check the inputs from the Atom class."""
def check_species(self, species):
"""
Check the species is a string and corresponds to an actual element.
Parameters
----------
species : str
chemical symbol for atomic species
Returns
-------
element : mendeleev element object
the periodic table element corresponding to the given chemical symbol
Raises
------
InputError.species_error
Chemical symbol is not valid
"""
if not isinstance(species, str):
raise InputError.species_error("element is not a string")
else:
try:
return element(species)
except ele_chk.NoResultFound:
raise InputError.species_error("invalid element")
def check_units_temp(self, units_temp):
"""
Check the units of temperature are accepted.
Parameters
----------
units_temp : str
units of temperature
Returns
-------
units_temp : str
units of temperature (if valid input) converted to lowercase
Raises
------
InputError.temp_error
unit of temperature is not accepted, i.e. not one of "ha", "ev" or "k"
"""
units_accepted = ["ha", "ev", "k"]
if units_temp.lower() not in units_accepted:
raise InputError.temp_error("units of temperature are not recognised")
return units_temp.lower()
def check_temp(self, temp, units_temp):
"""
Check the temperature is a float within a sensible range.
Parameters
----------
temp : float
temperature (in any accepted units)
units_temp : str
units of temperature
Returns
-------
temp : float
temperature in units of Hartree
Raises
------
InputError.temp_error
input temperature is not a positive number
InputWarning.temp_warning
input temperature is not inside a well-tested range
"""
if not isinstance(temp, (float, intc)):
raise InputError.temp_error("temperature is not a number")
else:
# convert internal temperature to hartree
if units_temp.lower() == "ev":
temp = unitconv.ev_to_ha * temp
elif units_temp.lower() == "k":
temp = unitconv.K_to_ha * temp
# check if temperature is within some reasonable limits
if temp < 0:
raise InputError.temp_error("temperature is negative")
if temp < 0.01:
print(InputWarning.temp_warning("low"))
return temp
elif temp > 3.5:
print(InputWarning.temp_warning("high"))
return temp
else:
return temp
def check_charge(self, charge):
"""
Check the net charge is an integer.
Parameters
----------
charge : int
the net charge
Returns
-------
charge : int
the net charge (if input valid)
Raises
------
InputError.charge_error
if charge is not an integer
"""
if not isinstance(charge, intc):
raise InputError.charge_error()
else:
return charge
def check_units_radius(self, units_radius):
"""
Check the units of radius are accepted.
Parameters
----------
units_radius : str
units of radius
Returns
-------
units_radius : str
units of radius (if accepted) converted to lowercase
Raises
------
InputError.density_error
if units of radius are not one of "bohr", "angstrom" or "ang"
"""
radius_units_accepted = ["bohr", "angstrom", "ang"]
if units_radius.lower() not in radius_units_accepted:
raise InputError.density_error("Radius units not recognised")
units_radius = units_radius.lower()
return units_radius
def check_units_density(self, units_density):
"""
Check the units of density are accepted.
Parameters
----------
units_density : str
units of density
Returns
-------
units_density : str
units of density (if accepted) converted to lowercase
Raises
------
InputError.density_error
if units of density are not one of "g/cm3" or "gcm3"
"""
density_units_accepted = ["g/cm3", "gcm3"]
if units_density.lower() not in density_units_accepted:
raise InputError.density_error("Density units not recognised")
return units_density.lower()
def check_radius(self, radius, units_radius):
"""
Check the Wigner-Seitz radius is valid and reasonable.
Parameters
----------
radius : float or int
Wigner-Seitz radius (in input units)
units_radius : str
input units of radius
Returns
-------
radius : float
Wigner-Seitz radius in Hartree units (Bohr)
Raises
------
InputError.density_error
if the radius is not a positive number > 0.1
"""
if not isinstance(radius, (float, intc)):
raise InputError.density_error("Radius is not a number")
else:
if units_radius == "angstrom" or units_radius == "ang":
radius = unitconv.angstrom_to_bohr * radius
if radius < 0.1:
raise InputError.density_error(
"Radius must be a positive number greater than 0.1"
)
return radius
def check_density(self, density):
r"""
Check the mass density is valid and reasonable.
Parameters
----------
density : float or int
mass density (in :math:`\mathrm{g\ cm}^{-3}`)
Returns
-------
density : float
mass density (in :math:`\mathrm{g\ cm}^{-3}`) if input accepted
Raises
------
InputError.density_error
if the density is not a positive number <= 100
"""
if not isinstance(density, (float, intc)):
raise InputError.density_error("Density is not a number")
else:
if density > 100 or density < 0:
raise InputError.density_error(
"Density must be a positive number less than 100"
)
return density
def check_rad_dens_init(self, atom, radius, density, units_radius, units_density):
"""
Check that at least one of radius or density is specified and reasonable.
In case both are specified, check they are compatible.
Parameters
----------
atom : atoMEC.Atom
the main Atom object
radius : float or int
Wigner-Seitz radius
density : float or int
mass density
units_radius : str
units of radius
units_density : str
units of density
Returns
-------
radius, density : tuple of floats
the Wigner-Seitz radius and mass density if inputs are valid
Raises
------
InputError.density_error
if neither density nor radius is given, or if one is invalid,
or if both are given and they are incompatible
"""
if not isinstance(density, (float, intc)):
raise InputError.density_error("Density is not a number")
if not isinstance(radius, (float, intc)):
raise InputError.density_error("Radius is not a number")
else:
if units_radius == "angstrom" or units_radius == "ang":
radius = unitconv.angstrom_to_bohr * radius
if density == -1 and radius != -1:
if radius < 0.1:
raise InputError.density_error(
"Radius must be a positive number greater than 0.1"
)
else:
density = self.radius_to_dens(atom, radius)
elif radius == -1 and density != -1:
if density > 100 or density < 0:
raise InputError.density_error(
"Density must be a positive number less than 100"
)
else:
radius = self.dens_to_radius(atom, density)
elif radius != -1 and density != -1:
density_test = self.radius_to_dens(atom, radius)
if abs((density_test - density) / density) > 5e-2:
raise InputError.density_error(
"Both radius and density are specified but they are not"
" compatible"
)
else:
density = density_test
elif radius == -1 and density == -1:
raise InputError.density_error(
"One of radius or density must be specified"
)
return radius, density
def radius_to_dens(self, atom, radius):
"""
Convert the Voronoi sphere radius to a mass density.
Parameters
----------
atom : atoMEC.Atom
the main Atom object
radius : float
the Wigner-Seitz radius
Returns
-------
density : float
the mass density
"""
# radius in cm
rad_cm = radius / unitconv.cm_to_bohr
# volume in cm^3
vol_cm = (4.0 * pi * rad_cm ** 3) / 3.0
# atomic mass in g
mass_g = config.mp_g * atom.at_mass
# density in g cm^-3
density = mass_g / vol_cm
return density
def dens_to_radius(self, atom, density):
"""
Convert the mass density to a Wigner-Seitz radius.
Parameters
----------
atom : atoMEC.Atom
the main Atom object
density : float
the mass density
Returns
-------
radius : float
the Wigner-Seitz radius
"""
# compute atomic mass in g
mass_g = config.mp_g * atom.at_mass
# compute volume and radius in cm^3/cm
vol_cm = mass_g / density
rad_cm = (3.0 * vol_cm / (4.0 * pi)) ** (1.0 / 3.0)
# convert to a.u.
radius = rad_cm * unitconv.cm_to_bohr
return radius
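# Editor's note: radius_to_dens() and dens_to_radius() above implement the inverse pair
#
#   density = (config.mp_g * atom.at_mass) / ((4.0 / 3.0) * pi * (radius / unitconv.cm_to_bohr) ** 3)
#   radius  = unitconv.cm_to_bohr * (3.0 * config.mp_g * atom.at_mass / (4.0 * pi * density)) ** (1.0 / 3.0)
#
# so, up to floating-point error, dens_to_radius(atom, radius_to_dens(atom, r)) == r.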
class ISModel:
"""Check the inputs for the IS model class."""
def check_xc(xc_func, xc_type):
"""
Check the exchange and correlation functionals are accepted.
Parameters
----------
xc_func : str or int
the libxc name or id of the x/c functional
xc_type : str
type i.e. "exchange" or "correlation"
Returns
-------
xc_func : str
the libxc name of the x/c functional (if valid input)
Raises
------
InputError.xc_error
if xc functional is not a valid libxc input or is not supported
by the current version of atoMEC
"""
# supported families of libxc functional by name
names_supp = ["lda"]
# supported families of | |
is None:
if row._table.is_root:
operations.append({"op": "delete",
"table": row._table.name,
"where": _where_uuid_equals(row.uuid)})
any_updates = True
else:
# Let ovsdb-server decide whether to really delete it.
pass
elif row._changes:
op = {"table": row._table.name}
if row._data is None:
op["op"] = "insert"
op["uuid-name"] = _uuid_name_from_uuid(row.uuid)
any_updates = True
op_index = len(operations) - 1
self._inserted_rows[row.uuid] = _InsertedRow(op_index)
else:
op["op"] = "update"
op["where"] = _where_uuid_equals(row.uuid)
row_json = {}
op["row"] = row_json
for column_name, datum in six.iteritems(row._changes):
if row._data is not None or not datum.is_default():
row_json[column_name] = (
self._substitute_uuids(datum.to_json()))
# If anything really changed, consider it an update.
# We can't suppress not-really-changed values earlier
# or transactions would become nonatomic (see the big
# comment inside Transaction._write()).
if (not any_updates and row._data is not None and
row._data[column_name] != datum):
any_updates = True
if row._data is None or row_json:
operations.append(op)
if row._mutations:
addop = False
op = {"table": row._table.name}
op["op"] = "mutate"
if row._data is None:
# New row
op["where"] = self._substitute_uuids(
_where_uuid_equals(row.uuid))
else:
# Existing row
op["where"] = _where_uuid_equals(row.uuid)
op["mutations"] = []
if '_removes' in row._mutations.keys():
for col, dat in six.iteritems(row._mutations['_removes']):
column = row._table.columns[col]
if column.type.is_map():
opdat = ["set"]
opdat.append(list(dat))
else:
opdat = ["set"]
inner_opdat = []
for ele in dat:
try:
datum = data.Datum.from_python(column.type,
ele, _row_to_uuid)
except error.Error:
return
inner_opdat.append(
self._substitute_uuids(datum.to_json()))
opdat.append(inner_opdat)
mutation = [col, "delete", opdat]
op["mutations"].append(mutation)
addop = True
if '_inserts' in row._mutations.keys():
for col, val in six.iteritems(row._mutations['_inserts']):
column = row._table.columns[col]
if column.type.is_map():
opdat = ["map"]
datum = data.Datum.from_python(column.type, val,
_row_to_uuid)
opdat.append(datum.as_list())
else:
opdat = ["set"]
inner_opdat = []
for ele in val:
try:
datum = data.Datum.from_python(column.type,
ele, _row_to_uuid)
except error.Error:
return
inner_opdat.append(
self._substitute_uuids(datum.to_json()))
opdat.append(inner_opdat)
mutation = [col, "insert", opdat]
op["mutations"].append(mutation)
addop = True
if addop:
operations.append(op)
any_updates = True
if self._fetch_requests:
for fetch in self._fetch_requests:
fetch["index"] = len(operations) - 1
operations.append({"op": "select",
"table": fetch["row"]._table.name,
"where": self._substitute_uuids(
_where_uuid_equals(fetch["row"].uuid)),
"columns": [fetch["column_name"]]})
any_updates = True
# Add increment.
if self._inc_row and any_updates:
self._inc_index = len(operations) - 1
operations.append({"op": "mutate",
"table": self._inc_row._table.name,
"where": self._substitute_uuids(
_where_uuid_equals(self._inc_row.uuid)),
"mutations": [[self._inc_column, "+=", 1]]})
operations.append({"op": "select",
"table": self._inc_row._table.name,
"where": self._substitute_uuids(
_where_uuid_equals(self._inc_row.uuid)),
"columns": [self._inc_column]})
# Add comment.
if self._comments:
operations.append({"op": "comment",
"comment": "\n".join(self._comments)})
# Dry run?
if self.dry_run:
operations.append({"op": "abort"})
if not any_updates:
self._status = Transaction.UNCHANGED
else:
msg = ovs.jsonrpc.Message.create_request("transact", operations)
self._request_id = msg.id
if not self.idl._session.send(msg):
self.idl._outstanding_txns[self._request_id] = self
self._status = Transaction.INCOMPLETE
else:
self._status = Transaction.TRY_AGAIN
self.__disassemble()
return self._status
def commit_block(self):
"""Attempts to commit this transaction, blocking until the commit
either succeeds or fails. Returns the final commit status, which may
be any Transaction.* value other than Transaction.INCOMPLETE.
This function calls Idl.run() on this transaction's IDL, so it may
cause Idl.change_seqno to change."""
while True:
status = self.commit()
if status != Transaction.INCOMPLETE:
return status
self.idl.run()
poller = ovs.poller.Poller()
self.idl.wait(poller)
self.wait(poller)
poller.block()
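# Editor's sketch of typical usage of this transaction API (assumes an
# already-connected Idl instance `idl` whose schema has a "Bridge" table
# with a "name" column; not part of the original module):
#
#   txn = Transaction(idl)
#   row = txn.insert(idl.tables["Bridge"])
#   row.name = "br0"
#   status = txn.commit_block()   # one of SUCCESS, UNCHANGED, TRY_AGAIN, ERROR, ...
#   if status == Transaction.SUCCESS:
#       real_uuid = txn.get_insert_uuid(row.uuid)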
def get_increment_new_value(self):
"""Returns the final (incremented) value of the column in this
transaction that was set to be incremented by Row.increment. This
transaction must have committed successfully."""
assert self._status == Transaction.SUCCESS
return self._inc_new_value
def abort(self):
"""Aborts this transaction. If Transaction.commit() has already been
called then the transaction might get committed anyhow."""
self.__disassemble()
if self._status in (Transaction.UNCOMMITTED,
Transaction.INCOMPLETE):
self._status = Transaction.ABORTED
def get_error(self):
"""Returns a string representing this transaction's current status,
suitable for use in log messages."""
if self._status != Transaction.ERROR:
return Transaction.status_to_string(self._status)
elif self._error:
return self._error
else:
return "no error details available"
def __set_error_json(self, json):
if self._error is None:
self._error = ovs.json.to_string(json)
def get_insert_uuid(self, uuid):
"""Finds and returns the permanent UUID that the database assigned to a
newly inserted row, given the UUID that Transaction.insert() assigned
locally to that row.
Returns None if 'uuid' is not a UUID assigned by Transaction.insert()
or if it was assigned by that function and then deleted by Row.delete()
within the same transaction. (Rows that are inserted and then deleted
within a single transaction are never sent to the database server, so
it never assigns them a permanent UUID.)
This transaction must have completed successfully."""
assert self._status in (Transaction.SUCCESS,
Transaction.UNCHANGED)
inserted_row = self._inserted_rows.get(uuid)
if inserted_row:
return inserted_row.real
return None
def _increment(self, row, column):
assert not self._inc_row
self._inc_row = row
self._inc_column = column
def _fetch(self, row, column_name):
self._fetch_requests.append({"row": row, "column_name": column_name})
def _write(self, row, column, datum):
assert row._changes is not None
assert row._mutations is not None
txn = row._idl.txn
# If this is a write-only column and the datum being written is the
# same as the one already there, just skip the update entirely. This
# is worth optimizing because we have a lot of columns that get
# periodically refreshed into the database but don't actually change
# that often.
#
# We don't do this for read/write columns because that would break
# atomicity of transactions--some other client might have written a
# different value in that column since we read it. (But if a whole
# transaction only does writes of existing values, without making any
# real changes, we will drop the whole transaction later in
# ovsdb_idl_txn_commit().)
if (not column.alert and row._data and
row._data.get(column.name) == datum):
new_value = row._changes.get(column.name)
if new_value is None or new_value == datum:
return
txn._txn_rows[row.uuid] = row
if '_inserts' in row._mutations:
row._mutations['_inserts'].pop(column.name, None)
if '_removes' in row._mutations:
row._mutations['_removes'].pop(column.name, None)
row._changes[column.name] = datum.copy()
def insert(self, table, new_uuid=None):
"""Inserts and returns a new row in 'table', which must be one of the
ovs.db.schema.TableSchema objects in the Idl's 'tables' dict.
The new row is assigned a provisional UUID. If 'new_uuid' is None then one
is randomly generated; otherwise 'new_uuid' should specify a randomly
generated uuid.UUID not otherwise in use. ovsdb-server will assign a
different UUID when 'txn' is committed, but the IDL will replace any
uses of the provisional UUID in the data to be committed by the
UUID assigned by ovsdb-server."""
assert self._status == Transaction.UNCOMMITTED
if new_uuid is None:
new_uuid = uuid.uuid4()
row = Row(self.idl, table, new_uuid, None)
table.rows[row.uuid] = row
self._txn_rows[row.uuid] = row
return row
def _process_reply(self, msg):
if msg.type == ovs.jsonrpc.Message.T_ERROR:
self._status = Transaction.ERROR
elif not isinstance(msg.result, (list, tuple)):
# XXX rate-limit
vlog.warn('reply to "transact" is not JSON array')
else:
hard_errors = False
soft_errors = False
lock_errors = False
ops = msg.result
for op in ops:
if op is None:
# This isn't an error in itself but indicates that some
# prior operation failed, so make sure that we know about
# it.
soft_errors = True
elif isinstance(op, dict):
error = op.get("error")
if error is not None:
if error == "timed out":
soft_errors = True
elif error == "not owner":
lock_errors = True
elif error == "aborted":
pass
else:
hard_errors = True
self.__set_error_json(op)
else:
hard_errors = True
self.__set_error_json(op)
# XXX rate-limit
vlog.warn("operation reply is not JSON null or object")
if not soft_errors and not hard_errors and not lock_errors:
if self._inc_row and not self.__process_inc_reply(ops):
hard_errors = True
if self._fetch_requests:
if self.__process_fetch_reply(ops):
self.idl.change_seqno += 1
else:
hard_errors = True
for insert in six.itervalues(self._inserted_rows):
if not self.__process_insert_reply(insert, ops):
hard_errors = True
if hard_errors:
self._status = Transaction.ERROR
elif lock_errors:
self._status = Transaction.NOT_LOCKED
elif soft_errors:
self._status = Transaction.TRY_AGAIN
else:
self._status = Transaction.SUCCESS
@staticmethod
def __check_json_type(json, types, name):
if not json:
# XXX rate-limit
vlog.warn("%s is missing" % name)
return False
elif not isinstance(json, tuple(types)):
# XXX rate-limit
vlog.warn("%s has unexpected type %s" % (name, type(json)))
return False
else:
return True
def __process_fetch_reply(self, ops):
update = False
for fetch_request in self._fetch_requests:
row = fetch_request["row"]
column_name = fetch_request["column_name"]
index = fetch_request["index"]
table = row._table
select = ops[index]
fetched_rows = select.get("rows")
if not Transaction.__check_json_type(fetched_rows, (list, tuple),
'"select" reply "rows"'):
return False
if len(fetched_rows) != 1:
# XXX rate-limit
vlog.warn('"select" reply "rows" has %d elements '
'instead of 1' % len(fetched_rows))
continue
fetched_row = fetched_rows[0]
if not Transaction.__check_json_type(fetched_row, (dict,),
'"select" reply row'):
continue
column = table.columns.get(column_name)
datum_json = fetched_row.get(column_name)
datum = data.Datum.from_json(column.type, datum_json)
row._data[column_name] = datum
update = True
return update
def __process_inc_reply(self, ops):
if self._inc_index + 2 > len(ops):
# XXX rate-limit
vlog.warn("reply does not contain enough operations for "
"increment (has | |
# spese/views.py
''' app spese views:
- toggle(id), change amount sign
- add, create new expense
- transfer_funds, create new transfer fund
- index, list all expenses summary
- detail(id), show expense detail
- change(id), change expense
- delete(id), delete expense
- balance, reports by accounts, tags, ...
- repo_accounts
- repo_work_cost_types
- repo_tags
'''
#{ module history (notepad users: cursor prev.curly bracket, then ctrl+alt+b to highlight, alt+h to hide)
# [email protected] filters.py: + "not_for_work" & "description" filter fields
# +export_csv: export current expenses list to csv file
# [email protected] RepoItem: balance calculated in object instantiation instead
# of a parameter passing
# repo_accounts, repo_work_cost_types, repo_tags: commented out @login_required
# because these are module internal functions
# [email protected] index: chg passing request to django-filter to select
# the applicable account
# balance: + account & date filters by django-filter
# [email protected] index: + account & date filters by django-filter
# [email protected] toggle: + check request user against expense user
# detail: + check request user against expense user
# change: + check request user against expense user
# delete: + check request user against expense user
# [email protected] using transferFund.set, expense.get_companion
# [email protected] using transaction.atomic
# better reporting (splitted external flow from transfer funds)
# [email protected] changing entity name: from source to account
# [email protected] adding log facility in primary program points
# bug 0001: got a pdb error in transfer funds from production site.
# after introducing the log facility it doesn't appear again.
# it seems as if I forgot a pdb.set_trace on, but it isn't in the repository,
# [email protected] added a project css:
# - servizi/static/css/servizi.css: project specific css attributes
# reduced h1 height in servizi.css
# added a page_identification block in base.html feeded from apps
# [email protected] fighting to deploy the project to my production server
# releasing python+virtualenv+django in a centos 6+apache framework
# via mod_wsgi was harder than I expected. However, finally it worked!
# [email protected] moving some css from project to app:
# - spese/static/spese/spese.css; spese specific css attributes
# - /servizi/template/base.html: added {% block stylesheet %}
# - spese/template/spese/*.html: added {% load staticfiles %} and stylesheet block
# right justified the amount column in spese/template/spese/index.html
# adopted the css skeleton boilerplate (http://getskeleton.com/)
# modified spese/template/spese/*.html and /servizi/template/css/*.hmtl
# visual adjustments of User Interface
# [email protected] moving login from app to project:
# - /servizi/templates/login.html; login template
# - /servizi/servizi/forms.py; contains class LoginForm
# - login_url="/login/"; project url (...="login/" would kept hostname/spes/login)
#}
import csv
import pdb # python debugging
import sys
import logging
log = logging.getLogger(__name__) # log.debug, info, warning, error, critical("Hey there it works!!")
# django managing requests
from django.shortcuts import get_object_or_404, render
from django.http import Http404
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
# django forms
from django.forms import modelform_factory, HiddenInput
from django.utils import timezone
# from django.core.exceptions import ValidationError
# from django.utils.datastructures import MultiValueDictKeyError
from django.contrib import messages
# django authorization
from django.contrib.auth.decorators import login_required
# django models
from django.db import transaction
from django.db.models import Sum
from django.db.models import Q
# spese & taggit
from .models import Expense, Account, WCType, TransferFund
from .forms import ExpenseForm, TransferFundsForm
from .filters import ExpenseFilter
from .utils import get_accounts
from taggit.models import Tag
@login_required(login_url="/login/")
def toggle(request, expense_id):
''' change amount sign '''
expense = get_object_or_404(Expense, pk=expense_id)
# check expense user == request user, othewise bail out
if expense.user != request.user:
msg = "{}: access to expense id {} denied".format( request.user.username, expense.pk )
log.error(msg)
messages.error(request, msg)
return HttpResponseRedirect(reverse('spese:index'))
error = False
try:
expense.amount = -expense.amount
expense.save()
msg = "success toggling expense id {} for user {}".format(expense.id, expense.user.username)
except:
msg = 'Error <{}> while toggling expense {}'.format(sys.exc_info()[0], expense_id)
error = True
log.info(msg) if not error else log.error(msg)
messages.success(request, msg) if not error else messages.error(request, msg)
return HttpResponseRedirect(reverse('spese:detail', args=(expense.id,)))
@login_required(login_url="/login/")
def add(request):
''' create new expense '''
page_identification = 'Spese: new expense'
accounts = Account.objects.filter(users=request.user)
account_selected = None
tags_selected = []
if request.method == "POST":
form = ExpenseForm(request.POST)
account_selected = int(request.POST['account'])
tags_selected = request.POST.getlist('choice') # 'getlist' gets [] in case of no choices
if form.is_valid():
try:
with transaction.atomic():
expense = form.save(commit = False) # creates new expense not saved in DB
expense.user = request.user
expense.account = Account.objects.get(id=account_selected)
expense.save() # save new expense to DB (this set expense.pk)
# managing tags; this is a different step because it's a m:n relationship
tags = request.POST.getlist('choice') ## gets [] in case of no choices
expense.tags.set(*tags, clear=True) # set tags (clearing the old ones)
expense.save()
msg = 'success creating expense id {} for user {}'.format(expense.id, expense.user.username)
log.info(msg)
messages.success(request, msg)
except:
# error: Redisplay the expense change form
msg = 'Error <{}> while trying to create expense'.format(sys.exc_info()[0])
log.error(msg)
messages.error(request, msg)
else:
if 'save' in request.POST.keys():
# SAVE button: jump to show detail of new expense
return HttpResponseRedirect(reverse('spese:detail', args=(expense.id,)))
# SAVE & CONTINUE button: display again the form, with fields already loaded
else:
# first display of the form
form = ExpenseForm(initial={
'description': 'expense description',
'date': timezone.now(),
})
alltags = Tag.objects.all()
return render(request, 'spese/add.html', { 'page_identification': page_identification,
'operation': 'new',
'form': form,
'accounts': accounts,
'account_selected': account_selected,
'alltags': alltags,
'tags_selected': tags_selected,
}
)
@login_required(login_url="/login/")
def transfer_funds(request):
''' add transfer funds '''
page_identification = 'Spese: new transfer funds'
account_choices = get_accounts(request.user)
if not account_choices or len(account_choices) < 2:
msg = 'User {} has too few accounts to do a transfer of funds'.format(request.user.username)
log.error(msg)
messages.error(request, msg)
return HttpResponseRedirect(reverse('spese:index'))
account_selected = None
if request.method == "POST":
form = TransferFundsForm(request.POST, custom_choices=account_choices)
if form.is_valid():
try:
### TRACE ### pdb.set_trace()
tf_source_id = int(form['tf_source'].value())
tf_destination_id = int(form['tf_destination'].value())
with transaction.atomic():
expense = form.save(commit=False) # new expense: source account, no id
expense.user = request.user
expense.account = Account.objects.get(id=tf_source_id)
expense.save() # save to DB (got record id)
tfr = TransferFund()
tfr.set(expense, Account.objects.get(id=tf_destination_id))
msg = '{}: success transferring {} funds from {} to {}'.format( expense.user.username,
expense.amount,
tfr.source.account.name,
tfr.destination.account.name
)
log.info(msg)
messages.success(request, msg)
except:
msg = 'Error <{}> while trying to transfer funds'.format(sys.exc_info()[0])
log.error(msg)
messages.error(request, msg)
else:
# success: back to index
return HttpResponseRedirect(reverse('spese:index'))
# error: Redisplay the expense change form
else:
# first load of form
form = TransferFundsForm(custom_choices=account_choices,
initial={
'description': 'transferring funds',
'date': timezone.now(),
})
form.fields['work_cost_type'].widget = HiddenInput() # transfer funds haven't work cost type
return render(request, 'spese/transfer_funds.html', {'page_identification': page_identification,
'operation': 'add',
'form': form,
}
)
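# Editor's note (assumption about model internals, not shown in this file):
# TransferFund.set(expense, destination_account) is expected to create the
# mirrored companion expense on the destination account and link the pair, so
# that Expense.get_companion(), used in detail() and change() below, can
# resolve the other side of a transfer.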
@login_required(login_url='/login/')
def index(request):
page_identification = 'Spese'
### TRACE ### pdb.set_trace()
e_l = ExpenseFilter(request.GET, request=request, queryset=Expense.objects.filter(user=request.user)) # expenses_list
request.session['filter_data'] = e_l.data
return render(request, 'spese/index.html', { 'page_identification': page_identification,
'expenses_list': e_l,
}
)
@login_required(login_url="/login/")
def export_csv(request):
# Create the HttpResponse object with the appropriate CSV header.
filter_data = request.session.get('filter_data')
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="expense_list.csv"'
e_l = ExpenseFilter(filter_data, request=request, queryset=Expense.objects.filter(user=request.user)) # expenses_list
# pdb.set_trace()
writer = csv.writer(response)
for row in e_l.qs:
writer.writerow([row.pk, row.account.name, row.work_cost_type.name if row.work_cost_type else '', row.date, row.description, row.amount])
return response
@login_required(login_url="login/")
def detail(request, expense_id):
expense = get_object_or_404(Expense, pk=expense_id)
# check expense user == request user, otherwise bail out
if expense.user != request.user:
msg = "{}: access to expense id {} denied".format( request.user.username, expense.pk )
log.error(msg)
messages.error(request, msg)
return HttpResponseRedirect(reverse('spese:index'))
page_identification = 'Spese: show expense detail'
other_expense = expense.get_companion() # if expense is a transfer fund, this gets its companion
# if not expense.user == request.user:
# msg = "expense id {}: wrong user (it's {})".format(expense.id, expense.user.username)
# log.error(msg)
# messages.error(request, msg)
# return HttpResponseRedirect(reverse('spese:index'))
if other_expense:
###TRACE ### pdb.set_trace()
other_url = reverse('spese:detail', kwargs={'expense_id': str(other_expense.pk)})
messages.info( request,
'this is a transfer fund bound to <a href="{}">this one</a> in "{}" account'.format(other_url, other_expense.account.name),
extra_tags='safe'
)
return render(request, 'spese/detail.html', {'page_identification': page_identification,
'operation': 'show',
'expense': expense,
}
)
@login_required(login_url="/login/")
def change(request, expense_id):
''' be aware: this one changes every kind of expense; even transfer funds
in case of transfer funds:
- cannot change account in companion expense
- changes description, date and amount in companion expense
'''
expense = get_object_or_404(Expense, pk=expense_id)
# check expense user == request user, otherwise bail out
if expense.user != request.user:
msg = "{}: access to expense id {} denied".format( request.user.username, expense.pk )
log.error(msg)
messages.error(request, msg)
return HttpResponseRedirect(reverse('spese:index'))
page_identification = 'Spese: edit expense detail'
accounts = Account.objects.filter(users=request.user)
account_selected = expense.account.pk
tags_selected = expense.tags.names
### TRACE ### pdb.set_trace()
other_expense = expense.get_companion() # if expense is a transfer fund, it gets its companion
id = expense.id
oid = other_expense.id if other_expense else None
if request.method == "POST":
form = ExpenseForm(request.POST, instance=expense)
account_selected
<filename>benchmarks/SimResults/combinations_spec_ml_fulltrained/cmp_bwavesGemsFDTDastaromnetpp/power.py
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0340428,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.229428,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.189927,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.256385,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.443966,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.254627,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.954977,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.224307,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.64415,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0358813,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00929415,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0797061,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0687359,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.115587,
'Execution Unit/Register Files/Runtime Dynamic': 0.0780301,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.201929,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.546098,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 2.21391,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.0015137,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.0015137,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00133414,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000525064,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000987397,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00534894,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0139518,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0660776,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.2031,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.178125,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.224429,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.62872,
'Instruction Fetch Unit/Runtime Dynamic': 0.487933,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0695911,
'L2/Runtime Dynamic': 0.0149867,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.56797,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.14099,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0754087,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0754087,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.92552,
'Load Store Unit/Runtime Dynamic': 1.58828,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.185945,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.37189,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0659925,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0670272,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.261333,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0292321,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.53392,
'Memory Management Unit/Runtime Dynamic': 0.0962593,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 21.3636,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.125182,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0146164,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.132739,
'Renaming Unit/Int Front End RAT/Subthreshold | |
# deit models (FB weights)
'deit_tiny_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'deit_small_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'deit_base_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD),
'deit_base_patch16_384': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0),
'deit_tiny_distilled_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')),
'deit_small_distilled_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')),
'deit_base_distilled_patch16_224': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, classifier=('head', 'head_dist')),
'deit_base_distilled_patch16_384': _cfg(
url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, input_size=(3, 384, 384), crop_pct=1.0,
classifier=('head', 'head_dist')),
# ViT ImageNet-21K-P pretraining by MILL
'vit_base_patch16_224_miil_in21k': _cfg(
url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm/vit_base_patch16_224_in21k_miil.pth',
mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear', num_classes=11221,
),
'vit_base_patch16_224_miil': _cfg(
url='https://miil-public-eu.oss-eu-central-1.aliyuncs.com/model-zoo/ImageNet_21K_P/models/timm'
'/vit_base_patch16_224_1k_miil_84_4.pth',
mean=(0, 0, 0), std=(1, 1, 1), crop_pct=0.875, interpolation='bilinear',
),
}
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
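# --- Usage sketch (added for illustration, not part of the original file) ---
# An Attention layer maps a token sequence (B, N, C) back to (B, N, C); the
# dim/num_heads/shape values below are assumptions chosen only to show the interface.
#   attn = Attention(dim=384, num_heads=6, qkv_bias=True)
#   out = attn(torch.randn(2, 197, 384))   # -> torch.Size([2, 197, 384])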
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def normalize_fn(tensor, mean, std):
"""Differentiable version of torchvision.functional.normalize"""
# here we assume the color channel is at dim=1
mean = mean[None, :, None, None]
std = std[None, :, None, None]
return tensor.sub(mean).div(std)
class NormalizeByChannelMeanStd(nn.Module):
def __init__(self, mean, std):
super(NormalizeByChannelMeanStd, self).__init__()
if not isinstance(mean, torch.Tensor):
mean = torch.tensor(mean)
if not isinstance(std, torch.Tensor):
std = torch.tensor(std)
self.register_buffer("mean", mean)
self.register_buffer("std", std)
def forward(self, tensor):
return normalize_fn(tensor, self.mean, self.std)
def extra_repr(self):
return 'mean={}, std={}'.format(self.mean, self.std)
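# --- Usage sketch (added for illustration, not part of the original file) ---
# NormalizeByChannelMeanStd keeps mean/std as registered buffers, so they move
# with the module across devices but are never trained. The ImageNet statistics
# below are an assumption used only for this example.
#   norm = NormalizeByChannelMeanStd(mean=[0.485, 0.456, 0.406],
#                                    std=[0.229, 0.224, 0.225])
#   y = norm(torch.rand(1, 3, 224, 224))   # per-channel (x - mean) / std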
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
Includes distillation token & head support for `DeiT: Data-efficient Image Transformers`
- https://arxiv.org/abs/2012.12877
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, representation_size=None, distilled=False,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
act_layer=None, weight_init='', avgpool=False):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
distilled (bool): model includes a distillation token and head as in DeiT models
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
embed_layer (nn.Module): patch embedding layer
norm_layer: (nn.Module): normalization layer
weight_init: (str): weight init scheme
"""
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_tokens = 2 if distilled else 1
self.avgpool = avgpool
if self.avgpool:
self.num_tokens = 0
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.patch_embed = embed_layer(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=128)
num_patches = self.patch_embed.num_patches
self.denormalize = NormalizeByChannelMeanStd(
mean=[-0.485/0.229, -0.456/0.224, -0.406/0.225], std=[1/0.229, 1/0.224, 1/0.225])
self.encoder = Encoder(ch=128, out_ch=3, ch_mult=(1,1,2,2,4), num_res_blocks=2, attn_resolutions=(16,), dropout=0.0, in_channels=3, resolution=256, z_channels=256, double_z=False)
self.quantize = VectorQuantizer(1024, 256, beta=0.25)
self.quant_conv = torch.nn.Conv2d(256, 256, 1)
sd = torch.load('/aisecurity-group-ds/common_data/pretrained_models/vqgan_imagenet_f16_1024.ckpt', map_location="cpu")["state_dict"]
keys = list(sd.keys())
encoder_weights, quantize_weights, quant_conv_weights = {}, {}, {}
for k in keys:
if 'quant_conv' in k and 'post' not in k:
quant_conv_weights[k.replace('quant_conv.', '')] = sd[k]
elif 'quantize' in k:
quantize_weights[k.replace('quantize.', '')] = sd[k]
elif 'encoder' in k:
encoder_weights[k.replace('encoder.', '')] = sd[k]
self.encoder.load_state_dict(encoder_weights)
self.quantize.load_state_dict(quantize_weights)
self.quant_conv.load_state_dict(quant_conv_weights)
self.encoder.eval().requires_grad_(False)
self.quant_conv.eval().requires_grad_(False)
if not self.avgpool:
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
else:
self.cls_token = None
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, drop=drop_rate,
attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Representation layer
if representation_size and not distilled:
self.num_features = representation_size
self.pre_logits = nn.Sequential(OrderedDict([
('fc', nn.Linear(embed_dim, representation_size)),
('act', nn.Tanh())
]))
else:
self.pre_logits = nn.Identity()
# Classifier head(s)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = None
if distilled:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
self.init_weights(weight_init)
def init_weights(self, mode=''):
assert mode in ('jax', 'jax_nlhb', 'nlhb', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
trunc_normal_(self.pos_embed, std=.02)
if self.dist_token is not None:
trunc_normal_(self.dist_token, std=.02)
if mode.startswith('jax'):
# leave cls token as zeros to match jax impl
named_apply(partial(_init_vit_weights, head_bias=head_bias, jax_impl=True), self)
else:
if self.cls_token is not None:
trunc_normal_(self.cls_token, std=.02)
self.apply(_init_vit_weights)
def _init_weights(self, m):
# this fn left here for compat with downstream users
_init_vit_weights(m)
@torch.jit.ignore()
def load_pretrained(self, checkpoint_path, prefix=''):
_load_weights(self, checkpoint_path, prefix)
@torch.jit.ignore
def no_weight_decay(self):
if self.avgpool:
return {'pos_embed', 'dist_token'}
else:
return {'pos_embed', 'cls_token', 'dist_token'}
def get_classifier(self):
if self.dist_token is None:
return self.head
else:
return self.head, self.head_dist
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.num_tokens == 2:
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
h = self.encoder(self.denormalize(x)*2-1)
h = self.quant_conv(h)
x = self.patch_embed(x)
quant, _, _ = self.quantize(h.detach())
quant = quant.flatten(2).transpose(1, 2)
x = torch.cat((x, quant), dim=2)
if not self.avgpool:
cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from <NAME>, thanks
if self.dist_token is None:
x = torch.cat((cls_token, x), dim=1)
else:
x = torch.cat((cls_token, self.dist_token.expand(x.shape[0], -1, -1), x), dim=1)
x = self.pos_drop(x + self.pos_embed)
x = self.blocks(x)
if self.dist_token is None:
if self.avgpool:
x = self.norm(self.avgpool(x.permute(0,2,1)).squeeze(2))
return self.pre_logits(x)
else:
x = self.norm(x)
return self.pre_logits(x[:, 0])
else:
return x[:, 0], x[:, 1]
def forward(self, x):
x = self.forward_features(x)
if self.head_dist is not None:
x, x_dist = self.head(x[0]), self.head_dist(x[1]) # x must be a tuple
if self.training and not torch.jit.is_scripting():
# during training, return both classifier predictions separately
return x, x_dist
else:
# during inference, return the average of both classifier predictions
return (x + x_dist) / 2
else:
x = self.head(x)
return x
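# --- Note (added for illustration, not part of the original file) ---
# With a distillation head (distilled=True), forward() returns both logits
# during training so each head can receive its own loss, and averages them at
# inference time:
#   model.train();  cls_logits, dist_logits = model(images)
#   model.eval();   logits = model(images)   # (cls_logits + dist_logits) / 2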
def _init_vit_weights(module: nn.Module, name: str = '', head_bias: float = 0., jax_impl: bool = False):
""" ViT weight initialization
* When called without n, head_bias, jax_impl args it will behave exactly the same
as my original init for compatibility with prev hparam / downstream use cases (i.e. DeiT).
* When called w/ valid n (module name) and jax_impl=True, will (hopefully) match JAX impl
"""
if isinstance(module, nn.Linear):
if name.startswith('head'):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
elif name.startswith('pre_logits'):
lecun_normal_(module.weight)
nn.init.zeros_(module.bias)
else:
if jax_impl:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
else:
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif jax_impl and isinstance(module, nn.Conv2d):
# NOTE conv was left to pytorch default in my original init
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm, nn.BatchNorm2d)):
nn.init.zeros_(module.bias)
nn.init.ones_(module.weight)
@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
""" Load weights from .npz checkpoints for official Google Brain Flax implementation
"""
import numpy as np
def _n2p(w, t=True):
if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
w = w.flatten()
if t:
if w.ndim == 4:
w = w.transpose([3, 2, 0, 1])
elif w.ndim == 3:
w = w.transpose([2, 0, 1])
elif w.ndim == 2:
w = w.transpose([1, 0])
return torch.from_numpy(w)
w = np.load(checkpoint_path)
if not prefix and 'opt/target/embedding/kernel' in w:
prefix = 'opt/target/'
if hasattr(model.patch_embed, 'backbone'):
# hybrid
backbone = model.patch_embed.backbone
stem_only | |
dt
elif ':' in dt:
self.scenes[scId].time = dt
else:
if scn.find('Day') is not None:
self.scenes[scId].day = scn.find('Day').text
if scn.find('Hour') is not None:
self.scenes[scId].hour = scn.find('Hour').text
if scn.find('Minute') is not None:
self.scenes[scId].minute = scn.find('Minute').text
if scn.find('LastsDays') is not None:
self.scenes[scId].lastsDays = scn.find('LastsDays').text
if scn.find('LastsHours') is not None:
self.scenes[scId].lastsHours = scn.find('LastsHours').text
if scn.find('LastsMinutes') is not None:
self.scenes[scId].lastsMinutes = scn.find('LastsMinutes').text
if scn.find('ReactionScene') is not None:
self.scenes[scId].isReactionScene = True
else:
self.scenes[scId].isReactionScene = False
if scn.find('SubPlot') is not None:
self.scenes[scId].isSubPlot = True
else:
self.scenes[scId].isSubPlot = False
if scn.find('Goal') is not None:
self.scenes[scId].goal = scn.find('Goal').text
if scn.find('Conflict') is not None:
self.scenes[scId].conflict = scn.find('Conflict').text
if scn.find('Outcome') is not None:
self.scenes[scId].outcome = scn.find('Outcome').text
if scn.find('ImageFile') is not None:
self.scenes[scId].image = scn.find('ImageFile').text
if scn.find('Characters') is not None:
for crId in scn.find('Characters').iter('CharID'):
if self.scenes[scId].characters is None:
self.scenes[scId].characters = []
self.scenes[scId].characters.append(crId.text)
if scn.find('Locations') is not None:
for lcId in scn.find('Locations').iter('LocID'):
if self.scenes[scId].locations is None:
self.scenes[scId].locations = []
self.scenes[scId].locations.append(lcId.text)
if scn.find('Items') is not None:
for itId in scn.find('Items').iter('ItemID'):
if self.scenes[scId].items is None:
self.scenes[scId].items = []
self.scenes[scId].items.append(itId.text)
# Make sure that the ToDo, Notes, and Unused types are inherited from the chapter.
for chId in self.chapters:
if self.chapters[chId].chType == 2:
# Chapter is "ToDo" type.
for scId in self.chapters[chId].srtScenes:
self.scenes[scId].isTodoScene = True
self.scenes[scId].isUnused = True
elif self.chapters[chId].chType == 1:
# Chapter is "Notes" type.
for scId in self.chapters[chId].srtScenes:
self.scenes[scId].isNotesScene = True
self.scenes[scId].isUnused = True
elif self.chapters[chId].isUnused:
for scId in self.chapters[chId].srtScenes:
self.scenes[scId].isUnused = True
return 'yWriter project data read in.'
def merge(self, source):
"""Update instance variables from a source instance.
Positional arguments:
source -- Novel subclass instance to merge.
Return a message beginning with the ERROR constant in case of error.
Overrides the superclass method.
"""
def merge_lists(srcLst, tgtLst):
"""Insert srcLst items to tgtLst, if missing.
"""
j = 0
for i in range(len(srcLst)):
if not srcLst[i] in tgtLst:
tgtLst.insert(j, srcLst[i])
j += 1
else:
j = tgtLst.index(srcLst[i]) + 1
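# Example of merge_lists (added for illustration): with
# srcLst = ['a', 'b', 'c'] and tgtLst = ['b', 'd'], the missing items are
# inserted right after their predecessors, so tgtLst becomes ['a', 'b', 'c', 'd'].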
if os.path.isfile(self.filePath):
message = self.read()
# initialize data
if message.startswith(ERROR):
return message
#--- Merge and re-order locations.
if source.srtLocations:
self.srtLocations = source.srtLocations
temploc = self.locations
self.locations = {}
for lcId in source.srtLocations:
# Build a new self.locations dictionary sorted like the source.
self.locations[lcId] = self.WE_CLASS()
if not lcId in temploc:
# A new location has been added
temploc[lcId] = self.WE_CLASS()
if source.locations[lcId].title:
# avoids deleting the title, if it is empty by accident
self.locations[lcId].title = source.locations[lcId].title
else:
self.locations[lcId].title = temploc[lcId].title
if source.locations[lcId].image is not None:
self.locations[lcId].image = source.locations[lcId].image
else:
self.locations[lcId].image = temploc[lcId].image
if source.locations[lcId].desc is not None:
self.locations[lcId].desc = source.locations[lcId].desc
else:
self.locations[lcId].desc = temploc[lcId].desc
if source.locations[lcId].aka is not None:
self.locations[lcId].aka = source.locations[lcId].aka
else:
self.locations[lcId].aka = temploc[lcId].aka
if source.locations[lcId].tags is not None:
self.locations[lcId].tags = source.locations[lcId].tags
else:
self.locations[lcId].tags = temploc[lcId].tags
for fieldName in self._LOC_KWVAR:
try:
self.locations[lcId].kwVar[fieldName] = source.locations[lcId].kwVar[fieldName]
except:
self.locations[lcId].kwVar[fieldName] = temploc[lcId].kwVar[fieldName]
#--- Merge and re-order items.
if source.srtItems:
self.srtItems = source.srtItems
tempitm = self.items
self.items = {}
for itId in source.srtItems:
# Build a new self.items dictionary sorted like the source.
self.items[itId] = self.WE_CLASS()
if not itId in tempitm:
# A new item has been added
tempitm[itId] = self.WE_CLASS()
if source.items[itId].title:
# avoids deleting the title, if it is empty by accident
self.items[itId].title = source.items[itId].title
else:
self.items[itId].title = tempitm[itId].title
if source.items[itId].image is not None:
self.items[itId].image = source.items[itId].image
else:
self.items[itId].image = tempitm[itId].image
if source.items[itId].desc is not None:
self.items[itId].desc = source.items[itId].desc
else:
self.items[itId].desc = tempitm[itId].desc
if source.items[itId].aka is not None:
self.items[itId].aka = source.items[itId].aka
else:
self.items[itId].aka = tempitm[itId].aka
if source.items[itId].tags is not None:
self.items[itId].tags = source.items[itId].tags
else:
self.items[itId].tags = tempitm[itId].tags
for fieldName in self._ITM_KWVAR:
try:
self.items[itId].kwVar[fieldName] = source.items[itId].kwVar[fieldName]
except:
self.items[itId].kwVar[fieldName] = tempitm[itId].kwVar[fieldName]
#--- Merge and re-order characters.
if source.srtCharacters:
self.srtCharacters = source.srtCharacters
tempchr = self.characters
self.characters = {}
for crId in source.srtCharacters:
# Build a new self.characters dictionary sorted like the source.
self.characters[crId] = self.CHARACTER_CLASS()
if not crId in tempchr:
# A new character has been added
tempchr[crId] = self.CHARACTER_CLASS()
if source.characters[crId].title:
# avoids deleting the title, if it is empty by accident
self.characters[crId].title = source.characters[crId].title
else:
self.characters[crId].title = tempchr[crId].title
if source.characters[crId].image is not None:
self.characters[crId].image = source.characters[crId].image
else:
self.characters[crId].image = tempchr[crId].image
if source.characters[crId].desc is not None:
self.characters[crId].desc = source.characters[crId].desc
else:
self.characters[crId].desc = tempchr[crId].desc
if source.characters[crId].aka is not None:
self.characters[crId].aka = source.characters[crId].aka
else:
self.characters[crId].aka = tempchr[crId].aka
if source.characters[crId].tags is not None:
self.characters[crId].tags = source.characters[crId].tags
else:
self.characters[crId].tags = tempchr[crId].tags
if source.characters[crId].notes is not None:
self.characters[crId].notes = source.characters[crId].notes
else:
self.characters[crId].notes = tempchr[crId].notes
if source.characters[crId].bio is not None:
self.characters[crId].bio = source.characters[crId].bio
else:
self.characters[crId].bio = tempchr[crId].bio
if source.characters[crId].goals is not None:
self.characters[crId].goals = source.characters[crId].goals
else:
self.characters[crId].goals = tempchr[crId].goals
if source.characters[crId].fullName is not None:
self.characters[crId].fullName = source.characters[crId].fullName
else:
self.characters[crId].fullName = tempchr[crId].fullName
if source.characters[crId].isMajor is not None:
self.characters[crId].isMajor = source.characters[crId].isMajor
else:
self.characters[crId].isMajor = tempchr[crId].isMajor
for fieldName in self._CRT_KWVAR:
try:
self.characters[crId].kwVar[fieldName] = source.characters[crId].kwVar[fieldName]
except:
self.characters[crId].kwVar[fieldName] = tempchr[crId].kwVar[fieldName]
#--- Merge scenes.
sourceHasSceneContent = False
for scId in source.scenes:
if not scId in self.scenes:
self.scenes[scId] = self.SCENE_CLASS()
if source.scenes[scId].title:
# avoids deleting the title, if it is empty by accident
self.scenes[scId].title = source.scenes[scId].title
if source.scenes[scId].desc is not None:
self.scenes[scId].desc = source.scenes[scId].desc
if source.scenes[scId].sceneContent is not None:
self.scenes[scId].sceneContent = source.scenes[scId].sceneContent
sourceHasSceneContent = True
if source.scenes[scId].isUnused is not None:
self.scenes[scId].isUnused = source.scenes[scId].isUnused
if source.scenes[scId].isNotesScene is not None:
self.scenes[scId].isNotesScene = source.scenes[scId].isNotesScene
if source.scenes[scId].isTodoScene is not None:
self.scenes[scId].isTodoScene = source.scenes[scId].isTodoScene
if source.scenes[scId].status is not None:
self.scenes[scId].status = source.scenes[scId].status
if source.scenes[scId].sceneNotes is not None:
self.scenes[scId].sceneNotes = source.scenes[scId].sceneNotes
if source.scenes[scId].tags is not None:
self.scenes[scId].tags = source.scenes[scId].tags
if source.scenes[scId].field1 is not None:
self.scenes[scId].field1 = source.scenes[scId].field1
if source.scenes[scId].field2 is not None:
self.scenes[scId].field2 = source.scenes[scId].field2
if source.scenes[scId].field3 is not None:
self.scenes[scId].field3 = source.scenes[scId].field3
if source.scenes[scId].field4 is not None:
self.scenes[scId].field4 = source.scenes[scId].field4
if source.scenes[scId].appendToPrev is not None:
self.scenes[scId].appendToPrev = source.scenes[scId].appendToPrev
if source.scenes[scId].date or source.scenes[scId].time:
if source.scenes[scId].date is not None:
self.scenes[scId].date = source.scenes[scId].date
if source.scenes[scId].time is not None:
self.scenes[scId].time = source.scenes[scId].time
elif source.scenes[scId].minute or source.scenes[scId].hour or source.scenes[scId].day:
self.scenes[scId].date = None
self.scenes[scId].time = None
if source.scenes[scId].minute is not None:
self.scenes[scId].minute = source.scenes[scId].minute
if source.scenes[scId].hour is not None:
self.scenes[scId].hour = source.scenes[scId].hour
if source.scenes[scId].day is not None:
self.scenes[scId].day = source.scenes[scId].day
if source.scenes[scId].lastsMinutes is not None:
self.scenes[scId].lastsMinutes = source.scenes[scId].lastsMinutes
if source.scenes[scId].lastsHours is not None:
self.scenes[scId].lastsHours = source.scenes[scId].lastsHours
if source.scenes[scId].lastsDays is not None:
self.scenes[scId].lastsDays = source.scenes[scId].lastsDays
if source.scenes[scId].isReactionScene is not None:
self.scenes[scId].isReactionScene = source.scenes[scId].isReactionScene
if source.scenes[scId].isSubPlot is not None:
self.scenes[scId].isSubPlot = source.scenes[scId].isSubPlot
if source.scenes[scId].goal is not None:
self.scenes[scId].goal = source.scenes[scId].goal
if source.scenes[scId].conflict is not None:
self.scenes[scId].conflict = source.scenes[scId].conflict
if source.scenes[scId].outcome is not None:
self.scenes[scId].outcome = source.scenes[scId].outcome
if source.scenes[scId].characters is not None:
self.scenes[scId].characters = []
for crId in source.scenes[scId].characters:
if crId in self.characters:
self.scenes[scId].characters.append(crId)
if source.scenes[scId].locations is not None:
self.scenes[scId].locations = []
for lcId in source.scenes[scId].locations:
if lcId in self.locations:
self.scenes[scId].locations.append(lcId)
if source.scenes[scId].items is not None:
self.scenes[scId].items = []
for itId in source.scenes[scId].items:
if itId in self.items:
self.scenes[scId].items.append(itId)
for fieldName in self._SCN_KWVAR:
try:
self.scenes[scId].kwVar[fieldName] = source.scenes[scId].kwVar[fieldName]
except:
pass
#--- Merge chapters.
for chId in source.chapters:
if not chId in self.chapters:
self.chapters[chId] = self.CHAPTER_CLASS()
if source.chapters[chId].title:
# avoids deleting the title, if it is empty by accident
self.chapters[chId].title = source.chapters[chId].title
if source.chapters[chId].desc is not None:
self.chapters[chId].desc = source.chapters[chId].desc
if source.chapters[chId].chLevel is not None:
self.chapters[chId].chLevel = source.chapters[chId].chLevel
if source.chapters[chId].oldType is not None:
self.chapters[chId].oldType = source.chapters[chId].oldType
if source.chapters[chId].chType is not None:
self.chapters[chId].chType = source.chapters[chId].chType
if source.chapters[chId].isUnused is not None:
self.chapters[chId].isUnused = source.chapters[chId].isUnused
if source.chapters[chId].suppressChapterTitle is not None:
self.chapters[chId].suppressChapterTitle = source.chapters[chId].suppressChapterTitle
if source.chapters[chId].suppressChapterBreak is not None:
self.chapters[chId].suppressChapterBreak = source.chapters[chId].suppressChapterBreak
if source.chapters[chId].isTrash is not None:
self.chapters[chId].isTrash = source.chapters[chId].isTrash
for fieldName in self._CHP_KWVAR:
try:
self.chapters[chId].kwVar[fieldName] = source.chapters[chId].kwVar[fieldName]
except:
pass
#--- Merge the chapter's scene list.
# New scenes may be added.
# Existing scenes may be moved to another chapter.
# Deletion of scenes is not considered.
# The scene's sort order may not change.
# Remove scenes that have been moved to another chapter from the scene list.
srtScenes = []
for scId in self.chapters[chId].srtScenes:
if scId in source.chapters[chId].srtScenes or not scId in source.scenes:
# The scene has not moved to another chapter or isn't imported
srtScenes.append(scId)
self.chapters[chId].srtScenes = srtScenes
# Add new or moved scenes to the | |
import pytest
# import unittest
import numpy as np
import femnurbs.SplineUsefulFunctions as SUF
def test_isValidU():
with pytest.raises(TypeError):
SUF.isValidU()
assert SUF.isValidU(0) is False
assert SUF.isValidU(1.2) is False
assert SUF.isValidU({}) is False
assert SUF.isValidU(-1) is False
assert SUF.isValidU({1: 1}) is False
assert SUF.isValidU([0, 0, 0, 1, 1]) is False
assert SUF.isValidU([0, 0, 1, 1, 1, ]) is False
assert SUF.isValidU([0, 0, 0, 0, 1, 1, 1]) is False
assert SUF.isValidU([0, 0, 0, 1, 1, 1, 1]) is False
assert SUF.isValidU([-1, -1, 1, 1]) is False
assert SUF.isValidU([0, 0, 2, 2]) is False
assert SUF.isValidU([0, 0, 0.8, 0.2, 1, 1]) is False
assert SUF.isValidU([0, 0, 0, 1, 0.5, 1, 1]) is False
assert SUF.isValidU([0, 0, 1, 1]) is True
assert SUF.isValidU([0, 0, 0, 1, 1, 1]) is True
assert SUF.isValidU([0, 0, 0, 0, 1, 1, 1, 1]) is True
assert SUF.isValidU([0, 0, 0.2, 0.8, 1, 1]) is True
assert SUF.isValidU([0, 0, 0, 0.5, 1, 1, 1]) is True
assert SUF.isValidU([0, 0, 0.1, 0.5, 0.9, 1, 1]) is True
assert SUF.isValidU([0, 0, 0.5, 0.5, 1, 1]) is True
assert SUF.isValidU([0, 0, 0.5, 0.5, 0.5, 1, 1]) is False
def test_UBezier():
for p in range(1, 10):
assert SUF.isValidU(SUF.UBezier(p=p)) is True
Ugood = np.array([0, 0, 1, 1])
Utest = SUF.UBezier(p=1)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 1, 1, 1])
Utest = SUF.UBezier(p=2)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0, 1, 1, 1, 1])
Utest = SUF.UBezier(p=3)
np.testing.assert_almost_equal(Ugood, Utest)
def test_UUniform():
for p in range(1, 10):
for n in range(p + 1, 11):
assert SUF.isValidU(SUF.UUniform(p=p, n=n)) is True
Ugood = np.array([0, 0, 1, 1])
Utest = SUF.UUniform(p=1, n=2)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0.5, 1, 1])
Utest = SUF.UUniform(p=1, n=3)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0.25, 0.5, 0.75, 1, 1])
Utest = SUF.UUniform(p=1, n=5)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1])
Utest = SUF.UUniform(p=1, n=6)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 1, 1, 1])
Utest = SUF.UUniform(p=2, n=3)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0.5, 1, 1, 1])
Utest = SUF.UUniform(p=2, n=4)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1])
Utest = SUF.UUniform(p=2, n=6)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1])
Utest = SUF.UUniform(p=2, n=7)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0, 1, 1, 1, 1])
Utest = SUF.UUniform(p=3, n=4)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0, 0.5, 1, 1, 1, 1])
Utest = SUF.UUniform(p=3, n=5)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0, 0.25, 0.5, 0.75, 1, 1, 1, 1])
Utest = SUF.UUniform(p=3, n=7)
np.testing.assert_almost_equal(Ugood, Utest)
Ugood = np.array([0, 0, 0, 0, 0.2, 0.4, 0.6, 0.8, 1, 1, 1, 1])
Utest = SUF.UUniform(p=3, n=8)
np.testing.assert_almost_equal(Ugood, Utest)
def test_URandom():
Ntest = 100
for p in (1, 2, 3):
for n in range(p + 1, 30):
for zz in range(Ntest):
U = SUF.URandom(p=p, n=n)
assert SUF.isValidU(U) is True
assert SUF.getPfromU(U) == p
assert SUF.getNfromU(U) == n
def test_transpose():
II = np.eye(3)
IItest = SUF.transpose(II)
np.testing.assert_almost_equal(IItest, II)
II = np.eye(4)
IItest = SUF.transpose(II)
np.testing.assert_almost_equal(IItest, II)
II = np.eye(3)
IItest = SUF.transpose(II, diagonal=2)
np.testing.assert_almost_equal(IItest, II)
II = np.eye(4)
IItest = SUF.transpose(II, diagonal=2)
np.testing.assert_almost_equal(IItest, II)
def test_isSymetric():
II = np.eye(3)
assert SUF.isSymetric(II) is True
II = np.eye(4)
assert SUF.isSymetric(II) is True
II = np.eye(3)
assert SUF.isSymetric(II, diagonal=2) is True
II = np.eye(4)
assert SUF.isSymetric(II, diagonal=2) is True
II = np.array([[1, 2, 3, 4],
[4, 3, 2, 1]])
assert SUF.isSymetric(II, diagonal=2) is True
II = np.array([[1, 2, 4, 4],
[4, 4, 2, 1]])
assert SUF.isSymetric(II, diagonal=2) is True
II = np.array([[7, 2, 4, 3],
[4, 4, 2, 7]])
assert SUF.isSymetric(II, diagonal=2) is False
II = np.array([[7, 2, 4, 7],
[7, 4, 2, 3]])
assert SUF.isSymetric(II, diagonal=2) is False
def test_getPfromU():
U = SUF.UBezier(p=1)
ptest = SUF.getPfromU(U)
assert ptest == 1
U = SUF.UBezier(p=2)
ptest = SUF.getPfromU(U)
assert ptest == 2
U = SUF.UBezier(p=3)
ptest = SUF.getPfromU(U)
assert ptest == 3
U = SUF.UBezier(p=4)
ptest = SUF.getPfromU(U)
assert ptest == 4
U = SUF.UUniform(p=1, n=6)
ptest = SUF.getPfromU(U)
assert ptest == 1
U = SUF.UUniform(p=2, n=6)
ptest = SUF.getPfromU(U)
assert ptest == 2
U = SUF.UUniform(p=3, n=6)
ptest = SUF.getPfromU(U)
assert ptest == 3
U = SUF.UUniform(p=4, n=6)
ptest = SUF.getPfromU(U)
assert ptest == 4
U = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1])
ptest = SUF.getPfromU(U)
assert ptest == 2
def test_getNfromU():
U = SUF.UBezier(p=1)
ptest = SUF.getNfromU(U)
assert ptest == 2
U = SUF.UBezier(p=2)
ptest = SUF.getNfromU(U)
assert ptest == 3
U = SUF.UBezier(p=3)
ptest = SUF.getNfromU(U)
assert ptest == 4
U = SUF.UBezier(p=4)
ptest = SUF.getNfromU(U)
assert ptest == 5
U = SUF.UUniform(p=1, n=6)
ptest = SUF.getNfromU(U)
assert ptest == 6
U = SUF.UUniform(p=2, n=6)
ptest = SUF.getNfromU(U)
assert ptest == 6
U = SUF.UUniform(p=3, n=6)
ptest = SUF.getNfromU(U)
assert ptest == 6
U = SUF.UUniform(p=4, n=6)
ptest = SUF.getNfromU(U)
assert ptest == 6
U = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1])
ptest = SUF.getNfromU(U)
assert ptest == 5
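# Observation (added for illustration, not part of the original tests): the
# cases above are consistent with the usual B-spline relation
# len(U) = n + p + 1, e.g. UUniform(p=2, n=6) has 9 knots and UBezier(p=3)
# has 8, so getNfromU(U) behaves like len(U) - getPfromU(U) - 1.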
def test_transformUtoH():
U = SUF.UBezier(p=1)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=2)
Hgood = np.array([0, 1, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=3)
Hgood = np.array([0, 0, 1, 0, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=4)
Hgood = np.array([0, 0, 0, 1, 0, 0, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=1, n=6)
Hgood = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=2, n=6)
Hgood = np.array([0, 0.25, 0.25, 0.25, 0.25, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=3, n=6)
Hgood = np.array([0, 0, 1, 1, 1, 0, 0]) / 3
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=4, n=6)
Hgood = np.array([0, 0, 0, 1, 1, 0, 0, 0]) / 2
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = np.array([0, 0, 0, 0.2, 0.8, 1, 1, 1]) # p = 2 and n = 5
Hgood = np.array([0, 0.2, 0.6, 0.2, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = np.array([0, 0, 0, 0, 0.2, 0.8, 1, 1, 1, 1]) # p = 3 and n = 6
Hgood = np.array([0, 0, 0.2, 0.6, 0.2, 0, 0])
Htest = SUF.transformUtoH(U)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=2)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=2)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=3)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=3)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=3)
Hgood = np.array([0, 1, 0])
Htest = SUF.transformUtoH(U, j=2)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=4)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=4)
Hgood = np.array([1])
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=4)
Hgood = np.array([0, 1, 0])
Htest = SUF.transformUtoH(U, j=2)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UBezier(p=4)
Hgood = np.array([0, 0, 1, 0, 0])
Htest = SUF.transformUtoH(U, j=3)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=1, n=6)
Hgood = np.array([0.2, 0.2, 0.2, 0.2, 0.2])
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=2, n=6)
Hgood = np.array([0.25, 0.25, 0.25, 0.25])
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=2, n=6)
Hgood = np.array([0.25, 0.25, 0.25, 0.25])
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=3, n=6)
Hgood = np.array([1, 1, 1]) / 3
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=3, n=6)
Hgood = np.array([1, 1, 1]) / 3
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=3, n=6)
Hgood = np.array([0, 1, 1, 1, 0]) / 3
Htest = SUF.transformUtoH(U, j=2)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=4, n=6)
Hgood = np.array([1, 1]) / 2
Htest = SUF.transformUtoH(U, j=0)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=4, n=6)
Hgood = np.array([1, 1]) / 2
Htest = SUF.transformUtoH(U, j=1)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=4, n=6)
Hgood = np.array([0, 1, 1, 0]) / 2
Htest = SUF.transformUtoH(U, j=2)
np.testing.assert_almost_equal(Hgood, Htest)
U = SUF.UUniform(p=4, n=6)
Hgood = np.array([0, 0, 1, 1, 0, | |
c-c//8]))#input_tensor[:, :, :, c//8:]
shift_buffer = tf.get_default_graph().get_tensor_by_name(shift_buffer_name)
shift_concat = tf.concat((shift_buffer, x2), axis=3, name='shift_concat')
### END SHIFT CHANGES ###
### SHIFT CHANGES ###
# If splitting, (concat, input_tensor) are fed to DPU. Insert new placeholder for concat input
if SPLIT_MODEL:
_ = tf.identity(shift_concat, 'shift_concat_output')
shift_concat = tf.placeholder(tf.float32, shape=shift_concat.get_shape(), name='shift_concat_input')
### END SHIFT CHANGES ###
net = shift_concat
if depthwise_location == 'input':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net, activation_fn=None)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if callable(expansion_size):
inner_size = expansion_size(num_inputs=prev_depth)
else:
inner_size = expansion_size
if inner_size > net.shape[3]:
if expansion_fn == split_conv:
expansion_fn = functools.partial(
expansion_fn,
num_ways=split_expansion,
divisible_by=split_divisible_by,
stride=1)
net = expansion_fn(
net,
inner_size,
scope='expand',
normalizer_fn=normalizer_fn,
**expansion_params)
net = tf.identity(net, 'expansion_output')
if endpoints is not None:
endpoints['expansion_output'] = net
if depthwise_location == 'expansion':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if expansion_transform:
net = expansion_transform(expansion_tensor=net, input_tensor=input_tensor)
# Note in contrast with expansion, we always have
# projection to produce the desired output size.
if projection_fn == split_conv:
projection_fn = functools.partial(
projection_fn,
num_ways=split_projection,
divisible_by=split_divisible_by,
stride=1)
net = projection_fn(
net,
num_outputs,
scope='project',
normalizer_fn=normalizer_fn,
activation_fn=project_activation_fn,
**projection_params)
if endpoints is not None:
endpoints['projection_output'] = net
if depthwise_location == 'output':
if use_explicit_padding:
net = _fixed_padding(net, kernel_size, rate)
net = depthwise_func(net, activation_fn=None)
net = tf.identity(net, name='depthwise_output')
if endpoints is not None:
endpoints['depthwise_output'] = net
if callable(residual): # custom residual
net = residual(input_tensor=input_tensor, output_tensor=net)
elif (residual and
# stride check enforces that we don't add residuals when spatial
# dimensions are None
stride == 1 and
# Depth matches
net.get_shape().as_list()[3] ==
input_tensor.get_shape().as_list()[3]):
net += input_tensor
return tf.identity(net, name='output')
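# Note (added for illustration): the block above implements the temporal-shift
# variant of expanded_conv - the buffered channel slice from the previous frame
# (shift_buffer) is concatenated with the current frame's remaining channels
# before the usual expand / depthwise / project sequence, and the slice to be
# buffered for the next frame appears to be exposed via the
# 'shift_split_buffer_output' identity nodes collected later in this script.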
op = lib.op
expand_input = ops.expand_input_by_factor
### Import pytorch config before setting constant initializers through torch_params
if IMPORT_PYTORCH and not import_pytorch_weights():
sys.stderr.write("Error importing pytorch weights\n")
sys.exit(1)
# Based on V2_DEF from slim mobilenet_v2
V2_DEF_TSM = dict(
defaults={
# Note: these parameters of batch norm affect the architecture
# that's why they are here and not in training_scope.
(slim.batch_norm,): {'center': True, 'scale': True},
(slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6,
},
(ops.expanded_conv, expanded_conv_shift): {
'expansion_size': expand_input(6),
'split_expansion': 1,
'normalizer_fn': slim.batch_norm,
'residual': True
},
(slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'},
#(slim.conv2d, slim.separable_conv2d): {'biases_initializer': tf.initializers.constant(0.01)} ### SHIFT CHANGE to allow untrained inference for testing ###
},
spec=[
op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3], **torch_params(0)),
op(ops.expanded_conv,
expansion_size=expand_input(1, divisible_by=1),
num_outputs=16,
**torch_params(1)
),
op(ops.expanded_conv, stride=2, num_outputs=24, **torch_params(2)),
op(expanded_conv_shift, stride=1, num_outputs=24, shift_buffer_name='shift_buffer_0:0', **torch_params(3)), # Shift
op(ops.expanded_conv, stride=2, num_outputs=32, **torch_params(4)),
op(expanded_conv_shift, stride=1, num_outputs=32, shift_buffer_name='shift_buffer_1:0', **torch_params(5)), # Shift
op(expanded_conv_shift, stride=1, num_outputs=32, shift_buffer_name='shift_buffer_2:0', **torch_params(6)), # Shift
op(ops.expanded_conv, stride=2, num_outputs=64, **torch_params(7)),
op(expanded_conv_shift, stride=1, num_outputs=64, shift_buffer_name='shift_buffer_3:0', **torch_params(8)), # Shift
op(expanded_conv_shift, stride=1, num_outputs=64, shift_buffer_name='shift_buffer_4:0', **torch_params(9)), # Shift
op(expanded_conv_shift, stride=1, num_outputs=64, shift_buffer_name='shift_buffer_5:0', **torch_params(10)), # Shift
op(ops.expanded_conv, stride=1, num_outputs=96, **torch_params(11)),
op(expanded_conv_shift, stride=1, num_outputs=96, shift_buffer_name='shift_buffer_6:0', **torch_params(12)), # Shift
op(expanded_conv_shift, stride=1, num_outputs=96, shift_buffer_name='shift_buffer_7:0', **torch_params(13)), # Shift
op(ops.expanded_conv, stride=2, num_outputs=160, **torch_params(14)),
op(expanded_conv_shift, stride=1, num_outputs=160, shift_buffer_name='shift_buffer_8:0', **torch_params(15)), # Shift
op(expanded_conv_shift, stride=1, num_outputs=160, shift_buffer_name='shift_buffer_9:0', **torch_params(16)), # Shift
op(ops.expanded_conv, stride=1, num_outputs=320, **torch_params(17)),
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280, **torch_params(18))
],
)
checkpoint = "mobilenet_v2_1.0_224.ckpt"
tf.reset_default_graph()
shift_buffer_shapes = [[1, 56, 56, 3],
[1, 28, 28, 4],
[1, 28, 28, 4],
[1, 14, 14, 8],
[1, 14, 14, 8],
[1, 14, 14, 8],
[1, 14, 14, 12],
[1, 14, 14, 12],
[1, 7, 7, 20],
[1, 7, 7, 20]]
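# Note (added for illustration): each buffer appears to hold 1/8 of the channels
# of the corresponding residual block's input (24//8=3, 32//8=4, ..., 160//8=20),
# i.e. the slice the temporal shift carries over from the previous frame, laid
# out here as NHWC (the commented-out placeholders below are the NCHW variants).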
#with tf.variable_scope("Mobilenet", "Mobilenet") as scope, tf.Session() as sess:
#shift_buffer = [tf.placeholder(tf.float32, shape=[1, 3, 56, 56], name='shift_buffer_0'),
# tf.placeholder(tf.float32, shape=[1, 4, 28, 28], name='shift_buffer_1'),
# tf.placeholder(tf.float32, shape=[1, 4, 28, 28], name='shift_buffer_2'),
# tf.placeholder(tf.float32, shape=[1, 8, 14, 14], name='shift_buffer_3'),
# tf.placeholder(tf.float32, shape=[1, 8, 14, 14], name='shift_buffer_4'),
# tf.placeholder(tf.float32, shape=[1, 8, 14, 14], name='shift_buffer_5'),
# tf.placeholder(tf.float32, shape=[1, 12, 14, 14], name='shift_buffer_6'),
# tf.placeholder(tf.float32, shape=[1, 12, 14, 14], name='shift_buffer_7'),
# tf.placeholder(tf.float32, shape=[1, 20, 7, 7], name='shift_buffer_8'),
# tf.placeholder(tf.float32, shape=[1, 20, 7, 7], name='shift_buffer_9')]
shift_buffer = [tf.placeholder(tf.float32, shape=shift_buffer_shapes[0], name='shift_buffer_0'),
tf.placeholder(tf.float32, shape=shift_buffer_shapes[1], name='shift_buffer_1'),
tf.placeholder(tf.float32, shape=shift_buffer_shapes[2], name='shift_buffer_2'),
tf.placeholder(tf.float32, shape=shift_buffer_shapes[3], name='shift_buffer_3'),
tf.placeholder(tf.float32, shape=shift_buffer_shapes[4], name='shift_buffer_4'),
tf.placeholder(tf.float32, shape=shift_buffer_shapes[5], name='shift_buffer_5'),
tf.placeholder(tf.float32, shape=shift_buffer_shapes[6], name='shift_buffer_6'),
tf.placeholder(tf.float32, shape=shift_buffer_shapes[7], name='shift_buffer_7'),
tf.placeholder(tf.float32, shape=shift_buffer_shapes[8], name='shift_buffer_8'),
tf.placeholder(tf.float32, shape=shift_buffer_shapes[9], name='shift_buffer_9')]
#FINAL_NODE_NAME="MobilenetV2/Conv_1/Relu6"
FINAL_NODE_NAME="MobilenetV2/Logits/output"
in_tensor = tf.placeholder(tf.float32, shape=(1,224,224,3), name='in_img')
print(torch_params(0)['normalizer_params']['param_initializers']['moving_mean'].get_config())
in_img = tf.identity(in_tensor)
net, endpoints = mobilenet_v2.mobilenet_base(in_img, conv_defs=V2_DEF_TSM)
# Add the classifier
with tf.variable_scope("MobilenetV2/Logits"):
kernel_initializer = None
bias_initializer = tf.zeros_initializer()
if IMPORT_PYTORCH:
kernel_initializer = torch_params(-1)["weights_initializer"]
bias_initializer = torch_params(-1)["biases_initializer"]
net = tf.nn.avg_pool(net, [1,7,7,1], 1, "VALID", name="AvgPool")
net = tf.squeeze(net, (1,2))
net = tf.layers.dense(net, 27, use_bias=True, trainable = False,
kernel_initializer = kernel_initializer,
bias_initializer = bias_initializer,
name="Linear")
#net = tf.layers.Conv2D(27, [1,1],
# kernel_initializer = kernel_initializer,
# bias_initializer = bias_initializer,
# name="Linear")(net)
#net = tf.keras.layers.Dense(27, use_bias=True,
# kernel_initializer = kernel_initializer,
# bias_initializer = bias_initializer,
# name="Linear")(net)
net = tf.identity(net, name="output")
#ema = tf.train.ExponentialMovingAverage(0.999)
#vars = ema.variables_to_restore()
#saver = tf.train.Saver(vars)
split_outputs = []
output_node_names = []
inputs = {}
frozen_graph_def = None
with tf.Session() as sess:
graph = tf.get_default_graph()
input_graph_def = graph.as_graph_def()
print(tf.global_variables()[3])
print(tf.global_variables()[3].initializer)
print("GLOBALS: " + str(tf.global_variables()))
print("NODES: " + str([x.name for x in graph.as_graph_def().node]))
sess.run(tf.global_variables_initializer())
#saver.restore(sess, "../tf_models/mobilenet_v2_1.0_224/mobilenet_v2_1.0_224.ckpt")
print([n.name for n in graph.get_operations()])
output_node_names = [FINAL_NODE_NAME]
# add shift buffer output nodes
for op in graph.get_operations():
if "Identity" in op.type and "shift_split_buffer_output" in op.name:
output_node_names.append(op.name)
inputs = {in_tensor: np.ones((1,224,224,3))}
for i,shape in enumerate(shift_buffer_shapes):
inputs[shift_buffer[i]] = np.zeros(shape)
# update outputs to include buffer shift outputs and inputs to expanded_conv_shift layers
# update inputs to include internal placeholders
if SPLIT_MODEL:
for op in graph.get_operations():
# CPU input for shift layer from previous layer
if "Identity" in op.type and "prev_conv_output" in op.name:
output_node_names.append(op.name)
# CPU -> DPU concat output
if "Identity" in op.type and "shift_concat_output" in op.name:
output_node_names.append(op.name)
# CPU input for shift
if op.type == "Placeholder" and "/input" in op.name:
inputs[op.name+':0'] = np.ones(op.outputs[0].get_shape())
# DPU input for conv
if op.type == "Placeholder" and "shift_concat_input" in op.name:
inputs[op.name+':0'] = np.ones(op.outputs[0].get_shape())
## Dump split inputs to pickle file for quantization
if DUMP_QUANTIZE:
assert not SPLIT_MODEL
input_dir = LOCAL_DIR if QUANTIZE_LOCAL else IMAGENET_DIR
assert os.path.isdir(input_dir)
inters = ["in_img:0"]
input_dict = {0: len(graph.get_operations())}
shift_in_dict = {0: 0} # No shift buffer in first layer. Dummy input
shift_out_dict = {}
for op in graph.get_operations():
in_search = re.search(r"shift(_(\d+))?/input$", op.name)
shift_in_search = re.search(r"shift(_(\d+))?/shift_concat$", op.name)
shift_out_search = re.search(r"shift(_(\d+))?/shift_split_buffer_output$", op.name)
if "Identity" in op.type and in_search:
n = 0
if in_search.group(1):
n = int(in_search.group(1)[1:])
input_dict[n+1] = len(inters)
inters.append(op.name + ":0")
elif shift_in_search:
n = 0
if shift_in_search.group(1):
n = int(shift_in_search.group(1)[1:])
shift_in_dict[n+1] = len(inters)
inters.append(op.name + ":0")
elif shift_out_search:
n = 0
if shift_out_search.group(1):
n = int(shift_out_search.group(1)[1:])
shift_out_dict[n] = len(inters)
inters.append(op.name + ":0")
print("DICTS:")
print(input_dict.keys())
print(shift_in_dict.keys())
print(shift_out_dict.keys())
#print("INTERS: " + str(inters))
inputs = {}
shift_data = []
for shape in shift_buffer_shapes:
shift_data.append(np.zeros(shape))
img_paths = []
if QUANTIZE_LOCAL:
for vid in sorted(os.listdir(input_dir))[:LOCAL_VIDS]:
vid_path = os.path.join(input_dir, vid)
num_frames = len(os.listdir(vid_path))
print(f"Vid {vid} has {num_frames} frames", file=sys.stderr)
for img in sorted(os.listdir(vid_path)):
img_paths.append(os.path.join(vid_path, img))
else:
img_paths = [os.path.join(input_dir, x) for x in sorted(os.listdir(input_dir))[:IMAGENET_IMGS]]
dump_data = {}
for img_num,p_img in enumerate(img_paths):
print(f"Processing calib data # {img_num}...", file=sys.stderr)
img = PIL.Image.open(p_img)
w,h = img.size
new_w = 0
new_h = 0
if w > h:
new_h = 256
new_w = (256*w)//h
else:
new_w = 256
new_h = (256*h)//w
img = img.resize((new_w, new_h), PIL.Image.BILINEAR)
left = (new_w - 224)//2
top = (new_h - 224)//2
img = img.crop((left, top, left+224, top+224))
img = np.array(img)/255.0
img = (img - np.array([0.485, 0.456, 0.406])) / np.array([0.229, 0.224, 0.225])
inputs[in_tensor] = np.expand_dims(img, axis=0)
for i in range(len(shift_buffer_shapes)):
inputs[shift_buffer[i]] = shift_data[i]
outputs = sess.run(inters, inputs)
if QUANTIZE_LOCAL:
for i in range(len(shift_out_dict)):
shift_data[i] = outputs[shift_out_dict[i]]
assert len(input_dict) == len(shift_in_dict)
for i in range(len(input_dict)):
shift = outputs[shift_in_dict[i]] if shift_in_dict[i] < len(outputs) else np.empty(1)
resid = outputs[input_dict[i]] if input_dict[i] < len(outputs) else np.empty(1)
if img_num == 0:
dump_data[i] = {}
dump_data[i][img_num] = {"shift_concat": shift.tolist(),
"resid": resid.tolist()}
print(f"Dumping Quantize Data...", file=sys.stderr)
for split_num in range(len(input_dict)):
with open(os.path.join("model_tf_split_export", f"model_tf_split_{split_num}", "inputs.pickle"), 'wb') as f:
pickle.dump(dump_data[split_num], f, pickle.HIGHEST_PROTOCOL)
if EXPORT:
print(f"Saving model...", file=sys.stderr)
print("INS: " + str(inputs.keys()))
print("OUTS: " + str(output_node_names))
sess.run([o + ":0" for o in output_node_names], inputs)
for op in graph.get_operations():
if "Identity" in op.type and "prev_conv_output" in op.name:
split_outputs.append(op.name)
split_outputs.append(FINAL_NODE_NAME)
saver = tf.train.Saver()
model_name = "model_tf_split" if SPLIT_MODEL else "model_tf"
save_dir = os.path.join(".", model_name)
print(f"Saving model to {save_dir}...")
ckpt_file = saver.save(sess, os.path.join(save_dir, model_name + ".ckpt"))
pbtxt_file = model_name + ".pbtxt"
tf.train.write_graph(graph_or_graph_def=input_graph_def, logdir=save_dir, name=pbtxt_file, as_text=True)
print("IN_TEST: " | |
# Save in self.estimators
if self.estimators is not None:
self.estimators.append(estimator_)
def _score_estimator(self, estimator_, X_val, y_val):
# Use multi-metric scorer here - handles not repeating calls to
# predict / predict_proba, etc. - can safely wrap even single metrics
scorers = _MultimetricScorer(**self.ps.scorer)
scores = scorers(estimator_, X_val, np.array(y_val))
# Append each to scores, keeps track per fold
for scorer_str in self.scores:
score = scores[scorer_str]
self.scores[scorer_str].append(score)
# Optional verbose
self._print(f'{scorer_str}: {score_rep(score)}', level=1)
# Spacing for nice looking output
self._print(level=1)
def _save_preds(self, estimator, X_val, y_val):
if self.preds is None:
return
self._print('Saving predictions on validation set.', level=2)
for predict_func in ['predict', 'predict_proba', 'decision_function']:
# Get preds, skip if estimator doesn't have predict func
try:
preds = getattr(estimator, predict_func)(X_val)
except AttributeError:
continue
# Add to preds dict if estimator has predict func
try:
self.preds[predict_func].append(preds)
except KeyError:
self.preds[predict_func] = [preds]
# Add y_true
try:
self.preds['y_true'].append(np.array(y_val))
except KeyError:
self.preds['y_true'] = [np.array(y_val)]
def _compute_summary_scores(self):
self._print('Computing summary scores.', level=2)
self.mean_scores, self.std_scores = {}, {}
self.weighted_mean_scores = {}
for scorer_key in self.scores:
# Save mean under same name
scores = self.scores[scorer_key]
self.mean_scores[scorer_key] = np.mean(scores)
# Compute scores weighted by number of subjs
# Use val_subjects without NaN targets
weights = [len(self.val_subjects[i])
for i in range(len(self.val_subjects))]
self.weighted_mean_scores[scorer_key] =\
np.average(scores, weights=weights)
# Compute and add base micro std
self.std_scores[scorer_key] = np.std(scores)
# If more than 1 repeat, add the macro std
if self.n_repeats_ > 1:
scores = np.reshape(scores,
(self.n_repeats_, self.n_splits_))
self.std_scores[scorer_key + '_macro'] =\
np.std(np.mean(scores, axis=1))
# Add mean timing
if self.timing is not None:
self.mean_timing = {}
for time_key in self.timing:
self.mean_timing[time_key] = np.mean(self.timing[time_key])
def get_preds_dfs(self, drop_nan_targets=False):
'''This function can be used to return the raw predictions
made during evaluation as a list of pandas Dataframes.
Parameters
------------
drop_nan_targets : bool, optional
If False (default), then this method will return the
DataFrame of predictions including targets
with NaN. To skip these, e.g., in this case
of plotting against ground truth or computing
new metrics, set to True.
::
default = False
Returns
---------
dfs : list of pandas.DataFrame
list of dataframe's per fold, where each DataFrame
contains predictions made.
'''
dfs = []
# For each fold
for fold_indx in range(len(self.all_val_subjects)):
# Init df
df = pd.DataFrame(index=self.all_val_subjects[fold_indx])
# Add each predict type as a column
for predict_type in self.preds:
ps = self.preds[predict_type][fold_indx]
# Either float or multi-class case
if isinstance(ps[0], (float, np.floating)):
df[predict_type] = ps
else:
for cls in range(len(ps[0])):
df[predict_type + '_' + str(cls)] = ps[:, cls]
# Drop rows with NaN targets if requested
if drop_nan_targets:
nan_targets = df[df['y_true'].isna()].index
df = df.drop(nan_targets)
# Add to by fold list
dfs.append(df)
return dfs
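# Example usage (hypothetical variable names): the per-fold frames can be
# concatenated into a single DataFrame for plotting or computing new metrics:
#   dfs = results.get_preds_dfs(drop_nan_targets=True)
#   all_preds = pd.concat(dfs)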
def _get_display_name(self):
return str(self.__class__.__name__)
def __repr__(self):
rep = self._get_display_name() + '\n'
rep += '------------\n'
# Add scores + means pretty rep
for key in self.mean_scores:
rep += f'{key}: {score_rep(self.mean_scores[key])} '
rep += f'± {score_rep(self.std_scores[key])}\n'
rep += '\n'
# Show available saved attrs
saved_attrs = []
avaliable_methods = ['to_pickle', 'compare']
if self.estimators is not None:
saved_attrs.append('estimators')
avaliable_methods.append('get_X_transform_df')
avaliable_methods.append('get_inverse_fis')
avaliable_methods.append('run_permutation_test')
if self.preds is not None:
saved_attrs.append('preds')
avaliable_methods.append('get_preds_dfs')
avaliable_methods.append('subset_by')
if self.timing is not None:
saved_attrs.append('timing')
saved_attrs += ['estimator', 'train_subjects', 'val_subjects',
'feat_names', 'ps',
'mean_scores', 'std_scores',
'weighted_mean_scores', 'scores']
# Only show if different
ati_len = len(sum([list(e) for e in self.all_train_subjects], []))
ti_len = len(sum([list(e) for e in self.train_subjects], []))
if ati_len != ti_len:
saved_attrs.append('all_train_subjects')
avi_len = len(sum([list(e) for e in self.all_val_subjects], []))
vi_len = len(sum([list(e) for e in self.val_subjects], []))
if avi_len != vi_len:
saved_attrs.append('all_val_subjects')
if self.estimators is not None:
# Either or
if self.feature_importances_ is not None:
saved_attrs += ['fis_', 'feature_importances_']
avaliable_methods += ['get_fis', 'get_feature_importances']
elif self.coef_ is not None:
saved_attrs += ['fis_', 'coef_']
avaliable_methods += ['get_fis', 'get_coef_']
avaliable_methods.append('permutation_importance')
if self._store_cv:
saved_attrs += ['cv']
rep += 'Saved Attributes: ' + repr(saved_attrs) + '\n\n'
rep += 'Available Methods: ' + repr(avaliable_methods) + '\n\n'
# Use custom display str, no need to show scorer.
rep += 'Evaluated With:\n'
rep += self.ps._get_display_str(show_scorer=False) + '\n'
return rep
def _estimators_check(self):
if self.estimators is None:
raise RuntimeError('This method is not available unless '
'evaluate is run with store_estimators=True!')
def _dataset_check(self, dataset=None):
# If dataset not passed, try to use saved dataset ref
if dataset is None:
# Check for no saved
if not hasattr(self, '_dataset') or getattr(self, '_dataset') is None:
raise RuntimeError('No saved reference dataset, you must pass a dataset to use.')
# Use saved
dataset = self._dataset
return dataset
@property
def feature_importances_(self):
'''This property stores the mean values
across fitted estimators assuming each fitted estimator
has a non-empty `feature_importances_` attribute.'''
self._estimators_check()
return get_mean_fis(self.estimators, 'feature_importances_')
def get_feature_importances(self):
'''This function returns each `feature_importances_`
value across fitted estimators. If none of the estimators
have this parameter, it will return a list of None.
Returns
--------
feature_importances : list
A list of `feature_importances_` where each element
in the list refers to a fold from the evaluation.
'''
self._estimators_check()
return [estimator.feature_importances_
for estimator in self.estimators]
@property
def coef_(self):
'''This attribute represents the mean `coef_` as
a numpy array across all folds. This parameter will only
be available if all estimators have a non-null `coef_` parameter
and each returns the same shape. See `fis_` for a more flexible
version of this parameter that can handle when there
are differing numbers of features.'''
self._estimators_check()
return get_mean_fis(self.estimators, 'coef_')
def get_coefs(self):
'''This function returns each `coef_`
value across fitted estimators. If none of the estimators
have this parameter, it will return a list of None.
Returns
--------
coefs : list
A list of `coef_` where each element
in the list refers to a fold from the evaluation.
'''
self._estimators_check()
return [estimator.coef_
for estimator in self.estimators]
@property
def fis_(self):
'''This property stores the mean value
across each fold of the CV for either the `coef_`
or `feature_importance_` parameter.
Warnings
---------
If a feature is not present in all folds,
then its mean value will be computed from only the
folds in which it was present.
When using transformers, for example one hot encoder,
since the encoding is done on the fly, there is no
guarantee that 'one hot encoder category_1' is actually
the same category 1 across folds.
If for some reason some folds have a model with feature
importances and others `coef_`, they will still all be averaged
together, so make sure that this parameter is only used when
all of the underlying models across folds should have comparable
feature importances.
'''
# @TODO incorporate information about the original
# class names here // maybe in specific objects like
# OneHotEncoder.
self._estimators_check()
# Grab fis as Dataframe or list of
fis = self.get_fis()
# Base case
if isinstance(fis, pd.DataFrame):
return fis.mean()
# Categorical case
return [fi.mean() for fi in fis]
def _get_base_fis_list(self):
self._estimators_check()
coefs = self.get_coefs()
feature_importances = self.get_feature_importances()
fis = []
for coef, fi, feat_names in zip(coefs, feature_importances,
self.feat_names):
if coef is not None:
fis.append(fi_to_series(coef, feat_names))
elif fi is not None:
fis.append(fi_to_series(fi, feat_names))
else:
fis.append(None)
return fis
def get_fis(self, mean=False, abs=False):
'''This method will return a pandas DataFrame with
each row a fold, and each column a feature if
the underlying model supported either the `coef_`
or `feature_importance_` parameters.
In the case that the underlying feature importances
or `coef_` values are not flat, e.g., in the case
of a one versus rest categorical model, then a list
of multiple DataFrames will be returned, one for each class.
The order of the list will correspond to the order of classes.
Parameters
-----------
mean : bool, optional
If True, return the mean value
across evaluation folds as a pandas Series.
Any features with a mean value of 0 will
also be excluded. Otherwise, if default
of False, return raw values for each fold
as a Dataframe.
::
default = False
abs : bool, optional
If the feature importances
should be absolute values
or not.
::
default = False
Returns
--------
fis : pandas DataFrame or Series
Assuming mean=False, a pandas DataFrame where each row contains the
feature importances from an evaluation fold | |
-0.00461 0.00928 2005.001413
IGS_7587 4117361.7199 2517077.0399 4157679.2708 -0.01531 0.01213 0.01113 2005.001413
IGS_7589 4121934.2496 2652189.6210 4069034.4082 0.00106 -0.01590 -0.04446 2005.001413
IGS_7593 4075572.5569 931755.2587 4801584.4442 -0.01601 0.01710 0.01008 2005.001413
IGS_7596 4075582.2884 931837.4121 4801560.1731 -0.01545 0.01724 0.01124 2005.001413
IGS_7597 4075601.7671 931826.6851 4801547.8909 -0.01545 0.01724 0.01124 2005.001413
IGS_7601 2890652.6054 1310295.4335 5513958.7852 -0.01672 0.01297 0.00834 2005.001413
IGS_7602 2102904.1676 721602.4399 5958201.3004 -0.00004 0.00017 0.00016 2005.001413
IGS_7607 2988029.1092 655957.1504 5578669.3234 -0.00773 0.00954 0.02090 2005.001413
IGS_7610 -1995678.7334 -5037317.6994 3357328.0754 -0.01376 0.00059 -0.00615 2005.001413
IGS_7611 -1449752.4708 -4975298.5758 3709123.8837 -0.01422 0.00055 -0.00480 2005.001413
IGS_7612 -130872.3768 -4762317.1010 4226851.0193 -0.01536 0.00105 -0.00190 2005.001413
IGS_7613 -1324009.2265 -5332181.9551 3231962.4314 -0.01266 0.00034 -0.00468 2005.001413
IGS_7614 -2112065.0886 -3705356.5037 4726813.7331 -0.01462 0.00049 -0.00728 2005.001413
IGS_7615 2607848.5782 -5488069.6101 1932739.6353 0.00749 0.00917 0.01238 2005.001413
IGS_7616 -2409150.2559 -4478573.1681 3838617.3669 -0.01846 0.00656 -0.00333 2005.001413
IGS_7617 -5464075.0682 -2495248.6083 2148297.1036 -0.01383 0.06304 0.03213 2005.001413
IGS_7618 1446374.9893 -4447939.6608 4322306.1498 -0.01552 -0.00155 0.00422 2005.001413
IGS_7625 2612545.5535 -3426878.7655 4686756.1541 -0.01684 -0.00095 0.00858 2005.001413
IGS_7626 1091444.5019 -4351283.4306 4518706.8719 -0.03275 0.02706 -0.05182 2005.001413
IGS_7640 1492054.2400 -4887961.0012 -3803541.3216 0.03538 -0.00222 0.01694 2005.001413
IGS_7805 2892595.2576 1311807.9249 5512610.9772 -0.01569 0.01644 0.01398 2005.001413
IGS_7806 2892607.0051 1311813.1922 5512598.7530 -0.01569 0.01644 0.01398 2005.001413
IGS_7810 4331283.5546 567549.8898 4633140.3520 -0.01360 0.01811 0.01226 2005.001413
IGS_7811 3738332.6866 1148246.6146 5021816.0978 -0.01729 0.01564 0.00884 2005.001413
IGS_7820 -1281275.9698 5640727.3413 2682925.7547 -0.03052 -0.00683 -0.00820 2005.001413
IGS_7821 -2830744.4452 4676580.2926 3275072.8432 -0.03004 -0.01174 -0.01192 2005.001413
IGS_7823 5105602.9864 -555206.7057 3769654.5928 -0.00816 0.01830 0.01385 2005.001413
IGS_7824 5105473.8964 -555110.5725 3769892.9075 -0.00816 0.01830 0.01385 2005.001413
IGS_7825 -4467064.5967 2683034.8816 -3667007.5472 -0.03765 0.00155 0.04513 2005.001413
IGS_7830 4744552.5948 2119414.5089 3686245.0817 0.01663 -0.03007 -0.05102 2005.001413
IGS_7831 4728283.2206 2879670.5403 3156894.8555 -0.01893 0.01507 0.01890 2005.001413
IGS_7832 3992100.7724 4192172.6067 2670410.9214 -0.03110 0.01351 0.02650 2005.001413
IGS_7833 3899223.9988 396743.1512 5015074.0514 -0.01364 0.01722 0.00662 2005.001413
IGS_7834 4075529.7639 931781.5560 4801618.4101 -0.01545 0.01724 0.01124 2005.001413
IGS_7835 4581691.5261 556159.6912 4389359.5840 -0.01424 0.01877 0.01155 2005.001413
IGS_7836 3800639.5467 881982.1703 5028831.7612 -0.01620 0.01588 0.00911 2005.001413
IGS_7837 -2831088.3229 4676203.2356 3275172.7245 -0.03004 -0.01174 -0.01192 2005.001413
IGS_7838 -3822388.3172 3699363.6038 3507573.0809 0.00155 0.00907 -0.00049 2005.001413
IGS_7839 4194426.3766 1162694.1763 4647246.7304 -0.01624 0.01803 0.01120 2005.001413
IGS_7840 4033463.6085 23662.6167 4924305.2504 -0.01286 0.01709 0.01004 2005.001413
IGS_7841 3800432.1796 881692.0943 5029030.1272 -0.01620 0.01588 0.00911 2005.001413
IGS_7843 -4446477.3634 2678127.0091 -3696250.8619 -0.03836 0.00183 0.04528 2005.001413
IGS_7845 4581692.0596 556196.1763 4389355.1539 -0.01424 0.01877 0.01155 2005.001413
IGS_7848 4696991.9184 724001.6672 4239671.6437 -0.01473 0.01989 0.00989 2005.001413
IGS_7849 -4467063.9374 2683034.4837 -3667006.9954 -0.03765 0.00155 0.04513 2005.001413
IGS_7850 -1330008.2992 -5328391.5857 3236502.6770 -0.01282 0.00042 -0.00511 2005.001413
IGS_7853 -2410421.3385 -4477799.9973 3838689.9647 -0.01056 0.02672 -0.02316 2005.001413
IGS_7882 -1997242.5189 -5528040.8820 2468355.5773 -0.04565 0.02416 0.01735 2005.001413
IGS_7883 -2406127.3196 -4898367.9360 3290336.8951 -0.03242 0.02835 0.01661 2005.001413
IGS_7884 -1483442.9207 -5019625.6689 3635692.0507 -0.00961 -0.00290 0.00424 2005.001413
IGS_7886 -2517242.6470 -4198550.8236 4076570.2745 -0.01894 0.00789 -0.00444 2005.001413
IGS_7894 -2196778.0237 -4887336.9234 3448425.0419 -0.01064 0.00733 -0.01144 2005.001413
IGS_7907 1942792.2000 -5804077.6218 -1796918.9828 0.01235 0.00154 0.01513 2005.001413
IGS_7918 1130705.0936 -4831345.4414 3994116.0196 -0.01502 0.00001 0.00239 2005.001413
IGS_7920 1130741.6945 -4831369.7892 3994076.1487 -0.01502 0.00001 0.00239 2005.001413
IGS_7939 4641964.7397 1393070.2557 4133262.5104 -0.01814 0.01930 0.01471 2005.001413
IGS_7941 4641978.7136 1393067.6300 4133249.5497 -0.01814 0.01930 0.01471 2005.001413
IGS_8833 3899237.5996 396769.4526 5015055.3713 -0.01364 0.01722 0.00662 2005.001413
IGS_8834 4075576.7367 931785.5955 4801583.6546 -0.01545 0.01724 0.01124 2005.001413
IGS_ABER 3466272.2549 -125904.2271 5334662.4377 -0.01204 0.01533 0.00958 2005.001413
IGS_ACOR 4594489.7026 -678367.7714 4357066.1329 -0.00966 0.02294 0.01021 2005.001413
IGS_ADE1 -3939182.2340 3467075.3273 -3613220.5599 -0.04085 0.00298 0.04851 2005.001413
IGS_ADEA -1941059.6357 1628659.2017 -5833613.5303 0.00205 -0.01453 -0.00551 2005.001413
IGS_ADEB -1940878.3959 1628472.9398 -5833723.0411 0.00205 -0.01453 -0.00551 2005.001413
IGS_ADFB -1940878.5146 1628473.0411 -5833723.4131 0.00205 -0.01453 -0.00551 2005.001413
IGS_AIRA -3530185.4909 4118797.3322 3344036.9631 -0.02581 -0.00734 -0.01507 2005.001413
IGS_AIS1 -2430153.9000 -2737192.8976 5205816.5761 -0.01568 0.00140 -0.00554 2005.001413
IGS_AJAC 4696989.4355 723994.4687 4239678.5450 -0.01355 0.01922 0.01206 2005.001413
IGS_ALAC 5009051.1837 -42072.1978 3935057.7399 -0.00966 0.01995 0.01344 2005.001413
IGS_ALBH -2341332.9521 -3539049.5103 4745791.3241 -0.01016 -0.00077 -0.00477 2005.001413
IGS_ALGO 918129.3737 -4346071.2634 4561977.8573 -0.01578 -0.00407 0.00425 2005.001413
IGS_ALIC -4052052.1479 4212836.0682 -2545105.4002 -0.03955 -0.00505 0.05410 2005.001413
IGS_ALME 5105220.0929 -219278.5211 3804387.1303 -0.00787 0.01914 0.01339 2005.001413
IGS_ALRT 388042.6964 -740382.3865 6302001.8733 -0.02089 -0.00541 0.00612 2005.001413
IGS_AMC2 -1248596.1924 -4819428.2109 3976505.9974 -0.01463 0.00089 -0.00492 2005.001413
IGS_AMMN 4386124.4079 3172638.1720 3363685.3009 -0.02339 0.01115 0.01324 2005.001413
IGS_AMSA 1086061.5370 4927963.0110 -3887828.3038 -0.01178 0.00309 -0.00200 2005.001413
IGS_AMTB 1086062.9502 4927948.5183 -3887839.9941 -0.01178 0.00309 -0.00200 2005.001413
IGS_AMUB 1086062.9809 4927948.6300 -3887840.0807 -0.01178 0.00309 -0.00200 2005.001413
IGS_ANG1 -532087.0274 -5541064.6964 3103079.2151 -0.01337 -0.00477 -0.00022 2005.001413
IGS_ANKR 4121948.5245 2652187.9019 4069023.7558 -0.00779 -0.00365 0.00853 2005.001413
IGS_ANTC 1608539.5685 -4816369.7137 -3847798.5309 0.01731 -0.00182 0.00766 2005.001413
IGS_AOA1 -2547880.1401 -4628773.6307 3561050.0369 -0.02989 0.02874 0.01609 2005.001413
IGS_AOML 982296.7231 -5664607.2192 2752614.4985 -0.00992 -0.00076 0.00240 2005.001413
IGS_AREA 1942796.8503 -5804077.7081 -1796918.9739 0.01195 0.00522 0.01498 2005.001413
IGS_AREB 1942803.3077 -5804071.4346 -1796922.1392 -0.00309 -0.00645 0.00273 2005.001413
IGS_AREQ 1942826.1976 -5804070.3145 -1796894.2632 0.00273 -0.00528 0.00752 2005.001413
IGS_ARFB 1942803.3135 -5804071.5590 -1796922.1782 -0.00309 -0.00645 0.00273 2005.001413
IGS_ARMA 5991269.2470 773728.7441 2040688.6194 -0.01032 0.02283 0.01434 2005.001413
IGS_ARP3 -693606.0915 -5601311.8087 2960668.9382 -0.01393 0.00094 -0.00348 2005.001413
IGS_ARTU 1843956.7195 3016203.1120 5291261.7348 -0.02378 0.00905 0.00419 2005.001413
IGS_ASC1 6118526.0518 -1572344.7357 -876451.0672 -0.00016 -0.00572 0.01101 2005.001413
IGS_ASDB 6121161.5248 -1563943.2302 -872612.9934 -0.00324 -0.00896 0.01114 2005.001413
IGS_ASHV 673609.6084 -5148653.6307 3692529.5326 -0.01403 -0.00068 0.00168 2005.001413
IGS_ASPA -6100260.0508 -996503.4496 -1567977.8522 -0.01830 0.06140 0.03307 2005.001413
IGS_ATWC -2610866.6449 -1560571.0318 5587439.1144 -0.03826 -0.00884 -0.00434 2005.001413
IGS_AUCK -5105681.1754 461564.0313 -3782181.4818 -0.02347 -0.00241 0.03247 2005.001413
IGS_AUS5 -743774.1126 -5460642.4731 3200346.5017 -0.01098 -0.00034 -0.00324 2005.001413
IGS_AUTF 1360918.8666 -3420457.9228 -5191175.2167 0.01240 -0.00692 0.00512 2005.001413
IGS_AZCN -1572187.7222 -4864404.5704 3804295.0969 -0.01460 0.00130 -0.00488 2005.001413
IGS_AZCO -1857944.1867 -5124370.4076 3303748.6075 -0.01342 0.00141 -0.00631 2005.001413
IGS_AZRY -2385740.3276 -4758052.1529 3504739.0097 -0.02563 0.01924 0.01126 2005.001413
IGS_AZU1 -2472979.3783 -4671338.0392 3558107.7694 -0.02847 0.02399 0.01042 2005.001413
IGS_BADA -838277.9032 3865777.0126 4987626.6231 -0.02623 -0.00007 -0.00376 2005.001413
IGS_BADB -838277.9353 3865777.1486 4987626.7915 -0.02623 -0.00007 -0.00376 2005.001413
IGS_BAKE -289833.9880 -2756501.0592 5725162.2463 -0.01971 -0.00659 0.00827 2005.001413
IGS_BAKO -1836969.1285 6065617.0858 -716257.8103 -0.02423 -0.00963 -0.00896 2005.001413
IGS_BAN2 1344087.6387 6068610.2625 1429291.9682 -0.04242 0.00193 0.03477 2005.001413
IGS_BAR1 -2584162.9671 -4656252.9067 3498534.1877 -0.02966 0.03147 0.01853 2005.001413
IGS_BARB 3143384.5789 -5359714.5408 1434871.6304 0.00997 0.01064 0.01556 2005.001413
IGS_BARH 1693644.8423 -4239067.5648 4439567.2363 -0.01587 -0.00155 0.00513 2005.001413
IGS_BAY1 -3484297.2468 -1084760.1804 5213545.6606 -0.02051 0.00323 -0.01293 2005.001413
IGS_BAY2 -3484276.9548 -1084780.1983 5213554.9591 -0.02051 0.00323 -0.01293 2005.001413
IGS_BAYR 493530.0630 -4611778.1461 4363728.8723 -0.01588 -0.00017 -0.00017 2005.001413
IGS_BBRY -2386883.9012 -4708014.0941 3571866.7132 -0.02015 0.01237 0.00563 2005.001413
IGS_BDOS 3143382.2377 -5359714.8307 1434875.7965 0.00257 0.01197 0.01230 2005.001413
IGS_BELB 1106058.0147 -763816.8221 -6214233.6214 0.01031 -0.00413 0.00711 2005.001413
IGS_BELL 4775849.3971 116814.3676 4213018.9695 -0.01017 0.01883 0.01266 2005.001413
IGS_BEMB 1106046.6270 -763739.0101 -6214243.1954 0.01031 -0.00413 0.00711 2005.001413
IGS_BHR2 3633908.7964 4425275.5432 2799861.4941 -0.03243 0.00957 0.02698 2005.001413
IGS_BILI -2321893.0738 560096.8479 5894691.7961 -0.02083 -0.00394 -0.00763 2005.001413
IGS_BINT -2495021.4454 5858781.5601 360450.6814 -0.01992 -0.02370 -0.01101 2005.001413
IGS_BISH 1243767.1847 4513678.5168 4317880.3400 -0.02733 0.00406 0.00236 2005.001413
IGS_BJFS -2148744.0828 4426641.2726 4044655.9269 -0.03124 -0.00577 -0.00668 2005.001413
IGS_BLYT -2223206.7008 -4830299.7954 3510587.6146 -0.01518 0.00085 -0.00568 2005.001413
IGS_BOGO 3633738.9717 1397434.1328 5035353.4756 -0.01794 0.01505 0.00886 2005.001413
IGS_BOGT 1744399.0263 -6116037.5015 512731.7235 -0.01288 0.04448 0.01236 2005.001413
IGS_BOR1 3738358.4536 1148173.7096 5021815.7731 -0.01729 0.01564 0.00884 2005.001413
IGS_BORK 3770134.5093 446054.4631 5108091.1168 -0.01469 0.01636 0.00927 2005.001413
IGS_BRAZ 4115014.0741 -4550641.5592 -1741443.9512 -0.00062 -0.00493 0.01213 2005.001413
IGS_BREW -2112007.1449 -3705351.8251 4726827.1470 -0.01480 0.00004 -0.00702 2005.001413
IGS_BRFT 4985393.5283 -3954993.4124 -428426.7042 -0.00208 -0.00432 0.01238 2005.001413
IGS_BRMU 2304703.4755 -4874817.1769 3395186.9504 -0.01311 -0.00015 0.00671 2005.001413
IGS_BRST 4231162.5758 -332746.6779 4745130.9215 -0.01147 0.01718 0.01149 2005.001413
IGS_BRUS 4027893.7466 307045.8242 4919475.1207 -0.01339 0.01654 0.01058 2005.001413
IGS_BSHM 4395951.4857 3080707.0613 3433498.0327 -0.02045 0.01303 0.01781 2005.001413
IGS_BUCU 4093760.8647 2007793.8087 4445129.9745 -0.01630 0.01788 0.01073 2005.001413
IGS_BUDP 3513638.2527 778956.3979 5248216.4348 -0.01579 0.01490 0.00886 2005.001413
IGS_BUE2 2745499.0525 -4483636.5434 -3599054.4917 0.00447 -0.00794 0.00850 2005.001413
IGS_BUR1 -3989419.9352 2699532.9437 -4166619.5817 -0.03933 0.00785 0.04377 2005.001413
IGS_BZRG 4312657.4848 864634.6600 4603844.4431 -0.01530 0.01719 0.01196 2005.001413
IGS_CABL -2657528.4528 -3857584.2828 4314193.2684 -0.00544 0.00386 0.00204 2005.001413
IGS_CACB 4163493.9878 -4163782.1607 -2444561.4834 0.00385 -0.00313 0.01072 2005.001413
IGS_CADB 4163471.6589 -4163826.2229 -2444514.5621 0.00385 -0.00313 0.01072 2005.001413
IGS_CAGL 4893378.8188 772649.7894 4004182.1640 -0.01331 0.01972 0.01248 2005.001413
IGS_CAGS 1096349.0547 -4335060.6079 4533255.2024 -0.01543 -0.00425 0.00474 2005.001413
IGS_CAGZ 4893379.9719 772650.4790 4004180.0315 -0.01331 0.01972 0.01248 2005.001413
IGS_CANT 4625924.4776 -307096.5157 4365771.3993 -0.01117 0.01950 0.01118 2005.001413
IGS_CARR -2620447.3342 -4460941.4361 3718442.8692 -0.02384 0.02436 0.01481 2005.001413
IGS_CART 1567348.6000 -6075293.5195 1142850.8107 0.01223 0.00272 0.01143 2005.001413
IGS_CAS1 -901776.1526 2409383.3443 -5816748.4473 0.00141 -0.00872 -0.00512 2005.001413
IGS_CASA -2444430.4525 -4428687.6971 3875747.3738 -0.02146 0.01199 -0.00633 2005.001413
IGS_CASC 4917536.9440 -815726.2047 3965857.3855 -0.00746 0.01932 0.01312 2005.001413
IGS_CAT1 -2540622.1870 -4682555.3594 3495319.3517 -0.02956 0.02995 0.01770 2005.001413
IGS_CAT2 -2532493.6908 -4696708.6321 3483154.2523 -0.02947 0.03014 0.01744 2005.001413
IGS_CCJM -4488925.7355 3483903.1016 2887743.2946 0.02786 0.02484 0.01021 2005.001413
IGS_CCV3 921807.1597 -5535343.0403 3021430.7671 -0.01256 0.00161 0.00107 2005.001413
IGS_CEDU -3753472.5671 3912741.0022 -3347960.4514 -0.04192 0.00164 0.05033 2005.001413
IGS_CEUT 5150601.7917 -478834.3851 3718884.8641 -0.00894 0.01650 0.01361 2005.001413
IGS_CFAG 2016584.8721 -5050165.6353 -3323308.7566 0.00844 -0.00326 0.01035 2005.001413
IGS_CHAB -4590646.4220 -275436.3693 -4404628.6333 -0.02728 0.03855 0.02442 2005.001413
IGS_CHA1 946821.6679 -5284901.4924 3431363.1160 -0.01344 0.00230 0.00027 2005.001413
IGS_CHAN -2674427.2309 3757143.2183 4391521.6710 -0.02518 -0.00889 -0.00920 2005.001413
IGS_CHAT -4590671.1046 -275482.6785 -4404596.5729 -0.02505 0.03911 0.02413 2005.001413
IGS_CHB1 430716.8047 -4445226.4500 4538513.9775 -0.01634 -0.00271 0.00097 2005.001413
IGS_CHIL -2478003.2479 -4655349.0867 3577932.2672 -0.02708 0.02239 0.00883 2005.001413
IGS_CHIZ 4427603.2438 -31506.0447 4575621.8054 -0.01122 0.01875 0.01175 2005.001413
IGS_CHL1 1281265.6008 -4811189.0044 3973020.9143 -0.01441 -0.00091 0.00330 2005.001413
IGS_CHPI 4164613.8774 -4162456.8749 -2445028.7977 0.00159 -0.00674 0.01122 2005.001413
IGS_CHUM 1228950.7908 4508079.9105 4327868.5024 -0.02622 0.00631 0.00291 2005.001413
IGS_CHUR -236438.8574 -3307616.8350 5430049.2292 -0.01899 -0.00785 0.00727 2005.001413
IGS_CIBB -1836963.7769 6065626.6002 -716217.3088 -0.02321 -0.01111 -0.00624 2005.001413
IGS_CIC1 -2433177.0914 -4845044.8802 3348295.8661 -0.03141 0.02826 0.01703 2005.001413
IGS_CICB -1836963.7695 6065626.6388 -716217.2972 -0.02321 -0.01111 -0.00624 2005.001413
IGS_CICE -2433307.7131 -4844962.9995 3348363.3992 -0.03141 0.02826 0.01703 2005.001413
IGS_CIT1 -2491490.1573 -4660803.2972 3559128.9563 -0.02922 0.02395 0.01181 2005.001413
IGS_CKIS -5583182.1569 -2054143.5463 -2292166.7073 -0.03396 0.05384 0.03256 2005.001413
IGS_CLAR -2458218.3776 -4680467.5195 3556758.4679 -0.02776 0.02240 0.00919 2005.001413
IGS_CMP9 -2508506.0538 -4637174.8685 3579499.9044 -0.02818 0.02656 0.00926 2005.001413
IGS_CNMR -5087757.9005 3465028.8610 1664653.6928 0.01269 0.01394 0.01248 2005.001413
IGS_COCO -741950.4013 6190961.6417 -1337768.1683 -0.04672 0.00585 0.04957 2005.001413
IGS_COLA 1113278.3007 6233646.2911 760277.0232 -0.04719 0.00557 0.03689 2005.001413
IGS_CONZ 1492007.5807 -4887910.7095 -3803639.9247 | |
# columns [b * k : b * k + b],
# where b is the dimension of box representation (4 or 5)
# Note that compared to Detectron1,
# we do not perform bounding box regression for background
# classes.
gt_class_cols = box_dim * fg_gt_classes[:, None] + torch.arange(
box_dim, device=device
)
gt_covar_class_cols = self.bbox_cov_dims * fg_gt_classes[
:, None
] + torch.arange(self.bbox_cov_dims, device=device)
loss_reg_normalizer = gt_classes.numel()
pred_proposal_deltas = pred_proposal_deltas[fg_inds[:, None], gt_class_cols]
gt_proposals_delta = gt_proposal_deltas[fg_inds]
if self.compute_bbox_cov:
pred_proposal_covs = pred_proposal_covs[
fg_inds[:, None], gt_covar_class_cols
]
pred_proposal_covs = clamp_log_variance(pred_proposal_covs)
if self.bbox_cov_loss == "negative_log_likelihood":
if self.bbox_cov_type == "diagonal":
# Get foreground proposals.
_proposals_boxes = proposals_boxes.tensor[fg_inds]
# Compute regression negative log likelihood loss according to:
# "What Uncertainties Do We Need in Bayesian Deep Learning for Computer Vision?", NIPS 2017
loss_box_reg = (
0.5
* torch.exp(-pred_proposal_covs)
* smooth_l1_loss(
pred_proposal_deltas,
gt_proposals_delta,
beta=self.smooth_l1_beta,
)
)
loss_covariance_regularize = 0.5 * pred_proposal_covs
loss_box_reg += loss_covariance_regularize
loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer
else:
# Multivariate Gaussian Negative Log Likelihood loss using pytorch
# distributions.multivariate_normal.log_prob()
forecaster_cholesky = covariance_output_to_cholesky(
pred_proposal_covs
)
multivariate_normal_dists = (
distributions.multivariate_normal.MultivariateNormal(
pred_proposal_deltas, scale_tril=forecaster_cholesky
)
)
loss_box_reg = -multivariate_normal_dists.log_prob(
gt_proposals_delta
)
loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer
elif self.bbox_cov_loss == "second_moment_matching":
# Compute regression covariance using second moment
# matching.
loss_box_reg = smooth_l1_loss(
pred_proposal_deltas, gt_proposals_delta, self.smooth_l1_beta
)
errors = pred_proposal_deltas - gt_proposals_delta
if self.bbox_cov_type == "diagonal":
# Handle diagonal case
second_moment_matching_term = smooth_l1_loss(
torch.exp(pred_proposal_covs),
errors ** 2,
beta=self.smooth_l1_beta,
)
loss_box_reg += second_moment_matching_term
loss_box_reg = torch.sum(loss_box_reg) / loss_reg_normalizer
else:
# Handle full covariance case
errors = torch.unsqueeze(errors, 2)
gt_error_covar = torch.matmul(
errors, torch.transpose(errors, 2, 1)
)
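# gt_error_covar is the empirical second moment of the residuals, i.e. the
# per-proposal outer product e @ e^T, which is matched below against the
# predicted covariance reconstructed as L @ L^T from the Cholesky factor.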
# This is the cholesky decomposition of the covariance matrix.
# We reconstruct it from 10 estimated parameters as a
# lower triangular matrix.
forecaster_cholesky = covariance_output_to_cholesky(
pred_proposal_covs
)
predicted_covar = torch.matmul(
forecaster_cholesky,
torch.transpose(forecaster_cholesky, 2, 1),
)
second_moment_matching_term = smooth_l1_loss(
predicted_covar,
gt_error_covar,
beta=self.smooth_l1_beta,
reduction="sum",
)
loss_box_reg = (
torch.sum(loss_box_reg) + second_moment_matching_term
) / loss_reg_normalizer
elif self.bbox_cov_loss == "energy_loss":
forecaster_cholesky = covariance_output_to_cholesky(
pred_proposal_covs
)
# Define per-anchor Distributions
multivariate_normal_dists = (
distributions.multivariate_normal.MultivariateNormal(
pred_proposal_deltas, scale_tril=forecaster_cholesky
)
)
# Define Monte-Carlo Samples
distributions_samples = multivariate_normal_dists.rsample(
(self.bbox_cov_num_samples + 1,)
)
distributions_samples_1 = distributions_samples[
0 : self.bbox_cov_num_samples, :, :
]
distributions_samples_2 = distributions_samples[
1 : self.bbox_cov_num_samples + 1, :, :
]
# Compute energy score
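# The two terms below form an energy-score-style objective,
# 2 * E[d(samples, gt)] - E[d(samples, samples')], with smooth L1 as the
# distance d, estimated by Monte-Carlo over the drawn samples.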
loss_covariance_regularize = (
-smooth_l1_loss(
distributions_samples_1,
distributions_samples_2,
beta=self.smooth_l1_beta,
reduction="sum",
)
/ self.bbox_cov_num_samples
) # Second term
gt_proposals_delta_samples = torch.repeat_interleave(
gt_proposals_delta.unsqueeze(0),
self.bbox_cov_num_samples,
dim=0,
)
loss_first_moment_match = (
2.0
* smooth_l1_loss(
distributions_samples_1,
gt_proposals_delta_samples,
beta=self.smooth_l1_beta,
reduction="sum",
)
/ self.bbox_cov_num_samples
) # First term
# Final Loss
loss_box_reg = (
loss_first_moment_match + loss_covariance_regularize
) / loss_reg_normalizer
elif self.bbox_cov_loss == "pmb_negative_log_likelihood":
losses = self.nll_od_loss_with_nms(
predictions, proposals, gt_instances
)
loss_box_reg = losses["loss_box_reg"]
use_nll_loss = True
else:
raise ValueError(
"Invalid regression loss name {}.".format(self.bbox_cov_loss)
)
# Perform loss annealing. Not really essential in Generalized-RCNN case, but good practice for more
# elaborate regression variance losses.
standard_regression_loss = smooth_l1_loss(
pred_proposal_deltas,
gt_proposals_delta,
self.smooth_l1_beta,
reduction="sum",
)
standard_regression_loss = (
standard_regression_loss / loss_reg_normalizer
)
probabilistic_loss_weight = get_probabilistic_loss_weight(
current_step, self.annealing_step
)
loss_box_reg = (
(1.0 - probabilistic_loss_weight) * standard_regression_loss
+ probabilistic_loss_weight * loss_box_reg
)
if use_nll_loss:
loss_cls = (1.0 - probabilistic_loss_weight) * loss_cls
else:
loss_box_reg = smooth_l1_loss(
pred_proposal_deltas,
gt_proposals_delta,
self.smooth_l1_beta,
reduction="sum",
)
loss_box_reg = loss_box_reg / loss_reg_normalizer
if use_nll_loss:
losses["loss_cls"] = loss_cls
losses["loss_box_reg"] = loss_box_reg
else:
losses = {"loss_cls": loss_cls, "loss_box_reg": loss_box_reg}
return losses
def nll_od_loss_with_nms(
self,
predictions: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
proposals: List[Instances],
gt_instances,
):
self.ppp_intensity_function.update_distribution()
_, pred_deltas, _, pred_covs = predictions
boxes = self.predict_boxes(predictions, proposals)
scores = self.predict_probs(predictions, proposals)
scores = [score.clamp(1e-6, 1 - 1e-6) for score in scores]
image_shapes = [x.image_size for x in proposals]
# Apply NMS without score threshold
instances, kept_idx = fast_rcnn_inference(
boxes,
scores,
image_shapes,
0.0,
self.test_nms_thresh,
self.test_topk_per_image,
)
num_prop_per_image = [len(p) for p in proposals]
pred_covs = pred_covs.split(num_prop_per_image)
pred_deltas = pred_deltas.split(num_prop_per_image)
nll_gt_classes = [instances.gt_classes for instances in gt_instances]
gt_boxes = [instances.gt_boxes.tensor for instances in gt_instances]
kept_proposals = [
prop.proposal_boxes.tensor[idx] for prop, idx in zip(proposals, kept_idx)
]
gt_box_deltas = []
for i in range(len(gt_boxes)):
# Get delta between each GT and proposal, batch-wise
tmp = torch.stack(
[
self.box2box_transform.get_deltas(
kept_proposals[i],
gt_boxes[i][j].unsqueeze(0).repeat(len(kept_proposals[i]), 1),
)
for j in range(len(gt_boxes[i]))
]
)
gt_box_deltas.append(
tmp.permute(1, 0, 2)
) # [gt,pred,boxdim] -> [pred, gt, boxdim]
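# gt_box_deltas[i] has shape [num_kept_proposals, num_gt, box_dim]: the
# regression target from every kept proposal to every ground-truth box,
# later passed as target_deltas to the matching-based NLL loss.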
_, num_classes = scores[0].shape
num_classes -= 1 # do not count background class
pred_covs = [pred_cov[kept] for pred_cov, kept in zip(pred_covs, kept_idx)]
nll_pred_cov = [
covariance_output_to_cholesky(reshape_box_preds(cov, num_classes))
for cov in pred_covs
]
nll_scores = [score[kept] for score, kept in zip(scores, kept_idx)]
nll_pred_deltas = [
reshape_box_preds(delta[kept], num_classes)
for delta, kept in zip(pred_deltas, kept_idx)
]
if self.bbox_cov_dist_type == "gaussian":
regression_dist = (
lambda x, y: distributions.multivariate_normal.MultivariateNormal(
loc=x, scale_tril=y
)
)
elif self.bbox_cov_dist_type == "laplacian":
regression_dist = lambda x, y: distributions.laplace.Laplace(
loc=x, scale=y.diagonal(dim1=-2, dim2=-1) / np.sqrt(2)
)
else:
raise Exception(
f"Bounding box uncertainty distribution {self.bbox_cov_dist_type} is not available."
)
nll, associations, decompositions = negative_log_likelihood(
nll_scores,
nll_pred_deltas,
nll_pred_cov,
gt_boxes,
nll_gt_classes,
image_shapes,
regression_dist,
self.ppp_intensity_function,
self.nll_max_num_solutions,
scores_have_bg_cls=True,
target_deltas=gt_box_deltas,
matching_distance=self.matching_distance,
)
# Save some stats
storage = get_event_storage()
num_classes = self.num_classes
mean_variance = np.mean(
[
cov.unsqueeze(1)
.reshape(cov.shape[0], num_classes, -1)[:, :, :4]
.pow(2)
.mean()
.item()
for cov in nll_pred_cov
if cov.shape[0] > 0
]
)
storage.put_scalar("nll/mean_covariance", mean_variance)
ppp_intens = (
self.ppp_intensity_function.integrate(
torch.as_tensor(image_shapes).to(device), num_classes
)
.mean()
.item()
)
storage.put_scalar("nll/ppp_intensity", ppp_intens)
reg_loss = np.mean(
[
np.clip(
decomp["matched_bernoulli_reg"][0]
/ (decomp["num_matched_bernoulli"][0] + 1e-6),
-1e25,
1e25,
)
for decomp in decompositions
]
)
cls_loss_match = np.mean(
[
np.clip(
decomp["matched_bernoulli_cls"][0]
/ (decomp["num_matched_bernoulli"][0] + 1e-6),
-1e25,
1e25,
)
for decomp in decompositions
]
)
cls_loss_no_match = np.mean(
[
np.clip(
decomp["unmatched_bernoulli"][0]
/ (decomp["num_unmatched_bernoulli"][0] + 1e-6),
-1e25,
1e25,
)
for decomp in decompositions
]
)
# Collect all losses
losses = dict()
losses["loss_box_reg"] = nll
# Add losses for logging, these do not propagate gradients
losses["loss_regression"] = torch.tensor(reg_loss).to(nll.device)
losses["loss_cls_matched"] = torch.tensor(cls_loss_match).to(nll.device)
losses["loss_cls_unmatched"] = torch.tensor(cls_loss_no_match).to(nll.device)
return losses
def inference(self, predictions, proposals):
"""
Returns:
list[Instances]: same as `fast_rcnn_inference`.
list[Tensor]: same as `fast_rcnn_inference`.
"""
boxes = self.predict_boxes(predictions, proposals)
scores = self.predict_probs(predictions, proposals)
image_shapes = [x.image_size for x in proposals]
return fast_rcnn_inference(
boxes,
scores,
image_shapes,
self.test_score_thresh,
self.test_nms_thresh,
self.test_topk_per_image,
)
def predict_boxes_for_gt_classes(self, predictions, proposals):
"""
Returns:
list[Tensor]: A list of Tensors of predicted boxes for GT classes in case of
class-specific box head. Element i of the list has shape (Ri, B), where Ri is
the number of predicted objects for image i and B is the box dimension (4 or 5)
"""
if not len(proposals):
return []
scores, proposal_deltas = predictions
proposal_boxes = [p.proposal_boxes for p in proposals]
proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor
N, B = proposal_boxes.shape
predict_boxes = self.box2box_transform.apply_deltas(
proposal_deltas, proposal_boxes
) # Nx(KxB)
K = predict_boxes.shape[1] // B
if K > 1:
gt_classes = torch.cat([p.gt_classes for p in proposals], dim=0)
# Some proposals are ignored or have a background class. Their gt_classes
# cannot be used as index.
gt_classes = gt_classes.clamp_(0, K - 1)
predict_boxes = predict_boxes.view(N, K, B)[
torch.arange(N, dtype=torch.long, device=predict_boxes.device),
gt_classes,
]
num_prop_per_image = [len(p) for p in proposals]
return predict_boxes.split(num_prop_per_image)
def predict_boxes(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions. The ``proposal_boxes`` field is expected.
Returns:
list[Tensor]: A list of Tensors of predicted class-specific or class-agnostic boxes
for each image. Element i has shape (Ri, K * B) or (Ri, B), where Ri is
the number of predicted objects for image i and B is the box dimension (4 or 5)
"""
if not len(proposals):
return []
_, proposal_deltas, _, _ = predictions
num_prop_per_image = [len(p) for p in proposals]
proposal_boxes = [p.proposal_boxes for p in proposals]
proposal_boxes = proposal_boxes[0].cat(proposal_boxes).tensor
predict_boxes = self.box2box_transform.apply_deltas(
proposal_deltas, proposal_boxes
) # Nx(KxB)
return predict_boxes.split(num_prop_per_image)
def predict_probs(self, predictions, proposals):
"""
Args:
predictions: return values of :meth:`forward()`.
proposals (list[Instances]): proposals that match the features that were
used to compute predictions.
Returns:
list[Tensor]: A list of Tensors of predicted class probabilities for each image.
Element i has shape (Ri, K + 1), where Ri is the number of predicted objects
for image i.
"""
scores, _, | |
<reponame>Ishma59/molo<gh_stars>0
from molo.core.tests.constants import TEST_IMAGE_HASH
AVAILABLE_ARTICLES = {
"meta": {
"total_count": 3
},
"items": [
{
"id": 10,
"meta": {
"type": "core.ArticlePage",
"detail_url": "http://localhost:8000/api/v2/pages/10/",
},
"title": "Test article 1",
"subtitle": "Sub for test article 1",
"body": [
{
"type": "paragraph",
"value": "Lorem ipsum dolor sit amet"
},
{
"type": "list",
"value": [
"Phasellus cursus eros turpis, vitae consequat sem."
]
},
{
"type": "paragraph",
"value": "Lorem ipsum"
}
],
"tags": [],
"commenting_state": "O",
"commenting_open_time": "2016-11-11T06:00:00Z",
"commenting_close_time": "2016-11-14T06:00:00Z",
"social_media_title": "test image",
"social_media_description": "test image description",
"related_sections": [],
"featured_in_latest": False,
"featured_in_latest_start_date": "2016-11-11T06:00:00Z",
"featured_in_latest_end_date": "2016-11-12T06:00:00Z",
"featured_in_section": False,
"featured_in_section_start_date": "2016-11-11T06:00:00Z",
"featured_in_homepage": False,
"featured_in_homepage_start_date": "2016-11-11T06:00:00Z",
"featured_in_homepage_end_date": "2016-11-12T06:00:00Z",
"feature_as_hero_article": True,
"promote_date": "2016-11-11T06:00:00Z",
"demote_date": "2016-11-14T06:00:00Z",
"metadata_tags": [],
"latest_revision_created_at": "2016-10-07T12:04:27.316423Z",
"image": {
"id": 1,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://localhost/api/v2/images/13/"
}
}
},
{
"id": 11,
"meta": {
"type": "core.ArticlePage",
"detail_url": "http://localhost:8000/api/v2/pages/11/",
},
"title": "Test article 2",
"subtitle": "Sub for test article 2",
"body": [
{
"type": "paragraph",
"value": "Lorem ipsum dolor sit amet."
},
{
"type": "list",
"value": [
"Phasellus cursus eros turpis, vitae consequat sem "
"dapibus at. Sed fermentum mauris vitae fringilla "
"tristique. In hac habitasse platea dictumst."
]
},
{
"type": "paragraph",
"value": "Lorem ipsum"
}
],
"tags": [
"Another",
"Test"
],
"commenting_state": "O",
"commenting_open_time": "2016-12-23T06:00:00Z",
"commenting_close_time": "2016-12-26T06:00:00Z",
"social_media_title": "test image",
"social_media_description": "test image description",
"related_sections": [
{
"id": 1,
"meta": {
"type": "core.ArticlePageRelatedSections"
}
}
],
"featured_in_latest": False,
"featured_in_latest_start_date": "2016-12-23T06:00:00Z",
"featured_in_latest_end_date": "2016-12-24T06:00:00Z",
"featured_in_section": False,
"featured_in_section_start_date": "2016-12-23T06:00:00Z",
"featured_in_homepage": False,
"featured_in_homepage_start_date": "2016-12-20T06:00:00Z",
"featured_in_homepage_end_date": "2016-12-21T06:00:00Z",
"feature_as_hero_article": True,
"promote_date": "2016-12-23T06:00:00Z",
"demote_date": "2016-12-26T06:00:00Z",
"metadata_tags": [],
"latest_revision_created_at": "2016-11-09T10:17:45.352864Z",
"image": {
"id": 1,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://localhost/api/v2/images/17/"
}
}
},
{
"id": 12,
"meta": {
"type": "core.ArticlePage",
"detail_url": "http://localhost:8000/api/v2/pages/12/",
},
"title": "Test article 3",
"subtitle": "Sub for test article 3",
"body": [
{
"type": "paragraph",
"value": "Lorem ipsum dolor sit amet."
},
{
"type": "list",
"value": [
"Phasellus cursus eros turpis, vitae consequat sem "
"dapibus at. Sed fermentum mauris vitae fringilla "
"tristique. In hac habitasse platea dictumst."
]
},
{
"type": "paragraph",
"value": "Lorem ipsum"
}
],
"tags": [],
"commenting_state": "O",
"commenting_open_time": "2017-01-25T06:00:00Z",
"commenting_close_time": "2017-01-27T06:00:00Z",
"social_media_title": "test image",
"social_media_description": "test image description",
"related_sections": [],
"featured_in_latest": False,
"featured_in_latest_start_date": "2016-12-23T06:00:00Z",
"featured_in_latest_end_date": "2016-12-24T06:00:00Z",
"featured_in_section": False,
"featured_in_section_start_date": "2016-12-23T06:00:00Z",
"featured_in_homepage": False,
"featured_in_homepage_start_date": "2016-12-23T06:00:00Z",
"featured_in_homepage_end_date": "2016-12-26T06:00:00Z",
"feature_as_hero_article": True,
"promote_date": "2017-01-25T06:00:00Z",
"demote_date": "2017-01-27T06:00:00Z",
"metadata_tags": [],
"latest_revision_created_at": "2016-10-10T11:04:36.153490Z",
"image": {
"id": 1,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://localhost/api/v2/images/60/"
}
}
},
]
}
RELATED_IMAGE = {
"id": 1,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://localhost/api/v2/images/1/",
"tags": []
},
"title": "Image",
"width": 480,
"height": 480,
"file": "http://localhost:8000/media/original_images/test.png"
}
AVAILABLE_SECTIONS = {
"meta": {
"total_count": 3
},
"items": [
{
"id": 2,
"meta": {
"type": "core.SectionPage",
"detail_url": "http://localhost:8000/api/v2/pages/28/",
"html_url": "http://localhost/sections/wellbeing/"
"taking-care-yourself/"
},
"title": "Taking care of yourself",
"description": "",
"image": {
"id": 1,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://localhost:8000/api/v2/images/1/"
}
},
"extra_style_hints": "",
"commenting_state": "D",
"commenting_open_time": "2017-01-25T06:00:00Z",
"commenting_close_time": "2017-01-26T06:00:00Z",
"time": [],
"monday_rotation": False,
"tuesday_rotation": False,
"wednesday_rotation": False,
"thursday_rotation": False,
"friday_rotation": False,
"saturday_rotation": False,
"sunday_rotation": False,
"content_rotation_start_date": "2017-01-25T06:00:00Z",
"content_rotation_end_date": "2017-01-26T06:00:00Z",
"latest_revision_created_at": "2016-10-04T10:23:59.504526Z"
},
{
"id": 3,
"meta": {
"type": "core.SectionPage",
"detail_url": "http://localhost:8000/api/v2/pages/3/",
},
"title": "Stress management",
"description": "",
"image": {
"id": 1,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://localhost:8000/api/v2/images/1/"
}
},
"extra_style_hints": "",
"commenting_state": "D",
"commenting_open_time": "2017-01-25T06:00:00Z",
"commenting_close_time": "2017-01-26T06:00:00Z",
"time": [],
"monday_rotation": False,
"tuesday_rotation": False,
"wednesday_rotation": False,
"thursday_rotation": False,
"friday_rotation": False,
"saturday_rotation": False,
"sunday_rotation": False,
"content_rotation_start_date": "2017-01-25T06:00:00Z",
"content_rotation_end_date": "2017-01-26T06:00:00Z",
"latest_revision_created_at": "2016-10-04T10:24:05.826271Z"
},
{
"id": 4,
"meta": {
"type": "core.SectionPage",
"detail_url": "http://localhost:8000/api/v2/pages/4/",
},
"title": "Breastfeeding",
"description": "",
"image": {
"id": 1,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://localhost:8000/api/v2/images/1/"
}
},
"extra_style_hints": "",
"commenting_state": "D",
"commenting_open_time": "2017-01-25T06:00:00Z",
"commenting_close_time": "2017-01-26T06:00:00Z",
"time": [],
"monday_rotation": False,
"tuesday_rotation": False,
"wednesday_rotation": False,
"thursday_rotation": False,
"friday_rotation": False,
"saturday_rotation": False,
"sunday_rotation": False,
"content_rotation_start_date": "2017-01-25T06:00:00Z",
"content_rotation_end_date": "2017-01-26T06:00:00Z",
"latest_revision_created_at": "2016-10-04T10:24:21.246246Z"
},
]
}
AVAILABLE_SECTION_CHILDREN = {
"meta": {
"total_count": 2
},
"items": [
{
"id": 11,
"meta": {
"type": "core.ArticlePage",
"detail_url": "http://localhost:8000/api/v2/pages/11/",
},
"title": "Test article 11"
},
{
"id": 3,
"meta": {
"type": "core.SectionPage",
"detail_url": "http://localhost:8000/api/v2/pages/3/",
},
"title": "Test section 3"
},
]
}
LANGUAGE_LIST_RESPONSE = {
"meta": {
"total_count": 2
},
"items": [
{
"id": 1,
"meta": {
"type": "core.SiteLanguage",
"detail_url": "http://localhost:8000/api/v2/languages/1/"
}
},
{
"id": 2,
"meta": {
"type": "core.SiteLanguage",
"detail_url": "http://localhost:8000/api/v2/languages/2/"
}
}
]
}
LANGUAGE_RESPONSE_1 = {
"id": 1,
"meta": {
"type": "core.SiteLanguage",
"detail_url": "http://localhost:8000/api/v2/languages/1/"
},
"locale": "en",
"is_main_language": True,
"is_active": True
}
LANGUAGE_RESPONSE_2 = {
"id": 2,
"meta": {
"type": "core.SiteLanguage",
"detail_url": "http://localhost:8000/api/v2/languages/2/"
},
"locale": "fr",
"is_main_language": False,
"is_active": True
}
ARTICLE_PAGE_RESPONSE = {
"id": 9999,
"meta": {
"type": "core.ArticlePage",
"detail_url": "http://localhost:8000/api/v2/pages/12/",
"html_url": "http://localhost:8000/sections/test-section/article-1/",
"slug": "article-1",
"show_in_menus": False,
"seo_title": "",
"search_description": "",
"first_published_at": "2017-07-07T09:48:40.807381Z",
"parent": {
"id": 11,
"meta": {
"type": "core.SectionPage",
"detail_url": "http://localhost:8000/api/v2/pages/11/",
"html_url": "http://localhost:8000/sections/test-section/"
},
"title": "Test Section"
},
"children": None,
"translations": [
{
"locale": "fr",
"id": 13
}
],
"main_language_children": None
},
"title": "Article 1",
"subtitle": "Subtitle for article 1",
"body": [
{
"type": "paragraph",
"value": (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, "
"sed do eiusmod tempor incididunt ut labore et dolore magna"
" aliqua. Ut enim ad minim veniam, quis nostrud exercitation"
" ullamco laboris nisi ut aliquip ex ea commodo consequat."
" Duis aute irure dolor in reprehenderit in voluptate velit"
" esse cillum dolore eu fugiat Nonea pariatur. Excepteur sint"
" occaecat cupidatat non proident, sunt in culpa qui officia "
"deserunt mollit anim id est laborum.")
}
],
"tags": [
"tag1",
"tag2",
"tag3"
],
"commenting_state": "D",
"commenting_open_time": "2017-07-05T19:23:00Z",
"commenting_close_time": "2016-09-29T20:00:00Z",
"social_media_title": "Social Media",
"social_media_description": "social_media_description",
"social_media_image": {
"id": 2,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://localhost:8000/api/v2/images/2/"
},
"title": "car"
},
"related_sections": [
{
"id": 1,
"meta": {
"type": "core.ArticlePageRelatedSections"
},
"section": {
"id": 23,
"meta": {
"type": "core.SectionPage",
"detail_url": "http://localhost:8000/api/v2/pages/23/"
},
"title": "Another Section"
}
},
{
"id": 2,
"meta": {
"type": "core.ArticlePageRelatedSections"
},
"section": {
"id": 26,
"meta": {
"type": "core.SectionPage",
"detail_url": "http://localhost:8000/api/v2/pages/26/"
},
"title": "Sub Section Test"
}
}
],
"featured_in_latest": False,
"featured_in_latest_start_date": "2017-07-16T18:17:04.642291Z",
"featured_in_latest_end_date": "2017-07-16T18:17:04.642291Z",
"featured_in_section": False,
"featured_in_section_start_date": "2017-07-16T18:17:04.642291Z",
"featured_in_section_end_date": "2017-07-16T18:17:04.642291Z",
"featured_in_homepage": True,
"featured_in_homepage_start_date": "2017-07-16T18:17:04.642291Z",
"featured_in_homepage_end_date": "2017-07-16T18:17:04.642291Z",
"feature_as_hero_article": False,
"promote_date": "2017-07-16T18:17:04.642291Z",
"demote_date": "2018-07-16T18:17:04.642291Z",
"metadata_tags": [
"metadata_tag1",
"metadata_tag2"
],
"latest_revision_created_at": "2017-07-16T18:17:04.642291Z",
"image": {
"id": 1,
"meta": {
"type": "wagtailimages.Image",
"detail_url": "http://localhost:8000/api/v2/images/1/"
},
"title": "Mountain"
},
"reaction_questions": [
{
"id": 1,
"meta": {
"type": "core.ArticlePageReactionQuestions"
},
"reaction_question": {
"id": 37,
"meta": {
"type": "core.ReactionQuestion",
"detail_url": "http://localhost:8000/api/v2/pages/37/"
},
"title": "How does this make you feel?"
}
},
{
"id": 2,
"meta": {
"type": "core.ArticlePageReactionQuestions"
},
"reaction_question": {
"id": 38,
"meta": {
"type": "core.ReactionQuestion",
"detail_url": "http://localhost:8000/api/v2/pages/38/"
},
"title": "What colour was the dog?"
}
}
],
"nav_tags": [
{
"id": 1,
"meta": {
"type": "core.ArticlePageTags"
},
"tag": {
"id": 35,
"meta": {
"type": "core.Tag",
"detail_url": "http://localhost:8000/api/v2/pages/35/"
},
"title": "NAV TAG 1"
}
}
],
"recommended_articles": [
{
"id": 1,
"meta": {
"type": "core.ArticlePageRecommendedSections"
},
"recommended_article": {
"id": 27,
"meta": {
"type": "core.ArticlePage",
"detail_url": "http://localhost:8000/api/v2/pages/27/"
},
"title": "Article that is nested"
}
},
{
"id": 2,
"meta": {
"type": "core.ArticlePageRecommendedSections"
},
"recommended_article": {
"id": 22,
"meta": {
"type": "core.ArticlePage",
"detail_url": "http://localhost:8000/api/v2/pages/22/"
},
"title": "Article to Import 1"
}
}
],
"go_live_at": "2017-08-01T13:23:00Z",
"expire_at": "2017-08-31T13:23:00Z",
"expired": False,
}
ARTICLE_PAGE_RESPONSE_STREAM_FIELDS = {
"id": 92,
"meta": {
"type": "core.ArticlePage",
"detail_url": "http://localhost:9000/api/v2/pages/92/",
"html_url": "http://localhost:9000/sections/test-section/article-all-stream-fields/", # noqa
"slug": "article-all-stream-fields",
"show_in_menus": False,
"seo_title": "",
"search_description": "",
"first_published_at": "2017-08-23T08:56:14.263738Z",
"parent": {
"id": 11,
"meta": {
"type": "core.SectionPage",
"detail_url": "http://localhost:9000/api/v2/pages/11/",
"html_url": "http://localhost:9000/sections/test-section/"
},
"title": "Test Section"
},
"children": None,
"translations": [],
"main_language_children": None
},
"title": "ARTICLE WITH ALL THE STREAM FIELDS",
"subtitle": "",
"body": [
{
"type": "heading",
"value": "test heading"
},
{
"type": "paragraph",
"value": "test paragraph"
},
{
"type": "image",
"value": 297
},
{
"type": "list",
"value": [
"list item 1",
"list item 2",
"list item 3"
]
},
{
"type": "numbered_list",
"value": [
"numbered list 1",
"numbered list 2",
"numbered list 3"
]
},
{
"type": "page",
"value": 48
}
],
"tags": [],
"commenting_state": None,
"commenting_open_time": None,
"commenting_close_time": None,
"social_media_title": "",
"social_media_description": "",
"social_media_image": None,
"related_sections": [],
"featured_in_latest": False,
"featured_in_latest_start_date": None,
"featured_in_latest_end_date": None,
"featured_in_section": False,
"featured_in_section_start_date": None,
"featured_in_section_end_date": None,
"featured_in_homepage": False,
"featured_in_homepage_start_date": None,
"featured_in_homepage_end_date": None,
"feature_as_hero_article": False,
"promote_date": None,
"demote_date": | |
<reponame>jjlee0802cu/pylol
# MIT License
#
# Copyright (c) 2020 MiscellaneousStuff
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Features used for ML"""
import collections
from absl import logging
import random
import six
import enum
import numpy as np
from pylol.lib import actions
from pylol.lib import named_array
from pylol.lib import point
from pylol.lib import common
class ChampUnit(enum.IntEnum):
"""Indices into the `ChampUnit` observation."""
user_id = 0
position_x = 1
position_y = 2
facing_angle = 3
max_hp = 4
current_hp = 5
hp_regen = 6
max_mp = 7
current_mp = 8
mp_regen = 9
attack_damage = 10
attack_speed = 11
alive = 12
level = 13
armor = 14
mr = 15
current_gold = 16
current_xp = 17
death_count = 18
kill_count = 19
move_speed = 20
my_team = 21
neutal = 22
dx_to_me = 23
dy_to_me = 24
distance_to_me = 25
q_cooldown = 26
q_level = 27
w_cooldown = 28
w_level = 29
e_cooldown = 30
e_level = 31
r_cooldown = 32
r_level = 33
sum_1_cooldown = 34
sum_2_cooldown = 35
class AgentInterfaceFormat(object):
"""Observation and action interface format specific to a particular agent."""
def __init__(self, feature_dimensions=None):
"""Initializer.
Args:
feature_dimensions: Feature layer `Dimension`.
"""
if not feature_dimensions:
raise ValueError("Must set feature dimensions")
self._feature_dimensions = feature_dimensions
self._action_dimensions = feature_dimensions
@property
def feature_dimensions(self):
return self._feature_dimensions
@property
def action_dimensions(self):
return self._action_dimensions
def parse_agent_interface_format(feature_map=None, feature_move_range=None):
"""Creates an AgentInterfaceFormat object from keyword args.
Convenient when using dictionaries or command-line arguments for config.
Note that the feature_* and rgb_* properties define the respective spatial
observation dimensions and accept:
* None or 0 to disable that spatial observation.
* A single int for a square observation with that side length.
* A (int, int) tuple for a rectangular (width, height) observation.
Args:
feature_map: Map dimensions.
feature_move_range: Range of movement (divided by 100) within which the agent can move.
Returns:
An `AgentInterfaceFormat` object.
Raises:
ValueError: If an invalid parameter is specified.
"""
if feature_map and feature_move_range:
feature_dimensions = Dimensions(feature_map,
feature_move_range)
return AgentInterfaceFormat(feature_dimensions=feature_dimensions)
def _to_point(dims):
"""Convert (width, height) or size -> point.Point."""
assert dims
if isinstance(dims, (tuple, list)):
if len(dims) != 2:
raise ValueError(
"A two element tuple or list is expected here, got {}.".format(dims))
else:
width = int(dims[0])
height = int(dims[1])
if width <= 0 or height <= 0:
raise ValueError("Must specify +ve dims, got {}.".format(dims))
else:
return point.Point(width, height)
else:
size = int(dims)
if size <= 0:
raise ValueError(
"Must specify a +ve value for size, got {}.".format(dims))
else:
return point.Point(size, size)
class Dimensions(object):
"""Map dimensions configuration.
Map dimensions must be specified. Sizes must be positive.
Attributes:
map: A (width, height) int tuple or a single int to be used.
move_range: A (width, height) int tuple or a single int to be used.
"""
def __init__(self, map=None, move_range=None):
if not map:
raise ValueError("map must be set, map={}".format(map))
if not move_range:
raise ValueError("move_range must be set, move_range={}".format(move_range))
self._map = _to_point(map)
self._move_range = _to_point(move_range)
@property
def map(self):
return self._map
@property
def move_range(self):
return self._move_range
def __repr__(self):
return "Dimensions(map={}, move_range={})".format(self._map, self._move_range)
def __eq__(self, other):
return (isinstance(other, Dimensions) and self._map == other._map and
self._move_range == other._move_range)
def __ne__(self, other):
return not self == other
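# Hedged usage sketch (example values only): Dimensions accepts either a
# single int (square) or a (width, height) tuple, converted via _to_point above.
#
#   dims = Dimensions(map=(16, 16), move_range=8)
#   dims.map         # point.Point(16, 16)
#   dims.move_range  # point.Point(8, 8)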
class Features(object):
"""Render feature layers from GameServer observation into numpy arrays."""
def __init__(self, agent_interface_format=None):
"""Initialize a Features instance matching the specified interface format.
Args:
agent_interface_format: See the documentation for `AgentInterfaceFormat`.
"""
if not agent_interface_format:
raise ValueError("Please specify agent_interface_format")
self._agent_interface_format = agent_interface_format
aif = self._agent_interface_format
self._valid_functions = _init_valid_functions(aif.action_dimensions)
def observation_spec(self):
"""The observation spec for the League of Legends v4.20 environment.
Returns:
The dict of observation names
"""
aif = self._agent_interface_format
obs_spec = named_array.NamedDict({
"my_id": (0,),
"game_time": (0,),
"me_unit": (len(ChampUnit),),
"enemy_unit": (len(ChampUnit),)
})
"""
if aif.feature_dimensions:
obs_spec["feature_map"] = (len(MAP_FEATURES),
aif.feature_dimensions.map.x,
aif.feature_dimensions.map.y)
obs_spec["feature_move_range"] = (len(MOVE_RANGE_FEATURES),
aif.feature_dimensions.move_range.x,
aif.feature_dimensions.move_range.y)
"""
obs_spec["available_actions"] = (0,)
return obs_spec
def action_spec(self):
"""The action space pretty complicated and fills the ValidFunctions."""
return self._valid_functions
def available_actions(self, obs):
"""Return the list of available action ids."""
available_actions = set()
obs_available_actions = obs["available_actions"]
# print("AVAILABLE ACTIONS:", obs_available_actions)
if obs_available_actions["can_no_op"]: available_actions.add(0)
if obs_available_actions["can_move"]: available_actions.add(1)
if obs_available_actions["can_spell_0"] or \
obs_available_actions["can_spell_1"] or \
obs_available_actions["can_spell_2"] or \
obs_available_actions["can_spell_3"] or \
obs_available_actions["can_spell_4"] or \
obs_available_actions["can_spell_5"]:
available_actions.add(2)
"""
print("FUNCTIONS AVAILABLE:", actions.FUNCTIONS_AVAILABLE)
for i, func in six.iteritems(actions.FUNCTIONS_AVAILABLE):
if func.avail_fn(obs):
available_actions.add(i)
"""
return list(available_actions)
def transform_action(self, obs, func_call):
"""Transform an agent-style action to one that GameServer can consume.
Args:
obs: an observation extracted from redis from the previous step.
func_call: a `FunctionCall` to be turned into a redis action.
Returns:
a corresponding `common.Action`.
Raises:
ValueError: if the action doesn't pass validation.
"""
"""
if isinstance(func_call, common.Action):
return func_call
"""
# Valid id?
func_id = func_call.function
try:
func = actions.FUNCTIONS[func_id]
except KeyError:
raise ValueError("Invalid function: %s." % func_id)
# Correct number of args?
if len(func_call.arguments) != len(func.args):
raise ValueError(
"Wrong number of arguments for function: %s, got: %s" % (
func, func_call.arguments))
# Args are valid?
aif = self._agent_interface_format
# print("FUNC:", func.args, func_call.arguments)
for t, arg in zip(func.args, func_call.arguments):
if t.name in ("position"):
sizes = aif.action_dimensions.map
elif t.name in ("move_range"):
sizes = aif.action_dimensions.move_range
else:
sizes = t.sizes
if len(sizes) != len(arg):
raise ValueError(
"Wrong number of values for argument of %s, got: %s" % (
func, func_call.arguments))
for s, a in zip(sizes, arg):
if not (np.all(0 <= a) and np.all(a < s)):
raise ValueError("Argument is out of range for %s, got: %s" % (
func, func_call.arguments))
# Convert them to python types
kwargs = {type_.name: type_.fn(a)
for type_, a in zip(func.args, func_call.arguments)}
# Get the issuers user_id from the observation
for champ_unit in obs["champ_units"]:
if champ_unit["distance_to_me"] == 0.0:
kwargs["user_id"] = champ_unit["user_id"]
# redis magic...
lol_action = common.Action()
kwargs["action"] = lol_action
actions.FUNCTIONS[func_id].function_type(**kwargs)
return lol_action
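# Hypothetical usage sketch: the function id, argument layout and observation
# shape below are illustrative assumptions, not taken from the real action set.
#
#   func_call = actions.FunctionCall(function=1, arguments=[[4000, 4000]])
#   lol_action = features.transform_action(obs["observation"], func_call)
#   # lol_action is a common.Action carrying the issuer's user_id and the kwargs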
def transform_obs(self, obs):
"""Render some GameServer observations into something an agent can handle."""
# Get agents user id
me_id = None
enemy_id = None
me_unit = None
enemy_unit = None
for champ_unit in obs["observation"]["champ_units"]:
if champ_unit["distance_to_me"] == 0.0:
me_id = champ_unit["user_id"]
me_unit = champ_unit
else:
enemy_id = champ_unit["user_id"]
enemy_unit = champ_unit
# Observations of champion units in the game
champ_units = [named_array.NamedNumpyArray([
champ_unit["user_id"],
champ_unit["position"]["X"],
champ_unit["position"]["Y"],
champ_unit["facing_angle"],
champ_unit["max_hp"],
champ_unit["current_hp"],
champ_unit["hp_regen"],
champ_unit["max_mp"],
champ_unit["current_mp"],
champ_unit["mp_regen"],
champ_unit["attack_damage"],
champ_unit["attack_speed"] ,
champ_unit["alive"],
champ_unit["level"],
champ_unit["armor"],
champ_unit["mr"],
champ_unit["current_gold"],
champ_unit["current_xp"],
champ_unit["death_count"],
champ_unit["kill_count"],
champ_unit["move_speed"],
champ_unit["my_team"],
champ_unit["neutal"],
champ_unit["dx_to_me"],
champ_unit["dy_to_me"],
champ_unit["distance_to_me"],
champ_unit["q_cooldown"],
champ_unit["q_level"],
champ_unit["w_cooldown"],
champ_unit["w_level"],
champ_unit["e_cooldown"],
champ_unit["e_level"],
champ_unit["r_cooldown"],
champ_unit["r_level"],
champ_unit["sum_1_cooldown"],
champ_unit["sum_2_cooldown"],
], names=ChampUnit, dtype=np.float32) for champ_unit in obs["observation"]["champ_units"]]
# Observation output
out = named_array.NamedDict({
"my_id": float(me_id),
"game_time": float(obs["observation"]["game_time"]),
"me_unit": champ_units[0 if me_id == 1 else 1],
"enemy_unit": champ_units[0 if enemy_id == 1 else 1]
})
# Print original observation
# print("transform_obs().obs:", obs)
# Set available actions
out["available_actions"] = np.array(
self.available_actions(obs["observation"]), dtype=np.int32)
return out
def _init_valid_functions(action_dimensions):
"""Initialize ValidFunctions and set up the callbacks."""
sizes = {
"position": tuple(int(i) for i in action_dimensions.map),
"move_range": tuple(int(i) for i in action_dimensions.move_range)
}
types = actions.Arguments(*[
actions.ArgumentType.spec(t.id, t.name, sizes.get(t.name, t.sizes))
for t in actions.TYPES])
functions = actions.Functions([
actions.Function.spec(f.id, f.name, tuple(types[t.id] for t in f.args))
for f in actions.FUNCTIONS])
return actions.ValidActions(types, functions)
def features_from_game_info(agent_interface_format=None):
"""Construct a Features | |
BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
classifier pretrained on top of the hidden state associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
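# Minimal standalone illustration (not part of the model) of the additive
# attention-mask trick used in forward() above:
#
#   import torch
#   mask = torch.tensor([[1, 1, 0]])              # 1 = real token, 0 = padding
#   ext = mask.unsqueeze(1).unsqueeze(2).float()  # [batch, 1, 1, seq] for broadcasting
#   ext = (1.0 - ext) * -10000.0                  # 0.0 for real tokens, -1e4 for padding
#   # adding `ext` to the raw attention scores before the softmax drives the
#   # attention weights of padded positions to (near) zero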
class BertModelUt(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
classifier pretrained on top of the hidden state associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModelUt, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoderUt(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class BertModelUtNoEmbedding(BertPreTrainedModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
`encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
classifier pretrained on top of the hidden state associated to the first character of the
input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = modeling.BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = modeling.BertModel(config=config)
all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertModelUtNoEmbedding, self).__init__(config)
if config.use_mask_embeddings:
self.mask_embedding = nn.Embedding(2, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.mask_token_number = config.mask_token_number
self.use_mask_embeddings = True
else:
self.use_mask_embeddings = False
self.encoder = BertEncoderUt(config)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, | |
# DETECT OPERATIONS ON CONSTANTS, like x = y * 1024.23
# DETECT OPERATIONS ON UNKNOWN VARIABLES
if left_token and not left_units and (not left_token.isDimensionless): #and right_units:
if left_token.isNumber: # TRUE FOR EITHER INT OR FLOAT
self.is_unit_propagation_based_on_constants = True
token.is_unit_propagation_based_on_constants = True
elif left_token.str[:4] not in ['M_PI']:
self.is_unit_propagation_based_on_unknown_variable = True
token.is_unit_propagation_based_on_unknown_variable = True
if right_token and not right_units and (not right_token.isDimensionless): #and left_units:
if right_token.isNumber: # TRUE FOR EITHER INT OR FLOAT
self.is_unit_propagation_based_on_constants = True
token.is_unit_propagation_based_on_constants = True
elif right_token.str[:4] not in ['M_PI']:
self.is_unit_propagation_based_on_unknown_variable = True
token.is_unit_propagation_based_on_unknown_variable = True
do_propagate = False
else:
# ADDITION / SUBTRACTION - ATTEMPT TO MERGE
if token.str in ['+', '-', '+=', '-=']:
new_units = self.merge_units_by_set_union(left_units, right_units)
# MULTIPLICATION / DIVISION
elif token.str in ['*', '*=', '/', '/=']:
all_unit_dicts_from_multiplication = []
for unit_dict_left in left_units:
for unit_dict_right in right_units:
result_units = self.apply_multiplication_to_unit_dicts(
unit_dict_left,
unit_dict_right,
token.str)
if result_units:
all_unit_dicts_from_multiplication.append(result_units)
for u in all_unit_dicts_from_multiplication:
if u not in new_units:
new_units.append(u)
if new_units == []:
token.isDimensionless = True
elif {'wrong': 0.0} in new_units:
new_units = []
# UNIFY TOKENS FROM CHILDREN WITH CURRENT TOKEN
if new_units != token.units:
#if token.units and (not new_units):
# return
token.units = new_units
self.was_some_unit_changed = True
if do_propagate:
self.apply_propagation_status_to_token(token, left_token, right_token)
def apply_multiplication_to_unit_dicts(self, unit_dict_left, unit_dict_right, op):
''' APPLIES MULTIPLICATION AND DIVISION TO UNIT DICTIONARIES
(BY ADDING EXPONENTS)
input: unit_dict_left dictionary of units, eg: {'m':1, 's':-1}
unit_dict_right same
op string representing mult or div operators
returns: new dict with resulting units eg: {'m':2, 's':-2}
'''
#if unit_dict_left == {'radian': 1.0} and unit_dict_right == {'degree_360_unit': 1.0}:
# return {'degree_360': 1.0}
#elif unit_dict_left == {'degree_360_unit': 1.0} and unit_dict_right == {'radian': 1.0}:
# return {'degree_360': 1.0}
#elif unit_dict_left == {'degree_360': 1.0} and unit_dict_right == {'radian_unit': 1.0}:
# return {'radian': 1.0}
#elif unit_dict_left == {'radian_unit': 1.0} and unit_dict_right == {'degree_360': 1.0}:
# return {'radian': 1.0}
if unit_dict_right == {'degree_360_unit': 1.0}:
if unit_dict_left == {'radian': 1.0}:
return {'degree_360': 1.0}
elif unit_dict_left == {'second': -1.0}:
return {'degree_360': 1.0, 'second': -1.0}
else:
return {'wrong': 0.0}
elif unit_dict_left == {'degree_360_unit': 1.0}:
if unit_dict_right == {'radian': 1.0}:
return {'degree_360': 1.0}
elif unit_dict_right == {'second': -1.0}:
return {'degree_360': 1.0, 'second': -1.0}
else:
return {'wrong': 0.0}
elif unit_dict_right == {'radian_unit': 1.0}:
if unit_dict_left == {'degree_360': 1.0}:
return {'radian': 1.0}
elif unit_dict_left == {'degree_360': 1.0, 'second': -1.0}:
return {'second': -1.0}
else:
return {'wrong': 0.0}
elif unit_dict_left == {'radian_unit': 1.0}:
if unit_dict_right == {'degree_360': 1.0}:
return {'radian': 1.0}
elif unit_dict_right == {'degree_360': 1.0, 'second': -1.0}:
return {'second': -1.0}
else:
return {'wrong': 0.0}
return_dict = {}
# SPECIAL HANDLING FOR RADIANS AND QUATERNIONS
if unit_dict_left in self.my_symbol_helper.dimensionless_units \
and unit_dict_right in self.my_symbol_helper.dimensionless_units:
# SPECIAL CASE BOTH ARE RADIANS. CLOSED UNDER MULTIPLICATION
if op in ['*', '*=']:
return copy.deepcopy(unit_dict_left)
elif unit_dict_left in self.my_symbol_helper.dimensionless_units:
# DON'T PROPAGATE RADIANS
unit_dict_left = {}
elif unit_dict_right in self.my_symbol_helper.dimensionless_units:
# DON'T PROPAGATE RADIANS
unit_dict_right = {}
return_dict = copy.deepcopy(unit_dict_left)
for unit in unit_dict_right:
if unit in return_dict:
# BOTH DICTS HAVE SAME UNIT
if op in ['*', '*=']:
# ADD OF EXPONENT IS MULT
return_dict[unit] += unit_dict_right[unit]
elif op in ['/', '/=']:
# SUBTRACTION OF EXPONENT IS DIV
return_dict[unit] -= unit_dict_right[unit]
else:
# ONLY ONE SIDE HAS UNIT
if op in ['*', '*=']:
# COPY - THIS IS NOT A REFERENCE
return_dict[unit] = unit_dict_right[unit]
elif op in ['/', '/=']:
return_dict[unit] = -1 * unit_dict_right[unit]
# FILTER OUT ZEROs - UNITLESS
return_dict = {k: v for k, v in return_dict.items() if v != 0}
return return_dict
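# Worked example (illustrative unit dictionaries) of the exponent arithmetic above:
#
#   self.apply_multiplication_to_unit_dicts({'m': 1.0, 's': -1.0}, {'s': 1.0}, '*')
#   # -> {'m': 1.0}             ('s' exponents sum to zero and are filtered out)
#   self.apply_multiplication_to_unit_dicts({'m': 1.0}, {'s': 1.0}, '/')
#   # -> {'m': 1.0, 's': -1.0}  (division negates the right-hand exponents)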
def merge_units_by_set_union(self, left_units, right_units):
''' input: {left, right}_units - lists of unit dictionaries.
result: set union of inputs
'''
if self.perform_intersection:
return self.merge_units_by_set_intersection(left_units, right_units)
new_units = []
if left_units and right_units:
if left_units == right_units:
# COPY EITHER ONE BECAUSE SAME
new_units = copy.deepcopy(left_units)
else:
new_units = copy.deepcopy(left_units)
for r in right_units:
if r not in new_units:
new_units.append(copy.deepcopy(r))
else:
if left_units:
new_units = copy.deepcopy(left_units)
elif right_units:
new_units = copy.deepcopy(right_units)
return new_units
def merge_units_by_set_intersection(self, left_units, right_units):
''' input: {left, right}_units - lists of unit dictionaries.
result: set intersection of inputs
'''
new_units = []
if self.perform_union_when_empty:
if not (left_units and right_units):
if left_units:
new_units = copy.deepcopy(left_units)
elif right_units:
new_units = copy.deepcopy(right_units)
self.perform_union_when_empty = False
return new_units
for r in right_units:
if r in left_units:
new_units.append(copy.deepcopy(r))
self.perform_union_when_empty = False
return new_units
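# Worked example (illustrative values; assumes self.perform_intersection is
# False for the union call and True for the intersection call):
#
#   left = [{'m': 1.0}]; right = [{'m': 1.0}, {'s': -1.0}]
#   self.merge_units_by_set_union(left, right)         # -> [{'m': 1.0}, {'s': -1.0}]
#   self.merge_units_by_set_intersection(left, right)  # -> [{'m': 1.0}]
#   # when perform_union_when_empty is set and one side is empty, the
#   # intersection falls back to a copy of the non-empty side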
def update_units_from_to(self, from_token, to_token):
is_updated = False
for u in from_token.units:
if u not in to_token.units:
to_token.units.append(u)
self.was_some_unit_changed = True
is_updated = True
self.apply_propagation_status_to_token(to_token, from_token, None)
return is_updated
def apply_propagation_status_to_token(self, token, left_token, right_token):
''' APPLIES PROPAGATION WEAKENING FROM CHILD TOKENS TO PARENT TOKEN
input: token, left_token, right_token
returns: none. side effect can change variables on 'token'
'''
left_constants = False
left_unknown_variable = False
right_constants = False
right_unknown_variable = False
if left_token:
if left_token.isKnown:
token.isKnown = True
if left_token.is_unit_propagation_based_on_weak_inference:
token.is_unit_propagation_based_on_weak_inference = True
left_constants = left_token.is_unit_propagation_based_on_constants
left_unknown_variable = left_token.is_unit_propagation_based_on_unknown_variable
if right_token:
if right_token.isKnown:
token.isKnown = True
if right_token.is_unit_propagation_based_on_weak_inference:
token.is_unit_propagation_based_on_weak_inference = True
right_constants = right_token.is_unit_propagation_based_on_constants
right_unknown_variable = right_token.is_unit_propagation_based_on_unknown_variable
token.is_unit_propagation_based_on_constants = left_constants or right_constants
token.is_unit_propagation_based_on_unknown_variable = left_unknown_variable or right_unknown_variable
def apply_known_status_to_token(self, token, left_token, right_token, both=False):
''' APPLIES STATUS FROM CHILD TOKENS TO PARENT TOKEN
input: token, left_token, right_token
returns: none. side effect can change variables on 'token'
'''
left_known = False
right_known = False
if left_token:
left_known = left_token.isKnown
if right_token:
right_known = right_token.isKnown
if both:
token.isKnown = left_known and right_known
else:
token.isKnown = left_known or right_known
def find_first_variable_token(self, token, left_token, right_token):
''' ASSUME THIS WILL BE CALLED ON LHS OF ASSIGNMENT.
SHOULD ONLY BE ONE VARIABLE
'''
if token.variable:
self.return_value_list.append(token)
def recurse_and_collect_string(self, token):
''' SIMPLE RECURSION FOR LEFT-HAND SIDE OF ASSIGNMENT STATEMENTS
'''
# PROTECT FROM NULL
if not token:
return ''
my_return_string = ''
# LEFT RECURSE
if token.astOperand1:
my_return_string += self.recurse_and_collect_string(token.astOperand1)
# SELF
my_return_string += token.str
# RIGHT RECURSE
if token.astOperand2:
my_return_string += self.recurse_and_collect_string(token.astOperand2)
if token.str == '[':
my_return_string += ']'
return my_return_string
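# Illustrative sketch (hypothetical token tree): for the assignment
# `pose.x[i] = 1.0;` the left-hand-side subtree walks to a flat string.
#
#   lhs_string = self.recurse_and_collect_string(assignment_token.astOperand1)
#   # lhs_string == "pose.x[i]"  (the closing ']' is appended at the '[' token)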
def collect_function_param_units_and_decorate_function(self, token):
''' COLLECT AVAILABLE UNITS ON FUNCTION PARAMETERS FROM AST
AND ADD THEM TO FUNCTION OBJECT
'''
if token.function:
function_args_units = []
if token.astParent.astOperand2:
if token.astParent.astOperand2.str == ',':
# MULTIPLE ARGS
function_args_units = self.recurse_on_function_args(token.astParent.astOperand2)
else:
# ONLY ONE ARG
if token.astParent.astOperand2.units:
function_args_units = [(token.astParent.astOperand2, token.astParent.astOperand2.units)]
if function_args_units:
if len(function_args_units) != len(token.function.arg_units):
return
for i, u in enumerate(function_args_units):
(t, un) = u
new_dict = {'linenr': int(token.linenr),
'units': un,
'token': t,
'function': token}
if new_dict not in token.function.arg_units[i]:
token.function.arg_units[i].append(new_dict)
def recurse_on_function_args(self, comma_token):
''' RECURSIVELY COLLECT UNITS FOR FUNCTION ARGUMENTS
input: cppcheck token object - str=','
output: list of units in arg order
'''
my_return_list = []
if comma_token.astOperand1:
if comma_token.astOperand1.str == ',':
left_branch_units_list = self.recurse_on_function_args(comma_token.astOperand1)
if left_branch_units_list:
for u in left_branch_units_list:
my_return_list.append(u)
else:
my_return_list.append((comma_token.astOperand1, comma_token.astOperand1.units))
if comma_token.astOperand2:
my_return_list.append((comma_token.astOperand2, comma_token.astOperand2.units))
return my_return_list
def clean_float_string(self, float_string):
''' REMOVES 'f', 'l', OR 'u' SUFFIX CHARACTERS FROM A NUMBER LITERAL, like '180.0f' becomes '180.0'
input: string
output string
'''
float_string = float_string.lower()
float_string = float_string.replace('f','')
float_string = float_string.replace('l','')
float_string = float_string.replace('u','')
return float_string
def apply_latest_arg_units(self, token, left_token, right_token):
if token.variable and token.variable.isArgument:
fun = token.scope.function
for argnr, arg in fun.argument.items():
if arg == token.variable:
break
arg_var_nr = int(argnr)  # argnr is a numeric string key; int() avoids eval
unit_list = fun.arg_units[arg_var_nr - 1]
if unit_list:
units = (unit_list[-1])['units']
for u in units:
if u not in token.units:
token.units.append(u)
self.was_some_unit_changed = True
self.found_units_in_this_tree = True
def propagate_units_over_arg_expr(self, root_token):
tw = TreeWalker(None)
# ASSUME THE TOKENS COME BACK AS A SORTED LIST
break_point = 1000
i=0
tw.is_unit_propagation_based_on_constants = False
tw.is_unit_propagation_based_on_unknown_variable = False
# RESET TOKENS
tw.generic_recurse_and_apply_function(root_token, tw.reset_tokens)
# RESET THE TREE WALKER'S LINE NUMBERS
tw.reset_min_max_line_numbers()
# FIND THE MIN AND MAX LINE NUMBERS IN THIS AST : USED TO PROTECT LOOP FROM MULTI-LINE STATEMENTS
tw.generic_recurse_and_apply_function(root_token, tw.find_min_max_line_numbers)
# APPLY ARG UNITS
tw.generic_recurse_and_apply_function(root_token, tw.apply_latest_arg_units)
# APPLY UNITS TO KNOWN SYMBOLS
tw.generic_recurse_and_apply_function(root_token, tw.apply_known_symbols_units)
# APPLY UNITS TO CONVERSION FACTORS
tw.generic_recurse_and_apply_function(root_token, tw.apply_conversion_factor_units)
# APPLY DIMENSIONLESS UNITS
tw.generic_recurse_and_apply_function(root_token, tw.apply_dimensionless_units)
# CONTINUE TO ATTEMPT CHANGES UNTIL CHANGES CEASE
while tw.was_some_unit_changed:
if i>break_point:
s = "BREAKING WHILE LOOP AT %d" % | |
= self._check_feature_params()
num_frames = len(self.x)
self.STFT = P.zeros((self.nfft//2+1, num_frames), dtype='complex')
self.win = P.ones(self.wfft) if self.window=='rect' else P.np.sqrt(P.hanning(self.wfft))
x = P.zeros(self.wfft)
buf_frames = 0
for k, nex in enumerate(self.x):
x = self._shift_insert(x, nex, self.nhop)
if self.nhop >= self.wfft - k*self.nhop : # align buffer on start of audio
self.STFT[:,k-buf_frames]=P.rfft(self.win*x, self.nfft).T
else:
buf_frames+=1
self.STFT = self.STFT / self.nfft
self._fftfrqs = P.arange(0, self.nfft//2+1) * self.sample_rate/float(self.nfft)
self._have_stft=True
if self.verbosity:
print("Extracted STFT: nfft=%d, hop=%d" %(self.nfft, self.nhop))
self.inverse=self._istftm
self.X = abs(self.STFT)
if not self.magnitude:
self.X = self.X**2
return True
def _phase_map(self):
self.dphi = (2*P.pi * self.nhop * P.arange(self.nfft//2+1)) / self.nfft
A = P.diff(P.angle(self.STFT),1) # Complete Phase Map
U = P.c_[P.angle(self.STFT[:,0]), A - P.atleast_2d(self.dphi).T ]
U = U - P.np.round(U/(2*P.pi))*2*P.pi
self.dPhi = U
return U
# The missing phase reconstruction algorithm in Bregman
def _phase_rec(self, Phi_hat_rel):
"""
::
reconstruct relative phases extracted with self._phase_map()
"""
rp,dp = Phi_hat_rel, self.dphi
self.Phi_hat = (rp + P.np.tile(P.np.atleast_2d(dp).T, rp.shape[1])).cumsum(1)
return self.Phi_hat
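# Sketch of the intended round trip (assumes self.STFT is already populated):
#
#   rel = self._phase_map()     # per-frame phase deviations from the expected hop advance
#   phi = self._phase_rec(rel)  # cumulative sum of (rel + dphi) rebuilds absolute phase
#   # up to 2*pi wrapping, phi approximately tracks P.angle(self.STFT)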
def _pvoc(self, X_hat, Phi_hat=None, R=None):
"""
::
a phase vocoder - time-stretch
inputs:
X_hat - estimate of signal magnitude
[Phi_hat] - estimate of signal phase
[R] - resynthesis hop ratio
output:
updates self.X_hat with modified complex spectrum
"""
N = self.nfft
W = self.wfft
H = self.nhop
R = 1.0 if R is None else R
dphi = (2*P.pi * H * P.arange(N//2+1)) / N
print("Phase Vocoder Resynthesis...", N, W, H, R)
A = P.angle(self.STFT) if Phi_hat is None else Phi_hat
phs = A[:,0]
self.X_hat = []
n_cols = X_hat.shape[1]
t = 0
while P.floor(t) < n_cols:
tf = t - P.floor(t)
idx = P.arange(2)+int(P.floor(t))
idx[1] = n_cols-1 if t >= n_cols-1 else idx[1]
Xh = X_hat[:,idx]
Xh = (1-tf)*Xh[:,0] + tf*Xh[:,1]
self.X_hat.append(Xh*P.exp( 1j * phs))
U = A[:,idx[1]] - A[:,idx[0]] - dphi
U = U - P.np.round(U/(2*P.pi))*2*P.pi
phs += (U + dphi)
t += P.randn()*P.sqrt(PVOC_VAR*R) + R # 10% variance
self.X_hat = P.np.array(self.X_hat).T
def _pvoc2(self, X_hat, Phi_hat=None, R=None):
"""
::
alternate (batch) implementation of phase vocoder - time-stretch
inputs:
X_hat - estimate of signal magnitude
[Phi_hat] - estimate of signal phase
[R] - resynthesis hop ratio
output:
updates self.X_hat with modified complex spectrum
"""
N, W, H = self.nfft, self.wfft, self.nhop
R = 1.0 if R is None else R
dphi = P.atleast_2d((2*P.pi * H * P.arange(N//2+1)) / N).T
print("Phase Vocoder Resynthesis...", N, W, H, R)
A = P.angle(self.STFT) if Phi_hat is None else Phi_hat
# per-bin phase advance between frames, wrapped to the principal value
U = P.diff(A, 1) - dphi
U = U - P.np.round(U/(2*P.pi))*2*P.pi
n_cols = X_hat.shape[1]
t = P.arange(0, n_cols-1, R)
tf = t - P.floor(t)
idx = P.floor(t).astype(int)
# accumulate the unwrapped phase, then read it at the integer frame positions
phs = P.c_[A[:, 0], U + dphi].cumsum(1)[:, idx]
# linearly interpolate magnitudes between neighbouring analysis frames
Xh = (1-tf)*X_hat[:, idx] + tf*X_hat[:, idx+1]
self.X_hat = Xh * P.exp(1j * phs)
def _overlap_add(self, X, usewin=True, resamp=None):
nfft = self.nfft
nhop = self.nhop
if resamp is None:
x = P.zeros((X.shape[0] - 1)*nhop + nfft)
for k in range(X.shape[0]):
x[ k * nhop : k * nhop + nfft ] += X[k] * self.win
else:
rfft = int(P.np.round(nfft * resamp))
x = P.zeros((X.shape[0] - 1)*nhop + rfft)
for k in range(X.shape[0]):
x[ k * nhop : k * nhop + rfft ] += sig.resample(X[k],rfft) * self.win
return x
def _istftm(self, X_hat=None, Phi_hat=None, pvoc=False, usewin=True, resamp=None):
"""
::
Inverse short-time Fourier transform magnitude. Make a signal from a |STFT| transform.
Uses phases from self.STFT if Phi_hat is None.
Inputs:
X_hat - N/2+1 magnitude STFT [None=abs(self.STFT)]
Phi_hat - N/2+1 phase STFT [None=exp(1j*angle(self.STFT))]
pvoc - whether to use phase vocoder [False]
usewin - whether to use overlap-add [False]
Returns:
x_hat - estimated signal
"""
if not self._have_stft:
return None
X_hat = self.X if X_hat is None else P.np.abs(X_hat)
if pvoc:
self._pvoc(X_hat, Phi_hat, pvoc)
else:
Phi_hat = P.angle(self.STFT) if Phi_hat is None else Phi_hat
self.X_hat = X_hat * P.exp( 1j * Phi_hat )
if usewin:
if self.win is None:
self.win = P.ones(self.wfft) if self.window=='rect' else P.np.sqrt(P.hanning(self.wfft))
if len(self.win) != self.nfft:
self.win = P.r_[self.win, P.np.zeros(self.nfft-self.wfft)]
if len(self.win) != self.nfft:
raise error.BregmanError("features_base.Features._istftm(): assertion failed len(self.win)==self.nfft")
else:
self.win = P.ones(self.nfft)
if resamp:
self.win = sig.resample(self.win, int(P.np.round(self.nfft * resamp)))
fp = self._check_feature_params()
self.x_hat = self._overlap_add(P.real(P.irfft(self.X_hat.T)), usewin=usewin, resamp=resamp)
if self.verbosity:
print("Extracted iSTFTM->self.x_hat")
return self.x_hat
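# Hedged usage sketch (the constructor call is hypothetical; the real feature
# parameters come from _check_feature_params in the full class):
#
#   F = Features()                             # assume default STFT parameters
#   F._stft()                                  # forward transform -> F.STFT, F.X
#   x_hat = F._istftm()                        # resynthesis with the original phases
#   y_hat = F._istftm(pvoc=1.5, usewin=True)   # phase-vocoder time stretch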
def _power(self):
if not self._stft():
return False
fp = self._check_feature_params()
self.POWER=(P.absolute(self.STFT)**2).sum(0)
self._have_power=True
if self.verbosity:
print("Extracted POWER")
self.X=self.POWER
return True
def _cqft(self):
"""
::
Constant-Q Fourier transform.
"""
if not self._power():
return False
fp = self._check_feature_params()
if self.intensify:
self._cqft_intensified()
else:
self._make_log_freq_map()
self.CQFT=P.sqrt(P.array(P.mat(self.Q)*P.mat(P.absolute(self.STFT)**2)))
self._is_intensified=False
self._have_cqft=True
if self.verbosity:
print("Extracted CQFT: intensified=%d" %self._is_intensified)
self.inverse=self.icqft
self.X=self.CQFT
return True
def icqft(self, V_hat=None, **kwargs):
V_hat = self.X if V_hat is None else V_hat
return self._icqft(V_hat, **kwargs)
def _icqft(self, V_hat, **kwargs):
"""
::
Inverse constant-Q Fourier transform. Make a signal from a constant-Q transform.
"""
if not self._have_cqft:
return None
fp = self._check_feature_params()
X_hat = P.dot(self.Q.T, V_hat)
if self.verbosity:
print("iCQFT->X_hat")
self._istftm(X_hat, **kwargs)
return self.x_hat
def _cqft_intensified(self):
"""
::
Constant-Q Fourier transform using only max abs(STFT) value in each band
"""
if not self._have_stft:
if not self._stft():
return False
self._make_log_freq_map()
r,b=self.Q.shape
b,c=self.STFT.shape
self.CQFT=P.zeros((r,c))
for i in P.arange(r):
for j in P.arange(c):
self.CQFT[i,j] = (self.Q[i,:]*P.absolute(self.STFT[:,j])).max()
self._have_cqft=True
self._is_intensified=True
self.inverse=self.icqft
self.X=self.CQFT
return True
def _mfcc(self):
"""
::
DCT of the Log magnitude CQFT
"""
fp = self._check_feature_params()
if not self._cqft():
return False
self._make_dct()
AA = P.log10(P.clip(self.CQFT,0.0001,self.CQFT.max()))
self.MFCC = P.dot(self.DCT, AA)
self._have_mfcc=True
if self.verbosity:
print("Extracted MFCC: lcoef=%d, ncoef=%d, intensified=%d" %(self.lcoef, self.ncoef, self.intensify))
n=self.ncoef
l=self.lcoef
self.X=self.MFCC[l:l+n,:]
return True
def _lcqft(self):
"""
::
Apply low-lifter to MFCC and invert to CQFT domain
"""
fp = self._check_feature_params()
if not self._mfcc():
return False
a,b = self.CQFT.shape
a = (a-1)*2
n=self.ncoef
l=self.lcoef
AA = self.MFCC[l:l+n,:] # apply Lifter
self.LCQFT = 10**P.dot( self.DCT[l:l+n,:].T, AA )
self._have_lcqft=True
if self.verbosity:
print("Extracted LCQFT: lcoef=%d, ncoef=%d, intensified=%d" %(self.lcoef, self.ncoef, self.intensify))
self.inverse=self.icqft
self.X=self.LCQFT
return True
def _hcqft(self):
"""
::
Apply high lifter to MFCC and invert to CQFT domain
"""
fp = self._check_feature_params()
if not self._mfcc():
return False
a,b = self.CQFT.shape
n=self.ncoef
l=self.lcoef
AA = self.MFCC[n+l:a,:] # apply Lifter
self.HCQFT=10**P.dot( self.DCT[n+l:a,:].T, AA)
self._have_hcqft=True
if self.verbosity:
print("Extracted HCQFT: lcoef=%d, ncoef=%d, intensified=%d" %(self.lcoef, self.ncoef, self.intensify))
self.inverse=self.icqft
self.X=self.HCQFT
return True
def _chroma(self):
"""
::
Chromagram, like 12-BPO CQFT modulo one octave. Energy is folded onto first octave.
"""
fp = self._check_feature_params()
lo = self.lo
self.lo = 63.5444 # set to quarter tone below C
if not self._cqft():
return False
self.lo = lo # restore original lo edge
a,b = self.CQFT.shape
complete_octaves = a//self.nbpo # integer division, number of complete octaves
#complete_octave_bands = complete_octaves * self.nbpo
# column-major ordering, like a spectrogram, is in FORTRAN order
self.CHROMA=P.zeros((self.nbpo,b))
for k in P.arange(complete_octaves):
self.CHROMA += self.CQFT[k*self.nbpo:(k+1)*self.nbpo,:]
self.CHROMA = (self.CHROMA / complete_octaves)
self._have_chroma=True
if self.verbosity:
print("Extracted CHROMA: intensified=%d" %self.intensify)
self.inverse=self.ichroma
self.X=self.CHROMA
return True
def _chroma_hcqft(self):
"""
::
Chromagram formed by high-pass liftering in cepstral domain, then usual self.nbpo-BPO folding.
"""
fp = self._check_feature_params()
if not self._hcqft():
return False
a,b = self.HCQFT.shape
complete_octaves = a//self.nbpo # integer division, number of complete octaves
#complete_octave_bands = complete_octaves * self.nbpo
# column-major ordering, like a spectrogram, is in FORTRAN order
self.CHROMA=P.zeros((self.nbpo,b))
for k in P.arange(complete_octaves):
self.CHROMA += self.HCQFT[k*self.nbpo:(k+1)*self.nbpo,:]
self.CHROMA/= complete_octaves
self._have_chroma=True
if self.verbosity:
print("Extracted HCQFT CHROMA: lcoef=%d, ncoef=%d, intensified=%d" %(self.lcoef, self.ncoef, self.intensify))
self.inverse=self.ichroma
self.X=self.CHROMA
return True
def _ichroma(self, V, **kwargs):
"""
::
Inverse chromagram transform. Make a signal from a folded constant-Q transform.
"""
if not (self._have_hcqft or self._have_cqft):
return None
a,b = self.HCQFT.shape if self._have_hcqft else self.CQFT.shape
complete_octaves = a//self.nbpo # integer division, number of complete octaves
if P.remainder(a,self.nbpo):
complete_octaves += 1
X = P.repeat(V, complete_octaves, 0)[:a,:] # truncate if necessary
X /= X.max()
X *= P.atleast_2d(P.linspace(1,0,X.shape[0])).T # weight the spectrum
self.x_hat = self._icqft(X, **kwargs)
return self.x_hat
def ichroma(self, V, **kwargs):
"""
::
Inverse chromagram transform. Make a signal from a folded constant-Q transform.
"""
return self._ichroma(V, **kwargs)
def _extract_onsets(self):
"""
::
The simplest onset detector in the world: power envelope derivative zero crossings +/-
"""
fp = self._check_feature_params()
if not self._have_power:
return None
dd = P.diff(P.r_[0,self.POWER])
| |
self.display_message('capturing background image')
def OnLoadBgImage(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
per_cam_panel = self.per_cam_panel[cam_id]
ctrl = xrc.XRCCTRL(per_cam_panel,"TAKE_BG_IMAGE_ALLOW_WHEN_SAVING")
if not ctrl.GetValue() and cam_id in self.trx_writer:
dlg = wx.MessageDialog(self.wx_parent,
'Saving data - cannot take background image',
'FlyTrax error',
wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
return
# open dialog
dlg = wx.FileDialog( self.wx_parent, "open backsub output")
doit=False
try:
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetFilename()
dirname = dlg.GetDirectory()
doit=True
finally:
dlg.Destroy()
if doit:
filename = os.path.join(dirname,fname)
if filename.endswith('.mat'):
load_dict = scipy.io.loadmat( filename, squeeze_me=True )
newbg = load_dict['bg_img']
if 0:
print 'newbg.shape',newbg.shape
print 'newbg.dtype',newbg.dtype
print 'newbg.min()',newbg.min()
print 'newbg.max()',newbg.max()
newbg = numpy.clip(newbg,0,255)
newbg = newbg.astype(numpy.uint8)
else:
raise ValueError("don't know how to open background image file")
newbg_fi = FastImage.asfastimage(newbg)
self.load_bg_image[cam_id].put(newbg_fi)
self.display_message('background image loaded')
def OnEnableOngoingBg(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
if widget.GetValue():
per_cam_panel = self.per_cam_panel[cam_id]
ctrl = xrc.XRCCTRL(per_cam_panel,"TAKE_BG_IMAGE_ALLOW_WHEN_SAVING")
if not ctrl.GetValue() and cam_id in self.trx_writer:
dlg = wx.MessageDialog(self.wx_parent,
'Saving data - cannot take background image',
'FlyTrax error',
wx.OK | wx.ICON_ERROR
)
dlg.ShowModal()
dlg.Destroy()
return
self.enable_ongoing_bg_image[cam_id].set()
else:
self.enable_ongoing_bg_image[cam_id].clear()
self.display_message('enabled ongoing background image updates')
def OnSetNumBackgroundImages(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
val = int(widget.GetValue())
self.ongoing_bg_image_num_images[cam_id].set(val)
def OnSetBackgroundUpdateInterval(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
val = int(widget.GetValue())
self.ongoing_bg_image_update_interval[cam_id].set(val)
def OnTrackingEnabled(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
if widget.IsChecked():
self.tracking_enabled[cam_id].set()
else:
self.tracking_enabled[cam_id].clear()
def OnUseROI2(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
if widget.IsChecked():
self.use_roi2[cam_id].set()
else:
self.use_roi2[cam_id].clear()
def OnClearThreshold(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
newvalstr = widget.GetValue()
try:
newval = float(newvalstr)
except ValueError:
pass
else:
# only touch realtime_analysis in other thread
self.clear_threshold_value[cam_id] = newval
self.new_clear_threshold[cam_id].set()
self.display_message('set clear threshold %s'%str(newval))
event.Skip()
def OnDiffThreshold(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
newvalstr = widget.GetValue()
try:
newval = int(newvalstr)
except ValueError:
pass
else:
# only touch realtime_analysis in other thread
self.diff_threshold_value[cam_id] = newval
self.new_diff_threshold[cam_id].set()
self.display_message('set difference threshold %d'%newval)
event.Skip()
def OnHistoryBuflen(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
newvalstr = widget.GetValue()
try:
newval = int(newvalstr)
except ValueError:
pass
else:
self.history_buflen_value[cam_id] = newval
event.Skip()
def OnViewMaskMode(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
if widget.IsChecked():
self.view_mask_mode[cam_id].set()
else:
self.view_mask_mode[cam_id].clear()
def OnScrollMaskXCenter(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
self.new_mask_x_center[cam_id] = widget.GetValue()
def OnScrollMaskYCenter(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
self.new_mask_y_center[cam_id] = widget.GetValue()
def OnScrollMaskRadius(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
self.new_mask_radius[cam_id] = widget.GetValue()
def _process_frame_extract_roi( self, points, roi_sz,
fibuf, buf_offset, full_frame_live,
max_frame_size):
# called from self.process_frame()
n_pts = len(points)
if n_pts:
pt = points[0] # only operate on first point
(x,y,area,slope,eccentricity)=pt[:5]
# find software ROI
rx = int(round(x))
x0=rx-roi_sz.w//2
x1=x0+roi_sz.w
if x0<0:
x0=0
elif x1>=max_frame_size.w:
x0=max_frame_size.w-roi_sz.w
x1=max_frame_size.w
ry = int(round(y))
y0=ry-roi_sz.h//2
y1=y0+roi_sz.h
if y0<0:
y0=0
elif y1>=max_frame_size.h:
y0=max_frame_size.h-roi_sz.h
y1=max_frame_size.h
else: # no points found
x0 = 0
y0 = 0
# extract smaller image for saving
if fibuf.size == max_frame_size:
software_roi = fibuf.roi( x0, y0, roi_sz )
else:
# make sure we can do software_roi size live view
# 1. make full frame "live view"
l,b = buf_offset
roi_into_full_frame = full_frame_live.roi( l,b, fibuf.size )
fibuf.get_8u_copy_put(roi_into_full_frame,fibuf.size)
# 2. get software_roi view into it
tmp = full_frame_live.roi( x0, y0, roi_sz )
# 3. make copy of software_roi
software_roi = tmp.get_8u_copy(tmp.size) # copy
return software_roi, (x0,y0)
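# Worked example (illustrative numbers) of the ROI clamping above: with a
# 640-pixel-wide frame and a 64-pixel ROI, a detection at x=630 gives
# x0 = 630 - 32 = 598 and x1 = 662 > 640, so x0 is pulled back to
# 640 - 64 = 576 and the ROI stays fully inside the frame.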
def OnMaxNPoints(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
newvalstr = widget.GetValue()
try:
newval = int(newvalstr)
except ValueError:
pass
else:
self.max_num_points[cam_id].set( newval )
event.Skip()
def OnROI2Radius(self,event):
widget = event.GetEventObject()
cam_id = self.widget2cam_id[widget]
newvalstr = widget.GetValue()
try:
newval = int(newvalstr)
except ValueError:
pass
else:
self.realtime_analyzer[cam_id].roi2_radius = newval
event.Skip()
def process_frame(self,cam_id,buf,buf_offset,timestamp,framenumber):
if self.pixel_format[cam_id]=='YUV422':
buf = imops.yuv422_to_mono8( numpy.asarray(buf) ) # convert
elif not (self.pixel_format[cam_id].startswith('MONO8') or
self.pixel_format[cam_id].startswith('RAW8')):
warnings.warn("flytrax plugin incompatible with data format")
return [], []
self.ticks_since_last_update[cam_id] += 1
start = time.time()
# this is called in realtime thread
fibuf = FastImage.asfastimage(buf) # FastImage view of image data (hardware ROI)
l,b = buf_offset
lbrt = l, b, l+fibuf.size.w-1, b+fibuf.size.h-1
view_mask_mode = self.view_mask_mode[cam_id]
newmask = self.newmask[cam_id]
clear_and_take_bg_image = self.clear_and_take_bg_image[cam_id]
load_bg_image = self.load_bg_image[cam_id]
enable_ongoing_bg_image = self.enable_ongoing_bg_image[cam_id]
data_queue = self.data_queues[cam_id] # transfers images and data to non-realtime thread
wxmessage_queue = self.wxmessage_queues[cam_id] # transfers and messages to non-realtime thread
new_clear_threshold = self.new_clear_threshold[cam_id]
new_diff_threshold = self.new_diff_threshold[cam_id]
realtime_analyzer = self.realtime_analyzer[cam_id]
realtime_analyzer.roi = lbrt # hardware ROI
max_frame_size = self.max_frame_size[cam_id]
full_frame_live = self.full_frame_live[cam_id]
running_mean_im = self.running_mean_im[cam_id]
display_active = self.display_active[cam_id]
history_buflen_value = self.history_buflen_value[cam_id]
use_roi2 = self.use_roi2[cam_id].isSet()
use_cmp = False # use variance-based background subtraction/analysis
draw_points = []
draw_linesegs = []
if newmask.is_new_value_waiting():
(x,y,radius), newmask_im = newmask.get_nowait()
self.realtime_mask_x_center[cam_id]=x
self.realtime_mask_y_center[cam_id]=y
self.realtime_mask_radius[cam_id]=radius
newmask_fi = FastImage.asfastimage( newmask_im )
assert newmask_fi.size == max_frame_size
mask_im = realtime_analyzer.get_image_view('mask')
newmask_fi.get_8u_copy_put(mask_im, max_frame_size)
del mask_im # don't leak view into other thread
if view_mask_mode.isSet():
w,h = max_frame_size.w, max_frame_size.h
x=self.realtime_mask_x_center.get(cam_id, w//2)
y=self.realtime_mask_y_center.get(cam_id, h//2)
radius=self.realtime_mask_radius.get(cam_id, max(w,h))
N = 64
theta = numpy.arange(N)*2*math.pi/N
xdraw = x+numpy.cos(theta)*radius
ydraw = y+numpy.sin(theta)*radius
for i in range(N-1):
draw_linesegs.append(
(xdraw[i],ydraw[i],xdraw[i+1],ydraw[i+1]))
draw_linesegs.append(
(xdraw[-1],ydraw[-1],xdraw[0],ydraw[0]))
if clear_and_take_bg_image.isSet():
# this is a view we write into
# copy current image into background image
running_mean8u_im = realtime_analyzer.get_image_view('mean')
if running_mean8u_im.size == fibuf.size:
srcfi = fibuf
bg_copy = srcfi.get_8u_copy(max_frame_size)
else:
srcfi = FastImage.FastImage8u(max_frame_size)
srcfi_roi = srcfi.roi(l,b,fibuf.size)
fibuf.get_8u_copy_put(srcfi_roi, fibuf.size)
bg_copy = srcfi # newly created, no need to copy
srcfi.get_32f_copy_put( running_mean_im, max_frame_size )
srcfi.get_8u_copy_put( running_mean8u_im, max_frame_size )
# make copy available for saving data
self.bg_update_lock.acquire()
self.full_bg_image[cam_id] = bg_copy
self.bg_update_lock.release()
clear_and_take_bg_image.clear()
del srcfi, bg_copy # don't pollute namespace
if not load_bg_image.empty():
try:
while 1:
new_bg_image_fastimage = load_bg_image.get_nowait()
except Queue.Empty:
pass
# this is a view we write into
# copy current image into background image
running_mean8u_im = realtime_analyzer.get_image_view('mean')
if running_mean8u_im.size == new_bg_image_fastimage.size:
new_bg_image_fastimage.get_32f_copy_put( running_mean_im, max_frame_size )
new_bg_image_fastimage.get_8u_copy_put( running_mean8u_im, max_frame_size )
# make copy available for saving data
self.bg_update_lock.acquire()
self.full_bg_image[cam_id] = new_bg_image_fastimage
self.bg_update_lock.release()
else:
wxmessage_queue.put( ('new background image must be same size as image frame',
'FlyTrax error',
wx.OK | wx.ICON_ERROR) )
if enable_ongoing_bg_image.isSet():
update_interval = self.ongoing_bg_image_update_interval[cam_id].get()
if self.ticks_since_last_update[cam_id]%update_interval == 0:
alpha = 1.0/self.ongoing_bg_image_num_images[cam_id].get()
if running_mean_im.size == fibuf.size:
srcfi = fibuf
else:
# This is inelegant (it creates a full frame), but it works.
srcfi = FastImage.FastImage8u(max_frame_size)
srcfi_roi = srcfi.roi(l,b,fibuf.size)
fibuf.get_8u_copy_put(srcfi_roi, fibuf.size)
running_mean8u_im = realtime_analyzer.get_image_view('mean')
# maintain running average
running_mean_im.toself_add_weighted( srcfi, max_frame_size, alpha )
# maintain 8bit unsigned background image
running_mean_im.get_8u_copy_put( running_mean8u_im, max_frame_size )
# make copy available for saving data
bg_copy = running_mean_im.get_8u_copy(running_mean_im.size)
self.bg_update_lock.acquire()
self.full_bg_image[cam_id] = bg_copy
self.bg_update_lock.release()
if new_clear_threshold.isSet():
nv = self.clear_threshold_value[cam_id]
realtime_analyzer.clear_threshold = nv
#print 'set clear',nv
new_clear_threshold.clear()
if new_diff_threshold.isSet():
nv = self.diff_threshold_value[cam_id]
realtime_analyzer.diff_threshold = nv
#print 'set diff',nv
new_diff_threshold.clear()
n_pts = 0
points = []
if self.tracking_enabled[cam_id].isSet():
max_num_points = self.max_num_points[cam_id].get_nowait()
realtime_analyzer.max_num_points = max_num_points # AttributeError here means old realtime_image_analysis
points = realtime_analyzer.do_work(fibuf,
timestamp, framenumber, use_roi2,
use_cmp=use_cmp)
self.roi_sz_lock.acquire()
try:
roi_display_sz = self.roi_display_sz
roi_save_fmf_sz = self.roi_save_fmf_sz
roi_send_sz = self.roi_send_sz
finally:
self.roi_sz_lock.release()
roi_display, (display_x0, display_y0) = self._process_frame_extract_roi(
points, roi_display_sz,
fibuf, buf_offset, full_frame_live,
max_frame_size)
roi_save_fmf, (fmf_save_x0, fmf_save_y0) = self._process_frame_extract_roi(
points, roi_save_fmf_sz,
fibuf, buf_offset, full_frame_live,
max_frame_size)
roi_send, (udp_send_x0, udp_send_y0) = self._process_frame_extract_roi(
points, roi_send_sz,
fibuf, buf_offset, full_frame_live,
max_frame_size)
n_pts = len(points)
if n_pts:
pt = points[0] # only operate on first point
(x,y,area,slope,eccentricity)=pt[:5]
# put data in queue for saving
numdata = (x,y, slope, fmf_save_x0, fmf_save_y0, timestamp, area, framenumber)
data = (roi_save_fmf, numdata)
data_queue.put( data )
runthread_remote_host = self.get_downstream_hosts()
n_downstream_hosts = len(runthread_remote_host)
if self.last_n_downstream_hosts != n_downstream_hosts:
ctrl = xrc.XRCCTRL(self.frame,'SEND_TO_IP_ENABLED')
ctrl.SetLabel('send data to %d receiver(s)'%n_downstream_hosts)
self.last_n_downstream_hosts = n_downstream_hosts
# send data over UDP
if self.send_over_ip.isSet() and runthread_remote_host is not None:
# XXX send these data
a = (roi_send, udp_send_x0, udp_send_y0)
databuf1 = struct.pack('cBLdfffffBBII',
'e',cam_no,framenumber,timestamp,
x,y,area,slope,eccentricity,
roi_send.size.w,roi_send.size.h,
udp_send_x0,udp_send_y0)
databuf2 = numpy.array(roi_send).tostring()
databuf = databuf1 + databuf2
#assert len(databuf2) == roi_send.size.w * roi_send.size.h
#print 'transmitting %d bytes to %d hosts'%(
# len(databuf),len(self.runthread_remote_host))
for remote_host in runthread_remote_host:
self.sockobj.sendto( databuf, remote_host)
if BGROI_IM:
running_mean8u_im = realtime_analyzer.get_image_view('mean')
tmp = running_mean8u_im.roi( display_x0, display_y0, self.roi_display_sz )
bgroi = tmp.get_8u_copy(tmp.size) # copy
if DEBUGROI_IM:
absdiff_im = realtime_analyzer.get_image_view('absdiff')
tmp = absdiff_im.roi( display_x0, display_y0, self.roi_display_sz )
debugroi = tmp.get_8u_copy(tmp.size) # copy
# live display of image
if display_active.isSet():
self.image_update_lock.acquire()
self.last_image = roi_display
self.last_image_cam_id = cam_id
self.last_image_format = 'MONO8' # forced in this routine
self.last_points = points
self.roi_display_lb = display_x0,display_y0
self.new_image = True
if BGROI_IM:
self.bgroi_image = bgroi
if DEBUGROI_IM:
self.debugroi_image = debugroi
self.image_update_lock.release()
if n_pts:
self.last_detection_list.append((x,y))
else:
self.last_detection_list.append(None)
if len(self.last_detection_list) > history_buflen_value:
self.last_detection_list = self.last_detection_list[-history_buflen_value:]
draw_points.extend([p for p | |
# -*- coding: utf-8 -*-
from urllib.parse import quote_plus, urlencode
from plexapi import media, utils
from plexapi.exceptions import NotFound
class ArtUrlMixin(object):
""" Mixin for Plex objects that can have a background artwork url. """
@property
def artUrl(self):
""" Return the art url for the Plex object. """
art = self.firstAttr('art', 'grandparentArt')
return self._server.url(art, includeToken=True) if art else None
class ArtMixin(ArtUrlMixin):
""" Mixin for Plex objects that can have background artwork. """
def arts(self):
""" Returns list of available :class:`~plexapi.media.Art` objects. """
return self.fetchItems('/library/metadata/%s/arts' % self.ratingKey, cls=media.Art)
def uploadArt(self, url=None, filepath=None):
""" Upload a background artwork from a url or filepath.
Parameters:
url (str): The full URL to the image to upload.
filepath (str): The full file path to the image to upload.
"""
if url:
key = '/library/metadata/%s/arts?url=%s' % (self.ratingKey, quote_plus(url))
self._server.query(key, method=self._server._session.post)
elif filepath:
key = '/library/metadata/%s/arts?' % self.ratingKey
data = open(filepath, 'rb').read()
self._server.query(key, method=self._server._session.post, data=data)
def setArt(self, art):
""" Set the background artwork for a Plex object.
Parameters:
art (:class:`~plexapi.media.Art`): The art object to select.
"""
art.select()
class BannerUrlMixin(object):
""" Mixin for Plex objects that can have a banner url. """
@property
def bannerUrl(self):
""" Return the banner url for the Plex object. """
banner = self.firstAttr('banner')
return self._server.url(banner, includeToken=True) if banner else None
class BannerMixin(BannerUrlMixin):
""" Mixin for Plex objects that can have banners. """
def banners(self):
""" Returns list of available :class:`~plexapi.media.Banner` objects. """
return self.fetchItems('/library/metadata/%s/banners' % self.ratingKey, cls=media.Banner)
def uploadBanner(self, url=None, filepath=None):
""" Upload a banner from a url or filepath.
Parameters:
url (str): The full URL to the image to upload.
                filepath (str): The full file path to the image to upload.
"""
if url:
key = '/library/metadata/%s/banners?url=%s' % (self.ratingKey, quote_plus(url))
self._server.query(key, method=self._server._session.post)
elif filepath:
key = '/library/metadata/%s/banners?' % self.ratingKey
data = open(filepath, 'rb').read()
self._server.query(key, method=self._server._session.post, data=data)
def setBanner(self, banner):
""" Set the banner for a Plex object.
Parameters:
banner (:class:`~plexapi.media.Banner`): The banner object to select.
"""
banner.select()
class PosterUrlMixin(object):
""" Mixin for Plex objects that can have a poster url. """
@property
def thumbUrl(self):
""" Return the thumb url for the Plex object. """
        thumb = self.firstAttr('thumb', 'parentThumb', 'grandparentThumb')
return self._server.url(thumb, includeToken=True) if thumb else None
@property
def posterUrl(self):
""" Alias to self.thumbUrl. """
return self.thumbUrl
class PosterMixin(PosterUrlMixin):
""" Mixin for Plex objects that can have posters. """
def posters(self):
""" Returns list of available :class:`~plexapi.media.Poster` objects. """
return self.fetchItems('/library/metadata/%s/posters' % self.ratingKey, cls=media.Poster)
def uploadPoster(self, url=None, filepath=None):
""" Upload a poster from a url or filepath.
Parameters:
url (str): The full URL to the image to upload.
                filepath (str): The full file path to the image to upload.
"""
if url:
key = '/library/metadata/%s/posters?url=%s' % (self.ratingKey, quote_plus(url))
self._server.query(key, method=self._server._session.post)
elif filepath:
key = '/library/metadata/%s/posters?' % self.ratingKey
data = open(filepath, 'rb').read()
self._server.query(key, method=self._server._session.post, data=data)
def setPoster(self, poster):
""" Set the poster for a Plex object.
Parameters:
poster (:class:`~plexapi.media.Poster`): The poster object to select.
"""
poster.select()
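# Minimal usage sketch for the artwork/poster mixins above (the `plex` server
# connection and the `movie` item are assumed and not defined in this module):
#
#   movie = plex.library.section('Movies').get('Big Buck Bunny')
#   movie.uploadPoster(url='https://example.com/poster.jpg')
#   available = movie.posters()
#   if available:
#       movie.setPoster(available[0])
#   print(movie.posterUrl)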
class SplitMergeMixin(object):
""" Mixin for Plex objects that can be split and merged. """
def split(self):
""" Split duplicated Plex object into separate objects. """
key = '/library/metadata/%s/split' % self.ratingKey
return self._server.query(key, method=self._server._session.put)
def merge(self, ratingKeys):
""" Merge other Plex objects into the current object.
Parameters:
ratingKeys (list): A list of rating keys to merge.
"""
if not isinstance(ratingKeys, list):
ratingKeys = str(ratingKeys).split(',')
key = '%s/merge?ids=%s' % (self.key, ','.join([str(r) for r in ratingKeys]))
return self._server.query(key, method=self._server._session.put)
class UnmatchMatchMixin(object):
""" Mixin for Plex objects that can be unmatched and matched. """
def unmatch(self):
""" Unmatches metadata match from object. """
key = '/library/metadata/%s/unmatch' % self.ratingKey
self._server.query(key, method=self._server._session.put)
def matches(self, agent=None, title=None, year=None, language=None):
""" Return list of (:class:`~plexapi.media.SearchResult`) metadata matches.
Parameters:
agent (str): Agent name to be used (imdb, thetvdb, themoviedb, etc.)
title (str): Title of item to search for
year (str): Year of item to search in
language (str) : Language of item to search in
Examples:
1. video.matches()
2. video.matches(title="something", year=2020)
3. video.matches(title="something")
4. video.matches(year=2020)
5. video.matches(title="something", year="")
6. video.matches(title="", year=2020)
7. video.matches(title="", year="")
1. The default behaviour in Plex Web = no params in plexapi
2. Both title and year specified by user
3. Year automatically filled in
4. Title automatically filled in
5. Explicitly searches for title with blank year
6. Explicitly searches for blank title with year
7. I don't know what the user is thinking... return the same result as 1
            For 2 to 7, the agent and language are automatically filled in
"""
key = '/library/metadata/%s/matches' % self.ratingKey
params = {'manual': 1}
if agent and not any([title, year, language]):
params['language'] = self.section().language
params['agent'] = utils.getAgentIdentifier(self.section(), agent)
else:
if any(x is not None for x in [agent, title, year, language]):
if title is None:
params['title'] = self.title
else:
params['title'] = title
if year is None:
params['year'] = self.year
else:
params['year'] = year
params['language'] = language or self.section().language
if agent is None:
params['agent'] = self.section().agent
else:
params['agent'] = utils.getAgentIdentifier(self.section(), agent)
key = key + '?' + urlencode(params)
data = self._server.query(key, method=self._server._session.get)
return self.findItems(data, initpath=key)
def fixMatch(self, searchResult=None, auto=False, agent=None):
""" Use match result to update show metadata.
Parameters:
auto (bool): True uses first match from matches
False allows user to provide the match
searchResult (:class:`~plexapi.media.SearchResult`): Search result from
~plexapi.base.matches()
agent (str): Agent name to be used (imdb, thetvdb, themoviedb, etc.)
"""
key = '/library/metadata/%s/match' % self.ratingKey
if auto:
autoMatch = self.matches(agent=agent)
if autoMatch:
searchResult = autoMatch[0]
else:
raise NotFound('No matches found using this agent: (%s:%s)' % (agent, autoMatch))
elif not searchResult:
raise NotFound('fixMatch() requires either auto=True or '
'searchResult=:class:`~plexapi.media.SearchResult`.')
params = {'guid': searchResult.guid,
'name': searchResult.name}
data = key + '?' + urlencode(params)
self._server.query(data, method=self._server._session.put)
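# Hedged usage sketch for UnmatchMatchMixin (the `show` item below is assumed
# to be a matchable library item such as a Show or Movie):
#
#   show.unmatch()
#   results = show.matches(agent='thetvdb', title='Firefly', year=2002)
#   if results:
#       show.fixMatch(searchResult=results[0])
#   # or let Plex pick the first result automatically:
#   show.fixMatch(auto=True, agent='thetvdb')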
class CollectionMixin(object):
""" Mixin for Plex objects that can have collections. """
def addCollection(self, collections, locked=True):
""" Add a collection tag(s).
Parameters:
collections (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('collection', collections, locked=locked)
def removeCollection(self, collections, locked=True):
""" Remove a collection tag(s).
Parameters:
collections (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('collection', collections, locked=locked, remove=True)
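# The tag mixins below (country, director, genre, label) follow the same
# add/remove pattern via self._edit_tags as CollectionMixin; a hedged example
# for an assumed `movie` item:
#
#   movie.addCollection(['Favourites', 'Rewatch'])
#   movie.removeCollection(['Rewatch'], locked=False)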
class CountryMixin(object):
""" Mixin for Plex objects that can have countries. """
def addCountry(self, countries, locked=True):
""" Add a country tag(s).
Parameters:
countries (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('country', countries, locked=locked)
def removeCountry(self, countries, locked=True):
""" Remove a country tag(s).
Parameters:
countries (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('country', countries, locked=locked, remove=True)
class DirectorMixin(object):
""" Mixin for Plex objects that can have directors. """
def addDirector(self, directors, locked=True):
""" Add a director tag(s).
Parameters:
directors (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('director', directors, locked=locked)
def removeDirector(self, directors, locked=True):
""" Remove a director tag(s).
Parameters:
directors (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('director', directors, locked=locked, remove=True)
class GenreMixin(object):
""" Mixin for Plex objects that can have genres. """
def addGenre(self, genres, locked=True):
""" Add a genre tag(s).
Parameters:
genres (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('genre', genres, locked=locked)
def removeGenre(self, genres, locked=True):
""" Remove a genre tag(s).
Parameters:
genres (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('genre', genres, locked=locked, remove=True)
class LabelMixin(object):
""" Mixin for Plex objects that can have labels. """
def addLabel(self, labels, locked=True):
""" Add a label tag(s).
Parameters:
labels (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
"""
self._edit_tags('label', labels, locked=locked)
def removeLabel(self, labels, locked=True):
""" Remove a label tag(s).
Parameters:
labels (list): List of strings.
locked (bool): True (default) to lock the field, False to unlock the field.
| |
optional, default is
None
column 0 is 'labelID'
column 1 is 'labelName'
Returns
-------
table : dataframe (Pandas)
The table contains all the features extracted from the spectrogram.
        Keys are {'labelID', 'labelName', 'cyear', 'cmonth', 'cday', 'chour',
        'cmin', 'csecond', 'cfreq', 'shp1', 'shp2', ..., 'shpn'}
"""
table = create_csv(shape_features, centroid_features, label_features)
table.to_csv(path_or_buf=filename,sep=',',mode=mode,header=True, index=False)
return table
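# Hedged usage sketch: assuming the helper above is exposed as
# save_csv(filename, shape_features, centroid_features, label_features=None, mode='w'),
# the features computed for one recording could be written to a csv file with:
#
#   table = save_csv('features.csv', shape, centroids, label_features=None)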
def get_features_wrapper(im, ext, display=False, savefig=None, save_csv=None,
**kwargs):
"""
Computes shape of 2D signal (image or spectrogram) at multiple resolutions
using 2D Gabor filters
Parameters
----------
im: 2D array
Input image to process (spectrogram)
ext : list of scalars [left, right, bottom, top], optional, default: None
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
display : boolean, optional, default is False
Display the signal if True
savefig : string, optional, default is None
Root filename (with full path) is required to save the figures. Postfix
is added to the root filename.
save_csv : string, optional, default is None
Root filename (with full path) is required to save the table. Postfix
is added to the root filename.
**kwargs, optional. This parameter is used by plt.plot and savefig functions
figsize : tuple of integers,
width, height in inches.
title : string,
title of the figure
xlabel : string, optional,
label of the horizontal axis
ylabel : string, optional,
label of the vertical axis
cmap : string or Colormap object,
See https://matplotlib.org/examples/color/colormaps_reference.html
in order to get all the existing colormaps
examples: 'hsv', 'hot', 'bone', 'tab20c', 'jet', 'seismic',
'viridis'...
vmin, vmax : scalar
`vmin` and `vmax` are used in conjunction with norm to normalize
luminance data. Note if you pass a `norm` instance, your
settings for `vmin` and `vmax` will be ignored.
ext : scalars (left, right, bottom, top),
The location, in data-coordinates, of the lower-left and
upper-right corners. If `None`, the image is positioned such that
the pixel centers fall on zero-based (row, column) indices.
dpi : integer, optional
Dot per inch.
For printed version, choose high dpi (i.e. dpi=300) => slow
For screen version, choose low dpi (i.e. dpi=96) => fast
format : string, optional
Format to save the figure
... and more, see matplotlib
Returns
-------
table : dataframe (Pandas)
The table contains all the features extracted from the spectrogram.
        Keys are {'labelID', 'labelName', 'cyear', 'cmonth', 'cday', 'chour',
        'cmin', 'csecond', 'cfreq', 'shp0', 'shp1', ..., 'shpn'}
params_shape: 2D numpy structured array
Parameters used to calculate 2D gabor kernels.
params_shape has 5 fields (theta, freq, bandwidth, gamma, pyr_level)
Each row corresponds to a shape (shp1, shp2...shpn)
"""
freq=kwargs.pop('freq',(0.75, 0.5))
ntheta=kwargs.pop('ntheta',2)
bandwidth=kwargs.pop('bandwidth', 1)
gamma=kwargs.pop('gamma', 1)
npyr=kwargs.pop('npyr', 3)
date=kwargs.pop('date', None)
im_rois=kwargs.pop('im_rois', None)
label_features=kwargs.pop('label_features', None)
params, kernels = filter_bank_2d_nodc(frequency=freq, ntheta=ntheta,
bandwidth=bandwidth,gamma=gamma,
display=display, savefig=savefig)
# multiresolution image filtering (Gaussian pyramids)
im_filtlist = filter_multires(im, ext, kernels, params, npyr=npyr,
display=display, savefig=savefig)
# Extract shape features for each roi
params_shape, shape = shape_features(im_filtlist=im_filtlist,
params = params,
im_rois=im_rois)
# Extract centroids features for each roi
centroid_features = centroid(im=im, ext=ext, date=date, im_rois=im_rois)
    if save_csv:
        # NOTE: `save_csv` is the string argument here, so calling it below
        # fails with a TypeError at runtime; the intent appears to be the
        # csv-writing helper defined above, which would need a differently
        # named parameter (e.g. csv_filename) to be reachable.
table = save_csv(save_csv+'.csv',
shape, centroid_features, label_features,
display=display)
else:
table = create_csv(shape, centroid_features, label_features,
display=display)
return table, params_shape
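# Hedged usage sketch for get_features_wrapper (the spectrogram `im`, its
# extent `ext` and the ROI mask `im_rois` are assumed to come from the sound
# and ROI helpers used elsewhere in this package):
#
#   table, params_shape = get_features_wrapper(im, ext,
#                                              freq=(0.75, 0.5), ntheta=2,
#                                              bandwidth=1, gamma=1, npyr=3,
#                                              im_rois=im_rois)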
def save_figlist(fname, figlist):
"""
Save a list of figures to file.
Parameters
----------
fname: string
suffix name to save the figure. Extension indicates the format
of the image
Returns
-------
Nothing
"""
for i, fig in enumerate(figlist):
fname_save='%d_%s' % (i, fname)
imsave(fname_save,fig)
def opt_shape_presets(resolution, opt_shape=None):
"""
Set values for multiresolution analysis using presets or custom parameters
Parameters
----------
resolution: str
Chooses the opt_shape presets.
        Supported presets are: 'low', 'med', 'high' and 'custom'
opt_shape: dict
Key and values for shape settings.
Valid keys are: ntheta, bandwidth, frequency, gamma, npyr
Returns
-------
opt_shape: dict
A valid dictionary with shape settings
"""
# Factory presets
opt_shape_low = dict(ntheta=2,
bandwidth=1,
frequency=(2**-1, 2**-2),
gamma=2,
npyr = 4)
opt_shape_med = dict(ntheta=4,
bandwidth=1,
frequency=(2**-1, 2**-2),
gamma=2,
npyr = 6)
opt_shape_high = dict(ntheta=8,
bandwidth=1,
frequency=(2**-0.5, 2**-1, 2**-1.5, 2**-2),
gamma=2,
npyr = 6)
if resolution == 'low':
opt_shape = opt_shape_low
elif resolution == 'med':
opt_shape = opt_shape_med
elif resolution == 'high':
opt_shape = opt_shape_high
elif resolution == 'custom':
if opt_shape is not None: # check valid values on opt_shape
if all (opt in opt_shape for opt in ('ntheta', 'bandwidth', 'frequency', 'gamma', 'npyr')):
pass
else:
print('Warning: opt_shape must have all keys-values pairs:')
print('ntheta, bandwidth, frequency, gamma, npyr')
print('Setting resolution to low')
opt_shape = opt_shape_low
else:
            print('Warning: if resolution is set to custom, a valid opt_shape dictionary should be provided.')
print('Setting resolution to low')
opt_shape = opt_shape_low
else:
print('Resolution should be: low, med or high. Setting resolution to low')
opt_shape = opt_shape_low
return opt_shape
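# Hedged usage sketch: pick a factory preset, or pass a full custom dictionary
# ('custom' requires all five keys, otherwise the function falls back to 'low'):
#
#   opt = opt_shape_presets('med')
#   opt = opt_shape_presets('custom', opt_shape=dict(ntheta=4, bandwidth=1,
#                                                    frequency=(2**-1, 2**-2),
#                                                    gamma=2, npyr=5))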
def plot_shape(shape_plt, params, display_values=False):
"""
Plot shape features in 2D representation
Parameters
----------
shape: 1D array
params: structured array returned by maad.features_rois.shape_features
Returns
-------
plot
"""
unique_theta = np.unique(params.theta)
# compute shape of matrix
dirs_size = unique_theta.size
scale_size = np.unique(params.freq).size * np.unique(params.pyr_level).size
# reshape feature vector
idx = params.sort_values(['theta','pyr_level','scale']).index
if isinstance(shape_plt, pd.DataFrame):
shape_plt = np.reshape(shape_plt.iloc[0,idx].values, (dirs_size, scale_size))
elif isinstance(shape_plt, np.ndarray):
shape_plt = np.reshape(shape_plt[idx], (dirs_size, scale_size))
unique_scale = params.scale * 2**params.pyr_level[idx]
# get textlab
textlab = shape_plt
textlab = np.round(textlab,2)
# plot figure
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(111)
ax.imshow(shape_plt, aspect='auto', origin='lower', interpolation='None', cmap='viridis')
if display_values:
for (j,i),label in np.ndenumerate(textlab):
ax.text(i,j,label,ha='center',va='center')
else:
pass
yticklab = unique_theta
xticklab = np.reshape(unique_scale.values,
(dirs_size, scale_size))
ax.set_xticks(np.arange(scale_size))
ax.set_xticklabels(np.round(xticklab,2)[0,:])
ax.set_yticks(np.arange(dirs_size))
ax.set_yticklabels(yticklab)
ax.set_xlabel('Scale')
ax.set_ylabel('Theta')
plt.show()
def compute_rois_features(s, fs, rois_tf, opt_spec, opt_shape, flims):
"""
Computes shape and central frequency features from signal at specified
time-frequency limits defined by regions of interest (ROIs)
Parameters
----------
s: ndarray
        Signal to be analysed
fs: int
Sampling frequency of the signal
rois_tf: pandas DataFrame
Time frequency limits for the analysis. Columns should have at
least min_t, max_t, min_f, max_f. Can be computed with multiple
detection methods, such as find_rois_cwt
    opt_spec: dictionary
        Options for the spectrogram with keys: window length 'nperseg' and
        window overlap in percentage 'overlap'
opt_shape: dictionary
Options for the filter bank (kbank_opt) and the number of scales (npyr)
flims: list of 2 scalars
Minimum and maximum boundary frequency values in Hertz
Returns
-------
feature_rois: pandas Dataframe
A dataframe with each column corresponding to a feature
Example
-------
s, fs = sound.load('spinetail.wav')
rois_tf = find_rois_cwt(s, fs, flims=(3000, 8000), tlen=2, th=0.003)
opt_spec = {'nperseg': 512, 'overlap': 0.5}
opt_shape = opt_shape_presets('med')
features_rois = compute_rois_features(s, fs, rois_tf, opt_spec,
opt_shape, flims)
"""
im, dt, df, ext = sound.spectrogram(s, fs, nperseg=opt_spec['nperseg'],
overlap=opt_spec['overlap'], fcrop=flims,
rescale=False, db_range=opt_spec['db_range'])
# format rois to bbox
ts = np.arange(ext[0], ext[1], dt)
f = np.arange(ext[2],ext[3]+df,df)
rois_bbox = format_rois(rois_tf, ts, f, fmt='bbox')
# roi to image blob
im_blobs = rois_to_imblobs(np.zeros(im.shape), rois_bbox)
# get features: shape, center frequency
im = normalize_2d(im, 0, 1)
#im = gaussian(im) # smooth image
bbox, params, shape = shape_features(im, im_blobs, resolution='custom',
opt_shape=opt_shape)
_, cent = centroid(im, im_blobs)
cent['frequency']= f[round(cent.y).astype(int)] # y values to frequency
# format rois to time-frequency
rois_out = format_rois(bbox, ts, f, fmt='tf')
# combine into a single df
rois_features = pd.concat([rois_out, shape, cent.frequency], axis=1)
return rois_features
def shape_features_raw(im, resolution='low', opt_shape=None):
"""
Computes raw shape of 2D signal (image or spectrogram) at multiple resolutions
using 2D Gabor filters. Contrary to shape_feature, this function delivers the raw
response of the spectrogram to the filter bank.
Parameters
----------
im: 2D array
Input image to process
resolution:
Resolution of analysis, i.e. number of filters used.
        Three presets are provided, 'low', 'med' and 'high', which control
the number of filters.
opt_shape: dictionary (optional)
options for the filter bank | |
# Python/python3_version/klampt/math/symbolic_io.py
from .symbolic import *
from .symbolic import _infix_operators,_prefix_operators,_builtin_functions
from ..io import loader
import json
from json import encoder
import weakref
import sys
VAR_PREFIX = ''
USER_DATA_PREFIX = '$'
NAMED_EXPRESSION_TAG = '#'
NAMED_EXPRESSION_PREFIX = '@'
_operator_precedence = {'pow':1,
'mul':2,'div':2.5,
'add':3,'sum':3,'sub':3.5,
'neg':4,
'not':5,
'and':6,'or':6,
'ge':7,'le':7,'eq':7,'ne':7}
#just a helper class to do some duck-typing
class _Object(object):
pass
if sys.version_info[0] == 2:
def byteify(input):
"""Helpful for converting unicode values in JSON loaded objects to strings"""
if isinstance(input, dict):
return {byteify(key): byteify(value)
for key, value in input.items()}
elif isinstance(input, list):
return [byteify(element) for element in input]
elif isinstance(input, str):
return input.encode('utf-8')
else:
return input
else:
def byteify(input):
return input
class _TaggedExpression(Expression):
def __init__(self,name):
self.name = name
Expression.__init__(self)
def indent(s,spaces):
if spaces <= 0: return s
return s.replace('\n','\n'+' '*spaces)
def _prettyPrintExpr(expr,astr,parseCompatible):
"""Returns a string representing this expression, where astr is a list of strings
representing each argument"""
if not isinstance(expr,OperatorExpression):
return exprToStr(expr,parseCompatible)
if len(expr.functionInfo.printers) > 0:
if parseCompatible and 'parse' in expr.functionInfo.printers:
return expr.functionInfo.printers['parse'](expr,astr)
if not parseCompatible and 'str' in expr.functionInfo.printers:
return expr.functionInfo.printers['str'](expr,astr)
if expr.functionInfo.name in _prefix_operators:
prefix = _prefix_operators[expr.functionInfo.name]
assert len(expr.args) == 1,"Weird, prefix operator %s has %d arguments? %s"%(expr.functionInfo.name,len(astr),",".join(astr))
return prefix + astr[0]
if expr.functionInfo.name in _infix_operators:
assert len(expr.args) == 2,"Weird, infix operator %s has %d arguments? %s"%(expr.functionInfo.name,len(astr),",".join(astr))
return astr[0] + _infix_operators[expr.functionInfo.name] + astr[1]
if expr.functionInfo.name == 'setitem':
vconst = to_const(expr.args[0])
iconst = to_const(expr.args[1])
if vconst is not None and iconst is not None:
if hasattr(iconst,'__iter__'):
indexset = set(iconst)
if parseCompatible:
astr[0] = '[' + ','.join(['0' if i in indexset else str(v) for i,v in enumerate(vconst)])+']'
else:
astr[0] = '[' + ','.join(['*' if i in indexset else str(v) for i,v in enumerate(vconst)])+']'
if expr.functionInfo.name == 'getitem':
if isinstance(expr.args[0],OperatorExpression) and astr[0][0] != '(' and expr.args[0].functionInfo.name in _infix_operators:
astr[0] = '(' + astr[0] + ')'
#if expr.functionInfo.name == 'getslice':
# if len(astr) <= 2:
# astr.append('')
# if len(astr) <= 3:
# astr.append('')
# return astr[0] + '[%s:%s:%s]'%(astr[1],astr[2],astr[3])
if isinstance(expr.args[1],slice):
start,stop,step = expr.args[1].start,expr.args[1].stop,expr.args[1].step
astr[1] = "%s:%s%s"%(("" if start is None else str(start)),
("" if (stop is None or stop > 900000000000) else str(stop)),
("" if step is None else ":"+str(step)))
return astr[0] + '[' +astr[1] + ']'
#default
if len(astr) > 1 and sum(len(a) for a in astr) > 80-2-len(expr.functionInfo.name):
res = expr.functionInfo.name + "("
res += ',\n '.join([indent(a,2) for a in astr]) + ')'
else:
res = expr.functionInfo.name + "("
res += ','.join(astr) + ')'
return res
def _make_tagged(expr,prefix="SubExp"):
"""Creates a copy of expr where each reference to a common subexpression is
replaced with a TaggedExpression. If there are no common subexpressions,
expr is returned."""
def _refspre(node):
if 'refs' in node._cache:
node._cache['refs'] += 1
return (False,True,node._cache['refs'])
node._cache['refs'] = 1
return (True,True,None)
expr._traverse(pre=_refspre,cache=False)
#all the cache values are now the number of references to a subexpression
def _hassubexpr_pre(node):
if node._cache['refs'] > 1:
#print "Node",node.functionInfo.name,"is a repeated subexpression"
node._cache['hassubexpr'] = True
return (False,True,True)
return (True,True,None)
def _hassubexpr_post(node,cvals):
if len(cvals) == 0:
return (True,False)
res = any(cvals)
#print "Child of",node.functionInfo.name,"has repeated subexpression"
node._cache['hassubexpr'] = res
if res: return (True,True)
return (True,False)
if not expr._traverse(pre=_hassubexpr_pre,post=_hassubexpr_post,cache=False):
#print "***Expression has no subexpressions***"
expr._clearCache('refs')
expr._clearCache('hassubexpr')
return expr
assert expr._cache.get('hassubexpr',False) == True
expr._clearCache('refs')
#print "***Expression has subexpressions***"
subexprs = dict()
def replace(node):
if not node._cache.get('hassubexpr',False): return node
if 'refs' in node._cache:
if 'id' not in node._cache:
#new detected subexpression, not added yet
tag = prefix+str(len(subexprs)+1)
node._cache['id'] = tag
subexprs[tag] = _TaggedExpression(tag)
node._cache['refs'] += 1
#print "Reference",node._cache['refs'],"to",node.functionInfo.name
return subexprs[node._cache['id']]
node._cache['refs'] = 1
if node._children is None:
return node
else:
assert isinstance(node,OperatorExpression)
#print "New reference to",node.functionInfo.name
creps = [replace(c) for c in node._children]
if any(cr is not c for (cr,c) in zip(creps,node._children)):
return OperatorExpression(node.functionInfo,creps,node.op)
else:
return node
repl = replace(expr)
expr._clearCache('refs')
expr._clearCache('hassubexpr')
#NEED TO CLEAR 'id' from cache after repl is used
return repl
def _to_jsonobj(val):
if isinstance(val,(bool,int,float)):
return val
elif isinstance(val,(np.ndarray,np.float64)):
return val.tolist()
elif isinstance(val,(list,tuple)):
return [_to_jsonobj(x) for x in val]
else:
try:
return loader.toJson(val)
except:
raise ValueError("Unable to convert object "+repr(val)+" to JSON object")
return None
def _json_complex(jsonval):
if isinstance(jsonval,dict):
return (len(jsonval) > 0)
elif isinstance(jsonval,(list,tuple)):
return any(_json_complex(v) for v in jsonval)
else:
return False
def _json_depth(jsonval):
if isinstance(jsonval,dict):
return 1 + max(_json_depth(v) for v in jsonval.values())
elif isinstance(jsonval,(list,tuple)):
return 1 + max(_json_depth(v) for v in jsonval)
else:
return 1
def exprToStr(expr,parseCompatible=True,expandSubexprs='auto'):
"""Converts an Expression to a printable or parseable string.
Args:
expr (Expression): the Expression to convert
parseCompatible (bool, optional): if True, the result is readable via exprFromStr()
expandSubexprs (str or bool, optional): whether to expand subexpressions. Can be:
* 'auto': if parseCompatible, equivalent to False.
if parseCompatible=False, equivalent to True.
* True: expands all common subexpressions
* False: does not expand common subexpressions.
* 'show': Internally used.
Returns:
(str): a printable or parsable string representing expr.
"""
if isinstance(expr,ConstantExpression):
if isinstance(expr.value,slice):
start,stop,step = expr.value.start,expr.value.stop,expr.value.step
return "%s:%s%s"%(("" if start is None else str(start)),
("" if (stop is None or stop > 900000000000) else str(stop)),
("" if step is None else ":"+str(step)))
try:
jsonval = _to_jsonobj(expr.value)
except:
return str(expr.value)
if parseCompatible:
return json.dumps(jsonval)
else:
#Note: DOESNT WORK IN Python 3
#original_float_repr = encoder.FLOAT_REPR
encoder.FLOAT_REPR = lambda o:format(o,'.14g')
try:
if _json_complex(jsonval):
res = json.dumps(jsonval,sort_keys=True, indent=4, separators=(',', ': '))
else:
res = json.dumps(jsonval,sort_keys=True)
except Exception:
print("Unable to dump constant expression",expr.value,"of type",expr.value.__class__.__name__)
def print_recursive(v,indent=0):
if hasattr(v,'__iter__'):
print(indent*' ',"Sub objects have type",[a.__class__.__name__ for a in v])
for a in v:
print_recursive(a,indent+2)
print_recursive(expr.value)
return "___JSON_ENCODE_ERROR___"
#encoder.FLOAT_REPR = original_float_repr
return res
elif isinstance(expr,VariableExpression):
if parseCompatible:
return VAR_PREFIX+expr.var.name
else:
return str(expr.var)
elif isinstance(expr,UserDataExpression):
return USER_DATA_PREFIX+expr.name
elif isinstance(expr,OperatorExpression):
if expandSubexprs == 'auto':
expandSubexprs = not parseCompatible
if expandSubexprs:
astr = []
for i,a in enumerate(expr.args):
a._parent = (weakref.ref(expr),i)
astr.append(exprToStr(a,parseCompatible,expandSubexprs))
if not isinstance(a,OperatorExpression) and expandSubexprs == 'show' and ('id' in a._cache or 'name' in a._cache):
#tagged subexprs need parenthesies
if astr[-1][-1] != ')':
astr[-1] = '('+astr[-1]+')'
astr[-1] = astr[-1] + NAMED_EXPRESSION_TAG + a._cache.get('id',a._cache.get('name'))
a._parent = None
res = _prettyPrintExpr(expr,astr,parseCompatible)
if expandSubexprs == 'show' and ('id' in expr._cache or 'name' in expr._cache):
#tagged subexprs need parenthesies
if res[-1] != ')':
res = '('+res+')'
return res + NAMED_EXPRESSION_TAG + expr._cache.get('id',expr._cache.get('name'))
oldparent = expr._parent
iscomplex = expr.depth() >= 0 and (expr.functionInfo.name in _operator_precedence)
expr._parent = oldparent
if iscomplex and (expr._parent is not None and not isinstance(expr._parent,str)):
if parseCompatible:
return '(' + res + ')'
else:
parent = expr._parent[0]()
if parent.functionInfo.name in _operator_precedence:
expr_precedence = _operator_precedence[expr.functionInfo.name]
parent_precedence = _operator_precedence[parent.functionInfo.name]
#if - is the first in a summation, don't parenthesize it
if expr._parent[1] == 0 and expr.functionInfo.name == 'neg' and parent.functionInfo.name in ['sum','add','sub']:
return res
if expr_precedence > parent_precedence:
return '(' + res + ')'
if expr_precedence == parent_precedence:
if expr.functionInfo is parent.functionInfo and expr.functionInfo.properties.get('associative',False):
return res
else:
return '(' + res + ')'
return res
else:
if not parseCompatible:
taggedexpr = _make_tagged(expr,"")
else:
taggedexpr = _make_tagged(expr)
res = exprToStr(taggedexpr,parseCompatible,'show')
if taggedexpr is not expr:
expr._clearCache('id',deep=True)
return res
elif isinstance(expr,_TaggedExpression):
return NAMED_EXPRESSION_PREFIX+expr.name
elif is_const(expr):
return str(expr)
else:
raise ValueError("Unknown type "+expr.__class__.__name__)
def exprFromStr(context,string,fmt=None,add=False):
"""Returns an Expression from a string. In auto mode, this reads in constants in klampt.loader JSON-
compatible format, standard variables in the form "x", user data in the form of strings prepended with $
(e.g., "$x"), and named expression references in the form of strings prepended with @.
Args:
context (Context): the context containing possible functions in string
string (str): the string to parse.
fmt (str, optional): specifies a format for the string. Can be None (auto), 'auto', or 'json'
add (bool, optional): if true, adds all variables referenced in the string to the context.
Otherwise, undefined variables are referred to as user data.
An exception is raised on parsing failure.
(Parsing is a little slow, so try not to use it in tight inner loops)
Returns:
(Expression): the expression represented by str.
"""
if len(string) == 0:
raise ValueError("Empty string provided")
if fmt == None:
if string[0] == '{':
fmt = 'json'
else:
fmt = 'auto'
if fmt == 'auto':
import re,ast
USERDATA_MARKER = '___'
EXPR_MARKER = '____'
TAGLIST_NAME = '__tagexprlist__'
taglist = context.expressions.copy()
def __settag__(self,tagname,taglist):
assert isinstance(tagname,ConstantExpression) and isinstance(tagname.value,str)
taglist[tagname.value] = self
return self
| |
        # now self.barsData has all the needed info :)
# barsData structure
# 0 bar center
# 1 perymeter
# 2 bar Current
# 3 cross section
# 4 Q power losses value
# 5 Ghtc to air thermal conductance
        # 6 Gt half-length thermal conductance
        # 7 phase number
        # 8 New Thermal model DT - this one will be calculated later below :)
# printing data for each bar
print('Bar {0:02d} ({5:01d}){1}; Power; {2:06.2f}; [W]; perymeter; {3} [mm]; Current; {4:.1f}; [A]'.format(i, center, Q, perymiter, BarCurrent, phase))
# print('** Bars Data **')
# print(self.barsData)
# print('** Bars Data **')
        # let's figure out the needed size of the Gthermal matrix
        # it will be (number of bars + 3 phase joints) x (the same)
vectorSize = len(self.barsData)+3
thG = np.zeros((vectorSize, vectorSize), dtype=float)
# TEMP: Hardcoded Gth between matrix
if self.Gmx.shape != (3,3):
GthermalMatrix = np.asarray(([0, 0, 0],
[0, 0, 0],
[0, 0, 0]))
else:
GthermalMatrix = self.Gmx
# DEBUG
print('--- Solving for temperatures ---')
print('The Thermal Cond Coeff Matrix')
print(GthermalMatrix)
print('Thermal Conductivity')
print(self.Gcon)
print('HTC')
print(self.HTC)
print('Results as bars temperatures')
# now we will loop twice over the bars
for i, fromBar in enumerate(self.barsData):
fromPhase = fromBar[7] - 1 # -1 due to the count from 0
for j, toBar in enumerate(self.barsData):
                tempG = 0  # just to make sure we don't have something in it
if fromBar is toBar:
# the main digonal with
# GHtc and Gc and sum for all
# DEBUG
# print('({},{}) it is me!'.format(i,j))
tempG += fromBar[5] + 2 * fromBar[6]
                    # now we need to loop again over all the
                    # others to get the sum of G
for otherToBar in self.barsData:
if otherToBar is not fromBar:
                            # the distance between the bars, to get the thermal conductance
distance = csd.n_getDistance(fromBar[0], otherToBar[0]) * 1e-3
                            # the area of fromBar as cross-section for thermal conduction
thisXs = fromBar[1] * self.lenght * 1e-6
otherPhase = otherToBar[7] - 1
tempG += self.Gcon * (thisXs / distance) * GthermalMatrix[fromPhase, otherPhase]
else:
# DEBUG
# print('({},{}) someone else'.format(i,j))
otherPhase = toBar[7] - 1
                    # the distance between the bars, to get the thermal conductance
distance = csd.n_getDistance(fromBar[0], toBar[0]) * 1e-3
                    # the area of fromBar as cross-section for thermal conduction
thisXs = fromBar[1] * self.lenght * 1e-6
tempG += -GthermalMatrix[otherPhase, fromPhase] * self.Gcon * (thisXs / distance)
                # putting the calculated value in the thG matrix
thG[i, j] = tempG
# now we need to go for the last 3 rows and columns that
# are for the Tx (joints temperatures)
# the bar phase will determine which Tx we tackle
# Phase = 1 means position -3 in the cols >> col = Phase - 4
        # so let's go once more through the bars to fill the last columns
for i, fromBar in enumerate(self.barsData):
phase = fromBar[7]
col = phase - 4
thG[i, col] = -2 * fromBar[6]
# and one more to fill the last rows
for j, fromBar in enumerate(self.barsData):
phase = fromBar[7]
row = phase - 4
thG[row, j] = 2 * fromBar[6]
        # and the last thing is the bottom right 3x3 area to fill for Tx'es
        # in each phase, as a sum over bars of -2*Gconduction_to_joint
        # this could be incorporated into the loops above
        # but is separated for clearer code
for fromBar in self.barsData:
phase = fromBar[7]
col_row = phase - 4
thG[col_row, col_row] += -2 * fromBar[6]
# and one for the Q vector
thQ = np.zeros((vectorSize), dtype=float)
for i, fromBar in enumerate(self.barsData):
thQ[i] = fromBar[4]
        # Solving the linear system thG * thT = thQ for the temperature rises
        # (np.linalg.solve(thG, thQ) would be the more robust equivalent; a toy
        # version of this nodal solve is sketched in _thermal_network_demo
        # after this class' plotting methods)
thGinv = np.linalg.inv(thG)
thT = np.matmul(thGinv, thQ)
# DEBUG
# print('The G array')
# print(thG)
# print('The Q vector')
# print(thQ)
# print('The T vector')
# print(thT)
# cuts out the Tx joints
self.Tout = thT[:len(self.barsData)] # putting result to vector
        # Preparing the output array of the temperatures
        # First we need to recreate a vector of temperature for each element
        # in each bar - as in the general solutions vector
tmpVector = []
barElemVect = []
        # going through each element in each bar,
        # creating the long vector of temp rises
        # and a properly ordered elements vector
        # that records where in the original xsec array the element was
for i, bar in enumerate(self.bars):
for element in bar:
tmpVector.append(self.Tout[i])
barElemVect.append(element)
# Now we prepare the array to display
self.tempriseResultsArray = csd.n_recreateresultsArray(
elementsVector=barElemVect,
resultsVector=tmpVector,
initialGeometryArray=self.XsecArr)
for i, temp in enumerate(self.Tout):
self.barsData[i].append(temp)
print('Bar {}: {:.2f}[K]'.format(i, temp))
print('Phase A joint: {:.2f}[K]'.format(thT[-3]))
print('Phase B joint: {:.2f}[K]'.format(thT[-2]))
print('Phase C joint: {:.2f}[K]'.format(thT[-1]))
# and now remembering all thermal results
self.Tout = thT
# Display the results:
# self.showResults()
def showResults(self):
title_font = { 'size':'11', 'color':'black', 'weight':'normal'}
axis_font = { 'size':'10'}
if np.sum(self.resultsArray) != 0:
            # Checking the area in the array that is used by geometry to limit the display
min_row = int(np.min(self.elementsVector[:, 0]))
max_row = int(np.max(self.elementsVector[:, 0])+1)
min_col = int(np.min(self.elementsVector[:, 1]))
max_col = int(np.max(self.elementsVector[:, 1])+1)
# Cutting down results array to the area with geometry
tempriseArrayDisplay = self.tempriseResultsArray[min_row:max_row, min_col:max_col]
resultsArrayDisplay = self.resultsArray[min_row:max_row, min_col:max_col]
            # Checking the dimensions of the plotted area
            # to make proper scaling
plotWidth = (resultsArrayDisplay.shape[1]) * self.dXmm
plotHeight = (resultsArrayDisplay.shape[0]) * self.dYmm
fig = plt.figure('Power Results Window')
ax = fig.add_subplot(1, 1, 1)
my_cmap = matplotlib.cm.get_cmap('jet')
my_cmap.set_under('w')
im = ax.imshow(resultsArrayDisplay,
cmap=my_cmap, interpolation='none',
vmin=0.8*np.min(self.resultsCurrentVector),
extent=[0, plotWidth, plotHeight, 0])
fig.colorbar(im, ax=ax, orientation='vertical',
label='Current Density [A/mm$^2$]',
alpha=0.5, fraction=0.046)
plt.axis('scaled')
            # Putting the detected bar numbers on the plot to refer back to the console data
# And doing calculation for each bar
for i, bar in enumerate(self.bars):
x, y = csd.n_getCenter(bar)
x -= min_col * self.dXmm
y -= min_row * self.dYmm
ax.text(x, y, '[{}]'.format(i), horizontalalignment='center')
# self.console('bar {0:02d}: {1:.01f}[K]'.format(i, self.barsData[i][6]))
# *** end of the per bar analysis ***
ax.set_title(str(self.f)+'[Hz] / '+str(self.I)+'[A] / '+str(self.t) +
'[$^o$C] /'+str(self.lenght) +
'[mm]\n Power Losses {0[0]:.2f}[W] \n phA: {0[1]:.2f} phB: {0[2]:.2f} phC: {0[3]:.2f}'.format(self.powerLosses), **title_font)
plt.xlabel('size [mm]', **axis_font)
plt.ylabel('size [mm]', **axis_font)
fig.autofmt_xdate(bottom=0.2, rotation=45, ha='right')
plt.tight_layout()
self.showTemperatureResults()
plt.show()
def showTemperatureResults(self):
title_font = { 'size':'9', 'color':'black', 'weight':'normal'}
axis_font = { 'size':'9'}
if np.sum(self.resultsArray) != 0:
            # Checking the area in the array that is used by geometry to limit the display
min_row = int(np.min(self.elementsVector[:, 0]))
max_row = int(np.max(self.elementsVector[:, 0])+1)
min_col = int(np.min(self.elementsVector[:, 1]))
max_col = int(np.max(self.elementsVector[:, 1])+1)
# Cutting down results array to the area with geometry
resultsArrayDisplay = self.tempriseResultsArray[min_row:max_row, min_col:max_col]
            # Checking the dimensions of the plotted area
            # to make proper scaling
plotWidth = (resultsArrayDisplay.shape[1]) * self.dXmm
plotHeight = (resultsArrayDisplay.shape[0]) * self.dYmm
fig = plt.figure('Temperature Results Window')
ax = fig.add_subplot(1, 1, 1)
my_cmap = matplotlib.cm.get_cmap('jet')
my_cmap.set_under('w')
im = ax.imshow(resultsArrayDisplay,
cmap=my_cmap, interpolation='none',
vmin=0.8*np.min(self.Tout),
extent=[0, plotWidth, plotHeight, 0])
fig.colorbar(im, ax=ax, orientation='vertical',
label='Temperature Rise [K]',
alpha=0.5, fraction=0.046)
plt.axis('scaled')
            # Putting the detected bar numbers on the plot to refer back to the console data
# And doing calculation for each bar
for i, bar in enumerate(self.bars):
x, y = csd.n_getCenter(bar)
x -= min_col * self.dXmm
y -= min_row * self.dYmm
DT = self.barsData[i][8]
ax.text(x, y, '[{}]\n{:.2f}'.format(i, DT), horizontalalignment='center', verticalalignment='center', fontsize=8)
# *** end of the per bar analysis ***
ax.set_title(str(self.f)+'[Hz] /' + str(self.t) + '[$^o$C] /'+
str(self.lenght) + '[mm] \n'
'Ia:{:.1f}A {:.0f}$^o$ '.format(float(self.I[0]), np.floor(float(self.I[1]))) +
'Ib:{:.1f}A {:.0f}$^o$ '.format(float(self.I[2]), np.floor(float(self.I[3]))) +
'Ic:{:.1f}A {:.0f}$^o$ \n'.format(float(self.I[4]), np.floor(float(self.I[5]))) +
'HTC: {}[W/m$^2$K] / ThermConv: {}[W/mK]'.format(self.HTC, self.Gcon ) +
'\n Joints Temp Rises: Fa:{:.2f}K Fb;{:.2f}K Fc:{:.2f}K'.format(self.Tout[-3], self.Tout[-2], self.Tout[-1])
, **title_font)
plt.xlabel('size [mm]', **axis_font)
plt.ylabel('size [mm]', **axis_font)
fig.autofmt_xdate(bottom=0.2, rotation=45, ha='right')
plt.tight_layout()
# plt.show()
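# Standalone illustrative sketch (not used by the class above): the temperature
# solve that builds `thG` and `thQ` reduces to a lumped nodal thermal network,
# G * dT = Q, where G collects convection and conduction conductances and Q the
# per-bar power losses. The toy two-bar network below shows the same idea; the
# numbers are made up for illustration only.
def _thermal_network_demo():
    import numpy as np
    G_conv = np.array([0.8, 0.5])   # [W/K] bar-to-air (convection) conductances
    G_cond = 0.3                    # [W/K] bar-to-bar conduction conductance
    Q = np.array([12.0, 7.0])       # [W] power losses generated in each bar
    # nodal conductance matrix: diagonal = sum of conductances at the node,
    # off-diagonal = -conductance between the two nodes
    G = np.array([[G_conv[0] + G_cond, -G_cond],
                  [-G_cond, G_conv[1] + G_cond]])
    # np.linalg.solve is preferred over forming an explicit inverse
    dT = np.linalg.solve(G, Q)
    return dT                       # temperature rises above ambient [K]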
class zWindow():
'''
This class define the main control window for handling
the analysis of equivalent phase impedance of given geometry.
'''
def __init__(self, master, XsecArr, dXmm, dYmm):
self.XsecArr = XsecArr
self.dXmm = dXmm
self.dYmm = dYmm
self.master = master
self.frame = tk.Frame(self.master)
self.frame.pack(padx=10, pady=10)
self.lab_Freq = tk.Label(self.frame,
text='Frequency [Hz]')
self.lab_Freq.pack()
self.Freq_txt = tk.Entry(self.frame)
self.Freq_txt.insert(5, '50')
self.Freq_txt.pack()
self.lab_Temp = tk.Label(self.frame,
text='Conductor temperature [degC]')
self.lab_Temp.pack()
self.Temp_txt = tk.Entry(self.frame)
self.Temp_txt.insert(5, '140')
self.Temp_txt.pack()
self.rButton = tk.Button(self.frame, text='Set Parameters',
command=self.readSettings)
self.rButton.pack()
self.bframe = tk.Frame(self.master)
| |
reg0, pos1),
(position_set_x, pos1, 12000),
(position_set_y, pos1, 1000),
(overlay_set_size, reg0, pos1),
(try_end),
(str_store_player_username, s1, ":player_no"),
(create_text_overlay, reg0, s1, 0),
(overlay_set_color, reg0, ":font_color"),
(position_set_x, pos1, 750),
(position_set_y, pos1, 750),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, ":cur_x"),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(player_get_ping, reg0, ":player_no"),
(create_text_overlay, reg0, "str_reg0", tf_right_align),
(overlay_set_color, reg0, ":font_color"),
(position_set_x, pos1, 750),
(position_set_y, pos1, 750),
(overlay_set_size, reg0, pos1),
(store_add, ":sub_cur_x", ":cur_x", 215), #200
(position_set_x, pos1, ":sub_cur_x"),
(position_set_y, pos1, ":cur_y"),
(overlay_set_position, reg0, pos1),
(val_sub, ":cur_y", 20),
(try_end),
(try_end),
(omit_key_once, key_mouse_scroll_up),
(omit_key_once, key_mouse_scroll_down),
(presentation_set_duration, 999999),
]),
(ti_on_presentation_run,
[(store_trigger_param_1, ":cur_time"),
(try_begin),
(this_or_next|key_clicked, key_mouse_scroll_up),
(key_clicked, key_mouse_scroll_down),
(omit_key_once, key_mouse_scroll_up),
(omit_key_once, key_mouse_scroll_down),
(try_end),
(try_begin),
(eq, "$g_multiplayer_stats_chart_opened_manually", 1),
(neg|game_key_is_down, gk_leave),
(assign, "$g_multiplayer_stats_chart_opened_manually", 0),
(clear_omitted_keys),
(presentation_set_duration, 0),
(try_end),
(try_begin),
(store_mul, ":update_period_time_limit", "$g_stats_chart_update_period", 1000),
(gt, ":cur_time", ":update_period_time_limit"),
(clear_omitted_keys),
(presentation_set_duration, 0),
(start_presentation, "prsnt_coop_stats_chart"),#
(try_end),
]),
]),
# inventory
("coop_item_select", prsntf_manual_end_only, 0, [
(ti_on_presentation_load,
[(set_fixed_point_multiplier, 1000),
# (multiplayer_get_my_player, ":my_player_no"),
(assign, "$g_presentation_obj_item_select_1", -1),
(assign, "$g_presentation_obj_item_select_2", -1),
(assign, "$g_presentation_obj_item_select_3", -1),
(assign, "$g_presentation_obj_item_select_4", -1),
(assign, "$g_presentation_obj_item_select_5", -1),
(assign, "$g_presentation_obj_item_select_6", -1),
(assign, "$g_presentation_obj_item_select_7", -1),
(assign, "$g_presentation_obj_item_select_8", -1),
(assign, "$g_presentation_obj_item_select_9", -1),
(assign, "$g_presentation_obj_item_select_10", -1),
(assign, "$g_presentation_obj_item_select_11", -1),
(assign, "$g_presentation_obj_item_select_12", -1),
(assign, "$g_presentation_obj_item_select_13", -1),
(assign, "$g_presentation_obj_item_select_14", -1),
(assign, "$g_presentation_obj_item_select_15", -1),
(assign, "$g_presentation_obj_item_select_16", -1),
(try_begin),
(neq, "$g_current_opened_item_details", -1),
(close_item_details),
(assign, "$g_current_opened_item_details", -1),
(try_end),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 0),
# (player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(troop_get_slot, ":selected_item_id", "trp_temp_troop", 0),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_1", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 950),
(position_set_y, pos1, 526),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_1", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_1", "mesh_mp_inventory_slot_equip", "mesh_mp_inventory_slot_equip"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800), # coop make these smaller so they don't overlap and close item details after opening
(overlay_set_size, "$g_presentation_obj_item_select_1", pos1),
(position_set_x, pos1, 899),
(position_set_y, pos1, 475),
(overlay_set_position, "$g_presentation_obj_item_select_1", pos1),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 1),
# (player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(troop_get_slot, ":selected_item_id", "trp_temp_troop", 1),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_2", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 950),
(position_set_y, pos1, 426),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_2", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_2", "mesh_mp_inventory_slot_equip", "mesh_mp_inventory_slot_equip"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_2", pos1),
(position_set_x, pos1, 899),
(position_set_y, pos1, 375),
(overlay_set_position, "$g_presentation_obj_item_select_2", pos1),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 2),
# (player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(troop_get_slot, ":selected_item_id", "trp_temp_troop", 2),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_3", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 950),
(position_set_y, pos1, 326),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_3", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_3", "mesh_mp_inventory_slot_equip", "mesh_mp_inventory_slot_equip"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_3", pos1),
(position_set_x, pos1, 899),
(position_set_y, pos1, 275),
(overlay_set_position, "$g_presentation_obj_item_select_3", pos1),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 3),
# (player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(troop_get_slot, ":selected_item_id", "trp_temp_troop", 3),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_4", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 950),
(position_set_y, pos1, 226),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_4", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_4", "mesh_mp_inventory_slot_equip", "mesh_mp_inventory_slot_equip"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_4", pos1),
(position_set_x, pos1, 899),
(position_set_y, pos1, 175),
(overlay_set_position, "$g_presentation_obj_item_select_4", pos1),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 4),
# (player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(troop_get_slot, ":selected_item_id", "trp_temp_troop", 4),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_5", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 53),
(position_set_y, pos1, 576),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_5", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_5", "mesh_mp_inventory_slot_helmet", "mesh_mp_inventory_slot_helmet"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_5", pos1),
(position_set_x, pos1, 2),
(position_set_y, pos1, 525),
(overlay_set_position, "$g_presentation_obj_item_select_5", pos1),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 5),
# (player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(troop_get_slot, ":selected_item_id", "trp_temp_troop", 5),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_6", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 53),
(position_set_y, pos1, 476),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_6", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_6", "mesh_mp_inventory_slot_armor", "mesh_mp_inventory_slot_armor"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_6", pos1),
(position_set_x, pos1, 2),
(position_set_y, pos1, 425),
(overlay_set_position, "$g_presentation_obj_item_select_6", pos1),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 6),
# (player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(troop_get_slot, ":selected_item_id", "trp_temp_troop", 6),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_7", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 53),
(position_set_y, pos1, 376),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_7", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_7", "mesh_mp_inventory_slot_boot", "mesh_mp_inventory_slot_boot"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_7", pos1),
(position_set_x, pos1, 2),
(position_set_y, pos1, 325),
(overlay_set_position, "$g_presentation_obj_item_select_7", pos1),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 7),
# (player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(troop_get_slot, ":selected_item_id", "trp_temp_troop", 7),
(try_begin),
(ge, ":selected_item_id", 0),
(create_image_button_overlay, "$g_presentation_obj_item_select_8", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 53),
(position_set_y, pos1, 276),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_8", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_8", "mesh_mp_inventory_slot_glove", "mesh_mp_inventory_slot_glove"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_8", pos1),
(position_set_x, pos1, 2),
(position_set_y, pos1, 225),
(overlay_set_position, "$g_presentation_obj_item_select_8", pos1),
# (store_add, ":selected_item_index", slot_player_selected_item_indices_begin, 8),
# (player_get_slot, ":selected_item_id", ":my_player_no", ":selected_item_index"),
(troop_get_slot, ":selected_item_id", "trp_temp_troop", 8),
(try_begin),
(ge, ":selected_item_id", 0),
# (eq, "$g_horses_are_avaliable", 1),
(create_image_button_overlay, "$g_presentation_obj_item_select_9", "mesh_mp_inventory_slot_empty", "mesh_mp_inventory_slot_empty"),
(create_mesh_overlay_with_item_id, reg0, ":selected_item_id"),
(position_set_x, pos1, 53),
(position_set_y, pos1, 176),
(overlay_set_position, reg0, pos1),
(assign, "$g_inside_obj_9", reg0),
(else_try),
(create_image_button_overlay, "$g_presentation_obj_item_select_9", "mesh_mp_inventory_slot_horse", "mesh_mp_inventory_slot_horse"),
(try_end),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, "$g_presentation_obj_item_select_9", pos1),
(position_set_x, pos1, 2),
(position_set_y, pos1, 125),
(overlay_set_position, "$g_presentation_obj_item_select_9", pos1),
(create_mesh_overlay, reg0, "mesh_mp_inventory_left"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 0),
(position_set_y, pos1, 14),
(overlay_set_position, reg0, pos1),
(create_mesh_overlay, reg0, "mesh_mp_inventory_right"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 894),
(position_set_y, pos1, 65),
(overlay_set_position, reg0, pos1),
# (create_text_overlay, "$g_presentation_obj_item_select_10", "str_done", 0),
# (create_in_game_button_overlay, "$g_presentation_obj_item_select_10", "str_reset_to_default", 0),
# (overlay_set_color, "$g_presentation_obj_item_select_10", 0xFFFFFF),
# (position_set_x, pos1, 605),
# (position_set_y, pos1, 25),
# (overlay_set_position, "$g_presentation_obj_item_select_10", pos1),
(create_in_game_button_overlay, "$g_presentation_obj_item_select_11", "str_done", 0),
(overlay_set_color, "$g_presentation_obj_item_select_11", 0xFFFFFF),
(position_set_x, pos1, 500),
(position_set_y, pos1, 25),
(overlay_set_position, "$g_presentation_obj_item_select_11", pos1),
# (create_text_overlay, "$g_presentation_obj_item_select_12", "str_done", 0),
# (multiplayer_get_my_player, ":my_player_no"),
# (player_get_gold, ":player_gold", ":my_player_no"),
# (call_script, "script_multiplayer_calculate_cur_selected_items_cost", ":my_player_no", 1),
# (create_text_overlay, "$g_presentation_obj_item_select_12", "str_total_item_cost_reg0", tf_left_align|tf_single_line|tf_with_outline),
# (try_begin),
# (ge, ":player_gold", reg0),
# (overlay_set_color, "$g_presentation_obj_item_select_12", 0xFFFFFF),
# (else_try),
# (overlay_set_color, "$g_presentation_obj_item_select_12", 0xFF0000),
# (try_end),
# (position_set_x, pos1, 680),
# (position_set_y, pos1, 652),
# (overlay_set_position, "$g_presentation_obj_item_select_12", pos1),
# (store_add, "$g_presentation_obj_item_select_next", "$g_presentation_obj_item_select_12", 1),
(store_add, "$g_presentation_obj_item_select_next", "$g_presentation_obj_item_select_11", 3), #add 3 to skip container, need to be > item11 + 2
# (player_get_troop_id, ":my_troop_no", ":my_player_no"),
(try_begin),
(eq, "$g_presentation_state", 1),
(create_mesh_overlay, reg0, "mesh_mp_inventory_right_arrow"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 881),
(position_set_y, pos1, 515),
(overlay_set_position, reg0, pos1),
(call_script, "script_coop_display_available_items_from_inventory"),
(else_try),
(eq, "$g_presentation_state", 2),
(create_mesh_overlay, reg0, "mesh_mp_inventory_right_arrow"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 881),
(position_set_y, pos1, 415),
(overlay_set_position, reg0, pos1),
(call_script, "script_coop_display_available_items_from_inventory"),
(else_try),
(eq, "$g_presentation_state", 3),
(create_mesh_overlay, reg0, "mesh_mp_inventory_right_arrow"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 881),
(position_set_y, pos1, 315),
(overlay_set_position, reg0, pos1),
(call_script, "script_coop_display_available_items_from_inventory"),
(else_try),
(eq, "$g_presentation_state", 4),
(create_mesh_overlay, reg0, "mesh_mp_inventory_right_arrow"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 881),
(position_set_y, pos1, 215),
(overlay_set_position, reg0, pos1),
(call_script, "script_coop_display_available_items_from_inventory"),
(else_try),
(eq, "$g_presentation_state", 5),
(create_mesh_overlay, reg0, "mesh_mp_inventory_left_arrow"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 106),
(position_set_y, pos1, 565),
(overlay_set_position, reg0, pos1),
(call_script, "script_coop_display_available_items_from_inventory"),
(else_try),
(eq, "$g_presentation_state", 6),
(create_mesh_overlay, reg0, "mesh_mp_inventory_left_arrow"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 106),
(position_set_y, pos1, 465),
(overlay_set_position, reg0, pos1),
(call_script, "script_coop_display_available_items_from_inventory"),
(else_try),
(eq, "$g_presentation_state", 7),
(create_mesh_overlay, reg0, "mesh_mp_inventory_left_arrow"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 106),
(position_set_y, pos1, 365),
(overlay_set_position, reg0, pos1),
(call_script, "script_coop_display_available_items_from_inventory"),
(else_try),
(eq, "$g_presentation_state", 8),
(create_mesh_overlay, reg0, "mesh_mp_inventory_left_arrow"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 106),
(position_set_y, pos1, 265),
(overlay_set_position, reg0, pos1),
(call_script, "script_coop_display_available_items_from_inventory"),
(else_try),
(eq, "$g_presentation_state", 9),
# (eq, "$g_horses_are_avaliable", 1),
(create_mesh_overlay, reg0, "mesh_mp_inventory_left_arrow"),
(position_set_x, pos1, 800),
(position_set_y, pos1, 800),
(overlay_set_size, reg0, pos1),
(position_set_x, pos1, 106),
(position_set_y, pos1, 165),
(overlay_set_position, reg0, pos1),
(call_script, "script_coop_display_available_items_from_inventory"),
(try_end),
(presentation_set_duration, 999999),
] ),
(ti_on_presentation_mouse_enter_leave,
[(store_trigger_param_1, ":object"),
(store_trigger_param_2, ":enter_leave"),
# (assign, reg1, ":object"),
# (try_begin),
# (eq, ":enter_leave", 0),
# (str_store_string, s42, "@Enter"),
# (else_try),
# (str_store_string, s42, "@Leave"),
# (try_end),
# (display_message, "@{s42} obj {reg1} "),
(try_begin),
(eq, "$g_close_equipment_selection", 0),
(try_begin),
(eq, ":enter_leave", 0),
(assign, ":item_no", -1),
(try_begin),
(ge, ":object", "$g_presentation_obj_item_select_next"),
(store_sub, ":tested_object", ":object", "$g_presentation_obj_item_select_next"),
(store_mod, ":mod_value", ":tested_object", 2),
(store_sub, ":mod_value", 1, ":mod_value"),
(val_div, ":tested_object", 2),
(store_add, ":cur_slot", multi_data_item_button_indices_begin, ":tested_object"),
(troop_get_slot, ":item_no", "trp_multiplayer_data", ":cur_slot"),
(store_add, ":cur_imod_slot", ":cur_slot", 100),
(troop_get_slot, ":item_imod", "trp_temp_troop", ":cur_imod_slot"),
(assign, ":target_obj", ":object"),
(val_add, ":target_obj", ":mod_value"),
(else_try),
(eq, ":object", "$g_presentation_obj_item_select_1"),
# (store_add, ":player_slot_index", slot_player_selected_item_indices_begin, | |
input, output,
calc_latency=calc_latency,
func_real_latency=estim_net,
func_accum_latency=estim_accum_by_graph
)
mobile_net.save_modules_nnp(
OUTPUT_DIR + 'mn' + str(i), active_only=True,
calc_latency=calc_latency,
func_latency=estim_accum_by_graph
)
if onnx:
mobile_net.convert_npp_to_onnx(OUTPUT_DIR)
# 4 **************************
if exp_nr == 4:
from nnabla_nas.module import static as smo
input1 = nn.Variable((1, 256, 32, 32))
input2 = nn.Variable((1, 384, 32, 32))
input3 = nn.Variable((1, 128, 32, 32))
input4 = nn.Variable((1, 768, 32, 32))
input5 = nn.Variable((1, 1280, 32, 32))
input6 = nn.Variable((1, 2048, 32, 32))
input7 = nn.Variable((1, 512, 32, 32))
input8 = nn.Variable((1, 192, 32, 32))
input9 = nn.Variable((1, 224, 32, 32))
static_input1 = smo.Input(value=input1)
static_input2 = smo.Input(value=input2)
static_input3 = smo.Input(value=input3)
static_input4 = smo.Input(value=input4)
static_input5 = smo.Input(value=input5)
static_input6 = smo.Input(value=input6)
static_input7 = smo.Input(value=input7)
static_input8 = smo.Input(value=input8)
static_input9 = smo.Input(value=input9)
myconv1 = smo.Conv(parents=[static_input1], in_channels=256,
out_channels=128, kernel=(1, 1), pad=None, group=1)
myconv2 = smo.Conv(parents=[static_input2], in_channels=384,
out_channels=128, kernel=(1, 1), pad=None, group=1)
myconv3 = smo.Conv(parents=[static_input3], in_channels=128,
out_channels=256, kernel=(1, 1), pad=None, group=1)
myconv4 = smo.Conv(parents=[static_input4], in_channels=768,
out_channels=256, kernel=(1, 1))
myconv5 = smo.Conv(parents=[static_input5], in_channels=1280,
out_channels=256, kernel=(1, 1), pad=None, group=1)
myconv6 = smo.Conv(parents=[static_input6], in_channels=2048,
out_channels=256, kernel=(1, 1), pad=None, group=1)
myconv7 = smo.Conv(parents=[static_input7], in_channels=512,
out_channels=512, kernel=(3, 3), pad=(1, 1), group=1
)
myconv8 = smo.Conv(parents=[static_input8], in_channels=192,
out_channels=512, kernel=(7, 7), pad=(3, 3), group=1
)
myconv9 = smo.Conv(parents=[static_input9], in_channels=224,
out_channels=128, kernel=(5, 5), pad=(2, 2), group=1
)
output1 = myconv1()
output2 = myconv2()
output3 = myconv3()
output4 = myconv4()
output5 = myconv5()
output6 = myconv6()
output7 = myconv7()
output8 = myconv8()
output9 = myconv9()
N = 10
for i in range(0, N):
mean_time = estim_fwd(output1)
print("1, ", mean_time)
mean_time = estim_fwd(output2)
print("2, ", mean_time)
mean_time = estim_fwd(output3)
print("3, ", mean_time)
mean_time = estim_fwd(output4)
print("4, ", mean_time)
mean_time = estim_fwd(output5)
print("5, ", mean_time)
mean_time = estim_fwd(output6)
print("6, ", mean_time)
mean_time = estim_fwd(output7)
print("7, ", mean_time)
mean_time = estim_fwd(output8)
print("8, ", mean_time)
mean_time = estim_fwd(output9)
print("9, ", mean_time)
N = 100
from nnabla_nas.utils.estimator.latency import LatencyGraphEstimator
for i in range(0, N):
estimation = LatencyGraphEstimator(n_run=100, ext_name='cpu')
latency = estimation.get_estimation(myconv1)
latency = estimation.get_estimation(myconv2)
latency = estimation.get_estimation(myconv3)
latency = estimation.get_estimation(myconv4)
latency = estimation.get_estimation(myconv5)
latency = estimation.get_estimation(myconv6)
latency = estimation.get_estimation(myconv7)
latency = estimation.get_estimation(myconv8)
latency = estimation.get_estimation(myconv9)
estimation = LatencyGraphEstimator(n_run=100, ext_name='cpu')
latency = estimation.get_estimation(myconv9)
latency = estimation.get_estimation(myconv8)
latency = estimation.get_estimation(myconv7)
latency = estimation.get_estimation(myconv6)
latency = estimation.get_estimation(myconv5)
latency = estimation.get_estimation(myconv4)
latency = estimation.get_estimation(myconv3)
latency = estimation.get_estimation(myconv2)
latency = estimation.get_estimation(myconv1)
estimation = LatencyGraphEstimator(n_run=100, ext_name='cpu')
latency = estimation.get_estimation(myconv6)
latency = estimation.get_estimation(myconv9)
latency = estimation.get_estimation(myconv1)
latency = estimation.get_estimation(myconv4)
latency = estimation.get_estimation(myconv8)
latency = estimation.get_estimation(myconv3)
latency = estimation.get_estimation(myconv5)
latency = estimation.get_estimation(myconv7)
latency = estimation.get_estimation(myconv2)
latency += 0 # to avoid lint/flake8 error
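        # The three passes above query the same nine convolutions in forward,
        # reverse and a shuffled order; presumably this is meant to expose any
        # order or caching sensitivity in LatencyGraphEstimator.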
# 5 **************************
if exp_nr == 5:
from nnabla_nas.module import static as smo
from nnabla_nas.utils.estimator.latency import LatencyGraphEstimator
from numpy.random import permutation
import numpy as np
run_also_ours_at_the_end = True
N_conv = 50 # number of different convolutions tried
in_sizes = np.random.randint(low=1, high=1000, size=N_conv)
out_sizes = np.random.randint(low=1, high=600, size=N_conv)
kernel_sizes = np.random.randint(low=1, high=7, size=N_conv)
feat_sizes = np.random.randint(low=16, high=48, size=N_conv)
N = 100
for j in range(N):
estimation = LatencyGraphEstimator(n_run=100, ext_name='cpu')
print('****************** RUN ********************')
for i in permutation(N_conv):
input = nn.Variable((1, in_sizes[i],
feat_sizes[i], feat_sizes[i]))
static_input = smo.Input(value=input)
myconv = smo.Conv(parents=[static_input],
in_channels=in_sizes[i],
out_channels=out_sizes[i],
kernel=(kernel_sizes[i], kernel_sizes[i]),
pad=None, group=1
)
output = myconv()
latency = estimation.get_estimation(myconv)
latency += 0 # to avoid lint/flake8 error
if run_also_ours_at_the_end is True:
print('*********** NOW IT IS OUR TURN ***********')
for i in range(N_conv):
input = nn.Variable((1, in_sizes[i],
feat_sizes[i], feat_sizes[i]))
static_input = smo.Input(value=input)
myconv = smo.Conv(parents=[static_input],
in_channels=in_sizes[i],
out_channels=out_sizes[i],
kernel=(kernel_sizes[i], kernel_sizes[i]),
pad=None, group=1
)
output = myconv()
mean_time = estim_fwd(output, n_run=100) * 1000 # in ms
print('Our_Conv : 100 :', mean_time, ':',
'[(1, ' + str(in_sizes[i]) + ', ' + str(feat_sizes[i]) +
', ' + str(feat_sizes[i]) + ')]',
':', out_sizes[i], ':', kernel_sizes[i]
)
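            # Each line printed above follows the layout
            #   Our_Conv : <n_run> : <mean forward time in ms> : [(1, C_in, H, W)] : C_out : kernel_size
            # so the estim_fwd timings can be lined up against the estimator output.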
# 6 **************************
if exp_nr == 6:
import onnx
load_onnx = False
if len(sys.argv) > 2:
INPUT_DIR = sys.argv[2]
else:
INPUT_DIR = './logs/zoph/one_net/'
if len(sys.argv) > 3:
load_onnx = True
existing_networks = glob.glob(INPUT_DIR + '/*' + os.path.sep)
all_nets_latencies = dict.fromkeys([])
all_nets = dict.fromkeys([])
net_idx = 0
for network in existing_networks:
all_blocks = glob.glob(network + '**/*.acclat', recursive=True)
blocks = dict.fromkeys([])
block_idx = 0
this_net_accumulated_latency = 0.0
this_net_accumulated_latency_of_convs = 0.0
this_net_accumulated_latency_of_relus = 0.0
this_net_accumulated_latency_of_bns = 0.0
this_net_accumulated_latency_of_merges = 0.0
this_net_accumulated_latency_of_pools = 0.0
this_net_accumulated_latency_of_reshapes = 0.0
this_net_accumulated_latency_of_affines = 0.0
this_net_accumulated_latency_of_add2s = 0.0
for block_lat in all_blocks:
block = block_lat[:-7] + '.onnx'
print('.... READING .... --> ' + block)
# Reading latency for each of the blocks of layers
with open(block_lat, 'r') as f:
block_latency = float(f.read())
this_net_accumulated_latency += block_latency
# Layer-type-wise latencies tested
# for Zoph
# for Random Wired networks
# for mobilenet
layer_name = block.split('/')[-1].split('.')[-2]
if layer_name.find('bn') != -1:
this_net_accumulated_latency_of_bns += block_latency
elif layer_name.find('batchnorm') != -1:
this_net_accumulated_latency_of_bns += block_latency
elif layer_name.find('relu') != -1:
this_net_accumulated_latency_of_relus += block_latency
elif layer_name.find('conv') != -1:
this_net_accumulated_latency_of_convs += block_latency
elif layer_name.find('merg') != -1:
this_net_accumulated_latency_of_merges += block_latency
elif layer_name.find('pool') != -1:
this_net_accumulated_latency_of_pools += block_latency
elif layer_name.find('con') != -1: # from concat
this_net_accumulated_latency_of_merges += block_latency
elif layer_name.find('reshape') != -1:
this_net_accumulated_latency_of_reshapes += block_latency
elif layer_name.find('linear') != -1:
this_net_accumulated_latency_of_affines += block_latency
elif layer_name.find('add2') != -1:
this_net_accumulated_latency_of_add2s += block_latency
this_block = dict.fromkeys([])
this_block['latency'] = block_latency
if load_onnx:
# Interesting FIELDS in params.graph:
# 'input', 'name', 'node', 'output'
params = onnx.load(block)
this_block['name'] = params.graph.name
this_block['input'] = params.graph.input
this_block['output'] = params.graph.output
this_block['nodes'] = params.graph.node
blocks[block_idx] = this_block
block_idx += 1
net_realat_file = network[:-1] + '.realat'
with open(net_realat_file, 'r') as f:
this_net_real_latency = float(f.read())
net_acclat_file = network[:-1] + '.acclat'
with open(net_acclat_file, 'r') as f:
this_net_acc_latency = float(f.read())
this_net = dict.fromkeys([])
this_net['real_latency'] = this_net_real_latency
this_net['accum_latency_graph'] = this_net_acc_latency
this_net['accum_latency_module'] = this_net_accumulated_latency
if load_onnx:
net_file = network[:-1] + '.onnx'
print('xxxx READING xxxx --> ' + net_file)
params = onnx.load(net_file)
this_net['name'] = params.graph.name
this_net['input'] = params.graph.input
this_net['output'] = params.graph.output
this_net['nodes'] = params.graph.node
all_nets_latencies[net_idx] = [
this_net_real_latency,
this_net_acc_latency,
this_net_accumulated_latency,
this_net_accumulated_latency_of_convs,
this_net_accumulated_latency_of_bns,
this_net_accumulated_latency_of_relus,
this_net_accumulated_latency_of_pools,
this_net_accumulated_latency_of_merges,
this_net_accumulated_latency_of_reshapes,
this_net_accumulated_latency_of_affines,
this_net_accumulated_latency_of_add2s,
]
all_nets[net_idx] = this_net
net_idx += 1
# Compare accumulated latency to net latencies, do a plot:
print('LATENCY Results from ' + INPUT_DIR)
print('NETWORK, LAYER-WISE (by graph), ',
'LAYER-WISE (by module), of CONVs, of BNs, of RELUs, ',
'of POOLs, of MERGEs/CONCATs, of RESHAPEs, of AFFINEs, ',
'of ADD2 layers'
)
for i in range(len(all_nets_latencies)):
# print(all_nets_latencies[i])
print(['%7.3f' % val for val in all_nets_latencies[i]])
if __name__ == '__main__':
if len(sys.argv) > 1:
if len(sys.argv) > 2:
if (sys.argv[2] == 'O') \
or (sys.argv[2] == 'LO') \
or (sys.argv[2] == 'OL'):
onnx = True
else:
onnx = False
pass
if (sys.argv[2] == 'L') \
or (sys.argv[2] == 'LO') \
or (sys.argv[2] == 'OL'):
calc_latency = True
else:
calc_latency = False
pass
if (sys.argv[2] == 'L') \
or (sys.argv[2] == 'LO') \
or (sys.argv[2] == 'OL') \
or (sys.argv[2] == 'O'):
if len(sys.argv) > 3:
if len(sys.argv) > 4:
calc_latency_and_onnx(int(sys.argv[1]),
calc_latency=calc_latency,
ext_name=sys.argv[3],
device_id=int(sys.argv[4]),
onnx=onnx
)
else:
calc_latency_and_onnx(int(sys.argv[1]),
calc_latency=calc_latency,
ext_name=sys.argv[3],
onnx=onnx
)
pass
else:
calc_latency_and_onnx(int(sys.argv[1]),
calc_latency=calc_latency,
onnx=onnx
)
pass
else:
if len(sys.argv) > 3:
calc_latency_and_onnx(int(sys.argv[1]),
calc_latency=calc_latency,
ext_name=sys.argv[2],
device_id=int(sys.argv[3]),
onnx=onnx
)
else:
calc_latency_and_onnx(int(sys.argv[1]),
calc_latency=calc_latency,
ext_name=sys.argv[2],
onnx=onnx
)
pass
pass
else:
calc_latency_and_onnx(int(sys.argv[1]), calc_latency=False)
else:
print('Usage: python calc_latency_export_onnx.py <id> ' +
          '[L|LO|O|<path>] [<ext_name> [<device_id>]]')
print('If L is used, the estimation for latency will be calculated')
print('If O is used, the exporting to ONNX will be done')
print('If LO or OL is used, both the latency estimation and the ' +
'exporting to ONNX will be done')
print('Possible values for <ext_name>: cpu|cuda|cudnn. Default = cpu')
print('Possible values for <device_id>: 0..7 . Default = 0')
print('Possible values for <id>:')
print('# 10 : ZOPH - (one net) - create 1 instance of ZOPH network,' +
              ' save it and its modules, calc latency, export to ONNX')
        print('# 11 : ZOPH - (many different nets) - sample a set of N ZOPH ' +
'networks, calculate latency, export all of them (whole net ' +
'and modules), convert to ONNX')
print('# 12 : ZOPH - (one net many times) Sample one ZOPH network, ' +
'calculate latency of this network N times')
print('# 20 : RANDOM WIRED - (one net) - create 1 instance of ' +
'random wired search space network, save it and | |
# Copyright 2015 TellApart, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Registration implementation for Azure Load Balancers.
"""
__copyright__ = 'Copyright (C) 2015 TellApart, Inc. All Rights Reserved.'
from tellapart.aurproxy.register.azuretools import AzureRegisterer
from tellapart.aurproxy.register.base import (
RegistrationAction,
RegistrationActionReason
)
from tellapart.aurproxy.util import get_logger
from time import time
logger = get_logger(__name__)
def upsert_pool(bp, pools):
pools = (pools or [])
# avoid double-add if the pool is already present
pools = [p for p in pools if p.id != bp.id]
pools.append(bp)
return pools
def drop_pool(bp, pools):
return [p for p in pools if p.id != bp.id]
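# A minimal sketch of how the two helpers above behave. `Pool` is a stand-in for
# the azure.mgmt.network backend-address-pool objects, which are matched by `.id`:
#
#   pools = [Pool(id='a'), Pool(id='b')]
#   pools = upsert_pool(Pool(id='b'), pools)   # 'b' is replaced, not duplicated
#   pools = drop_pool(Pool(id='a'), pools)     # -> only 'b' remains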
class AzureException(BaseException): pass
class BaseAzureLbRegisterer(AzureRegisterer):
def __init__(self, lb_names, region, subscription_id, tenant_id, client_id=None, client_secret=None):
"""
Common code for Azure load balancer Registerers.
Args:
      lb_names - str - Comma-delimited list of load balancer names.
region - str - Azure region name (EG: 'westcentralus','westus','eastus').
subscription_id - str - Azure subscription ID (as GUID).
tenant_id - str - Azure tenant ID (as GUID).
client_id - str - Azure client (application) ID (as GUID).
client_secret - str - Azure client secret key.
"""
super(BaseAzureLbRegisterer, self).__init__(
region, subscription_id, tenant_id, client_id, client_secret)
self._lb_names = lb_names.split(',')
@property
def lbs(self):
"""
Retrieves specified Azure LoadBalancer instances.
Returns:
      List of LoadBalancer objects (azure.mgmt.network.models.LoadBalancer) matching the list of names this class was instantiated with.
"""
lbs_iter = self.conn.network.load_balancers.list_all()
# iterates all paged items into a single list, filtering on current lb names
lb_list = [b for b in lbs_iter if b.name in self._lb_names]
return lb_list
def match_load_balancer_and_vm(self, lb, vm):
"""
Given a load balancer and a VM, builds a dict of matched objects.
Properties: load_balancer, vm, network_interface, backend_pool, ip_config
"""
if not lb:
return None
if not vm:
return None
for nf in vm.network_profile.network_interfaces:
nic = self.get_network_interface(nf.id)
bp_item = self._match_backend_pool(lb, nic)
if bp_item:
bp_item['network_interface'] = nic
bp_item['vm'] = vm
bp_item['load_balancer'] = lb
return bp_item
return None
def add_vm_to_load_balancer(self, lb, vm, backend_pool=None):
"""
Adds the VM to the load balancer.
Args:
lb - azure.mgmt.network.v2018_02_01.models.LoadBalancer - LoadBalancer object
vm - azure.mgmt.compute.v2017_03_30.models.VirtualMachine - VirtualMachine object
backend_pool - str - The name of the backend pool within the load balancer to add the VM. If not specified, the first pool will be chosen.
Returns:
Bool whether add was successful
"""
# cannot add VM if load balancer doesn't exist or has no backend pools
if not lb or not lb.backend_address_pools or len(lb.backend_address_pools) == 0:
logger.warn('no lb backend pools to register with!')
return False
if not vm:
logger.warn('no vm to register!')
return False
    bp = self._find_backend_pool(lb, backend_pool)
match = self._match_ip_config(vm)
if not match or not bp:
logger.warn('failed to find nic without pooling ip config for this vm!')
return False
match['ip_config'].load_balancer_backend_address_pools = upsert_pool(
bp, match['ip_config'].load_balancer_backend_address_pools)
nic = match['network_interface']
return self.save_network_interface(nic)
def remove_vm_from_load_balancer(self, lb, vm):
"""
Removes the VM from the load balancer.
Args:
lb - azure.mgmt.network.v2018_02_01.models.LoadBalancer - LoadBalancer object
vm - azure.mgmt.compute.v2017_03_30.models.VirtualMachine - VirtualMachine object
Returns:
Bool whether remove was successful
"""
match = self.match_load_balancer_and_vm(lb, vm)
if not match: return False
# remove the link between the VM's IP config object and the corresponding load balancer backend pool
nic = match['network_interface']
match['ip_config'].load_balancer_backend_address_pools = drop_pool(
match['backend_pool'], match['ip_config'].load_balancer_backend_address_pools)
return self.save_network_interface(nic)
def _match_backend_pool(self, lb, nic):
"""
Given a load balancer and a network interface, attempts to find a match on backend pool and ip config.
"""
for ip in nic.ip_configurations:
# if ip.load_balancer_backend_address_pools:
for bp in (ip.load_balancer_backend_address_pools or []):
for lbp in lb.backend_address_pools:
if (lbp.id == bp.id):
return {'backend_pool': bp, 'ip_config': ip}
return None
def _find_backend_pool(self, lb, bp_name):
"""
Given a load balancer, attempts to find the backend pool, and if not found, returns the first backend pool.
"""
if not lb or not lb.backend_address_pools or len(lb.backend_address_pools) == 0:
return None
bp_list = [b for b in lb.backend_address_pools if b.name == bp_name]
if len(bp_list) > 0:
return bp_list[0]
else:
return lb.backend_address_pools[0]
def _match_ip_config(self, vm):
"""
Finds the ip config object within the network interface to associate the backend pool for the VM.
"""
for nf in vm.network_profile.network_interfaces:
nic = self.get_network_interface(nf.id)
if nic.primary:
return {'network_interface': nic, 'ip_config': next(iter(nic.ip_configurations))}
return None
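# Hypothetical usage of the self-registerer defined below (argument values are
# placeholders, not real credentials):
#
#   registerer = AzureLbSelfRegisterer(
#       lb_names='my-lb-1,my-lb-2', region='westus',
#       subscription_id='<sub-guid>', tenant_id='<tenant-guid>',
#       client_id='<app-guid>', client_secret='<secret>')
#   registerer.add()      # attach this VM's primary NIC to the first backend pool of each configured LB
#   registerer.remove()   # detach it again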
class AzureLbSelfRegisterer(BaseAzureLbRegisterer):
"""
  Registerer that adds and removes the current machine from the configured Azure load balancers.
"""
def add(self):
"""
Add the current instance to all configured LBs.
Assumes that this code is running on an Azure instance.
"""
instance_id = self.get_current_instance_id()
vm = self.get_current_machine()
for lb in self.lbs:
# Note: This only finds the VM in one of the balancer's backend pools
match = self.match_load_balancer_and_vm(lb, vm)
if not match:
self.record(lb.name,
instance_id,
RegistrationAction.REGISTER,
[RegistrationActionReason.NOT_YET_REGISTERED])
if not self.add_vm_to_load_balancer(lb, vm):
raise AzureException("failed to register vm {} with lb {}".format(vm, lb))
else:
logger.info("registered vm {} with lb {}".format(vm, lb))
else:
self.record(lb.name,
instance_id,
RegistrationAction.NONE,
[RegistrationActionReason.ALREADY_REGISTERED])
def remove(self):
"""
Remove the current instance from all configured LBs.
Assumes that this code is running on an Azure instance.
"""
instance_id = self.get_current_instance_id()
vm = self.get_current_machine()
for lb in self.lbs:
match = self.match_load_balancer_and_vm(lb, vm)
#if instance_id in self._get_elb_instance_ids(elb):
if match:
self.record(lb.name,
instance_id,
RegistrationAction.REMOVE)
self.remove_vm_from_load_balancer(lb, vm)
else:
self.record(lb.name,
instance_id,
RegistrationAction.NONE,
[RegistrationActionReason.NOT_ALREADY_REGISTERED])
# TODO refactor with less spite
class AzureGatewaySelfRegisterer(AzureRegisterer):
def __init__(self, lb_names, region, subscription_id, tenant_id, client_id=None, client_secret=None, refresh_interval=0):
"""
Common code for Azure application gateway Registerers.
Args:
      lb_names - str - Comma-delimited list of application gateway names.
region - str - Azure region name (EG: 'westcentralus','westus','eastus').
subscription_id - str - Azure subscription ID (as GUID).
tenant_id - str - Azure tenant ID (as GUID).
client_id - str - Azure client (application) ID (as GUID).
client_secret - str - Azure client secret key.
      Note: the client id must be authorized to read/list Network resources as well as Virtual
      Machine resources. Giving this principal the "Virtual Machine Contributor" and
      "Network Contributor" role assignments is suitable.
"""
super(AzureGatewaySelfRegisterer, self).__init__(
region, subscription_id, tenant_id, client_id, client_secret)
self._lb_names = lb_names.split(',')
self._refresh_interval_secs = int(refresh_interval)
self.perform_check = (self._refresh_interval_secs > 0)
self._last_checked = 0
self._last_result = True
self._check_states = [True for lb in self._lb_names]
@property
def lbs(self):
"""
Retrieves specified Azure ApplicationGateway instances.
Returns:
      List of ApplicationGateway objects (azure.mgmt.network.models.ApplicationGateway) matching the list of names this class was instantiated with.
"""
lbs_iter = self.conn.network.application_gateways.list_all()
# iterates all paged items into a single list, filtering on current lb names
lb_list = [b for b in lbs_iter if b.name in self._lb_names]
return lb_list
def match_load_balancer_and_vm(self, lb, vm):
"""
Given a load balancer and a VM, builds a dict of matched objects.
Properties: load_balancer, vm, network_interface, backend_pool, ip_config
"""
if not lb:
return None
if not vm:
return None
for nf in vm.network_profile.network_interfaces:
nic = self.get_network_interface(nf.id)
bp_item = self._match_backend_pool(lb, nic)
if bp_item:
bp_item['network_interface'] = nic
bp_item['vm'] = vm
bp_item['load_balancer'] = lb
return bp_item
return None
def add_vm_to_load_balancer(self, lb, vm, backend_pool=None):
"""
Adds the VM to the load balancer (gateway).
Args:
lb - azure.mgmt.network.v2018_02_01.models.ApplicationGateway - ApplicationGateway object
vm - azure.mgmt.compute.v2017_03_30.models.VirtualMachine - VirtualMachine object
backend_pool - str - The name of the backend pool within the load balancer to add the VM. If not specified, the first pool will be chosen.
Returns:
Bool whether add was successful
"""
# cannot add VM if load balancer doesn't exist or has no backend pools
if not lb or not lb.backend_address_pools or len(lb.backend_address_pools) == 0:
logger.warn('no lb backend pools to register with!')
return False
if not vm:
logger.warn('no vm to register!')
return False
    bp = self._find_backend_pool(lb, backend_pool)
match = self._match_ip_config(vm)
if not match or not bp:
logger.warn('failed to find nic without pooling ip config for this vm!')
return False
match['ip_config'].application_gateway_backend_address_pools = upsert_pool(
bp, match['ip_config'].application_gateway_backend_address_pools)
nic = match['network_interface']
return self.save_network_interface(nic)
def remove_vm_from_load_balancer(self, lb, vm):
"""
Removes the VM from the load balancer.
Args:
lb - azure.mgmt.network.v2018_02_01.models.ApplicationGateway - ApplicationGateway object
vm - azure.mgmt.compute.v2017_03_30.models.VirtualMachine - VirtualMachine object
| |
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
import netaddr
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.subnets import utils
LOG = logging.getLogger(__name__)
class CreateNetworkInfoAction(workflows.Action):
net_name = forms.CharField(max_length=255,
label=_("Network Name"),
required=False)
if api.neutron.is_port_profiles_supported():
widget = None
else:
widget = forms.HiddenInput()
net_profile_id = forms.ChoiceField(label=_("Network Profile"),
required=False,
widget=widget)
admin_state = forms.ChoiceField(choices=[(True, _('UP')),
(False, _('DOWN'))],
label=_("Admin State"),
required=False,
help_text=_("The state to start"
" the network in."))
def __init__(self, request, *args, **kwargs):
super(CreateNetworkInfoAction, self).__init__(request,
*args, **kwargs)
if api.neutron.is_port_profiles_supported():
self.fields['net_profile_id'].choices = (
self.get_network_profile_choices(request))
def get_network_profile_choices(self, request):
profile_choices = [('', _("Select a profile"))]
for profile in self._get_profiles(request, 'network'):
profile_choices.append((profile.id, profile.name))
return profile_choices
def _get_profiles(self, request, type_p):
profiles = []
try:
profiles = api.neutron.profile_list(request, type_p)
except Exception:
msg = _('Network Profiles could not be retrieved.')
exceptions.handle(request, msg)
return profiles
# TODO(absubram): Add ability to view network profile information
# in the network detail if a profile is used.
class Meta(object):
name = _("Network")
help_text = _("Create a new network. "
"In addition, a subnet associated with the network "
"can be created in the next panel.")
class CreateNetworkInfo(workflows.Step):
action_class = CreateNetworkInfoAction
contributes = ("net_name", "admin_state", "net_profile_id")
class CreateSubnetInfoAction(workflows.Action):
with_subnet = forms.BooleanField(label=_("Create Subnet"),
widget=forms.CheckboxInput(attrs={
'class': 'switchable',
'data-slug': 'with_subnet',
'data-hide-tab': 'create_network__'
'createsubnetdetail'
'action',
'data-hide-on-checked': 'false'
}),
initial=True,
required=False)
subnet_name = forms.CharField(max_length=255,
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'with_subnet',
}),
label=_("Subnet Name"),
required=False)
cidr = forms.IPField(label=_("Network Address"),
required=False,
initial="",
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'with_subnet',
'data-is-required': 'true'
}),
help_text=_("Network address in CIDR format "
"(e.g. 192.168.0.0/24, 2001:DB8::/48)"),
version=forms.IPv4 | forms.IPv6,
mask=True)
ip_version = forms.ChoiceField(choices=[(4, 'IPv4'), (6, 'IPv6')],
widget=forms.Select(attrs={
'class': 'switchable switched',
'data-slug': 'ipversion',
'data-switch-on': 'with_subnet'
}),
label=_("IP Version"))
gateway_ip = forms.IPField(
label=_("Gateway IP"),
widget=forms.TextInput(attrs={
'class': 'switched',
'data-switch-on': 'with_subnet gateway_ip'
}),
required=False,
initial="",
help_text=_("IP address of Gateway (e.g. 192.168.0.254) "
"The default value is the first IP of the "
"network address "
"(e.g. 192.168.0.1 for 192.168.0.0/24, "
"2001:DB8::1 for 2001:DB8::/48). "
"If you use the default, leave blank. "
"If you do not want to use a gateway, "
"check 'Disable Gateway' below."),
version=forms.IPv4 | forms.IPv6,
mask=False)
no_gateway = forms.BooleanField(label=_("Disable Gateway"),
widget=forms.CheckboxInput(attrs={
'class': 'switched switchable',
'data-slug': 'gateway_ip',
'data-switch-on': 'with_subnet',
'data-hide-on-checked': 'true'
}),
initial=False,
required=False)
msg = _('Specify "Network Address" or '
'clear "Create Subnet" checkbox.')
class Meta(object):
name = _("Subnet")
help_text = _('Create a subnet associated with the new network, '
'in which case "Network Address" must be specified. '
'If you wish to create a network without a subnet, '
'uncheck the "Create Subnet" checkbox.')
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetInfoAction, self).__init__(request, context, *args,
**kwargs)
if not getattr(settings, 'OPENSTACK_NEUTRON_NETWORK',
{}).get('enable_ipv6', True):
self.fields['ip_version'].widget = forms.HiddenInput()
self.fields['ip_version'].initial = 4
def _check_subnet_data(self, cleaned_data, is_create=True):
cidr = cleaned_data.get('cidr')
ip_version = int(cleaned_data.get('ip_version'))
gateway_ip = cleaned_data.get('gateway_ip')
no_gateway = cleaned_data.get('no_gateway')
if not cidr:
raise forms.ValidationError(self.msg)
if cidr:
subnet = netaddr.IPNetwork(cidr)
if subnet.version != ip_version:
msg = _('Network Address and IP version are inconsistent.')
raise forms.ValidationError(msg)
if (ip_version == 4 and subnet.prefixlen == 32) or \
(ip_version == 6 and subnet.prefixlen == 128):
msg = _("The subnet in the Network Address is "
"too small (/%s).") % subnet.prefixlen
raise forms.ValidationError(msg)
if not no_gateway and gateway_ip:
                if netaddr.IPAddress(gateway_ip).version != ip_version:
msg = _('Gateway IP and IP version are inconsistent.')
raise forms.ValidationError(msg)
if not is_create and not no_gateway and not gateway_ip:
msg = _('Specify IP address of gateway or '
'check "Disable Gateway".')
raise forms.ValidationError(msg)
def clean(self):
cleaned_data = super(CreateSubnetInfoAction, self).clean()
with_subnet = cleaned_data.get('with_subnet')
if not with_subnet:
return cleaned_data
self._check_subnet_data(cleaned_data)
return cleaned_data
class CreateSubnetInfo(workflows.Step):
action_class = CreateSubnetInfoAction
contributes = ("with_subnet", "subnet_name", "cidr",
"ip_version", "gateway_ip", "no_gateway")
class CreateSubnetDetailAction(workflows.Action):
enable_dhcp = forms.BooleanField(label=_("Enable DHCP"),
initial=True, required=False)
ipv6_modes = forms.ChoiceField(
label=_("IPv6 Address Configuration Mode"),
widget=forms.Select(attrs={
'class': 'switched',
'data-switch-on': 'ipversion',
'data-ipversion-6': _("IPv6 Address Configuration Mode"),
}),
initial=utils.IPV6_DEFAULT_MODE,
required=False,
help_text=_("Specifies how IPv6 addresses and additional information "
"are configured. We can specify SLAAC/DHCPv6 stateful/"
"DHCPv6 stateless provided by OpenStack, "
"or specify no option. "
"'No options specified' means addresses are configured "
"manually or configured by a non-OpenStack system."))
allocation_pools = forms.CharField(
widget=forms.Textarea(attrs={'rows': 4}),
label=_("Allocation Pools"),
help_text=_("IP address allocation pools. Each entry is: "
"start_ip_address,end_ip_address "
"(e.g., 192.168.1.100,192.168.1.120) "
"and one entry per line."),
required=False)
dns_nameservers = forms.CharField(
widget=forms.widgets.Textarea(attrs={'rows': 4}),
label=_("DNS Name Servers"),
help_text=_("IP address list of DNS name servers for this subnet. "
"One entry per line."),
required=False)
host_routes = forms.CharField(
widget=forms.widgets.Textarea(attrs={'rows': 4}),
label=_("Host Routes"),
help_text=_("Additional routes announced to the hosts. "
"Each entry is: destination_cidr,nexthop "
"(e.g., 192.168.200.0/24,10.56.1.254) "
"and one entry per line."),
required=False)
class Meta(object):
name = _("Subnet Details")
help_text = _('Specify additional attributes for the subnet.')
def __init__(self, request, context, *args, **kwargs):
super(CreateSubnetDetailAction, self).__init__(request, context,
*args, **kwargs)
if not getattr(settings, 'OPENSTACK_NEUTRON_NETWORK',
{}).get('enable_ipv6', True):
self.fields['ipv6_modes'].widget = forms.HiddenInput()
def populate_ipv6_modes_choices(self, request, context):
return [(value, _("%s (Default)") % label)
if value == utils.IPV6_DEFAULT_MODE
else (value, label)
for value, label in utils.IPV6_MODE_CHOICES]
def _convert_ip_address(self, ip, field_name):
try:
return netaddr.IPAddress(ip)
except (netaddr.AddrFormatError, ValueError):
msg = (_('%(field_name)s: Invalid IP address (value=%(ip)s)')
% {'field_name': field_name, 'ip': ip})
raise forms.ValidationError(msg)
def _convert_ip_network(self, network, field_name):
try:
return netaddr.IPNetwork(network)
except (netaddr.AddrFormatError, ValueError):
msg = (_('%(field_name)s: Invalid IP address (value=%(network)s)')
% {'field_name': field_name, 'network': network})
raise forms.ValidationError(msg)
def _check_allocation_pools(self, allocation_pools):
for p in allocation_pools.split('\n'):
p = p.strip()
if not p:
continue
pool = p.split(',')
if len(pool) != 2:
msg = _('Start and end addresses must be specified '
'(value=%s)') % p
raise forms.ValidationError(msg)
start, end = [self._convert_ip_address(ip, "allocation_pools")
for ip in pool]
if start > end:
msg = _('Start address is larger than end address '
'(value=%s)') % p
raise forms.ValidationError(msg)
def _check_dns_nameservers(self, dns_nameservers):
for ns in dns_nameservers.split('\n'):
ns = ns.strip()
if not ns:
continue
self._convert_ip_address(ns, "dns_nameservers")
def _check_host_routes(self, host_routes):
for r in host_routes.split('\n'):
r = r.strip()
if not r:
continue
route = r.split(',')
if len(route) != 2:
msg = _('Host Routes format error: '
'Destination CIDR and nexthop must be specified '
'(value=%s)') % r
raise forms.ValidationError(msg)
self._convert_ip_network(route[0], "host_routes")
self._convert_ip_address(route[1], "host_routes")
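    # Illustrative textarea inputs that pass the three checks above (addresses
    # are example values only):
    #
    #   allocation_pools: "192.168.1.100,192.168.1.120\n192.168.1.200,192.168.1.220"
    #   dns_nameservers:  "8.8.8.8\n8.8.4.4"
    #   host_routes:      "192.168.200.0/24,10.56.1.254"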
def clean(self):
cleaned_data = super(CreateSubnetDetailAction, self).clean()
self._check_allocation_pools(cleaned_data.get('allocation_pools'))
self._check_host_routes(cleaned_data.get('host_routes'))
self._check_dns_nameservers(cleaned_data.get('dns_nameservers'))
return cleaned_data
class CreateSubnetDetail(workflows.Step):
action_class = CreateSubnetDetailAction
contributes = ("enable_dhcp", "ipv6_modes", "allocation_pools",
"dns_nameservers", "host_routes")
class CreateNetwork(workflows.Workflow):
slug = "create_network"
name = _("Create Network")
finalize_button_name = _("Create")
success_message = _('Created network "%s".')
failure_message = _('Unable to create network "%s".')
default_steps = (CreateNetworkInfo,
CreateSubnetInfo,
CreateSubnetDetail)
wizard = True
def get_success_url(self):
return reverse("horizon:project:networks:index")
def get_failure_url(self):
return reverse("horizon:project:networks:index")
def format_status_message(self, message):
name = self.context.get('net_name') or self.context.get('net_id', '')
return message % name
def _create_network(self, request, data):
try:
params = {'name': data['net_name'],
'admin_state_up': (data['admin_state'] == 'True')}
if api.neutron.is_port_profiles_supported():
params['net_profile_id'] = data['net_profile_id']
network = api.neutron.network_create(request, **params)
self.context['net_id'] = network.id
msg = (_('Network "%s" was successfully created.') %
network.name_or_id)
LOG.debug(msg)
return network
except Exception as e:
msg = (_('Failed to create network "%(network)s": %(reason)s') %
{"network": data['net_name'], "reason": e})
LOG.info(msg)
redirect = self.get_failure_url()
exceptions.handle(request, msg, redirect=redirect)
return False
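    # Shape of the underlying API call made above (illustrative values):
    #   api.neutron.network_create(request, name='net1', admin_state_up=True)
    # plus net_profile_id when port profiles are supported.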
def _setup_subnet_parameters(self, params, data, is_create=True):
"""Setup subnet parameters
        This method sets up the subnet parameters which are available
in both create and update.
"""
is_update = not is_create
params['enable_dhcp'] = data['enable_dhcp']
if int(data['ip_version']) == 6:
ipv6_modes = utils.get_ipv6_modes_attrs_from_menu(
data['ipv6_modes'])
if ipv6_modes[0] and is_create:
params['ipv6_ra_mode'] = ipv6_modes[0]
if ipv6_modes[1] and is_create:
params['ipv6_address_mode'] = ipv6_modes[1]
if data['allocation_pools']:
pools = [dict(zip(['start', 'end'], pool.strip().split(',')))
for pool in data['allocation_pools'].split('\n')
if pool.strip()]
params['allocation_pools'] = pools
if data['host_routes'] or is_update:
routes = [dict(zip(['destination', 'nexthop'],
route.strip().split(',')))
for route in data['host_routes'].split('\n')
if route.strip()]
params['host_routes'] = routes
if data['dns_nameservers'] or is_update:
nameservers = [ns.strip()
for ns in data['dns_nameservers'].split('\n')
if ns.strip()]
params['dns_nameservers'] = nameservers
def _create_subnet(self, request, data, network=None, tenant_id=None,
no_redirect=False):
if network:
network_id = network.id
network_name | |
TO FIND THIS MAPPING NOW
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementTet(C)[0]
# WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS
faces = self.elements[boundary_face_to_element[:,0][:,None],
node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)
# CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED
assert np.sum(faces[:,:3].astype(np.int64) - self.faces[:,:3].astype(np.int64)) == 0
# NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES
from Florence.Tensor import shuffle_along_axis
row_mapper = shuffle_along_axis(faces[:,:3],self.faces[:,:3],consider_sort=True)
# UPDATE THE MAP
boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]
self.boundary_face_to_element = boundary_face_to_element
return self.boundary_face_to_element
def GetElementsFaceNumberingTet(self):
"""Finds which faces belong to which elements and which faces of the elements
they are e.g. 0, 1, 2 or 3.
output:
face_elements: [2D array] nfaces x 2 array containing elements which have face
on the boundary with their flags
Note that this method also sets the self.face_to_element to face_elements,
so the return value is not strictly necessary
"""
if isinstance(self.face_to_element,np.ndarray):
if self.face_to_element.shape[0] > 1:
return self.face_to_element
assert self.elements is not None
# GET ALL FACES FROM ELEMENT CONNECTIVITY
if self.all_faces is None:
self.GetFacesTet()
all_faces = np.concatenate((self.elements[:,:3],self.elements[:,[0,1,3]],
self.elements[:,[0,2,3]],self.elements[:,[1,2,3]]),axis=0).astype(np.int64)
_,idx = unique2d(all_faces,consider_sort=True,order=False, return_index=True)
face_elements = np.zeros((self.all_faces.shape[0],2),dtype=np.int64)
face_elements[:,0] = idx % self.elements.shape[0]
face_elements[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_elements
return self.face_to_element
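    # Worked example of the decoding above (assuming, say, 100 elements): the
    # concatenated face array stacks face 0 of every element, then face 1, etc.,
    # so a unique-row index idx = 237 maps back to element 237 % 100 = 37 and
    # local face number 237 // 100 = 2.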
def ArrangeFacesTet(self):
"""Arranges all the faces of tetrahedral elements
with triangular type node ordering """
if self.all_faces is None:
self.all_faces = self.GetFacesTet()
if self.face_to_element is None:
self.GetElementsFaceNumberingTet()
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
node_arranger = NodeArrangementTet(p-1)[0]
# for i in range(self.face_to_element.shape[0]):
# self.all_faces = self.elements[self.face_to_element[i,0],node_arranger[self.face_to_element[i,1],:]]
self.all_faces = self.elements[self.face_to_element[:,0][:,None],node_arranger[self.face_to_element[:,1],:]]
def GetElementsEdgeNumberingQuad(self):
"""Finds edges of elements and their flags saying which edge they are [0,1,2,3].
At most a quad can have all its four edges on the boundary.
output:
edge_elements: [1D array] array containing elements which have edges
on the boundary
Note that this method sets the self.edge_to_element to edge_elements,
so the return value is not strictly necessary
"""
if isinstance(self.edge_to_element,np.ndarray):
if self.edge_to_element.shape[0] > 1:
return self.edge_to_element
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
if self.all_edges is None:
self.GetEdgesQuad()
p = self.InferPolynomialDegree()
# FIND WHICH FACE NODES ARE IN WHICH ELEMENT
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.int64)
all_edges, idx = unique2d(all_edges,consider_sort=True,order=False, return_index=True)
edge_elements = np.zeros((all_edges.shape[0],2),dtype=np.int64)
# edge_elements = np.zeros((self.edges.shape[0],2),dtype=np.int64)
edge_elements[:,0] = idx % self.elements.shape[0]
edge_elements[:,1] = idx // self.elements.shape[0]
self.edge_to_element = edge_elements
return self.edge_to_element
def GetElementsWithBoundaryEdgesQuad(self):
"""Finds elements which have edges on the boundary.
At most a quad can have all its four edges on the boundary.
output:
boundary_edge_to_element: [2D array] array containing elements which have face
            on the boundary [column 0] and a flag stating which edges they are [column 1]
"""
if isinstance(self.boundary_edge_to_element,np.ndarray):
if self.boundary_edge_to_element.shape[1] > 1 and self.boundary_edge_to_element.shape[0] > 1:
return self.boundary_edge_to_element
# DO NOT COMPUTE EDGES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.edges is not None
p = self.InferPolynomialDegree()
# FIND WHICH FACE NODES ARE IN WHICH ELEMENT
node_arranger = NodeArrangementQuad(p-1)[0]
# GET ALL EDGES FROM THE ELEMENT CONNECTIVITY
all_edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],
self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(self.edges.dtype)
# GET UNIQUE ROWS
uniques, idx, inv = unique2d(all_edges,consider_sort=True,order=False,return_index=True,return_inverse=True)
# ROWS THAT APPEAR ONLY ONCE CORRESPOND TO BOUNDARY EDGES
freqs_inv = itemfreq(inv)
edges_ext_flags = freqs_inv[freqs_inv[:,1]==1,0]
# NOT ARRANGED
edges = uniques[edges_ext_flags,:]
# DETERMINE WHICH FACE OF THE ELEMENT THEY ARE
boundary_edge_to_element = np.zeros((edges_ext_flags.shape[0],2),dtype=np.int64)
        # FURTHER RE-ARRANGEMENT / ARRANGE THE NODES BASED ON THE ORDER THEY APPEAR
        # IN ELEMENT CONNECTIVITY
        # THIS STEP IS NOT STRICTLY NECESSARY - IT IS JUST FOR RE-ARRANGEMENT OF EDGES
all_edges_in_edges = in2d(all_edges,self.edges,consider_sort=True)
all_edges_in_edges = np.where(all_edges_in_edges==True)[0]
boundary_edge_to_element[:,0] = all_edges_in_edges % self.elements.shape[0]
boundary_edge_to_element[:,1] = all_edges_in_edges // self.elements.shape[0]
# ARRANGE FOR ANY ORDER OF BASES/ELEMENTS AND ASSIGN DATA MEMBERS
self.boundary_edge_to_element = boundary_edge_to_element
return self.boundary_edge_to_element
def GetElementsWithBoundaryFacesHex(self):
"""Finds elements which have faces on the boundary.
        At most a hexahedron can have all its six faces on the boundary.
output:
boundary_face_to_element: [2D array] array containing elements which have face
on the boundary [column 0] and a flag stating which faces they are [column 1]
"""
# DO NOT COMPUTE FACES AND RAISE BECAUSE OF CYCLIC DEPENDENCIES
assert self.elements is not None
assert self.faces is not None
if self.boundary_face_to_element is not None:
return self.boundary_face_to_element
# THIS METHOD ALWAYS RETURNS THE FACE TO ELEMENT ARRAY, AND DOES NOT CHECK
# IF THIS HAS BEEN COMPUTED BEFORE, THE REASON BEING THAT THE FACES CAN COME
# EXTERNALLY WHOSE ARRANGEMENT WOULD NOT CORRESPOND TO THE ONE USED INTERNALLY
# HENCE THIS MAPPING BECOMES NECESSARY
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementHex(C)[0]
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(self.faces.dtype)
all_faces_in_faces = in2d(all_faces,self.faces[:,:4],consider_sort=True)
all_faces_in_faces = np.where(all_faces_in_faces==True)[0]
boundary_face_to_element = np.zeros((all_faces_in_faces.shape[0],2),dtype=np.int64)
boundary_face_to_element[:,0] = all_faces_in_faces % self.elements.shape[0]
boundary_face_to_element[:,1] = all_faces_in_faces // self.elements.shape[0]
# SO FAR WE HAVE COMPUTED THE ELEMENTS THAT CONTAIN FACES, HOWEVER
# NOTE THAT WE STILL HAVE NOT COMPUTED A MAPPING BETWEEN ELEMENTS AND
# FACES. WE ONLY KNOW WHICH ELEMENTS CONTAIN FACES FROM in2d.
# WE NEED TO FIND THIS MAPPING NOW
# WE NEED TO DO THIS DUMMY RECONSTRUCTION OF FACES BASED ON ELEMENTS
faces = self.elements[boundary_face_to_element[:,0][:,None],
node_arranger[boundary_face_to_element[:,1],:]].astype(self.faces.dtype)
# CHECK FOR THIS CONDITION AS ARRANGEMENT IS NO LONGER MAINTAINED
assert np.sum(faces[:,:4].astype(np.int64) - self.faces[:,:4].astype(np.int64)) == 0
# NOW GET THE ROW MAPPING BETWEEN OLD FACES AND NEW FACES
from Florence.Tensor import shuffle_along_axis
row_mapper = shuffle_along_axis(faces[:,:4],self.faces[:,:4],consider_sort=True)
# UPDATE THE MAP
boundary_face_to_element[:,:] = boundary_face_to_element[row_mapper,:]
self.boundary_face_to_element = boundary_face_to_element
return self.boundary_face_to_element
def GetElementsFaceNumberingHex(self):
"""Finds which faces belong to which elements and which faces of the elements
        they are, e.g. 0 to 5 for a hexahedron.
output:
face_elements: [2D array] nfaces x 2 array containing elements which have face
on the boundary with their flags
Note that this method also sets the self.face_to_element to face_elements,
so the return value is not strictly necessary
"""
if isinstance(self.face_to_element,np.ndarray):
if self.face_to_element.shape[0] > 1:
return self.face_to_element
assert self.elements is not None
# GET ALL FACES FROM ELEMENT CONNECTIVITY
if self.all_faces is None:
self.GetFacesHex()
C = self.InferPolynomialDegree() - 1
node_arranger = NodeArrangementHex(C)[0]
all_faces = np.concatenate((np.concatenate((
np.concatenate((np.concatenate((np.concatenate((self.elements[:,node_arranger[0,:]],
self.elements[:,node_arranger[1,:]]),axis=0),self.elements[:,node_arranger[2,:]]),axis=0),
self.elements[:,node_arranger[3,:]]),axis=0),self.elements[:,node_arranger[4,:]]),axis=0),
self.elements[:,node_arranger[5,:]]),axis=0).astype(self.all_faces.dtype)
_,idx = unique2d(all_faces,consider_sort=True,order=False, return_index=True)
face_elements = np.zeros((self.all_faces.shape[0],2),dtype=np.int64)
face_elements[:,0] = idx % self.elements.shape[0]
face_elements[:,1] = idx // self.elements.shape[0]
self.face_to_element = face_elements
return self.face_to_element
def ArrangeFacesHex(self):
"""Arranges all the faces of hexahedral elements
with quadrilateral type node ordering """
if self.all_faces is None:
self.all_faces = self.GetFacesHex()
if self.face_to_element is None:
self.GetElementsFaceNumberingHex()
# DETERMINE DEGREE
p = self.InferPolynomialDegree()
node_arranger = NodeArrangementHex(p-1)[0]
self.all_faces = self.elements[self.face_to_element[:,0][:,None],node_arranger[self.face_to_element[:,1],:]]
def GetNodeCommonality(self):
"""Finds the elements sharing a node.
        The return values are linked lists [lists of numpy arrays].
        Each numpy array within the list gives the elements that contain a given node.
        As a result the size of the linked list is nnode.
outputs:
els: [list of numpy arrays] element numbers containing nodes
pos: [list of numpy arrays] elemental positions of the nodes
            flat_pos: [list of numpy arrays] position of nodes in the
flattened element connectivity.
"""
self.__do_essential_memebers_exist__()
elements = self.elements.ravel()
idx_sort = np.argsort(elements)
sorted_elements = elements[idx_sort]
vals, idx_start = np.unique(sorted_elements, return_index=True)
# Sets of indices
flat_pos = np.split(idx_sort, idx_start[1:])
els = np.split(idx_sort // int(self.elements.shape[1]), idx_start[1:])
pos = np.split(idx_sort % int(self.elements.shape[1]), idx_start[1:])
# In case one wants to return only the duplicates i.e. filter keeping only items occurring more than once
# vals, idx_start, count = np.unique(sorted_elements, return_counts=True, return_index=True)
# vals = vals[count > 1]
# res = filter(lambda x: x.size > 1, res)
return els, pos, flat_pos
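    # Sketch of typical use (assuming `mesh` is a Mesh with `elements` populated):
    #
    #   els, pos, flat_pos = mesh.GetNodeCommonality()
    #   # els[n]      -> element numbers that contain node n
    #   # pos[n]      -> local position of node n inside each of those elements
    #   # flat_pos[n] -> the same positions as indices into elements.ravel()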
def Read(self, filename=None, element_type="tri", reader_type=None, reader_type_format=None,
reader_type_version=None, order=0, read_surface_info=False, **kwargs):
"""Convenience mesh reader method to dispatch call to subsequent apporpriate methods"""
if reader_type != 'read_separate':
if not isinstance(filename,str):
raise ValueError("filename must be a string")
return
if reader_type is None:
if filename.split('.')[-1] == "msh":
reader_type = "gmsh"
elif filename.split('.')[-1] == "obj":
reader_type = "obj"
elif filename.split('.')[-1] == "unv":
reader_type = "unv"
elif filename.split('.')[-1] == "fro":
reader_type = "fro"
elif filename.split('.')[-1] == "dat":
for key in kwargs.keys():
inkey = insensitive(key)
if "connectivity" in inkey and "delimiter" not in inkey:
reader_type = "read_separate"
break
if reader_type is None:
                raise ValueError("Mesh file format was not understood. Please specify it
# Copyright (c) 2020, TU Wien, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of TU Wien, Department of Geodesy and Geoinformation
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL TU WIEN DEPARTMENT OF GEODESY AND
# GEOINFORMATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Tests for level 2 readers.
"""
from datetime import datetime
import numpy as np
import numpy.testing as nptest
import os
import pytest
import unittest
import sys
from ascat.read_native.bufr import AscatL2SsmBufr
from ascat.read_native.bufr import AscatL2SsmBufrChunked
from ascat.read_native.bufr import AscatL2SsmBufrFile
from ascat.read_native.nc import AscatL2SsmNcFile
import ascat.level2 as level2
float32_nan = np.finfo(np.float32).min
@pytest.mark.skipif(sys.platform == 'win32', reason="Does not work on Windows")
class Test_AscatL2SsmBufr_ioclass_kws(unittest.TestCase):
def setUp(self):
data_path = os.path.join(os.path.dirname(__file__), 'ascat_test_data',
'hsaf', 'h07')
self.reader = AscatL2SsmBufr(data_path,
msg_name_lookup={65: 'ssm',
74: 'ssm mean'})
def tearDown(self):
self.reader = None
def test_offset_getting(self):
"""
test getting the image offsets for a known day
2010-05-01
"""
timestamps = self.reader.tstamps_for_daterange(
datetime(2010, 5, 1), datetime(2010, 5, 1, 12))
timestamps_should = [datetime(2010, 5, 1, 8, 33, 1)]
assert sorted(timestamps) == sorted(timestamps_should)
def test_image_reading(self):
data, meta, timestamp, lons, lats, time_var = self.reader.read(
datetime(2010, 5, 1, 8, 33, 1))
ssm_should = np.array([51.2, 65.6, 46.2, 56.9, 61.4, 61.5, 58.1, 47.1,
72.7, 13.8, 60.9, 52.1, 78.5, 57.8, 56.2, 79.8,
67.7, 53.8, 86.5, 29.4, 50.6, 88.8, 56.9, 68.9,
52.4, 64.4, 81.5, 50.5, 84., 79.6, 47.4, 79.5,
46.9, 60.7, 81.3, 52.9, 84.5, 25.5, 79.2, 93.3,
52.6, 93.9, 74.4, 91.4, 76.2, 92.5, 80., 88.3,
79.1, 97.2, 56.8])
lats_should = np.array([70.21162, 69.32506, 69.77325, 68.98149,
69.12295, 65.20364, 67.89625, 67.79844,
67.69112, 67.57446, 67.44865, 67.23221,
66.97207, 66.7103, 66.34695, 65.90996,
62.72462, 61.95761, 61.52935, 61.09884,
60.54359, 65.60223, 65.33588, 65.03098,
64.58972, 61.46131, 60.62553, 59.52057,
64.27395, 63.80293, 60.6569, 59.72684,
58.74838, 63.42774])
ssm_mean_should = np.array([0.342, 0.397, 0.402, 0.365, 0.349,
0.354, 0.37, 0.36, 0.445, 0.211,
0.394, 0.354, 0.501, 0.361, 0.366,
0.45, 0.545, 0.329, 0.506, 0.229,
0.404, 0.591, 0.348, 0.433, 0.29,
0.508, 0.643, 0.343, 0.519, 0.61,
0.414, 0.594, 0.399, 0.512, 0.681,
0.457, 0.622, 0.396, 0.572, 0.7,
0.302, 0.722, 0.493, 0.747, 0.521,
0.72, 0.578, 0.718, 0.536, 0.704,
0.466]) * 100
nptest.assert_allclose(lats[25:-1:30], lats_should, atol=1e-5)
nptest.assert_allclose(data['ssm'][
15:-1:20], ssm_should, atol=0.01)
nptest.assert_allclose(data['ssm mean'][15:-1:20],
ssm_mean_should,
atol=0.01)
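# These image tests can be run selectively with pytest, e.g. (assuming this file
# lives at tests/test_level2.py in the repository):
#   pytest tests/test_level2.py -k Test_AscatL2SsmBufr_ioclass_kws -v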
class Test_AscatL2SsmBufrFile(unittest.TestCase):
def setUp(self):
data_path = os.path.join(
os.path.dirname(__file__), 'ascat_test_data', 'eumetsat',
'ASCAT_L2_SM_125', 'bufr', 'Metop_B')
fname = os.path.join(
data_path,
'M01-ASCA-ASCSMR02-NA-5.0-20170220050900.000000000Z-20170220055833-1207110.bfr')
self.reader = AscatL2SsmBufrFile(fname)
def tearDown(self):
self.reader = None
def test_image_reading(self):
data, meta, timestamp, lons, lats, time_var = self.reader.read()
ssm_should = np.array(
[29.2, 30.2, 35.7, 38.6, 37.5, 37.6, 40.5, 44.5, 40.7,
39.7, 41.5, 38.8, 34.5, 36.8, 39.4, 41.2, 42.4, 42.9,
39.3, 30.5, 26.7, 26.5, 26.7, 23.9, 26.2])
lats_should = np.array(
[64.74398, 64.81854, 64.89284, 64.96688, 65.04066, 65.11416,
65.18739, 65.26036, 65.33304, 65.40545, 65.47758, 65.54942,
65.62099, 65.69226, 65.76324, 65.83393, 65.90432, 65.97442,
66.04422, 66.11371, 66.1829, 66.25177, 66.32034, 66.38859,
66.45653])
ssm_mean_should = np.array(
[36.7, 35.4, 33.4, 32.5, 32.5, 32., 31.2, 29.4, 28.7,
27.6, 25.8, 25.4, 25.5, 25.3, 24.4, 23.4, 22.3, 21.3,
20.4, 20.4, 19.9, 19.7, 20.3, 21.5, 22.9])
nptest.assert_allclose(lats[:25], lats_should, atol=1e-5)
nptest.assert_allclose(data['Surface Soil Moisture (Ms)'][
:25], ssm_should, atol=1e-5)
nptest.assert_allclose(data['Mean Surface Soil Moisture'][:25],
ssm_mean_should,
atol=1e-5)
def test_image_reading_masked(self):
data, meta, timestamp, lons, lats, time_var = self.reader.read(
ssm_masked=True)
ssm_should = np.array(
[15.6, 10.8, 15.3, 15.9, 19.8, 27., 27.8, 26.8, 28.6,
35.6, 36., 32.3, 27.6, 31.2, 36.8, 13.4, 18.7, 23.1,
24.5, 22.1, 17.1, 17.9, 17.8, 21.1, 23.])
lats_should = np.array(
[54.27036, 54.3167, 54.36279, 54.40862, 54.45419, 54.49951,
54.54456, 54.58936, 54.6339, 54.67818, 54.72219, 54.76594,
54.80943, 54.85265, 54.89561, 56.95692, 56.98178, 57.00631,
57.03053, 57.05442, 57.07799, 57.10123, 57.12415, 57.14675,
57.16902])
ssm_mean_should = np.array(
[24.4, 22.2, 21., 19.6, 18.7, 19.1, 19.1, 19.9, 19.9,
20.1, 20.9, 21., 19.6, 17.3, 16.9, 25.6, 25.6, 24.9,
23.6, 22.9, 22.4, 23.2, 24.1, 24.5, 26.1])
nptest.assert_allclose(lats[10000:10025], lats_should, atol=1e-5)
nptest.assert_allclose(data['Surface Soil Moisture (Ms)']
[10000:10025], ssm_should,
atol=1e-5)
nptest.assert_allclose(data['Mean Surface Soil Moisture']
[10000:10025], ssm_mean_should,
atol=1e-5)
class Test_AscatL2SsmNcFile(unittest.TestCase):
def setUp(self):
data_path = os.path.join(
os.path.dirname(__file__), 'ascat_test_data', 'eumetsat',
'ASCAT_L2_SM_125', 'nc', 'Metop_A')
fname = os.path.join(
data_path,
'W_XX-EUMETSAT-Darmstadt,SURFACE+SATELLITE,METOPA+ASCAT_C_EUMP_20170220041500_53652_eps_o_125_ssm_l2.nc')
self.reader = AscatL2SsmNcFile(fname)
def tearDown(self):
self.reader = None
def test_image_reading(self):
data, meta, timestamp, lons, lats, time_var = self.reader.read()
ssm_should = np.array([2.96000004, 0., 0., 0., 0., 0., 0., 0., 0.,
1.82999992, 3.32999992, 4.78999996, 4.31999969,
2.53999996, 0., 3.83999991, 5.76999998, 1.5,
2.44000006, 4.11999989, 2.25999999, 2.65999985,
5.5999999, 5.53999996, 4.85999966])
lats_should = np.array([62.60224, 62.67133, 62.74015, 62.80871, 62.877,
62.94502, 63.01276, 63.08024, 63.14743,
63.21435, 63.28098, 63.34734, 63.41341,
63.47919, 63.54468, 63.60988, 63.67479,
63.7394, 63.80372, 63.86773, 63.93144,
63.99485, 64.05795, 64.12075, 64.18323])
ssm_mean_should = np.array([21.26000023, 21.27999878, 21.38999939,
22.43000031, 23.36999893, 24.51000023,
26.01000023, 27.04999924, 26.94999886,
26.63999939, 27.09999847, 27.56999969,
27.43000031, 26.64999962, 26.53999901,
27.48999977, 28.20999908, 28.38999939,
28.79999924, 29.21999931, 30.01000023,
30.97999954, 31.27999878, 31.8599987,
32.05999756])
nptest.assert_allclose(lats[:25], lats_should, atol=1e-5)
nptest.assert_allclose(data['soil_moisture'][
:25], ssm_should, atol=1e-5)
nptest.assert_allclose(data['mean_soil_moisture'][:25],
ssm_mean_should,
atol=1e-5)
def test_image_reading_masked(self):
data, meta, timestamp, lons, lats, time_var = self.reader.read(
ssm_masked=True)
ssm_should = np.array(
[33.39999771118164, 27.06999969482422,
20.649999618530273, 18.28999900817871,
24.229999542236328, 24.939998626708984,
23.639999389648438, 20.3799991607666,
14.15999984741211, 10.059999465942383,
9.539999961853027, 9.019999504089355,
9.420000076293945, 12.279999732971191,
21.529998779296875, 33.880001068115234,
39.57999801635742, 35.34000015258789,
38.88999938964844, 44.459999084472656,
46.66999816894531, 40.12999725341797,
38.39999771118164, 43.959999084472656,
33.43000030517578])
lats_should = np.array(
[65.11197384, 65.17437784, 65.23645384, 65.29819884, 65.35961083,
65.42068783, 65.48142683, 65.54182483, 65.60187983, 65.66158983,
65.72095083, 65.77996183, 65.83861883, 68.62952883, 68.66132883,
68.69261383, 68.72337983, 68.75362483, 68.78334683, 68.81254383,
68.84121383, 68.86935383, 68.89696283, 68.92403783, 68.95057683])
ssm_mean_should = np.array([26.85999870300293, 25.90999984741211,
25.670000076293945, 25.81999969482422,
24.65999984741211, 22.6299991607666,
20.389999389648438, 18.94999885559082,
17.68000030517578, 16.28999900817871,
15.130000114440918, 14.739999771118164,
15.5,
26.51999855041504, 31.529998779296875,
36.09000015258789, 40.36000061035156,
42.61000061035156, 45.529998779296875,
47.939998626708984, 47.45000076293945,
44.689998626708984, 41.12999725341797,
37.59000015258789, 33.09000015258789])
nptest.assert_allclose(lats[50000:50025], lats_should, atol=1e-5)
nptest.assert_allclose(data['soil_moisture'][50000:50025], ssm_should,
atol=1e-5)
nptest.assert_allclose(data['mean_soil_moisture'][50000:50025],
ssm_mean_should,
atol=1e-5)
class Test_AscatL2SsmNcFile_vsAscatL2SsmBufrFile(unittest.TestCase):
def setUp(self):
data_path = os.path.join(
os.path.dirname(__file__), 'ascat_test_data', 'eumetsat',
'ASCAT_L2_SM_125')
fname_nc = os.path.join(
data_path, 'nc', 'Metop_A',
'W_XX-EUMETSAT-Darmstadt,SURFACE+SATELLITE,METOPA+ASCAT_C_EUMP_20170220041500_53652_eps_o_125_ssm_l2.nc')
self.reader_nc = AscatL2SsmNcFile(fname_nc)
fname_bufr = os.path.join(
data_path, 'bufr', 'Metop_A',
'M02-ASCA-ASCSMR02-NA-5.0-20170220041500.000000000Z-20170220055656-1207110.bfr')
self.reader_bufr = AscatL2SsmBufrFile(fname_bufr)
def tearDown(self):
self.reader_nc = None
self.reader_bufr = None
def test_image_reading(self):
data_nc, meta, timestamp, lons_nc, lats_nc, time_var_nc = self.reader_nc.read()
data_bufr, meta, timestamp, lons_bufr, lats_bufr, time_var_bufr = self.reader_bufr.read()
nptest.assert_allclose(lats_nc, lats_bufr, atol=1e-4)
nc_bufr_matching = {
'slope40': 'Slope At 40 Deg Incidence Angle',
'sigma40_error': 'Estimated Error In Sigma0 At 40 Deg Incidence Angle',
'utc_line_nodes': None,
'jd': 'jd',
'wet_backscatter': 'Wet Backscatter',
'swath_indicator': None,
'frozen_soil_probability': 'Frozen Land Surface Fraction',
'wetland_flag': 'Inundation And Wetland Fraction',
# The processing flag definition between BUFR and netCDF is slightly different
# 'proc_flag1': 'Soil Moisture Processing Flag',
'proc_flag2': None,
'abs_line_number': None,
'sat_track_azi': None,
'sigma40': 'Backscatter',
'soil_moisture': 'Surface Soil Moisture (Ms)',
'soil_moisture_error': 'Estimated Error In Surface Soil Moisture',
'rainfall_flag': 'Rain Fall Detection',
'soil_moisture_sensitivity': 'Soil Moisture Sensitivity',
'corr_flags': 'Soil Moisture Correction Flag',
'dry_backscatter': 'Dry Backscatter',
'aggregated_quality_flag': None,
'mean_soil_moisture': 'Mean Surface Soil Moisture',
'as_des_pass': None,
'slope40_error': 'Estimated Error In Slope At 40 Deg Incidence Angle',
'topography_flag': 'Topographic Complexity',
'snow_cover_probability': 'Snow Cover'}
# 'Direction Of Motion Of Moving Observing Platform']
        # BUFR files contain less accurate data, so we only compare to within 0.1
        # absolute accuracy.
for nc_name in nc_bufr_matching:
bufr_name = nc_bufr_matching[nc_name]
if bufr_name is None:
continue
# flags and probabilities do not have the same NaN value so we mask
# the invalid values for comparison
if nc_name in ['snow_cover_probability',
'rainfall_flag',
'topography_flag',
'frozen_soil_probability',
                           'wetland_flag']:
valid = np.where(data_nc[nc_name] != 255)
data_nc[nc_name] = data_nc[nc_name][valid]
data_bufr[bufr_name] = data_bufr[bufr_name][valid]
nptest.assert_allclose(data_nc[nc_name],
data_bufr[bufr_name], atol=0.1)
def test_AscatL2SsmBufrChunked():
data_path = os.path.join(
os.path.dirname(
__file__), 'ascat_test_data', 'eumetsat', 'ASCAT_L2_SM_125',
'PDU', 'Metop_B')
day_search_str = 'W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,METOPB+ASCAT_C_EUMP_%Y%m%d*_125_ssm_l2.bin'
file_search_str = 'W_XX-EUMETSAT-Darmstadt,SOUNDING+SATELLITE,METOPB+ASCAT_C_EUMP_{datetime}*_125_ssm_l2.bin'
datetime_format = '%Y%m%d%H%M%S'
filename_datetime_format = (63, 77, '%Y%m%d%H%M%S')
reader = AscatL2SsmBufrChunked(data_path, month_path_str='',
day_search_str=day_search_str,
file_search_str=file_search_str,
datetime_format=datetime_format,
filename_datetime_format=filename_datetime_format)
intervals = reader.tstamps_for_daterange(datetime(2017, 2, 20, 5),
datetime(2017, 2, 20, | |
import numpy as np
import torch as tr
import itertools as it
from abstract_machine import make_abstract_machine, memorize_env
from neural_virtual_machine import NeuralVirtualMachine as NVMNet
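# Sylvester construction of a Hadamard matrix: keep doubling a +/-1 matrix via
# [[H, H], [H, -H]] until it has at least N rows. Distinct rows are mutually
# orthogonal, which makes them convenient, well-separated codes for tokens.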
def hadamard_matrix(N):
H = tr.tensor([[1.]])
while H.shape[0] < N:
H = tr.cat((
tr.cat((H, H), dim=1),
tr.cat((H, -H), dim=1),
), dim=0)
# return H / H.shape[0]**.5
return H
class NVMRegister:
def __init__(self, name, size, codec, σ=None):
if σ is None: σ = lambda v: v
self.name = name
self.size = size
self.codec = codec
self.σ = σ
self.content = tr.zeros(self.size)
self.old_content = tr.zeros(self.size)
self.new_content = tr.zeros(self.size)
# faster decoding
self.decode_tokens = list(codec.keys())
self.decode_matrix = tr.stack([codec[token] for token in self.decode_tokens])
def __str__(self):
return "reg %s: %s, %s, %s" % (self.name,
self.decode(self.old_content),
self.decode(self.content),
self.decode(self.new_content)
)
def reset(self, content):
self.content = content
self.new_content = tr.zeros(self.size)
self.old_content = tr.zeros(self.size)
def activate(self):
self.content = self.σ(self.content)
def update(self):
# shift buffers
self.old_content = self.content
self.content = self.new_content
self.new_content = tr.zeros(self.size)
def encode(self, token):
if token not in self.codec:
raise KeyError("%s not in %s codec" % (token, self.name))
return self.codec[token]
def decode(self, content):
# for token, pattern in self.codec.items():
# if (pattern*content > 0).all(): return token
# return None
return self.decode_tokens[self.decode_matrix.mv(content).argmax()]
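# FSER: one-shot error-correcting outer-product store. After W = W + FSER(W, x, y),
# W.mv(x) equals y exactly whenever x is a +/-1 code of length W.shape[1]
# (since x.dot(x) == W.shape[1]); repeated stores only correct the residual error.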
def FSER(W, x, y):
# dW = tr.outer(y - W.mv(x), x) / float(W.shape[1])
dW = (y - W.mv(x)).reshape(-1,1) * x / float(W.shape[1]) # backwards compatible
return dW
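# NVMConnection: a learnable weight matrix from a source register to a destination
# register. Associations are written with the FSER rule (__setitem__ and store()) and
# read back by recall(), which adds W @ src.content into the destination's new_content.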
class NVMConnection:
def __init__(self, name, src, dst):
self.name = name
self.src = src
self.dst = dst
self.W = tr.zeros(dst.size, src.size)
def __str__(self):
return "%s (%s -> %s)" % (self.name, self.src.name, self.dst.name)
def __setitem__(self, key, val):
x = self.src.encode(key)
y = self.dst.encode(val)
self.W = self.W + FSER(self.W, x, y)
def reset(self, W=None):
if W is None: W = tr.zeros(self.dst.size, self.src.size)
self.W = W
def store(self, gate = 1.0):
dW = FSER(self.W, self.src.content, self.dst.content)
# self.W += gate * dW # bad if self.W is a leaf variable requiring grad
self.W = self.W + gate * dW
def recall(self, gate = 1.0):
# self.dst.new_content = self.dst.new_content * (1. - gate)
self.dst.new_content = self.dst.new_content + self.W.mv(self.src.content) * gate
class NeuralVirtualMachine:
def __init__(self, env, registers, connections):
self.registers = registers
self.connections = connections
self.connection_names = list(sorted(connections.keys()))
self.env = env
self.tick_counter = 0
def pullback(self, t, b=0):
self.tick_counter = t
for name in self.net.activities:
self.registers[name].content = self.net.activities[name][t][b].squeeze()
if t > 0: self.registers[name].old_content = self.net.activities[name][t-1][b].squeeze()
def dbg(self):
print("****************** dbg: tick %d **********************" % self.tick_counter)
print(self.inst_at.get(self.registers["ipt"].decode(self.registers["ipt"].content), "internal"))
for register in self.registers.values():
print(" ", register)
print(register.content.detach().numpy())
# for connection in self.connections.values(): print(" ", connection)
def tick(self, diff_gates=False):
# apply current gates
gates = self.registers["gts"].content
split = int(len(gates)/2)
gs, gr = gates[:split], gates[split:] # store, recall
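        # tick() assumes the first half of the gts vector holds store gates and the
        # second half recall gates, one entry per connection in self.connection_names order.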
# storage
for c, name in enumerate(self.connection_names):
# if diff_gates or gs[c] > 0.5: self.connections[name].store(gs[c])
if diff_gates: self.connections[name].store(gs[c])
elif gs[c] > 0.5: self.connections[name].store()
# # recall
# for register in self.registers.values():
# register.new_content = register.content.clone() # clone important since gr is a view, don't want to update gates
# recalled = set() # track if each register is recall target
# for c, name in enumerate(self.connection_names):
# # if diff_gates or gr[c] > 0.5:
# # self.connections[name].recall(gr[c])
# dst = self.connections[name].dst
# if diff_gates:
# if dst.name not in recalled: dst.new_content = dst.new_content * (1 - gr[c])
# self.connections[name].recall(gr[c])
# elif gr[c] > 0.5:
# if dst.name not in recalled: dst.new_content = tr.zeros(dst.size)
# self.connections[name].recall()
# recalled.add(dst.name)
# recall
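        # In differentiable mode every connection contributes, weighted by its recall
        # gate; otherwise only connections whose gate exceeds 0.5 fire. Registers that
        # receive no recall keep their current content (handled just below).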
recalled = set()
for register in self.registers.values():
register.new_content = tr.zeros(register.size)
for c, name in enumerate(self.connection_names):
if diff_gates:
self.connections[name].recall(gr[c])
recalled.add(self.connections[name].dst.name)
elif gr[c] > 0.5:
self.connections[name].recall()
recalled.add(self.connections[name].dst.name)
for register in self.registers.values():
if register.name not in recalled: register.new_content = register.content
# shift buffers and apply activation function
for register in self.registers.values():
register.update()
register.activate()
self.tick_counter += 1
# self-loop indicates end-of-program
ipt = self.registers["ipt"]
if ipt.decode(ipt.content) == ipt.decode(ipt.old_content): return True # program done
else: return False # program not done
def mount(self, routine):
# initialize default gates at initial ipt
self.registers["ipt"].reset(self.ipt_of[routine])
self.registers["spt"].reset(self.registers["spt"].encode(0))
self.registers["gts"].reset(self.registers["gts"].encode(((), ("gts","ipt"))))
self.tick_counter = 0
def reset(self, contents):
for name, register in self.registers.items():
register.reset(tr.zeros(register.size))
if name in contents: register.reset(contents[name])
if name == "ipt": register.reset(register.encode(0))
if name == "gts": register.reset(register.encode(((), ("gts","ipt"))))
def size(self):
reg_sizes, conn_sizes, total = {}, {}, 0
for name, reg in self.registers.items():
reg_sizes[name] = (reg.size, len(reg.codec))
for name, conn in self.connections.items():
conn_sizes[name] = conn.W.shape
total += conn.W.shape[0] * conn.W.shape[1]
return reg_sizes, conn_sizes, total
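    # run() mounts the "main" routine and ticks the underlying NVMNet until the
    # instruction pointer stops changing; whenever the decoded target register changes,
    # the joint register contents are sent to the environment as a position command.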
def run(self, dbg=False):
self.mount("main")
if dbg: self.dbg()
ipt = self.registers["ipt"]
tar = self.registers["tar"]
target_changed = True
while True:
# done = self.tick()
self.net.tick()
self.pullback(self.net.tick_counter)
done = ipt.decode(ipt.content) == ipt.decode(ipt.old_content)
if dbg: self.dbg()
if target_changed:
position = self.registers["jnt"].content.detach().numpy()
self.env.goto_position(position)
target_changed = (tar.decode(tar.content) != tar.decode(tar.old_content))
if done: break
return self.tick_counter
def get_state(self):
registers = {name: reg.content for name, reg in self.registers.items()}
connections = {name: conn.W for name, conn in self.connections.items()}
return registers, connections
def reset_state(self, registers, connections):
for name, content in registers.items():
self.registers[name].reset(content)
for name, W in connections.items():
self.connections[name].reset(W)
def decode(self, register_name, time_step, batch_index=0):
v = self.net.activities[register_name][time_step]
batch_index = min(batch_index, v.shape[0]-1) # common state across batch may not be broadcasted
return self.registers[register_name].decode(v[batch_index,:,0])
def hadamard_codec(tokens):
N = len(tokens)
H = hadamard_matrix(N)
return H.shape[0], {token: H[t] for t, token in enumerate(tokens)}
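# virtualize() compiles an abstract machine into a NeuralVirtualMachine: each symbolic
# register becomes a Hadamard-coded NVMRegister, each symbolic connection becomes an
# NVMConnection whose memory is preloaded via FSER, and extra "name <" identity
# connections let persistent registers carry their content across ticks.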
def virtualize(am, σ=None, detach_gates=True):
registers = {}
tokens = {
"ipt": list(range(len(am.connections["ipt"].memory)+1)),
"spt": list(range(am.spt_range)),
"loc": am.locs + ["nil"],
"tar": list(it.product(range(am.num_bases), range(am.max_levels+1), [0, 1])) + ["rest"],
"obj": am.objs + ["nil"]
}
for name in ["r0", "r1", "jmp"]:
tokens[name] = am.objs + am.locs + ["nil"]
for name in tokens:
size, codec = hadamard_codec(tokens[name])
registers[name] = NVMRegister(name, size, codec, σ=σ)
jnt_codec = {key: tr.tensor(val).float() for key, val in am.ik.items()}
registers["jnt"] = NVMRegister("jnt", am.env.num_joints, jnt_codec)
    # use the NVM net to set up the gate index
plastic = ("obj", "loc", "spt", "spt > r0", "spt > r1")
persistent = ["spt", "jmp", "r0", "r1", "obj", "loc", "tar", "jnt"]
connectivity = {c.name: (c.src.name, c.dst.name) for c in am.connections.values()}
connectivity.update({"%s <" % name: (name, name) for name in persistent})
net = NVMNet(
register_sizes = {name: reg.size for name, reg in registers.items()},
gate_register_name = "gts",
connectivity = connectivity,
# activators={name: (lambda v: v) for name in ["jnt", "gts"]},
activators={name: (lambda v: v) for name in ["jnt"]},
plastic_connections = plastic,
detach_gates=detach_gates)
# set up gate register
gts_codec = {}
for store, recall in am.connections["gts"].memory.values():
# add persistence to recall
persist = tuple(recall)
for name in registers:
if name not in [am.connections[rec].dst.name for rec in recall]:
persist += ("%s <" % name,)
# set gate values
g = tr.zeros(net.register_sizes["gts"])
for name in persist: g[net.recall_index[name]] = 1.
for name in store: g[net.storage_index[name]] = 1.
gts_codec[store, recall] = g
registers["gts"] = NVMRegister("gts", net.register_sizes["gts"], gts_codec)
    # set up VM connections
connections = {}
for name, am_conn in am.connections.items():
src, dst = registers[am_conn.src.name], registers[am_conn.dst.name]
connections[name] = NVMConnection(name, src, dst)
for key, val in am_conn.memory.items(): connections[name][key] = val
    # set up persistence connections
for name in persistent:
connections["%s <" % name] = NVMConnection("%s <" % name, registers[name], registers[name])
connections["%s <" % name].W = tr.eye(registers[name].size)
nvm = NeuralVirtualMachine(am.env, registers, connections)
nvm.net = net
nvm.ipt_of = {
routine: registers["ipt"].encode(ipt)
for routine, ipt in am.ipt_of.items()}
nvm.inst_at = dict(am.inst_at)
nvm.objs = list(am.objs)
nvm.locs = list(am.locs)
return nvm
if __name__ == "__main__":
# x = tr.tensor([1., 1., -1.])
# y = tr.tensor([-1., 1., 1.])
# W = tr.zeros(3,3)
# FSER(W, x, y)
# input('.')
np.set_printoptions(linewidth=5000)
import sys
sys.path.append('../../envs')
from blocks_world import BlocksWorldEnv, random_thing_below
num_bases = 3
# num_blocks, max_levels = 7, 3
num_blocks, max_levels = 3, 3
thing_below = random_thing_below(num_blocks, max_levels, num_bases)
goal_thing_below = random_thing_below(num_blocks, max_levels, num_bases)
# # thing_below = {"b0": "t0", "b1": "t1", "b2": "t2", "b3": "b2", "b4": "b3", "b5": "t5", "b6":"b5"})
# thing_below = {"b%d" % n: "t%d" % n for n in range(num_blocks)}
# thing_below["b0"] = "b1"
# # thing_below["b3"] = "b2"
# goal_thing_below = {"b%d" % n: "t%d" % n for n in range(num_blocks)}
# goal_thing_below.update({"b1": "b0", "b2": "b3"})
env = BlocksWorldEnv(show=True)
env.load_blocks(thing_below, num_bases)
am = make_abstract_machine(env, num_bases, max_levels, gen_regs=["r0","r1"])
nvm = virtualize(am)
net = nvm.net
goal_thing_above = env.invert(goal_thing_below)
for key, val in goal_thing_above.items():
if | |
        for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_portal_members_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_portal_members_post`")
collection_formats = {}
resource_path = '/Portals/{id}/portalMembers'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PortalMember',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_put(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_put(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param Portal data: Model instance data
:return: Portal
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_put_with_http_info(id, **kwargs)
else:
(data) = self.portals_id_put_with_http_info(id, **kwargs)
return data
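    # Note: the *_with_http_info variants perform the actual request; when
    # `_return_http_data_only` is not set they return the HTTP status code and response
    # headers alongside the deserialized body.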
def portals_id_put_with_http_info(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_put_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param Portal data: Model instance data
:return: Portal
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_put`")
collection_formats = {}
resource_path = '/Portals/{id}'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Portal',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_replace_post(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_replace_post(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param Portal data: Model instance data
:return: Portal
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_replace_post_with_http_info(id, **kwargs)
else:
(data) = self.portals_id_replace_post_with_http_info(id, **kwargs)
return data
def portals_id_replace_post_with_http_info(self, id, **kwargs):
"""
Replace attributes for a model instance and persist it into the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_replace_post_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Model id (required)
:param Portal data: Model instance data
:return: Portal
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'data']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_replace_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_replace_post`")
collection_formats = {}
resource_path = '/Portals/{id}/replace'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'data' in params:
body_params = params['data']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Portal',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_team_get(self, id, **kwargs):
"""
Fetches belongsTo relation team.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_team_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param bool refresh:
:return: Team
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_team_get_with_http_info(id, **kwargs)
else:
(data) = self.portals_id_team_get_with_http_info(id, **kwargs)
return data
def portals_id_team_get_with_http_info(self, id, **kwargs):
"""
Fetches belongsTo relation team.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_team_get_with_http_info(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param bool refresh:
:return: Team
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'refresh']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method portals_id_team_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params) or (params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `portals_id_team_get`")
collection_formats = {}
resource_path = '/Portals/{id}/team'.replace('{format}', 'json')
path_params = {}
if 'id' in params:
path_params['id'] = params['id']
query_params = {}
if 'refresh' in params:
query_params['refresh'] = params['refresh']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Team',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def portals_id_template_folders_count_get(self, id, **kwargs):
"""
Counts templateFolders of Portal.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.portals_id_template_folders_count_get(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str id: Portal id (required)
:param str where: Criteria to match model instances
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.portals_id_template_folders_count_get_with_http_info(id, **kwargs)
else:
(data) = self.portals_id_template_folders_count_get_with_http_info(id, **kwargs)
return data
def portals_id_template_folders_count_get_with_http_info(self, id, **kwargs):
"""
Counts templateFolders of Portal.
| |
sha1="a2e319eb7cfaa630efc5ceb69e6f8f081e90abae",
size=274561130,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p75798894-p76057549.7z"),
page_ids=range(75798894, 76057550),
darus_id=93997,
sha1="7fbc055c4867916d2eb3d714ab0bd3a8fdc7757c",
size=492816562,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p76057550-p76341886.7z"),
page_ids=range(76057550, 76341887),
darus_id=93998,
sha1="6172815f6b9a9652c6a1b5cc5c2b79a38636a895",
size=619299145,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p76341887-p76605599.7z"),
page_ids=range(76341887, 76605600),
darus_id=94000,
sha1="36915d0f8cacea0b42dc583878d81ba4251770b9",
size=624513171,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p76605600-p76913094.7z"),
page_ids=range(76605600, 76913095),
darus_id=94002,
sha1="97eacae8d8926dcd42f3b1ce58fc43071e92f3bd",
size=694825259,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p76913095-p77225521.7z"),
page_ids=range(76913095, 77225522),
darus_id=94004,
sha1="49a19b144d3aa09a7c5e46081a7e1beecfc15361",
size=704424616,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p77225522-p77508526.7z"),
page_ids=range(77225522, 77508527),
darus_id=94006,
sha1="121fc8f9e32837e5a51a011851a8fa4786e2d5d3",
size=680968473,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p77508527-p77814777.7z"),
page_ids=range(77508527, 77814778),
darus_id=94007,
sha1="4b2426346629b70d72ee3ed752a4e65656708c9f",
size=696780406,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p77814778-p78132877.7z"),
page_ids=range(77814778, 78132878),
darus_id=94009,
sha1="2ede6fc4d73df0f5852ad203a6d8513b182963d3",
size=775538893,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p78132878-p78441256.7z"),
page_ids=range(78132878, 78441257),
darus_id=94010,
sha1="63fa8d68768e7ff2d6df0870d26aa42955f7067c",
size=710233631,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p78441257-p78742233.7z"),
page_ids=range(78441257, 78742234),
darus_id=94013,
sha1="78260abaff2dc647c3233f7bb810f2c875b0243a",
size=675729569,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p78742234-p79059758.7z"),
page_ids=range(78742234, 79059759),
darus_id=94014,
sha1="ce5ba46102652a6e593df9df3347bef08ef28097",
size=658424831,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p79059759-p79361786.7z"),
page_ids=range(79059759, 79361787),
darus_id=94017,
sha1="0a0890510bede6a7cbf174f57895e414b825cc96",
size=674476337,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p79361787-p79647381.7z"),
page_ids=range(79361787, 79647382),
darus_id=94018,
sha1="3e82024a3c9198a228b2cbbcf612d799b4f5be78",
size=654472223,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p79647382-p79992873.7z"),
page_ids=range(79647382, 79992874),
darus_id=94020,
sha1="3c1e8b2a3f97be4cf309da2cc3607dc82ccc3129",
size=729056632,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p79992874-p80301005.7z"),
page_ids=range(79992874, 80301006),
darus_id=94022,
sha1="333890d2080df688fd3a1b23ae556b14d9727d90",
size=651453221,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p80301006-p80600437.7z"),
page_ids=range(80301006, 80600438),
darus_id=94023,
sha1="f2f86bb73d5e7e56f2a8338de2e922e03dd9eeb2",
size=651752506,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p80600438-p80916588.7z"),
page_ids=range(80600438, 80916589),
darus_id=94025,
sha1="1ff40be2fd05e9602bdcf282f12c136906fdba44",
size=714419347,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p80916589-p81212912.7z"),
page_ids=range(80916589, 81212913),
darus_id=94028,
sha1="a1da9bfdb0e96f7fcb823893618cdf3d0e3412de",
size=663154300,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p81212913-p81443489.7z"),
page_ids=range(81212913, 81443490),
darus_id=94030,
sha1="fff53a6c67f8889e73195591bd152edc61f76f72",
size=489872551,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p81443490-p81662126.7z"),
page_ids=range(81443490, 81662127),
darus_id=94031,
sha1="1ca7da0259c1da31acb750eab712635519c92baf",
size=431476857,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p81662127-p81876610.7z"),
page_ids=range(81662127, 81876611),
darus_id=94034,
sha1="c0ab7449057fd08e21c046d1d609bd453fbb066e",
size=428449739,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p81876611-p82093102.7z"),
page_ids=range(81876611, 82093103),
darus_id=94035,
sha1="268b4f0c7053bc5113f230ed0439bbf0aa852a31",
size=429329565,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p82093103-p82327848.7z"),
page_ids=range(82093103, 82327849),
darus_id=94036,
sha1="3ce78f27db540cf206267ef6e9d6f5b378d8f7a2",
size=448608717,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p82327849-p82584701.7z"),
page_ids=range(82327849, 82584702),
darus_id=94038,
sha1="d2dcddcd1e1a9528eb346b263b5060e575e18336",
size=559467797,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p82584702-p82893938.7z"),
page_ids=range(82584702, 82893939),
darus_id=94039,
sha1="f0eb441ff55abd65eb6e3de20b5f24f825cbf84f",
size=734734989,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p82893939-p83156119.7z"),
page_ids=range(82893939, 83156120),
darus_id=94041,
sha1="62fa127b2c875030842791a84adaa1b83c7d629d",
size=587248458,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p83156120-p83360344.7z"),
page_ids=range(83156120, 83360345),
darus_id=94044,
sha1="cf0560a71020118357f68e7a14a98c9c73bf591f",
size=457046889,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p83360345-p83651670.7z"),
page_ids=range(83360345, 83651671),
darus_id=94045,
sha1="58ca3f7a9e833434cc126acb63db0a944d7bc9e8",
size=699153270,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p83651671-p83954551.7z"),
page_ids=range(83651671, 83954552),
darus_id=94046,
sha1="040af32acd8225dc5b739e712712755ac3cf04f6",
size=708961961,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p83954552-p84259846.7z"),
page_ids=range(83954552, 84259847),
darus_id=94048,
sha1="d72b7e38a4ef38edaab5006f698a8527eff55807",
size=644496679,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p84259847-p84521458.7z"),
page_ids=range(84259847, 84521459),
darus_id=94049,
sha1="27d3c77082e2def2436b19ea48e28aefef0bec4d",
size=618972271,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p84521459-p84793967.7z"),
page_ids=range(84521459, 84793968),
darus_id=94051,
sha1="d8a1a5a146b3b370e80a8d8e94eec8a145c3f498",
size=628661533,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p84793968-p85019030.7z"),
page_ids=range(84793968, 85019031),
darus_id=94052,
sha1="46a598995f228c5bbed54bdeb31db7b7effcbba5",
size=511898936,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p85019031-p85282160.7z"),
page_ids=range(85019031, 85282161),
darus_id=94054,
sha1="3e358c20637742e9d73e47284c5ff9393f6317b8",
size=500892247,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p85282161-p85564548.7z"),
page_ids=range(85282161, 85564549),
darus_id=94055,
sha1="b0024c73c318f71a5538b66c8047367403cdbba8",
size=612206737,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p85564549-p85828759.7z"),
page_ids=range(85564549, 85828760),
darus_id=94058,
sha1="4ded9fba1696fff592950765181f000dddbb3f65",
size=609965347,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p85828760-p86092058.7z"),
page_ids=range(85828760, 86092059),
darus_id=94059,
sha1="339385c7783d69f0f3cf1cd99733515d655aa0eb",
size=588489974,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p86092059-p86284502.7z"),
page_ids=range(86092059, 86284503),
darus_id=94060,
sha1="b6b2b80e8f8a36f03fc368ac79f8a5501bfe0014",
size=486915248,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p86284503-p86429039.7z"),
page_ids=range(86284503, 86429040),
darus_id=94062,
sha1="103d10a420320b89520ad2015a387d304974647b",
size=360189092,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p86429040-p86585418.7z"),
page_ids=range(86429040, 86585419),
darus_id=94063,
sha1="6467a51e04e0eeea4951bee66e87e98e17da69ca",
size=382269106,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p86585419-p86715323.7z"),
page_ids=range(86585419, 86715324),
darus_id=94064,
sha1="73cde4ce76fcd25c18ec3e2074b70b86a9123499",
size=309403957,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p86715324-p86851252.7z"),
page_ids=range(86715324, 86851253),
darus_id=94065,
sha1="005ad2a980489b4a76c78e8e6ff3a7945ba12df9",
size=381027327,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p86851253-p87045527.7z"),
page_ids=range(86851253, 87045528),
darus_id=94066,
sha1="370017a2f38ce4305166be40b04be87e831b138a",
size=462731546,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p87045528-p87327857.7z"),
page_ids=range(87045528, 87327858),
darus_id=94068,
sha1="588e2f077a332ac1298171c42dc8774b31e0410a",
size=608032511,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p87327858-p87618662.7z"),
page_ids=range(87327858, 87618663),
darus_id=94069,
sha1="dda50f2b6dcd3e1e656120d361e1a3af9a795f4e",
size=685703516,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p87618663-p87906392.7z"),
page_ids=range(87618663, 87906393),
darus_id=94071,
sha1="e71b304d325b746e28056387a5166b9b1e418645",
size=669484191,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p87906393-p88159767.7z"),
page_ids=range(87906393, 88159768),
darus_id=94073,
sha1="04747cc160bdddc0644d328491202611bb9a881c",
size=574789405,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p88159768-p88185873.7z"),
page_ids=range(88159768, 88185874),
darus_id=94074,
sha1="44e1b3ef524b461b297996f54a5d75995d93010a",
size=60678838,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p88185874-p88412096.7z"),
page_ids=range(88185874, 88412097),
darus_id=94075,
sha1="8ca33480b7ebacfe6ceb7a1cf517bb88e55243a5",
size=535302736,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p88412097-p88667091.7z"),
page_ids=range(88412097, 88667092),
darus_id=94076,
sha1="0728c83cd42b5cbf7acc4e49ce345f8f03ebbb30",
size=584300713,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p88667092-p88945256.7z"),
page_ids=range(88667092, 88945257),
darus_id=94077,
sha1="918897c09cef10329c22ccf813d088c9008ef441",
size=622572304,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p88945257-p89259026.7z"),
page_ids=range(88945257, 89259027),
darus_id=94080,
sha1="b39f63c8e54e3cd094a31aa57305cc3ff50f8215",
size=697690948,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p89259027-p89564684.7z"),
page_ids=range(89259027, 89564685),
darus_id=94081,
sha1="316f02d9f3cf2ff476d969bdb46413f87ca00dea",
size=679980291,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p89564685-p89865552.7z"),
page_ids=range(89564685, 89865553),
darus_id=94084,
sha1="aaa55a96ae136860fed2773f6b375ce6f65f57fc",
size=621313998,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p89865553-p90174213.7z"),
page_ids=range(89865553, 90174214),
darus_id=94085,
sha1="90b128a4b1bf116b691143349951a5d7e1eadf51",
size=671882993,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p90174214-p90480539.7z"),
page_ids=range(90174214, 90480540),
darus_id=94086,
sha1="d85475c5ec3e0f34e84815149e25bc6c83b95550",
size=656326359,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p90480540-p90763520.7z"),
page_ids=range(90480540, 90763521),
darus_id=94088,
sha1="754e625f5cce18d3668e8ae3a1a372fdc3cf832d",
size=611460448,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p90763521-p91067225.7z"),
page_ids=range(90763521, 91067226),
darus_id=94089,
sha1="49be29adff95c43938c8faf1486051cb7fc42df3",
size=710594697,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p91067226-p91360555.7z"),
page_ids=range(91067226, 91360556),
darus_id=94090,
sha1="5cab571af92998503b90fc563ba49b2ca07776a0",
size=698257738,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p91360556-p91649338.7z"),
page_ids=range(91360556, 91649339),
darus_id=94093,
sha1="79fdcb8985f76772039ab53b4ca12a886198f140",
size=748075270,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p91649339-p91931885.7z"),
page_ids=range(91649339, 91931886),
darus_id=94094,
sha1="c768ca122b5b6855c38516aa13857265e2ba26f5",
size=705865674,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p91931886-p92215811.7z"),
page_ids=range(91931886, 92215812),
darus_id=94095,
sha1="1bcce872790bf1bd6a7f260258e9f47d7d7c55b6",
size=679310577,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p92215812-p92471598.7z"),
page_ids=range(92215812, 92471599),
darus_id=94097,
sha1="9643ecb5c01803cb811000120383578d2de90faf",
size=597423202,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p92471599-p92802871.7z"),
page_ids=range(92471599, 92802872),
darus_id=94098,
sha1="910db6095ed4936eea9aef613c8b1f95290576da",
size=662373295,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p92802872-p93213049.7z"),
page_ids=range(92802872, 93213050),
darus_id=94099,
sha1="7eb2134097f48bf2369b62bbb83784d36e480ac5",
size=795747432,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p93213050-p93559563.7z"),
page_ids=range(93213050, 93559564),
darus_id=94102,
sha1="30de4149de7f959cf3e3d92e77ec4c70a1516e8a",
size=675675452,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p93559564-p93811658.7z"),
page_ids=range(93559564, 93811659),
darus_id=94103,
sha1="d5974bcc6471adc6745002151612c9db3e026f9f",
size=509722653,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p93811659-p94007998.7z"),
page_ids=range(93811659, 94007999),
darus_id=94104,
sha1="e24fd6126a42c77155bd9a8fd8eeaedcdd6edec6",
size=391138161,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p94007999-p94204307.7z"),
page_ids=range(94007999, 94204308),
darus_id=94106,
sha1="27cba0d7221777ad0bdc70b8f5b20ae30fb8a0a6",
size=381672380,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p94204308-p94480546.7z"),
page_ids=range(94204308, 94480547),
darus_id=94107,
sha1="6d05dc7b2e632d6bc2759ed34843f8ac87a82ce5",
size=539515392,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p94480547-p94749608.7z"),
page_ids=range(94480547, 94749609),
darus_id=94108,
sha1="36653ce3a1c6e5211f08559b5ac99274b8057f41",
size=471387511,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p94749609-p94878145.7z"),
page_ids=range(94749609, 94878146),
darus_id=94110,
sha1="85b79cf9c2148ca3cdc3f63d0b526f4b842a58f1",
size=287044143,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p94878146-p95086419.7z"),
page_ids=range(94878146, 95086420),
darus_id=94111,
sha1="f2a61f36cf5fbaa656e70f81401af76ed906f026",
size=420874462,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p95086420-p95344487.7z"),
page_ids=range(95086420, 95344488),
darus_id=94112,
sha1="a2989aefdc43c159c818c3d9b2d2e1925d148f15",
size=463571267,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p95344488-p95640825.7z"),
page_ids=range(95344488, 95640826),
darus_id=94114,
sha1="3e4724152f68276fcb2ab0b8f3ec2bb9a705bf10",
size=450314373,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p95640826-p95977886.7z"),
page_ids=range(95640826, 95977887),
darus_id=94115,
sha1="1521274c1fb43238fed07c720f002b4fc4e0cd03",
size=533533287,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p95977887-p96232211.7z"),
page_ids=range(95977887, 96232212),
darus_id=94117,
sha1="6933a57e3c6f83ef95b1c7790b0f68a42864bd5e",
size=451033272,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p96232212-p96398115.7z"),
page_ids=range(96232212, 96398116),
darus_id=94119,
sha1="87e9c9a8395cc3b1ce20a241f5b65120e5462ad9",
size=316661461,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p96398116-p96615754.7z"),
page_ids=range(96398116, 96615755),
darus_id=94120,
sha1="f413c8c1c5f53f9b95e5cdcf86a4b580b3babb11",
size=471059243,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p96615755-p96945285.7z"),
page_ids=range(96615755, 96945286),
darus_id=94121,
sha1="4083a208f3e65e6a07b2764cfa9897a4d3c3fece",
size=665026345,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p96945286-p97209076.7z"),
page_ids=range(96945286, 97209077),
darus_id=94123,
sha1="83236a98859a4bb15f62d1b698e05f1288e898b0",
size=518164596,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p97209077-p97513591.7z"),
page_ids=range(97209077, 97513592),
darus_id=94125,
sha1="c81e840c9ee6e072cbebaa5a61813213e1fb9f83",
size=531635415,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p97513592-p97819559.7z"),
page_ids=range(97513592, 97819560),
darus_id=94126,
sha1="365ebfbc2d99abdaf45c5e81c48da0b5a68b7dca",
size=500083415,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p97819560-p98037695.7z"),
page_ids=range(97819560, 98037696),
darus_id=94128,
sha1="15846f402da2f4147d64999b93ef8553fc9e709e",
size=433479075,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p98037696-p98276762.7z"),
page_ids=range(98037696, 98276763),
darus_id=94130,
sha1="c7106567f040b6cc9ed204bf5bdad5089d655b61",
size=474505432,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p98276763-p98537017.7z"),
page_ids=range(98276763, 98537018),
darus_id=94131,
sha1="6fcec6c2049c1ee03dda75df38278c6752cf7f2e",
size=504107090,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p98537018-p98835301.7z"),
page_ids=range(98537018, 98835302),
darus_id=94132,
sha1="cdba2dc219feb0cddea57b4333a6e7dea3a82d62",
size=491128845,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p98835302-p99056982.7z"),
page_ids=range(98835302, 99056983),
darus_id=94133,
sha1="3f72878eb8853f8e28e242b29c72072e96da525d",
size=381117084,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p99056983-p99328254.7z"),
page_ids=range(99056983, 99328255),
darus_id=94134,
sha1="e5381d5782847ffd1cf2008c0b516a32c1ed3697",
size=410688133,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p99328255-p99609495.7z"),
page_ids=range(99328255, 99609496),
darus_id=94135,
sha1="804f07b430a34c96a2e6aacb573443562c60fe93",
size=479134350,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p99609496-p99845468.7z"),
page_ids=range(99609496, 99845469),
darus_id=94137,
sha1="084b6f66c7717ffcf046f34ff2037d824731bc23",
size=397507216,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p99845469-p100084903.7z"),
page_ids=range(99845469, 100084904),
darus_id=94138,
sha1="67410aa342cd17a2cefd852a2283019c36bdaca5",
size=435053179,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p100084904-p100302386.7z"),
page_ids=range(100084904, 100302387),
darus_id=92941,
sha1="0baddbcaba82229b81338b9164230db0996f7ad8",
size=400070953,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p100302387-p100543457.7z"),
page_ids=range(100302387, 100543458),
darus_id=92942,
sha1="b26d4bbe7213b00f557f5565d11fff13bb0067a8",
size=444994911,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p100543458-p100805044.7z"),
page_ids=range(100543458, 100805045),
darus_id=92943,
sha1="c27aec64e66ef4fee4b35d4bda68be7b7670360b",
size=506169587,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p100805045-p100973765.7z"),
page_ids=range(100805045, 100973766),
darus_id=92944,
sha1="98191d2e7769d0b2107074a80bcd89436cd0da72",
size=347945090,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p100973766-p101112507.7z"),
page_ids=range(100973766, 101112508),
darus_id=92945,
sha1="9dd3bbf7cc2a5089489ea7e84b55a383bdfed9a9",
size=285137737,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir / (prefix + "p101112508-p101303678.7z"),
page_ids=range(101112508, 101303679),
darus_id=92946,
sha1="fe294fae7ee167510b8b0f34c25764bbf0e0331b",
size=303715418,
auto_download=auto_download,
),
WikidatedV1_0EntityStreamsFile(
archive_path=dataset_dir | |
#!/usr/bin/env python
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
import roslib; roslib.load_manifest('vigir_behavior_surprise_run_1_thor')
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, Logger
from flexbe_states.input_state import InputState
from flexbe_states.operator_decision_state import OperatorDecisionState
from flexbe_states.log_state import LogState
from vigir_flexbe_states.plan_affordance_state import PlanAffordanceState
from vigir_flexbe_states.execute_trajectory_msg_state import ExecuteTrajectoryMsgState
from flexbe_states.calculation_state import CalculationState
from vigir_flexbe_states.get_template_affordance_state import GetTemplateAffordanceState
from vigir_flexbe_states.finger_configuration_state import FingerConfigurationState
from vigir_flexbe_states.moveit_predefined_pose_state import MoveitPredefinedPoseState
from vigir_flexbe_states.change_control_mode_action_state import ChangeControlModeActionState
from vigir_flexbe_states.footstep_plan_relative_state import FootstepPlanRelativeState
from vigir_flexbe_states.execute_step_plan_action_state import ExecuteStepPlanActionState
from flexbe_states.decision_state import DecisionState
from vigir_flexbe_states.check_current_control_mode_state import CheckCurrentControlModeState
from vigir_flexbe_states.create_step_goal_state import CreateStepGoalState
from vigir_flexbe_states.plan_footsteps_state import PlanFootstepsState
from vigir_flexbe_states.get_template_stand_pose_state import GetTemplateStandPoseState
from vigir_flexbe_states.get_template_pregrasp_state import GetTemplatePregraspState
from vigir_flexbe_states.plan_endeffector_pose_state import PlanEndeffectorPoseState
from vigir_flexbe_states.get_template_finger_config_state import GetTemplateFingerConfigState
from vigir_flexbe_states.plan_endeffector_cartesian_waypoints_state import PlanEndeffectorCartesianWaypointsState
from vigir_flexbe_states.hand_trajectory_state import HandTrajectoryState
from vigir_flexbe_states.get_template_grasp_state import GetTemplateGraspState
from vigir_flexbe_states.look_at_target_state import LookAtTargetState
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Mon Jun 01 2015
@author: <NAME>, <NAME>
'''
class SurpriseRun1THORSM(Behavior):
'''
Behavior for the surprise task on run 1 of the DRC Finals.
'''
def __init__(self):
super(SurpriseRun1THORSM, self).__init__()
self.name = 'Surprise Run 1 THOR'
# parameters of this behavior
self.add_parameter('hand_side', 'left')
self.add_parameter('hand_type', 'vt_hand')
self.add_parameter('parameter_set', 'drc_step_2D')
# references to used behaviors
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
self._pull_displacement = 0
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
pull_affordance = "pull"
affordance_controller = ExecuteTrajectoryMsgState.CONTROLLER_LEFT_ARM if self.hand_side == "left" else ExecuteTrajectoryMsgState.CONTROLLER_RIGHT_ARM
arm_controller = ExecuteTrajectoryMsgState.CONTROLLER_LEFT_ARM if self.hand_side == "left" else ExecuteTrajectoryMsgState.CONTROLLER_RIGHT_ARM  # referenced by the Move_To_* trajectory states below but not defined elsewhere in this listing
pull_displacement = 0.3 # meters
# x:383 y:840, x:483 y:490
_state_machine = OperatableStateMachine(outcomes=['finished', 'failed'])
_state_machine.userdata.hand_side = self.hand_side
_state_machine.userdata.none = None
_state_machine.userdata.step_back_distance = 1.0 # meters
_state_machine.userdata.grasp_preference = 0
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
self._pull_displacement = pull_displacement
# [/MANUAL_CREATE]
# x:1033 y:40, x:333 y:90, x:1033 y:190
_sm_go_to_grasp_0 = OperatableStateMachine(outcomes=['finished', 'failed', 'again'], input_keys=['hand_side', 'grasp_preference', 'template_id'], output_keys=['grasp_preference'])
with _sm_go_to_grasp_0:
# x:33 y:49
OperatableStateMachine.add('Get_Grasp_Info',
GetTemplateGraspState(),
transitions={'done': 'Extract_Frame_Id', 'failed': 'failed', 'not_available': 'Inform_Grasp_Failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Low, 'not_available': Autonomy.Low},
remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'preference': 'grasp_preference', 'grasp': 'grasp_pose'})
# x:40 y:293
OperatableStateMachine.add('Convert_Waypoints',
CalculationState(calculation=lambda msg: [msg.pose]),
transitions={'done': 'Plan_To_Grasp'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_pose', 'output_value': 'grasp_waypoints'})
# x:242 y:292
OperatableStateMachine.add('Plan_To_Grasp',
PlanEndeffectorCartesianWaypointsState(ignore_collisions=True, include_torso=False, keep_endeffector_orientation=False, allow_incomplete_plans=True, vel_scaling=0.1, planner_id="RRTConnectkConfigDefault"),
transitions={'planned': 'Move_To_Grasp_Pose', 'incomplete': 'Move_To_Grasp_Pose', 'failed': 'Decide_Which_Grasp'},
autonomy={'planned': Autonomy.Low, 'incomplete': Autonomy.High, 'failed': Autonomy.High},
remapping={'waypoints': 'grasp_waypoints', 'hand': 'hand_side', 'frame_id': 'grasp_frame_id', 'joint_trajectory': 'joint_trajectory', 'plan_fraction': 'plan_fraction'})
# x:494 y:175
OperatableStateMachine.add('Move_To_Grasp_Pose',
ExecuteTrajectoryMsgState(controller=arm_controller),
transitions={'done': 'Optional_Template_Adjustment', 'failed': 'Decide_Which_Grasp'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Low},
remapping={'joint_trajectory': 'joint_trajectory'})
# x:226 y:177
OperatableStateMachine.add('Inform_Grasp_Failed',
LogState(text="No grasp choice left!", severity=Logger.REPORT_WARN),
transitions={'done': 'failed'},
autonomy={'done': Autonomy.Off})
# x:970 y:294
OperatableStateMachine.add('Increase_Preference_Index',
CalculationState(calculation=lambda x: x + 1),
transitions={'done': 'again'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_preference', 'output_value': 'grasp_preference'})
# x:41 y:178
OperatableStateMachine.add('Extract_Frame_Id',
CalculationState(calculation=lambda pose: pose.header.frame_id),
transitions={'done': 'Convert_Waypoints'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_pose', 'output_value': 'grasp_frame_id'})
# x:712 y:78
OperatableStateMachine.add('Optional_Template_Adjustment',
OperatorDecisionState(outcomes=["grasp", "pregrasp", "skip"], hint="Consider adjusting the template's pose", suggestion="skip"),
transitions={'grasp': 'Get_Grasp_Info', 'pregrasp': 'again', 'skip': 'finished'},
autonomy={'grasp': Autonomy.Full, 'pregrasp': Autonomy.Full, 'skip': Autonomy.High})
# x:754 y:294
OperatableStateMachine.add('Decide_Which_Grasp',
OperatorDecisionState(outcomes=["same", "next"], hint='Try the same grasp or the next one?', suggestion='same'),
transitions={'same': 'Optional_Template_Adjustment', 'next': 'Increase_Preference_Index'},
autonomy={'same': Autonomy.High, 'next': Autonomy.High})
# x:133 y:390, x:433 y:190, x:983 y:140
_sm_perform_grasp_1 = OperatableStateMachine(outcomes=['finished', 'failed', 'next'], input_keys=['hand_side', 'grasp_preference', 'template_id', 'pregrasp_pose'], output_keys=['grasp_preference'])
with _sm_perform_grasp_1:
# x:68 y:76
OperatableStateMachine.add('Get_Finger_Configuration',
GetTemplateFingerConfigState(),
transitions={'done': 'Close_Fingers', 'failed': 'failed', 'not_available': 'Inform_Closing_Failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.High, 'not_available': Autonomy.High},
remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'preference': 'grasp_preference', 'finger_config': 'finger_config'})
# x:293 y:328
OperatableStateMachine.add('Convert_Waypoints',
CalculationState(calculation=lambda msg: [msg.pose]),
transitions={'done': 'Plan_Back_To_Pregrasp'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'pregrasp_pose', 'output_value': 'pregrasp_waypoints'})
# x:496 y:328
OperatableStateMachine.add('Plan_Back_To_Pregrasp',
PlanEndeffectorCartesianWaypointsState(ignore_collisions=True, include_torso=False, keep_endeffector_orientation=False, allow_incomplete_plans=True, vel_scaling=0.1, planner_id="RRTConnectkConfigDefault"),
transitions={'planned': 'Move_Back_To_Pregrasp_Pose', 'incomplete': 'Move_Back_To_Pregrasp_Pose', 'failed': 'failed'},
autonomy={'planned': Autonomy.High, 'incomplete': Autonomy.High, 'failed': Autonomy.Low},
remapping={'waypoints': 'pregrasp_waypoints', 'hand': 'hand_side', 'frame_id': 'pregrasp_frame_id', 'joint_trajectory': 'joint_trajectory', 'plan_fraction': 'plan_fraction'})
# x:662 y:228
OperatableStateMachine.add('Move_Back_To_Pregrasp_Pose',
ExecuteTrajectoryMsgState(controller=arm_controller),
transitions={'done': 'Increase_Preference_Index', 'failed': 'failed'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Low},
remapping={'joint_trajectory': 'joint_trajectory'})
# x:296 y:228
OperatableStateMachine.add('Extract_Frame_Id',
CalculationState(calculation=lambda pose: pose.header.frame_id),
transitions={'done': 'Convert_Waypoints'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'pregrasp_pose', 'output_value': 'pregrasp_frame_id'})
# x:673 y:128
OperatableStateMachine.add('Increase_Preference_Index',
CalculationState(calculation=lambda x: x + 1),
transitions={'done': 'next'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_preference', 'output_value': 'grasp_preference'})
# x:81 y:228
OperatableStateMachine.add('Close_Fingers',
HandTrajectoryState(hand_type=self.hand_type),
transitions={'done': 'finished', 'failed': 'Extract_Frame_Id'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
remapping={'finger_trajectory': 'finger_config', 'hand_side': 'hand_side'})
# x:490 y:75
OperatableStateMachine.add('Inform_Closing_Failed',
LogState(text="No grasp choice left!", severity=Logger.REPORT_WARN),
transitions={'done': 'failed'},
autonomy={'done': Autonomy.Off})
# x:733 y:190, x:383 y:40
_sm_go_to_pregrasp_2 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['hand_side', 'grasp_preference', 'template_id'], output_keys=['grasp_preference', 'pregrasp_pose'])
with _sm_go_to_pregrasp_2:
# x:27 y:68
OperatableStateMachine.add('Get_Pregrasp_Info',
GetTemplatePregraspState(),
transitions={'done': 'Plan_To_Pregrasp_Pose', 'failed': 'failed', 'not_available': 'Inform_Pregrasp_Failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.High, 'not_available': Autonomy.High},
remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'preference': 'grasp_preference', 'pre_grasp': 'pregrasp_pose'})
# x:269 y:153
OperatableStateMachine.add('Inform_Pregrasp_Failed',
LogState(text="No grasp choice left!", severity=Logger.REPORT_WARN),
transitions={'done': 'failed'},
autonomy={'done': Autonomy.Low})
# x:537 y:228
OperatableStateMachine.add('Move_To_Pregrasp_Pose',
ExecuteTrajectoryMsgState(controller=arm_controller),
transitions={'done': 'finished', 'failed': 'Decide_Which_Pregrasp'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.High},
remapping={'joint_trajectory': 'joint_trajectory'})
# x:25 y:328
OperatableStateMachine.add('Increase_Preference_Index',
CalculationState(calculation=lambda x: x + 1),
transitions={'done': 'Get_Pregrasp_Info'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_preference', 'output_value': 'grasp_preference'})
# x:266 y:228
OperatableStateMachine.add('Plan_To_Pregrasp_Pose',
PlanEndeffectorPoseState(ignore_collisions=False, include_torso=False, allowed_collisions=[], planner_id="RRTConnectkConfigDefault"),
transitions={'planned': 'Move_To_Pregrasp_Pose', 'failed': 'Decide_Which_Pregrasp'},
autonomy={'planned': Autonomy.Low, 'failed': Autonomy.High},
remapping={'target_pose': 'pregrasp_pose', 'hand': 'hand_side', 'joint_trajectory': 'joint_trajectory'})
# x:266 y:327
OperatableStateMachine.add('Decide_Which_Pregrasp',
OperatorDecisionState(outcomes=["same", "next"], hint='Try the same pregrasp or the next one?', suggestion='same'),
transitions={'same': 'Get_Pregrasp_Info', 'next': 'Increase_Preference_Index'},
autonomy={'same': Autonomy.High, 'next': Autonomy.High})
# x:30 y:444, x:162 y:478, x:230 y:478
_sm_planning_pipeline_3 = OperatableStateMachine(outcomes=['finished', 'failed', 'aborted'], input_keys=['stand_pose'], output_keys=['plan_header'])
with _sm_planning_pipeline_3:
# x:34 y:57
OperatableStateMachine.add('Create_Step_Goal',
CreateStepGoalState(pose_is_pelvis=True),
transitions={'done': 'Plan_To_Waypoint', 'failed': 'failed'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Full},
remapping={'target_pose': 'stand_pose', 'step_goal': 'step_goal'})
# x:553 y:481
OperatableStateMachine.add('Modify_Plan',
InputState(request=InputState.FOOTSTEP_PLAN_HEADER, message='Modify plan, VALIDATE, and confirm.'),
transitions={'received': 'finished', 'aborted': 'aborted', 'no_connection': 'failed', 'data_error': 'failed'},
autonomy={'received': Autonomy.Low, 'aborted': Autonomy.Full, 'no_connection': Autonomy.Full, 'data_error': Autonomy.Full},
remapping={'data': 'plan_header'})
# x:34 y:484
OperatableStateMachine.add('Plan_To_Waypoint',
PlanFootstepsState(mode=self.parameter_set),
transitions={'planned': 'Modify_Plan', 'failed': 'Decide_Replan_without_Collision'},
autonomy={'planned': Autonomy.Off, 'failed': Autonomy.Full},
remapping={'step_goal': 'step_goal', 'plan_header': 'plan_header'})
# x:139 y:314
OperatableStateMachine.add('Decide_Replan_without_Collision',
OperatorDecisionState(outcomes=['replan', 'fail'], hint='Try replanning without collision avoidance.', suggestion='replan'),
transitions={'replan': 'Replan_without_Collision', 'fail': 'failed'},
autonomy={'replan': Autonomy.Low, 'fail': Autonomy.Full})
# x:319 y:406
OperatableStateMachine.add('Replan_without_Collision',
PlanFootstepsState(mode='drc_step_no_collision'),
transitions={'planned': 'Modify_Plan', 'failed': 'failed'},
autonomy={'planned': Autonomy.Off, 'failed': Autonomy.Full},
remapping={'step_goal': 'step_goal', 'plan_header': 'plan_header'})
# x:1103 y:424, x:130 y:478
_sm_grasp_trigger_4 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['hand_side', 'template_id', 'grasp_preference'])
with _sm_grasp_trigger_4:
# x:86 y:72
OperatableStateMachine.add('Go_to_Pregrasp',
_sm_go_to_pregrasp_2,
transitions={'finished': 'Open_Fingers', 'failed': 'Grasp_Manually'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'hand_side': 'hand_side', 'grasp_preference': 'grasp_preference', 'template_id': 'template_id', 'pregrasp_pose': 'pregrasp_pose'})
# x:789 y:172
OperatableStateMachine.add('Perform_Grasp',
_sm_perform_grasp_1,
transitions={'finished': 'finished', 'failed': 'Grasp_Manually', 'next': 'Close_Fingers'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'next': Autonomy.Inherit},
remapping={'hand_side': 'hand_side', 'grasp_preference': 'grasp_preference', 'template_id': 'template_id', 'pregrasp_pose': 'pregrasp_pose'})
# x:332 y:178
OperatableStateMachine.add('Open_Fingers',
FingerConfigurationState(hand_type=self.hand_type, configuration=0.0),
transitions={'done': 'Go_to_Grasp', 'failed': 'Grasp_Manually'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.High},
remapping={'hand_side': 'hand_side'})
# x:332 y:28
OperatableStateMachine.add('Close_Fingers',
FingerConfigurationState(hand_type=self.hand_type, configuration=1.0),
transitions={'done': 'Go_to_Pregrasp', 'failed': 'Grasp_Manually'},
autonomy={'done': Autonomy.High, 'failed': Autonomy.High},
remapping={'hand_side': 'hand_side'})
# x:324 y:428
OperatableStateMachine.add('Grasp_Manually',
OperatorDecisionState(outcomes=["fingers_closed", "abort"], hint="Grasp the object manually, continue when fingers are closed.", suggestion=None),
transitions={'fingers_closed': 'finished', 'abort': 'failed'},
autonomy={'fingers_closed': Autonomy.Full, 'abort': Autonomy.Full})
# x:543 y:172
OperatableStateMachine.add('Go_to_Grasp',
_sm_go_to_grasp_0,
transitions={'finished': 'Perform_Grasp', 'failed': 'Grasp_Manually', 'again': 'Close_Fingers'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'again': Autonomy.Inherit},
remapping={'hand_side': 'hand_side', 'grasp_preference': 'grasp_preference', 'template_id': 'template_id'})
# x:30 y:478, x:130 y:478, x:230 y:478
_sm_walk_to_template_5 = OperatableStateMachine(outcomes=['finished', 'failed', 'aborted'], input_keys=['template_id', 'grasp_preference', 'hand_side'])
with _sm_walk_to_template_5:
# x:265 y:28
OperatableStateMachine.add('Decide_Request_Template',
DecisionState(outcomes=['request', 'continue'], conditions=lambda x: 'continue' if x is not None else 'request'),
transitions={'request': 'Request_Template', 'continue': 'Get_Stand_Pose'},
autonomy={'request': Autonomy.Low, 'continue': Autonomy.Off},
remapping={'input_value': 'template_id'})
# x:1033 y:106
OperatableStateMachine.add('Increment_Stand_Pose',
CalculationState(calculation=lambda x: x + 1),
transitions={'done': 'Inform_About_Retry'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'grasp_preference', 'output_value': 'grasp_preference'})
# x:1162 y:29
OperatableStateMachine.add('Inform_About_Retry',
LogState(text="Stand pose choice failed. Trying again.", severity=Logger.REPORT_INFO),
transitions={'done': 'Get_Stand_Pose'},
autonomy={'done': Autonomy.Off})
# x:567 y:118
OperatableStateMachine.add('Inform_About_Fail',
LogState(text="Unable to find a suitable stand pose for the template.", severity=Logger.REPORT_WARN),
transitions={'done': 'Decide_Repeat_Request'},
autonomy={'done': Autonomy.Off})
# x:554 y:274
OperatableStateMachine.add('Get_Goal_from_Operator',
InputState(request=InputState.WAYPOINT_GOAL_POSE, message="Provide a waypoint in front of the template."),
transitions={'received': 'Walk_To_Waypoint', 'aborted': 'aborted', 'no_connection': 'failed', 'data_error': 'failed'},
autonomy={'received': Autonomy.Low, 'aborted': Autonomy.Full, 'no_connection': Autonomy.Full, 'data_error': Autonomy.Full},
remapping={'data': 'plan_header'})
# x:279 y:110
OperatableStateMachine.add('Request_Template',
InputState(request=InputState.SELECTED_OBJECT_ID, message="Specify target template"),
transitions={'received': 'Get_Stand_Pose', 'aborted': 'aborted', 'no_connection': 'failed', 'data_error': 'failed'},
autonomy={'received': Autonomy.Off, 'aborted': Autonomy.Full, 'no_connection': Autonomy.Full, 'data_error': Autonomy.Full},
remapping={'data': 'template_id'})
# x:825 y:461
OperatableStateMachine.add('Wait_For_Stand',
CheckCurrentControlModeState(target_mode=CheckCurrentControlModeState.STAND, wait=True),
transitions={'correct': 'finished', 'incorrect': 'failed'},
autonomy={'correct': Autonomy.Low, 'incorrect': Autonomy.Full},
remapping={'control_mode': 'control_mode'})
# x:1143 y:277
OperatableStateMachine.add('Decide_Stand_Preference',
OperatorDecisionState(outcomes=["same", "next", "abort"], hint="Same or next stand pose?", suggestion="next"),
transitions={'same': 'Inform_About_Retry', 'next': 'Increment_Stand_Pose', 'abort': 'aborted'},
autonomy={'same': Autonomy.Full, 'next': Autonomy.Full, 'abort': Autonomy.Full})
# x:842 y:152
OperatableStateMachine.add('Planning_Pipeline',
_sm_planning_pipeline_3,
transitions={'finished': 'Walk_To_Waypoint', 'failed': 'Decide_Stand_Preference', 'aborted': 'Decide_Stand_Preference'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'aborted': Autonomy.Inherit},
remapping={'stand_pose': 'stand_pose', 'plan_header': 'plan_header'})
# x:833 y:276
OperatableStateMachine.add('Walk_To_Waypoint',
ExecuteStepPlanActionState(),
transitions={'finished': 'Wait_For_Stand', 'failed': 'Decide_Stand_Preference'},
autonomy={'finished': Autonomy.Off, 'failed': Autonomy.Full},
remapping={'plan_header': 'plan_header'})
# x:554 y:195
OperatableStateMachine.add('Decide_Repeat_Request',
OperatorDecisionState(outcomes=['repeat_id', 'request_goal'], hint=None, suggestion=None),
transitions={'repeat_id': 'Request_Template', 'request_goal': 'Get_Goal_from_Operator'},
autonomy={'repeat_id': Autonomy.Low, 'request_goal': Autonomy.High})
# x:547 y:27
OperatableStateMachine.add('Get_Stand_Pose',
GetTemplateStandPoseState(),
transitions={'done': 'Planning_Pipeline', 'failed': 'Inform_About_Fail', 'not_available': 'Inform_About_Fail'},
autonomy={'done': Autonomy.Off, 'failed': Autonomy.Low, 'not_available': Autonomy.High},
remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'preference': 'grasp_preference', 'stand_pose': 'stand_pose'})
# x:133 y:340, x:383 y:140
_sm_perform_step_back_6 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['step_back_distance'])
with _sm_perform_step_back_6:
# x:78 y:78
OperatableStateMachine.add('Plan_Steps_Back',
FootstepPlanRelativeState(direction=FootstepPlanRelativeState.DIRECTION_BACKWARD),
transitions={'planned': 'Do_Steps_Back', 'failed': 'failed'},
autonomy={'planned': Autonomy.High, 'failed': Autonomy.Full},
remapping={'distance': 'step_back_distance', 'plan_header': 'plan_header'})
# x:74 y:228
OperatableStateMachine.add('Do_Steps_Back',
ExecuteStepPlanActionState(),
transitions={'finished': 'finished', 'failed': 'failed'},
autonomy={'finished': Autonomy.Low, 'failed': Autonomy.Full},
remapping={'plan_header': 'plan_header'})
# x:133 y:340, x:333 y:90
_sm_release_trigger_7 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['hand_side', 'none'])
with _sm_release_trigger_7:
# x:82 y:78
OperatableStateMachine.add('Open_Fingers',
FingerConfigurationState(hand_type=self.hand_type, configuration=0),
transitions={'done': 'Take_Hand_Back', 'failed': 'failed'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
remapping={'hand_side': 'hand_side'})
# x:96 y:178
OperatableStateMachine.add('Take_Hand_Back',
LogState(text="Take hand slightly back", severity=Logger.REPORT_HINT),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Full})
# x:733 y:240, x:33 y:289
_sm_pull_trigger_8 = OperatableStateMachine(outcomes=['finished', 'failed'], input_keys=['template_id', 'hand_side', 'none'])
with _sm_pull_trigger_8:
# x:202 y:28
OperatableStateMachine.add('Ready_To_Pull',
LogState(text="Ready to pull the trigger down", severity=Logger.REPORT_INFO),
transitions={'done': 'Get_Pull_Affordance'},
autonomy={'done': Autonomy.High})
# x:192 y:328
OperatableStateMachine.add('Plan_Pull',
PlanAffordanceState(vel_scaling=0.1, planner_id="RRTConnectkConfigDefault"),
transitions={'done': 'Execute_Pull', 'incomplete': 'Execute_Pull', 'failed': 'failed'},
autonomy={'done': Autonomy.High, 'incomplete': Autonomy.High, 'failed': Autonomy.Full},
remapping={'affordance': 'affordance', 'hand': 'hand_side', 'reference_point': 'none', 'joint_trajectory': 'joint_trajectory', 'plan_fraction': 'plan_fraction'})
# x:176 y:428
OperatableStateMachine.add('Execute_Pull',
ExecuteTrajectoryMsgState(controller=affordance_controller),
transitions={'done': 'Decide_Repeat_Pull', 'failed': 'failed'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
remapping={'joint_trajectory': 'joint_trajectory'})
# x:183 y:228
OperatableStateMachine.add('Scale_Pull_Affordance',
CalculationState(calculation=lambda x: x),
transitions={'done': 'Plan_Pull'},
autonomy={'done': Autonomy.Off},
remapping={'input_value': 'affordance', 'output_value': 'affordance'})
# x:173 y:128
OperatableStateMachine.add('Get_Pull_Affordance',
GetTemplateAffordanceState(identifier=pull_affordance),
transitions={'done': 'Scale_Pull_Affordance', 'failed': 'failed', 'not_available': 'failed'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full, 'not_available': Autonomy.Full},
remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'affordance': 'affordance'})
# x:437 y:228
OperatableStateMachine.add('Decide_Repeat_Pull',
OperatorDecisionState(outcomes=['done', 'repeat'], hint="Pull further?", suggestion='done'),
transitions={'done': 'finished', 'repeat': 'Get_Pull_Affordance'},
autonomy={'done': Autonomy.High, 'repeat': Autonomy.Full})
with _state_machine:
# x:73 y:78
OperatableStateMachine.add('Request_Trigger_Template',
InputState(request=InputState.SELECTED_OBJECT_ID, message="Place trigger template"),
transitions={'received': 'Decide_Walking', 'aborted': 'failed', 'no_connection': 'failed', 'data_error': 'failed'},
autonomy={'received': Autonomy.Low, 'aborted': Autonomy.Full, 'no_connection': Autonomy.Full, 'data_error': Autonomy.Full},
remapping={'data': 'template_id'})
# x:337 y:78
OperatableStateMachine.add('Decide_Walking',
OperatorDecisionState(outcomes=["walk", "stand"], hint="Walk to template?", suggestion="walk"),
transitions={'walk': 'Walk_To_Template', 'stand': 'Set_Manipulate'},
autonomy={'walk': Autonomy.High, 'stand': Autonomy.Full})
# x:844 y:322
OperatableStateMachine.add('Pull_Trigger',
_sm_pull_trigger_8,
transitions={'finished': 'Release_Trigger', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'template_id': 'template_id', 'hand_side': 'hand_side', 'none': 'none'})
# x:836 y:422
OperatableStateMachine.add('Release_Trigger',
_sm_release_trigger_7,
transitions={'finished': 'Warn_Stand', 'failed': 'Warn_Stand'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'hand_side': 'hand_side', 'none': 'none'})
# x:826 y:678
OperatableStateMachine.add('Go_To_Stand_Pose',
MoveitPredefinedPoseState(target_pose=MoveitPredefinedPoseState.STAND_POSE, vel_scaling=0.1, ignore_collisions=False, link_paddings={}, is_cartesian=False),
transitions={'done': 'Set_Stand', 'failed': 'failed'},
autonomy={'done': Autonomy.Low, 'failed': Autonomy.Full},
remapping={'side': 'none'})
# x:566 y:78
OperatableStateMachine.add('Set_Manipulate',
ChangeControlModeActionState(target_mode=ChangeControlModeActionState.MANIPULATE),
transitions={'changed': 'Set_Template_Frame', 'failed': 'failed'},
autonomy={'changed': Autonomy.Low, 'failed': Autonomy.Full})
# x:858 y:578
OperatableStateMachine.add('Warn_Stand',
LogState(text="Will go to stand now", severity=Logger.REPORT_INFO),
transitions={'done': 'Go_To_Stand_Pose'},
autonomy={'done': Autonomy.High})
# x:566 y:678
OperatableStateMachine.add('Set_Stand',
ChangeControlModeActionState(target_mode=ChangeControlModeActionState.STAND),
transitions={'changed': 'Decide_Step_Back', 'failed': 'failed'},
autonomy={'changed': Autonomy.Low, 'failed': Autonomy.Full})
# x:337 y:678
OperatableStateMachine.add('Decide_Step_Back',
OperatorDecisionState(outcomes=["walk", "stand"], hint="Step back?", suggestion="walk"),
transitions={'walk': 'Perform_Step_Back', 'stand': 'finished'},
autonomy={'walk': Autonomy.High, 'stand': Autonomy.Full})
# x:77 y:672
OperatableStateMachine.add('Perform_Step_Back',
_sm_perform_step_back_6,
transitions={'finished': 'finished', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'step_back_distance': 'step_back_distance'})
# x:330 y:172
OperatableStateMachine.add('Walk_To_Template',
_sm_walk_to_template_5,
transitions={'finished': 'Set_Manipulate', 'failed': 'failed', 'aborted': 'Set_Manipulate'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit, 'aborted': Autonomy.Inherit},
remapping={'template_id': 'template_id', 'grasp_preference': 'grasp_preference', 'hand_side': 'hand_side'})
# x:841 y:222
OperatableStateMachine.add('Grasp_Trigger',
_sm_grasp_trigger_4,
transitions={'finished': 'Pull_Trigger', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'hand_side': 'hand_side', 'template_id': 'template_id', 'grasp_preference': 'grasp_preference'})
# x:846 y:128
OperatableStateMachine.add('Look_At_Trigger',
LookAtTargetState(),
transitions={'done': 'Grasp_Trigger'},
autonomy={'done': Autonomy.Off},
remapping={'frame': 'template_frame'})
# x:837 y:28
OperatableStateMachine.add('Set_Template_Frame',
CalculationState(calculation=lambda x: "template_tf_%d" % x),
transitions={'done': 'Look_At_Trigger'},
autonomy={'done': Autonomy.Low},
remapping={'input_value': 'template_id', 'output_value': 'template_frame'})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
def scale_pull_affordance(self, affordance):
affordance.displacement = self._pull_displacement  # assumption: apply the pull displacement configured in create()
return affordance
# workflows/cloudify_system_workflows/snapshots/snapshot_restore.py
import re
import os
import json
import time
import uuid
import base64
import shutil
import zipfile
import tempfile
import threading
import subprocess
from contextlib import contextmanager
from cloudify.workflows import ctx
from cloudify.manager import get_rest_client
from cloudify.exceptions import NonRecoverableError
from cloudify.constants import (
NEW_TOKEN_FILE_NAME,
FILE_SERVER_SNAPSHOTS_FOLDER,
)
from cloudify.snapshots import SNAPSHOT_RESTORE_FLAG_FILE
from cloudify.utils import ManagerVersion, get_local_rest_certificate
from cloudify_rest_client.executions import Execution
from . import networks, utils
from cloudify_system_workflows.deployment_environment import \
_create_deployment_workdir
from cloudify_system_workflows.snapshots import npm
from .agents import Agents
from .postgres import Postgres
from .credentials import restore as restore_credentials
from .constants import (
ADMIN_DUMP_FILE,
ADMIN_TOKEN_SCRIPT,
ALLOW_DB_CLIENT_CERTS_SCRIPT,
ARCHIVE_CERT_DIR,
CERT_DIR,
DENY_DB_CLIENT_CERTS_SCRIPT,
HASH_SALT_FILENAME,
INTERNAL_CA_CERT_FILENAME,
INTERNAL_CA_KEY_FILENAME,
INTERNAL_CERT_FILENAME,
INTERNAL_KEY_FILENAME,
INTERNAL_P12_FILENAME,
METADATA_FILENAME,
M_SCHEMA_REVISION,
M_STAGE_SCHEMA_REVISION,
M_COMPOSER_SCHEMA_REVISION,
M_VERSION,
MANAGER_PYTHON,
V_4_0_0,
V_4_2_0,
V_4_3_0,
V_4_4_0,
V_4_6_0,
V_5_0_5,
V_5_3_0,
V_6_0_0,
SECURITY_FILE_LOCATION,
SECURITY_FILENAME,
REST_AUTHORIZATION_CONFIG_PATH,
STAGE_USER,
STAGE_APP,
COMPOSER_USER,
COMPOSER_APP
)
from .utils import is_later_than_now, parse_datetime_string
class SnapshotRestore(object):
SCHEMA_REVISION_4_0 = '333998bc1627'
def __init__(self,
config,
snapshot_id,
force,
timeout,
premium_enabled,
user_is_bootstrap_admin,
restore_certificates,
no_reboot):
self._config = utils.DictToAttributes(config)
self._snapshot_id = snapshot_id
self._force = force
self._timeout = timeout
self._restore_certificates = restore_certificates
self._no_reboot = no_reboot
self._premium_enabled = premium_enabled
self._user_is_bootstrap_admin = user_is_bootstrap_admin
self._post_restore_commands = []
self._tempdir = None
self._snapshot_version = None
self._client = get_rest_client()
self._manager_version = utils.get_manager_version(self._client)
self._encryption_key = None
self._service_management = None
self._semaphore = threading.Semaphore(
self._config.snapshot_restore_threads)
def restore(self):
self._mark_manager_restoring()
self._tempdir = tempfile.mkdtemp('-snapshot-data')
snapshot_path = self._get_snapshot_path()
ctx.logger.debug('Going to restore snapshot, '
'snapshot_path: {0}'.format(snapshot_path))
try:
metadata = self._extract_snapshot_archive(snapshot_path)
self._snapshot_version = ManagerVersion(metadata[M_VERSION])
schema_revision = metadata.get(
M_SCHEMA_REVISION,
self.SCHEMA_REVISION_4_0,
)
stage_revision = metadata.get(M_STAGE_SCHEMA_REVISION) or ''
if stage_revision and self._premium_enabled:
stage_revision = re.sub(r".*\n", '', stage_revision)
composer_revision = metadata.get(M_COMPOSER_SCHEMA_REVISION) or ''
if composer_revision == '20170601133017-4_1-init.js':
# Old composer metadata always incorrectly put the first
# migration not the last one. As we don't support anything
# earlier than the last migration before 5.3, this will always
# be the right answer
composer_revision = '20171229105614-4_3-blueprint-repo.js'
if composer_revision and self._premium_enabled:
composer_revision = re.sub(r".*\n", '', composer_revision)
self._validate_snapshot()
with Postgres(self._config) as postgres:
utils.sudo(ALLOW_DB_CLIENT_CERTS_SCRIPT)
self._restore_files_to_manager()
utils.sudo(DENY_DB_CLIENT_CERTS_SCRIPT)
self._service_management = \
json.loads(postgres.get_service_management())
with self._pause_services():
self._restore_db(
postgres,
schema_revision,
stage_revision,
composer_revision
)
self._restore_hash_salt()
self._encrypt_secrets(postgres)
self._encrypt_rabbitmq_passwords(postgres)
self._possibly_update_encryption_key()
self._generate_new_rest_token()
self._restart_rest_service()
self._restart_stage_service()
self._restore_credentials(postgres)
self._restore_amqp_vhosts_and_users()
self._restore_agents()
self._restore_deployment_envs()
self._restore_scheduled_executions()
self._restore_inter_deployment_dependencies()
self._update_roles_and_permissions()
self._update_deployment_statuses()
self._update_node_instance_indices()
self._set_default_user_profile_flags()
self._create_system_filters()
postgres.refresh_roles()
if self._restore_certificates:
self._restore_certificate()
finally:
self._trigger_post_restore_commands()
ctx.logger.debug('Removing temp dir: {0}'.format(self._tempdir))
shutil.rmtree(self._tempdir)
@contextmanager
def _pause_services(self):
"""Stop db-using services for the duration of this context"""
# While the snapshot is being restored, the database is downgraded
# and upgraded back, and these services must not attempt to use it
to_pause = ['cloudify-amqp-postgres', 'cloudify-execution-scheduler']
for service in to_pause:
utils.run_service(self._service_management, 'stop', service)
try:
yield
finally:
for service in to_pause:
utils.run_service(self._service_management, 'start', service)
def _generate_new_rest_token(self):
"""
`snapshot restore` is triggered with a REST call that is authenticated
using security keys that are located in /opt/manager/rest-security.conf.
During restore the rest-security.conf is changed, therefore any
restart of the REST service will result in authentication failure
(security config is loaded when the REST service starts).
Gunicorn restarts REST workers every 1000 calls.
Our solution:
1. At the earliest stage possible create a new valid REST token
using the new rest-security.conf file
2. Restart REST service
3. Continue with restore snapshot
(CY-767)
"""
self._generate_new_token()
new_token = self._get_token_from_file()
# Replace old token with new one in the workflow context, and create
# new REST client
ctx._context['rest_token'] = new_token
self._client = get_rest_client()
def _restart_rest_service(self):
utils.run_service(
self._service_management,
'restart',
'cloudify-restservice'
)
self._wait_for_rest_to_restart()
def _restart_stage_service(self):
utils.run_service(
self._service_management,
'restart',
'cloudify-stage'
)
def _wait_for_rest_to_restart(self, timeout=60):
deadline = time.time() + timeout
while True:
time.sleep(0.5)
if time.time() > deadline:
raise NonRecoverableError(
'Failed to restart cloudify-restservice.')
try:
self._client.manager.get_status()
break
except Exception:
pass
def _update_roles_and_permissions(self):
ctx.logger.info('Updating roles and permissions')
if os.path.exists(REST_AUTHORIZATION_CONFIG_PATH):
utils.run(['/opt/manager/scripts/load_permissions.py'])
def _create_system_filters(self):
ctx.logger.info('Creating system filters')
if self._snapshot_version < V_6_0_0:
utils.run(['/opt/manager/scripts/create_system_filters.py'])
def _update_deployment_statuses(self):
ctx.logger.info('Updating deployment statuses.')
if self._snapshot_version < V_5_3_0:
dir_path = os.path.dirname(os.path.realpath(__file__))
script_path = os.path.join(
dir_path,
'populate_deployment_statuses.py'
)
command = [MANAGER_PYTHON, script_path, self._tempdir]
utils.run(command)
def _update_node_instance_indices(self):
ctx.logger.info('Updating node indices.')
if self._snapshot_version < V_5_0_5:
with Postgres(self._config) as postgres:
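# For pre-5.0.5 snapshots, backfill node_instances.index by ranking each node's instances in _storage_id order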
postgres.run_query(
'update node_instances ni set index=u.rank '
'from (select node_instances._storage_id, rank() '
'over (partition by node_instances._node_fk '
'order by node_instances._storage_id) '
'from node_instances) u '
'where ni._storage_id = u._storage_id;'
)
def _set_default_user_profile_flags(self):
if self._snapshot_version < V_5_3_0:
ctx.logger.info(
'Disabling `getting started` for all existing users.')
users = self._client.users.list()
for user in users:
self._client.users.set_show_getting_started(user.username,
False)
def _generate_new_token(self):
dir_path = os.path.dirname(os.path.realpath(__file__))
script_path = os.path.join(dir_path, 'generate_new_rest_token.py')
command = [MANAGER_PYTHON, script_path, self._tempdir]
utils.run(command)
def _get_token_from_file(self):
"""
The new token is saved in the snapshot's temp dir (which is passed as
an argument to the 'generate_new_rest_token.py' script).
"""
new_token_path = os.path.join(self._tempdir, NEW_TOKEN_FILE_NAME)
with open(new_token_path, 'r') as f:
new_token = f.read()
return new_token
def _possibly_update_encryption_key(self):
with open(SECURITY_FILE_LOCATION) as security_conf_file:
rest_security_conf = json.load(security_conf_file)
enc_key = base64.urlsafe_b64decode(str(
rest_security_conf['encryption_key'],
))
if len(enc_key) == 32:
ctx.logger.info(
'Updating encryption key for AES256'
)
subprocess.check_call([
'/opt/cloudify/encryption/update-encryption-key', '--commit'
])
def _restore_deployment_envs(self):
deps = utils.get_dep_contexts(self._snapshot_version)
for tenant, deployments in deps:
ctx.logger.info('Creating deployment dirs for %s', tenant)
for deployment_id in deployments:
_create_deployment_workdir(
deployment_id=deployment_id,
tenant=tenant,
logger=ctx.logger,
)
ctx.logger.info('Successfully created deployment dirs.')
def _restore_inter_deployment_dependencies(self):
# Managers older than 4.6.0 didn't support get_capability.
# Managers newer than 5.0.5 have the inter-deployment dependencies as
# part of the database dump.
if (self._snapshot_version < V_4_6_0 or
self._snapshot_version > V_5_0_5):
return
ctx.logger.info('Restoring inter deployment dependencies')
update_service_composition = (self._snapshot_version == V_5_0_5)
script_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'restore_idd_script.py'
)
cmd = ['/opt/manager/env/bin/python',
script_path,
ctx.tenant_name,
str(update_service_composition)]
restore_idd_script = subprocess.run(cmd)
if restore_idd_script.returncode:
restore_idd_log_path = 'mgmtworker/logs/restore_idd.log'
raise NonRecoverableError('Failed to restore snapshot, could not '
'create the inter deployment '
'dependencies. See log {0} for more '
'details'.format(restore_idd_log_path))
ctx.logger.info('Successfully restored inter deployment dependencies.')
def _restore_amqp_vhosts_and_users(self):
subprocess.check_call(
[MANAGER_PYTHON, self._get_script_path('restore_amqp.py')]
)
def _restore_certificate(self):
archive_cert_dir = os.path.join(self._tempdir, ARCHIVE_CERT_DIR)
existing_cert_dir = os.path.dirname(get_local_rest_certificate())
restored_cert_dir = '{0}_from_snapshot_{1}'.format(existing_cert_dir,
self._snapshot_id)
# Put the certificates where we need them
utils.copy_snapshot_path(archive_cert_dir, restored_cert_dir)
certs = [
INTERNAL_CA_CERT_FILENAME,
INTERNAL_CA_KEY_FILENAME,
INTERNAL_CERT_FILENAME,
INTERNAL_KEY_FILENAME,
INTERNAL_P12_FILENAME,
]
# Restore each cert from the snapshot over the current manager one
for cert in certs:
self._post_restore_commands.append(
'mv -f {source_dir}/{cert} {dest_dir}/{cert}'.format(
dest_dir=existing_cert_dir,
source_dir=restored_cert_dir,
cert=cert,
)
)
if not os.path.exists(
os.path.join(archive_cert_dir, INTERNAL_CA_CERT_FILENAME)):
for source, target in \
[(INTERNAL_CERT_FILENAME, INTERNAL_CA_CERT_FILENAME),
(INTERNAL_KEY_FILENAME, INTERNAL_CA_KEY_FILENAME)]:
source = os.path.join(CERT_DIR, source)
target = os.path.join(CERT_DIR, target)
self._post_restore_commands.append(
'cp {source} {target}'.format(
source=source,
target=target,
)
)
if not self._no_reboot:
self._post_restore_commands.append('sudo shutdown -r now')
def _load_admin_dump(self):
# This should only have been called if the hash salt was found, so
# there should be no case where this gets called but the file does not
# exist.
admin_dump_file_path = os.path.join(self._tempdir, ADMIN_DUMP_FILE)
with open(admin_dump_file_path) as admin_dump_handle:
admin_account = json.load(admin_dump_handle)
return admin_account
def _restore_admin_user(self):
admin_account = self._load_admin_dump()
with Postgres(self._config) as postgres:
psql_command = ' '.join(postgres.get_psql_command())
psql_command += ' -c '
update_prefix = '"UPDATE users SET '
# Hardcoded uid as we only allow running restore on a clean manager
# at the moment, so admin must be the first user (ID=0)
update_suffix = ' WHERE users.id=0"'
# Discard the id, we don't need it
admin_account.pop('id')
updates = []
for column, value in admin_account.items():
if value:
updates.append("{column}='{value}'".format(
column=column,
value=value,
))
updates = ','.join(updates)
updates = updates.replace('$', '\\$')
command = psql_command + update_prefix + updates + update_suffix
# We have to do this after the restore process or it'll break the
# workflow execution updating and thus cause the workflow to fail
self._post_restore_commands.append(command)
# recreate the admin REST token file
self._post_restore_commands.append(
'sudo {0}'.format(ADMIN_TOKEN_SCRIPT))
def _get_admin_user_token(self):
return self._load_admin_dump()['api_token_key']
def _trigger_post_restore_commands(self):
# The last thing the workflow does is delete the tempdir.
command = 'while [[ -d {tempdir} ]]; do sleep 0.5; done; '.format(
tempdir=self._tempdir,
)
# Give a short delay afterwards for the workflow to be marked as
# completed, in case of any delays that might be upset by certs being
# messed around with while running.
command += 'sleep 3; '
self._post_restore_commands.append(
'rm -f {0}'.format(SNAPSHOT_RESTORE_FLAG_FILE)
)
command += '; '.join(self._post_restore_commands)
ctx.logger.info(
'After restore, the following commands will run: {cmds}'.format(
cmds=command,
)
)
subprocess.Popen(command, shell=True)
def _validate_snapshot(self):
validator = SnapshotRestoreValidator(
self._snapshot_version,
self._premium_enabled,
self._user_is_bootstrap_admin,
self._client,
self._force,
self._tempdir
)
validator.validate()
def _restore_files_to_manager(self):
ctx.logger.info('Restoring files from the archive to the manager')
utils.copy_files_between_manager_and_snapshot(
self._tempdir,
self._config,
to_archive=False,
tenant_name=None,
)
# Only restore stage files to their correct location
# if this snapshot version is the same as the manager version
# or from 4.3 onwards we support stage upgrade
if self._snapshot_version == self._manager_version or \
self._snapshot_version >= V_4_3_0:
stage_restore_override = True
else:
stage_restore_override = False
#!/usr/bin/env python
import random
import os_ident
import uncrc32
try: import pcap as pcapy
except: import pcapy
from impacket import ImpactPacket
from impacket import ImpactDecoder
from impacket.ImpactPacket import TCPOption
from impacket.examples import logger
#defaults
MAC = "01:02:03:04:05:06"
IP = "192.168.67.254"
IFACE = "eth0"
OPEN_TCP_PORTS = [80, 443]
OPEN_UDP_PORTS = [111]
UDP_CMD_PORT = 12345
nmapOSDB = '/usr/share/nmap/nmap-os-db'
# Fingerprint = 'Adtran NetVanta 3200 router' # CD=Z TOSI=Z <----------- NMAP detects it as Linux!!!
# Fingerprint = 'ADIC Scalar 1000 tape library remote management unit' # DFI=S
# Fingerprint = 'Siemens Gigaset SX541 or USRobotics USR9111 wireless DSL modem' # DFI=O U1(DF=N IPL=38)
# Fingerprint = 'Apple Mac OS X 10.5.6 (Leopard) (Darwin 9.6.0)' # DFI=Y SI=S U1(DF=Y)
Fingerprint = 'Sun Solaris 10 (SPARC)'
# Fingerprint = 'Sun Solaris 9 (x86)'
# Fingerprint = '3Com OfficeConnect 3CRWER100-75 wireless broadband router' # TI=Z DFI=N !SS TI=Z II=I
# Fingerprint = 'WatchGuard Firebox X5w firewall/WAP' # TI=RD
# no TI=Hex
# Fingerprint = 'FreeBSD 6.0-STABLE - 6.2-RELEASE' # TI=RI
# Fingerprint = 'Microsoft Windows 98 SE' # TI=BI ----> BROKEN! nmap shows no SEQ() output
# Fingerprint = 'Microsoft Windows NT 4.0 SP5 - SP6' # TI=BI TOSI=S SS=S
# Fingerprint = 'Microsoft Windows Vista Business' # TI=I U1(IPL=164)
# Fingerprint = 'FreeBSD 6.1-RELEASE' # no TI (TI=O)
# Fingerprint = '2Wire 1701HG wireless ADSL modem' # IE(R=N)
# Fingerprint = 'Cisco Catalyst 1912 switch' # TOSI=O SS=S
O_ETH = 0
O_IP = 1
O_ARP = 1
O_UDP = 2
O_TCP = 2
O_ICMP = 2
O_UDP_DATA = 3
O_ICMP_DATA = 3
def string2tuple(string):
if string.find(':') >= 0:
return [int(x) for x in string.split(':')]
else:
return [int(x) for x in string.split('.')]
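# Illustration: string2tuple("192.168.67.254") -> [192, 168, 67, 254],
# string2tuple("01:02:03:04:05:06") -> [1, 2, 3, 4, 5, 6] (decimal parsing; hex octets such as "0a" would raise ValueError)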
class Responder:
templateClass = None
signatureName = None
def __init__(self, machine):
self.machine = machine
print "Initializing %s" % self.__class__.__name__
self.initTemplate()
self.initFingerprint()
def initTemplate(self):
if not self.templateClass:
self.template_onion = None
else:
try:
probe = self.templateClass(0, ['0.0.0.0',self.getIP()],[0, 0])
except:
probe = self.templateClass(0, ['0.0.0.0',self.getIP()])
self.template_onion = [probe.get_packet()]
try:
while 1: self.template_onion.append(self.template_onion[-1].child())
except: pass
# print "Template: %s" % self.template_onion[O_ETH]
# print "Options: %r" % self.template_onion[O_TCP].get_padded_options()
# print "Flags: 0x%04x" % self.template_onion[O_TCP].get_th_flags()
def initFingerprint(self):
if not self.signatureName:
self.fingerprint = None
else:
self.fingerprint = self.machine.fingerprint.get_tests()[self.signatureName].copy()
def isMine(self, in_onion):
return False
def buildAnswer(self, in_onion):
return None
def sendAnswer(self, out_onion):
self.machine.sendPacket(out_onion)
def process(self, in_onion):
if not self.isMine(in_onion): return False
print "Got packet for %s" % self.__class__.__name__
out_onion = self.buildAnswer(in_onion)
if out_onion: self.sendAnswer(out_onion)
return True
def getIP(self):
return self.machine.ipAddress
# Generic Responders (does the word Responder exist?)
class ARPResponder(Responder):
def isMine(self, in_onion):
if len(in_onion) < 2: return False
if in_onion[O_ARP].ethertype != ImpactPacket.ARP.ethertype:
return False
return (
in_onion[O_ARP].get_ar_op() == 1 and # ARP REQUEST
in_onion[O_ARP].get_ar_tpa() == string2tuple(self.machine.ipAddress))
def buildAnswer(self, in_onion):
eth = ImpactPacket.Ethernet()
arp = ImpactPacket.ARP()
eth.contains(arp)
arp.set_ar_hrd(1) # Hardware type Ethernet
arp.set_ar_pro(0x800) # IP
arp.set_ar_op(2) # REPLY
arp.set_ar_hln(6)
arp.set_ar_pln(4)
arp.set_ar_sha(string2tuple(self.machine.macAddress))
arp.set_ar_spa(string2tuple(self.machine.ipAddress))
arp.set_ar_tha(in_onion[O_ARP].get_ar_sha())
arp.set_ar_tpa(in_onion[O_ARP].get_ar_spa())
eth.set_ether_shost(arp.get_ar_sha())
eth.set_ether_dhost(arp.get_ar_tha())
return [eth, arp]
class IPResponder(Responder):
def buildAnswer(self, in_onion):
eth = ImpactPacket.Ethernet()
ip = ImpactPacket.IP()
eth.contains(ip)
eth.set_ether_shost(in_onion[O_ETH].get_ether_dhost())
eth.set_ether_dhost(in_onion[O_ETH].get_ether_shost())
ip.set_ip_src(in_onion[O_IP].get_ip_dst())
ip.set_ip_dst(in_onion[O_IP].get_ip_src())
ip.set_ip_id(self.machine.getIPID())
return [eth, ip]
def sameIPFlags(self, in_onion):
if not self.template_onion: return True
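# 0xe000 keeps only the three IP header flag bits (reserved, DF, MF), so only those are compared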
return (self.template_onion[O_IP].get_ip_off() & 0xe000) == (in_onion[O_IP].get_ip_off() & 0xe000)
def isMine(self, in_onion):
if len(in_onion) < 2: return False
return (
(in_onion[O_IP].ethertype == ImpactPacket.IP.ethertype) and
(in_onion[O_IP].get_ip_dst() == self.machine.ipAddress) and
self.sameIPFlags(in_onion)
)
def setTTLFromFingerprint(self, out_onion):
f = self.fingerprint
# Test T: Initial TTL = range_low-range_hi, base 16
# Assumption: we are using the minimum in the TTL range
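# e.g. an illustrative f['T'] of '3B-45' gives ttl = int('3B', 16) = 59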
try:
ttl = f['T'].split('-')
ttl = int(ttl[0], 16)
except:
ttl = 0x7f
# Test TG: Initial TTL Guess. It's just a number, we prefer this
try: ttl = int(f['TG'], 16)
except: pass
out_onion[O_IP].set_ip_ttl(ttl)
class ICMPResponder(IPResponder):
def buildAnswer(self, in_onion):
out_onion = IPResponder.buildAnswer(self, in_onion)
icmp = ImpactPacket.ICMP()
out_onion[O_IP].contains(icmp)
out_onion.append(icmp)
icmp.set_icmp_id(in_onion[O_ICMP].get_icmp_id())
icmp.set_icmp_seq(in_onion[O_ICMP].get_icmp_seq())
out_onion[O_IP].set_ip_id(self.machine.getIPID_ICMP())
return out_onion
def isMine(self, in_onion):
if not IPResponder.isMine(self, in_onion): return False
if len(in_onion) < 3: return False
return (
(in_onion[O_ICMP].protocol == ImpactPacket.ICMP.protocol) and
self.sameICMPTemplate(in_onion))
def sameICMPTemplate(self, in_onion):
t_ip = self.template_onion[O_IP]
t_icmp = self.template_onion[O_ICMP]
t_icmp_datalen = self.template_onion[O_ICMP_DATA].get_size()
return (
(t_ip.get_ip_tos() == in_onion[O_IP].get_ip_tos()) and
(t_ip.get_ip_df() == in_onion[O_IP].get_ip_df()) and
(t_icmp.get_icmp_type() == in_onion[O_ICMP].get_icmp_type()) and
(t_icmp.get_icmp_code() == in_onion[O_ICMP].get_icmp_code()) and
(t_icmp_datalen == in_onion[O_ICMP_DATA].get_size())
)
class UDPResponder(IPResponder):
def isMine(self, in_onion):
return (
IPResponder.isMine(self, in_onion) and
(len(in_onion) >= 3) and
(in_onion[O_UDP].protocol == ImpactPacket.UDP.protocol)
)
class OpenUDPResponder(UDPResponder):
def isMine(self, in_onion):
return (
UDPResponder.isMine(self, in_onion) and
self.machine.isUDPPortOpen(in_onion[O_UDP].get_uh_dport()))
def buildAnswer(self, in_onion):
out_onion = IPResponder.buildAnswer(self, in_onion)
udp = ImpactPacket.UDP()
out_onion[O_IP].contains(udp)
out_onion.append(udp)
udp.set_uh_dport(in_onion[O_UDP].get_uh_sport())
udp.set_uh_sport(in_onion[O_UDP].get_uh_dport())
return out_onion
class ClosedUDPResponder(UDPResponder):
def isMine(self, in_onion):
return (
UDPResponder.isMine(self, in_onion) and
not self.machine.isUDPPortOpen(in_onion[O_UDP].get_uh_dport()))
def buildAnswer(self, in_onion):
out_onion = IPResponder.buildAnswer(self, in_onion)
icmp = ImpactPacket.ICMP()
out_onion[O_IP].contains(icmp)
out_onion.append(icmp)
icmp.contains(in_onion[O_IP])
out_onion += in_onion[O_IP:]
icmp.set_icmp_type(icmp.ICMP_UNREACH)
icmp.set_icmp_code(icmp.ICMP_UNREACH_PORT)
return out_onion
class TCPResponder(IPResponder):
def buildAnswer(self, in_onion):
out_onion = IPResponder.buildAnswer(self, in_onion)
tcp = ImpactPacket.TCP()
out_onion[O_IP].contains(tcp)
out_onion.append(tcp)
tcp.set_th_dport(in_onion[O_TCP].get_th_sport())
tcp.set_th_sport(in_onion[O_TCP].get_th_dport())
return out_onion
def sameTCPFlags(self, in_onion):
if not self.template_onion: return True
in_flags = in_onion[O_TCP].get_th_flags() & 0xfff
t_flags = self.template_onion[O_TCP].get_th_flags() & 0xfff
return in_flags == t_flags
def sameTCPOptions(self, in_onion):
if not self.template_onion: return True
in_options = in_onion[O_TCP].get_padded_options()
t_options = self.template_onion[O_TCP].get_padded_options()
return in_options == t_options
def isMine(self, in_onion):
if not IPResponder.isMine(self, in_onion): return False
if len(in_onion) < 3: return False
return (
in_onion[O_TCP].protocol == ImpactPacket.TCP.protocol and
self.sameTCPFlags(in_onion) and
self.sameTCPOptions(in_onion)
)
class OpenTCPResponder(TCPResponder):
def isMine(self, in_onion):
return (
TCPResponder.isMine(self, in_onion) and
in_onion[O_TCP].get_SYN() and
self.machine.isTCPPortOpen(in_onion[O_TCP].get_th_dport()))
def buildAnswer(self, in_onion):
out_onion = TCPResponder.buildAnswer(self, in_onion)
out_onion[O_TCP].set_SYN()
out_onion[O_TCP].set_ACK()
out_onion[O_TCP].set_th_ack(in_onion[O_TCP].get_th_seq()+1)
out_onion[O_TCP].set_th_seq(self.machine.getTCPSequence())
return out_onion
class ClosedTCPResponder(TCPResponder):
def isMine(self, in_onion):
return (
TCPResponder.isMine(self, in_onion) and
in_onion[O_TCP].get_SYN() and
not self.machine.isTCPPortOpen(in_onion[O_TCP].get_th_dport()))
def buildAnswer(self, in_onion):
out_onion = TCPResponder.buildAnswer(self, in_onion)
out_onion[O_TCP].set_RST()
out_onion[O_TCP].set_ACK()
out_onion[O_TCP].set_th_ack(in_onion[O_TCP].get_th_seq()+1)
out_onion[O_TCP].set_th_seq(self.machine.getTCPSequence())
return out_onion
class UDPCommandResponder(OpenUDPResponder):
# default UDP_CMD_PORT is 12345
# use with:
# echo cmd:exit | nc -u $(IP) $(UDP_CMD_PORT)
# echo cmd:who | nc -u $(IP) $(UDP_CMD_PORT)
def set_port(self, port):
self.port = port
self.machine.openUDPPort(port)
return self
def isMine(self, in_onion):
return (
OpenUDPResponder.isMine(self, in_onion))# and
#in_onion[O_UDP].get_uh_dport() == self.port)
def buildAnswer(self, in_onion):
cmd = in_onion[O_UDP_DATA].get_bytes().tostring()
if cmd[:4] == 'cmd:': cmd = cmd[4:].strip()
print "Got command: %r" % cmd
if cmd == 'exit':
from sys import exit
exit()
out_onion = OpenUDPResponder.buildAnswer(self, in_onion)
out_onion.append(ImpactPacket.Data())
out_onion[O_UDP].contains(out_onion[O_UDP_DATA])
if cmd == 'who':
out_onion[O_UDP_DATA].set_data(self.machine.fingerprint.get_id())
return out_onion
# NMAP2 specific responders
class NMAP2UDPResponder(ClosedUDPResponder):
signatureName = 'U1'
# No real need to filter
# def isMine(self, in_onion):
# return (
# ClosedUDPResponder.isMine(self, inOnion) and
# (in_onion[O_UDP_DATA].get_size() == 300))
def buildAnswer(self, in_onion):
out_onion = ClosedUDPResponder.buildAnswer(self, in_onion)
f = self.fingerprint
# assume R = Y
try:
if (f['R'] == 'N'): return None
except: pass
# Test DF: Don't fragment IP bit set = [YN]
if (f['DF'] == 'Y'): out_onion[O_IP].set_ip_df(True)
else: out_onion[O_IP].set_ip_df(False)
self.setTTLFromFingerprint(out_onion)
# UN. Assume 0
try: un = int(f['UN'],16)
except: un = 0
out_onion[O_ICMP].set_icmp_void(un)
# RIPL. Assume original packet just quoted
try:
ripl = int(f['RIPL'],16) # G generates exception
out_onion[O_ICMP_DATA].set_ip_len(ripl)
except:
pass
# RID. Assume original packet just quoted
try:
rid = int(f['RID'],16) # G generates exception
out_onion[O_ICMP_DATA].set_ip_id(rid)
except:
pass
# RIPCK. Assume original packet just quoted
try: ripck = f['RIPCK']
except: ripck = 'G'
if ripck == 'I': out_onion[O_ICMP_DATA].set_ip_sum(0x6765)
elif ripck == 'Z': out_onion[O_ICMP_DATA].set_ip_sum(0)
elif ripck == 'G': out_onion[O_ICMP_DATA].auto_checksum = 0
# RUCK. Assume original packet just quoted
try:
ruck = int(f['RUCK'], 16)
out_onion[O_ICMP_DATA+1].set_uh_sum(ruck)
except:
out_onion[O_ICMP_DATA+1].auto_checksum = 0
# RUD. Assume original packet just quoted
try: rud = f['RUD']
except: rud = 'G'
if rud == 'I':
udp_data = out_onion[O_ICMP_DATA+2]
udp_data.set_data('G'*udp_data.get_size())
# IPL. Assume all original packet is quoted
# This has to be the last thing we do
# as we are going to render the packet before doing it
try: ipl = int(f['IPL'], 16)
except: ipl = None
if not ipl is None:
data = out_onion[O_ICMP_DATA].get_packet()
out_onion[O_ICMP].contains(ImpactPacket.Data())
ip_and_icmp_len = out_onion[O_IP].get_size()
data = data[:ipl - ip_and_icmp_len]
data += '\x00'*(ipl-len(data)-ip_and_icmp_len)
out_onion = out_onion[:O_ICMP_DATA]
out_onion.append(ImpactPacket.Data(data))
out_onion[O_ICMP].contains(out_onion[O_ICMP_DATA])
return out_onion
class NMAP2ICMPResponder(ICMPResponder):
def buildAnswer(self, in_onion):
f = self.fingerprint
# assume R = Y
try:
if (f['R'] == 'N'): return None
except: pass
out_onion = ICMPResponder.buildAnswer(self, in_onion)
# assume DFI = N
try: dfi = f['DFI']
except: dfi = 'N'
if dfi == 'N': out_onion[O_IP].set_ip_df(False)
elif dfi == 'Y': out_onion[O_IP].set_ip_df(True)
elif dfi == 'S': out_onion[O_IP].set_ip_df(in_onion[O_IP].get_ip_df())
elif dfi == 'O': out_onion[O_IP].set_ip_df(not in_onion[O_IP].get_ip_df())
else: raise Exception('Unsupported IE(DFI=%s)' % dfi)
# assume DLI = S
try: dli = f['DLI']
except: dli = 'S'
if dli == 'S': out_onion[O_ICMP].contains(in_onion[O_ICMP_DATA])
elif dli != 'Z': raise Exception('Unsupported IE(DLI=%s)' % dli)
self.setTTLFromFingerprint(out_onion)
# assume SI = S
try: si = f['SI']
except: si = 'S'
if si == 'S': out_onion[O_ICMP].set_icmp_seq(in_onion[O_ICMP].get_icmp_seq())
elif si == 'Z': out_onion[O_ICMP].set_icmp_seq(0)  # assumed: 'Z' means a zeroed sequence, mirroring the RIPCK handling above
self.B2N7Alpha = False #Angle of attack at Blade 2, Node 7
self.B2N8Alpha = False #Angle of attack at Blade 2, Node 8
self.B2N9Alpha = False #Angle of attack at Blade 2, Node 9
self.B3N1Alpha = False #Angle of attack at Blade 3, Node 1
self.B3N2Alpha = False #Angle of attack at Blade 3, Node 2
self.B3N3Alpha = False #Angle of attack at Blade 3, Node 3
self.B3N4Alpha = False #Angle of attack at Blade 3, Node 4
self.B3N5Alpha = False #Angle of attack at Blade 3, Node 5
self.B3N6Alpha = False #Angle of attack at Blade 3, Node 6
self.B3N7Alpha = False #Angle of attack at Blade 3, Node 7
self.B3N8Alpha = False #Angle of attack at Blade 3, Node 8
self.B3N9Alpha = False #Angle of attack at Blade 3, Node 9
self.B1N1Theta = False #Pitch+Twist angle at Blade 1, Node 1
self.B1N2Theta = False #Pitch+Twist angle at Blade 1, Node 2
self.B1N3Theta = False #Pitch+Twist angle at Blade 1, Node 3
self.B1N4Theta = False #Pitch+Twist angle at Blade 1, Node 4
self.B1N5Theta = False #Pitch+Twist angle at Blade 1, Node 5
self.B1N6Theta = False #Pitch+Twist angle at Blade 1, Node 6
self.B1N7Theta = False #Pitch+Twist angle at Blade 1, Node 7
self.B1N8Theta = False #Pitch+Twist angle at Blade 1, Node 8
self.B1N9Theta = False #Pitch+Twist angle at Blade 1, Node 9
self.B2N1Theta = False #Pitch+Twist angle at Blade 2, Node 1
self.B2N2Theta = False #Pitch+Twist angle at Blade 2, Node 2
self.B2N3Theta = False #Pitch+Twist angle at Blade 2, Node 3
self.B2N4Theta = False #Pitch+Twist angle at Blade 2, Node 4
self.B2N5Theta = False #Pitch+Twist angle at Blade 2, Node 5
self.B2N6Theta = False #Pitch+Twist angle at Blade 2, Node 6
self.B2N7Theta = False #Pitch+Twist angle at Blade 2, Node 7
self.B2N8Theta = False #Pitch+Twist angle at Blade 2, Node 8
self.B2N9Theta = False #Pitch+Twist angle at Blade 2, Node 9
self.B3N1Theta = False #Pitch+Twist angle at Blade 3, Node 1
self.B3N2Theta = False #Pitch+Twist angle at Blade 3, Node 2
self.B3N3Theta = False #Pitch+Twist angle at Blade 3, Node 3
self.B3N4Theta = False #Pitch+Twist angle at Blade 3, Node 4
self.B3N5Theta = False #Pitch+Twist angle at Blade 3, Node 5
self.B3N6Theta = False #Pitch+Twist angle at Blade 3, Node 6
self.B3N7Theta = False #Pitch+Twist angle at Blade 3, Node 7
self.B3N8Theta = False #Pitch+Twist angle at Blade 3, Node 8
self.B3N9Theta = False #Pitch+Twist angle at Blade 3, Node 9
self.B1N1Phi = False #Inflow angle at Blade 1, Node 1
self.B1N2Phi = False #Inflow angle at Blade 1, Node 2
self.B1N3Phi = False #Inflow angle at Blade 1, Node 3
self.B1N4Phi = False #Inflow angle at Blade 1, Node 4
self.B1N5Phi = False #Inflow angle at Blade 1, Node 5
self.B1N6Phi = False #Inflow angle at Blade 1, Node 6
self.B1N7Phi = False #Inflow angle at Blade 1, Node 7
self.B1N8Phi = False #Inflow angle at Blade 1, Node 8
self.B1N9Phi = False #Inflow angle at Blade 1, Node 9
self.B2N1Phi = False #Inflow angle at Blade 2, Node 1
self.B2N2Phi = False #Inflow angle at Blade 2, Node 2
self.B2N3Phi = False #Inflow angle at Blade 2, Node 3
self.B2N4Phi = False #Inflow angle at Blade 2, Node 4
self.B2N5Phi = False #Inflow angle at Blade 2, Node 5
self.B2N6Phi = False #Inflow angle at Blade 2, Node 6
self.B2N7Phi = False #Inflow angle at Blade 2, Node 7
self.B2N8Phi = False #Inflow angle at Blade 2, Node 8
self.B2N9Phi = False #Inflow angle at Blade 2, Node 9
self.B3N1Phi = False #Inflow angle at Blade 3, Node 1
self.B3N2Phi = False #Inflow angle at Blade 3, Node 2
self.B3N3Phi = False #Inflow angle at Blade 3, Node 3
self.B3N4Phi = False #Inflow angle at Blade 3, Node 4
self.B3N5Phi = False #Inflow angle at Blade 3, Node 5
self.B3N6Phi = False #Inflow angle at Blade 3, Node 6
self.B3N7Phi = False #Inflow angle at Blade 3, Node 7
self.B3N8Phi = False #Inflow angle at Blade 3, Node 8
self.B3N9Phi = False #Inflow angle at Blade 3, Node 9
self.B1N1Curve = False #Curvature angle at Blade 1, Node 1
self.B1N2Curve = False #Curvature angle at Blade 1, Node 2
self.B1N3Curve = False #Curvature angle at Blade 1, Node 3
self.B1N4Curve = False #Curvature angle at Blade 1, Node 4
self.B1N5Curve = False #Curvature angle at Blade 1, Node 5
self.B1N6Curve = False #Curvature angle at Blade 1, Node 6
self.B1N7Curve = False #Curvature angle at Blade 1, Node 7
self.B1N8Curve = False #Curvature angle at Blade 1, Node 8
self.B1N9Curve = False #Curvature angle at Blade 1, Node 9
self.B2N1Curve = False #Curvature angle at Blade 2, Node 1
self.B2N2Curve = False #Curvature angle at Blade 2, Node 2
self.B2N3Curve = False #Curvature angle at Blade 2, Node 3
self.B2N4Curve = False #Curvature angle at Blade 2, Node 4
self.B2N5Curve = False #Curvature angle at Blade 2, Node 5
self.B2N6Curve = False #Curvature angle at Blade 2, Node 6
self.B2N7Curve = False #Curvature angle at Blade 2, Node 7
self.B2N8Curve = False #Curvature angle at Blade 2, Node 8
self.B2N9Curve = False #Curvature angle at Blade 2, Node 9
self.B3N1Curve = False #Curvature angle at Blade 3, Node 1
self.B3N2Curve = False #Curvature angle at Blade 3, Node 2
self.B3N3Curve = False #Curvature angle at Blade 3, Node 3
self.B3N4Curve = False #Curvature angle at Blade 3, Node 4
self.B3N5Curve = False #Curvature angle at Blade 3, Node 5
self.B3N6Curve = False #Curvature angle at Blade 3, Node 6
self.B3N7Curve = False #Curvature angle at Blade 3, Node 7
self.B3N8Curve = False #Curvature angle at Blade 3, Node 8
self.B3N9Curve = False #Curvature angle at Blade 3, Node 9
self.B1N1Cl = False #Lift force coefficient at Blade 1, Node 1
self.B1N2Cl = False #Lift force coefficient at Blade 1, Node 2
self.B1N3Cl = False #Lift force coefficient at Blade 1, Node 3
self.B1N4Cl = False #Lift force coefficient at Blade 1, Node 4
self.B1N5Cl = False #Lift force coefficient at Blade 1, Node 5
self.B1N6Cl = False #Lift force coefficient at Blade 1, Node 6
self.B1N7Cl = False #Lift force coefficient at Blade 1, Node 7
self.B1N8Cl = False #Lift force coefficient at Blade 1, Node 8
self.B1N9Cl = False #Lift force coefficient at Blade 1, Node 9
self.B2N1Cl = False #Lift force coefficient at Blade 2, Node 1
self.B2N2Cl = False #Lift force coefficient at Blade 2, Node 2
self.B2N3Cl = False #Lift force coefficient at Blade 2, Node 3
self.B2N4Cl = False #Lift force coefficient at Blade 2, Node 4
self.B2N5Cl = False #Lift force coefficient at Blade 2, Node 5
self.B2N6Cl = False #Lift force coefficient at Blade 2, Node 6
self.B2N7Cl = False #Lift force coefficient at Blade 2, Node 7
self.B2N8Cl = False #Lift force coefficient at Blade 2, Node 8
self.B2N9Cl = False #Lift force coefficient at Blade 2, Node 9
self.B3N1Cl = False #Lift force coefficient at Blade 3, Node 1
self.B3N2Cl = False #Lift force coefficient at Blade 3, Node 2
self.B3N3Cl = False #Lift force coefficient at Blade 3, Node 3
self.B3N4Cl = False #Lift force coefficient at Blade 3, Node 4
self.B3N5Cl = False #Lift force coefficient at Blade 3, Node 5
self.B3N6Cl = False #Lift force coefficient at Blade 3, Node 6
self.B3N7Cl = False #Lift force coefficient at Blade 3, Node 7
self.B3N8Cl = False #Lift force coefficient at Blade 3, Node 8
self.B3N9Cl = False #Lift force coefficient at Blade 3, Node 9
self.B1N1Cd = False #Drag force coefficient at Blade 1, Node 1
self.B1N2Cd = False #Drag force coefficient at Blade 1, Node 2
self.B1N3Cd = False #Drag force coefficient at Blade 1, Node 3
self.B1N4Cd = False #Drag force coefficient at Blade 1, Node 4
self.B1N5Cd = False #Drag force coefficient at Blade 1, Node 5
self.B1N6Cd = False #Drag force coefficient at Blade 1, Node 6
self.B1N7Cd = False #Drag force coefficient at Blade 1, Node 7
self.B1N8Cd = False #Drag force coefficient at Blade 1, Node 8
self.B1N9Cd = False #Drag force coefficient at Blade 1, Node 9
self.B2N1Cd = False #Drag force coefficient at Blade 2, Node 1
self.B2N2Cd = False #Drag force coefficient at Blade 2, Node 2
self.B2N3Cd = False #Drag force coefficient at Blade 2, Node 3
self.B2N4Cd = False #Drag force coefficient at Blade 2, Node 4
self.B2N5Cd = False #Drag force coefficient at Blade 2, Node 5
self.B2N6Cd = False #Drag force coefficient at Blade 2, Node 6
self.B2N7Cd = False #Drag force coefficient at Blade 2, Node 7
self.B2N8Cd = False #Drag force coefficient at Blade 2, Node 8
# -*- coding: utf-8 -*-
import time
from operator import itemgetter
from os import path, listdir, stat
from bisect import bisect_left
from sys import getsizeof
import shutil
aa_monoisotopic_masses = {'A': 71.037113805,
'C': 103.009184505 + 57.021463735,
'D': 115.026943065,
'E': 129.042593135,
'F': 147.068413945,
'G': 57.021463735,
'H': 137.058911875,
'I': 113.084064015,
'K': 128.09496305,
'L': 113.084064015,
'M': 131.040484645,
'N': 114.04292747,
'P': 97.052763875,
'Q': 128.05857754,
'R': 156.10111105,
'S': 87.032028435,
'T': 101.047678505,
'V': 99.068413945,
'W': 186.07931298,
'Y': 163.063328575,
'X':0,
'B':0,
'J':0,
'O':0,
'U':0,
'Z':0,
'?':0,
'-':0,
'*':0}
HOH = 18.0105647
TMT_monoisotopicmass = 229.162932178
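# Residue masses above are monoisotopic. A neutral peptide mass is the sum of its residue
# masses plus one water (HOH); with TMT labeling one TMT reporter mass is added at the
# peptide N-terminus and another for every lysine (K) side chain.
# Illustrative check (not part of the pipeline): sum(calcmass_cmm('PEPTIDEK', 1)) + HOH
# equals the unlabeled mass plus 2 * TMT_monoisotopicmass (one for the N-terminus, one for K).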
def calcmass_cmm(pepstring, TMT_mod):
if TMT_mod ==0:
return [aa_monoisotopic_masses[aa] for aa in pepstring]
# TMT tags composed of amine-reactive NHS-ester group, a spacer arm and a mass reporter.
# Lysine side-chain NH2 or n-terminus NH2
# if C at n-terminus is carbamidomethylated, TMT can still react with NH2
elif TMT_mod ==1:
TMTmodssum = []
for i, aa in enumerate(pepstring):
# if n-terminus of peptide, add 229.162932
if i==0:
# if n-terminus aa is also a K, add 229.162932 again (lysine side-chain)
if aa == 'K':
TMTmodssum.append(aa_monoisotopic_masses[aa]+TMT_monoisotopicmass+TMT_monoisotopicmass)
else:
TMTmodssum.append(aa_monoisotopic_masses[aa]+TMT_monoisotopicmass)
# if not aa at n-terminus of peptide but a K, add 229.162932
else:
if aa == 'K':
TMTmodssum.append(aa_monoisotopic_masses[aa]+TMT_monoisotopicmass)
else:
TMTmodssum.append(aa_monoisotopic_masses[aa])
return TMTmodssum
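# CopyPeptideS1 buckets one fully tryptic peptide into a mass bin: sequences containing
# non-standard residues are skipped, the peptide mass is computed, bisect_left over
# M_ranges picks the bin, and a "sequence<TAB>mass<TAB>info" row is appended to the
# in-memory buffer M; once a bin's byte counter C exceeds perbin_bytes the buffer is
# flushed to DBPart_<bin>.txt and reset.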
def CopyPeptideS1(pepseq, info, M_ranges, M, C, perbin_bytes, output_directory,
TMT_labeling):
if any(aa in pepseq for aa in 'XBJOUZ?-*'):
return True
else:
pepmass = round(sum(calcmass_cmm(pepseq, TMT_labeling))+HOH,4) # compute mass
bin_i = bisect_left(M_ranges, pepmass)
M[bin_i].append(pepseq.replace('L','I')+'\t'+str(pepmass)+'\t'+info+'\n')
C[bin_i] += getsizeof(pepseq) + 8
if C[bin_i]>perbin_bytes:
outfile = open(path.join(output_directory, 'DBPart_'+str(bin_i)+'.txt'),'a')
outfile.write(''.join(M[bin_i]))
outfile.close()
M[bin_i] = []
C[bin_i] = getsizeof([])
return False
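# CopyPeptideS2 expands a fully tryptic peptide into its semi-tryptic forms: with
# leftanchor the N-terminus is kept and residues are trimmed from the C-terminal end,
# with rightanchor the C-terminus is kept and residues are trimmed from the N-terminal
# end; every length between min_pep_len and max_pep_len is written to the same
# mass-binned buffers as stage 1, with N-terminal methionine removal handled separately
# when the preceding residue is '-'.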
def CopyPeptideS2(pepseq, info, M_ranges, M, C, perbin_bytes, output_directory,
min_pep_len, max_pep_len, leftanchor, rightanchor, TMT_labeling):
pepseq = pepseq.replace('L','I')
peplength = len(pepseq)
pepmass = calcmass_cmm(pepseq, TMT_labeling)
# if TMT, remove TMT_monoisotopicmass from N-term for now, add back later
if TMT_labeling==1 and pepmass:
pepmass[0] = pepmass[0] - TMT_monoisotopicmass
for i in xrange(peplength):
#if left anchor, and pepseq[0]=='M' and pre == '-', need to make semi tryptic peps with M removed
if leftanchor == True:
if pepseq[0] == 'M' and info[-2]=='-':
if min_pep_len<=(peplength-1)-i<=max_pep_len:
l_spep = pepseq[1:peplength-i]
if any(aa in l_spep for aa in 'XBJOUZ?-*'):
l_spep= ''
if l_spep:
if TMT_labeling==0:
l_spep_mass = round(sum(pepmass[1:peplength-i])+HOH, 4)
if TMT_labeling==1:
l_spep_mass = round(sum(pepmass[1:peplength-i])+HOH +TMT_monoisotopicmass, 4)
bin_i = bisect_left(M_ranges, l_spep_mass)
if i!=0:
lspep_info = info[:-2]+'-' #preaa should always be -
lspep_info += pepseq[peplength-i]
else: #0 is the M-cleaved peptide, do not need to update info
lspep_info = info[0:]
M[bin_i].append(l_spep+'\t'+str(l_spep_mass)+'\t'+lspep_info+'\n')
C[bin_i] += getsizeof(l_spep) + 8
if C[bin_i]>perbin_bytes:
outfile = open(path.join(output_directory, 'DBPart_'+str(bin_i)+'.txt'),'a')
outfile.write(''.join(M[bin_i]))
outfile.close()
M[bin_i] = []
C[bin_i] = getsizeof([])
if min_pep_len<=peplength-i<=max_pep_len:
l_spep = ''
r_spep = ''
if leftanchor == True:
l_spep = pepseq[0:peplength-i]
if any(aa in l_spep for aa in 'XBJOUZ?-*'):
l_spep= ''
if l_spep:
if TMT_labeling==0:
l_spep_mass = round(sum(pepmass[0:peplength-i])+HOH, 4)
if TMT_labeling==1:
l_spep_mass = round(sum(pepmass[0:peplength-i])+HOH +TMT_monoisotopicmass, 4)
bin_i = bisect_left(M_ranges, l_spep_mass)
if i!=0: #0 is the fully-tryptic peptide, do not need to update info
lspep_info = info[:-1] #preaa stays the same
lspep_info += pepseq[peplength-i]
else:
lspep_info = info[0:]
M[bin_i].append(l_spep+'\t'+str(l_spep_mass)+'\t'+lspep_info+'\n')
C[bin_i] += getsizeof(l_spep) + 8
if C[bin_i]>perbin_bytes:
outfile = open(path.join(output_directory, 'DBPart_'+str(bin_i)+'.txt'),'a')
outfile.write(''.join(M[bin_i]))
outfile.close()
M[bin_i] = []
C[bin_i] = getsizeof([])
if rightanchor == True:
if i>0:
r_spep = pepseq[i:peplength]
if any(aa in r_spep for aa in 'XBJOUZ?-*'):
r_spep = ''
if r_spep:
if TMT_labeling==0:
r_spep_mass = round(sum(pepmass[i:peplength])+HOH, 4)
if TMT_labeling==1:
r_spep_mass = round(sum(pepmass[i:peplength])+HOH +TMT_monoisotopicmass, 4)
bin_i = bisect_left(M_ranges, r_spep_mass)
if pepseq[0] == 'M' and info[-2]=='-' and i==1:
rspep_info = info[:-2]+'-'+info[-1]
else:
rspep_info = info[:-2]+pepseq[i-1]+info[-1] #postaa stays the same
M[bin_i].append(r_spep+'\t'+str(r_spep_mass)+'\t'+rspep_info+'\n')
C[bin_i] += getsizeof(r_spep) + 8
if C[bin_i]>perbin_bytes:
outfile = open(path.join(output_directory, 'DBPart_'+str(bin_i)+'.txt'),'a')
outfile.write(''.join(M[bin_i]))
outfile.close()
M[bin_i] = []
C[bin_i] = getsizeof([])
return
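# TrypticDigest walks the concatenated protein string (individual proteins separated by
# '$') and cuts after K or R not followed by P, or at a protein boundary; b tracks the
# previous cut site and a the one before that, which is what allows emitting a single
# missed-cleavage peptide (from a+1 to the current cut) when miscleavage is enabled.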
def TrypticDigest(prot, fasta_protcount,
min_pep_len, max_pep_len,
miscleavage, M_ranges,
output_directory, massbin_n,
perbin_bytes, td, reverseseq, stage, TMT_labeling):
list_bytes = getsizeof([])
# C[i] is the current number of entries for ith bin
C = [list_bytes for x in xrange(massbin_n)]
# M is peptide buffer
M = [[] for x in xrange(massbin_n)]
b=0 # previous cutsite
a=0 # cutsite before b
if stage == 'S1':
protnum = -1
f_idx = 0
fasta_ID = fasta_protcount[f_idx][0]
currentmax = fasta_protcount[f_idx][1]
for i in xrange(len(prot)-1):
if prot[i]=='$':
protnum+=1
if protnum>=currentmax:
f_idx+=1
fasta_ID = fasta_protcount[f_idx][0]
currentmax = fasta_protcount[f_idx][1]
protnum = 0
if (prot[i] in ['K','R'] and prot[i+1]!='P') or prot[i+1]=='$':
nonstandardaa = 'not assigned'
# peptide @ prot[b+1:i+1]
if min_pep_len<=i-b<=max_pep_len:
peptide = prot[b+1:i+1]
if td==True:
if reverseseq ==True:
d_protnum = (currentmax-1)-protnum
else:
d_protnum = protnum
add_info = fasta_ID+'|'+'d_'+str(d_protnum)+'|'+(prot[b]+prot[i+1]).replace('$','-')
else:
add_info = fasta_ID+'|'+str(protnum)+'|'+(prot[b]+prot[i+1]).replace('$','-')
nonstandardaa = CopyPeptideS1(peptide, add_info, M_ranges,
M, C, perbin_bytes, output_directory, TMT_labeling)
if nonstandardaa!=True:
# N-term Methionine cleaved peptide @ prot[b+2:i+1]
if prot[b]=='$' and prot[b+1]=='M':
if min_pep_len<=i-(b+1)<=max_pep_len:
peptide = prot[b+2:i+1]
if td==True:
if reverseseq ==True:
d_protnum = (currentmax-1)-protnum
else:
d_protnum = protnum
add_info = fasta_ID+'|'+'d_'+str(d_protnum)+'|'+'-'+prot[i+1].replace('$','-')
else:
add_info = fasta_ID+'|'+str(protnum)+'|'+'-'+prot[i+1].replace('$','-')
CopyPeptideS1(peptide, add_info, M_ranges,
M, C, perbin_bytes, output_directory, TMT_labeling)
if miscleavage ==True:
# when b!=a, miscleavage possible
if b!=a:
# Miscleavage peptide @ prot[a+1:i+1]
if min_pep_len<=i-a<=max_pep_len:
peptide = prot[a+1:i+1]
if td==True:
if reverseseq ==True:
d_protnum = (currentmax-1)-protnum
else:
d_protnum = protnum
add_info = fasta_ID+'|'+'d_'+str(d_protnum)+'|'+(prot[a]+prot[i+1]).replace('$','-')
else:
add_info = fasta_ID+'|'+str(protnum)+'|'+(prot[a]+prot[i+1]).replace('$','-')
CopyPeptideS1(peptide, add_info, M_ranges,
M, C, perbin_bytes, output_directory, TMT_labeling)
# N-term Methionine cleaved Miscleavage peptide @ prot[a+2:i+1]
if prot[a] == '$' and prot[a+1]=='M':
if min_pep_len<=i-a-1<=max_pep_len:
peptide = prot[a+2:i+1]
if td==True:
if reverseseq ==True:
d_protnum = (currentmax-1)-protnum
else:
d_protnum = protnum
add_info = fasta_ID+'|'+'d_'+str(d_protnum)+'|'+'-'+prot[i+1].replace('$','-')
else:
add_info = fasta_ID+'|'+str(protnum)+'|'+'-'+prot[i+1].replace('$','-')
CopyPeptideS1(peptide, add_info, M_ranges,
M, C, perbin_bytes, output_directory, TMT_labeling)
#if nonstandardaa not assigned for original peptide (due to length)
if nonstandardaa == 'not assigned':
if any(aa in prot[b+1:i+1] for aa in 'XBJOUZ?-*'):
nonstandardaa = True
if nonstandardaa == True:
a=i
if prot[i+1]=='$':
a+=1
b=a
continue
# update a and b
a = b
b = i
if prot[i+1]=='$':
a = i+1
b = a
if stage == 'S2':
protnum = -1
f_idx = 0
fasta_ID = fasta_protcount[f_idx][0]
currentmax = fasta_protcount[f_idx][1]
for i in xrange(len(prot)-1):
if prot[i]=='$':
protnum+=1
if protnum>=currentmax:
f_idx+=1
fasta_ID = fasta_protcount[f_idx][0]
currentmax = fasta_protcount[f_idx][1]
protnum = 0
if (prot[i] in ['K','R'] and prot[i+1]!='P') or prot[i+1]=='$':
if miscleavage == False:
peptide = prot[b+1:i+1]
if td==True:
if reverseseq ==True:
d_protnum = (currentmax-1)-protnum
else:
d_protnum = protnum
add_info = fasta_ID+'|'+'d_'+str(d_protnum)+'|'+(prot[b]+prot[i+1]).replace('$','-')
else:
add_info = fasta_ID+'|'+str(protnum)+'|'+(prot[b]+prot[i+1]).replace('$','-')
CopyPeptideS2(peptide, add_info, M_ranges, M, C, perbin_bytes,
output_directory, min_pep_len, max_pep_len, True, True, TMT_labeling)
if miscleavage == True:
peptide = prot[a+1:i+1]
if td==True:
if reverseseq ==True:
d_protnum = (currentmax-1)-protnum
else:
d_protnum = protnum
add_info = fasta_ID+'|'+'d_'+str(d_protnum)+'|'+(prot[a]+prot[i+1]).replace('$','-')
else:
add_info = fasta_ID+'|'+str(protnum)+'|'+(prot[a]+prot[i+1]).replace('$','-')
CopyPeptideS2(peptide, add_info, M_ranges, M, C, perbin_bytes,
output_directory, min_pep_len, max_pep_len, True, True, TMT_labeling)
if prot[a]=='$':
peptide = prot[a+1:b+1]
if td==True:
add_info = fasta_ID+'|'+'d_'+str(d_protnum)+'|'+(prot[a]+prot[b+1]).replace('$','-')
else:
add_info = fasta_ID+'|'+str(protnum)+'|'+(prot[a]+prot[b+1]).replace('$','-')
CopyPeptideS2(peptide, add_info, M_ranges, M, C, perbin_bytes,
output_directory, min_pep_len, max_pep_len, False, True, TMT_labeling)
if prot[i+1]=='$':
peptide = prot[b+1:i+1]
if td==True:
add_info = fasta_ID+'|'+'d_'+str(d_protnum)+'|'+(prot[b]+prot[i+1]).replace('$','-')
else:
add_info = fasta_ID+'|'+str(protnum)+'|'+(prot[b]+prot[i+1]).replace('$','-')
CopyPeptideS2(peptide, add_info, M_ranges, M, C, perbin_bytes,
output_directory, min_pep_len, max_pep_len, True, False, TMT_labeling)
a = b
b = i
if prot[i+1]=='$':
a = i+1
b = a
for bin_i in xrange(massbin_n):
if M[bin_i]:
outfile = open(path.join(output_directory, 'DBPart_'+str(bin_i)+'.txt'),'a')
outfile.write(''.join(M[bin_i]))
outfile.close()
return
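# MakePeptides batches fasta files until roughly B1size bytes have been read, concatenates
# them into one '$'-separated protein string (per-file reversed when building a reversed
# decoy database), and hands each batch to TrypticDigest; the per-bin flush threshold is
# B2size / massbin_n bytes.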
def MakePeptides(output_directory, fastadir, B1size, B2size,
min_pep_len, max_pep_len, miscleavage,
massbin_n, M_ranges, fastaidxmap, td, reverseseq, TMT_labeling, stage):
begintime = time.time()
maxbytes = B2size/massbin_n
mfasta_sizes = 0
mfasta_files = []
for fastafile in listdir(fastadir):
mfasta_sizes+=stat(path.join(fastadir, fastafile)).st_size
mfasta_files.append(fastafile)
if mfasta_sizes>B1size:
print '\treading ', len(mfasta_files), ' fasta files at ', mfasta_sizes/(1024*1024.0), ' bytes.'
proteins = []
fasta_protcount = []
for fastafile in mfasta_files:
with open(path.join(fastadir, fastafile),'r') as infile:
fasta_ID = fastaidxmap[path.basename(fastafile).replace('.fasta','')]
fasta_sequences = ''.join(['$' if line[0]=='>' else line.strip() for line in infile.readlines()])
fasta_protcount.append((fasta_ID, fasta_sequences.count('$')))
if td ==True:
if reverseseq ==True:
proteins.append('$'+fasta_sequences[::-1][:-1])
else:
proteins.append(fasta_sequences)
else:
proteins.append(fasta_sequences)
proteins = ''.join(proteins)+'$'
TrypticDigest(proteins,fasta_protcount,
min_pep_len, max_pep_len,
miscleavage, M_ranges,
output_directory, massbin_n,
maxbytes, td, reverseseq, stage, TMT_labeling)
mfasta_sizes = 0
mfasta_files = []
# remaining fasta files
if mfasta_files:
print '\t#reading ', len(mfasta_files), ' fasta files at ', mfasta_sizes/(1024*1024.0), ' bytes.'
# maze.py (from the amalrkrishna/virtualnav-mpu6050 repository)
from __future__ import division
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.error import *
from OpenGL.GL import *
from PIL import Image
from scipy import integrate
from operator import add, mul, div
from copy import deepcopy
from maze_gen import NORTH, WEST, SOUTH, EAST, N, towards, isCoordinateInRange
from matplotlib.pyplot import plot, draw, show, ion
import ctypes
import copy
import sys
import math
import time
import datetime
import traceback
import urllib
import threading
import matplotlib.pyplot as plt
import matplotlib.animation as animation
name = 'maze'
win_width, win_height = 800, 600
heading = [0, 0, 0]
loc = [0.0, 0.0, 0.0]
map_scale = 30.
keybuffer = {}
maze = []
visited = []
values = []
shader = 0
tex_wall = 0
tex_sand = 0
timer = 0
alert = 0
done = False
fps = 0
vel_x = 0
vel_y = 0
accel_x = 0
accel_y = 0
accel_xarray = []
SKYBOX_TOP = 1
SKYBOX_BOTTOM = 2
SKYBOX_LEFT = 3
SKYBOX_RIGHT = 4
SKYBOX_FRONT = 5
SKYBOX_BACK = 6
SKYBOX_SIZE = 32
tex_skybox = {}
vert_skybox = {}
texc_skybox = {}
RotatingSpeed = 0.0025
MovingSpeed = 0.005
DEBUG_COLLISON = False
DEBUG_FUNCTRACE = False
DEBUG_DRAW = False
DEBUG_AUTOPILOT = False
HUD_ALWAYS_ON = False
ENABLE_AUTOPILOT = False
def read_values():
link = "http://10.42.0.3:8080" # Change this address to your settings
f = urllib.urlopen(link)
myfile = f.read()
return myfile.split(" ")
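# read_values polls a small HTTP endpoint (address configurable above, presumably served
# by the device reading the MPU6050) and returns the space-separated sensor readings as a
# list of strings; loop() below refreshes them continuously on a background thread.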
def main():
global heading, loc, keybuffer, timer, alert
looper = loop()
looper.start()
time.sleep(4)
glutInit(sys.argv)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
glutInitWindowSize(640,360)
glutCreateWindow(name)
glutDisplayFunc(display)
glutReshapeFunc(resize)
glutKeyboardFunc(keyPressed)
glutKeyboardUpFunc(keyReleased)
glutIdleFunc(idleFunc)
loadShaders()
loadTextures()
loadSkybox()
loadMaze()
setupLights()
glClearColor(0.,0.,0.,1.)
glShadeModel(GL_SMOOTH)
glEnable(GL_DEPTH_TEST)
glEnable(GL_TEXTURE_CUBE_MAP)
glDisable(GL_CULL_FACE)
heading = [0, 0, -1]
loc = [0.5, 0.5, -0.5]
for i in range(256):
keybuffer[i] = False
timer = time.time()
print 'timer has started.'
glutMainLoop()
return
def idleFunc():
glutPostRedisplay()
def translate_maze(i):
r = ''
if i & NORTH:
r += 'N'
if i & SOUTH:
r += 'S'
if i & WEST:
r += 'W'
if i & EAST:
r += 'E'
return r
def loadMaze():
global maze
with open('maze_gen.out', 'r') as f:
for row in f:
maze.append(map(int, row.split(' ')))
for i in range(N):
visited.append([False] * N)
print 'first row:', map(translate_maze, maze[0])
PILOT_WALKING = 1
PILOT_TURNLEFT = 2
PILOT_TURNRIGHT = 3
PILOT_REVERSE = 4
PILOT_BACKTRACK_WALKING = 5
PILOT_BACKTRACK_TURNLEFT = 6
PILOT_BACKTRACK_TURNRIGHT = 7
PILOT_COMPLETE = 10
PILOT_ACTION_FORWARD = 1
PILOT_ACTION_LEFT = 2
PILOT_ACTION_RIGHT = 4
Left = {NORTH: WEST, WEST: SOUTH, SOUTH: EAST, EAST: NORTH}
Right = {NORTH: EAST, EAST: SOUTH, SOUTH: WEST, WEST: NORTH}
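# Left/Right map a compass heading to the heading after a 90-degree turn; they belong to
# the autopilot state machine whose states are listed above (gated behind ENABLE_AUTOPILOT).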
pilot_heading = NORTH
pilot_status = PILOT_WALKING
pilot_walked = 0.
pilot_rotated = 0.
pilot_stack = []
pilot_hint = []
pilot_stepped = 0
def translate_status(s):
return {
PILOT_WALKING: 'PILOT_WALKING',
PILOT_TURNLEFT: 'PILOT_TURNLEFT',
PILOT_TURNRIGHT: 'PILOT_TURNRIGHT',
PILOT_REVERSE: 'PILOT_REVERSE',
PILOT_BACKTRACK_WALKING: 'PILOT_BACKTRACK_WALKING',
PILOT_BACKTRACK_TURNLEFT: 'PILOT_BACKTRACK_TURNLEFT',
PILOT_BACKTRACK_TURNRIGHT: 'PILOT_BACKTRACK_TURNRIGHT',
PILOT_COMPLETE: 'PILOT_COMPLETE'
}[s]
def translate_action(act):
ret = ''
if act & PILOT_ACTION_FORWARD:
ret += 'FORWARD '
if act & PILOT_ACTION_LEFT:
ret += 'LEFT '
if act & PILOT_ACTION_RIGHT:
ret += 'RIGHT '
return ret
def heading_vector(d):
return {
NORTH: [0, 0, -1],
SOUTH: [0, 0, 1],
WEST: [-1, 0, 0],
EAST: [1, 0, 0]
}[d]
def loadTextureFromRawData(img_w, img_h, data):
tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, tex)
glPixelStorei(GL_UNPACK_ALIGNMENT,1)
glTexImage2D(GL_TEXTURE_2D, 0, 3, img_w, img_h, 0, GL_RGBA, GL_UNSIGNED_BYTE, data)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
return tex
def loadTextureFromFile(fname):
img = Image.open(fname)
w, h = img.size
dat = img.tostring('raw', 'RGBX', 0, -1)
return loadTextureFromRawData(w, h, dat)
def loadSkybox():
global tex_skybox, vert_skybox, texc_skybox
fname = {
SKYBOX_RIGHT: 'skybox_right.jpg',
SKYBOX_TOP: 'skybox_top.jpg',
SKYBOX_FRONT: 'skybox_front.jpg',
SKYBOX_LEFT: 'skybox_left.jpg',
SKYBOX_BOTTOM: 'skybox_bottom.jpg',
SKYBOX_BACK: 'skybox_back.jpg'
}
for f,n in fname.iteritems():
tex_skybox[f] = loadTextureFromFile(n)
vert_skybox[SKYBOX_BACK] = [
[SKYBOX_SIZE, SKYBOX_SIZE, SKYBOX_SIZE],
[SKYBOX_SIZE, -SKYBOX_SIZE, SKYBOX_SIZE],
[-SKYBOX_SIZE, SKYBOX_SIZE, SKYBOX_SIZE],
[-SKYBOX_SIZE, -SKYBOX_SIZE, SKYBOX_SIZE]]
texc_skybox[SKYBOX_BACK] = [[0,1], [0,0], [1,1], [1,0]]
vert_skybox[SKYBOX_LEFT] = [
[-SKYBOX_SIZE, SKYBOX_SIZE, SKYBOX_SIZE],
[-SKYBOX_SIZE, -SKYBOX_SIZE, SKYBOX_SIZE],
[-SKYBOX_SIZE, SKYBOX_SIZE, -SKYBOX_SIZE],
[-SKYBOX_SIZE, -SKYBOX_SIZE, -SKYBOX_SIZE]]
texc_skybox[SKYBOX_LEFT] = [[0,1], [0,0], [1,1], [1,0]]
vert_skybox[SKYBOX_FRONT] = [
[-SKYBOX_SIZE, SKYBOX_SIZE, -SKYBOX_SIZE],
[-SKYBOX_SIZE, -SKYBOX_SIZE, -SKYBOX_SIZE],
[SKYBOX_SIZE, SKYBOX_SIZE, -SKYBOX_SIZE],
[SKYBOX_SIZE, -SKYBOX_SIZE, -SKYBOX_SIZE]]
texc_skybox[SKYBOX_FRONT] = [[0,1], [0,0], [1,1], [1,0]]
vert_skybox[SKYBOX_RIGHT] = [
[SKYBOX_SIZE, SKYBOX_SIZE, -SKYBOX_SIZE],
[SKYBOX_SIZE, -SKYBOX_SIZE, -SKYBOX_SIZE],
[SKYBOX_SIZE, SKYBOX_SIZE, SKYBOX_SIZE],
[SKYBOX_SIZE, -SKYBOX_SIZE, SKYBOX_SIZE]]
texc_skybox[SKYBOX_RIGHT] = [[0,1], [0,0], [1,1], [1,0]]
vert_skybox[SKYBOX_TOP] = [
[SKYBOX_SIZE, SKYBOX_SIZE, SKYBOX_SIZE],
[-SKYBOX_SIZE, SKYBOX_SIZE, SKYBOX_SIZE],
[SKYBOX_SIZE, SKYBOX_SIZE, -SKYBOX_SIZE],
[-SKYBOX_SIZE, SKYBOX_SIZE, -SKYBOX_SIZE]]
texc_skybox[SKYBOX_TOP] = [[0,1], [0,0], [1,1], [1,0]]
def loadTextures():
global tex_wall, tex_sand
tex_wall = loadTextureFromFile('brick.jpg')
tex_sand = loadTextureFromFile('sand.jpg')
def setupLights():
lightZeroPosition = [10., 10., 10., 1.]
lightZeroColor = [1.0, 1.0, 1.0, 1.0]
glLightfv(GL_LIGHT0, GL_POSITION, lightZeroPosition)
glLightfv(GL_LIGHT0, GL_DIFFUSE, lightZeroColor)
glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0.1)
glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.05)
def setCamera():
global heading, loc
if DEBUG_FUNCTRACE:
print 'functrace: setCamera'
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
at = map(add, loc, heading)
params = deepcopy(loc)
params.extend(at)
params.extend([0., 1., 0.])
gluLookAt(*params)
glutPostRedisplay()
def getRVcoordinates(loc):
rel_x, rel_y = math.floor(loc[0]), math.floor(-loc[2])
vrt_x, vrt_y = int(rel_x), int(rel_y)
return rel_x, rel_y, vrt_x, vrt_y
def checkXBlocked(old_loc, new_loc, rx, ry, vx, vy):
if new_loc[0] - rx < 0.2 and new_loc[0] - old_loc[0] < 0:
if DEBUG_COLLISON:
print 'trying reach west:',
if maze[vx][vy] & WEST:
if DEBUG_COLLISON:
print 'rejected'
new_loc[0] = rx + 0.21
else:
if DEBUG_COLLISON:
print 'accepted'
return maze[vx][vy] & WEST
if new_loc[0] - rx > 0.8 and new_loc[0] - old_loc[0] > 0:
if DEBUG_COLLISON:
print 'trying reach east:',
if maze[vx][vy] & EAST:
if DEBUG_COLLISON:
print 'rejected'
new_loc[0] = rx + 0.79
else:
if DEBUG_COLLISON:
print 'accepted'
return maze[vx][vy] & EAST
return False
def checkYBlocked(old_loc, new_loc, rx, ry, vx, vy):
if -new_loc[2] - ry < 0.2 and -new_loc[2] - -old_loc[2] < 0:
if DEBUG_COLLISON:
print 'trying reach south:',
if maze[vx][vy] & SOUTH:
if DEBUG_COLLISON:
print 'rejected'
new_loc[2] = -(ry + 0.21)
else:
if DEBUG_COLLISON:
print 'accepted'
return maze[vx][vy] & SOUTH
if -new_loc[2] - ry > 0.8 and -new_loc[2] - -old_loc[2] > 0:
if DEBUG_COLLISON:
print 'trying reach north:',
if maze[vx][vy] & NORTH:
if DEBUG_COLLISON:
print 'rejected'
new_loc[2] = -(ry + 0.79)
else:
if DEBUG_COLLISON:
print 'accepted'
return maze[vx][vy] & NORTH
return False
def checkBlocked(old_loc, new_loc):
if DEBUG_COLLISON:
print 'testing',old_loc,'against',new_loc
rx, ry, vx, vy = getRVcoordinates(old_loc)
if DEBUG_COLLISON:
print 'R', rx, ry, 'V', vx, vy
checkXBlocked(old_loc, new_loc, rx, ry, vx, vy)
checkYBlocked(old_loc, new_loc, rx, ry, vx, vy)
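# Collision handling: the current maze cell is looked up and, if the requested move would
# cross a wall bit (NORTH/SOUTH/EAST/WEST), the corresponding coordinate is clamped just
# inside the cell (0.21 / 0.79), so the camera slides along walls instead of passing through.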
def extendsSight(i, j, d, n):
visited[i][j] = True
if n == 0:
return
if not maze[i][j] & d:
extendsSight(*map(add, [i,j], towards[d]), d = d, n = n - 1)
class loop(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.running = 1
def run(self):
global values
while True:
values = read_values()
def kill(self):
self.running = 0
accel_x_filt = []
accel_y_filt = []
gyro_z_filt = []
gyro_z_filt.append(0)
accel_x_filt.append(0)
accel_y_filt.append(0)
alpha = 0.5
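# The raw gyro/accel readings are smoothed with a first-order exponential (low-pass) filter,
#   filtered[t] = (1 - alpha) * filtered[t-1] + alpha * raw[t]
# with alpha = 0.5, before being thresholded into turn/move commands in nextAnimation().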
def nextAnimation():
global heading, loc, done, timer, values, accel_x, accel_y, vel_x, vel_y, alpha, accel_x_filt, accel_y_filt, gyro_z_filt
gyro_z = int(values[0]) / 131
accel_x = int(values[1]) * 9.8 / 16384
accel_y = int(values[2]) * 9.8 / 16384
with open('log.txt', 'a+') as fp:
fp.write(str(gyro_z) + ',' + str(accel_x) + ',' + str(accel_y) + '\n')
fp.close()
gyro_z_filt.append(float(1-alpha)*float(gyro_z_filt[0]) + float(alpha * gyro_z))
accel_x_filt.append(float(1-alpha)*float(accel_x_filt[0]) + float(alpha * accel_x))
accel_y_filt.append(float(1-alpha)*float(accel_y_filt[0]) + float(alpha * accel_y))
with open('log_filt.txt', 'a+') as fp1:
fp1.write(str(gyro_z_filt[1]) + ',' + str(accel_x_filt[1]) + ',' + str(accel_y_filt[1]) + '\n')
fp1.close()
gyro_z_filt[0] = copy.copy(gyro_z_filt[1])
accel_x_filt[0] = copy.copy(accel_x_filt[1])
accel_y_filt[0] = copy.copy(accel_y_filt[1])
del(gyro_z_filt[1])
del(accel_x_filt[1])
del(accel_y_filt[1])
print "gyro_z: ", gyro_z_filt[0]
print "accel_x: ", accel_x_filt[0]
print "accel_y: ", accel_y_filt[0]
refresh = False
if float(gyro_z_filt[0]) > 8:
#if keybuffer[ord('a')] and not keybuffer[ord('d')]:
cos = math.cos(RotatingSpeed)
sin = math.sin(RotatingSpeed)
heading = [cos * heading[0] + sin * heading[2], heading[1], -sin * heading[0] + cos * heading[2]]
refresh = True
elif float(gyro_z_filt[0]) < -8:
#elif keybuffer[ord('d')] and not keybuffer[ord('a')]:
cos = math.cos(-RotatingSpeed)
sin = math.sin(-RotatingSpeed)
heading = [cos * heading[0] + sin * heading[2], heading[1], -sin * heading[0] + cos * heading[2]]
refresh = True
if float(accel_y_filt[0]) > 4:
#if keybuffer[ord('w')] and not keybuffer[ord('s')]:
new_loc = map(add, loc, map(lambda x: x * MovingSpeed, heading))
checkBlocked(loc, new_loc)
loc = new_loc
refresh = True
if float(accel_y_filt[0]) < -1.5:
#elif keybuffer[ord('s')] and not keybuffer[ord('w')]:
new_loc = map(add, loc, map(lambda x: x * -MovingSpeed, heading))
checkBlocked(loc, new_loc)
loc = new_loc
refresh = True
if refresh:
rx, ry, vx, vy = getRVcoordinates(loc)
for d in towards.iterkeys():
extendsSight(vx,vy,d,3)
if rx == N - 1 and ry == N - 1 and not done:
timer = time.time() - timer
done = True
print 'COMPLETE, TIME ELAPSED %.2fs' % timer
l = math.sqrt(reduce(add, map(mul, heading, heading)))
heading = map(div, heading, [l] * 3)
setCamera()
def resize(w, h):
global win_width, win_height
win_width, win_height = w, h
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60, float(w) / float(h) if h != 0 else float(w), 0.1, 10000)
glMatrixMode(GL_MODELVIEW)
setCamera()
def keyPressed(key, x, y):
global keybuffer
keybuffer[ord(key)] = True
glutPostRedisplay()
def keyReleased(key, x, y):
global keybuffer
keybuffer[ord(key)] = False
# From the CubeSkyy/ILU-RL repository (analysis/test_plots.py)
import os
import json
import pandas as pd
import argparse
import numpy as np
from pathlib import Path
from scipy import stats
import configparser
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
from analysis.utils import str2bool, get_emissions, get_vehicles, get_throughput
from ilurl.networks.base import Network
plt.style.use('ggplot')
FIGURE_X = 6.0
FIGURE_Y = 4.0
CONGESTED_INTERVAL = [28800.0, 32400.0] # 08h00 - 09h00
FREE_FLOW_INTERVAL = [79200.0, 82800.0] # 22h00 - 23h00
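# Vehicles are later split by finish time into these two windows so that congested
# (08h00-09h00) and free-flow (22h00-23h00) statistics can be reported separately.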
def get_arguments():
parser = argparse.ArgumentParser(
description="""
This script creates evaluation plots, given an experiment folder path.
(To be used with RL-algorithms)
"""
)
parser.add_argument('experiment_root_folder', type=str, nargs='?',
help='Experiment root folder.')
return parser.parse_args()
def print_arguments(args):
print('Arguments (analysis/test_plots.py):')
print('\tExperiment root folder: {0}\n'.format(args.experiment_root_folder))
def get_lanes_lengths(train_args):
network_args = {
'network_id': train_args['network'],
'horizon': int(train_args['experiment_time']),
'demand_type': train_args['demand_type'],
'demand_mode': train_args['demand_mode'],
'tls_type': train_args['tls_type']
}
network = Network(**network_args)
lanes_lengths = {
(edge['id'], int(lane['index'])): float(lane['length'])
for edge in network.edges
for lane in sorted(edge['lanes'], key=lambda x: int(x['index']))
}
return lanes_lengths
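# get_length recovers the lane length for an emissions row: lane ids have the form
# "<edge_id>_<lane_index>" (SUMO convention), so the id is split on its last underscore and
# looked up in the (edge_id, lane_index) -> length map built above; unknown lanes default to 0.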
def get_length(row, lanes_lengths):
*edge, lid = row['lane'].split('_')
eid = '_'.join(edge)
lid = int(lid)
return lanes_lengths.get((eid, lid), 0)
def main(experiment_root_folder=None):
print('\nRUNNING analysis/test_plots.py\n')
if not experiment_root_folder:
args = get_arguments()
print_arguments(args)
experiment_root_folder = args.experiment_root_folder
# Prepare output folder.
output_folder_path = os.path.join(experiment_root_folder, 'plots/test')
print('Output folder: {0}\n'.format(output_folder_path))
os.makedirs(output_folder_path, exist_ok=True)
# Get cycle length from tls_config.json file.
config_files = list(Path(experiment_root_folder).rglob('tls_config.json'))
with config_files[0].open('r') as f:
json_file = json.load(f)
cycle_time = json_file['rl']['cycle_time']
# Get all *.csv files from experiment root folder.
csv_files = [str(p) for p in list(Path(experiment_root_folder).rglob('*-emission.csv'))]
print('Number of csv files found: {0}'.format(len(csv_files)))
# Get agent_type and demand_type.
train_config_path = list(Path(experiment_root_folder).rglob('train.config'))[0]
train_config = configparser.ConfigParser()
train_config.read(train_config_path)
agent_type = train_config['agent_type']['agent_type']
demand_type = train_config['train_args']['demand_type']
vehicles_appended = []
throughputs = []
global_throughputs = []
mean_values_per_eval = []
lanes_lengths = get_lanes_lengths(train_config['train_args'])
def fn(x):
return get_length(x, lanes_lengths)
for csv_file in csv_files:
print('Processing CSV file: {0}'.format(csv_file))
# Load CSV data.
df_csv = get_emissions(csv_file)
df_csv['length'] = df_csv.apply(fn, axis=1)
df_per_vehicle = get_vehicles(df_csv)
df_per_vehicle_mean = df_per_vehicle.mean()
if demand_type not in ('constant',):
# Congested regime.
df_congested_period = df_per_vehicle[(df_per_vehicle['finish'] > CONGESTED_INTERVAL[0]) \
& (df_per_vehicle['finish'] < CONGESTED_INTERVAL[1])]
df_congested_period_mean = df_congested_period.mean()
# Free-flow.
df_free_flow_period = df_per_vehicle[(df_per_vehicle['finish'] > FREE_FLOW_INTERVAL[0]) \
& (df_per_vehicle['finish'] < FREE_FLOW_INTERVAL[1])]
df_free_flow_period_mean = df_free_flow_period.mean()
mean_values_per_eval.append({'train_run': Path(csv_file).parts[-4],
'speed': df_per_vehicle_mean['speed'],
'velocity': df_per_vehicle_mean['velocity'],
'stops': df_per_vehicle_mean['stops'],
'waiting_time': df_per_vehicle_mean['waiting'],
'travel_time': df_per_vehicle_mean['total'],
'speed_congested': df_congested_period_mean['speed'],
'velocity_congested': df_congested_period_mean['velocity'],
'stops_congested': df_congested_period_mean['stops'],
'waiting_time_congested': df_congested_period_mean['waiting'],
'travel_time_congested': df_congested_period_mean['total'],
'speed_free_flow': df_free_flow_period_mean['speed'],
'velocity_free_flow': df_free_flow_period_mean['velocity'],
'stops_free_flow': df_free_flow_period_mean['stops'],
'waiting_time_free_flow': df_free_flow_period_mean['waiting'],
'travel_time_free_flow': df_free_flow_period_mean['total'],
'throughput': len(df_per_vehicle)})
else:
mean_values_per_eval.append({'train_run': Path(csv_file).parts[-4],
'speed': df_per_vehicle_mean['speed'],
'velocity': df_per_vehicle_mean['velocity'],
'stops': df_per_vehicle_mean['stops'],
'waiting_time': df_per_vehicle_mean['waiting'],
'travel_time': df_per_vehicle_mean['total'],
'throughput': len(df_per_vehicle)})
vehicles_appended.append(df_per_vehicle)
df_throughput = get_throughput(df_csv)
throughputs.append(df_throughput)
global_throughputs.append(len(df_per_vehicle))
df_vehicles_appended = pd.concat(vehicles_appended)
df_throughputs_appended = pd.concat(throughputs)
print(df_vehicles_appended.shape)
print(df_throughputs_appended.shape)
# Write mean values per eval into a csv file.
df_mean_metrics_per_eval = pd.DataFrame(mean_values_per_eval)
if demand_type not in ('constant',):
cols = ["train_run", "speed", "velocity", "stops", "waiting_time", "travel_time", "throughput",
"speed_congested", "velocity_congested", "stops_congested", "waiting_time_congested", "travel_time_congested",
"speed_free_flow", "velocity_free_flow", "stops_free_flow", "waiting_time_free_flow", "travel_time_free_flow"]
else:
cols = ["train_run", "speed", "velocity", "stops", "waiting_time",
"travel_time", "throughput"]
df_mean_metrics_per_eval.to_csv('{0}/{1}_metrics.csv'.format(
output_folder_path,
Path(experiment_root_folder).parts[-1]
),
float_format='%.3f',
columns=cols)
"""
Waiting time stats.
"""
# Describe waiting time.
print('Waiting time:')
df_stats = df_vehicles_appended['waiting'].describe()
df_stats.to_csv('{0}/waiting_time_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
# plt.hist(df_vehicles_appended['waiting'], density=True)
kde = stats.gaussian_kde(df_vehicles_appended['waiting'])
kde_x = np.linspace(df_vehicles_appended['waiting'].min(), df_vehicles_appended['waiting'].max(), 1000)
kde_y = kde(kde_x)
plt.plot(kde_x, kde_y, linewidth=3)
# Store data in dataframe for further materialization.
waiting_time_hist_kde = pd.DataFrame()
waiting_time_hist_kde['x'] = kde_x
waiting_time_hist_kde['y'] = kde_y
plt.xlabel('Waiting time (s)')
plt.ylabel('Density')
# plt.title('Waiting time')
plt.savefig('{0}/waiting_time_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/waiting_time_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
"""
Travel time stats.
"""
# Describe travel time.
print('Travel time:')
df_stats = df_vehicles_appended['total'].describe()
df_stats.to_csv('{0}/travel_time_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
# plt.hist(df_vehicles_appended['total'], density=True)
kde = stats.gaussian_kde(df_vehicles_appended['total'])
kde_x = np.linspace(df_vehicles_appended['total'].min(), df_vehicles_appended['total'].max(), 1000)
kde_y = kde(kde_x)
plt.plot(kde_x, kde_y, linewidth=3)
# Store data in dataframe for further materialization.
travel_time_hist_kde = pd.DataFrame()
travel_time_hist_kde['x'] = kde_x
travel_time_hist_kde['y'] = kde_y
plt.xlabel('Travel time (s)')
plt.ylabel('Density')
# plt.title('Travel time')
plt.savefig('{0}/travel_time_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/travel_time_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
"""
Speed stats.
"""
# Describe vehicles' speed.
print('Speed:')
df_stats = df_vehicles_appended['speed'].describe()
df_stats.to_csv('{0}/speed_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
# plt.hist(df_vehicles_appended['speed'], density=True)
kde = stats.gaussian_kde(df_vehicles_appended['speed'])
kde_x = np.linspace(df_vehicles_appended['speed'].min(), df_vehicles_appended['speed'].max(), 1000)
kde_y = kde(kde_x)
plt.plot(kde_x, kde_y, linewidth=3)
# Store data in dataframe for further materialization.
speed_hist_kde = pd.DataFrame()
speed_hist_kde['x'] = kde_x
speed_hist_kde['y'] = kde_y
plt.xlabel('Speed (m/s)')
plt.ylabel('Density')
# plt.title('Vehicles\' speed')
plt.savefig('{0}/speeds_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/speeds_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
"""
Velocity stats.
"""
# Describe vehicles' velocity.
print('Velocity:')
df_stats = df_vehicles_appended['velocity'].describe()
df_stats.to_csv('{0}/velocity_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
kde = stats.gaussian_kde(df_vehicles_appended['velocity'])
kde_x = np.linspace(df_vehicles_appended['velocity'].min(), df_vehicles_appended['velocity'].max(), 1000)
kde_y = kde(kde_x)
plt.plot(kde_x, kde_y, linewidth=3)
# Store data in dataframe for further materialization.
velocity_hist_kde = pd.DataFrame()
velocity_hist_kde['x'] = kde_x
velocity_hist_kde['y'] = kde_y
plt.xlabel('Speed (m/s)')
plt.ylabel('Density')
plt.savefig('{0}/velocity_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/velocity_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
"""
Stops stats.
"""
# Describe the number of stops.
print('Stops:')
df_stats = df_vehicles_appended['stops'].describe()
df_stats.to_csv('{0}/stops_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
counts = df_vehicles_appended['stops'].value_counts(normalize=True)
plt.bar(list(counts.index), counts.values)
# Store data in dataframe for further materialization.
stops_hist_kde = pd.DataFrame()
stops_hist_kde['x'] = list(counts.index)
stops_hist_kde['y'] = counts.values
plt.xlabel('Number of stops')
plt.ylabel('Density')
plt.savefig('{0}/stops_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/stops_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
"""
Throughput stats.
(For the entire rollout)
"""
print('Throughput:')
df_stats = pd.DataFrame(global_throughputs).describe()
df_stats.to_csv('{0}/throughput_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
if demand_type not in ('constant',):
# Filter data by congested hour interval.
df_vehicles_appended_congested = df_vehicles_appended[(df_vehicles_appended['finish'] > CONGESTED_INTERVAL[0]) \
& (df_vehicles_appended['finish'] < CONGESTED_INTERVAL[1])]
"""
Waiting time stats (congested).
"""
# Describe waiting time.
print('-'*25)
print('Waiting time (congested):')
df_stats = df_vehicles_appended_congested['waiting'].describe()
df_stats.to_csv('{0}/waiting_time_congested_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
kde = stats.gaussian_kde(df_vehicles_appended_congested['waiting'])
kde_x = np.linspace(df_vehicles_appended_congested['waiting'].min(),
df_vehicles_appended_congested['waiting'].max(), 1000)
kde_y = kde(kde_x)
plt.plot(kde_x, kde_y, linewidth=3)
# Store data in dataframe for further materialization.
waiting_time_congested_hist_kde = pd.DataFrame()
waiting_time_congested_hist_kde['x'] = kde_x
waiting_time_congested_hist_kde['y'] = kde_y
plt.xlabel('Waiting time (s)')
plt.ylabel('Density')
# plt.title('Waiting time (congested)')
plt.savefig('{0}/waiting_time_congested_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/waiting_time_congested_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
"""
Travel time stats (congested).
"""
# Describe travel time.
print('Travel time (congested):')
df_stats = df_vehicles_appended_congested['total'].describe()
df_stats.to_csv('{0}/travel_time_congested_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
kde = stats.gaussian_kde(df_vehicles_appended_congested['total'])
kde_x = np.linspace(df_vehicles_appended_congested['total'].min(),
df_vehicles_appended_congested['total'].max(), 1000)
kde_y = kde(kde_x)
plt.plot(kde_x, kde_y, linewidth=3)
# Store data in dataframe for further materialization.
travel_time_congested_hist_kde = pd.DataFrame()
travel_time_congested_hist_kde['x'] = kde_x
travel_time_congested_hist_kde['y'] = kde_y
plt.xlabel('Travel time (s)')
plt.ylabel('Density')
# plt.title('Travel time (congested)')
plt.savefig('{0}/travel_time_congested_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/travel_time_congested_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
"""
Speed stats (congested).
"""
# Describe vehicles' speed.
print('Speed (congested):')
df_stats = df_vehicles_appended_congested['speed'].describe()
df_stats.to_csv('{0}/speed_congested_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
kde = stats.gaussian_kde(df_vehicles_appended_congested['speed'])
kde_x = np.linspace(df_vehicles_appended_congested['speed'].min(),
df_vehicles_appended_congested['speed'].max(), 1000)
kde_y = kde(kde_x)
plt.plot(kde_x, kde_y, linewidth=3)
# Store data in dataframe for further materialization.
speed_congested_hist_kde = pd.DataFrame()
speed_congested_hist_kde['x'] = kde_x
speed_congested_hist_kde['y'] = kde_y
plt.xlabel('Speed (m/s)')
plt.ylabel('Density')
# plt.title('Vehicles\' speed (congested)')
plt.savefig('{0}/speeds_congested_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/speeds_congested_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
"""
Velocity stats (congested).
"""
# Describe vehicles' velocity.
print('Velocity (congested):')
df_stats = df_vehicles_appended_congested['velocity'].describe()
df_stats.to_csv('{0}/velocity_congested_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
kde = stats.gaussian_kde(df_vehicles_appended_congested['velocity'])
kde_x = np.linspace(df_vehicles_appended_congested['velocity'].min(),
df_vehicles_appended_congested['velocity'].max(), 1000)
kde_y = kde(kde_x)
plt.plot(kde_x, kde_y, linewidth=3)
# Store data in dataframe for further materialization.
velocity_congested_hist_kde = pd.DataFrame()
velocity_congested_hist_kde['x'] = kde_x
velocity_congested_hist_kde['y'] = kde_y
plt.xlabel('Speed (m/s)')
plt.ylabel('Density')
plt.savefig('{0}/velocity_congested_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/velocity_congested_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
"""
Stops stats (congested).
"""
# Describe the number of stops.
print('Stops (congested):')
df_stats = df_vehicles_appended_congested['stops'].describe()
df_stats.to_csv('{0}/stops_congested_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
counts = df_vehicles_appended_congested['stops'].value_counts(normalize=True)
plt.bar(list(counts.index), counts.values)
# Store data in dataframe for further materialization.
stops_congested_hist_kde = pd.DataFrame()
stops_congested_hist_kde['x'] = list(counts.index)
stops_congested_hist_kde['y'] = counts.values
plt.xlabel('Number of stops')
plt.ylabel('Density')
plt.savefig('{0}/stops_congested_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/stops_congested_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
print('-'*25)
# Filter data by free-flow hour interval.
df_vehicles_appended_free_flow = df_vehicles_appended[(df_vehicles_appended['finish'] > FREE_FLOW_INTERVAL[0]) \
& (df_vehicles_appended['finish'] < FREE_FLOW_INTERVAL[1])]
"""
Waiting time stats (free-flow).
"""
# Describe waiting time.
print('Waiting time (free-flow):')
df_stats = df_vehicles_appended_free_flow['waiting'].describe()
df_stats.to_csv('{0}/waiting_time_free_flow_stats.csv'.format(output_folder_path),
float_format='%.3f', header=False)
print(df_stats)
print('\n')
# Histogram and KDE.
fig = plt.figure()
fig.set_size_inches(FIGURE_X, FIGURE_Y)
kde = stats.gaussian_kde(df_vehicles_appended_free_flow['waiting'])
kde_x = np.linspace(df_vehicles_appended_free_flow['waiting'].min(),
df_vehicles_appended_free_flow['waiting'].max(), 1000)
kde_y = kde(kde_x)
plt.plot(kde_x, kde_y, linewidth=3)
# Store data in dataframe for further materialization.
waiting_time_free_flow_hist_kde = pd.DataFrame()
waiting_time_free_flow_hist_kde['x'] = kde_x
waiting_time_free_flow_hist_kde['y'] = kde_y
plt.xlabel('Waiting time (s)')
plt.ylabel('Density')
# plt.title('Waiting time (Free-flow)')
plt.savefig('{0}/waiting_time_free_flow_hist.pdf'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.savefig('{0}/waiting_time_free_flow_hist.png'.format(output_folder_path), bbox_inches='tight', pad_inches=0)
plt.close()
"""
Travel time stats (free-flow).
"""
# Describe travel time.
print('Travel time (free-flow):')
from django.http.response import HttpResponse, JsonResponse
from django.urls import reverse
from django.shortcuts import redirect, render, get_object_or_404
from django.views.decorators.csrf import csrf_exempt ######## added in case of csrf token errors!
from django.db.models import Q
from django.core.paginator import Paginator
from rest_framework.serializers import Serializer
from .models import Cal_result, Foods, Subscription_apply, Menu_saved
from . serializers import FoodsSerializer
from .forms import CalculationForm, ApplySubscriptionForm
from doobi.doobi_pack import calculatings, recommendations, save_user_db, load_user_db # the logic for moving the user db lives in doobi_pack
from doobi.users.models import User
from doobi.doobi_pack.food_db_load import load_foods
from datetime import datetime, timedelta
import json
import time
import sweetify
# Create your views here.
def manual_recc(request):
if request.user.is_authenticated:
save_user_db.save_basic_profile(request)
dt_now = datetime.now()
last_profile_for_calculation = load_user_db.load_profile_for_calculation(request)
current_user_social_platform = load_user_db.load_user_social_platform(request)
if last_profile_for_calculation == None:
return render(
request,
'users/profile_need.html',
{
'nick_name':current_user_social_platform.get('nick_name'),
'social_platform':current_user_social_platform.get('social_platform'),
}
)
last_profiling_date = last_profile_for_calculation.get("created_at")
temp = last_profiling_date[:19]
dt_last_profiling_date = datetime.strptime(temp, "%Y-%m-%dT%H:%M:%S")
time_since_last_profiling_date = dt_now - dt_last_profiling_date
cal_result_data = load_user_db.load_cal_result_data(request)
last_cal_result_data = cal_result_data[len(cal_result_data) - 1]
# print("cal_res:", last_cal_result_data)
if request.method == "GET":
return render(
request,
'recc/main.html',
{
'nick_name':current_user_social_platform.get('nick_name'),
'social_platform':current_user_social_platform.get('social_platform'),
"profile_for_cal":last_profile_for_calculation,
'days_since_last_profiling':time_since_last_profiling_date.days,
'cal_res':last_cal_result_data,
}
)
else:
return redirect(reverse('users:loggingin'))
@csrf_exempt ########################### in case of csrf token errors
def profiling(request, current_page):
print('profiling value-passing check:', current_page)
if request.user.is_authenticated:
current_user_social_platform = load_user_db.load_user_social_platform(request)
if request.method == "GET":
form = CalculationForm(req=request)
return render(
request,
"recc/profiling.html",
{"form":form,
'nick_name':current_user_social_platform.get('nick_name'),
'social_platform':current_user_social_platform.get('social_platform'),
'current_page':current_page
})
elif request.method == "POST":
if request.user.is_authenticated:
current_user = get_object_or_404(User, pk=request.user.id)
form = CalculationForm(request.POST, req=request)
if form.is_valid():
# print("form validation check")
profile = form.save(commit=False)
print('user_not_like type:', type(form.cleaned_data['user_not_like']))
print('user_not_like check:', form.cleaned_data['user_not_like'])
# # also handle forms submitted with the same 1st- and 2nd-choice preference!
# if form.cleaned_data['user_preference1'] == form.cleaned_data['user_preference2']:
# sweetify.sweetalert(request, "앗 잠시만요!", text='선호 1순위 식품과 2순위 식품이 중복되었습니다 ㅠㅠ', persistent='확인', icon="warning")
# return redirect(reverse('recc:profiling', kwargs={'current_page':current_page}))
# # print("user_not_like 개수:", len(form.cleaned_data['user_not_like']))
# if len(form.cleaned_data['user_not_like']) > 3:
# sweetify.sweetalert(request, "앗 잠시만요!", text='선호하지 않는 음식은 최대 3개까지만 설정할 수 있어요 ㅠㅠ', persistent='확인', icon="warning")
# return redirect(reverse('recc:profiling', kwargs={'current_page':current_page}))
# # handle the careless form where a category is picked as the 1st/2nd favorite and also marked as not liked
# preference_list = [form.cleaned_data['user_preference1'], form.cleaned_data['user_preference2']]
# for not_like in form.cleaned_data['user_not_like']:
# # print("not_like:", not_like)
# if int(not_like) in preference_list:
# sweetify.sweetalert(request, "앗 잠시만요!", text='선호 / 비선호 식품이 중복되었어요! ㅠㅠ', persistent='확인', icon="warning")
# return redirect(reverse('recc:profiling', kwargs={'current_page':current_page}))
profile.user = current_user
profile.save()
form.save_m2m() # save() alone does not persist the m2m form fields.
return redirect(reverse("recc:cal_exe", kwargs={'current_page':current_page}))
else:
msg = ""
for i in form.errors.values():
msg = i
if form.errors:
sweetify.sweetalert(request, "앗 잠시만요!", text=msg, persistent='확인', icon="warning")
# if form validation fails, the page has to be re-rendered
return render(
request,
"recc/profiling.html",
{"form":form,
'nick_name':current_user_social_platform.get('nick_name'),
'social_platform':current_user_social_platform.get('social_platform'),
'current_page':current_page
})
# return redirect(reverse('recc:profiling', kwargs={'current_page':current_page}))
else:
return redirect(reverse('users:loggingin'))
# when calculation() receives a POST it has to exit via a redirect for the Django form data to reach the db
# --> so cal_exe was added: it loads the profile, runs the calculation, and saves the result to the db
def cal_exe(request, current_page):
print('cal_current_page:', current_page)
if request.user.is_authenticated:
if request.method == "GET":
if request.user.is_authenticated:
current_user = get_object_or_404(User, pk=request.user.id)
profile_for_calculation = load_user_db.load_profile_for_calculation(request)
if profile_for_calculation == None:
# print("사용자 정보가 없음 정보입력을 먼저 해주세요")
return redirect(reverse("recc:main"))
calc = calculatings.Calculatings(profile_for_calculation)
calcualtion_result = calc.calculating_target_nutr()
# print("calculation_result:", calcualtion_result)
# accessing the DB models directly with psycopg2 from another module like this
# makes it hard to store values into the timestamped / foreign-key columns
# they should really be accessed through the Django models, but imports kept failing from other modules, so it is done here!
# (the doobi_pack app was created with django-admin to replace doobi_libs for this)
save_user_db.save_calculation_result(current_user, calcualtion_result)
# update the user fields on every calculation (the recc models take User as a foreign key)
# this seems the better place to handle the data
user_update = User.objects.get(pk=request.user.id)
user_update.gender = profile_for_calculation.get("gender")
user_update.age = profile_for_calculation.get("age")
user_update.height = profile_for_calculation.get('height')
user_update.weight = profile_for_calculation.get("weight")
user_update.purpose = profile_for_calculation.get('purpose')
# user_update.diseases = profile_for_calculation.get('diseases')
user_update.diseases.set(profile_for_calculation.get('diseases'))
user_update.duration_aero_tr = profile_for_calculation.get('duration_aero_tr')
user_update.duration_weight_tr = profile_for_calculation.get('duration_weight_tr')
user_update.prot_amount_hope = profile_for_calculation.get('prot_amount_hope')
user_update.budget = profile_for_calculation.get('budget')
user_update.user_preference1 = profile_for_calculation.get('preference1')
user_update.user_preference2 = profile_for_calculation.get('preference2')
user_update.user_not_like = profile_for_calculation.get('user_not_like')
user_update.add_calories = profile_for_calculation.get('add_calories')
user_update.save()
if current_page == 'main':
return redirect(reverse('recc:main'))
elif current_page == 'mypage':
return redirect(reverse('users:mypage'))
else:
return redirect(reverse('recc:main'))
else:
return redirect(reverse('users:loggingin'))
# function that runs the food recommendation when the button is clicked, without showing a new page
# load the profile for recc -> run recc with the profile -> print recc_result to the console -> save to db
# !!!!!!!!!!!!!!!!!!! no idea why request.user.is_authenticated does not work here....
def recc_exe(request):
if request.user.is_authenticated:
if request.is_ajax():
# received as serializer data from load_profile_for_recc ->
# cal_result, user_budget, user_preference -> packed into a dictionary!
manual_recc_data = {
'cur_cal':float(request.GET.get('cur_cal')),
'cur_carb':float(request.GET.get('cur_carb')),
'cur_prot':float(request.GET.get('cur_prot')),
'cur_fat':float(request.GET.get('cur_fat')),
'cur_sugar':float(request.GET.get('cur_sugar')),
'cur_sodium':float(request.GET.get('cur_sodium')),
'cur_price':int(request.GET.get('cur_price'))
}
info_for_recc = load_user_db.load_profile_for_recc(request)
####################### added: pass manual_data through
recommendation = recommendations.Recommendation(info_for_recc, manual_recc_data)
recc_result = recommendation.recommend_menu()
# print("recc_result:", recc_result)
if recc_result != None:
message = "식단 추천 완료"
else:
message = "만족하는 식단이 없습니다. 이미 식품을 많이 추가하신 건 아닌가요?!?!"
# 기존 recc_result db저장하던 것 일단 삭제
# !!!!!!!!!!!!!!!!잠시!!!!!!!!!!!!!!!!!
# recommendation_result = recommendation.recommend_food()
# # print(recommendation_result)
# if recommendation_result != None:
# # the db save is not really needed any more, but keep it for verification.
# save_user_db.save_recommendation_result(request, recommendation_result)
# message = "식단 추천 완료"
# # time.sleep(1)
# # print(recommendation_result)
# # fetch the likes info for the recommended foods!! if foods were loaded via the Django ORM this extra fetch would be unnecessary,
# # but that was far too slow, so the food db is first loaded quickly with psycopg..
# # and the food info including likes is fetched again after the recommendation
# food_list = []
# for food in recommendation_result:
# food_list.append(food[0])
# foods = Foods.objects.filter(pk__in=food_list)
# foods_data = FoodsSerializer(foods, many=True).data
# elif recommendation_result == None:
# foods_data = None
# message = '만족하는 식단이 없습니다. 이미 식품을 많이 추가하신 건 아닌가요?!?!'
# context = {
# 'recc_result':foods_data,
# 'recc_message':message
# }
context = {
'recc_result':recc_result,
'recc_message':message
}
return HttpResponse(json.dumps(context), content_type='application/json')
else:
return redirect(reverse('users:loggingin'))
# Likes feature!
def likes(request):
if request.is_ajax():
food_id = request.GET.get('food_id')
food = Foods.objects.get(pk=food_id) ####(id=product_id??)
current_user = request.user
if request.user.is_authenticated:
if food.likes.filter(id=current_user.id).exists():
# Is this id the user's id?... in PostgreSQL the column shows up as user_id
food.likes.remove(current_user)
message = "좋아요 취소"
else:
food.likes.add(current_user)
message = "좋아요"
context = {
'likes_count':food.likes.count(),
'message':message,
}
# Serialize the dictionary to JSON and return it asynchronously (HttpResponse)
return HttpResponse(json.dumps(context), content_type='application/json')
def list_foods(request):
if request.is_ajax():
category = request.GET.get('category')
# Query the DB and handle the list_foods logic
# print("category:", category)
subcategory = request.GET.get('subcategory')
option = {
'nutr_targeting_check':request.GET.get('nutr_targeting_check'),
'random_foods_check':request.GET.get('random_foods_check'),
'cal_target':request.GET.get('cal_target'),
'carb_target':request.GET.get('carb_target'),
'prot_target':request.GET.get('prot_target'),
'fat_target':request.GET.get('fat_target'),
'cal_tot':request.GET.get('cal_tot'),
'carb_tot':request.GET.get('carb_tot'),
'prot_tot':request.GET.get('prot_tot'),
'fat_tot':request.GET.get('fat_tot'),
}
# print('target:', target)
foods = load_foods(category, subcategory, option)
if not foods:
message = '목표섭취량을 만족하는 식품이 없습니다.'
else:
message = '해당 카테고리의 식품을 추천해드렸습니다!'
context = {
'foods':foods,
'message':message
}
return HttpResponse(json.dumps(context), content_type='application/json')
def list_liked_foods(request):
if request.is_ajax():
liked_foods_data = load_user_db.load_liked_foods_data(request)
message = '고객님이 좋아하는 식품 리스트 입니다~!'
if not liked_foods_data:
message = '고객님이 좋아하는 식품이 아직 없습니다~!'
context = {
'foods':liked_foods_data,
'message':message
}
return HttpResponse(json.dumps(context), content_type='application/json')
def list_saved_menu(request):
if request.is_ajax():
saved_menu_list, save_time_list = load_user_db.load_saved_menu_data(request)
message = '고객님이 저장한 식단 리스트 입니다~!'
if not saved_menu_list:
message = '저장된 식단이 아직 없습니다~!'
context = {
'menu':saved_menu_list,
'time':save_time_list,
'message':message
}
return HttpResponse(json.dumps(context), content_type='application/json')
def list_searched_foods(request):
if request.is_ajax():
search_type = request.GET.get('search_type')
query = request.GET.get('query')
current_page = int(request.GET.get('current_page'))
# print(search_type, query)
# Removed the pagination handling for now
# 1p -> 0~9 => [0:10] / 2p -> 10~19 [10:20] /
# num_per_page = 10
# start = (current_page-1) * num_per_page
# end = start + num_per_page
if search_type == '전체':
foods = Foods.objects.prefetch_related('likes').filter(
Q(product_name__icontains=query) | Q(company__icontains=query)
)
foods_count = foods.count()
if foods_count >= 100:
foods = foods[:100]
# foods_per_page = foods[start:end]
elif search_type == '식품명':
foods = Foods.objects.prefetch_related('likes').filter(product_name__icontains=query)
foods_count = foods.count()
if foods_count >= 100:
foods = foods[:100]
# foods_per_page = foods[start:end]
elif search_type == '회사명':
foods = Foods.objects.prefetch_related('likes').filter(company__icontains=query)
foods_count = foods.count()
if foods_count >= 100:
foods = foods[:100]
# foods_per_page = foods[start:end]
# pages = (foods_count // num_per_page) + 1
if not foods:
message = f'"{query}"로 검색하신 결과가 없습니다ㅠㅠ'
foods_data = None
elif foods_count >= 100:
message = f'{query}로 검색하신 결과가 100개 이상입니다. 검색어를 조금 더 자세히 적어주세요~!'
else:
message = f'"{query}" 총 {foods_count}개의 검색결과가 있습니다~!'
# Originally planned to handle pagination manually... just using DataTables instead!!
# foods_data = FoodsSerializer(foods_per_page, many=True).data
foods_data = FoodsSerializer(foods, many=True).data
context = {
'foods_count':foods_count,
# 'pages':pages,
'foods':foods_data,
'message':message,
}
return JsonResponse(context)
def save_menu(request):
if request.user.is_authenticated:
menu_list = json.loads(request.GET.get('menu_list'))
if not menu_list: # empty list... an == None check does not work here.
message = '장바구니에 담긴 식품이 없습니다~ 식품을 먼저 선택해주세요~'
else:
message = '식단이 저장되었습니다.'
### only the DB handling remains!!!!!!
current_user = get_object_or_404(User, pk=request.user.id)
menu_saved = Menu_saved(
user = current_user,
menu = menu_list
)
menu_saved.save()
context = {'message':message}
return HttpResponse(json.dumps(context), content_type='application/json')
else:
return redirect(reverse('users:loggingin'))
def subscription(request):
if request.user.is_authenticated:
current_user_social_platform = load_user_db.load_user_social_platform(request)
current_user = get_object_or_404(User, pk=request.user.id)
current_user_applied_data = Subscription_apply.objects.filter(user=current_user).exists()
if request.method == "GET":
return render(
request,
"recc/subscription.html",
{
'nick_name':current_user_social_platform.get('nick_name'),
'social_platform':current_user_social_platform.get('social_platform'),
"applied_data":current_user_applied_data,
}
)
else:
return redirect(reverse('users:loggingin'))
def apply_subscription(request):
if request.user.is_authenticated:
current_user_social_platform | |
or quadratic. In general it sends M_k(chi) to M_k(1/chi)
"""
return self.modular_symbols(sign=1).atkin_lehner_operator(d).matrix()[0,0]
def twist(self, chi, level=None, check=True):
r"""
Return the twist of the newform ``self`` by the Dirichlet
character ``chi``.
If ``self`` is a newform `f` with character `\epsilon` and
`q`-expansion
.. math::
f(q) = \sum_{n=1}^\infty a_n q^n,
then the twist by `\chi` is the unique newform `f\otimes\chi`
with character `\epsilon\chi^2` and `q`-expansion
.. math::
(f\otimes\chi)(q) = \sum_{n=1}^\infty b_n q^n
satisfying `b_n = \chi(n) a_n` for all but finitely many `n`.
INPUT:
- ``chi`` -- a Dirichlet character. Note that Sage must be able to
determine a common base field into which both the Hecke eigenvalue
field of self, and the field of values of ``chi``, can be embedded.
- ``level`` -- (optional) the level `N` of the twisted form.
By default, the algorithm tries to compute `N` using
[Atkin-Li]_, Theorem 3.1.
- ``check`` -- (optional) boolean; if ``True`` (default), ensure that
the space of modular symbols that is computed is genuinely simple and
new. This makes it less likely that a wrong result is returned if an
incorrect ``level`` is specified.
OUTPUT:
The form `f\otimes\chi` as an element of the set of newforms
for `\Gamma_1(N)` with character `\epsilon\chi^2`.
EXAMPLES::
sage: G = DirichletGroup(3, base_ring=QQ)
sage: Delta = Newforms(SL2Z, 12)[0]; Delta
q - 24*q^2 + 252*q^3 - 1472*q^4 + 4830*q^5 + O(q^6)
sage: Delta.twist(G[0]) == Delta
True
sage: Delta.twist(G[1]) # long time (about 5 s)
q + 24*q^2 - 1472*q^4 - 4830*q^5 + O(q^6)
sage: M = CuspForms(Gamma1(13), 2)
sage: f = M.newforms('a')[0]; f
q + a0*q^2 + (-2*a0 - 4)*q^3 + (-a0 - 1)*q^4 + (2*a0 + 3)*q^5 + O(q^6)
sage: f.twist(G[1])
q - a0*q^2 + (-a0 - 1)*q^4 + (-2*a0 - 3)*q^5 + O(q^6)
sage: f = Newforms(Gamma1(30), 2, names='a')[1]; f
q + a1*q^2 - a1*q^3 - q^4 + (a1 - 2)*q^5 + O(q^6)
sage: f.twist(f.character())
Traceback (most recent call last):
...
NotImplementedError: cannot calculate 5-primary part of the level of the twist of q + a1*q^2 - a1*q^3 - q^4 + (a1 - 2)*q^5 + O(q^6) by Dirichlet character modulo 5 of conductor 5 mapping 2 |--> -1
sage: f.twist(f.character(), level=30)
q - a1*q^2 + a1*q^3 - q^4 + (-a1 - 2)*q^5 + O(q^6)
TESTS:
We test that feeding inappropriate values of the ``level`` parameter is handled gracefully::
sage: chi = DirichletGroup(1)[0]
sage: Delta.twist(chi, level=3)
Traceback (most recent call last):
...
ValueError: twist of q - 24*q^2 + 252*q^3 - 1472*q^4 + 4830*q^5 + O(q^6) by Dirichlet character modulo 1 of conductor 1 is not a newform of level 3
Twisting and twisting back works::
sage: f = Newforms(11)[0]
sage: chi = DirichletGroup(5).0
sage: f.twist(chi).twist(~chi, level=11) == f
True
AUTHORS:
- <NAME> (April 2015)
"""
from sage.modular.all import CuspForms
coercion_model = get_coercion_model()
R = coercion_model.common_parent(self.base_ring(), chi.base_ring())
N = self.level()
epsilon = self.character()
chi = chi.primitive_character()
if level is None:
N_epsilon = epsilon.conductor()
N_chi = chi.conductor()
G = DirichletGroup(N_epsilon.lcm(N_chi), base_ring=R)
epsilon_chi = G(epsilon) * G(chi)
N_epsilon_chi = epsilon_chi.conductor()
for q in N_chi.prime_divisors():
# See [Atkin-Li], Theorem 3.1.
alpha = N_epsilon.valuation(q)
beta = N_chi.valuation(q)
gamma = N.valuation(q)
delta = max(alpha + beta, 2*beta, gamma)
if delta == gamma and max(alpha + beta, 2*beta) < gamma:
continue
if delta > gamma and N_epsilon_chi.valuation(q) == max(alpha, beta):
continue
raise NotImplementedError('cannot calculate %s-primary part of the level of the twist of %s by %s'
% (q, self, chi))
level = lcm([N, N_epsilon * N_chi, N_chi**2])
# determine the character of the twisted form
G = DirichletGroup(lcm([N, chi.modulus(), level]), base_ring=R)
eps_new = (G(epsilon) * G(chi)**2).restrict(level)
# create an ambient space
D = ModularSymbols(eps_new, self.weight(), base_ring=R, sign=1).new_submodule()
S = CuspForms(eps_new, self.weight(), base_ring=R)
# pull out the eigenspace
for p in prime_range(500):
if p.divides(N) or p.divides(chi.level()):
continue
D = (D.hecke_operator(p) - self[p]*chi(p)).kernel()
if D.rank() == 1: break
if D.is_zero():
raise ValueError('twist of %s by %s is not a newform of level %s' % (self, chi, level))
else:
raise RuntimeError('unable to identify modular symbols for twist of %s by %s' % (self, chi))
return Newform(S, D, names='_', check=check)
class ModularFormElement(ModularForm_abstract, element.HeckeModuleElement):
def __init__(self, parent, x, check=True):
r"""
An element of a space of modular forms.
INPUT:
- ``parent`` - ModularForms (an ambient space of modular forms)
- ``x`` - a vector on the basis for parent
- ``check`` - if check is ``True``, check the types of the
inputs.
OUTPUT:
- ``ModularFormElement`` - a modular form
EXAMPLES::
sage: M = ModularForms(Gamma0(11),2)
sage: f = M.0
sage: f.parent()
Modular Forms space of dimension 2 for Congruence Subgroup Gamma0(11) of weight 2 over Rational Field
"""
if not isinstance(parent, space.ModularFormsSpace):
raise TypeError("First argument must be an ambient space of modular forms.")
element.HeckeModuleElement.__init__(self, parent, x)
def _compute_q_expansion(self, prec):
"""
Computes the q-expansion of self to precision prec.
EXAMPLES::
sage: f = EllipticCurve('37a').modular_form()
sage: f.q_expansion() ## indirect doctest
q - 2*q^2 - 3*q^3 + 2*q^4 - 2*q^5 + O(q^6)
sage: f._compute_q_expansion(10)
q - 2*q^2 - 3*q^3 + 2*q^4 - 2*q^5 + 6*q^6 - q^7 + 6*q^9 + O(q^10)
"""
return self.parent()._q_expansion(element = self.element(), prec=prec)
def _add_(self, other):
"""
Add self to other.
EXAMPLES::
sage: f = ModularForms(DirichletGroup(17).0^2,2).2
sage: g = ModularForms(DirichletGroup(17).0^2,2).1
sage: f
q + (-zeta8^2 + 2)*q^2 + (zeta8 + 3)*q^3 + (-2*zeta8^2 + 3)*q^4 + (-zeta8 + 5)*q^5 + O(q^6)
sage: g
1 + (-14/73*zeta8^3 + 57/73*zeta8^2 + 13/73*zeta8 - 6/73)*q^2 + (-90/73*zeta8^3 + 64/73*zeta8^2 - 52/73*zeta8 + 24/73)*q^3 + (-81/73*zeta8^3 + 189/73*zeta8^2 - 3/73*zeta8 + 153/73)*q^4 + (72/73*zeta8^3 + 124/73*zeta8^2 + 100/73*zeta8 + 156/73)*q^5 + O(q^6)
sage: f+g ## indirect doctest
1 + q + (-14/73*zeta8^3 - 16/73*zeta8^2 + 13/73*zeta8 + 140/73)*q^2 + (-90/73*zeta8^3 + 64/73*zeta8^2 + 21/73*zeta8 + 243/73)*q^3 + (-81/73*zeta8^3 + 43/73*zeta8^2 - 3/73*zeta8 + 372/73)*q^4 + (72/73*zeta8^3 + 124/73*zeta8^2 + 27/73*zeta8 + 521/73)*q^5 + O(q^6)
"""
return ModularFormElement(self.parent(), self.element() + other.element())
def __mul__(self, other):
r"""
Calculate the product self * other.
This tries to determine the
characters of self and other, in order to avoid having to compute a
(potentially very large) Gamma1 space. Note that this might lead to
a modular form that is defined with respect to a larger subgroup than
the factors are.
An example with character::
sage: f = ModularForms(DirichletGroup(3).0, 3).0
sage: f * f
1 + 108*q^2 + 144*q^3 + 2916*q^4 + 8640*q^5 + O(q^6)
sage: (f*f).parent()
Modular Forms space of dimension 3 for Congruence Subgroup Gamma0(3) of weight 6 over Rational Field
sage: (f*f*f).parent()
Modular Forms space of dimension 4, character [-1] and weight 9 over Rational Field
An example where the character is computed on-the-fly::
sage: f = ModularForms(Gamma1(3), 5).0
sage: f*f
1 - 180*q^2 - 480*q^3 + 8100*q^4 + 35712*q^5 + O(q^6)
sage: (f*f).parent()
Modular Forms space of dimension 4 for Congruence Subgroup Gamma0(3) of weight 10 over Rational Field
sage: f = ModularForms(Gamma1(3), 7).0
sage: f*f
q^2 - 54*q^4 + 128*q^5 + O(q^6)
sage: (f*f).parent()
Modular Forms space of dimension 5 for Congruence Subgroup Gamma0(3) of weight 14 over Rational Field
An example with no character::
sage: f = ModularForms(Gamma1(5), 2).0
sage: f*f
1 + 120*q^3 - 240*q^4 + 480*q^5 + O(q^6)
sage: (f*f).parent()
Modular Forms space of dimension 5 for Congruence Subgroup Gamma1(5) of weight 4 over Rational Field
TESTS:
This shows that the issue at :trac:`7548` is fixed::
sage: M = CuspForms(Gamma0(5*3^2), 2)
sage: f = M.basis()[0]
sage: 2*f
2*q - 2*q^4 + O(q^6)
sage: f*2
2*q - 2*q^4 + O(q^6)
"""
# boring case: scalar multiplication
if not isinstance(other, ModularFormElement):
return element.HeckeModuleElement.__mul__(self, other)
# first ensure the levels are equal
if self.level() != other.level():
raise NotImplementedError("Cannot multiply forms of different levels")
# find out about characters
try:
eps1 = self.character()
verbose("character of left is %s" % eps1)
eps2 = other.character()
verbose("character of right is %s" % eps2)
newchar = eps1 * eps2
verbose("character of product is %s" % newchar)
except (NotImplementedError, ValueError):
newchar | |
n) * s, s))
>>> node_groups = [ut.lmap(str, range(*o)) for o in ut.itertwo(offsets)]
>>> edge_groups = [ut.combinations(nodes, 2) for nodes in node_groups]
>>> graph = nx.Graph()
>>> [graph.add_nodes_from(nodes) for nodes in node_groups]
>>> [graph.add_edges_from(edges) for edges in edge_groups]
>>> for count, nodes in enumerate(node_groups):
... nx.set_node_attributes(graph, name='id', values=ut.dzip(nodes, [count]))
>>> layoutkw = dict(prog='neato')
>>> graph1, info1 = nx_agraph_layout(graph.copy(), inplace=True, groupby='id', **layoutkw)
>>> graph2, info2 = nx_agraph_layout(graph.copy(), inplace=True, **layoutkw)
>>> graph3, _ = nx_agraph_layout(graph1.copy(), inplace=True, **layoutkw)
>>> nx.set_node_attributes(graph1, name='pin', values='true')
>>> graph4, _ = nx_agraph_layout(graph1.copy(), inplace=True, **layoutkw)
>>> if pt.show_was_requested():
>>> pt.show_nx(graph1, layout='custom', pnum=(2, 2, 1), fnum=1)
>>> pt.show_nx(graph2, layout='custom', pnum=(2, 2, 2), fnum=1)
>>> pt.show_nx(graph3, layout='custom', pnum=(2, 2, 3), fnum=1)
>>> pt.show_nx(graph4, layout='custom', pnum=(2, 2, 4), fnum=1)
>>> pt.show_if_requested()
>>> g1pos = nx.get_node_attributes(graph1, 'pos')['1']
>>> g4pos = nx.get_node_attributes(graph4, 'pos')['1']
>>> g2pos = nx.get_node_attributes(graph2, 'pos')['1']
>>> g3pos = nx.get_node_attributes(graph3, 'pos')['1']
>>> print('g1pos = {!r}'.format(g1pos))
>>> print('g4pos = {!r}'.format(g4pos))
>>> print('g2pos = {!r}'.format(g2pos))
>>> print('g3pos = {!r}'.format(g3pos))
>>> assert np.all(g1pos == g4pos), 'points between 1 and 4 were pinned so they should be equal'
>>> #assert np.all(g2pos != g3pos), 'points between 2 and 3 were not pinned, so they should be different'
assert np.all(nx.get_node_attributes(graph1, 'pos')['1'] == nx.get_node_attributes(graph4, 'pos')['1'])
assert np.all(nx.get_node_attributes(graph2, 'pos')['1'] == nx.get_node_attributes(graph3, 'pos')['1'])
"""
# import networkx as nx
import pygraphviz
# graph_ = get_explicit_graph(orig_graph).copy()
graph_ = get_explicit_graph(orig_graph)
# only_explicit = True
# if only_explicit:
num_nodes = len(graph_)
is_large = num_nodes > LARGE_GRAPH
# layoutkw = layoutkw.copy()
draw_implicit = layoutkw.pop('draw_implicit', True)
pinned_groups = False
if groupby is not None:
pinned_groups, graph_ = _groupby_prelayout(
graph_, layoutkw=layoutkw, groupby=groupby
)
prog = layoutkw.pop('prog', 'dot')
if prog != 'dot':
layoutkw['overlap'] = layoutkw.get('overlap', 'false')
layoutkw['splines'] = layoutkw.get('splines', 'spline')
if prog == 'neato':
layoutkw['notranslate'] = 'true' # for neato postprocessing
if True:
argparts = ['-G%s=%s' % (key, str(val)) for key, val in layoutkw.items()]
splines = layoutkw['splines']
else:
# layoutkw is allowed to overwrite graph.graph['graph']
args_kw = graph_.graph.get('graph', {}).copy()
for key, val in layoutkw.items():
if key in GRAPHVIZ_KEYS.G and val is not None:
if key not in args_kw:
args_kw[key] = val
# del args_kw['sep']
# del args_kw['nodesep']
# del args_kw['overlap']
# del args_kw['notranslate']
argparts = ['-G{}={}'.format(key, val) for key, val in args_kw.items()]
splines = args_kw['splines']
args = ' '.join(argparts)
if verbose is None:
verbose = ut.VERBOSE
if verbose or is_large:
logger.info('[nx_agraph_layout] args = %r' % (args,))
# Convert to agraph format
agraph = make_agraph(graph_)
# Run layout
# logger.info('prog = %r' % (prog,))
if verbose > 3:
logger.info('BEFORE LAYOUT\n' + str(agraph))
if is_large:
logger.info(
'Performing agraph layout on graph with %d nodes. '
'May take time' % (num_nodes)
)
# import warnings
# warnings.filterwarnings("error")
# import warnings
# flag = False
# for node in graph_.nodes():
# anode = pygraphviz.Node(agraph, node)
# ptstr_ = anode.attr['pos']
# logger.info('ptstr_ = %r' % (ptstr_,))
# FIXME; This spits out warnings on weird color input
# import warnings
# with warnings.catch_warnings(record=True):
# # warnings.filterwarnings('error')
# warnings.filterwarnings('ignore')
try:
agraph.layout(prog=prog, args=args)
except Exception as ex:
ut.printex(ex, tb=True)
# import utool
# utool.embed()
raise
# except RuntimeWarning as ex:
# ut.printex(ex, iswarning=True)
# flag = True
# if flag:
# import utool
# utool.embed()
if is_large:
logger.info('Finished agraph layout.')
if 0:
test_fpath = ut.truepath('~/test_graphviz_draw.png')
agraph.draw(test_fpath)
ut.startfile(test_fpath)
if verbose > 3:
logger.info('AFTER LAYOUT\n' + str(agraph))
# TODO: just replace with a single dict of attributes
node_layout_attrs = ut.ddict(dict)
edge_layout_attrs = ut.ddict(dict)
# for node in agraph.nodes():
for node in graph_.nodes():
anode = pygraphviz.Node(agraph, node)
node_attrs = parse_anode_layout_attrs(anode)
for key, val in node_attrs.items():
node_layout_attrs[key][node] = val
edges = list(ut.nx_edges(graph_, keys=True))
for edge in edges:
aedge = pygraphviz.Edge(agraph, *edge)
edge_attrs = parse_aedge_layout_attrs(aedge)
for key, val in edge_attrs.items():
edge_layout_attrs[key][edge] = val
if draw_implicit:
# ADD IN IMPLICIT EDGES
layout_edges = set(ut.nx_edges(graph_, keys=True))
orig_edges = set(ut.nx_edges(orig_graph, keys=True))
implicit_edges = list(orig_edges - layout_edges)
# all_edges = list(set.union(orig_edges, layout_edges))
needs_implicit = len(implicit_edges) > 0
if needs_implicit:
# Pin down positions
for node in agraph.nodes():
anode = pygraphviz.Node(agraph, node)
anode.attr['pin'] = 'true'
anode.attr['pos'] += '!'
# Add new edges to route
for iedge in implicit_edges:
data = orig_graph.get_edge_data(*iedge)
agraph.add_edge(*iedge, **data)
if ut.VERBOSE or verbose:
logger.info('BEFORE IMPLICIT LAYOUT\n' + str(agraph))
# Route the implicit edges (must use neato)
control_node = pygraphviz.Node(agraph, node)
# logger.info('control_node = %r' % (control_node,))
node1_attr1 = parse_anode_layout_attrs(control_node)
# logger.info('node1_attr1 = %r' % (node1_attr1,))
implicit_kw = layoutkw.copy()
implicit_kw['overlap'] = 'true'
# del implicit_kw['overlap'] # can cause node positions to change
argparts = ['-G%s=%s' % (key, str(val)) for key, val in implicit_kw.items()]
args = ' '.join(argparts)
if is_large:
logger.info(
'[nx_agraph_layout] About to draw implicit layout ' 'for large graph.'
)
agraph.layout(prog='neato', args='-n ' + args)
if is_large:
logger.info(
'[nx_agraph_layout] done with implicit layout for ' 'large graph.'
)
if False:
agraph.draw(ut.truepath('~/implicit_test_graphviz_draw.png'))
if ut.VERBOSE or verbose:
logger.info('AFTER IMPLICIT LAYOUT\n' + str(agraph))
control_node = pygraphviz.Node(agraph, node)
# logger.info('control_node = %r' % (control_node,))
node1_attr2 = parse_anode_layout_attrs(control_node)
# logger.info('node1_attr2 = %r' % (node1_attr2,))
# graph positions shifted
# This is not the right place to divide by 72
translation = node1_attr1['pos'] - node1_attr2['pos']
# logger.info('translation = %r' % (translation,))
# translation = np.array([0, 0])
# logger.info('translation = %r' % (translation,))
# for iedge in all_edges:
for iedge in implicit_edges:
aedge = pygraphviz.Edge(agraph, *iedge)
iedge_attrs = parse_aedge_layout_attrs(aedge, translation)
for key, val in iedge_attrs.items():
edge_layout_attrs[key][iedge] = val
if pinned_groups:
# Remove temporary pins put in place by groups
ut.nx_delete_node_attr(graph_, 'pin')
graph_layout_attrs = dict(splines=splines)
layout_info = {
'graph': graph_layout_attrs,
'edge': dict(edge_layout_attrs),
'node': dict(node_layout_attrs),
}
if inplace:
apply_graph_layout_attrs(orig_graph, layout_info)
graph = orig_graph
else:
# FIXME: there is really no point to returning graph unless we actually
# modify its attributes
graph = graph_
if return_agraph:
return graph, layout_info, agraph
else:
return graph, layout_info
def parse_point(ptstr):
try:
xx, yy = ptstr.strip('!').split(',')
xy = np.array((float(xx), float(yy)))
except Exception:
xy = None
return xy
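# Illustrative usage sketch (not part of the original module): parse_point
# expects a graphviz-style "x,y" point string, optionally suffixed with '!'
# for pinned positions, and returns None for anything it cannot parse.
#     parse_point('12.5,7.25!')  # -> np.array([12.5, 7.25])
#     parse_point(None)          # -> None (the exception is swallowed)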
def parse_anode_layout_attrs(anode):
node_attrs = {}
# try:
xx, yy = anode.attr['pos'].strip('!').split(',')
xy = np.array((float(xx), float(yy)))
# except Exception:
# xy = np.array((0.0, 0.0))
adpi = 72.0
width = float(anode.attr['width']) * adpi
height = float(anode.attr['height']) * adpi
node_attrs['width'] = width
node_attrs['height'] = height
node_attrs['size'] = (width, height)
node_attrs['pos'] = xy
return node_attrs
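# Illustrative sketch (hypothetical attribute values): graphviz reports node
# positions in points and node width/height in inches, hence the 72 dpi scale.
#     anode.attr = {'pos': '54.0,90.0', 'width': '0.75', 'height': '0.5'}
#     parse_anode_layout_attrs(anode)
#     # -> {'width': 54.0, 'height': 36.0, 'size': (54.0, 36.0),
#     #     'pos': np.array([54., 90.])}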
def parse_aedge_layout_attrs(aedge, translation=None):
"""
parse graphviz splineType
"""
if translation is None:
translation = np.array([0, 0])
edge_attrs = {}
apos = aedge.attr['pos']
# logger.info('apos = %r' % (apos,))
end_pt = None
start_pt = None
# if '-' in apos:
# import utool
# utool.embed()
def safeadd(x, y):
if x is None or y is None:
return None
return x + y
strpos_list = apos.split(' ')
strtup_list = [ea.split(',') for ea in strpos_list]
ctrl_ptstrs = [ea for ea in strtup_list if ea[0] not in 'es']
end_ptstrs = [ea[1:] for ea in strtup_list[0:2] if ea[0] == 'e']
start_ptstrs = [ea[1:] for ea in strtup_list[0:2] if ea[0] == 's']
assert len(end_ptstrs) <= 1
assert len(start_ptstrs) <= 1
if len(end_ptstrs) == 1:
end_pt = np.array([float(f) for f in end_ptstrs[0]])
if len(start_ptstrs) == 1:
start_pt = np.array([float(f) for f in start_ptstrs[0]])
ctrl_pts = np.array([tuple([float(f) for f in ea]) for ea in ctrl_ptstrs])
adata = aedge.attr
ctrl_pts = ctrl_pts
edge_attrs['pos'] = apos
edge_attrs['ctrl_pts'] = safeadd(ctrl_pts, translation)
edge_attrs['start_pt'] = safeadd(start_pt, translation)
edge_attrs['end_pt'] = safeadd(end_pt, translation)
edge_attrs['lp'] = safeadd(parse_point(adata.get('lp', None)), translation)
edge_attrs['label'] = adata.get('label', None)
edge_attrs['headlabel'] = adata.get('headlabel', None)
edge_attrs['taillabel'] = adata.get('taillabel', None)
edge_attrs['head_lp'] = safeadd(parse_point(adata.get('head_lp', None)), translation)
edge_attrs['tail_lp'] = safeadd(parse_point(adata.get('tail_lp', None)), translation)
return edge_attrs
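# Illustrative sketch (hypothetical 'pos' string): a graphviz splineType is a
# space-separated list of "x,y" control points, optionally preceded by an
# "e,x,y" arrowhead endpoint and/or an "s,x,y" start point.
#     aedge.attr['pos'] = 'e,100,200 10,20 30,40 50,60 70,80'
#     attrs = parse_aedge_layout_attrs(aedge)
#     # attrs['end_pt']   -> np.array([100., 200.])
#     # attrs['start_pt'] -> None (no "s," entry in this example)
#     # attrs['ctrl_pts'] -> 4x2 array of the remaining control points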
def format_anode_pos(xy, pin=True):
xx, yy = xy
return '%f,%f%s' % (xx, yy, '!' * pin)
def _get_node_size(graph, node, node_size):
if node_size is not None and node in node_size:
return node_size[node]
node_dict = ut.nx_node_dict(graph)
nattrs = node_dict[node]
scale = nattrs.get('scale', 1.0)
if 'width' in nattrs and 'height' in nattrs:
width = nattrs['width'] * scale
height = nattrs['height'] * scale
elif 'radius' in nattrs:
width = height = nattrs['radius'] * scale
else:
if 'image' in nattrs:
img_fpath = nattrs['image']
import vtool as vt
width, height = vt.image.open_image_size(img_fpath)
else:
height = width = 1100 / 50 * scale
return width, height
@profile
def draw_network2(
graph,
layout_info,
ax,
as_directed=None,
hacknoedge=False,
hacknode=False,
verbose=None,
**kwargs
):
| |
<filename>src/MSI/optimization/opt_runner.py
import MSI.simulations as sim
import re
import MSI.cti_core.cti_processor as pr
import MSI.optimization.matrix_loader as ml
import MSI.simulations.absorbance.curve_superimpose as csp
import MSI.simulations.yaml_parser as yp
import MSI.simulations.instruments.shock_tube as st
import MSI.simulations.instruments.jsr_steadystate as jsr
import MSI.simulations.instruments.flames as fl
import MSI.simulations.instruments.ignition_delay as ig
import MSI.simulations.instruments.flow_reactor as fr
import pandas as pd
import numpy as np
import os
#acts as front end to the rest of the system
# takes the data from one experiment and puts it in a dict of dicts
# that follows the format of the S matrix
# only need the last 3 elements of the interpolated absorbance
# absorbance is of form [interp_original,interp_abs_kinetic_sens,interp_abs_phys_sens,interp_abs_coef_sens]
# where each list element is a dict. keys are wavelengths, values are the sensitivities for that wavelength
# psens should match interpolated_tp and species sens in size, but again is dict with wavelength keys
# index from 1, so if you have 3 experiments, their indices will be 1,2,3
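# Illustrative sketch of the shapes described above (hypothetical wavelength
# keys and placeholder arrays, shown only for documentation):
#     interpolated_absorbance = [interp_original, interp_abs_kinetic_sens,
#                                interp_abs_phys_sens, interp_abs_coef_sens]
#     interp_abs_kinetic_sens = {227: sens_array_for_227nm, 215: sens_array_for_215nm}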
class Optimization_Utility(object):
def __init__(self,yaml_list=None):
self.matrix = None
self.yaml_list=yaml_list
def build_single_exp_dict(self,exp_index:int,
simulation,
interpolated_kinetic_sens:dict,
interpolated_tp_sens:list,
interpolated_species_sens:list,
interpolated_absorbance:list=[],
experimental_data:list =[],
absorbance_experimental_data:list=[],
time_history_interpolated_against_absorbance_experiment:dict={},
absorbance_calculated_from_model=None,
yaml_dict:dict={},
interpolated_time_shift_sens=None,
interpolated_abs_time_shift = None):
exp_dict = {}
exp_dict['index'] = exp_index
exp_dict['simulation'] = simulation
#print(yaml_dict,'wtf is going on')
#print('HELLO I AM TRAPPED IN A COMPUTER')
if interpolated_kinetic_sens==None:
exp_dict['ksens'] = None
exp_dict['temperature'] = None
exp_dict['pressure'] = None
exp_dict['species'] = None
else:
#print('I LIKE POTATOES')
exp_dict['ksens'] = interpolated_kinetic_sens
exp_dict['temperature'] = interpolated_tp_sens[0]
exp_dict['pressure'] = interpolated_tp_sens[1]
exp_dict['species'] = interpolated_species_sens
exp_dict['simulation_type'] = simulation.fullParsedYamlFile['simulationType']
exp_dict['experiment_type'] = simulation.fullParsedYamlFile['experimentType']
exp_dict['observables'] = simulation.observables
#needs to be in the order of mole fraction csv files + concentration csv files
exp_dict['experimental_data'] = experimental_data
# start here
#print(exp_dict['simulation_type'])
if re.match('[Ss]pecies[- ][Pp]rofile',simulation.fullParsedYamlFile['experimentType']) and re.match('[Ss]hock [Tt]ube',simulation.fullParsedYamlFile['simulationType']):
exp_dict['concentration_observables'] = simulation.concentrationObservables
exp_dict['mole_fraction_observables'] = simulation.moleFractionObservables
exp_dict['time_shift'] = interpolated_time_shift_sens
exp_dict['uncertainty'] = self.build_uncertainty_shock_tube_dict(exp_dict['simulation'].fullParsedYamlFile)
exp_dict['simulation_type'] = simulation.fullParsedYamlFile['simulationType']
exp_dict['flame_speed_observables']= [None]
exp_dict['ignition_delay_observables'] = [None]
#decide how we want to build uncertainty dict and if we want to pass in the parsed yaml file?
elif re.match('[Ii]gnition[- ][Dd]elay',simulation.fullParsedYamlFile['experimentType']) and re.match('[Ss]hock[- ][Tt]ube',simulation.fullParsedYamlFile['simulationType']):
exp_dict['time_shift'] = interpolated_time_shift_sens
exp_dict['uncertainty']= self.build_uncertainty_ignition_delay_dict(exp_dict['simulation'].fullParsedYamlFile)
exp_dict['flame_speed_observables']= [None]
exp_dict['concentration_observables'] = [None]
exp_dict['mole_fraction_observables'] = [None]
exp_dict['ignition_delay_observables'] = simulation.ignitionDelayObservables
exp_dict['conditions_dict_list'] = simulation.fullParsedYamlFile['conditions_dict_list']
exp_dict['conditions_to_run']=simulation.fullParsedYamlFile['conditions_to_run']
elif re.match('[Jj][Ss][Rr]',yaml_dict['simulationType']) or re.match('[Jj]et[- ][Ss]tirred[- ][Rr]eactor',yaml_dict['simulationType']):
exp_dict['concentration_observables'] = simulation.concentrationObservables
exp_dict['mole_fraction_observables'] = simulation.moleFractionObservables
exp_dict['restime_sens']=interpolated_tp_sens[2]
exp_dict['volume']=yaml_dict['volume']
exp_dict['residence_time']=yaml_dict['residence_time']
exp_dict['uncertainty']=self.build_uncertainty_jsr_dict(exp_dict['simulation'].fullParsedYamlFile)
exp_dict['simulation_type'] = yaml_dict['simulationType']
exp_dict['flame_speed_observables']= [None]
exp_dict['ignition_delay_observables'] = [None]
elif re.match('[Ff]lame[ -][Ss]peed',yaml_dict['simulationType']) and re.match('[Oo][Nn][Ee]|[1][ -][dD][ -][Ff]lame',yaml_dict['experimentType']):
exp_dict['flame_speed_observables']= simulation.flameSpeedObservables
exp_dict['concentration_observables'] = [None]
exp_dict['mole_fraction_observables'] = [None]
exp_dict['uncertainty']=self.build_uncertainty_flame_speed_dict(exp_dict['simulation'].fullParsedYamlFile)
exp_dict['ignition_delay_observables'] = [None]
elif re.match('[Ss]pecies[- ][Pp]rofile',simulation.fullParsedYamlFile['experimentType']) and re.match('[Ff]low[ -][Rr]eactor',simulation.fullParsedYamlFile['simulationType']):
exp_dict['concentration_observables'] = simulation.concentrationObservables
exp_dict['mole_fraction_observables'] = simulation.moleFractionObservables
exp_dict['time_shift'] = interpolated_time_shift_sens
exp_dict['uncertainty'] = self.build_uncertainty_flow_reactor_dict(exp_dict['simulation'].fullParsedYamlFile)
exp_dict['simulation_type'] = simulation.fullParsedYamlFile['simulationType']
exp_dict['flame_speed_observables']= [None]
exp_dict['ignition_delay_observables'] = [None]
elif re.match('[Ss]pecies[- ][Pp]rofile',simulation.fullParsedYamlFile['experimentType']) and re.match('[Vv]ariable[ -][Pp]ressure[ -][Ss]hock [- ][Tt]ube',simulation.fullParsedYamlFile['simulationType']):
exp_dict['concentration_observables'] = simulation.concentrationObservables
exp_dict['mole_fraction_observables'] = simulation.moleFractionObservables
exp_dict['time_shift'] = interpolated_time_shift_sens
exp_dict['uncertainty'] = self.build_uncertainty_shock_tube_dict(exp_dict['simulation'].fullParsedYamlFile)
exp_dict['simulation_type'] = simulation.fullParsedYamlFile['simulationType']
exp_dict['flame_speed_observables']= [None]
exp_dict['ignition_delay_observables'] = [None]
elif re.match('[Ii]gnition[- ][Dd]elay',simulation.fullParsedYamlFile['experimentType']) and re.match('[Rr][Cc][Mm]',simulation.fullParsedYamlFile['simulationType']):
exp_dict['time_shift'] = interpolated_time_shift_sens
exp_dict['uncertainty']= self.build_uncertainty_ignition_delay_dict(exp_dict['simulation'].fullParsedYamlFile)
exp_dict['flame_speed_observables']= [None]
exp_dict['concentration_observables'] = [None]
exp_dict['mole_fraction_observables'] = [None]
exp_dict['ignition_delay_observables'] = simulation.ignitionDelayObservables
exp_dict['conditions_dict_list'] = simulation.fullParsedYamlFile['conditions_dict_list']
exp_dict['conditions_to_run']=simulation.fullParsedYamlFile['conditions_to_run']
if len(interpolated_absorbance) != 0:
exp_dict['absorbance_model_data'] = interpolated_absorbance[0]
exp_dict['absorbance_ksens'] = interpolated_absorbance[1]
exp_dict['absorbance_psens'] = interpolated_absorbance[2]
exp_dict['absorbance_time_shift'] = interpolated_abs_time_shift
exp_dict['perturbed_coef'] = interpolated_absorbance[3]
exp_dict['absorbance_observables'] = simulation.absorbanceObservables
exp_dict['absorbance_experimental_data'] = absorbance_experimental_data
exp_dict['absorbance_calculated_from_model'] = absorbance_calculated_from_model
exp_dict['time_history_interpolated_against_abs'] = time_history_interpolated_against_absorbance_experiment
return exp_dict
def load_exp_from_file(self,yaml_exp_file_list = []):
for file in yaml_exp_file_list:
continue
def build_uncertainty_flame_speed_dict(self,experiment_dictionary:dict={}):
uncertainty_dict={}
uncertainty_dict['temperature_relative_uncertainty'] = experiment_dictionary['inletTemperatureRelativeUncertainty']
uncertainty_dict['pressure_relative_uncertainty'] = experiment_dictionary['pressureRelativeUncertainty']
uncertainty_dict['species_relative_uncertainty'] = {'dictonary_of_values':experiment_dictionary['relativeUncertaintyBySpecies'],
'species':experiment_dictionary['species'], 'type_dict':experiment_dictionary['typeDict']}
uncertainty_dict['flame_speed_relative_uncertainty'] = experiment_dictionary['flameSpeedRelativeUncertainity']
uncertainty_dict['flame_speed_absolute_uncertainty'] = experiment_dictionary['flameSpeedAbsoluteUncertainty']
return uncertainty_dict
def build_uncertainty_ignition_delay_dict(self,experiment_dictionary:dict={}):
uncertainty_dict={}
uncertainty_dict['temperature_relative_uncertainty'] = experiment_dictionary['tempRelativeUncertainty']
uncertainty_dict['pressure_relative_uncertainty'] = experiment_dictionary['pressureRelativeUncertainty']
uncertainty_dict['species_relative_uncertainty'] = {'dictonary_of_values':experiment_dictionary['relativeUncertaintyBySpecies'],
'type_dict':experiment_dictionary['typeToSpeciesDict']}
uncertainty_dict['ignition_delay_relative_uncertainty'] = experiment_dictionary['ignitionDelayRelativeUncertainty']
uncertainty_dict['ignition_delay_absolute_uncertainty'] = experiment_dictionary['ignitionDelayAbsoluteUncertainty']
uncertainty_dict['time_shift_absolute_uncertainty'] = experiment_dictionary['timeShiftUncertainty']
return uncertainty_dict
def build_uncertainty_jsr_dict(self,experiment_dictionary:dict={}):
uncertainty_dict={}
#Don't worry about absorbance for now
uncertainty_dict['temperature_relative_uncertainty'] = experiment_dictionary['tempRelativeUncertainty']
uncertainty_dict['pressure_relative_uncertainty'] = experiment_dictionary['pressureRelativeUncertainty']
uncertainty_dict['species_relative_uncertainty'] = {'dictonary_of_values':experiment_dictionary['speciesUncertaintys'],
'species':experiment_dictionary['speciesNames']}
uncertainty_dict['restime_relative_uncertainty'] = experiment_dictionary['residenceTimeRelativeUncertainty']
uncertainty_dict['mole_fraction_relative_uncertainty'] = experiment_dictionary['moleFractionRelativeUncertainty']
uncertainty_dict['mole_fraction_absolute_uncertainty'] = experiment_dictionary['moleFractionAbsoluteUncertainty']
uncertainty_dict['concentration_relative_uncertainty'] = experiment_dictionary['concentrationRelativeUncertainty']
uncertainty_dict['concentration_absolute_uncertainty'] = experiment_dictionary['concentrationAbsoluteUncertainty']
return uncertainty_dict
def build_uncertainty_flow_reactor_dict(self,experiment_dictionary:dict={}):
uncertainty_dict={}
#Don't worry about absorbance for now
uncertainty_dict['temperature_relative_uncertainty'] = experiment_dictionary['tempRelativeUncertainty']
uncertainty_dict['pressure_relative_uncertainty'] = experiment_dictionary['pressureRelativeUncertainty']
uncertainty_dict['species_relative_uncertainty'] = {'dictonary_of_values':experiment_dictionary['speciesUncertaintys'],
'species':experiment_dictionary['speciesNames']}
uncertainty_dict['mole_fraction_relative_uncertainty'] = experiment_dictionary['moleFractionRelativeUncertainty']
uncertainty_dict['mole_fraction_absolute_uncertainty'] = experiment_dictionary['moleFractionAbsoluteUncertainty']
uncertainty_dict['concentration_relative_uncertainty'] = experiment_dictionary['concentrationRelativeUncertainity']
uncertainty_dict['concentration_absolute_uncertainty'] = experiment_dictionary['concentrationAbsoluteUncertainty']
uncertainty_dict['time_shift_uncertainty'] = experiment_dictionary['timeShiftUncertainty']
return uncertainty_dict
def build_uncertainty_shock_tube_dict(self,experiment_dictonarie:dict={}):
uncertainty_dict = {}
#need to make an exception to this if there is no absorption in dict
if 'coupledCoefficients' in experiment_dictonarie.keys():
coupled_coefficients = experiment_dictonarie['coupledCoefficients']
coupled_coefficients = [item for sublist in coupled_coefficients for item in sublist]
uncertain_parameters_ones = experiment_dictonarie['uncertaintyParameterOnes']
uncertain_parameter_twos = experiment_dictonarie['uncertaintyParameterTwos']
zip_uncertain_paramters = list(zip(uncertain_parameters_ones,uncertain_parameter_twos))
dict_of_coupled_unc_and_param = dict(zip(coupled_coefficients,zip_uncertain_paramters))
uncertainty_dict['coupled_coef_and_uncertainty'] = dict_of_coupled_unc_and_param
uncertainty_dict['absorbance_relative_uncertainty'] = experiment_dictonarie['absorbanceRelativeUncertainty']
uncertainty_dict['absorbance_absolute_uncertainty'] = experiment_dictonarie['absorbanceAbsoluteUncertainty']
#finish making this dictionary
uncertainty_dict['temperature_relative_uncertainty'] = experiment_dictonarie['tempRelativeUncertainty']
uncertainty_dict['pressure_relative_uncertainty'] = experiment_dictonarie['pressureRelativeUncertainty']
uncertainty_dict['species_relative_uncertainty'] = {'dictonary_of_values':experiment_dictonarie['speciesUncertaintys'],
'species':experiment_dictonarie['speciesNames']}
uncertainty_dict['time_shift_absolute_uncertainty'] = experiment_dictonarie['timeShiftUncertainty']
uncertainty_dict['mole_fraction_relative_uncertainty'] = experiment_dictonarie['moleFractionRelativeUncertainty']
uncertainty_dict['mole_fraction_absolute_uncertainty'] = experiment_dictonarie['moleFractionAbsoluteUncertainty']
uncertainty_dict['concentration_relative_uncertainty'] = experiment_dictonarie['concentrationRelativeUncertainity']
uncertainty_dict['concentration_absolute_uncertainty'] = experiment_dictonarie['concentrationAbsoluteUncertainty']
return uncertainty_dict
def running_full_flame_speed(self,processor=None,
experiment_dictionary:dict={},
kineticSens = 1,
physicalSens =1,
dk =0.01,
exp_number = 1):
# flame_speed=fl.flamespeed_multi_condition(pressures:float,
# temperatures:float,
# observables:list,
# kineticSens:int,
# physicalSens:int,
# conditions:dict,
# thermalBoundary='Adiabatic',
# processor:ctp.Processor=None,
# save_physSensHistories=0,
# moleFractionObservables:list=[],
# absorbanceObservables:list=[],
# concentrationObservables:list=[],
# fullParsedYamlFile:dict={},
# flame_width:float=1.0,
# save_timeHistories:int=0,
# T_profile=pd.DataFrame(columns=['z','T']),
# soret=True,
# tol_ss=[1.0e-5, 1.0e-13],
# tol_ts=[1.0e-4, 1.0e-10],
# loglevel=1,
# flametype='Flame Speed',
# cti_path="")
experiment = 'not yet installed'
return experiment
def running_ignition_delay(self,processor=None,
experiment_dictionary:dict={},
kineticSens=1,
physicalSens=1,
dk=0.01,
exp_number=1):
if 'volumeTraceCsvList' in experiment_dictionary.keys():
ig_delay=ig.ignition_delay_wrapper(pressures=experiment_dictionary['pressures'],
temperatures=experiment_dictionary['temperatures'],
observables=experiment_dictionary['observables'],
kineticSens=kineticSens,
physicalSens=physicalSens,
conditions=experiment_dictionary['conditions_to_run'],
thermalBoundary=experiment_dictionary['thermalBoundary'],
mechanicalBoundary=experiment_dictionary['mechanicalBoundary'],
processor=processor,
cti_path="",
save_physSensHistories=1,
fullParsedYamlFile=experiment_dictionary,
save_timeHistories=1,
log_file=False,
log_name='log.txt',
timeshift=experiment_dictionary['time_shift'],
initialTime=experiment_dictionary['initialTime'],
finalTime=experiment_dictionary['finalTime'],
target=experiment_dictionary['target'],
target_type=experiment_dictionary['target_type'],
n_processors=2,
volumeTraceList = experiment_dictionary['volumeTraceCsvList'])
else:
ig_delay=ig.ignition_delay_wrapper(pressures=experiment_dictionary['pressures'],
temperatures=experiment_dictionary['temperatures'],
observables=experiment_dictionary['observables'],
kineticSens=kineticSens,
physicalSens=physicalSens,
conditions=experiment_dictionary['conditions_to_run'],
thermalBoundary=experiment_dictionary['thermalBoundary'],
mechanicalBoundary=experiment_dictionary['mechanicalBoundary'],
processor=processor,
cti_path="",
save_physSensHistories=1,
fullParsedYamlFile=experiment_dictionary,
save_timeHistories=1,
log_file=False,
log_name='log.txt',
timeshift=experiment_dictionary['time_shift'],
initialTime=experiment_dictionary['initialTime'],
finalTime=experiment_dictionary['finalTime'],
target=experiment_dictionary['target'],
target_type=experiment_dictionary['target_type'],
n_processors=2)
soln,ksen=ig_delay.run()
if 'volumeTraceCsvList' in experiment_dictionary.keys():
int_ksens_exp_mapped= ig_delay.map_and_interp_ksens()
tsoln=ig_delay.sensitivity_adjustment(temp_del = dk)
psoln=ig_delay.sensitivity_adjustment(pres_del = dk)
diluent=[]
if 'Diluent' in experiment_dictionary['typeToSpeciesDict'].keys() or 'diluent' in experiment_dictionary['typeToSpeciesDict'].keys():
diluent.append(experiment_dictionary['typeToSpeciesDict']['diluent'])
diluent=[item for sublist in diluent for item in sublist]
ssoln=ig_delay.species_adjustment(dk,diluents=diluent)
deltatsoln,deltatausens=ig_delay.calculate_time_shift_sens(soln['delay'].values,dtau=1e-8)
tsen=ig_delay.sensitivityCalculation(soln['delay'],tsoln['delay'])
psen=ig_delay.sensitivityCalculation(soln['delay'],psoln['delay'])
ssens=[]
# for j in range(len(experiment_dictionary['conditions_to_run'])):
for i in range(len(ssoln)):
ssens.append(ig_delay.sensitivityCalculation(soln['delay'],ssoln[i]['delay']))
species_length=len(set(experiment_dictionary['speciesNames']).difference(diluent))
list_of_ssens=[]
chunksize=int(len(ssens)/species_length)
#print(species_length,chunksize)
for i in range(species_length):
tempdata=[]
tempdata=pd.DataFrame(columns=['delay'])
tempdata['delay']=np.zeros(len(experiment_dictionary['conditions_to_run'])*len(experiment_dictionary['temperatures']))
for k in range(chunksize):
#print(ssens[i+int(k*(chunksize))]['delay'])
#print('Second array')
#print(np.array(tempdata['delay']))
tempdata['delay']=np.array(ssens[i+int(k*(chunksize))]['delay'])+np.array(tempdata['delay'])
#print(tempdata)
list_of_ssens.append(tempdata)
ssens=list_of_ssens
csv_paths = [x for x in experiment_dictionary['ignitionDelayCsvFiles'] if x is not None]
exp_data = ig_delay.importExperimentalData(csv_paths)
elif len(experiment_dictionary['temperatures']) == len(experiment_dictionary['pressures']) and len(experiment_dictionary['temperatures'])>1 and len(experiment_dictionary['pressures']) > 1 and 'volumeTraceCsvList' not in experiment_dictionary.keys():
int_ksens_exp_mapped= ig_delay.map_and_interp_ksens()
tsoln=ig_delay.sensitivity_adjustment(temp_del = dk)
psoln=ig_delay.sensitivity_adjustment(pres_del = dk)
diluent=[]
if 'Diluent' in experiment_dictionary['typeToSpeciesDict'].keys() or 'diluent' in experiment_dictionary['typeToSpeciesDict'].keys():
diluent.append(experiment_dictionary['typeToSpeciesDict']['diluent'])
diluent=[item for sublist in diluent for item in sublist]
ssoln=ig_delay.species_adjustment(dk,diluents=diluent)
deltatsoln,deltatausens=ig_delay.calculate_time_shift_sens(soln['delay'].values,dtau=1e-8)
tsen=ig_delay.sensitivityCalculation(soln['delay'],tsoln['delay'])
psen=ig_delay.sensitivityCalculation(soln['delay'],psoln['delay'])
ssens=[]
# for j in range(len(experiment_dictionary['conditions_to_run'])):
for i in range(len(ssoln)):
ssens.append(ig_delay.sensitivityCalculation(soln['delay'],ssoln[i]['delay']))
species_length=len(set(experiment_dictionary['speciesNames']).difference(diluent))
list_of_ssens=[]
chunksize=int(len(ssens)/species_length)
#print(species_length,chunksize)
for i in range(species_length):
tempdata=[]
tempdata=pd.DataFrame(columns=['delay'])
tempdata['delay']=np.zeros(len(experiment_dictionary['conditions_to_run'])*len(experiment_dictionary['temperatures']))
for k in range(chunksize):
#print(ssens[i+int(k*(chunksize))]['delay'])
#print('Second array')
#print(np.array(tempdata['delay']))
tempdata['delay']=np.array(ssens[i+int(k*(chunksize))]['delay'])+np.array(tempdata['delay'])
#print(tempdata)
list_of_ssens.append(tempdata)
ssens=list_of_ssens
csv_paths = [x for x in experiment_dictionary['ignitionDelayCsvFiles'] if x is not None]
exp_data = ig_delay.importExperimentalData(csv_paths)
else:
int_ksens_exp_mapped= ig_delay.map_and_interp_ksens()
tsoln=ig_delay.sensitivity_adjustment(temp_del = dk)
psoln=ig_delay.sensitivity_adjustment(pres_del = dk)
diluent=[]
if 'Diluent' in experiment_dictionary['typeToSpeciesDict'].keys() or 'diluent' in experiment_dictionary['typeToSpeciesDict'].keys():
diluent.append(experiment_dictionary['typeToSpeciesDict']['diluent'])
diluent=[item for sublist in diluent for item in sublist]
ssoln=ig_delay.species_adjustment(dk,diluents=diluent)
deltatsoln,deltatausens=ig_delay.calculate_time_shift_sens(soln['delay'].values,dtau=1e-8)
tsen=ig_delay.sensitivityCalculation(soln['delay'],tsoln['delay'])
psen=ig_delay.sensitivityCalculation(soln['delay'],psoln['delay'])
ssens=[]
# for j in range(len(experiment_dictionary['conditions_to_run'])):
for i in range(len(ssoln)):
ssens.append(ig_delay.sensitivityCalculation(soln['delay'],ssoln[i]['delay']))
species_length=len(set(experiment_dictionary['speciesNames']).difference(diluent))
list_of_ssens=[]
chunksize=int(len(ssens)/species_length)
#print(species_length,chunksize)
for i in range(species_length):
tempdata=[]
tempdata=pd.DataFrame(columns=['delay'])
tempdata['delay']=np.zeros(len(experiment_dictionary['conditions_to_run'])*len(experiment_dictionary['temperatures'])*len(experiment_dictionary['pressures']))
for k in range(chunksize):
#print(ssens[i+int(k*(chunksize))]['delay'])
#print('Second array')
#print(np.array(tempdata['delay']))
tempdata['delay']=np.array(ssens[i+int(k*(chunksize))]['delay'])+np.array(tempdata['delay'])
#print(tempdata)
list_of_ssens.append(tempdata)
ssens=list_of_ssens
csv_paths = [x for x in experiment_dictionary['ignitionDelayCsvFiles'] if x is not None]
exp_data = ig_delay.importExperimentalData(csv_paths)
experiment = self.build_single_exp_dict(exp_number,
ig_delay,
int_ksens_exp_mapped,
[tsen,psen],
ssens,
experimental_data = exp_data,
yaml_dict=experiment_dictionary,
interpolated_time_shift_sens=deltatausens)
return experiment
def running_flow_reactor(self,processor=None,
experiment_dictonary:dict={},
kineticSens = 1,
physicalSens = 1,
dk = 0.01,
exp_number = 1):
flow_reactor = fr.flow_reactor_wrapper(pressure = experiment_dictonary['pressure'],
temperatures = experiment_dictonary['temperatures'],
observables = experiment_dictonary['observables'],
moleFractionObservables = experiment_dictonary['moleFractionObservables'],
concentrationObservables = experiment_dictonary['concentrationObservables'],
fullParsedYamlFile = experiment_dictonary,
kineticSens=kineticSens,
physicalSens=physicalSens,
conditions=experiment_dictonary['conditions'],
thermalBoundary=experiment_dictonary['thermalBoundary'],
mechanicalBoundary=experiment_dictonary['mechanicalBoundary'],
processor=processor,
cti_path="",
save_physSensHistories=1,
save_timeHistories=1,
timeshifts=experiment_dictonary['timeShift'],
initialTime=experiment_dictonary['initialTime'],
residenceTimes=experiment_dictonary['residenceTimes'])
soln,ksen=flow_reactor.run(ksens_marker=kineticSens ,psens_marker=physicalSens)
int_ksens_exp_mapped= flow_reactor.map_and_interp_ksens()
tsoln=flow_reactor.sensitivity_adjustment(temp_del = dk)
psoln=flow_reactor.sensitivity_adjustment(pres_del = dk)
ssoln=flow_reactor.species_adjustment(dk)
tsen=flow_reactor.sensitivityCalculation(soln[flow_reactor.observables],
tsoln[flow_reactor.observables],
dk=dk)
psen=flow_reactor.sensitivityCalculation(soln[flow_reactor.observables],
psoln[flow_reactor.observables],dk=dk)
ssens=[]
for i in range(len(ssoln)):
ssens.append(flow_reactor.sensitivityCalculation(soln[flow_reactor.observables],
ssoln[i][flow_reactor.observables],
dk=dk))
time_shift_sens =[]
for i,timehist in enumerate(flow_reactor.fullTimeHistories):
time_shift_sens.append(flow_reactor.calculate_time_shift_sensitivity(flow_reactor,timehist,1e-8,flow_reactor.finalTimes[i]))
time_shift_sens_df = pd.concat(time_shift_sens,ignore_index=True)
#print(time_shift_sens_df)
csv_paths = [x for x in experiment_dictonary['moleFractionCsvFiles'] + experiment_dictonary['concentrationCsvFiles'] if x is not None]
#print(csv_paths)
exp_data = flow_reactor.importExperimentalData(csv_paths)
experiment = self.build_single_exp_dict(exp_number,
flow_reactor,
int_ksens_exp_mapped,
[tsen,psen],
ssens,
interpolated_time_shift_sens = time_shift_sens_df,
experimental_data = exp_data,
yaml_dict=experiment_dictonary)
| |
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division, print_function, unicode_literals, absolute_import
import sys,os,re
import xml.etree.ElementTree as ET
import diffUtils as DU
numTol = 1e-10 #effectively zero for our purposes
def findBranches(node,path,finished):
"""
Recursive process to convert an XML tree into a list of root-to-leaf entries
@ In, node, ET.Element, whose children need sorting
@ In, path, list(ET.Element), leading to node
@ In, finished, list(list(ET.Element)), full entries
@ Out, finished, list(list(ET.Element)), of full entries
"""
for child in node:
npath = path[:]+[child]
if len(child)==0:
finished.append(npath)
else:
finished = findBranches(child,npath,finished)
return finished
def treeToList(node):
"""
Converts XML tree to list of entries. Useful to start recursive search.
@ In, node, ET.Element, the xml tree root node to convert
@ Out, treeToList, list(list(ET.Element)), of full paths to entries in xml tree
"""
flattened = findBranches(node,[node],[])
return list(tuple(f) for f in flattened)
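# Illustrative sketch (assumed toy input, not from the test suite):
#     root = ET.fromstring('<a><b/><c><d/></c></a>')
#     treeToList(root)
#     # -> [(<a>, <b>), (<a>, <c>, <d>)]  i.e. one tuple per root-to-leaf path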
def compareListEntry(aList,bList,**kwargs):
"""
Compares flattened XML entries for equality
return bool is True if all tags, text, and attributes match, False otherwise
return qual is the fraction of matching terms
@ In, aList, list(ET.Element), first set
@ In, bList, list(ET.Element), second set
@ Out, compareListEntry, (bool, float, list), (match, quality, diff) results
"""
numMatch = 0 #number of matching points between entries
totalMatchable = 0 #total tag, text, and attributes available to match
match = True #True if entries match
diff = [] #tuple of (element, diff code, correct (a) value, test (b) value)
options = kwargs
for i in range(len(aList)):
if i > len(bList) - 1:
match = False
diff.append((bList[-1],XMLDiff.missingChildNode,aList[i].tag,None))
#could have matched the tag and attributes
totalMatchable += 1 + len(aList[i].attrib.keys())
#if text isn't empty, could have matched text, too
if aList[i].text is not None and len(aList[i].text.strip())>0: totalMatchable+=1
continue
a = aList[i]
b = bList[i]
#match tag
same,note = DU.compareStringsWithFloats(a.tag,b.tag,options["rel_err"], options["zero_threshold"], options["remove_whitespace"], options["remove_unicode_identifier"])
totalMatchable += 1
if not same:
match = False
diff.append((b,XMLDiff.notMatchTag,a.tag,b.tag))
else:
numMatch += 1
#match text
#if (a.text is None or len(a.text)>0) and (b.text is None or len(b.text)>0):
same,note = DU.compareStringsWithFloats(a.text,b.text,options["rel_err"], options["zero_threshold"], options["remove_whitespace"], options["remove_unicode_identifier"])
if not same:
match = False
diff.append((b,XMLDiff.notMatchText,str(a.text),str(b.text)))
totalMatchable += 1
else:
if not(a.text is None or a.text.strip()!=''):
numMatch += 1
totalMatchable += 1
#match attributes
for attrib in a.attrib.keys():
totalMatchable += 1
if attrib not in b.attrib.keys():
match = False
diff.append((b,XMLDiff.missingAttribute,attrib,None))
continue
same,note = DU.compareStringsWithFloats(a.attrib[attrib],b.attrib[attrib],options["rel_err"], options["zero_threshold"], options["remove_whitespace"], options["remove_unicode_identifier"])
if not same:
match = False
diff.append((b,XMLDiff.notMatchAttribute,(a,attrib),(b,attrib)))
else:
numMatch += 1
#note attributes in b not in a
for attrib in b.attrib.keys():
if attrib not in a.attrib.keys():
match = False
diff.append((b,XMLDiff.extraAttribute,attrib,None))
totalMatchable += 1
# note elements in b not in a
if len(bList) > len(aList):
match = False
for j in range(i,len(bList)):
diff.append((aList[-1],XMLDiff.extraChildNode,bList[j].tag,None))
#count tag and attributes
totalMatchable += 1 + len(bList[j].attrib.keys())
#if text isn't empty, count text, too
if bList[i].text is not None and len(bList[i].text.strip())>0: totalMatchable+=1
return (match,float(numMatch)/float(totalMatchable),diff)
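# Illustrative sketch (the option values below are assumed; the callers in this
# module normally fill them in via diffUtils.setDefaultOptions):
#     aEntry = treeToList(ET.fromstring('<root><x>1.0</x></root>'))[0]
#     bEntry = treeToList(ET.fromstring('<root><x>1.0000000001</x></root>'))[0]
#     match, quality, diff = compareListEntry(aEntry, bEntry, rel_err=1e-6,
#         zero_threshold=None, remove_whitespace=False,
#         remove_unicode_identifier=False)
#     # match is True and diff is empty when tags, text and attributes agree
#     # within the float tolerance used by diffUtils.compareStringsWithFloats.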
def compareUnorderedElement(a,b,*args,**kwargs):
"""
Compares two element trees and returns (same,message)
where same is true if they are the same,
and message is a list of the differences.
Uses list of tree entries to find best match, instead of climbing the tree
@ In, a, ET.Element, the first element
@ In, b, ET.Element, the second element
@ Out, compareUnorderedElement, (bool,[string]), results of comparison
"""
same = True
message = []
options = kwargs
matchvals = {}
diffs = {}
DU.setDefaultOptions(options)
def failMessage(*args):
"""
adds the fail message to the list
@ In, args, list, The arguments to the fail message (will be converted with str())
@ Out, failMessage, (bool,string), results
"""
printArgs = []
printArgs.extend(args)
argsExpanded = " ".join([str(x) for x in printArgs])
message.append(argsExpanded)
if a.text != b.text:
succeeded, note = DU.compareStringsWithFloats(a.text, b.text, options["rel_err"], options["zero_threshold"], options["remove_whitespace"], options["remove_unicode_identifier"])
if not succeeded:
same = False
failMessage(note)
return (same, message)
aList = treeToList(a)
bList = treeToList(b)
#search a for matches in b
for aEntry in aList:
matchvals[aEntry] = {}
diffs[aEntry] = {}
for bEntry in bList:
same,matchval,diff = compareListEntry(aEntry,bEntry,**options)
if same:
bList.remove(bEntry)
del matchvals[aEntry]
del diffs[aEntry]
#since we found the match, remove from other near matches
for closeKey in diffs.keys():
if bEntry in diffs[closeKey].keys():
del diffs[closeKey][bEntry]
del matchvals[closeKey][bEntry]
break
else:
matchvals[aEntry][bEntry] = matchval
diffs[aEntry][bEntry] = diff
if len(matchvals)==0: #all matches found
return (True,'')
else:
note = ''
for unmatched,close in matchvals.items():
#print the path without a match
note+='No match for '+'/'.join(list(m.tag for m in unmatched))+'\n'
#print the tree of the nearest match
note+=' Nearest unused match: '
close = sorted(list(close.items()),key=lambda x:x[1],reverse=True)
if len(close) > 1:
closest = '/'.join(list(c.tag for c in close[0][0]))
else:
closest = '-none found-'
note+=' '+ closest +'\n'
#print what was different between them
if len(close) > 1:
diff = diffs[unmatched][close[0][0]]
for b,code,right,miss in diff:
if b is None:
b = str(b)
if code is None:
code = str(code)
if right is None:
right = str(right)
if miss is None:
miss = str(miss)
if code == XMLDiff.missingChildNode:
note+=' <'+b.tag+'> is missing child node: <'+right+'> vs <'+miss+'>\n'
elif code == XMLDiff.missingAttribute:
note+=' <'+b.tag+'> is missing attribute: "'+right+'"\n'
elif code == XMLDiff.extraChildNode:
note+=' <'+b.tag+'> has extra child node: <'+right+'>\n'
elif code == XMLDiff.extraAttribute:
note+=' <'+b.tag+'> has extra attribute: "'+right+'" = "'+b.attrib[right]+'"\n'
elif code == XMLDiff.notMatchTag:
note+=' <'+b.tag+'> tag does not match: <'+right+'> vs <'+miss+'>\n'
elif code == XMLDiff.notMatchAttribute:
note+=' <'+b.tag+'> attribute does not match: "'+right[1]+'" = "'+right[0].attrib[right[1]]+'" vs "'+miss[0].attrib[miss[1]]+'"\n'
elif code == XMLDiff.notMatchText:
note+=' <'+b.tag+'> text does not match: "'+right+'" vs "'+miss+'"\n'
else:
note+=' UNRECOGNIZED OPTION: "'+b.tag+'" "'+str(code)+'": "'+str(right)+'" vs "'+str(miss)+'"\n'
return (False,[note])
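# Illustrative usage sketch, not part of the original module: shows how the
# unordered comparison above might be invoked on two small XML snippets,
# relying on the default options filled in by DU.setDefaultOptions.
def exampleCompareUnordered():
  """
    Minimal demonstration of compareUnorderedElement on order-shuffled XML.
    @ In, None
    @ Out, exampleCompareUnordered, (bool,[string]), results of comparison
  """
  import xml.etree.ElementTree as ET
  gold = ET.fromstring('<root><a>1.0</a><b x="2"/></root>')
  test = ET.fromstring('<root><b x="2"/><a>1.0</a></root>')
  # child order differs, so only an unordered comparison is expected to
  # report these two trees as the same
  return compareUnorderedElement(gold, test)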
def compareOrderedElement(a,b,*args,**kwargs):
"""
Compares two element trees and returns (same,message) where same is true if they are the same, and message is a list of the differences
@ In, a, ET.Element, the first element tree
@ In, b, ET.Element, the second element tree
@ In, args, dict, arguments
@ In, kwargs, dict, keyword arguments
accepted args:
- none -
accepted kwargs:
path: a string to describe where the element trees are located (mainly
used recursively)
@ Out, compareOrderedElement, (bool,[string]), results of comparison
"""
same = True
message = []
options = kwargs
path = kwargs.get('path','')
counter = kwargs.get('counter',0)
DU.setDefaultOptions(options)
def failMessage(*args):
"""
adds the fail message to the list
@ In, args, list, The arguments to the fail message (will be converted with str())
      @ Out, None
"""
printArgs = [path]
printArgs.extend(args)
argsExpanded = " ".join([str(x) for x in printArgs])
message.append(argsExpanded)
if a.tag != b.tag:
same = False
failMessage("mismatch tags ",a.tag,b.tag)
else:
path += a.tag + "/"
if a.text != b.text:
succeeded, note = DU.compareStringsWithFloats(a.text, b.text, options["rel_err"], options["zero_threshold"], options["remove_whitespace"], options["remove_unicode_identifier"])
if not succeeded:
same = False
failMessage(note)
return (same, message)
differentKeys = set(a.keys()).symmetric_difference(set(b.keys()))
sameKeys = set(a.keys()).intersection(set(b.keys()))
if len(differentKeys) != 0:
same = False
failMessage("mismatch attribute keys ",differentKeys)
for key in sameKeys:
if a.attrib[key] != b.attrib[key]:
same = False
failMessage("mismatch attribute ",key,a.attrib[key],b.attrib[key])
if len(a) != len(b):
same = False
failMessage("mismatch number of children ",len(a),len(b))
else:
if a.tag == b.tag:
#find all matching XML paths
#WARNING: this will mangle the XML, so other testing should happen above this!
found=[]
for i in range(len(a)):
subOptions = dict(options)
subOptions["path"] = path
(sameChild,messageChild) = compareOrderedElement(a[i],b[i],*args,**subOptions)
if sameChild: found.append((a[i],b[i]))
same = same and sameChild
#prune matches from trees
for children in found:
a.remove(children[0])
b.remove(children[1])
#once all pruning done, error on any remaining structure
if counter==0: #on head now, recursion is finished
if len(a)>0:
aString = ET.tostring(a)
if len(aString) > 80:
message.append('Branches in gold not matching test...\n'+path)
else:
message.append('Branches in gold not matching test...\n'+path+
" "+aString)
if len(b)>0:
bString = ET.tostring(b)
if len(bString) > 80:
message.append('Branches in test not matching gold...\n'+path)
else:
message.append('Branches in test not matching gold...\n'+path+
                         " "+bString)
# File: recsim_ng/lib/tensorflow/log_probability.py
# coding=utf-8
# Copyright 2021 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Compute the joint log-probability of a Network given an observation."""
import functools
from typing import Callable, Collection, Mapping, Sequence, Text
import edward2 as ed # type: ignore
from recsim_ng.core import network as network_lib
from recsim_ng.core import value as value_lib
from recsim_ng.core import variable as variable_lib
from recsim_ng.lib import data
from recsim_ng.lib.tensorflow import field_spec
from recsim_ng.lib.tensorflow import runtime
import tensorflow as tf
FieldSpec = field_spec.FieldSpec
ValueSpec = value_lib.ValueSpec
FieldValue = value_lib.FieldValue
Value = value_lib.Value
Dependency = variable_lib.Dependency
ValueDef = variable_lib.ValueDef
Variable = variable_lib.Variable
Network = network_lib.Network
NetworkValue = network_lib.NetworkValue
NetworkValueTrajectory = runtime.NetworkValueTrajectory
_OBSERVATION_INDEX_FIELD = "__log_probability_observation_index"
def log_probability_from_value_trajectory(
variables,
value_trajectory,
num_steps,
graph_compile = True):
"""Log probability of a trajectory of network outputs.
Provides a direct interface to evaluate the outputs of a network simulation,
for example:
```
variables = story()
network = network_lib.Network(variables)
tf_runtime = runtime.TFRuntime(network)
trajectory = tf_runtime.trajectory(length=5)
log_p = log_probability_from_value_trajectory(variables, trajectory, 4)
```
Args:
variables: A collection of `Variable`s defining a dynamic Bayesian network
(DBN).
value_trajectory: A trajectory generated from `TFRuntime.trajectory`.
num_steps: The number of time steps over which to measure the probability.
graph_compile: Boolean indicating whether the log prob computation is run in
graph mode.
Returns:
A Tensor like that returned from `tfp.distributions.Distribution.log_prob`.
"""
variables = tuple(variables)
observations = replay_variables(variables, value_trajectory)
return log_probability(
variables, observations, num_steps, graph_compile=graph_compile)
def replay_variables(
variables,
value_trajectory):
"""Trajectory replay variables for log probability computation.
Given a sequence of variables and a trajectory of observed values of these
  variables, this function constructs a sequence of observation variables
  corresponding to the simulation variables, replaying their logged values.
Args:
variables: A sequence of `Variable`s defining a dynamic Bayesian network
(DBN).
value_trajectory: A trajectory generated from `TFRuntime.trajectory`.
Returns:
A sequence of `Variable`.
"""
observations = []
for var in variables:
obs = data.data_variable(
name=var.name + " obs",
spec=var.spec,
data_sequence=data.SlicedValue(value=value_trajectory[var.name]),
data_index_field=_OBSERVATION_INDEX_FIELD)
observations.append(obs)
return observations
def log_prob_variables_from_observation(
variables,
observation):
"""Log probability variables for a sequence of observational data.
  Given a sequence of simulation variables and a corresponding sequence of
  observation variables (e.g. as generated by `replay_variables`), this function
  generates a sequence of log probability variables corresponding to the
simulation-observation variable pairs. A log probability variable has the
same fields as its simulation variable with values corresponding to
log p(observed field value | observations of dependencies), where p is
the probability mass / density function extracted from the variable's `fn`.
Deterministic field names are assigned a scalar value of 0.
Args:
variables: A sequence of `Variable`s defining a dynamic Bayesian network
(DBN).
observation: A sequence of `Variable`s that corresponds one-to-one with
`variables` and which defines an observation of the DBN.
Returns:
A sequence of `Variable`.
Throws:
ValueError if the number of simulation variables does not correspond to the
number of observation variables.
"""
if len(variables) != len(observation):
raise ValueError(
"number of observation variabbles ({}) does not match number of"
" variables ({})".format(len(observation), len(variables)))
observation_name = {
var.name: obs.name for var, obs in zip(variables, observation)
}
collision = set(observation_name).intersection(set(observation_name.values()))
if collision:
raise ValueError(
"variables and observation variables share the same names: {}".format(
collision))
log_prob_transformation = functools.partial(
_log_probability_from_observations, observation_name=observation_name)
log_prob_vars = list(map(log_prob_transformation, variables))
return log_prob_vars
def log_prob_variables_from_direct_output(
variables):
"""Log probability variables for outputs at simulation time.
Given a sequence of simulation variables, this function generates a sequence
of log probability variables containing the log probabilities of the values
of those fields of the variables which are stochastically generated. I.e.
the log probability variable contains log(p(X)) where X is the corresponding
field of the simulation variable. Deterministic field names are assigned a
scalar value of 0.
Args:
variables: A sequence of `Variable`s defining a dynamic Bayesian network
(DBN).
Returns:
A sequence of `Variable`.
Throws:
ValueError if the number of simulation variables does not correspond to the
number of observation variables.
"""
return [_log_probability_from_direct_output(var) for var in variables]
def total_log_prob_accumulator_variable(
log_prob_vars):
"""Accumulated joint log probability variable."""
log_prob_vars = list(log_prob_vars) # make sure ordering is stable.
log_prob_accum = Variable(
name="total_log_prob_accum", spec=ValueSpec(accum=FieldSpec()))
def summation(*log_probs):
reduced_log_probs = []
for value in log_probs:
reduced_log_probs.extend(
[tf.reduce_sum(v) for v in value.as_dict.values()])
return Value(accum=sum(reduced_log_probs))
def accumulate(prev, *log_probs):
return Value(accum=prev.get("accum") + summation(*log_probs).get("accum"))
log_prob_accum.initial_value = variable_lib.value(
fn=summation, dependencies=log_prob_vars)
log_prob_accum.value = variable_lib.value(
fn=accumulate, dependencies=[log_prob_accum.previous] + log_prob_vars)
return log_prob_accum
def log_prob_accumulator_variable(log_prob_var):
"""Temporal accumulation of log probability variables.
  Given a log probability variable, outputs a temporal per-field accumulator
  of the log probability values of the variable up to the current time step.
Args:
log_prob_var: An instance of `Variable` computing the per-time-step log
      probability of a simulation-observation variable pair (e.g. as generated
      by `log_prob_variables_from_observation`).
Returns:
A `Variable` outputting the per-field sum of all values of the input
variable up to the current time-step.
"""
log_prob_accum = Variable(
name=log_prob_var.name + "_accum", spec=log_prob_var.spec)
def accumulate_fields(prev, log_prob):
accumulated_values = {}
for field_name, value in log_prob.as_dict.items():
prev_value = prev.get(field_name)
if not isinstance(prev_value, tf.Tensor):
prev_value = tf.constant(prev_value)
if not isinstance(value, tf.Tensor):
value = tf.constant(value)
shape_mismatch_msg = (
f"The log probability of field {field_name} of {log_prob_var.name}"
f" changes shape during iteration from {prev_value.shape} to"
f" {value.shape}.")
tf.debugging.assert_equal(
tf.shape(prev_value), tf.shape(value), message=shape_mismatch_msg)
accumulated_values[field_name] = prev_value + value
    # Return the sums computed (and shape-checked) above instead of recomputing them.
    return Value(**accumulated_values)
log_prob_accum.initial_value = variable_lib.value(
fn=lambda log_prob: log_prob, dependencies=[log_prob_var])
log_prob_accum.value = variable_lib.value(
fn=accumulate_fields,
dependencies=[log_prob_accum.previous, log_prob_var])
return log_prob_accum
def log_prob_accumulator_variables(
log_prob_vars):
"""List version of `log_prob_accumulator_variable`."""
return [log_prob_accumulator_variable(lpvar) for lpvar in log_prob_vars]
def log_probability(variables,
observation,
num_steps,
graph_compile = True):
"""Returns the joint log probability of an observation given a network.
Please note that the correctness of the result requires that all of the value
functions of all the `Variable`s create `ed.RandomVariable` objects in a
stable order. In other words, if a value function is invoked twice, it will
create logically corresponding `ed.RandomVariable` objects in the same order.
Args:
variables: A sequence of `Variable`s defining a dynamic Bayesian network
(DBN).
observation: A sequence of `Variable`s that corresponds one-to-one with
`variables` and which defines an observation of the DBN.
num_steps: The number of time steps over which to measure the probability.
graph_compile: Boolean indicating whether the computation should be run in
graph mode.
Returns:
A Tensor like that returned from `tfp.distributions.Distribution.log_prob`.
"""
log_prob_vars = log_prob_variables_from_observation(variables, observation)
accumulator = total_log_prob_accumulator_variable(log_prob_vars)
tf_runtime = runtime.TFRuntime(
network=Network(
variables=list(observation) + list(log_prob_vars) + [accumulator]),
graph_compile=graph_compile)
return tf_runtime.execute(num_steps)["total_log_prob_accum"].get("accum")
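# Illustrative sketch, not part of the original module: scoring a simulation's
# own sampled outputs via log_prob_variables_from_direct_output. `story` is a
# hypothetical callable returning the simulation `Variable`s.
def _example_direct_output_log_prob(story, num_steps=5):
  variables = list(story())
  log_prob_vars = log_prob_variables_from_direct_output(variables)
  accumulator = total_log_prob_accumulator_variable(log_prob_vars)
  tf_runtime = runtime.TFRuntime(
      network=Network(variables=variables + log_prob_vars + [accumulator]))
  # Mirrors the wiring in log_probability above, but without replaying logged
  # observations: log probabilities come from the values sampled at runtime.
  return tf_runtime.execute(num_steps)["total_log_prob_accum"].get("accum")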
def _log_probability_from_direct_output(variable):
"""Generates log probability vars from the outputs of a given variable."""
log_prob_spec = {
field_name: FieldSpec() for field_name in variable.spec.as_dict.keys()
}
log_prob_variable = Variable(
name=variable.name + "_log_prob", spec=ValueSpec(**log_prob_spec))
def get_log_probs(value):
def lp_fn(field):
if not isinstance(field, ed.RandomVariable):
return tf.constant(0.0)
return field.distribution.log_prob(field.value)
return value.map(lp_fn)
log_prob_variable.value = variable_lib.value(
fn=get_log_probs, dependencies=(variable,))
return log_prob_variable
def _log_probability_from_observations(
variable, observation_name):
"""Returns a `Variable` that computes log-probability.
Given a `Variable` with inputs `[a_1, ..., a_k]` and output `b`, this returns
a `Variable` with inputs `[c_1, ..., c_k, d]` and output `{"log_prob": p}`
where `c_i` is the observed value of `a_i`, `d` is the observed value of `b`,
and `p` is the joint log-probability of `d` conditioned on `[c_1, ..., c_k]`.
Args:
variable: A `Variable`.
observation_name: A mapping that, given the `Variable` name of an input or
output of `variable`, gives the name of the corresponding observation
`Variable`.
Returns:
A `Variable` that yields the log-probability as described above.
"""
# TODO(ccolby): can we have remove_data_index for variables?
field_names_no_data = [
key for key in variable.spec.as_dict.keys() if not key.startswith("__")
]
log_prob_spec = {
field_name: FieldSpec() for field_name in field_names_no_data
}
transformed_variable = Variable(
name=variable.name + "_log_prob", spec=ValueSpec(**log_prob_spec))
def transform_value_def(value_def):
def rewire_dependency_to_observation(dep):
return Dependency(
variable_name=observation_name[dep.variable_name],
on_current_value=dep.on_current_value)
transformed_dependencies = ([
Dependency(
variable_name=observation_name[variable.name],
on_current_value=True)
] + list(map(rewire_dependency_to_observation, value_def.dependencies)))
def log_prob_fn(observed_output, *observed_inputs):
unfiltered_log_prob = _transform_fn(value_def.fn)(observed_output,
*observed_inputs)
filtered_dict = {
key: (unfiltered_log_prob.get(key)
if key in unfiltered_log_prob.as_dict else 0.0)
for key in field_names_no_data
}
not app_created:
for app in created_apps:
assert app["id"] != context.current_application_id
return
found_app = False
for app in created_apps:
found_app = found_app or app["id"] == context.current_application_id
assert found_app
# If there is no key to check, we're done.
if state_key is None or state_key == "":
return
found_value_for_key = False
key_values = list()
if application_state == "local":
counter = 0
for local_state in account_info["apps-local-state"]:
if local_state["id"] == context.current_application_id:
key_values = local_state["key-value"]
counter = counter + 1
assert counter == 1
elif application_state == "global":
counter = 0
for created_app in account_info["created-apps"]:
if created_app["id"] == context.current_application_id:
key_values = created_app["params"]["global-state"]
counter = counter + 1
assert counter == 1
else:
raise NotImplementedError(
'test does not understand application state "'
+ application_state
+ '"'
)
assert len(key_values) > 0
for key_value in key_values:
found_key = key_value["key"]
if found_key == state_key:
found_value_for_key = True
found_value = key_value["value"]
if found_value["type"] == 1:
assert found_value["bytes"] == state_value
elif found_value["type"] == 0:
assert found_value["uint"] == int(state_value)
assert found_value_for_key
def load_resource(res):
"""load data from features/resources"""
dir_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(dir_path, "..", "features", "resources", res)
with open(path, "rb") as fin:
data = fin.read()
return data
@when('I compile a teal program "{program}"')
def compile_step(context, program):
data = load_resource(program)
source = data.decode("utf-8")
try:
context.response = context.app_acl.compile(source)
context.status = 200
except AlgodHTTPError as ex:
context.status = ex.code
context.response = dict(result="", hash="")
@then(
'it is compiled with {status} and "{result:MaybeString}" and "{hash:MaybeString}"'
)
def compile_check_step(context, status, result, hash):
assert context.status == int(status)
assert context.response["result"] == result
assert context.response["hash"] == hash
@when('I dryrun a "{kind}" program "{program}"')
def dryrun_step(context, kind, program):
data = load_resource(program)
sp = transaction.SuggestedParams(
int(1000), int(1), int(100), "", flat_fee=True
)
zero_addr = encoding.encode_address(bytes(32))
txn = transaction.Transaction(zero_addr, sp, None, None, "pay", None)
sources = []
if kind == "compiled":
lsig = transaction.LogicSig(data)
txns = [transaction.LogicSigTransaction(txn, lsig)]
elif kind == "source":
txns = [transaction.SignedTransaction(txn, None)]
sources = [DryrunSource(field_name="lsig", source=data, txn_index=0)]
else:
assert False, f"kind {kind} not in (source, compiled)"
drr = DryrunRequest(txns=txns, sources=sources)
context.response = context.app_acl.dryrun(drr)
@then('I get execution result "{result}"')
def dryrun_check_step(context, result):
ddr = context.response
assert len(ddr["txns"]) > 0
res = ddr["txns"][0]
if (
res["logic-sig-messages"] is not None
and len(res["logic-sig-messages"]) > 0
):
msgs = res["logic-sig-messages"]
elif (
res["app-call-messages"] is not None
and len(res["app-call-messages"]) > 0
):
msgs = res["app-call-messages"]
assert len(msgs) > 0
assert msgs[-1] == result
@when("we make any Dryrun call")
def dryrun_any_call_step(context):
context.response = context.acl.dryrun(DryrunRequest())
@then(
'the parsed Dryrun Response should have global delta "{creator}" with {action}'
)
def dryrun_parsed_response(context, creator, action):
ddr = context.response
assert len(ddr["txns"]) > 0
delta = ddr["txns"][0]["global-delta"]
assert len(delta) > 0
assert delta[0]["key"] == creator
assert delta[0]["value"]["action"] == int(action)
@given('dryrun test case with "{program}" of type "{kind}"')
def dryrun_test_case_step(context, program, kind):
if kind not in set(["lsig", "approv", "clearp"]):
assert False, f"kind {kind} not in (lsig, approv, clearp)"
prog = load_resource(program)
# check if source
if prog[0] > 0x20:
prog = prog.decode("utf-8")
context.dryrun_case_program = prog
context.dryrun_case_kind = kind
@then('status assert of "{status}" is succeed')
def dryrun_test_case_status_assert_step(context, status):
class TestCase(DryrunTestCaseMixin, unittest.TestCase):
"""Mock TestCase to test"""
ts = TestCase()
ts.algo_client = context.app_acl
lsig = None
app = None
if context.dryrun_case_kind == "lsig":
lsig = dict()
if context.dryrun_case_kind == "approv":
app = dict()
elif context.dryrun_case_kind == "clearp":
app = dict(on_complete=transaction.OnComplete.ClearStateOC)
if status == "PASS":
ts.assertPass(context.dryrun_case_program, lsig=lsig, app=app)
else:
ts.assertReject(context.dryrun_case_program, lsig=lsig, app=app)
def dryrun_test_case_global_state_assert_impl(
context, key, value, action, raises
):
class TestCase(DryrunTestCaseMixin, unittest.TestCase):
"""Mock TestCase to test"""
ts = TestCase()
ts.algo_client = context.app_acl
action = int(action)
val = dict(action=action)
if action == 1:
val["bytes"] = value
elif action == 2:
val["uint"] = int(value)
on_complete = transaction.OnComplete.NoOpOC
if context.dryrun_case_kind == "clearp":
on_complete = transaction.OnComplete.ClearStateOC
raised = False
try:
ts.assertGlobalStateContains(
context.dryrun_case_program,
dict(key=key, value=val),
app=dict(on_complete=on_complete),
)
except AssertionError:
raised = True
if raises:
ts.assertTrue(raised, "assertGlobalStateContains expected to raise")
@then('global delta assert with "{key}", "{value}" and {action} is succeed')
def dryrun_test_case_global_state_assert_step(context, key, value, action):
dryrun_test_case_global_state_assert_impl(
context, key, value, action, False
)
@then('global delta assert with "{key}", "{value}" and {action} is failed')
def dryrun_test_case_global_state_assert_fail_step(
context, key, value, action
):
dryrun_test_case_global_state_assert_impl(
context, key, value, action, True
)
@then(
'local delta assert for "{account}" of accounts {index} with "{key}", "{value}" and {action} is succeed'
)
def dryrun_test_case_local_state_assert_fail_step(
context, account, index, key, value, action
):
class TestCase(DryrunTestCaseMixin, unittest.TestCase):
"""Mock TestCase to test"""
ts = TestCase()
ts.algo_client = context.app_acl
action = int(action)
val = dict(action=action)
if action == 1:
val["bytes"] = value
elif action == 2:
val["uint"] = int(value)
on_complete = transaction.OnComplete.NoOpOC
if context.dryrun_case_kind == "clearp":
on_complete = transaction.OnComplete.ClearStateOC
app_idx = 1
accounts = [
Account(
address=ts.default_address(),
status="Offline",
apps_local_state=[ApplicationLocalState(id=app_idx)],
)
] * 2
accounts[int(index)].address = account
drr = ts.dryrun_request(
context.dryrun_case_program,
sender=accounts[0].address,
app=dict(app_idx=app_idx, on_complete=on_complete, accounts=accounts),
)
ts.assertNoError(drr)
ts.assertLocalStateContains(drr, account, dict(key=key, value=val))
@given("a new AtomicTransactionComposer")
def create_atomic_transaction_composer(context):
context.atomic_transaction_composer = (
atomic_transaction_composer.AtomicTransactionComposer()
)
context.method_list = []
@given("I make a transaction signer for the transient account.")
def create_transient_transaction_signer(context):
private_key = context.transient_sk
context.transaction_signer = (
atomic_transaction_composer.AccountTransactionSigner(private_key)
)
@when("I make a transaction signer for the {account_type} account.")
def create_transaction_signer(context, account_type):
if account_type == "transient":
private_key = context.transient_sk
elif account_type == "signing":
private_key = mnemonic.to_private_key(context.signing_mnemonic)
else:
raise NotImplementedError(
"cannot make transaction signer for " + account_type
)
context.transaction_signer = (
atomic_transaction_composer.AccountTransactionSigner(private_key)
)
@step('I create the Method object from method signature "{method_signature}"')
def build_abi_method(context, method_signature):
context.abi_method = abi.Method.from_signature(method_signature)
if not hasattr(context, "method_list"):
context.method_list = []
context.method_list.append(context.abi_method)
@step("I create a transaction with signer with the current transaction.")
def create_transaction_with_signer(context):
context.transaction_with_signer = (
atomic_transaction_composer.TransactionWithSigner(
context.transaction, context.transaction_signer
)
)
@when("I add the current transaction with signer to the composer.")
def add_transaction_to_composer(context):
context.atomic_transaction_composer.add_transaction(
context.transaction_with_signer
)
def process_abi_args(method, arg_tokens):
method_args = []
for arg_index, arg in enumerate(method.args):
        # Decode the argument according to its ABI type, if it has one
if isinstance(arg.type, abi.ABIType):
method_arg = arg.type.decode(
base64.b64decode(arg_tokens[arg_index])
)
method_args.append(method_arg)
elif arg.type == abi.ABIReferenceType.ACCOUNT:
method_arg = abi.AddressType().decode(
base64.b64decode(arg_tokens[arg_index])
)
method_args.append(method_arg)
elif (
arg.type == abi.ABIReferenceType.APPLICATION
or arg.type == abi.ABIReferenceType.ASSET
):
method_arg = abi.UintType(64).decode(
base64.b64decode(arg_tokens[arg_index])
)
method_args.append(method_arg)
else:
# Append the transaction signer as is
method_args.append(arg_tokens[arg_index])
return method_args
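# Illustrative sketch, not one of the original step implementations: decoding
# base64-encoded ABI arguments the way process_abi_args does. The method
# signature and argument values below are made up for demonstration only.
def _example_process_abi_args():
    import base64 as b64
    method = abi.Method.from_signature("add(uint64,uint64)uint64")
    tokens = [
        b64.b64encode(abi.UintType(64).encode(7)),
        b64.b64encode(abi.UintType(64).encode(35)),
    ]
    # Each uint64 token is base64-decoded and then ABI-decoded back to an int.
    return process_abi_args(method, tokens)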
@step("I create a new method arguments array.")
def create_abi_method_args(context):
context.method_args = []
@step(
"I append the current transaction with signer to the method arguments array."
)
def append_txn_to_method_args(context):
context.method_args.append(context.transaction_with_signer)
@step(
'I append the encoded arguments "{method_args:MaybeString}" to the method arguments array.'
)
def append_app_args_to_method_args(context, method_args):
# Returns a list of ABI method arguments
app_args = method_args.split(",")
context.method_args += app_args
@step(
'I add a method call with the {account_type} account, the current application, suggested params, on complete "{operation}", current transaction signer, current method arguments.'
)
def add_abi_method_call(context, account_type, operation):
if account_type == "transient":
sender = context.transient_pk
elif account_type == "signing":
sender = mnemonic.to_public_key(context.signing_mnemonic)
else:
raise NotImplementedError(
"cannot make transaction signer for " + account_type
)
app_args = process_abi_args(context.abi_method, context.method_args)
context.atomic_transaction_composer.add_method_call(
app_id=int(context.current_application_id),
method=context.abi_method,
sender=sender,
sp=context.suggested_params,
signer=context.transaction_signer,
method_args=app_args,
on_complete=operation_string_to_enum(operation),
)
@when(
'I add a method call with the {account_type} account, the current application, suggested params, on complete "{operation}", current transaction signer, current method arguments, approval-program "{approval_program_path:MaybeString}", clear-program "{clear_program_path:MaybeString}", global-bytes {global_bytes}, global-ints {global_ints}, local-bytes {local_bytes}, local-ints {local_ints}, extra-pages {extra_pages}.'
)
def add_abi_method_call_creation(
context,
account_type,
operation,
approval_program_path,
clear_program_path,
global_bytes,
global_ints,
local_bytes,
local_ints,
extra_pages,
):
if account_type == "transient":
sender = context.transient_pk
elif account_type == "signing":
sender = mnemonic.to_public_key(context.signing_mnemonic)
else:
raise NotImplementedError(
"cannot make transaction signer for " + account_type
)
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.dirname(os.path.dirname(dir_path))
if approval_program_path:
with open(
dir_path + "/test/features/resources/" + approval_program_path,
"rb",
) as f:
approval_program = bytearray(f.read())
else:
approval_program = None
if clear_program_path:
with open(
dir_path + "/test/features/resources/" + clear_program_path, "rb"
) as f:
clear_program = bytearray(f.read())
else:
clear_program = None
local_schema = transaction.StateSchema(
num_uints=int(local_ints), num_byte_slices=int(local_bytes)
)
global_schema = transaction.StateSchema(
num_uints=int(global_ints), num_byte_slices=int(global_bytes)
)
extra_pages = int(extra_pages)
app_args = process_abi_args(context.abi_method, context.method_args)
context.atomic_transaction_composer.add_method_call(
app_id=int(context.current_application_id),
method=context.abi_method,
sender=sender,
sp=context.suggested_params,
signer=context.transaction_signer,
method_args=app_args,
on_complete=operation_string_to_enum(operation),
local_schema=local_schema,
global_schema=global_schema,
approval_program=approval_program,
clear_program=clear_program,
extra_pages=extra_pages,
)
@when(
'I add a method call with the {account_type} account, the current application, suggested params, on complete "{operation}", current transaction signer, current method arguments, approval-program "{approval_program_path:MaybeString}", clear-program "{clear_program_path:MaybeString}".'
)
def add_abi_method_call_creation(
context, account_type, operation, approval_program_path, clear_program_path
):
if account_type == "transient":
sender = context.transient_pk
elif account_type == "signing":
sender = mnemonic.to_public_key(context.signing_mnemonic)
else:
raise NotImplementedError(
"cannot make transaction signer for " + account_type
)
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path = os.path.dirname(os.path.dirname(dir_path))
if approval_program_path:
with open(
dir_path + "/test/features/resources/" + approval_program_path,
"rb",
) as f:
approval_program = bytearray(f.read())
else:
approval_program = None
if clear_program_path:
with open(
dir_path + "/test/features/resources/" + clear_program_path, "rb"
) as f:
clear_program = bytearray(f.read())
the convex hull of 8 vertices
sage: P.is_combinatorially_isomorphic(polytopes.cube())
True
The SEP of a complete graph on `4` vertices is a cuboctahedron::
sage: G = graphs.CompleteGraph(4)
sage: P = G.symmetric_edge_polytope(); P
A 3-dimensional polyhedron in ZZ^4 defined as the convex hull of 12 vertices
sage: P.is_combinatorially_isomorphic(polytopes.cuboctahedron())
True
The SEP of a graph with edges on `n` vertices has dimension `n`
minus the number of connected components::
sage: n = randint(5, 12)
sage: G = Graph()
sage: while not G.num_edges():
....: G = graphs.RandomGNP(n, 0.2)
sage: P = G.symmetric_edge_polytope()
sage: P.ambient_dim() == n
True
sage: P.dim() == n - G.connected_components_number()
True
    The SEP of a graph with edges is isomorphic to the product of the SEPs
    of its connected components with edges::
sage: n = randint(5, 12)
sage: G = Graph()
sage: while not G.num_edges():
....: G = graphs.RandomGNP(n, 0.2)
sage: P = G.symmetric_edge_polytope()
sage: components = [G.subgraph(c).symmetric_edge_polytope()
....: for c in G.connected_components()
....: if G.subgraph(c).num_edges()]
sage: P.is_combinatorially_isomorphic(product(components))
True
All trees on `n` vertices have isomorphic SEPs::
sage: n = randint(4, 10)
sage: G1 = graphs.RandomTree(n)
sage: G2 = graphs.RandomTree(n)
sage: P1 = G1.symmetric_edge_polytope()
sage: P2 = G2.symmetric_edge_polytope()
sage: P1.is_combinatorially_isomorphic(P2)
True
However, there are still many different SEPs::
sage: len(list(graphs(5)))
34
sage: polys = []
sage: for G in graphs(5):
....: P = G.symmetric_edge_polytope()
....: for P1 in polys:
....: if P.is_combinatorially_isomorphic(P1):
....: break
....: else:
....: polys.append(P)
....:
sage: len(polys)
25
A non-trivial example of two graphs with isomorphic SEPs::
sage: G1 = graphs.CycleGraph(4)
sage: G1.add_edges([[0, 5], [5, 2], [1, 6], [6, 2]])
sage: G2 = copy(G1)
sage: G1.add_edges([[2, 7], [7, 3]])
sage: G2.add_edges([[0, 7], [7, 3]])
sage: G1.is_isomorphic(G2)
False
sage: P1 = G1.symmetric_edge_polytope()
sage: P2 = G2.symmetric_edge_polytope()
sage: P1.is_combinatorially_isomorphic(P2)
True
    Apparently, gluing two graphs together on a vertex
gives isomorphic SEPs::
sage: n = randint(3, 7)
sage: g1 = graphs.RandomGNP(n, 0.2)
sage: g2 = graphs.RandomGNP(n, 0.2)
sage: G = g1.disjoint_union(g2)
sage: H = copy(G)
sage: G.merge_vertices(((0, randrange(n)), (1, randrange(n))))
sage: H.merge_vertices(((0, randrange(n)), (1, randrange(n))))
sage: PG = G.symmetric_edge_polytope()
sage: PH = H.symmetric_edge_polytope()
sage: PG.is_combinatorially_isomorphic(PH)
True
TESTS:
Obtain the SEP with unsortable vertices::
sage: G = Graph([[1, (1, 2)]])
sage: G.symmetric_edge_polytope()
A 1-dimensional polyhedron in ZZ^2 defined as the convex hull of 2 vertices
"""
from itertools import chain
from sage.matrix.special import identity_matrix
from sage.geometry.polyhedron.parent import Polyhedra
dim = self.num_verts()
e = identity_matrix(dim).rows()
dic = {v: e[i] for i, v in enumerate(self)}
vertices = chain(((dic[i] - dic[j]) for i,j in self.edge_iterator(sort_vertices=False, labels=False)),
((dic[j] - dic[i]) for i,j in self.edge_iterator(sort_vertices=False, labels=False)))
parent = Polyhedra(ZZ, dim, backend=backend)
return parent([vertices, [], []], None)
def tachyon_vertex_plot(g, bgcolor=(1,1,1),
vertex_colors=None,
vertex_size=0.06,
pos3d=None,
**kwds):
"""
Helper function for plotting graphs in 3d with
:class:`~sage.plot.plot3d.tachyon.Tachyon`.
Returns a plot containing only the vertices, as well as the 3d position
dictionary used for the plot.
INPUT:
- ``pos3d`` -- a 3D layout of the vertices
- various rendering options
EXAMPLES::
sage: G = graphs.TetrahedralGraph()
sage: from sage.graphs.generic_graph import tachyon_vertex_plot
sage: T,p = tachyon_vertex_plot(G, pos3d=G.layout(dim=3))
sage: type(T)
<class 'sage.plot.plot3d.tachyon.Tachyon'>
sage: type(p)
<... 'dict'>
"""
assert pos3d is not None
from math import sqrt
from sage.plot.plot3d.tachyon import Tachyon
c = [0,0,0]
r = []
verts = list(g)
if vertex_colors is None:
vertex_colors = {(1,0,0): verts}
try:
for v in verts:
c[0] += pos3d[v][0]
c[1] += pos3d[v][1]
c[2] += pos3d[v][2]
except KeyError:
raise KeyError("you have not specified positions for all the vertices")
order = g.order()
c[0] /= order
c[1] /= order
c[2] /= order
for v in verts:
pos3d[v][0] -= c[0]
pos3d[v][1] -= c[1]
pos3d[v][2] -= c[2]
r.append(abs(sqrt((pos3d[v][0])**2 + (pos3d[v][1])**2 + (pos3d[v][2])**2)))
r = max(r)
if not r:
r = 1
for v in verts:
pos3d[v][0] /= r
pos3d[v][1] /= r
pos3d[v][2] /= r
TT = Tachyon(camera_center=(1.4, 1.4, 1.4), antialiasing=13, **kwds)
TT.light((4, 3, 2), 0.02, (1, 1, 1))
TT.texture('bg', ambient=1, diffuse=1, specular=0, opacity=1.0, color=bgcolor)
TT.plane((-1.6, -1.6, -1.6), (1.6, 1.6, 1.6), 'bg')
i = 0
for color in vertex_colors:
i += 1
TT.texture('node_color_%d'%i, ambient=0.1, diffuse=0.9,
specular=0.03, opacity=1.0, color=color)
for v in vertex_colors[color]:
TT.sphere((pos3d[v][0], pos3d[v][1], pos3d[v][2]), vertex_size, 'node_color_%d'%i)
return TT, pos3d
def graph_isom_equivalent_non_edge_labeled_graph(g, partition=None, standard_label=None, return_relabeling=False, return_edge_labels=False, inplace=False, ignore_edge_labels=False):
r"""
Helper function for canonical labeling of edge labeled (di)graphs.
Translates to a bipartite incidence-structure type graph appropriate for
computing canonical labels of edge labeled and/or multi-edge graphs.
Note that this is actually computationally equivalent to implementing a
change on an inner loop of the main algorithm -- namely making the
refinement procedure sort for each label.
If the graph is a multigraph, it is translated to a non-multigraph,
where each instance of multiple edges is converted to a single
edge labeled with a list ``[[label1, multiplicity], [label2,
multiplicity], ...]`` describing how many edges of each label were
originally there. Then in either case we are working on a graph
without multiple edges. At this point, we create another
(partially bipartite) graph, whose left vertices are the original
vertices of the graph, and whose right vertices represent the
labeled edges. Any unlabeled edges in the original graph are also
present in the new graph, and -- this is the bipartite aspect --
for every labeled edge `e` from `v` to `w` in the original graph,
there is an edge between the right vertex corresponding to `e` and
each of the left vertices corresponding to `v` and `w`. We
partition the left vertices as they were originally, and the right
vertices by common labels: only automorphisms taking edges to
like-labeled edges are allowed, and this additional partition
information enforces this on the new graph.
INPUT:
- ``g`` -- Graph or DiGraph
- ``partition`` -- list (default: ``None``); a partition of the
vertices as a list of lists of vertices. If given, the partition
of the vertices is as well relabeled
- ``standard_label`` -- (default: ``None``); edges in ``g`` with
this label are preserved in the new graph
- ``return_relabeling`` -- boolean (default: ``False``); whether
to return a dictionary containing the relabeling
- ``return_edge_labels`` -- boolean (default: ``False``); whether
the different ``edge_labels`` are returned (useful if inplace is
``True``)
- ``inplace`` -- boolean (default: ``False``); whether the input
(di)graph ``g`` is modified or the return a new (di)graph. Note
that attributes of ``g`` are *not* copied for speed issues, only
edges and vertices.
- ``ignore_edge_labels`` -- boolean (default: ``False``): if
``True``, ignore edge labels, so when constructing the new
graph, only multiple edges are replaced with vertices. Labels on
multiple edges are ignored -- only the multiplicity is relevant,
so multiple edges with the same multiplicity in the original
graph correspond to right vertices in the same partition in the
new graph.
OUTPUT:
- if ``inplace`` is ``False``: the unlabeled graph without
multiple edges
- the partition of the vertices
- if ``return_relabeling`` is ``True``: a dictionary containing
the relabeling
- if ``return_edge_labels`` is ``True``: the list of (former) edge
labels is returned
EXAMPLES::
sage: from sage.graphs.generic_graph import graph_isom_equivalent_non_edge_labeled_graph
sage: G = Graph(multiedges=True,sparse=True)
sage: G.add_edges((0, 1, i) for i in range(10))
sage: G.add_edge(1,2,'string')
sage: G.add_edge(2,123)
sage: graph_isom_equivalent_non_edge_labeled_graph(G, partition=[[0,123],[1,2]])
[Graph on 6 vertices, [[1, 0], [2, 3], [5], [4]]]
sage: g, part = graph_isom_equivalent_non_edge_labeled_graph(G)
sage: g, sorted(part)
(Graph on 6 vertices, [[0, 1, 2, 3], [4], [5]])
sage: g.edges(sort=True)
[(0, 3, None), (1, 4, None), (2, 4, None), (2, 5, None), (3, 5, None)]
sage: g = graph_isom_equivalent_non_edge_labeled_graph(G,standard_label='string',return_edge_labels=True)
sage: g[0]
Graph on 6 vertices
sage: g[0].edges(sort=True)
[(0, 5, None), (1, 4, None), (2, 3, None), (2, 4, None), (3, 5, None)]
sage: g[1]
[[0, 1, 2, 3], [4], [5]]
sage: g[2]
[[['string', 1]], [[0, 1], [1, 1], [2, 1], [3, 1], [4, 1], [5, 1], [6, 1], [7, 1], [8, 1], [9, 1]], [[None, 1]]]
sage: graph_isom_equivalent_non_edge_labeled_graph(G, inplace=True)
[[[0, 1, 2, 3], [5], [4]]]
sage: G.edges(sort=True)
[(0, 3, None), (1, 4, None), (2, 4, None), (2, 5, None), (3, 5, None)]
sage: G = Graph(multiedges=True,sparse=True)
sage: G.add_edges((0, 1) for i in range(10))
        sage: G.add_edge(1, 2,
# Repo: stormtheh4ck3r/python-for-android
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module contains tests for L{twisted.internet.task.Cooperator} and
related functionality.
"""
from twisted.internet import reactor, defer, task
from twisted.trial import unittest
class FakeDelayedCall(object):
"""
Fake delayed call which lets us simulate the scheduler.
"""
def __init__(self, func):
"""
A function to run, later.
"""
self.func = func
self.cancelled = False
def cancel(self):
"""
Don't run my function later.
"""
self.cancelled = True
class FakeScheduler(object):
"""
A fake scheduler for testing against.
"""
def __init__(self):
"""
Create a fake scheduler with a list of work to do.
"""
self.work = []
def __call__(self, thunk):
"""
Schedule a unit of work to be done later.
"""
unit = FakeDelayedCall(thunk)
self.work.append(unit)
return unit
def pump(self):
"""
Do all of the work that is currently available to be done.
"""
work, self.work = self.work, []
for unit in work:
if not unit.cancelled:
unit.func()
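# Illustrative sketch, not one of the original test cases: driving a Cooperator
# by hand with the FakeScheduler above, one unit of work per pump() call.
def _exampleFakeSchedulerUsage():
    scheduler = FakeScheduler()
    cooperator = task.Cooperator(
        scheduler=scheduler,
        terminationPredicateFactory=lambda: lambda: True)
    done = []
    cooperator.coiterate(iter([1, 2, 3])).addBoth(done.append)
    # Each pump performs the currently scheduled work; the bound keeps this
    # sketch from looping forever if the wiring assumptions are wrong.
    for _ in range(10):
        if done:
            break
        scheduler.pump()
    return done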
class TestCooperator(unittest.TestCase):
RESULT = 'done'
def ebIter(self, err):
err.trap(task.SchedulerStopped)
return self.RESULT
def cbIter(self, ign):
self.fail()
def testStoppedRejectsNewTasks(self):
"""
Test that Cooperators refuse new tasks when they have been stopped.
"""
def testwith(stuff):
c = task.Cooperator()
c.stop()
d = c.coiterate(iter(()), stuff)
d.addCallback(self.cbIter)
d.addErrback(self.ebIter)
return d.addCallback(lambda result:
self.assertEquals(result, self.RESULT))
return testwith(None).addCallback(lambda ign: testwith(defer.Deferred()))
def testStopRunning(self):
"""
Test that a running iterator will not run to completion when the
cooperator is stopped.
"""
c = task.Cooperator()
def myiter():
for myiter.value in range(3):
yield myiter.value
myiter.value = -1
d = c.coiterate(myiter())
d.addCallback(self.cbIter)
d.addErrback(self.ebIter)
c.stop()
def doasserts(result):
self.assertEquals(result, self.RESULT)
self.assertEquals(myiter.value, -1)
d.addCallback(doasserts)
return d
def testStopOutstanding(self):
"""
An iterator run with L{Cooperator.coiterate} paused on a L{Deferred}
yielded by that iterator will fire its own L{Deferred} (the one
returned by C{coiterate}) when L{Cooperator.stop} is called.
"""
testControlD = defer.Deferred()
outstandingD = defer.Deferred()
def myiter():
reactor.callLater(0, testControlD.callback, None)
yield outstandingD
self.fail()
c = task.Cooperator()
d = c.coiterate(myiter())
def stopAndGo(ign):
c.stop()
outstandingD.callback('arglebargle')
testControlD.addCallback(stopAndGo)
d.addCallback(self.cbIter)
d.addErrback(self.ebIter)
return d.addCallback(
lambda result: self.assertEquals(result, self.RESULT))
def testUnexpectedError(self):
c = task.Cooperator()
def myiter():
if 0:
yield None
else:
raise RuntimeError()
d = c.coiterate(myiter())
return self.assertFailure(d, RuntimeError)
def testUnexpectedErrorActuallyLater(self):
def myiter():
D = defer.Deferred()
reactor.callLater(0, D.errback, RuntimeError())
yield D
c = task.Cooperator()
d = c.coiterate(myiter())
return self.assertFailure(d, RuntimeError)
def testUnexpectedErrorNotActuallyLater(self):
def myiter():
yield defer.fail(RuntimeError())
c = task.Cooperator()
d = c.coiterate(myiter())
return self.assertFailure(d, RuntimeError)
def testCooperation(self):
L = []
def myiter(things):
for th in things:
L.append(th)
yield None
groupsOfThings = ['abc', (1, 2, 3), 'def', (4, 5, 6)]
c = task.Cooperator()
tasks = []
for stuff in groupsOfThings:
tasks.append(c.coiterate(myiter(stuff)))
return defer.DeferredList(tasks).addCallback(
lambda ign: self.assertEquals(tuple(L), sum(zip(*groupsOfThings), ())))
def testResourceExhaustion(self):
output = []
def myiter():
for i in range(100):
output.append(i)
if i == 9:
_TPF.stopped = True
yield i
class _TPF:
stopped = False
def __call__(self):
return self.stopped
c = task.Cooperator(terminationPredicateFactory=_TPF)
c.coiterate(myiter()).addErrback(self.ebIter)
c._delayedCall.cancel()
# testing a private method because only the test case will ever care
# about this, so we have to carefully clean up after ourselves.
c._tick()
c.stop()
self.failUnless(_TPF.stopped)
self.assertEquals(output, range(10))
def testCallbackReCoiterate(self):
"""
If a callback to a deferred returned by coiterate calls coiterate on
the same Cooperator, we should make sure to only do the minimal amount
of scheduling work. (This test was added to demonstrate a specific bug
that was found while writing the scheduler.)
"""
calls = []
class FakeCall:
def __init__(self, func):
self.func = func
def __repr__(self):
return '<FakeCall %r>' % (self.func,)
def sched(f):
self.failIf(calls, repr(calls))
calls.append(FakeCall(f))
return calls[-1]
c = task.Cooperator(scheduler=sched, terminationPredicateFactory=lambda: lambda: True)
d = c.coiterate(iter(()))
done = []
def anotherTask(ign):
c.coiterate(iter(())).addBoth(done.append)
d.addCallback(anotherTask)
work = 0
while not done:
work += 1
while calls:
calls.pop(0).func()
work += 1
if work > 50:
self.fail("Cooperator took too long")
class UnhandledException(Exception):
"""
An exception that should go unhandled.
"""
class AliasTests(unittest.TestCase):
"""
Integration test to verify that the global singleton aliases do what
they're supposed to.
"""
def test_cooperate(self):
"""
        L{twisted.internet.task.cooperate} ought to run the generator that it
        is given.
"""
d = defer.Deferred()
def doit():
yield 1
yield 2
yield 3
d.callback("yay")
it = doit()
theTask = task.cooperate(it)
self.assertIn(theTask, task._theCooperator._tasks)
return d
class RunStateTests(unittest.TestCase):
"""
Tests to verify the behavior of L{CooperativeTask.pause},
L{CooperativeTask.resume}, L{CooperativeTask.stop}, exhausting the
underlying iterator, and their interactions with each other.
"""
def setUp(self):
"""
Create a cooperator with a fake scheduler and a termination predicate
that ensures only one unit of work will take place per tick.
"""
self._doDeferNext = False
self._doStopNext = False
self._doDieNext = False
self.work = []
self.scheduler = FakeScheduler()
self.cooperator = task.Cooperator(
scheduler=self.scheduler,
# Always stop after one iteration of work (return a function which
# returns a function which always returns True)
terminationPredicateFactory=lambda: lambda: True)
self.task = self.cooperator.cooperate(self.worker())
self.cooperator.start()
def worker(self):
"""
This is a sample generator which yields Deferreds when we are testing
deferral and an ascending integer count otherwise.
"""
i = 0
while True:
i += 1
if self._doDeferNext:
self._doDeferNext = False
d = defer.Deferred()
self.work.append(d)
yield d
elif self._doStopNext:
return
elif self._doDieNext:
raise UnhandledException()
else:
self.work.append(i)
yield i
def tearDown(self):
"""
Drop references to interesting parts of the fixture to allow Deferred
errors to be noticed when things start failing.
"""
del self.task
del self.scheduler
def deferNext(self):
"""
Defer the next result from my worker iterator.
"""
self._doDeferNext = True
def stopNext(self):
"""
Make the next result from my worker iterator be completion (raising
StopIteration).
"""
self._doStopNext = True
def dieNext(self):
"""
Make the next result from my worker iterator be raising an
L{UnhandledException}.
"""
def ignoreUnhandled(failure):
failure.trap(UnhandledException)
return None
self._doDieNext = True
def test_pauseResume(self):
"""
Cooperators should stop running their tasks when they're paused, and
start again when they're resumed.
"""
# first, sanity check
self.scheduler.pump()
self.assertEquals(self.work, [1])
self.scheduler.pump()
self.assertEquals(self.work, [1, 2])
# OK, now for real
self.task.pause()
self.scheduler.pump()
self.assertEquals(self.work, [1, 2])
self.task.resume()
        # Resuming itself should not do any work
self.assertEquals(self.work, [1, 2])
self.scheduler.pump()
# But when the scheduler rolls around again...
self.assertEquals(self.work, [1, 2, 3])
def test_resumeNotPaused(self):
"""
        L{CooperativeTask.resume} should raise a L{NotPaused} exception if
it was not paused; e.g. if L{CooperativeTask.pause} was not invoked
more times than L{CooperativeTask.resume} on that object.
"""
self.assertRaises(task.NotPaused, self.task.resume)
self.task.pause()
self.task.resume()
self.assertRaises(task.NotPaused, self.task.resume)
def test_pauseTwice(self):
"""
Pauses on tasks should behave like a stack. If a task is paused twice,
it needs to be resumed twice.
"""
# pause once
self.task.pause()
self.scheduler.pump()
self.assertEquals(self.work, [])
# pause twice
self.task.pause()
self.scheduler.pump()
self.assertEquals(self.work, [])
# resume once (it shouldn't)
self.task.resume()
self.scheduler.pump()
self.assertEquals(self.work, [])
# resume twice (now it should go)
self.task.resume()
self.scheduler.pump()
self.assertEquals(self.work, [1])
def test_pauseWhileDeferred(self):
"""
C{pause()}ing a task while it is waiting on an outstanding
L{defer.Deferred} should put the task into a state where the
outstanding L{defer.Deferred} must be called back I{and} the task is
C{resume}d before it will continue processing.
"""
self.deferNext()
self.scheduler.pump()
self.assertEquals(len(self.work), 1)
self.failUnless(isinstance(self.work[0], defer.Deferred))
self.scheduler.pump()
self.assertEquals(len(self.work), 1)
self.task.pause()
self.scheduler.pump()
self.assertEquals(len(self.work), 1)
self.task.resume()
self.scheduler.pump()
self.assertEquals(len(self.work), 1)
self.work[0].callback("STUFF!")
self.scheduler.pump()
self.assertEquals(len(self.work), 2)
self.assertEquals(self.work[1], 2)
def test_whenDone(self):
"""
L{CooperativeTask.whenDone} returns a Deferred which fires when the
Cooperator's iterator is exhausted. It returns a new Deferred each
time it is called; callbacks added to other invocations will not modify
the value that subsequent invocations will fire with.
"""
deferred1 = self.task.whenDone()
deferred2 = self.task.whenDone()
results1 = []
results2 = []
final1 = []
final2 = []
def callbackOne(result):
results1.append(result)
return 1
def callbackTwo(result):
results2.append(result)
return 2
deferred1.addCallback(callbackOne)
deferred2.addCallback(callbackTwo)
deferred1.addCallback(final1.append)
deferred2.addCallback(final2.append)
# exhaust the task iterator
# callbacks fire
self.stopNext()
self.scheduler.pump()
self.assertEquals(len(results1), 1)
self.assertEquals(len(results2), 1)
self.assertIdentical(results1[0], self.task._iterator)
self.assertIdentical(results2[0], self.task._iterator)
self.assertEquals(final1, [1])
self.assertEquals(final2, [2])
def test_whenDoneError(self):
"""
L{CooperativeTask.whenDone} returns a L{defer.Deferred} that will fail
when the iterable's C{next} method raises an exception, with that
exception.
"""
deferred1 = self.task.whenDone()
results = []
deferred1.addErrback(results.append)
self.dieNext()
self.scheduler.pump()
self.assertEquals(len(results), 1)
self.assertEquals(results[0].check(UnhandledException), UnhandledException)
def test_whenDoneStop(self):
"""
L{CooperativeTask.whenDone} returns a L{defer.Deferred} that fails with
L{TaskStopped} when the C{stop} method is called on that
L{CooperativeTask}.
"""
deferred1 = self.task.whenDone()
errors = []
deferred1.addErrback(errors.append)
self.task.stop()
self.assertEquals(len(errors), 1)
self.assertEquals(errors[0].check(task.TaskStopped), task.TaskStopped)
def test_whenDoneAlreadyDone(self):
"""
L{CooperativeTask.whenDone} will return a L{defer.Deferred} that will
succeed immediately if its iterator has already completed.
"""
self.stopNext()
self.scheduler.pump()
        results = []
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities to query and parse the metrics data."""
import abc
import datetime
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
from pypowervm import adapter as pvm_adpt
from pypowervm.i18n import _
from pypowervm.tasks.monitor import lpar as lpar_mon
from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import monitor as pvm_mon
from pypowervm.wrappers.pcm import lpar as lpar_pcm
from pypowervm.wrappers.pcm import phyp as phyp_mon
from pypowervm.wrappers.pcm import vios as vios_mon
LOG = logging.getLogger(__name__)
RAW_METRICS = 'RawMetrics'
@six.add_metaclass(abc.ABCMeta)
class MetricCache(object):
"""Provides a cache of the metrics data.
The core LongTermMetrics API only refreshes its internal metric data once
(generally) every 30 seconds. This class provides a generalized cache
of the metrics. It stores both the raw phyp and vios metrics (if
available) and will only refresh them after a specified time period has
elapsed (30 seconds by default).
"""
def __init__(self, adapter, host_uuid, refresh_delta=30, include_vio=True):
"""Creates an instance of the cache.
:param adapter: The pypowervm Adapter.
:param host_uuid: The UUID of the host CEC to maintain a metrics
cache for.
:param refresh_delta: (Optional) The interval in seconds at which the
metrics should be updated. Will only update if
the interval has been passed and the user invokes
a cache query. Will not update in the
background, only if the cache is used.
:param include_vio: (Optional) Defaults to True. If set to False, the
cur_vioses and prev_vioses will always be
unavailable. This increases the speed for refresh.
"""
# Ensure that the metric monitoring is enabled.
ensure_ltm_monitors(adapter, host_uuid)
# Save the data
self.adapter = adapter
self.host_uuid = host_uuid
self.refresh_delta = datetime.timedelta(seconds=refresh_delta)
self.include_vio = include_vio
self.is_first_pass = False
# Ensure these elements are defined up front.
self.cur_date, self.cur_phyp, self.cur_vioses, self.cur_lpars = (
None, None, None, None)
self.prev_date, self.prev_phyp, self.prev_vioses, self.prev_lpars = (
None, None, None, None)
# Run a refresh up front.
self._refresh_if_needed()
def _refresh_if_needed(self):
"""Refreshes the cache if needed."""
# The refresh is needed if the current date is none, or if the refresh
# time delta has been crossed.
refresh_needed = self.cur_date is None
# This is put into an if block so that we don't run the logic if
# cur_date is in fact None...
if not refresh_needed:
diff_date = datetime.datetime.now() - self.cur_date
refresh_needed = diff_date > self.refresh_delta
# At this point, if a refresh isn't needed, then exit.
if not refresh_needed:
return
self._set_prev()
self.cur_date, self.cur_phyp, self.cur_vioses, self.cur_lpars = (
latest_stats(self.adapter, self.host_uuid,
include_vio=self.include_vio))
# Have the class that is implementing the cache update its simplified
# representation of the data. Ex. LparMetricCache
self._update_internal_metric()
def _set_prev(self):
# On first boot, the cur data will be None. Query to seed it with the
# second latest data (which may also still be none if LTM was just
# turned on, but just in case).
self.is_first_pass = self.cur_date is None
if self.is_first_pass:
p_date, p_phyp, p_vioses, p_lpars = (
latest_stats(self.adapter, self.host_uuid,
include_vio=self.include_vio, second_latest=True))
self.prev_date, self.prev_phyp = p_date, p_phyp
self.prev_vioses, self.prev_lpars = p_vioses, p_lpars
else:
            self.prev_date, self.prev_phyp = self.cur_date, self.cur_phyp
self.prev_vioses, self.prev_lpars = self.cur_vioses, self.cur_lpars
def _update_internal_metric(self):
"""Save the raw metric to the transformed values.
Implemented by the child class. Should transform the phyp and
vios data into the format required by the implementor.
"""
raise NotImplementedError()
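# Illustrative sketch, not part of the original module: the minimal shape of a
# MetricCache subclass. Concrete caches (such as LparMetricCache below) override
# _update_internal_metric to transform the raw cur_*/prev_* samples.
class _ExampleRawMetricCache(MetricCache):
    """Example subclass that keeps only the raw phyp/vios samples."""

    def _update_internal_metric(self):
        # Nothing extra to compute; callers read cur_phyp/cur_vioses directly.
        pass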
class LparMetricCache(MetricCache):
"""Provides a cache of metrics on a per LPAR level.
Metrics are expensive to gather and to parse. It is expensive because
the backing API gathers all of the metrics at the Hypervisor and Virtual
I/O Server levels. This returns all of the LPARs. Therefore, this cache
parses in all of the data once, and allows the invoker to get individual
LPAR metrics without having to re-query the API server.
This class provides a caching mechanism along with a built in refresh
mechanism if enough time has passed since last gathering the metrics.
This cache will obtain the metrics for a given system, separate them out
into an individual LparMetric cache. If another LPAR is required, the
cache will be used (so a subsequent API call is not required).
There is a refresh_interval as well. If the interval is passed, a
subsequent query of the metrics will force a refresh of the cache.
The previous metric is also saved within the cache. This is useful for
generating rates on the metrics (a previous element to compare against).
The cache will only contain the last two samples of hypervisor/vios data.
This is so that the current sample and the previous sample are maintained.
The data is maintained for all of the systems that metrics data has data
for - but this is still quite thin. This cache does not have support
to maintain additional samples.
Trimming is done upon each refresh (which is triggered by the
get_latest_metric). To wipe the cache, the user should just have the cache
go out of scope and it will be cleared. No manual clean up is required.
"""
def __init__(self, adapter, host_uuid, refresh_delta=30, include_vio=True):
"""Creates an instance of the cache.
:param adapter: The pypowervm Adapter.
:param host_uuid: The UUID of the host CEC to maintain a metrics
cache for.
:param refresh_delta: (Optional) The interval at which the metrics
should be updated. Will only update if the
interval has been passed and the user invokes a
cache query. Will not update in the background,
only if the cache is used.
:param include_vio: (Optional) Defaults to True. If set to False, the
cur_vioses and prev_vioses will always be
unavailable. This increases the speed for refresh.
"""
# Ensure these elements are defined up front so that references don't
# error out if they haven't been set yet. These will be the results
# from the vm_metrics method.
self.cur_metric, self.prev_metric = None, None
# Invoke the parent to seed the metrics.
super(LparMetricCache, self).__init__(adapter, host_uuid,
refresh_delta=refresh_delta,
include_vio=include_vio)
@lockutils.synchronized('pvm_lpar_metrics_get')
def get_latest_metric(self, lpar_uuid):
"""Returns the latest metrics for a given LPAR.
This will pull from the cache, but will refresh the cache if the
refresh interval has passed.
:param lpar_uuid: The UUID of the LPAR to query for the metrics.
:return: Two elements.
- First is the date of the metric.
- Second is the LparMetric
Note that both of these can be None. If the date of the
metric is None, that indicates that there was no previous
metric (or something is wrong with the gather flow).
If the date of the metric is None, then the second value will
be None as well.
If the date of the metric is set, but None is returned for
the value then the LPAR had no metrics for it. Scenarios can
occur where the current metric may have a value but not the
previous (ex. when a LPAR was just created).
"""
# Refresh if needed. Will no-op if no refresh is required.
self._refresh_if_needed()
# No metric, no operation.
if self.cur_metric is None:
return self.cur_date, None
return self.cur_date, self.cur_metric.get(lpar_uuid)
@lockutils.synchronized('pvm_lpar_metrics_get')
def get_previous_metric(self, lpar_uuid):
"""Returns the previous metric for a given LPAR.
This will NOT update the cache. That can only be triggered from the
get_latest_metric method.
:param lpar_uuid: The UUID of the LPAR to query for the metrics.
:return: Two elements.
- First is the date of the metric.
- Second is the LparMetric
Note that both of these can be None. If the date of the
metric is None, that indicates that there was no previous
metric (or something is wrong with the gather flow).
If the date of | |
# select colors
col = ''
if '5' in nm: col='orange'
if 'f_s001' in nm: col='black'
if nm == 'f_s1_neg': col='blue'
if nm == 'i_s1': col='red'
if nm == 'i_s01': col='green'
if '3' in nm: col = 'darkgreen'
if '7' in nm: col = 'darkorange'
if nm=='M0' or nm =='K0': col='black'
if nm=='M01_c' or nm=='K01_c': col='green'
if nm=='M03' or nm=='K03': col='darkgreen'
if 'M1' in nm or 'K1' in nm: col='red'
if 'ac' in nm: col='blue'
if 'noh' in nm: col='black'
if 'ini' in nm or 'hel' in nm: col='red'
if col == '':
        print(nm + ' has not been considered, assigned to color blue')
col = 'blue'
return col
def plot_helicity_vs_t(runs, type='ini', save=True):
"""
Function that generates the plot of the total magnetic or kinetic helicity
as a function of time.
It corresponds to figure 4 of <NAME>, <NAME>, <NAME>,
and <NAME>, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
Arguments:
runs -- dictionary that includes the run variables
type -- selects the types of runs to be plotted (default 'ini', other
option is 'forc'), i.e., runs with an initial magnetic field
('ini') or runs in which the magnetic field is initially driven
during the simulation ('forc')
save -- option to save the figure in plots/sigmaM_vs_t_'type'.pdf'
(default True)
"""
if type == 'ini': RR = ['i_s01', 'i_s03', 'i_s05', 'i_s07', 'i_s1']
elif type == 'forc':
RR = ['f_s001', 'f_s001_neg', 'f_s03', 'f_s05', 'f_s07', 'f_s1_neg']
elif type == 'kin': RR = ['K0', 'K01_c', 'K03', 'K05', 'K1']
elif type == 'mag': RR = ['M0', 'M01_c', 'M03', 'M05', 'M1']
plt.figure(figsize=(12,8))
for i in RR:
run = runs.get(i)
col = assign_col(i)
if run.turb == 'm': sp = 'mag'
if run.turb == 'k': sp = 'kin'
t = np.array(run.spectra.get('t_hel' + sp), dtype='float')[:, 0]
t2 = run.spectra.get('t_' + sp)
EM = np.array(run.spectra.get(sp), dtype='float')
HkM = run.spectra.get('hel' + sp + '_comp')
k = run.spectra.get('k')
EMs_mean = np.trapz(EM, k, axis=1)
HMs_mean = np.trapz(HkM, k, axis=1)
EMs_mean = np.interp(t, t2, EMs_mean)
eps = abs(HMs_mean)/EMs_mean
plt.plot(t - 1, eps, '.', color=col)
if col == 'black': sig = 0
if col == 'green': sig = 0.1
if col == 'darkgreen': sig = 0.3
if col == 'orange': sig = 0.5
if col == 'darkorange': sig = 0.7
if col == 'red' or col == 'blue': sig = 1.
eps = 2*sig/(1 + sig**2)
plt.hlines(eps, 1e-5, 5, color=col, ls='dashed', lw=0.5)
if type == 'ini': tp = 'ini'
else: tp = 'forc'
#line_s0, line_s001, line_s01, line_s03, line_s05, line_s07, \
# line_s1, line_s1_neg, = get_lines_sig(tp)
#hdls1 = [line_s001, line_s01, line_s03, line_s05, line_s07, line_s1,]
#plt.legend(handles=hdls1, fontsize=24, loc='center left')
MM = 'M'
if type == 'kin': MM = 'K'
sig_s = r'$\sigma_{\rm %s}^{\rm %s} = $'%(MM, tp)
if type != 'forc':
plt.text(2.5e-3, .12, sig_s + ' 0.1', color='green', fontsize=24)
if type != 'ini':
plt.text(2.5e-3, -.08, sig_s + ' 0', color='black', fontsize=24)
else:
plt.text(2.5e-3, .04, sig_s + ' $\pm 0.01$', color='black', fontsize=24)
plt.text(2.5e-3, .47, sig_s + ' 0.3', color='darkgreen', fontsize=24)
plt.text(2.5e-3, .72, sig_s + ' 0.5', color='orange', fontsize=24)
if type == 'ini' or type == 'forc':
plt.text(2.5e-3, .86, sig_s + ' 0.7', color='darkorange', fontsize=24)
if type != 'forc':
plt.text(2.5e-3, 1.04, sig_s + ' $1$', color='red', fontsize=24)
else:
plt.text(2.5e-3, 1.04, sig_s + ' $-1$', color='blue', fontsize=24)
plot_sets.axes_lines()
plt.xscale('log')
plt.xlim(2e-3, 5e-1)
plt.ylim(-.1, 1.13)
if type == 'mag' or type == 'kin':
plt.ylim(-.15, 1.13)
plt.xlim(2e-3, 1.5e0)
plt.xlabel('$\delta t=t-1$')
plt.ylabel(r'$|{\cal P}_{\rm M}(t)|$')
plt.yticks(np.linspace(0, 1, 5))
if save: plt.savefig('plots/sigmaM_vs_t_' + type + '.pdf',
bbox_inches='tight')
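# Hedged helper (not in the original script): the dashed reference lines drawn in
# plot_helicity_vs_t use the analytic relation P = 2*sigma/(1 + sigma**2) between
# the fractional helicity sigma of the source and the resulting polarization.
def expected_polarization(sigma):
    """Return the polarization degree 2*sigma/(1 + sigma**2) for a fractional helicity sigma."""
    return 2*sigma/(1 + sigma**2)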
def plot_PGW(runs, PPh='GW', type='ini', save=True):
"""
Function that plots the GW polarization spectra, averaging over
times after the GW energy and helicity have entered stationary oscillatory
stages (this needs to be previously computed and stored in the run variable,
see initialize_JCAP_2021.py).
It corresponds to figure 5 of <NAME>, <NAME>, <NAME>,
and <NAME>, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
Arguments:
runs -- dictionary of variables run with spectral information
type -- selects the types of runs to be plotted (default 'ini', other
option is 'forc'), i.e., runs with an initial magnetic field
('ini') or runs in which the magnetic field is initially driven
during the simulation ('forc')
save -- option to save the figure in plots/PGW_'type'_sigma.pdf'
(default True)
"""
plt.rcParams.update({'xtick.labelsize': 'xx-large',
'ytick.labelsize': 'xx-large',
'axes.labelsize': 'xx-large'})
if type == 'ini': RR = ['i_s01', 'i_s03', 'i_s05', 'i_s07', 'i_s1']
elif type == 'forc':
RR = ['f_s001', 'f_s001_neg', 'f_s03', 'f_s05', 'f_s07', 'f_s1_neg']
elif type == 'kin': RR = ['K0', 'K01_c', 'K03', 'K05', 'K1']
elif type == 'mag': RR = ['M0', 'M01_c', 'M03', 'M05', 'M1']
if PPh == 'GW': PP = 'GW'
if PPh == 'h': PP = 'h'
fig, ax = plt.subplots(figsize=(12,10))
for i in RR:
# select colors
col = assign_col(i)
run = runs.get(i)
k = run.spectra.get('k')[1:]
PGW = run.spectra.get('P' + PP + '_stat_sp')
PGW_min = run.spectra.get('P' + PP + '_min_sp')
PGW_max = run.spectra.get('P' + PP + '_max_sp')
plt.plot(k, PGW, color=col, lw=2)
plt.fill_between(k, PGW_min, PGW_max, alpha=0.3, color=col)
for i in range(0, len(k)):
plt.vlines(k[i], PGW_min[i], PGW_max[i], color=col, lw=0.6,
ls='dashed')
plot_sets.axes_lines()
plt.xscale('log')
plt.xlabel('$k$')
plt.ylabel(r'${\cal P}_{\rm %s} (k)$'%PP)
sigs = []
plt.xlim(120, 5e4)
tp = 'forc'
if type == 'forc':
sigs = ['0.01', '-0.01', '0.3', '0.5', '0.7', '-1']
cols = ['black', 'black', 'darkgreen', 'orange', 'darkorange', 'blue']
plt.ylim(-1.15, 1.15)
plt.yticks(np.linspace(-1, 1, 9))
if PPh == 'GW':
xxs = [7e2, 7e2, 7e2, 8e3, 2.5e3, 7e2]
yys = [.2, -.25, .4, .55, .9, -.9, -.9]
plt.text(3e4, -0.9, '(b)', fontsize=30)
else:
xxs = [5e2, 5e2, 5e2, 4e3, 3e3, 5e2]
yys = [.25, -.25, .5, .5, .7, -.8]
plt.text(3e4, -0.9, '(d)', fontsize=30)
else:
if type == 'ini':
tp = 'ini'
sigs = ['0.1', '0.3', '0.5', '0.7', '1']
cols = ['green', 'darkgreen', 'orange', 'darkorange', 'red']
if PPh == 'GW':
xxs = [1e4, 1e4, 1e4, 1e4, 1e3]
yys = [.3, .5, .75, 1.05, 1.05]
plt.text(3e4, -0.35, '(a)', fontsize=30)
else:
xxs = [1.5e4, 1.5e4, 1.5e4, 1.5e4, 1e3]
yys = [0.05, 0.35, 0.62, 1.05, 1.05]
plt.text(3e4, -0.35, '(c)', fontsize=30)
else:
line_s0, line_s001, line_s01, line_s03, line_s05, line_s07, \
line_s1, line_s1_neg, = get_lines_sig(tp)
hdls = [line_s1, line_s05, line_s03, line_s01, line_s0,]
plt.legend(handles=hdls, fontsize=24, loc='upper right',
frameon=False)
plt.xlim(120, 3e4)
plt.ylim(-.5, 1.2)
for i in range(0, len(sigs)):
plt.text(xxs[i], yys[i],
r'$\sigma_{\rm M}^{\rm %s}=%s$'%(tp, sigs[i]),
fontsize=30, color=cols[i])
if save: plt.savefig('plots/P' + PPh + '_' + type + '_sigma.pdf',
bbox_inches='tight')
def get_lines_sig(tp):
sigs = r'\sigma_{\rm M}^{\rm %s} ='%tp
line_s0, = plt.plot([], [], color='black', lw=3,
label=r'$%s 0$'%sigs)
line_s001, = plt.plot([], [], color='black', lw=3,
label=r'$%s \pm 0.01$'%sigs)
line_s01, = plt.plot([], [], color='green', lw=3, label=r'$%s 0.1$'%sigs)
line_s03, = plt.plot([], [], color='darkgreen', lw=3,
label=r'$%s 0.3$'%sigs)
line_s05, = plt.plot([], [], color='orange', lw=3, label=r'$%s 0.5$'%sigs)
line_s07, = plt.plot([], [], color='darkorange', lw=3,
label='$%s 0.7$'%sigs)
line_s1, = plt.plot([], [], color='red', lw=3, label=r'$%s 1$'%sigs)
line_s1_neg, = plt.plot([], [], color='blue', lw=3, label=r'$%s -1$'%sigs)
return (line_s0, line_s001, line_s01, line_s03, line_s05, line_s07,
line_s1, line_s1_neg,)
def plot_PGW_vs_PM(runs, save=True):
"""
Function that generates the plot of the total GW polarization PGW as a
function of the total fractional helicity of the sourcing magnetic PM or
velocity PK field.
It corresponds to figure 6 of <NAME>, <NAME>, <NAME>,
and <NAME>, "Polarization of gravitational waves from helical MHD
turbulent sources," https://arxiv.org/abs/2107.05356.
Arguments:
runs -- dictionary that includes the run variables
save -- option to save the figure in plots/PGW_vs_PM.pdf'
(default True)
"""
plt.figure(figsize=(12,8))
for i in runs:
run = runs.get(i)
k = run.spectra.get('k')
EGW = run.spectra.get('EGW_stat_sp')
HGW = run.spectra.get('helEGW_stat_sp')
t = run.spectra.get('t_mag')
indt = 0
EM = run.spectra.get('mag')[indt, :]
HM = run.spectra.get('helmag_comp')[indt, :]
PM = np.trapz(HM, k)/np.trapz(EM, k)
PGW = np.trapz(HGW, k[1:])/np.trapz(EGW, k[1:])
if 'i' in i: plt.plot(abs(PM), abs(PGW), 'o', color='blue')
else: plt.plot(abs(PM), abs(PGW), 'x', color='red')
col = assign_col(i)
if col == 'black': sig = 0
if col == 'green': sig = 0.1
if col == 'darkgreen': sig = 0.3
if col == 'orange': sig = 0.5
if col == 'darkorange': sig = 0.7
if col == 'red' or col == 'blue': sig = 1.
if col == 'blue': col = 'red'
eps = 2*sig/(1 + sig**2)
plt.vlines(eps, 0, | |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Project   : Cuisine - Functions to write Fabric recipes
# -----------------------------------------------------------------------------
# Author : <NAME> <<EMAIL>>
# Author : <NAME> (gentoo port) <<EMAIL>>
# Author : <NAME> (distro checks and rpm port) <<EMAIL>>
# License : Revised BSD License
# -----------------------------------------------------------------------------
# Creation : 26-Apr-2010
# Last mod : 31-Oct-2011
# -----------------------------------------------------------------------------
"""
`cuisine` makes it easy to write automatic server installation
and configuration recipes by wrapping common administrative tasks
(installing packages, creating users and groups) in Python
functions.
`cuisine` is designed to work with Fabric and provide all you
need for getting your new server up and running in minutes.
Note, that right now, Cuisine only supports Debian-based Linux
systems.
See also:
- Deploying Django with Fabric
<http://lethain.com/entry/2008/nov/04/deploying-django-with-fabric>
- Notes on Python Fabric 0.9b1
<http://www.saltycrane.com/blog/2009/10/notes-python-fabric-09b1>`_
- EC2, fabric, and "err: stdin: is not a tty"
<http://blog.markfeeney.com/2009/12/ec2-fabric-and-err-stdin-is-not-tty.html>`_
:copyright: (c) 2011 by <NAME>, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import base64, bz2, crypt, hashlib, os, random, re, string, tempfile, subprocess
import fabric, fabric.api, fabric.operations, fabric.context_managers
VERSION = "0.1.1"
RE_SPACES = re.compile("[\s\t]+")
MAC_EOL = "\n"
UNIX_EOL = "\n"
WINDOWS_EOL = "\r\n"
# FIXME: MODE should be in the fabric env, as this is definitely not thread-safe
MODE_USER = "user"
MODE_SUDO = "sudo"
MODE = MODE_USER
# context managers and wrappers around fabric's run/sudo; used to
# either execute cuisine functions with sudo or as current user:
#
# with mode_sudo():
# pass
class mode_user(object):
"""Cuisine functions will be executed as the current user."""
def __init__(self):
global MODE
self._old_mode = MODE
MODE = MODE_USER
def __enter__(self):
pass
def __exit__(self, *args, **kws):
global MODE
MODE = self._old_mode
class mode_sudo(object):
"""Cuisine functions will be executed with sudo."""
def __init__(self):
global MODE
self._old_mode = MODE
MODE = MODE_SUDO
def __enter__(self):
pass
def __exit__(self, *args, **kws):
global MODE
MODE = self._old_mode
def run(*args, **kwargs):
"""A wrapper to Fabric's run/sudo commands, using the
    'cuisine.MODE' global to tell whether the command should be run as
regular user or sudo."""
if MODE == MODE_SUDO:
return fabric.api.sudo(*args, **kwargs)
else:
return fabric.api.run(*args, **kwargs)
def run_local(command):
"""A wrapper around subprocess"""
pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout
res = pipe.read()
# FIXME: Should stream the pipe, and only print it if fabric's properties allow it
print res
return pipe
def sudo(*args, **kwargs):
"""A wrapper to Fabric's run/sudo commands, using the
'cuisine.MODE' global to tell wether the command should be run as
regular user or sudo."""
return fabric.api.sudo(*args, **kwargs)
### decorators
def multiargs(function):
"""Decorated functions will be 'map'ed to every element of the
first argument if it is a list or a tuple, otherwise the function
will execute normally."""
def wrapper(*args, **kwargs):
if len(args) == 0:
return function()
arg = args[0]
args = args[1:]
if type(arg) in (tuple, list):
return map(lambda _: function(_, *args, **kwargs), arg)
else:
return function(arg, *args, **kwargs)
return wrapper
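# Hypothetical usage sketch (not part of cuisine): a @multiargs-decorated function
# is mapped over a list/tuple first argument and runs normally for a scalar one.
# The function name below is illustrative only.
@multiargs
def _shout_example(name):
    return name.upper()
# _shout_example("git")          -> "GIT"
# _shout_example(["git", "vim"]) -> ["GIT", "VIM"]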
### text_<operation> functions
def text_detect_eol(text):
# FIXME: Should look at the first line
if text.find("\r\n") != -1:
return WINDOWS_EOL
elif text.find("\n") != -1:
return UNIX_EOL
elif text.find("\r") != -1:
return MAC_EOL
else:
return "\n"
def text_get_line(text, predicate):
"""Returns the first line that matches the given predicate."""
for line in text.split("\n"):
if predicate(line):
return line
return ""
def text_normalize(text):
"""Converts tabs and spaces to single space and strips the text."""
return RE_SPACES.sub(" ", text).strip()
def text_nospace(text):
"""Converts tabs and spaces to single space and strips the text."""
return RE_SPACES.sub("", text).strip()
def text_replace_line(text, old, new, find=lambda old, new: old == new,
process=lambda _: _):
"""Replaces lines equal to 'old' with 'new', returning the new
text and the count of replacements."""
res = []
replaced = 0
eol = text_detect_eol(text)
for line in text.split(eol):
if find(process(line), process(old)):
res.append(new)
replaced += 1
else:
res.append(line)
return eol.join(res), replaced
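# Hypothetical usage sketch: replace a configuration line and get the number of
# replacements back.
# text_replace_line("a=1\nb=2", "b=2", "b=3")  -> ("a=1\nb=3", 1)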
def text_ensure_line(text, *lines):
"""Ensures that the given lines are present in the given text,
otherwise appends the lines that are not already in the text at
the end of it."""
eol = text_detect_eol(text)
res = list(text.split(eol))
for line in lines:
assert line.find(eol) == -1, \
"No EOL allowed in lines parameter: " + repr(line)
found = False
for l in res:
if l == line:
found = True
break
if not found:
res.append(line)
return eol.join(res)
def text_strip_margin(text, margin="|"):
res = []
eol = text_detect_eol(text)
for line in text.split(eol):
l = line.split(margin, 1)
if len(l) == 2:
_, line = l
res.append(line)
return eol.join(res)
def text_template(text, variables):
"""Substitutes '${PLACEHOLDER}'s within the text with the
corresponding values from variables."""
template = string.Template(text)
return template.safe_substitute(variables)
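# Hypothetical usage sketch: substitute ${PLACEHOLDER}s from a dict, leaving
# unknown placeholders untouched (safe_substitute).
# text_template("Hello ${NAME}", {"NAME": "world"})  -> "Hello world"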
### file_<operation> functions
def file_local_read(location):
"""Reads a *local* file from the given location, expanding '~' and
shell variables."""
p = os.path.expandvars(os.path.expanduser(location))
f = file(p, 'rb')
t = f.read()
f.close()
return t
def file_read(location):
"""Reads the *remote* file at the given location."""
    # NOTE: We use base64 here to be sure to preserve the encoding (UNIX/DOS/MAC) of EOLs
return base64.b64decode(run('cat "%s" | base64' % (location)))
def file_exists(location):
"""Tests if there is a *remote* file at the given location."""
return run('test -f "%s" && echo OK ; true' % (location)) == "OK"
def file_attribs(location, mode=None, owner=None, group=None,
recursive=False):
"""Updates the mode/owner/group for the remote file at the given
location."""
recursive = recursive and "-R " or ""
if mode:
run('chmod %s %s "%s"' % (recursive, mode, location))
if owner:
run('chown %s %s "%s"' % (recursive, owner, location))
if group:
run('chgrp %s %s "%s"' % (recursive, group, location))
def file_attribs_get(location):
"""Return mode, owner, and group for remote path.
Return mode, owner, and group if remote path exists, 'None'
otherwise.
"""
if file_exists(location):
fs_check = run('stat %s %s' % (location, '--format="%a %U %G"'))
(mode, owner, group) = fs_check.split(' ')
return {'mode': mode, 'owner': owner, 'group': group}
else:
return None
def file_write(location, content, mode=None, owner=None, group=None):
"""Writes the given content to the file at the given remote
location, optionally setting mode/owner/group."""
# FIXME: Big files are never transferred properly!
# Gets the content signature and write it to a secure tempfile
sig = hashlib.sha256(content).hexdigest()
fd, local_path = tempfile.mkstemp()
os.write(fd, content)
# Upload the content if necessary
if not file_exists(location) or sig != file_sha256(location):
fabric.operations.put(local_path, location, use_sudo=(MODE == MODE_SUDO))
# Remove the local temp file
os.close(fd)
os.unlink(local_path)
# Ensure that the signature matches
    assert sig == file_sha256(location)
    # Apply the requested mode/owner/group (mirrors file_append)
    file_attribs(location, mode, owner, group)
def file_ensure(location, mode=None, owner=None, group=None,
recursive=False):
"""Updates the mode/owner/group for the remote file at the given
location."""
if file_exists(location):
file_attribs(location,mode=mode,owner=owner,group=group)
else:
file_write(location,"",mode=mode,owner=owner,group=group)
def file_upload(remote, local):
"""Uploads the local file to the remote location only if the remote location does not
exists or the content are different."""
# FIXME: Big files are never transferred properly!
f = file(local, 'rb')
content = f.read()
f.close()
sig = hashlib.sha256(content).hexdigest()
if not file_exists(remote) or sig != file_sha256(remote):
fabric.operations.put(local, remote, use_sudo=(MODE == MODE_SUDO))
def file_update(location, updater=lambda x: x):
"""Updates the content of the given by passing the existing
content of the remote file at the given location to the 'updater'
function.
For instance, if you'd like to convert an existing file to all
uppercase, simply do:
> file_update("/etc/myfile", lambda _:_.upper())
"""
    assert file_exists(location), "File does not exist: " + location
new_content = updater(file_read(location))
assert type(new_content) in (str, unicode, fabric.operations._AttributeString), \
"Updater must be like (string)->string, got: %s() = %s" % \
(updater, type(new_content))
run('echo "%s" | base64 -d > "%s"' %
(base64.b64encode(new_content), location))
def file_append(location, content, mode=None, owner=None, group=None):
"""Appends the given content to the remote file at the given
location, optionally updating its mode/owner/group."""
run('echo "%s" | base64 -d >> "%s"' %
(base64.b64encode(content), location))
file_attribs(location, mode, owner, group)
def file_link(source, destination, symbolic=True, mode=None, owner=None, group=None):
"""Creates a (symbolic) link between source and destination on the remote host,
optionally setting its mode/owner/group."""
if symbolic:
run('ln -sf "%s" "%s"' % (source, destination))
else:
run('ln -f "%s" "%s"' % (source, destination))
file_attribs(destination, mode, owner, group)
def file_sha256(location):
"""Returns the SHA-256 sum (as a hex string) for the remote file at the given location"""
return run('sha256sum "%s" | cut -d" " -f1' % (location))
# TODO: From McCoy's version, consider merging
# def file_append( location, content, use_sudo=False, partial=False, escape=True):
# """Wrapper for fabric.contrib.files.append."""
# fabric.contrib.files.append(location, content, use_sudo, partial, escape)
### dir_<operation> functions
def dir_attribs(location, mode=None, owner=None, group=None, recursive=False):
"""Updates the mode/owner/group for the given remote directory."""
file_attribs(location, mode, owner, group, recursive)
def dir_exists(location):
"""Tells if there is a remote directory at the given location."""
return run('test -d "%s" && echo | |
_V: See the literature
"""
""" Initialize surrogate GP and related quantities, namely
_gp
        _N, _Mx, _L: GP training data dimensions N = dataset rows (datapoints), Mx = input columns, L = output columns
_lengthscale: ARD lengthscale vector
all of which are private and invariant.
"""
self._gp = gp
self._N, self._Mx, self._L = self._gp.N, self._gp.M, self._gp.L
self._lengthscale = self._gp.kernel.parameters.lengthscale[0, :]
if self._lengthscale.shape != (self._Mx,):
self._lengthscale = full(self._Mx, self._lengthscale[0], dtype=float, order=self.MEMORY_LAYOUT)
""" Initialize self.parameters.
For efficiency the parameters are mobilized in private, variant variables.
For calculation, _Theta is factored into _xi and Theta_old. """
self._Mu = Mu-1 if Mu-1 in range(self._Mx) else self._Mx - 1
if read_parameters:
super().__init__(self._gp.dir / self.NAME)
self.parameters_read = deepcopy(self.parameters)
#TODO: Consider trimming parameter dimension to Mu
else:
self.parameters_read = self.DEFAULT_PARAMETERS
self._Theta_old = eye(self._Mx, dtype=float, order=self.MEMORY_LAYOUT)
self._xi = None
self._D = -ones((self._L, self._L, self._Mx), dtype=float, order=self.MEMORY_LAYOUT)
self._S1 = -ones((self._L, self._L, self._Mx), dtype=float, order=self.MEMORY_LAYOUT)
""" Declare internal calculation stages. These are documented where they are calculated, in Sobol.calculate()."""
self._m = self._T_diagonal = self._Sigma_diagonal = self._Phi_diagonal = None
self._T_pre = self._T_pre_outer_square = self._Ft = self._Ft_1_Ft = None
self._objective_value = self._objective_jacobian = None
super().__init__(self._gp.dir / self.NAME, self.Parameters(self.Mu, self._Theta_old, self.Tensor3AsMatrix(self.D),
self.Tensor3AsMatrix(self.S), self.Tensor3AsMatrix(self._S1)))
self._validate_parameters()
self.calculate()
# noinspection PyPep8Naming
class ROM(Model):
""" Reduced Order Model (ROM) Calculator and optimizer.
This class is documented through its public properties."""
""" Required overrides."""
class GP_Initializer(IntEnum):
ORIGINAL = auto()
ORIGINAL_WITH_CURRENT_KERNEL = auto()
ORIGINAL_WITH_GUESSED_LENGTHSCALE = auto()
CURRENT = auto()
CURRENT_WITH_ORIGINAL_KERNEL = auto()
CURRENT_WITH_GUESSED_LENGTHSCALE = auto()
RBF = auto()
MEMORY_LAYOUT = "OVERRIDE_THIS with 'C','F' or 'A' (for C, Fortran or C-unless-All-input-is-Fortran-layout)."
Parameters = NamedTuple("Parameters", [('Mu', NP.Matrix), ('D', NP.Matrix), ('S1', NP.Matrix), ('S', NP.Matrix),
('lengthscale', NP.Matrix), ('log_likelihood', NP.Matrix)])
"""
**Mu** -- A numpy [[int]] specifying the number of input dimensions in the rotated basis u.
**D** -- An (L L, M) Matrix of cumulative conditional variances D[l,k,m] = S[l,k,m] D[l,k,M].
**S1** -- An (L L, M) Matrix of Sobol' main indices.
**S** -- An (L L, M) Matrix of Sobol' cumulative indices.
**lengthscale** -- A (1,M) Covector of ARD lengthscales, or a (1,1) RBF lengthscale.
**log_likelihood** -- A numpy [[float]] used to record the log marginal likelihood.
"""
DEFAULT_PARAMETERS = Parameters(*(atleast_2d(None),) * 6)
DEFAULT_OPTIMIZER_OPTIONS = {'iterations': 1, 'guess_identity_after_iteration': 1, 'sobol_optimizer_options': Sobol.DEFAULT_OPTIMIZER_OPTIONS,
'gp_initializer': GP_Initializer.CURRENT_WITH_GUESSED_LENGTHSCALE,
'gp_optimizer_options': GP.DEFAULT_OPTIMIZER_OPTIONS}
"""
    **iterations** -- The number of ROM iterations. Each ROM iteration essentially calls Sobol.optimize(options['sobol_optimizer_options'])
        followed by GP.optimize(options['gp_optimizer_options']).
    **sobol_optimizer_options** -- A Dict of Sobol optimizer options, similar to (and documented in) Sobol.DEFAULT_OPTIMIZER_OPTIONS.
**guess_identity_after_iteration** -- After this many ROM iterations, Sobol.optimize does no exploration,
just gradient descending from Theta = Identity Matrix.
**reuse_original_gp** -- True if GP.optimize is initialized each time from the GP originally provided.
**gp_optimizer_options** -- A Dict of GP optimizer options, similar to (and documented in) GP.DEFAULT_OPTIMIZER_OPTIONS.
"""
@classmethod
@abstractmethod
def from_ROM(cls, fold: Fold, name: str, suffix: str = ".0", Mu: int = -1, rbf_parameters: Optional[GP.Parameters] = None) -> 'ROM':
""" Create a ROM object from a saved ROM directory.
Args:
fold: The Fold housing the ROM to load.
name: The name of the saved ROM to create from.
suffix: The suffix to append to the most optimized gp.
Mu: The dimensionality of the rotated input basis u. If this is not in range(1, fold.M+1), Mu=fold.M is used.
Returns: The constructed ROM object
"""
        optimization_count = [optimized.name.count(cls.OPTIMIZED_GP_EXT) for optimized in fold.dir.glob(name + cls.OPTIMIZED_GP_EXT + "*")]
        source_gp_name = name + cls.OPTIMIZED_GP_EXT * max(optimization_count)
destination_gp_name = source_gp_name + suffix
return cls(name=name,
sobol=Sobol.from_GP(fold, source_gp_name, destination_gp_name, Mu=Mu, read_parameters=True),
optimizer_options=None, rbf_parameters=rbf_parameters)
@classmethod
@abstractmethod
def from_GP(cls, fold: Fold, name: str, source_gp_name: str, optimizer_options: Dict, Mu: int = -1,
rbf_parameters: Optional[GP.Parameters] = None) -> 'ROM':
""" Create a ROM object from a saved GP directory.
Args:
fold: The Fold housing the ROM to load.
name: The name of the saved ROM to create from.
source_gp_name: The source GP directory.
Mu: The dimensionality of the rotated input basis u. If this is not in range(1, fold.M+1), Mu=fold.M is used.
optimizer_options: A Dict of ROM optimizer options.
Returns: The constructed ROM object
"""
return cls(name=name,
sobol=Sobol.from_GP(fold=fold, source_gp_name=source_gp_name, destination_gp_name=name + ".0", Mu=Mu),
optimizer_options=optimizer_options, rbf_parameters=rbf_parameters)
OPTIMIZED_GP_EXT = ".optimized"
REDUCED_FOLD_EXT = ".reduced"
""" End of required overrides."""
@property
def name(self) -> str:
""" The name of this ROM."""
return self.dir.name
@property
def sobol(self) -> Sobol:
""" The Sobol object underpinning this ROM."""
return self._sobol
@property
    def gp(self) -> GP:
""" The GP underpinning this ROM."""
return self._gp
@property
def semi_norm(self) -> Sobol.SemiNorm:
""" A Sobol.SemiNorm on the (L,L) matrix of Sobol' indices, defining the ROM optimization objective ``semi_norm(D[:,:,m])``."""
return self._semi_norm
def gp_name(self, iteration: int) -> str:
""" The name of the GP produced by iteration."""
if iteration >= 0:
return "{0}.{1:d}".format(self.name, iteration)
else:
return "{0}{1}".format(self.name, self.OPTIMIZED_GB_EXT)
def _initialize_gp(self, iteration: int) -> GP:
if self._rbf_parameters is not None:
gp_initializer = self.GP_Initializer.RBF
parameters = self._rbf_parameters
gp_rbf = self.GPType(self._fold, self.gp_name(iteration) + ".rbf", parameters)
gp_rbf.optimize(**self._optimizer_options[-1]['gp_optimizer_options'])
gp_dir = gp_rbf.dir.parent / self.gp_name(iteration)
Model.copy(gp_rbf.dir, gp_dir)
kernel = type(self._gp.kernel)(None, None, gp_dir / GP.KERNEL_NAME)
kernel.make_ard(self._gp.M)
return self.GPType(self._fold, self.gp_name(iteration), parameters=None)
gp_initializer = self._optimizer_options[-1]['gp_initializer']
parameters = self._original_parameters if gp_initializer < self.GP_Initializer.CURRENT else self._gp.parameters
if isinstance(self._gp._kernel, model.gpy_.Kernel.ExponentialQuadratic):
if not self._gp.kernel.is_rbf:
if gp_initializer in (self.GP_Initializer.ORIGINAL_WITH_GUESSED_LENGTHSCALE, self.GP_Initializer.CURRENT_WITH_GUESSED_LENGTHSCALE):
lengthscale = einsum('MK, JK -> M', self._sobol.Theta_old, self._gp.kernel.parameters.lengthscale, optimize=True, dtype=float,
order=self.MEMORY_LAYOUT) * 0.5 * self._gp.M * (self._gp.M - arange(self._gp.M, dtype=float)) ** (-1)
elif gp_initializer in (self.GP_Initializer.CURRENT_WITH_ORIGINAL_KERNEL, self.GP_Initializer.ORIGINAL):
lengthscale = einsum('MK, JK -> M', self._Theta, self._original_parameters.kernel.parameters.lengthscale,
optimize=True, dtype=float, order=self.MEMORY_LAYOUT)
elif gp_initializer in (self.GP_Initializer.ORIGINAL_WITH_CURRENT_KERNEL, self.GP_Initializer.CURRENT):
lengthscale = einsum('MK, JK -> M', self._sobol.Theta_old, self._gp.kernel.parameters.lengthscale, optimize=True, dtype=float,
order=self.MEMORY_LAYOUT)
parameters = parameters._replace(kernel=self._gp.kernel.Parameters(lengthscale=lengthscale))
elif isinstance(self._gp._kernel, model.gpy_.Kernel.RationalQuadratic):
            if not self._gp.kernel._is_rbf:  # if _is_rbf is False the lengthscale guess below is applied; if True (i.e. the lengthscale is not a matrix) the parameters are left unchanged
if gp_initializer in (self.GP_Initializer.ORIGINAL_WITH_GUESSED_LENGTHSCALE, self.GP_Initializer.CURRENT_WITH_GUESSED_LENGTHSCALE):
lengthscale = einsum('MK, JK -> M', self._sobol.Theta_old, self._gp.kernel.parameters.lengthscale, optimize=True, dtype=float,
order=self.MEMORY_LAYOUT) * 0.5 * self._gp.M * (self._gp.M - arange(self._gp.M, dtype=float)) ** (-1)
elif gp_initializer == self.GP_Initializer.CURRENT_WITH_ORIGINAL_KERNEL:
lengthscale = einsum('MK, JK -> M', self._Theta, self._original_parameters.kernel.parameters.lengthscale,
optimize=True, dtype=float, order=self.MEMORY_LAYOUT)
elif gp_initializer == self.GP_Initializer.ORIGINAL_WITH_CURRENT_KERNEL:
lengthscale = einsum('MK, JK -> M', self._sobol.Theta_old, self._gp.kernel.parameters.lengthscale, optimize=True, dtype=float,
order=self.MEMORY_LAYOUT)
parameters = parameters._replace(kernel=self._gp.kernel.Parameters(lengthscale=lengthscale))
else:
print("The kernel chosen has not had the code written to have all it's parameters optimised.")
return self.GPType(self._fold, self.gp_name(iteration), parameters)
def optimize(self, options: Dict):
""" Optimize the model parameters. Do not call super().optimize, this interface only contains suggestions for implementation.
Args:
options: A Dict of implementation-dependent optimizer options, following the format of ROM.DEFAULT_OPTIMIZER_OPTIONS.
"""
if options is not self._optimizer_options[-1]:
self._optimizer_options.append(options)
self._semi_norm = Sobol.SemiNorm.from_meta(self._optimizer_options[-1]['sobol_optimizer_options']['semi_norm'])
self._sobol_reordering_options['semi_norm'] = self._semi_norm
self._optimizer_options[-1]['sobol_optimizer_options']['semi_norm'] = self._semi_norm.meta
self._write_optimizer_options(self._optimizer_options)
iterations = self._optimizer_options[-1]['iterations']
if iterations < 1 or self._optimizer_options[-1]['sobol_optimizer_options']['N_exploit'] < 1:
if not iterations <= 1:
warn("Your ROM optimization does not allow_rotation so iterations is set to 1, instead of {0:d}.".format(iterations), UserWarning)
iterations = 1
guess_identity_after_iteration = self._optimizer_options[-1]['guess_identity_after_iteration']
if guess_identity_after_iteration < 0:
guess_identity_after_iteration = iterations
sobol_guess_identity = {**self._optimizer_options[-1]['sobol_optimizer_options'], 'N_explore': 1}
self._Theta = self._sobol.Theta_old
for iteration in range(iterations):
self._gp = self._initialize_gp(iteration + 1)
self.calculate()
self.write_parameters(self.Parameters(
concatenate((self.parameters.Mu, atleast_2d(self._sobol.Mu)), axis=0),
concatenate((self.parameters.D, atleast_2d(self._semi_norm.value(self._sobol.D))), axis=0),
concatenate((self.parameters.S1, atleast_2d(self._semi_norm.value(self._sobol.S1))), axis=0),
concatenate((self.parameters.S, atleast_2d(self._semi_norm.value(self._sobol.S))), axis=0),
concatenate((self.parameters.lengthscale, atleast_2d(self._sobol.lengthscale)), axis=0),
concatenate((self.parameters.log_likelihood, atleast_2d(self._gp.log_likelihood)), axis=0)))
if iteration < guess_identity_after_iteration:
self._sobol.optimize(**self._optimizer_options[-1]['sobol_optimizer_options'])
else:
self._sobol.optimize(**sobol_guess_identity)
self._Theta = einsum('MK, KL -> ML', self._sobol.Theta_old, self._Theta)
self._gp = self._initialize_gp(-1)
self.calculate()
self._gp.test()
self.write_parameters(self.Parameters(
concatenate((self.parameters.Mu, atleast_2d(self._sobol.Mu)), axis=0),
concatenate((self.parameters.D, atleast_2d(self._semi_norm.value(self._sobol.D))), axis=0),
concatenate((self.parameters.S1, atleast_2d(self._semi_norm.value(self._sobol.S1))), axis=0),
concatenate((self.parameters.S, atleast_2d(self._semi_norm.value(self._sobol.S))), axis=0),
concatenate((self.parameters.lengthscale, atleast_2d(self._sobol.lengthscale)), axis=0),
concatenate((self.parameters.log_likelihood, atleast_2d(self._gp.log_likelihood)), axis=0)))
column_headings = ("x{:d}".format(i) for i in range(self._sobol.Mu))
frame = Frame(self._sobol.parameters_csv.Theta, DataFrame(self._Theta, columns=column_headings))
frame.write()
def reduce(self, Mu: int = -1):
"""
Args:
Mu: The reduced dimensionality Mu &le sobol.Mu. If Mu &le 0, then Mu = sobol.Mu.
Returns:
"""
def calculate(self):
""" Calculate the Model. """
self._gp.optimize(**self._optimizer_options[-1]['gp_optimizer_options'])
self._sobol = self.SobolType(self._gp)
def __init__(self, name: str, sobol: Sobol, optimizer_options: Dict = DEFAULT_OPTIMIZER_OPTIONS,
rbf_parameters: Optional[GP.Parameters] = None):
""" Initialize ROM object.
Args:
sobol: The Sobol object to construct the ROM from.
optimizer_options: A List[Dict] similar to (and documented in) ROM.DEFAULT_OPTIMIZER_OPTIONS.
"""
self._rbf_parameters = rbf_parameters
self._sobol = sobol
self._gp = sobol.gp
self._original_parameters = self._gp.parameters._replace(kernel=self._gp.kernel.parameters)
self._sobol_reordering_options = deepcopy(Sobol.DEFAULT_OPTIMIZER_OPTIONS)
self._fold = Fold(self._gp.fold.dir.parent, self._gp.fold.meta['k'], self._sobol.Mu)
self.SobolType = deepcopy(type(self._sobol))
self.GPType = deepcopy(type(self._gp))
if | |
in a 0. * `single-period` /
`climatology-period`: A single period of arbitrary length
:return: A data cube with the same dimensions. The dimension properties (name, type, labels, reference
system and resolution) remain unchanged.
"""
return anomaly(data=self, normals=normals, period=period)
def any(self, ignore_nodata=UNSET) -> 'ProcessBuilder':
"""
Is at least one value true?
:param self: A set of boolean values.
:param ignore_nodata: Indicates whether no-data values are ignored or not and ignores them by default.
:return: Boolean result of the logical operation.
"""
return any(data=self, ignore_nodata=ignore_nodata)
def apply(self, process, context=UNSET) -> 'ProcessBuilder':
"""
Apply a process to each pixel
:param self: A data cube.
:param process: A process that accepts and returns a single value and is applied on each individual
value in the data cube. The process may consist of multiple sub-processes and could, for example,
consist of processes such as ``abs()`` or ``linear_scale_range()``.
:param context: Additional data to be passed to the process.
:return: A data cube with the newly computed values and the same dimensions. The dimension properties
(name, type, labels, reference system and resolution) remain unchanged.
"""
return apply(data=self, process=process, context=context)
def apply_dimension(self, process, dimension, target_dimension=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Apply a process to pixels along a dimension
:param self: A data cube.
:param process: Process to be applied on all pixel values. The specified process needs to accept an
array and must return an array with at least one element. A process may consist of multiple sub-
processes.
:param dimension: The name of the source dimension to apply the process on. Fails with a
`DimensionNotAvailable` exception if the specified dimension does not exist.
:param target_dimension: The name of the target dimension or `null` (the default) to use the source
dimension specified in the parameter `dimension`. By specifying a target dimension, the source
dimension is removed. The target dimension with the specified name and the type `other` (see
``add_dimension()``) is created, if it doesn't exist yet.
:param context: Additional data to be passed to the process.
:return: A data cube with the newly computed values. All dimensions stay the same, except for the
dimensions specified in corresponding parameters. There are three cases how the dimensions can change:
1. The source dimension is the target dimension: - The (number of) dimensions remain unchanged as
the source dimension is the target dimension. - The source dimension properties name and type remain
unchanged. - The dimension labels, the reference system and the resolution are preserved only if the
number of pixel values in the source dimension is equal to the number of values computed by the
process. Otherwise, all other dimension properties change as defined in the list below. 2. The source
dimension is not the target dimension and the latter exists: - The number of dimensions decreases by
one as the source dimension is dropped. - The target dimension properties name and type remain
unchanged. All other dimension properties change as defined in the list below. 3. The source dimension
is not the target dimension and the latter does not exist: - The number of dimensions remain
unchanged, but the source dimension is replaced with the target dimension. - The target dimension
has the specified name and the type other. All other dimension properties are set as defined in the
list below. Unless otherwise stated above, for the given (target) dimension the following applies: -
the number of dimension labels is equal to the number of values computed by the process, - the
dimension labels are incrementing integers starting from zero, - the resolution changes, and - the
reference system is undefined.
"""
return apply_dimension(data=self, process=process, dimension=dimension, target_dimension=target_dimension, context=context)
def apply_kernel(self, kernel, factor=UNSET, border=UNSET, replace_invalid=UNSET) -> 'ProcessBuilder':
"""
Apply a spatial convolution with a kernel
:param self: A data cube.
:param kernel: Kernel as a two-dimensional array of weights. The inner level of the nested array aligns
with the `x` axis and the outer level aligns with the `y` axis. Each level of the kernel must have an
uneven number of elements, otherwise the process throws a `KernelDimensionsUneven` exception.
:param factor: A factor that is multiplied to each value after the kernel has been applied. This is
basically a shortcut for explicitly multiplying each value by a factor afterwards, which is often
required for some kernel-based algorithms such as the Gaussian blur.
:param border: Determines how the data is extended when the kernel overlaps with the borders. Defaults
to fill the border with zeroes. The following options are available: * *numeric value* - fill with a
user-defined constant number `n`: `nnnnnn|abcdefgh|nnnnnn` (default, with `n` = 0) * `replicate` -
repeat the value from the pixel at the border: `aaaaaa|abcdefgh|hhhhhh` * `reflect` - mirror/reflect
from the border: `fedcba|abcdefgh|hgfedc` * `reflect_pixel` - mirror/reflect from the center of the
pixel at the border: `gfedcb|abcdefgh|gfedcb` * `wrap` - repeat/wrap the image:
`cdefgh|abcdefgh|abcdef`
:param replace_invalid: This parameter specifies the value to replace non-numerical or infinite
numerical values with. By default, those values are replaced with zeroes.
:return: A data cube with the newly computed values and the same dimensions. The dimension properties
(name, type, labels, reference system and resolution) remain unchanged.
"""
return apply_kernel(data=self, kernel=kernel, factor=factor, border=border, replace_invalid=replace_invalid)
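    # Hypothetical usage sketch (comment only; 'cube' is an illustrative
    # ProcessBuilder instance): a 3x3 box blur, dividing by 9 via 'factor'.
    #   cube.apply_kernel(kernel=[[1, 1, 1], [1, 1, 1], [1, 1, 1]], factor=1.0 / 9)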
def apply_neighborhood(self, process, size, overlap=UNSET, context=UNSET) -> 'ProcessBuilder':
"""
Apply a process to pixels in a n-dimensional neighborhood
:param self: A data cube.
:param process: Process to be applied on all neighborhoods.
:param size: Neighborhood sizes along each dimension. This object maps dimension names to either a
physical measure (e.g. 100 m, 10 days) or pixels (e.g. 32 pixels). For dimensions not specified, the
default is to provide all values. Be aware that including all values from overly large dimensions may
not be processed at once.
:param overlap: Overlap of neighborhoods along each dimension to avoid border effects. For instance a
temporal dimension can add 1 month before and after a neighborhood. In the spatial dimensions, this is
often a number of pixels. The overlap specified is added before and after, so an overlap of 8 pixels
will add 8 pixels on both sides of the window, so 16 in total. Be aware that large overlaps increase
the need for computational resources and modifying overlapping data in subsequent operations have no
effect.
:param context: Additional data to be passed to the process.
:return: A data cube with the newly computed values and the same dimensions. The dimension properties
(name, type, labels, reference system and resolution) remain unchanged.
"""
return apply_neighborhood(data=self, process=process, size=size, overlap=overlap, context=context)
def arccos(self) -> 'ProcessBuilder':
"""
Inverse cosine
:param self: A number.
:return: The computed angle in radians.
"""
return arccos(x=self)
def arcosh(self) -> 'ProcessBuilder':
"""
Inverse hyperbolic cosine
:param self: A number.
:return: The computed angle in radians.
"""
return arcosh(x=self)
def arcsin(self) -> 'ProcessBuilder':
"""
Inverse sine
:param self: A number.
:return: The computed angle in radians.
"""
return arcsin(x=self)
def arctan(self) -> 'ProcessBuilder':
"""
Inverse tangent
:param self: A number.
:return: The computed angle in radians.
"""
return arctan(x=self)
def arctan2(self, x) -> 'ProcessBuilder':
"""
Inverse tangent of two numbers
:param self: A number to be used as the dividend.
:param x: A number to be used as the divisor.
:return: The computed angle in radians.
"""
return arctan2(y=self, x=x)
def ard_normalized_radar_backscatter(self, elevation_model=UNSET, contributing_area=UNSET, ellipsoid_incidence_angle=UNSET, noise_removal=UNSET) -> 'ProcessBuilder':
"""
CARD4L compliant SAR NRB generation
:param self: The source data cube containing SAR input.
:param elevation_model: The digital elevation model to use. Set to `null` (the default) to allow the
back-end to choose, which will improve portability, but reduce reproducibility.
:param contributing_area: If set to `true`, a DEM-based local contributing area band named
`contributing_area` is added. The values are given in square meters.
:param ellipsoid_incidence_angle: If set to `true`, an ellipsoidal incidence angle band named
`ellipsoid_incidence_angle` is added. The values are given in | |
import networkx as nx
import random as rnd
import matplotlib.pyplot as plt
import numpy as np
import multiprocessing as mp
from numpy import sqrt
from enum import Enum
class execution_type(Enum):
static_part1 = 0
static_part2 = 1
static = 2
dynamic = 3
class weight_edges(Enum):
linear = 0
variable = 1
class genetic_graph:
def __init__(self,G,configuration=weight_edges.variable):
self.G = G
self.configuration = configuration
self.createNodes()
self.createEdges()
operations = graph_operations(self.G)
operations.computeClusters()
nx.draw(self.G)
plt.show()
def createNodes(self):
nx.set_node_attributes(self.G,'concept',None)
rnd.seed()
value = 0
for i in self.G.nodes_iter():
self.G.node[i]['id'] = value
self.G.node[i]['concept'] = rnd.randint(0,9)
value = value + 1
def createEdges(self):
value = 0
for i in self.G.edges_iter():
self.G.edge[i[0]][i[1]]['id'] = value
if self.configuration == weight_edges.variable:
self.G.edge[i[0]][i[1]]['weight'] = 1
value = value + 1
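# Hypothetical usage sketch (not part of the original module): build a random
# graph and run the clustering pipeline defined below. Graph size and edge
# probability are illustrative only.
# G = nx.erdos_renyi_graph(30, 0.2)
# genetic_graph(G, configuration=weight_edges.variable)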
class graph_operations:
def __init__(self,G,option=execution_type.dynamic):
self.nodes = []
self.G = G
self.option = option
self.createHubs()
def createHubs(self):
self.hub_vertexes = []
self.non_hub_vertexes = []
self.HVSs = []
self.clusters = []
self.num_percentage_vertexes = 20
self.num_hub_vertexes = int(self.G.number_of_nodes() * self.num_percentage_vertexes/100.0)
self.hub_score = 1
self.no_hub_score = 0.5
def get_non_hub_vertexes(self):
return self.non_hub_vertexes
def get_HVSs(self):
return self.HVSs
def getSalienceRanking(self):
for i in self.G.nodes_iter():
new_salience = node_values(self.G.node[i]['id'],len(self.G.neighbors(i)))
self.nodes.append(new_salience)
self.nodes = sorted(self.nodes, key = lambda node_values: node_values.get_value())
return self.nodes
def computeClusters(self):
        # Obtain the HVSs.
self.initHVS()
self.generateHVSs()
        # Merge HVSs whose internal connectivity is lower than their connectivity with each other.
if self.option == execution_type.static_part1 or self.option == execution_type.static:
hvs_connectivity = HVSConnectivityGenetic(self)
connectivity = hvs_connectivity.evolution()
self.assignSolutiontoConnectivity(connectivity)
else:
self.interconnectHVSs()
print("HVSs",self.HVSs)
if self.option == execution_type.static_part2 or self.option == execution_type.static:
hvs_internal = HVSInternalGenetic(self)
internal = hvs_internal.evolution()
self.assignSolutiontoHVS(internal)
else:
self.similarityHVSs()
print("HVSs",self.HVSs)
        # Extract the HVSs with only one element and move them to the non hub vertices list.
self.extractNodesWithOneVertex()
print("HVSs",self.HVSs)
        # Assign the 'non hub vertices' to the clusters
non_hub = NonHubGenetic(self)
solution = non_hub.evolution();
self.assignSolutiontoClusters(solution)
#self.assignNonHubToClusters()
print("Clusters:")
for i in range(len(self.clusters)):
print(self.clusters[i])
def assignSolutiontoConnectivity(self,solution):
for i in range(0,len(solution)):
connectivity = solution[i].get_value()[0]
if connectivity != -1:
new_connectivity = solution[i].get_value()[1]
position = solution[i].get_iden()
self.HVSs[new_connectivity].append(self.HVSs[position][connectivity])
self.HVSs[position].pop(connectivity)
i = 0
while i in range(0,len(self.HVSs)):
if len(self.HVSs[i]) == 0:
self.HVSs.pop(i)
else:
i = i + 1
def assignSolutiontoHVS(self,solution):
pops = []
for i in range(0,len(solution)):
connection = solution[i].get_value()
if connection != -1:
position = solution[i].get_iden()
self.HVSs[position].extend(self.HVSs[connection])
pops.append(connection)
for i in range(0,len(pops)):
self.HVSs.pop(i)
def assignSolutiontoClusters(self,solution):
for i in range(len(self.HVSs)):
self.clusters.append(self.HVSs[i])
for i in range(0,len(solution)):
chromosome = solution[i]
iden = chromosome.get_iden()
cluster = chromosome.get_value()
if cluster != -1:
self.clusters[cluster].append(iden)
def initHVS(self):
        # Obtain the 'n' hub vertices and the 'N-n' non hub vertices.
ranking = self.getSalienceRanking()
stop = len(ranking) - self.num_hub_vertexes - 2
for i in range(len(ranking)-1,stop,-1):
self.hub_vertexes.append(ranking[i].get_iden())
print("hubs:",self.hub_vertexes)
start = len(ranking) - self.num_hub_vertexes - 2
for i in range(start,0,-1):
self.non_hub_vertexes.append(ranking[i].get_iden())
def generateHVSs(self):
        # Initially, create one HVS per hub vertex.
for i in range(len(self.hub_vertexes)):
iden = self.hub_vertexes[i]
hvs = []
hvs.append(iden)
self.HVSs.append(hvs)
def interconnectHVSs(self):
        # For each hub vertex, check whether there is an HVS other than the one it
        # belongs to with which it has a higher connectivity than with its own.
change = True
while(change):
change = False
i = 0
while (i < len(self.HVSs)):
vertexes = self.HVSs[i]
j = 0
while (j < len(vertexes)):
iden = vertexes[j]
intraconnection = self.getConnectionWithHVS(iden,self.HVSs[i])
interconnection = self.getMaxConnectionWithHVSs(iden,intraconnection)
                    if interconnection[0] != -1 and interconnection[1] != 0: # There is another HVS it is more strongly connected to.
                        # Move the vertex to that HVS.
                        change = True
                        self.HVSs[i].pop(j)
                        self.HVSs[interconnection[0]].append(iden)
                        print(iden)
else:
j = j + 1
if len(vertexes) == 0:
self.HVSs.pop(i)
else:
i = i + 1
def similarityHVSs(self):
change = True
while(change):
change = False
pops = []
for i in range(len(self.HVSs)):
hvs1 = self.HVSs[i]
j = i
while (j < len(self.HVSs)):
hvs2 = self.HVSs[j]
intra_sim1 = self.getIntraSimilarity(hvs1)
intra_sim2 = self.getIntraSimilarity(hvs2)
inter_sim = self.getInterSimilarity(hvs1,hvs2)
if (inter_sim > intra_sim1 or inter_sim > intra_sim2):
                        # Merge both HVSs.
                        print("merging HVSs")
self.HVSs[i].extend(hvs2)
pops.append(j)
change = True
j = j + 1
for i in pops:
print("entra")
self.HVSs.pop(i)
    # Function that returns the graph node with the given identifier.
def getNodeFromIden(self,iden):
result = None
for i in self.G.nodes_iter():
node = self.G.node[i]
if iden == node['id']:
result = node
break
return result
    # Function that returns the HVS with which a concept has the highest connectivity, if it exceeds its internal connectivity.
def getMaxConnectionWithHVSs(self,iden,intraconnection):
max_connection = 0.0
max_position = -1
result = []
result.append(-1)
result.append(-1)
for i in range(len(self.HVSs)):
connection = self.getConnectionWithHVS(iden,self.HVSs[i]);
if (connection > max_connection):
max_connection = connection
max_position = i
if (max_connection > intraconnection):
result[0] = max_position
result[1] = max_connection
else:
result[0] = -1;
result[1] = -1;
return result
    # Function that returns the connectivity of a concept with respect to an HVS.
def getConnectionWithHVS(self,iden,vertexes):
node = self.getNodeFromIden(iden)
print(node)
neighbors = self.G.neighbors(node['id'])
connection = 0.0
for i in range(len(neighbors)):
neighbor_iden = neighbors[i]
if neighbor_iden in vertexes:
neighbor = self.getNodeFromIden(neighbor_iden)
if self.G.has_edge(node['id'],neighbor['id']):
edge_data = self.G.get_edge_data(node['id'],neighbor['id'])
connection = edge_data['weight']
break
return connection
    # Function that computes the similarity (connectivity) between the concepts of an HVS.
def getIntraSimilarity(self,vertexes):
similarity = 0.0;
for i in range(len(vertexes)):
iden = vertexes[i]
node = self.getNodeFromIden(iden)
neighbors = self.G.neighbors(node['id'])
for j in range(len(neighbors)):
neighbor_iden = neighbors[j]
if neighbor_iden in vertexes:
neighbor = self.getNodeFromIden(neighbor_iden)
if self.G.has_edge(node['id'],neighbor['id']):
edge_data = self.G.get_edge_data(node['id'],neighbor['id'])
weight = edge_data['weight']
similarity = similarity + weight
return similarity
    # Function that computes the similarity (connectivity) between two HVSs.
def getInterSimilarity(self,hvs1,hvs2):
similarity = 0.0;
for i in range(len(hvs1)):
iden = hvs1[i]
node = self.getNodeFromIden(iden)
neighbors = self.G.neighbors(node['id'])
for j in range(len(neighbors)):
neighbor_iden = neighbors[j]
if neighbor_iden in hvs2:
neighbor = self.getNodeFromIden(neighbor_iden)
if self.G.has_edge(node['id'],neighbor['id']):
edge_data = self.G.get_edge_data(node['id'],neighbor['id'])
weight = edge_data['weight']
similarity = similarity + weight
return similarity
    # Method that removes HVSs with connectivity 1.
def extractNodesWithOneVertex(self):
i = 0
while (i < len(self.HVSs)):
vertexes = self.HVSs[i]
if len(vertexes) <= 1:
self.non_hub_vertexes.append(vertexes[0])
self.HVSs.remove(vertexes)
else:
i = i + 1
    # Given a node, return the HVS it most resembles (and hence its cluster).
def getMoreSimilarHVS(self,iden):
max_position = -1
max_similarity = 0.0
for i in range(len(self.HVSs)):
similarity = 0.0
vertexes = self.HVSs[i]
for j in range(len(vertexes)):
hv = vertexes[j]
hvnode = self.getNodeFromIden(hv)
node = self.getNodeFromIden(iden)
pos = self.find(node,hvnode)
if (pos != -1):
edge_data = self.G.get_edge_data(node['id'],self.G.node[pos]['id'])
weight = edge_data['weight']
similarity = similarity + weight
if (similarity > max_similarity):
max_position = i
max_similarity = similarity
return max_position
def find(self,node1,node2):
result = -1
processed = []
itr = nx.all_neighbors(self.G,node1['id'])
for i in itr:
if i not in processed:
processed.append(i)
if self.G.node[i]['concept'] == node2['concept']:
result = self.G.node[i]['id']
break
return result
class HVSConnectivityGenetic():
def __init__(self,graph_operations,limit=800,size=16,margin_crossover=0.6,prob_crossover=0.9,
margin_mutation=0.1,prob_mutation=0.4):
rnd.seed(0)
self.counter = 0
self.graph_operations = graph_operations
self.target = len(self.graph_operations.get_HVSs())
self.limit = limit
self.size = size
self.margin_crossover = margin_crossover
self.prob_crossover = prob_crossover
self.margin_mutation = margin_mutation
self.prob_mutation = prob_mutation
self.children = []
def init_population(self):
population = []
for _ in range(0,self.size):
chromosome = self.init_chromosome()
population.append(chromosome)
return population
def init_chromosome(self):
chromosome = []
for i in range(0,len(self.graph_operations.get_HVSs())):
value = rnd.randint(-1,len(self.graph_operations.get_HVSs()) - 1)
relation = node_values(i,[value,-1])
chromosome.append(relation)
return chromosome
def fitness(self,chromosome):
accurancy = 0
for i in range(0,len(chromosome)):
vertexes = self.graph_operations.get_HVSs()[i]
j = 0
found = False
while (j < len(vertexes) and not found):
iden = vertexes[j]
intraconnection = self.graph_operations.getConnectionWithHVS(iden,self.graph_operations.get_HVSs()[i])
interconnection = self.graph_operations.getMaxConnectionWithHVSs(iden,intraconnection)
if interconnection[0] != -1 and interconnection[1] != 0:
if chromosome[i].get_value()[0] == j:
found = True
chromosome[i].set_value([chromosome[i].get_value()[0],interconnection[0]])
accurancy = accurancy + 1
else:
j = j + 1
else:
j = j + 1
if found == False:
if chromosome[i].get_value()[0] == -1:
accurancy = accurancy + 1
return accurancy
def get_optimal(self,position):
vertexes = self.graph_operations.get_HVSs()[position]
result = -1
inter = -1
j = 0
found = False
while (j < len(vertexes) and not found):
iden = vertexes[j]
intraconnection = self.graph_operations.getConnectionWithHVS(iden,self.graph_operations.get_HVSs()[position])
interconnection = self.graph_operations.getMaxConnectionWithHVSs(iden,intraconnection)
if interconnection[0] != -1 and interconnection[1] != 0:
result = j
inter = interconnection[0]
found = True
else:
j = j + 1
return result,inter
| |
2. * norm(c1 - c2)
return abs(volume)
#return volume4(n1, n2, n3, n4) + volume4(n2, n3, n4, n5) + volume4(n2, n4, n5, n6)
def raw_fields(self):
list_fields = ['CPENTA', self.eid, self.Pid()] + self.node_ids
return list_fields
@property
def node_ids(self):
nids = self._node_ids(nodes=self.nodes_ref, allow_empty_nodes=False)
return nids
def cpenta_face(nid, nid_opposite, nids):
assert len(nids) == 6, nids
indx1 = nids.index(nid)
if nid_opposite is None:
if indx1 in [0, 1, 2]:
pack2 = tuple([2, 1, 0])
elif indx1 in [3, 4, 5]:
pack2 = tuple([3, 4, 5])
else:
raise RuntimeError(indx1)
assert len(pack2) == 3, pack2
else:
indx2 = nids.index(nid_opposite)
# offset so it's easier to map the nodes with the QRG
pack = tuple(sorted([indx1 + 1, indx2 + 1]))
_cpenta_mapper = {
# reverse points away from the element
#(1, 2) : [1, 2, 3], # close
#(2, 3) : [1, 2, 3],
#(1, 3) : [1, 2, 3],
#(4, 5) : [4, 5, 6], # far-reverse
#(5, 6) : [4, 5, 6],
#(4, 6) : [4, 5, 6],
(1, 5) : [4, 5, 2, 1], # bottom
(2, 4) : [4, 5, 2, 1],
(1, 6) : [1, 3, 6, 4], # left-reverse
(3, 4) : [1, 3, 6, 4],
(2, 6) : [2, 5, 6, 3], # right
(3, 5) : [2, 5, 6, 3],
}
try:
pack2 = _cpenta_mapper[pack]
except KeyError:
print('PLOAD4; remove a node')
raise
pack2 = [i - 1 for i in pack2]
return pack2
def cpenta_face_area_centroid_normal(nid, nid_opposite, nids, nodes_ref):
"""
Parameters
----------
nid : int
G1 - a grid point on the corner of a face
nid_opposite : int / None
G3 - the grid point diagonally opposite of G1
"""
face = cpenta_face(nid, nid_opposite, nids)
if nid_opposite is None:
n1i, n2i, n3i = face
p1 = nodes_ref[n1i].get_position()
p2 = nodes_ref[n2i].get_position()
p3 = nodes_ref[n3i].get_position()
a = p3 - p1
b = p2 - p1
centroid = (p1 + p2 + p3) / 3.
else:
n1i, n2i, n3i, n4i = face
p1 = nodes_ref[n1i].get_position()
p2 = nodes_ref[n2i].get_position()
p3 = nodes_ref[n3i].get_position()
p4 = nodes_ref[n4i].get_position()
a = p1 - p3
b = p2 - p4
centroid = (p1 + p2 + p3 + p4) / 4.
normal = cross(a, b)
n = norm(normal)
area = 0.5 * n
return face, area, centroid, normal / n
def chexa_face(nid_opposite, nid, nids):
assert len(nids) == 8, nids
g1i = nids.index(nid_opposite)
g3i = nids.index(nid)
for face in _chexa_faces:
if g1i in face and g3i in face:
found_face = face
found_face = _chexa_mapper[tuple([g1i, g3i])]
return found_face
def chexa_face_area_centroid_normal(nid, nid_opposite, nids, nodes_ref):
"""
Parameters
----------
nid : int
G1 - a grid point on the corner of a face
nid_opposite : int
G3 - the grid point diagonally opposite of G1
nodes_ref : List[GRID]
the GRID objects
# top (7-6-5-4)
# btm (0-1-2-3)
# left (0-3-7-4)
# right (5-6-2-1)
# front (4-5-1-0)
# back (2-6-7-3)
"""
face = chexa_face(nid_opposite, nid, nids)
nid1, nid2, nid3, nid4 = face
n1 = nodes_ref[nid1].get_position()
n2 = nodes_ref[nid2].get_position()
n3 = nodes_ref[nid3].get_position()
n4 = nodes_ref[nid4].get_position()
axb = cross(n3 - n1, n4 - n2)
areai = norm(axb)
centroid = (n1 + n2 + n3 + n4) / 4.
area = 0.5 * areai
normal = axb / areai
return face, area, centroid, normal
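# A quick worked example of the diagonal cross-product rule used by both face
# helpers above (illustrative only): for a planar quadrilateral the area is
# half the magnitude of the cross product of its diagonals.  With the unit
# square (0,0,0)-(1,0,0)-(1,1,0)-(0,1,0), the diagonals are (1,1,0) and
# (-1,1,0), their cross product is (0,0,2), so area = 0.5 * 2 = 1 and the
# unit normal is (0,0,1).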
class CPENTA15(SolidElement):
"""
+---------+-----+-----+----+-----+-----+-----+-----+-----+
| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 |
+=========+=====+=====+====+=====+=====+=====+=====+=====+
| CPENTA | EID | PID | G1 | G2 | G3 | G4 | G5 | G6 |
+---------+-----+-----+----+-----+-----+-----+-----+-----+
| | G7 | G8 | G9 | G10 | G11 | G12 | G13 | G14 |
+---------+-----+-----+----+-----+-----+-----+-----+-----+
| | G15 | | | | | | | |
+---------+-----+-----+----+-----+-----+-----+-----+-----+
"""
type = 'CPENTA'
def __init__(self, eid, pid, nids, comment=''):
"""
Creates a CPENTA15
Parameters
----------
eid : int
element id
pid : int
property id (PSOLID, PLSOLID)
nids : List[int]
node ids; n=15
"""
SolidElement.__init__(self)
if comment:
self.comment = comment
#: Element ID
self.eid = eid
#: Property ID
self.pid = pid
nnodes = len(nids)
if nnodes < 15:
nids.extend((15 - nnodes) * [None])
self.nodes = self.prepare_node_ids(nids, allow_empty_nodes=True)
assert len(self.nodes) == 15
@classmethod
def add_card(cls, card, comment=''):
"""
Adds a CPENTA15 card from ``BDF.add_card(...)``
Parameters
----------
card : BDFCard()
a BDFCard object
comment : str; default=''
a comment for the card
"""
eid = integer(card, 1, 'eid')
pid = integer(card, 2, 'pid')
nids = [
integer(card, 3, 'nid1'),
integer(card, 4, 'nid2'),
integer(card, 5, 'nid3'),
integer(card, 6, 'nid4'),
integer(card, 7, 'nid5'),
integer(card, 8, 'nid6'),
integer_or_blank(card, 9, 'nid7'),
integer_or_blank(card, 10, 'nid8'),
integer_or_blank(card, 11, 'nid9'),
integer_or_blank(card, 12, 'nid10'),
integer_or_blank(card, 13, 'nid11'),
integer_or_blank(card, 14, 'nid12'),
integer_or_blank(card, 15, 'nid13'),
integer_or_blank(card, 16, 'nid14'),
integer_or_blank(card, 17, 'nid15'),
]
assert len(card) <= 18, f'len(CPENTA15 card) = {len(card):d}\ncard={card}'
return CPENTA15(eid, pid, nids, comment=comment)
@classmethod
def add_op2_data(cls, data, comment=''):
"""
Adds a CPENTA15 card from the OP2
Parameters
----------
data : List[varies]
a list of fields defined in OP2 format
comment : str; default=''
a comment for the card
"""
eid = data[0]
pid = data[1]
nids = [d if d > 0 else None for d in data[2:]]
assert len(data) == 17, 'len(data)=%s data=%s' % (len(data), data)
return CPENTA15(eid, pid, nids, comment=comment)
def cross_reference(self, model: BDF) -> None:
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by CPENTA eid=%s' % self.eid
self.nodes_ref = model.EmptyNodes(self.nodes, msg=msg)
self.pid_ref = model.Property(self.pid, msg=msg)
def safe_cross_reference(self, model: BDF, xref_errors):
"""
Cross links the card so referenced cards can be extracted directly
Parameters
----------
model : BDF()
the BDF object
"""
msg = ', which is required by CPENTA eid=%s' % self.eid
self.nodes_ref = model.EmptyNodes(self.nodes, msg=msg)
self.pid_ref = model.safe_property(self.pid, self.eid, xref_errors, msg=msg)
@property
def faces(self):
"""
Gets the faces of the element
Returns
-------
faces : Dict[int, List[int]]
key = face number
value = a list of node ids (integer pointers)
.. note:: The order of the nodes is consistent with normals that point outwards.
The face numbering is meaningless.
.. note:: The order of the nodes is consistent with ANSYS numbering; is this current?
.. warning:: higher order element ids not verified with ANSYS; is this current?
Examples
--------
>>> print(element.faces)
"""
n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13, n14, n15 = self.node_ids
faces = {
1 : [n1, n2, n3, n7, n8, n9],
2 : [n4, n5, n6, n10, n11, n12],
3 : [n1, n2, n5, n4, n7, n14, n10, n13],
4 : [n2, n3, n6, n5, n8, n15, n11, n14],
5 : [n3, n1, n4, n6, n9, n13, n12, n15],
}
return faces
def get_face(self, nid, nid_opposite):
nids = self.node_ids[:6]
return cpenta_face(nid_opposite, nid, nids)
def get_face_area_centroid_normal(self, nid, nid_opposite=None):
nids = self.node_ids[:6]
return cpenta_face_area_centroid_normal(nid, nid_opposite, nids, self.nodes_ref[:6])
def get_edge_ids(self):
"""
Return the edge IDs
"""
node_ids = self.node_ids
return [
# base
tuple(sorted([node_ids[0], node_ids[1]])),
tuple(sorted([node_ids[1], node_ids[2]])),
tuple(sorted([node_ids[2], node_ids[0]])),
# top
tuple(sorted([node_ids[3], node_ids[4]])),
tuple(sorted([node_ids[4], node_ids[5]])),
tuple(sorted([node_ids[5], node_ids[3]])),
# sides
tuple(sorted([node_ids[0], node_ids[3]])),
tuple(sorted([node_ids[1], node_ids[4]])),
tuple(sorted([node_ids[2], node_ids[5]])),
]
def _verify(self, xref):
eid = self.eid
pid = self.Pid()
nids = self.node_ids
assert isinstance(eid, int)
assert isinstance(pid, int)
for i, nid in enumerate(nids):
assert nid is None or isinstance(nid, int), 'nid%i is not an integer/blank; nid=%s' %(i, nid)
if xref:
centroid = self.Centroid()
volume = self.Volume()
assert isinstance(volume, float)
for i in range(3):
assert isinstance(centroid[i], float)
def Centroid(self):
"""
.. seealso:: CPENTA6.Centroid
"""
(n1, n2, n3, n4, n5, n6) = self.get_node_positions()[:6]
c1 = (n1 + n2 + n3) / 3.
c2 = (n4 + n5 + n6) / 3.
centroid = (c1 + c2) / 2.
return centroid
def Volume(self):
"""
.. seealso:: CPENTA6.Volume
"""
(n1, n2, n3, n4, n5, n6) = self.get_node_positions()[:6]
area1 = Area(n3 - n1, n2 - n1)
area2 = Area(n6 - n4, n5 - n4)
c1 = (n1 + n2 + n3) / 3.
c2 = (n4 + n5 + n6) / 3.
volume = (area1 + area2) / 2. * norm(c1 - c2)
return abs(volume)
# time has passed
# since the last update.
time_since_stored = datetime.datetime.max - datetime.datetime.now()
counter_modify = 0
chat_request_action_on_receiver = None
if time_since_stored > datetime.timedelta(seconds = 1) or override_minimum_delay: # only process if X seconds have passed
# update the initiate_contact_object inside a transaction
try:
(counter_modify, chat_request_action_on_receiver, initiate_contact_object) = \
ndb.transaction(lambda: txn(initiate_contact_object_key, action))
initiate_contact_object_modified = True
except:
# transaction failed -- object not modified
logging.warning("Trasaction failed in modify_active_initiate_contact_object")
initiate_contact_object_modified = False
else:
# we do not write the initiate_contact_object since not enough time has passed since the last click
pass
return (initiate_contact_object, initiate_contact_object_modified, counter_modify,
chat_request_action_on_receiver, previous_chat_friend_stored_value)
except:
error_reporting.log_exception(logging.critical)
@ajax_call_requires_login
def store_initiate_contact(request, to_uid):
# stores updates to winks, favorites, etc. This is called when users click on the associated icon.
userobject = utils_top_level.get_userobject_from_request(request)
userobject_key = userobject.key
userobject_nid = userobject_key.id()
try:
possible_actions = ('wink', 'favorite', 'kiss', 'key', 'chat_friend', 'blocked')
other_userobject_key = ndb.Key(urlsafe = to_uid)
other_userobject = utils_top_level.get_object_from_string(to_uid)
if request.method != 'POST':
return HttpResponseBadRequest()
else:
action = request.POST.get('section_name', '')
if action in possible_actions:
initiate_contact_object = utils.get_initiate_contact_object(userobject_key, other_userobject_key, create_if_does_not_exist=True)
(initiate_contact_object, initiate_contact_object_modified, counter_modify, chat_request_action_on_receiver, active_previous_chat_friend_stored_value) =\
modify_active_initiate_contact_object(action, initiate_contact_object, userobject_key, other_userobject_key, )
if initiate_contact_object_modified:
owner_new_contact_counter_obj = userobject.new_contact_counter_ref.get()
# first check that the user has not exceeded their quota for the given action
request_denied = False
if counter_modify > 0:
if action == "key":
if owner_new_contact_counter_obj.num_sent_key >= constants.MAX_KEYS_SENT_ALLOWED:
response_text = "<p>%s" % ugettext("""You have exceeded the limit of %(max_keys)s on the number of keys that you can send.
Before sending additional keys, you must take back keys that you have given to other users.""") \
% {'max_keys' : constants.MAX_KEYS_SENT_ALLOWED}
request_denied = True
def max_chat_friends_response_text(chat_friend_requests_allowed):
return ugettext("""
You have reached the limit of %(max_requests)s on the number of chat friends
that you are allowed to request. Before requesting additional chat
friends, you must remove some of your current chat friends.""") \
% {'max_requests' : chat_friend_requests_allowed}
if action == "chat_friend":
if utils.get_client_vip_status(userobject) or initiate_contact_object.chat_friend_stored != "request_sent":
# VIP clients are allowed to have the max number of chat friends OR
# we allow people to accept friend requests even after their free limit on friends has been exceeded.
# ... But not to initiate new friend requests, However, we *never* allow them to exceed
# MAX_CHAT_FRIEND_REQUESTS_ALLOWED number of chat friends. This limit is in place because
# it could use excessive CPU resources if the list gets too big.
if owner_new_contact_counter_obj.num_sent_chat_friend >= MAX_CHAT_FRIEND_REQUESTS_ALLOWED:
response_text = "<p>%s" % max_chat_friends_response_text(MAX_CHAT_FRIEND_REQUESTS_ALLOWED)
request_denied = True
else:
# This user is neither VIP nor responding to a friend request, therefore they
# only have the free limit of friends
if owner_new_contact_counter_obj.num_sent_chat_friend >= GUEST_NUM_CHAT_FRIEND_REQUESTS_ALLOWED:
request_denied = True
response_text = "<p>%s" % max_chat_friends_response_text(GUEST_NUM_CHAT_FRIEND_REQUESTS_ALLOWED)
if constants.THIS_BUILD_ALLOWS_VIP_UPGRADES:
response_text += "<p>%s" % ugettext("""
If you wish to increase this limit to %(max_requests)s,
you could consider becoming a VIP member.""") % \
{'max_requests' : MAX_CHAT_FRIEND_REQUESTS_ALLOWED}
response_text += vip_render_payment_options.render_payment_options(request, userobject)
see_vip_benefits_txt = ugettext("See VIP benefits")
response_text += '<strong><a class="cl-dialog_anchor cl-see_all_vip_benefits" href="#">%s</a></strong>' % see_vip_benefits_txt
if request_denied:
# un-do the request (toggle it) by calling the same function again. Note that we override the
# minimum delay between clicks.
(initiate_contact_object, initiate_contact_object_modified, counter_modify, chat_request_action_on_receiver, active_previous_chat_friend_stored_value) =\
modify_active_initiate_contact_object(action, initiate_contact_object, userobject_key, other_userobject_key,
override_minimum_delay = True)
return HttpResponse(response_text)
# update the counter for the receiver, except for favorites and blocked since these fields
# will never be displayed or available to the "viewed" user.
action_postfix = "_since_last_reset"
if action != "favorite" and action != 'blocked':
if counter_modify != 0:
if action != "chat_friend":
action_prefix = "num_received_"
else:
#action == "chat_friend"
#
# Note chat_request_action_on_receiver should be either friend_request or connected
if chat_request_action_on_receiver == 'connected':
action_prefix = "num_connected_"
else:
action_prefix = "num_received_"
# update the chat_request status on the passive object.
update_bool = modify_passive_initiate_contact_object(chat_request_action_on_receiver, counter_modify, userobject_key, other_userobject_key)
if not update_bool:
error_reporting.log_exception(logging.critical,
error_message = "passive initiate_contact_object failed to update between %s and %s" %
(userobject.username, other_userobject.username))
if counter_modify > 0:
hours_between_notifications = utils.get_hours_between_notifications(other_userobject,
constants.hours_between_new_contacts_notifications)
else:
hours_between_notifications = "NA" # should not be required/accessed
# update the *receiver's* counters for kisses, winks, chat_friends, etc.
# Note: the behaviour of these counters for chat_friends is not 100% ideal, but it would require large
# changes to correct. E.g. if a user receives a friend request that they accept, the received request is
# still counted as "1 new". However, we can't reduce that counter by one, because we don't know if the user
# has already viewed the "friend requests" page, which would have already reset it to zero (so subtracting
# one would make it go negative).
receiver_new_contact_counter_obj = modify_new_contact_counter(other_userobject.new_contact_counter_ref, \
action, action_prefix, action_postfix, counter_modify, hours_between_notifications,
update_notification_times = True,)
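# (The action_prefix/action/action_postfix pieces above presumably combine into
# counter attribute names such as "num_received_wink_since_last_reset" inside
# modify_new_contact_counter, which is defined elsewhere in this module.)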
info_message = "Modifying %s on %s by %s" %(action, other_userobject.username, counter_modify)
logging.info(info_message)
# if notification for sending the user notification is past due, send it now.
if receiver_new_contact_counter_obj.when_to_send_next_notification <= datetime.datetime.now():
try:
# by construction, this should never execute unless the email address is valid - if email address is not
# valid, then the date of when_to_send_next_notification should be set to max.
assert(other_userobject.email_address_is_valid)
# add a one minute delay before sending the message notification - this means that if the user sends a wink (or whatever)
# and quickly changes their mind, the error checking code in the notification function will catch that the
# when_to_send_next_notification time is no longer valid, and the email will be cancelled.
countdown_time = 60
taskqueue.add(queue_name = 'fast-queue', countdown = countdown_time, \
url='/rs/admin/send_new_message_notification_email/', params = {
'uid': other_userobject.key.urlsafe()})
except:
error_reporting.log_exception(logging.critical)
if action == "chat_friend" or action == "key":
# Update the counters on *owners* "new_contact_counter_ref" object to reflect how many
# friend requests or how many keys they have sent.
# For now we only track keys and friend requests
action_prefix = "num_sent_"
action_postfix = ''
modify_new_contact_counter(userobject.new_contact_counter_ref, \
action, action_prefix, action_postfix, counter_modify,
hours_between_notifications = None, update_notification_times = False,)
elif action == 'favorite':
# we must check if the users have sent messages between them, and if so
# must update the users_have_sent_messages (ie. mail) object to reflect that this
# user is now a "favorite". This is necessary due to the dual structure of favorites
# which must appear in both the mailbox as well as the contacts lists.
if counter_modify == 1:
new_action_boolean = True
if counter_modify == -1:
new_action_boolean = False
if counter_modify == 1 or counter_modify == -1:
update_users_have_sent_messages_object_favorite_val(userobject, other_userobject, new_action_boolean)
else:
error_reporting.log_exception(logging.warning, error_message = "unknown action: %s posted" % action)
return HttpResponse("OK")
# end if/else request.method != 'POST'
except:
error_message = "User %s has triggered an exception" % (userobject.username)
error_reporting.log_exception(logging.critical, error_message = error_message)
return HttpResponseServerError("Error")
def increase_reporting_or_reporter_unacceptable_count(model_class, userobject_key, increase_or_decrease_count):
# Function for keeping track of how many times a profile has been marked as unacceptable (or how many
# times a particular user has marked other profiles as unacceptable, depending on the model_class value).
# We expect model_class to be either CountReportingProfile or CountUnacceptableProfile
# and it will update the appropriate counter.
def txn(profile_reporting_tracker):
try:
if not profile_reporting_tracker:
profile_reporting_tracker = model_class()
profile_reporting_tracker.profile_ref = userobject_key
if model_class == models.CountUnacceptableProfile:
# it is a new object, so this is the first time that it has been reported.
profile_reporting_tracker.datetime_first_reported_in_small_time_window = datetime.datetime.now()
profile_reporting_tracker.count += increase_or_decrease_count
if model_class == models.CountUnacceptableProfile:
if profile_reporting_tracker.datetime_first_reported_in_small_time_window + datetime.timedelta(hours = constants.SMALL_TIME_WINDOW_HOURS_FOR_COUNT_UNACCEPTABLE_PROFILE_REPORTS) < datetime.datetime.now():
# window has closed, start a new one.
profile_reporting_tracker.datetime_first_reported_in_small_time_window = datetime.datetime.now()
profile_reporting_tracker.num_times_reported_in_small_time_window = 1
else:
# within the window - so increase the count
profile_reporting_tracker.num_times_reported_in_small_time_window += 1
except:
error_reporting.log_exception(logging.error, "profile_reporting_tracker = %s" % repr(profile_reporting_tracker))
profile_reporting_tracker.put()
return profile_reporting_tracker
== new_york_s.identifier
assert "7" == seven.subject.identifier
assert 100 == seven.weight
assert Subject.AGE_RANGE == seven.subject.type
from core.classifier import Classifier
classifier = Classifier.classifiers.get(seven.subject.type, None)
classifier.classify(seven.subject)
# If we import the same file again, we get the same list of Editions.
imported_editions_2, pools_2, works_2, failures_2 = OPDSImporter(
self._db, collection=None
).import_from_feed(feed)
assert imported_editions_2 == imported_editions
# importing with a collection and a lendable data source makes
# license pools and works.
imported_editions, pools, works, failures = OPDSImporter(
self._db,
collection=self._default_collection,
data_source_name=DataSource.OA_CONTENT_SERVER,
).import_from_feed(feed)
[crow_pool, mouse_pool] = sorted(
pools, key=lambda x: x.presentation_edition.title
)
assert self._default_collection == crow_pool.collection
assert self._default_collection == mouse_pool.collection
# Work was created for both books.
assert crow_pool.work is not None
assert Edition.BOOK_MEDIUM == crow_pool.presentation_edition.medium
assert mouse_pool.work is not None
assert Edition.PERIODICAL_MEDIUM == mouse_pool.presentation_edition.medium
work = mouse_pool.work
work.calculate_presentation()
assert 0.4142 == round(work.quality, 4)
assert Classifier.AUDIENCE_CHILDREN == work.audience
assert NumericRange(7, 7, "[]") == work.target_age
# Bonus: make sure that delivery mechanisms are set appropriately.
[mech] = mouse_pool.delivery_mechanisms
assert Representation.EPUB_MEDIA_TYPE == mech.delivery_mechanism.content_type
assert DeliveryMechanism.NO_DRM == mech.delivery_mechanism.drm_scheme
assert "http://www.gutenberg.org/ebooks/10441.epub.images" == mech.resource.url
def test_import_with_lendability(self):
"""Test that OPDS import creates Edition, LicensePool, and Work
objects, as appropriate.
When there is no Collection, it is appropriate to create
Editions, but not LicensePools or Works. When there is a
Collection, it is appropriate to create all three.
"""
feed = self.content_server_mini_feed
# This import will create Editions, but not LicensePools or
# Works, because there is no Collection.
importer_mw = OPDSImporter(
self._db, collection=None, data_source_name=DataSource.METADATA_WRANGLER
)
(
imported_editions_mw,
pools_mw,
works_mw,
failures_mw,
) = importer_mw.import_from_feed(feed)
# Both editions were imported, because they were new.
assert 2 == len(imported_editions_mw)
# But pools and works weren't created, because there is no Collection.
assert 0 == len(pools_mw)
assert 0 == len(works_mw)
# 1 error message, corresponding to the <simplified:message> tag
# at the end of content_server_mini.opds.
assert 1 == len(failures_mw)
# Try again, with a Collection to contain the LicensePools.
importer_g = OPDSImporter(
self._db,
collection=self._default_collection,
)
imported_editions_g, pools_g, works_g, failures_g = importer_g.import_from_feed(
feed
)
# now pools and works are in, too
assert 1 == len(failures_g)
assert 2 == len(pools_g)
assert 2 == len(works_g)
# The pools have presentation editions.
assert set(["The Green Mouse", "Johnny Crow's Party"]) == set(
[x.presentation_edition.title for x in pools_g]
)
# The information used to create the first LicensePool said
# that the licensing authority is Project Gutenberg, so that's used
# as the DataSource for the first LicensePool. The information used
# to create the second LicensePool didn't include a data source,
# so the source of the OPDS feed (the open-access content server)
# was used.
assert set([DataSource.GUTENBERG, DataSource.OA_CONTENT_SERVER]) == set(
[pool.data_source.name for pool in pools_g]
)
def test_import_with_unrecognized_distributor_creates_distributor(self):
"""We get a book from a previously unknown data source, with a license
that comes from a second previously unknown data source. The
book is imported and both DataSources are created.
"""
feed = self.sample_opds("unrecognized_distributor.opds")
self._default_collection.external_integration.setting(
"data_source"
).value = "some new source"
importer = OPDSImporter(
self._db,
collection=self._default_collection,
)
imported_editions, pools, works, failures = importer.import_from_feed(feed)
assert {} == failures
# We imported an Edition because there was metadata.
[edition] = imported_editions
new_data_source = edition.data_source
assert "some new source" == new_data_source.name
# We imported a LicensePool because there was an open-access
# link, even though the ultimate source of the link was one
# we'd never seen before.
[pool] = pools
assert "Unknown Source" == pool.data_source.name
# From an Edition and a LicensePool we created a Work.
assert 1 == len(works)
def test_import_updates_metadata(self):
feed = self.sample_opds("metadata_wrangler_overdrive.opds")
edition, is_new = self._edition(
DataSource.OVERDRIVE, Identifier.OVERDRIVE_ID, with_license_pool=True
)
[old_license_pool] = edition.license_pools
old_license_pool.calculate_work()
work = old_license_pool.work
feed = feed.replace("{OVERDRIVE ID}", edition.primary_identifier.identifier)
self._default_collection.external_integration.setting(
"data_source"
).value = DataSource.OVERDRIVE
imported_editions, imported_pools, imported_works, failures = OPDSImporter(
self._db,
collection=self._default_collection,
).import_from_feed(feed)
# The edition we created has had its metadata updated.
[new_edition] = imported_editions
assert new_edition == edition
assert "The Green Mouse" == new_edition.title
assert DataSource.OVERDRIVE == new_edition.data_source.name
# But the license pools have not changed.
assert edition.license_pools == [old_license_pool]
assert work.license_pools == [old_license_pool]
def test_import_from_license_source(self):
# Instead of importing this data as though it came from the
# metadata wrangler, let's import it as though it came from the
# open-access content server.
feed = self.content_server_mini_feed
importer = OPDSImporter(
self._db,
collection=self._default_collection,
)
(
imported_editions,
imported_pools,
imported_works,
failures,
) = importer.import_from_feed(feed)
# Two works have been created, because the content server
# actually tells you how to get copies of these books.
[crow, mouse] = sorted(imported_works, key=lambda x: x.title)
# Each work has one license pool.
[crow_pool] = crow.license_pools
[mouse_pool] = mouse.license_pools
# The OPDS importer sets the data source of the license pool
# to Project Gutenberg, since that's the authority that grants
# access to the book.
assert DataSource.GUTENBERG == mouse_pool.data_source.name
# But the license pool's presentation edition has a data
# source associated with the Library Simplified open-access
# content server, since that's where the metadata comes from.
assert (
DataSource.OA_CONTENT_SERVER
== mouse_pool.presentation_edition.data_source.name
)
# Since the 'mouse' book came with an open-access link, the license
# pool delivery mechanism has been marked as open access.
assert True == mouse_pool.open_access
assert (
RightsStatus.GENERIC_OPEN_ACCESS
== mouse_pool.delivery_mechanisms[0].rights_status.uri
)
# The 'mouse' work was marked presentation-ready immediately.
assert True == mouse_pool.work.presentation_ready
# The OPDS feed didn't actually say where the 'crow' book
# comes from, but we did tell the importer to use the open access
# content server as the data source, so both a Work and a LicensePool
# were created, and their data source is the open access content server,
# not Project Gutenberg.
assert DataSource.OA_CONTENT_SERVER == crow_pool.data_source.name
def test_import_from_feed_treats_message_as_failure(self):
feed = self.sample_opds("unrecognized_identifier.opds")
imported_editions, imported_pools, imported_works, failures = OPDSImporter(
self._db, collection=self._default_collection
).import_from_feed(feed)
[failure] = list(failures.values())
assert isinstance(failure, CoverageFailure)
assert True == failure.transient
assert "404: I've never heard of this work." == failure.exception
def test_import_edition_failure_becomes_coverage_failure(self):
# Make sure that an exception during import generates a
# meaningful error message.
feed = self.content_server_mini_feed
imported_editions, pools, works, failures = DoomedOPDSImporter(
self._db,
collection=self._default_collection,
).import_from_feed(feed)
# Only one book was imported, the other failed.
assert 1 == len(imported_editions)
# The other failed to import, and became a CoverageFailure
failure = failures["http://www.gutenberg.org/ebooks/10441"]
assert isinstance(failure, CoverageFailure)
assert False == failure.transient
assert "Utter failure!" in failure.exception
def test_import_work_failure_becomes_coverage_failure(self):
# Make sure that an exception while updating a work for an
# imported edition generates a meaningful error message.
feed = self.content_server_mini_feed
self._default_collection.external_integration.setting(
"data_source"
).value = DataSource.OA_CONTENT_SERVER
importer = DoomedWorkOPDSImporter(self._db, collection=self._default_collection)
imported_editions, pools, works, failures = importer.import_from_feed(feed)
# One work was created, the other failed.
assert 1 == len(works)
# There's an error message for the work that failed.
failure = failures["http://www.gutenberg.org/ebooks/10441"]
assert isinstance(failure, CoverageFailure)
assert False == failure.transient
assert "Utter work failure!" in failure.exception
def test_consolidate_links(self):
# If a link turns out to be a dud, consolidate_links()
# gets rid of it.
links = [None, None]
assert [] == OPDSImporter.consolidate_links(links)
links = [
LinkData(href=self._url, rel=rel, media_type="image/jpeg")
for rel in [
Hyperlink.OPEN_ACCESS_DOWNLOAD,
Hyperlink.IMAGE,
Hyperlink.THUMBNAIL_IMAGE,
Hyperlink.OPEN_ACCESS_DOWNLOAD,
]
]
old_link = links[2]
links = OPDSImporter.consolidate_links(links)
assert [
Hyperlink.OPEN_ACCESS_DOWNLOAD,
Hyperlink.IMAGE,
Hyperlink.OPEN_ACCESS_DOWNLOAD,
] == [x.rel for x in links]
link = links[1]
assert old_link == link.thumbnail
links = [
LinkData(href=self._url, rel=rel, media_type="image/jpeg")
for rel in [
Hyperlink.THUMBNAIL_IMAGE,
Hyperlink.IMAGE,
Hyperlink.THUMBNAIL_IMAGE,
Hyperlink.IMAGE,
]
]
t1, i1, t2, i2 = links
links = OPDSImporter.consolidate_links(links)
assert [Hyperlink.IMAGE, Hyperlink.IMAGE] == [x.rel for x in links]
assert t1 == i1.thumbnail
assert t2 == i2.thumbnail
links = [
LinkData(href=self._url, rel=rel, media_type="image/jpeg")
for rel in [Hyperlink.THUMBNAIL_IMAGE, Hyperlink.IMAGE, Hyperlink.IMAGE]
]
t1, i1, i2 = links
links = OPDSImporter.consolidate_links(links)
assert [Hyperlink.IMAGE, Hyperlink.IMAGE] == [x.rel for x in links]
assert t1 == i1.thumbnail
assert None == i2.thumbnail
def test_import_book_that_offers_no_license(self):
feed = self.sample_opds("book_without_license.opds")
importer = OPDSImporter(self._db, self._default_collection)
(
imported_editions,
imported_pools,
imported_works,
failures,
) = importer.import_from_feed(feed)
# We got an Edition for this book, but no LicensePool and no Work.
[edition] = imported_editions
assert "Howards End" == edition.title
assert [] == imported_pools
assert [] == imported_works
# We were able to figure out the medium of the
# xpdAcq/pdffitx
import inspect
import math
import pathlib
import typing
import typing as tp
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import tqdm.notebook as tqdm
import xarray as xr
from diffpy.srfit.fitbase import FitResults
from diffpy.srfit.fitbase.fitresults import initializeRecipe
from diffpy.srfit.fitbase.parameter import Parameter
from diffpy.srfit.fitbase.profile import Profile
from pyobjcryst.crystal import Crystal
from pyobjcryst.molecule import Molecule
from xarray.plot import FacetGrid
import pdffitx.modeling as md
def get_arg_names(func: tp.Callable) -> tp.List[str]:
"""Get all the names of arguments.
Parameters
----------
func
Returns
-------
"""
return inspect.getfullargspec(func)[0]
def rename_args(func: tp.Callable, prefix: str, first: str) -> tp.List[str]:
"""Reformat arguments for the characteristic function.
Parameters
----------
func
prefix
first
Returns
-------
"""
names = get_arg_names(func)
return [first] + [prefix + name for name in names[1:]]
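# Illustrative example (hypothetical function, not from this package): for
#     def sphere_cf(r, psize): ...
# the call rename_args(sphere_cf, "G0_", "x") returns ["x", "G0_psize"] --
# the first argument name is replaced by `first` and the rest get `prefix`.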
def get_symbol(name: str) -> str:
"""A conventional rule to rename the parameter name to latex version."""
words = name.split("_")
if "scale" in words:
return "scale"
if "delta2" in words:
return r"$\delta_2$"
if "delta1" in words:
return r"$\delta_1$"
for word in ("a", "b", "c"):
if word in words:
return word
for word in ("alpha", "beta", "gamma"):
if word in words:
return rf"$\{word}$"
for word in (
'Uiso', 'U11', 'U12', 'U13', 'U21', 'U22', 'U23', 'U31', 'U32', 'U33',
'Biso', 'B11', 'B12', 'B13', 'B21', 'B22', 'B23', 'B31', 'B32', 'B33',
):
if word in words:
return rf"{word[0]}$_{{{word[1:]}}}$({words[1]})"
for word in ("x", "y", "z"):
if word in words:
return rf"{word}({words[1]})"
for word in ("psize", "psig", "sthick", "thickness", "radius"):
if word in words:
return rf"{word}"
return " ".join(words[1:])
def get_unit(name: str) -> str:
"""A conventional rule to get the unit."""
words = name.split("_")
if "scale" in words:
return ""
if "delta2" in words:
return r"Å$^2$"
if "delta1" in words:
return r"Å"
for word in ("a", "b", "c"):
if word in words:
return "Å"
for word in ("alpha", "beta", "gamma"):
if word in words:
return "deg"
for word in (
'Uiso', 'U11', 'U12', 'U13', 'U21', 'U22', 'U23', 'U31', 'U32', 'U33',
'Biso', 'B11', 'B12', 'B13', 'B21', 'B22', 'B23', 'B31', 'B32', 'B33',
):
if word in words:
return "Å$^2$"
for word in ("x", "y", "z"):
if word in words:
return "Å"
for word in ("psize", "psig", "sthick", "thickness", "radius"):
if word in words:
return "Å"
return ""
def plot_fits(fits: xr.Dataset, offset: float = 0., ax: plt.Axes = None, **kwargs) -> None:
"""Plot the fitted curves."""
if ax is None:
ax = plt.gca()
kwargs.setdefault("marker", "o")
kwargs.setdefault("fillstyle", "none")
kwargs.setdefault("ls", "none")
fits["y"].plot.line(ax=ax, **kwargs)
ax.plot(fits["x"], fits["ycalc"])
diff = fits["y"] - fits["ycalc"]
shift = offset + fits["y"].min() - diff.max()
diff += shift
ax.axhline(shift, ls='--', alpha=0.5, color="black")
ax.plot(fits["x"], diff)
return
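# Minimal usage sketch (assumes a fit result Dataset with "x", "y" and "ycalc"
# variables, as produced elsewhere in this package):
#
#     fig, ax = plt.subplots()
#     plot_fits(fits, offset=0.5, ax=ax, color="C0")
#     ax.set_xlabel("r (Å)")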
def plot_like_xarray(fits: xr.Dataset, col=None, row=None, col_wrap=None, sharex=True, sharey=True,
figsize=None, aspect=1, size=3, subplot_kws=None, plot_func=plot_fits,
label: str = "rw", label_str: str = "{} = {:.2f}",
label_xy: typing.Tuple[float, float] = (0.7, 0.9),
**kwargs
) -> FacetGrid:
"""Plot data in a facet grid like xarray."""
facet: FacetGrid = FacetGrid(fits, col, row, col_wrap, sharex, sharey, figsize, aspect, size, subplot_kws)
axes: typing.Iterable[plt.Axes] = facet.axes.flatten()
# get the dimensions to plot
dims = []
if row is not None:
dims.append(row)
if col is not None:
dims.append(col)
# get the idxs
sizes = [fits.dims[d] for d in dims]
idxs = np.stack([np.ravel(i) for i in np.indices(sizes)]).transpose()
for ax, idx in zip(axes, idxs):
ax: plt.Axes
idx: np.ndarray
pos: dict = dict(zip(dims, idx))
sel_fits: xr.Dataset = fits.isel(pos)
plot_func(sel_fits, ax=ax, **kwargs)
if label:
l_val = sel_fits[label].item()
l_key = get_name(sel_fits[label])
l_text = label_str.format(l_key, l_val)
ax.annotate(l_text, xy=label_xy, xycoords='axes fraction')
return facet
def get_name(da: xr.DataArray) -> str:
for n in ["long_name", "standard_name", "short_name"]:
if n in da.attrs:
return da.attrs[n]
return str(da.name)
def plot_fits_along_dim(
fits: xr.Dataset, dim: str, num_row: int = 1, offset: float = 0.,
figure_config: dict = None, grid_config: dict = None, plot_config: dict = None,
subplot_config: dict = None
) -> tp.List[plt.Axes]:
"""Plot the fitted curves in multiple panels."""
if subplot_config is None:
subplot_config = {}
n = len(fits[dim])
num_col = math.ceil(n / num_row)
if grid_config is None:
grid_config = {}
grid_config.setdefault("wspace", 0.25)
grid_config.setdefault("hspace", 0.25)
if plot_config is None:
plot_config = {}
if figure_config is None:
figure_config = {}
figure_config.setdefault("figsize", (4 * num_col, 3 * num_row))
fig: plt.Figure = plt.figure(**figure_config)
grids = gridspec.GridSpec(num_row, num_col, figure=fig, **grid_config)
axes = []
for i, grid in zip(range(len(fits[dim])), grids):
fit = fits.isel({dim: i})
ax = fig.add_subplot(grid, **subplot_config)
axes.append(ax)
plot_fits(fit, offset, ax=ax, **plot_config)
return axes
class ModelBase:
"""The template for the model class."""
def __init__(self, recipe: md.MyRecipe):
self._recipe = recipe
self._contribution = next(iter(recipe.contributions.values()))
self._fit_result = FitResults(self._recipe, update=False)
self._verbose: int = 1
self._order: tp.List[tp.Union[str, tp.Iterable[str]]] = []
self._options: dict = {}
self._fit_state = None
self.cached_input: typing.Optional[xr.Dataset] = None
self.cached_output: typing.Optional[xr.Dataset] = None
self.cached_params: typing.Optional[typing.Dict[str, float]] = None
def parallel(self, ncpu: int) -> None:
"""Parallel computing.
Parameters
----------
ncpu :
Number of CPUs.
"""
fc = self.get_contribution()
for g in fc.generators.values():
g.parallel(ncpu)
def set_xrange(self, start: float = None, end: float = None, step: float = None) -> None:
"""Set fitting range.
Parameters
----------
start :
Start of x. x >= start
end :
End of x. x <= end
step :
Step of x. x[i] - x[i-1] == step
Returns
-------
None
"""
profile = self.get_profile()
profile.setCalculationRange(xmin=start, xmax=end, dx=step)
def set_verbose(self, level: int) -> None:
"""Set verbose level.
Parameters
----------
level :
The level used. 0 means quiet.
Returns
-------
None
"""
self._verbose = level
def get_verbose(self) -> int:
"""Get verbose level
Returns
-------
Verbose level.
"""
return self._verbose
def set_options(self, **kwargs) -> None:
"""Set options for fitting.
Parameters
----------
kwargs :
The options for the scipy.optimize.least_squares.
Returns
-------
None
"""
self._options = kwargs
def get_options(self) -> dict:
"""Get options for fitting.
Returns
-------
A dictionary of options.
"""
return self._options
def set_order(self, *order: tp.Union[str, tp.Iterable[str]]) -> None:
"""Set the order of fitting parameters.
Parameters
----------
order :
A list of list or string.
Returns
-------
None
Examples
--------
if order is ["A", ["B", "C"]], "A" will be first refined and "B", "C" will be added after and refined.
"""
order = list(order)
self._check_order(order)
self._order = order
def set_order_lst(self, order_lst: tp.List[tp.Union[str, tp.Iterable[str]]]) -> None:
return self.set_order(*order_lst)
def _check_order(self, order: tp.Any) -> None:
"""Check the order."""
tags = set(self._recipe._tagmanager.alltags())
if isinstance(order, str):
if not hasattr(self._recipe, order) and order not in tags:
raise ValueError("'{}' is not in the variable names.".format(order))
elif isinstance(order, tp.Iterable):
for x in order:
self._check_order(x)
else:
raise TypeError("'{}' is not allowed.".format(type(order)))
def get_order(self) -> tp.List[tp.Union[str, tp.Iterable[str]]]:
"""Get the order of the parameters
Returns
-------
A list of parameters.
"""
return self._order
def set_value(self, values: dict = None, **kwargs) -> None:
"""Set the parameter values.
Parameters
----------
kwargs :
In the format of param = value.
Returns
-------
None
"""
if values is not None:
kwargs.update(values)
self._check_params(kwargs.keys())
for name, value in kwargs.items():
var: Parameter = getattr(self._recipe, name)
var.setValue(value)
def get_param(self, name: str) -> Parameter:
"""Get the parameters."""
if not hasattr(self._recipe, name):
raise KeyError("No such parameter call '{}' in the recipe.".format(name))
return getattr(self._recipe, name)
def set_bound(self, bound: dict = None, **kwargs) -> None:
"""Set the bound.
Parameters
----------
kwargs :
In the form of param = (lb, ub)
Returns
-------
None
"""
if bound is not None:
kwargs.update(bound)
self._check_params(kwargs.keys())
for name, bound in kwargs.items():
var: Parameter = getattr(self._recipe, name)
var.boundRange(*bound)
def set_rel_bound(self, rel_bound: dict = None, **kwargs) -> None:
"""Set the bound relatively to current value.
Parameters
----------
kwargs :
In the form of param = (lb, ub)
Returns
-------
None
"""
if rel_bound is not None:
kwargs.update(rel_bound)
self._check_params(kwargs.keys())
for name, bound in kwargs.items():
var: Parameter = getattr(self._recipe, name)
var.boundWindow(*bound)
def _check_params(self, params):
"""Check the parameters."""
for param in params:
if not hasattr(self._recipe, param):
raise KeyError("There is no parameter called '{}'".format(param))
def _create_recipe(self) -> md.MyRecipe:
"""Place holder for the method to create the recipe."""
raise NotImplemented
def get_contribution(self) -> md.MyContribution:
"""Get the first contribution in recipe.
Returns
-------
A FitContribution.
"""
return self._contribution
def get_generators(self) -> tp.Dict[str, tp.Callable]:
"""Get the generators in a dictionary."""
return self.get_contribution().generators
def calc_phase(self, x: np.array, name: str) -> xr.DataArray:
"""Calculate the data from a generator.
Parameters
----------
x :
An array of x.
name :
The name of a generator.
Returns
-------
A xarray.DataArray of calculated y with x as the coordinate.
if 'default' in schema and schema['default'] is not None:
print(wrapper.fill('Default: %s' % schema['default']))
print('')
@staticmethod
def _get_params_types(runner, action):
runner_params = runner.runner_parameters
action_params = action.parameters
parameters = copy.copy(runner_params)
parameters.update(copy.copy(action_params))
required = set([k for k, v in six.iteritems(parameters) if v.get('required')])
def is_immutable(runner_param_meta, action_param_meta):
# If runner sets a param as immutable, action cannot override that.
if runner_param_meta.get('immutable', False):
return True
else:
return action_param_meta.get('immutable', False)
immutable = set()
for param in parameters.keys():
if is_immutable(runner_params.get(param, {}),
action_params.get(param, {})):
immutable.add(param)
required = required - immutable
optional = set(parameters.keys()) - required - immutable
return parameters, required, optional, immutable
def _format_child_instances(self, children, parent_id):
'''
The goal of this method is to add an indent at every level. This way the
WF is represented as a tree structure while in a list. For the correct visual
representation the list must be a depth-first traversal, otherwise the indents
will end up looking strange.
'''
# apply basic WF formatting first.
children = format_wf_instances(children)
# setup a depth lookup table
depth = {parent_id: 0}
result = []
# main loop that indents each entry correctly
for child in children:
# make sure child.parent is in depth and while at it compute the
# right depth for indentation purposes.
if child.parent not in depth:
parent = None
for instance in children:
if WF_PREFIX in instance.id:
instance_id = instance.id[instance.id.index(WF_PREFIX) + len(WF_PREFIX):]
else:
instance_id = instance.id
if instance_id == child.parent:
parent = instance
if parent and parent.parent and parent.parent in depth:
depth[child.parent] = depth[parent.parent] + 1
else:
depth[child.parent] = 0
# now indent for the right visuals
child.id = INDENT_CHAR * depth[child.parent] + child.id
result.append(self._format_for_common_representation(child))
return result
def _format_for_common_representation(self, task):
'''
Formats a task for common representation between mistral and action-chain.
'''
# This really needs to be better handled on the back-end but that would be a bigger
# change, so we handle it in the CLI.
context = getattr(task, 'context', None)
if context and 'chain' in context:
task_name_key = 'context.chain.name'
elif context and 'mistral' in context:
task_name_key = 'context.mistral.task_name'
# Use LiveAction as the object so that the formatter lookup does not change.
# AKA HACK!
return models.action.LiveAction(**{
'id': task.id,
'status': task.status,
'task': jsutil.get_value(vars(task), task_name_key),
'action': task.action.get('ref', None),
'start_timestamp': task.start_timestamp,
'end_timestamp': getattr(task, 'end_timestamp', None)
})
def _sort_parameters(self, parameters, names):
"""
Sort a provided list of action parameters.
:type parameters: ``list``
:type names: ``list`` or ``set``
"""
sorted_parameters = sorted(names, key=lambda name:
self._get_parameter_sort_value(
parameters=parameters,
name=name))
return sorted_parameters
def _get_parameter_sort_value(self, parameters, name):
"""
Return a value which determines sort order for a particular parameter.
By default, parameters are sorted using "position" parameter attribute.
If this attribute is not available, parameter is sorted based on the
name.
"""
parameter = parameters.get(name, None)
if not parameter:
return None
sort_value = parameter.get('position', name)
return sort_value
def _get_inherited_env_vars(self):
env_vars = os.environ.copy()
for var_name in ENV_VARS_BLACKLIST:
if var_name.lower() in env_vars:
del env_vars[var_name.lower()]
if var_name.upper() in env_vars:
del env_vars[var_name.upper()]
return env_vars
class ActionRunCommand(ActionRunCommandMixin, resource.ResourceCommand):
def __init__(self, resource, *args, **kwargs):
super(ActionRunCommand, self).__init__(
resource, kwargs.pop('name', 'execute'),
'Invoke an action manually.',
*args, **kwargs)
self.parser.add_argument('ref_or_id', nargs='?',
metavar='ref-or-id',
help='Action reference (pack.action_name) ' +
'or ID of the action.')
self.parser.add_argument('parameters', nargs='*',
help='List of keyword args, positional args, '
'and optional args for the action.')
self.parser.add_argument('-h', '--help',
action='store_true', dest='help',
help='Print usage for the given action.')
self._add_common_options()
if self.name in ['run', 'execute']:
self.parser.add_argument('--trace-tag', '--trace_tag',
help='A trace tag string to track execution later.',
dest='trace_tag', required=False)
self.parser.add_argument('--trace-id',
help='Existing trace id for this execution.',
dest='trace_id', required=False)
self.parser.add_argument('-a', '--async',
action='store_true', dest='async',
help='Do not wait for action to finish.')
self.parser.add_argument('-e', '--inherit-env',
action='store_true', dest='inherit_env',
help='Pass all the environment variables '
'which are accessible to the CLI as "env" '
'parameter to the action. Note: Only works '
'with python, local and remote runners.')
self.parser.add_argument('-u', '--user', type=str, default=None,
help='User under which to run the action (admins only).')
if self.name == 'run':
self.parser.set_defaults(async=False)
else:
self.parser.set_defaults(async=True)
@add_auth_token_to_kwargs_from_cli
def run(self, args, **kwargs):
if not args.ref_or_id:
self.parser.error('Missing action reference or id')
action = self.get_resource(args.ref_or_id, **kwargs)
if not action:
raise resource.ResourceNotFoundError('Action "%s" cannot be found.'
% (args.ref_or_id))
runner_mgr = self.app.client.managers['RunnerType']
runner = runner_mgr.get_by_name(action.runner_type, **kwargs)
if not runner:
raise resource.ResourceNotFoundError('Runner type "%s" for action "%s" cannot be \
found.' % (action.runner_type, action.name))
action_ref = '.'.join([action.pack, action.name])
action_parameters = self._get_action_parameters_from_args(action=action, runner=runner,
args=args)
execution = models.LiveAction()
execution.action = action_ref
execution.parameters = action_parameters
execution.user = args.user
if not args.trace_id and args.trace_tag:
execution.context = {'trace_context': {'trace_tag': args.trace_tag}}
if args.trace_id:
execution.context = {'trace_context': {'id_': args.trace_id}}
action_exec_mgr = self.app.client.managers['LiveAction']
execution = action_exec_mgr.create(execution, **kwargs)
execution = self._get_execution_result(execution=execution,
action_exec_mgr=action_exec_mgr,
args=args, **kwargs)
return execution
class ActionExecutionBranch(resource.ResourceBranch):
def __init__(self, description, app, subparsers, parent_parser=None):
super(ActionExecutionBranch, self).__init__(
models.LiveAction, description, app, subparsers,
parent_parser=parent_parser, read_only=True,
commands={'list': ActionExecutionListCommand,
'get': ActionExecutionGetCommand})
# Register extended commands
self.commands['re-run'] = ActionExecutionReRunCommand(
self.resource, self.app, self.subparsers, add_help=False)
self.commands['cancel'] = ActionExecutionCancelCommand(
self.resource, self.app, self.subparsers, add_help=True)
self.commands['pause'] = ActionExecutionPauseCommand(
self.resource, self.app, self.subparsers, add_help=True)
self.commands['resume'] = ActionExecutionResumeCommand(
self.resource, self.app, self.subparsers, add_help=True)
self.commands['tail'] = ActionExecutionTailCommand(self.resource, self.app,
self.subparsers,
add_help=True)
POSSIBLE_ACTION_STATUS_VALUES = ('succeeded', 'running', 'scheduled', 'failed', 'canceling',
'canceled')
class ActionExecutionReadCommand(resource.ResourceCommand):
"""
Base class for read / view commands (list and get).
"""
@classmethod
def _get_exclude_attributes(cls, args):
"""
Retrieve a list of exclude attributes for particular command line arguments.
"""
exclude_attributes = []
result_included = False
trigger_instance_included = False
for attr in args.attr:
# Note: We perform a startswith check so we correctly detect child attribute properties
# (e.g. result, result.stdout, result.stderr, etc.)
if attr.startswith('result'):
result_included = True
if attr.startswith('trigger_instance'):
trigger_instance_included = True
if not result_included:
exclude_attributes.append('result')
if not trigger_instance_included:
exclude_attributes.append('trigger_instance')
return exclude_attributes
class ActionExecutionListCommand(ActionExecutionReadCommand):
display_attributes = ['id', 'action.ref', 'context.user', 'status', 'start_timestamp',
'end_timestamp']
attribute_transform_functions = {
'start_timestamp': format_isodate_for_user_timezone,
'end_timestamp': format_isodate_for_user_timezone,
'parameters': format_parameters,
'status': format_status
}
def __init__(self, resource, *args, **kwargs):
self.default_limit = 50
super(ActionExecutionListCommand, self).__init__(
resource, 'list', 'Get the list of the %s most recent %s.' %
(self.default_limit, resource.get_plural_display_name().lower()),
*args, **kwargs)
self.resource_name = resource.get_plural_display_name().lower()
self.group = self.parser.add_argument_group()
self.parser.add_argument('-n', '--last', type=int, dest='last',
default=self.default_limit,
help=('List N most recent %s. Use -n -1 to fetch the full result \
set.' % self.resource_name))
self.parser.add_argument('-s', '--sort', type=str, dest='sort_order',
default='descending',
help=('Sort %s by start timestamp, '
'asc|ascending (earliest first) '
'or desc|descending (latest first)' % self.resource_name))
# Filter options
self.group.add_argument('--action', help='Action reference to filter the list.')
self.group.add_argument('--status', help=('Only return executions with the provided \
status. Possible values are \'%s\', \'%s\', \
\'%s\', \'%s\', \'%s\' or \'%s\''
'.' % POSSIBLE_ACTION_STATUS_VALUES))
self.group.add_argument('--trigger_instance',
help='Trigger instance id to filter the list.')
self.parser.add_argument('-tg', '--timestamp-gt', type=str, dest='timestamp_gt',
default=None,
help=('Only return executions with timestamp '
'greater than the one provided. '
'Use time in the format "2000-01-01T12:00:00.000Z".'))
self.parser.add_argument('-tl', '--timestamp-lt', type=str, dest='timestamp_lt',
default=None,
help=('Only return executions with timestamp '
'lower than the one provided. '
'Use time in the format "2000-01-01T12:00:00.000Z".'))
self.parser.add_argument('-l', '--showall', action='store_true',
help='')
# Display options
self.parser.add_argument('-a', '--attr', nargs='+',
default=self.display_attributes,
help=('List of attributes to include in the '
'output. "all" will return all '
'attributes.'))
self.parser.add_argument('-w', '--width', nargs='+', type=int,
default=None,
help=('Set the width of columns in output.'))
@add_auth_token_to_kwargs_from_cli
def run(self, args, **kwargs):
# Filtering options
if args.action:
kwargs['action'] = args.action
if args.status:
kwargs['status'] = args.status
if args.trigger_instance:
kwargs['trigger_instance'] = args.trigger_instance
if not args.showall:
# null is the magic string that translates to does not exist.
kwargs['parent'] = 'null'
if args.timestamp_gt:
kwargs['timestamp_gt'] = args.timestamp_gt
if args.timestamp_lt:
kwargs['timestamp_lt'] = args.timestamp_lt
if args.sort_order:
if args.sort_order in ['asc', 'ascending']:
kwargs['sort_asc'] = True
elif args.sort_order in ['desc', 'descending']:
kwargs['sort_desc'] = True
# We exclude "result" and "trigger_instance" attributes which can contain a lot of data
# since they are not displayed nor used which speeds the common operation substantially.
exclude_attributes = self._get_exclude_attributes(args=args)
exclude_attributes = ','.join(exclude_attributes)
kwargs['exclude_attributes'] = exclude_attributes
return self.manager.query_with_count(limit=args.last, **kwargs)
def run_and_print(self, args, **kwargs):
result, count = self.run(args, **kwargs)
instances = format_wf_instances(result)
if args.json or args.yaml:
self.print_output(reversed(instances), table.MultiColumnTable,
attributes=args.attr, widths=args.width,
json=args.json,
yaml=args.yaml,
attribute_transform_functions=self.attribute_transform_functions)
else:
# Include elapsed time for running executions
instances = format_execution_statuses(instances)
self.print_output(reversed(instances), table.MultiColumnTable,
attributes=args.attr, widths=args.width,
attribute_transform_functions=self.attribute_transform_functions)
if args.last and count and count > args.last:
table.SingleRowTable.note_box(self.resource_name, args.last)
class ActionExecutionGetCommand(ActionRunCommandMixin, ActionExecutionReadCommand):
display_attributes = ['id', 'action.ref', 'context.user', 'parameters', 'status',
'start_timestamp', 'end_timestamp', 'result', 'liveaction']
def __init__(self, resource, *args, **kwargs):
super(ActionExecutionGetCommand, self).__init__(
resource, 'get',
'Get individual %s.' % resource.get_display_name().lower(),
*args, **kwargs)
self.parser.add_argument('id',
help=('ID of the %s.' %
resource.get_display_name().lower()))
from math import sin, cos, pi, sqrt, log
from numpy import log as clog
from numpy import exp as cexp
from numpy import ceil
# http://stackoverflow.com/questions/14132789/python-relative-imports-for-the-billionth-time
# Leaves me with only one choice ... :(
# Since I don't want to modify shell variables
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +
"/build")
try:
regmap_file = sys.argv[1].strip()
except Exception as e:
regmap_file = "regmap_gen_vmod1.json"
# Gang humbly requests that Q_1 be renamed Q_drive, and Q_2 as Q_probe.
# Should apply here, physics.tex, elsewhere?
# Note that Tstep is the ADC time step, also clocks the LLRF controller.
# Divide by two for the cavity simulator (rtsim) clock time step.
Tstep = 14/1320e6 # s
f0 = 1300e6 # Hz
nyquist_sign = -1 # -1 represents frequency inversion,
# as with high-side LO or even-numbered Nyquist zones.
#beam_current = 0.3e-3 # Amp
beam_current = 0
VPmax = 48.0 # V piezo drive max
# as we scale up, the following 10 parameters replicate per cavity:
PAmax = 6e3 # W RF amplifier max
PAbw = 1.5e6 # Hz bandwidth of power amplifier
cav_adc_max = 1.2 # sqrt(W)
rfl_adc_max = 180.0 # sqrt(W)
fwd_adc_max = 160.0 # sqrt(W)
phase_1 = 0 # forward monitor phase shift
phase_2 = 0 # reflected monitor prompt phase shift
a_cav_offset = 10
a_rfl_offset = 20
a_for_offset = 30
class Emode:
"""Cavity electrical mode"""
def __init__(self, name):
self.name=name
mode1 = Emode("pi")
mode1.RoverQ = 1036.0 # Ohm
mode1.foffset = 5.0 # Hz
mode1.peakV = 1.5e6 # V
mode1.Q_0 = 1e10 # internal loss
mode1.Q_1 = 8.1e4 # drive coupler (should be 4e7, maybe 8e4 for testing?)
mode1.Q_2 = 2e9 # field probe
mode1.phase_1 = 0
mode1.phase_2 = 0
mode2 = Emode("8pi/9")
mode2.RoverQ = 10.0 # Ohm
mode2.foffset = -8e5 # Hz
mode2.peakV = 0.15e6 # V
mode2.Q_0 = 1e10 # internal loss
mode2.Q_1 = 8.1e4 # drive coupler
mode2.Q_2 = 2e9 # field probe
mode2.phase_1 = 10.0
mode2.phase_2 = -180.0
class Mmode:
"""Cavity mechanical mode"""
def __init__(self,name):
self.name=name
# This mode is silly, but lets the frequency change on the time scale of
# software simulation = 40 us
mmode1 = Mmode("silly")
mmode1.freq = 30000 # Hz
mmode1.Q = 5.0 # unitless
mmode1.mx = 1.13 # sqrt(J) full-scale for resonator.v state
mmode1.piezo_hack = 0
mmode1.lorentz_en = 1
mmode2 = Mmode("piezo")
mmode2.freq = 100000 # Hz
mmode2.Q = 5.0 # unitless
mmode2.mx = 0 # disable
mmode2.piezo_hack = 80000
mmode2.lorentz_en = 0
# DDS setup for simulator should be static
# this construction is for 20 MHz / 94.286 MHz = 7/33
dds_num=7
dds_den=33
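# Note: 94.286 MHz is the 1/Tstep = 1320 MHz / 14 clock, and
# 20 / (1320/14) = 280/1320 = 7/33 exactly, so the 7/33 ratio reproduces the
# intended 20 MHz without any approximation error.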
# The following three parameters are set in the Verilog at compile-time,
# not run-time. Top-level setting in vmod1_tb.v needs to be mirrored here.
lp_shift = 9 # see lp_pair.v, a.k.a. mode_shift
n_mech_modes = 7 # number of mechanical modes handled
df_scale = 9 # see cav4_freq.v
# ==== end of system configuration
# Read registers from regmap_gen_vmod1
sim_base=0 # base address for vmod1
from read_regmap import get_map, get_reg_info
regmap = get_map(regmap_file)
# ==== end of hardware register dictionaries
# scale a floating point number in range [-1,1) to fit in b-bit register
error_cnt=0
def fix(x,b,msg,opt=None):
global error_cnt
ss = 2**(b-1)
# cordic_g = 1.646760258
    if opt == "cordic": ss = int(ss / 1.646760258)
xx = int(x*ss+0.5)
#print x,b,ss,xx
if (xx > ss-1):
xx = ss-1
print("# error: %f too big (%s)"%(x,msg))
error_cnt+=1
if (xx < -ss):
xx = -ss
print("# error: %f too small (%s)"%(x,msg))
error_cnt+=1
return xx
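# Worked example (added for illustration): fix(0.5, 18, "demo") uses
# ss = 2**17 = 131072 and returns int(0.5*131072 + 0.5) = 65536, safely inside
# the representable range [-131072, 131071].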
def set_reg(name, regmap):
val = globals()[name]
base_addr = regmap[name]
    if type(val) is list:
        for i, v in enumerate(val):
            print(base_addr + i, v, "#", name + "[" + str(i) + "]")
    else:
        print(base_addr, val, "#", name)
# send a register value "out"
# looks address up in regmap[name]
# finds value via name in python global namespace
# value can be a scalar or a list
# prefix and name are used to give a helpful comment
def set_reg_old(offset,prefix,name,hierarchy):
if name in globals():
val = globals()[name] # globals() or locals()?
else:
pre = hierarchy[0]+"_"
if name.startswith(pre): sname = name.partition(pre)[2]
else: return
if sname in globals():
val = globals()[sname]
elif len(hierarchy) == 2:
pre = hierarchy[1]+"_"
if sname.startswith(pre): sname = name.partition(pre)[2]
else: return
if sname in globals():
val = globals()[sname]
else:
#print "# Key not found: %s"%(name)
return
else:
# print "# Key not found: %s"%(name)
return
addr = regmap[name]['base_addr']
    if (type(val) is list):
        for i,v in enumerate(val):
            print(addr+i, v, "#", prefix+name+"["+str(i)+"]")
    else:
        print(addr, val, "#", prefix+name)
regmap_global = {
'beam_phase_step':get_reg_info(regmap,[],"beam_phase_step")["base_addr"],
'beam_modulo' : get_reg_info(regmap,[],"beam_modulo")["base_addr"],
'drive_couple_out_coupling' : get_reg_info(regmap,[],"drive_couple_out_coupling")["base_addr"], # base address of 4 registers
'amp_lp_bw' : get_reg_info(regmap,[],"amp_lp_bw")["base_addr"],
'a_cav_offset' : get_reg_info(regmap,[],"a_cav_offset")["base_addr"],
'a_rfl_offset' : get_reg_info(regmap,[],"a_rfl_offset")["base_addr"],
'a_for_offset' : get_reg_info(regmap,[],"a_for_offset")["base_addr"],
'resonator_prop_const' :
get_reg_info(regmap,[],"resonator_prop_const")["base_addr"],
'cav4_elec_modulo' :
get_reg_info(regmap,[],"cav4_elec_modulo")["base_addr"],
'cav4_elec_phase_step' :
get_reg_info(regmap,[],"cav4_elec_phase_step")["base_addr"],
'cav4_elec_dot_0_k_out':
get_reg_info(regmap,['',0],["dot", "k_out"])["base_addr"],
'cav4_elec_outer_prod_0_k_out':
get_reg_info(regmap,['',0],["outer", "k_out"])["base_addr"],
'cav4_elec_dot_1_k_out':
get_reg_info(regmap,['',1],["dot", "k_out"])["base_addr"],
'cav4_elec_outer_prod_1_k_out':
get_reg_info(regmap,['',1],["outer", "k_out"])["base_addr"],
'cav4_elec_dot_2_k_out':
get_reg_info(regmap,['',2],["dot", "k_out"])["base_addr"],
'cav4_elec_outer_prod_2_k_out':
get_reg_info(regmap,['',2],["outer", "k_out"])["base_addr"],
'piezo_couple_k_out' : get_reg_info(regmap,[''],"piezo_couple")["base_addr"],
#'noise_couple' : get_reg_info(regmap,[''],"noise_couple")["base_addr"]
} # base address of 1024 registers
# ==== now start the application-specific computations
# Known not covered yet:
# Beam coupling
omega0 = f0*2*pi
mech_tstep = Tstep * n_mech_modes
interp_gain = n_mech_modes / 2**ceil(log(n_mech_modes)/log(2)) # interp0.v
print "# Globals"
beam_phase_step = 13 # beam1.v
beam_modulo = -1320 # beam1.v
amp_lp_bw = fix(Tstep*PAbw*32, 18, "amp_lp_bw")
cav4_elec_phase_step_h = int(dds_num*2**20/dds_den)
dds_mult = int(4096/dds_den)
cav4_elec_phase_step_l = (dds_num*2**20)%dds_den * dds_mult
cav4_elec_modulo = 4096 - dds_mult*dds_den
cav4_elec_phase_step = cav4_elec_phase_step_h << 12 | cav4_elec_phase_step_l
print "# dds",dds_mult,cav4_elec_phase_step_h, cav4_elec_phase_step_l, cav4_elec_modulo
# four registers of pair_couple.v
# neglect losses between directional coupler and cavity
drive_couple_out_coupling = [
fix(-sqrt(PAmax) / fwd_adc_max, 18, "out1", "cordic"),
fix(-sqrt(PAmax) / rfl_adc_max, 18, "out2", "cordic"),
fix(phase_1 / 180.0, 18, "out3"),
fix(phase_2 / 180.0, 18, "out4")]
# Mechanical modes
resonator_prop_const=[]
piezo_couple_k_out=[]
cav4_elec_dot_0_k_out=[]; cav4_elec_outer_prod_0_k_out=[]
cav4_elec_dot_1_k_out=[]; cav4_elec_outer_prod_1_k_out=[]
cav4_elec_dot_2_k_out=[]; cav4_elec_outer_prod_2_k_out=[]
for i,m in enumerate([mmode1,mmode2]):
print "# Cavity mechanical mode %d: %s"%(i,m.name)
w1 = mech_tstep * 2*pi*m.freq
# a1 + b1 * i represents the pole in the normalized s-plane
a1 = w1 * (-1/(2.0*m.Q))
b1 = w1 * sqrt(1-1/(4.0*m.Q**2))
z_pole = cexp(a1+b1*1j)
print "# z_pole = %7f + %7fi"%(z_pole.real,z_pole.imag)
a1 = z_pole.real-1.0
b1 = z_pole.imag
scale = int(-log(max(a1,b1))/log(4))
scale = max(min(scale,9),2)
a2 = a1 * 4**scale
b2 = b1 * 4**scale
print "# debug",w1,a1,b1,scale,a2,b2
#c1 = -w1**2 / (k*b1)
resonator_prop_const.append( (fix(a2,18,"a2")&(2**18-1)) + ((9-scale) << 18) )
resonator_prop_const.append( (fix(b2,18,"b2")&(2**18-1)) + ((9-scale) << 18) )
# the above is tested. Onwards to the work-in-progress
dc_gain = b2/(a2**2+b2**2) # resonator.v
print "# resonator mode DC gain %.4f"%dc_gain
net_coupling = 3.03e-8 # Hz / V^2, negative is implicit
Amn = sqrt(net_coupling/mode1.RoverQ) / omega0 # sqrt(J)/V^2
Cmn = - sqrt(net_coupling*mode1.RoverQ) * omega0 # 1/s/sqrt(J)
outer = m.lorentz_en * Amn / mmode1.mx * mode1.peakV**2 / dc_gain # dimensionless
inner = m.lorentz_en * Cmn * mmode1.mx * Tstep / 2**df_scale / interp_gain # dimensionless
# note that inner*outer = net_coupling * mode1.peakV**2 * Tstep
print "# outer =",outer,"inner =",inner
# Many factors of two identifiable in processing chain. See scaling.txt.
cav4_elec_outer_prod_0_k_out.append(fix(outer*512,18,"outer"))
cav4_elec_outer_prod_0_k_out.append(0)
cav4_elec_dot_0_k_out.append(0)
cav4_elec_dot_0_k_out.append(fix(inner*512,18,"inner"))
# Use second resonance to test piezo subsystem
# The scaling is still non-quantitative
piezo_couple_k_out.append(m.piezo_hack)
piezo_couple_k_out.append(0)
cav4_elec_dot_2_k_out.append(0)
cav4_elec_dot_2_k_out.append(m.piezo_hack)
for n in regmap_global.keys():
set_reg(n,regmap_global)
for i,m in enumerate([mode1]):
print "# Cavity electrical mode %d: %s"%(i,m.name)
Q_L = 1 / (1/m.Q_0 + 1/m.Q_1 + 1/m.Q_2)
# x is defined as sqrt(U)
xmax = m.peakV / sqrt(m.RoverQ*omega0)
# four registers of pair_couple.v
out_couple = [
fix(sqrt(omega0/m.Q_1) * xmax / rfl_adc_max, 18, m.name+".out1", "cordic"),
fix(sqrt(omega0/m.Q_2) * xmax / cav_adc_max, 18, m.name+".out2", "cordic"),
fix(m.phase_1 / 180.0, 18, m.name+"out3"),
fix(m.phase_2 / 180.0, 18, m.name+"out4")]
# see Pro tip in eav4_elec.v for better limit on foffset
# XXX document using 33 for what's really a 28-bit register
coarse_freq = fix(Tstep*nyquist_sign*m.foffset, 33, m.name+".coarse_freq")
V_achievable = 2*sqrt(PAmax*m.Q_1*m.RoverQ)
drive_coupling = fix( V_achievable / m.peakV, 18, m.name+".drive_coupling", "cordic")
# bandwidth in Hz = f_clk/2/2^shift/(2*pi) * bw_register/2^17 = omega_0/(2*pi*2*Q_L)
# XXX document origin of *2.0 better, compensates for shift right in lp_pair.v
bw = fix(Tstep*omega0/(2*Q_L)*(2**lp_shift)*2.0, 18, m.name+".bw")
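    # Rough numbers for the defaults above (added estimate): Q_L is dominated by
    # Q_1 ~= 8.1e4, so the cavity half-bandwidth f0/(2*Q_L) is roughly
    # 1.3e9 / 1.62e5 ~= 8 kHz.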
beam_cpl = beam_current * m.RoverQ * Q_L / m.peakV # starting point
beam_cpl = -beam_cpl * beam_modulo / beam_phase_step**2
# compensate for hard-coded binary scaling in cav4_mode.v:
# beam_mag = beam_mag_wide[21:4];
# drive2 <= {beam_drv,6'b0};
beam_coupling = fix( beam_cpl/16, 18, m.name+".beam_coupling", "cordic")
regmap_emode = {
'coarse_freq':
get_reg_info(regmap,['', i],"coarse_freq")["base_addr"],
'drive_coupling':
get_reg_info(regmap,['', i],"drive_coupling")["base_addr"],
'bw':
get_reg_info(regmap,['', i],"bw")["base_addr"],
'out_couple':
get_reg_info(regmap,['', i],"out_coupling")["base_addr"],
'beam_coupling':
get_reg_info(regmap,['', i],"beam_coupling")["base_addr"]
}# base address of 4 registers
for n in regmap_emode:
set_reg(n, regmap_emode)
# keys = filter(lambda x: x.startswith("cav4_elec_mode_"+str(i)), regmap.keys())
# for n in keys:
# set_reg(m.name+".",n,["cav4_elec", "mode_"+str(i)])
# keys = filter(lambda x: x.startswith("cav4_elec_freq_"+str(i)), regmap.keys())
# for n in keys:
# set_reg(8*i,m.name+".",n,["cav4_elec", "freq_"+str(i)])
# Pseudo-random generator initialization, see tt800v.v and prng.v
prng_seed="pushmi-pullyu"
prng_seed=None
def push_seed(addr,hf):
for jx in range(25):
mm=hf.digest()
s=0
        for
# ----------------------------------------------------------------------------
# - Open3D: www.open3d.org -
# ----------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2018-2021 www.open3d.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# ----------------------------------------------------------------------------
"""Online 3D depth video processing pipeline.
- Connects to an RGBD camera or RGBD video file (currently
RealSense camera and bag file format are supported).
- Captures / reads color and depth frames. Allows recording from the camera.
- Converts frames to a point cloud, optionally with normals.
- Visualizes the point cloud video and results.
- Saves point clouds and RGBD images for selected frames.
"""
import os
import json
import time
import logging as log
import argparse
import threading
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
import numpy as np
import open3d as o3d
import open3d.visualization.gui as gui
import open3d.visualization.rendering as rendering
# Camera and processing
class PipelineModel:
"""Controls IO (camera, video file, recording, saving frames). Methods run
in worker threads."""
def __init__(self,
update_view,
camera_config_file=None,
rgbd_video=None,
device=None):
"""Initialize.
Args:
update_view (callback): Callback to update display elements for a
frame.
camera_config_file (str): Camera configuration json file.
rgbd_video (str): RS bag file containing the RGBD video. If this is
provided, connected cameras are ignored.
device (str): Compute device (e.g.: 'cpu:0' or 'cuda:0').
"""
self.update_view = update_view
if device:
self.device = device.lower()
else:
self.device = 'cuda:0' if o3d.core.cuda.is_available() else 'cpu:0'
self.o3d_device = o3d.core.Device(self.device)
self.video = None
self.camera = None
self.flag_capture = False
self.cv_capture = threading.Condition() # condition variable
self.recording = False # Are we currently recording
self.flag_record = False # Request to start/stop recording
if rgbd_video: # Video file
self.video = o3d.t.io.RGBDVideoReader.create(rgbd_video)
self.rgbd_metadata = self.video.metadata
self.status_message = f"Video {rgbd_video} opened."
else: # RGBD camera
now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = f"{now}.bag"
self.camera = o3d.t.io.RealSenseSensor()
if camera_config_file:
with open(camera_config_file) as ccf:
self.camera.init_sensor(o3d.t.io.RealSenseSensorConfig(
json.load(ccf)),
filename=filename)
else:
self.camera.init_sensor(filename=filename)
self.camera.start_capture(start_record=False)
self.rgbd_metadata = self.camera.get_metadata()
self.status_message = f"Camera {self.rgbd_metadata.serial_number} opened."
log.info(self.rgbd_metadata)
# RGBD -> PCD
self.extrinsics = o3d.core.Tensor.eye(4,
dtype=o3d.core.Dtype.Float32,
device=self.o3d_device)
self.intrinsic_matrix = o3d.core.Tensor(
self.rgbd_metadata.intrinsics.intrinsic_matrix,
dtype=o3d.core.Dtype.Float32,
device=self.o3d_device)
self.depth_max = 3.0 # m
self.pcd_stride = 2 # downsample point cloud, may increase frame rate
self.flag_normals = False
self.flag_save_rgbd = False
self.flag_save_pcd = False
self.pcd_frame = None
self.rgbd_frame = None
self.executor = ThreadPoolExecutor(max_workers=3,
thread_name_prefix='Capture-Save')
self.flag_exit = False
@property
def max_points(self):
"""Max points in one frame for the camera or RGBD video resolution."""
return self.rgbd_metadata.width * self.rgbd_metadata.height
@property
def vfov(self):
"""Camera or RGBD video vertical field of view."""
return np.rad2deg(2 * np.arctan(self.intrinsic_matrix[1, 2].item() /
self.intrinsic_matrix[1, 1].item()))
def run(self):
"""Run pipeline."""
n_pts = 0
frame_id = 0
t1 = time.perf_counter()
if self.video:
self.rgbd_frame = self.video.next_frame()
else:
self.rgbd_frame = self.camera.capture_frame(
wait=True, align_depth_to_color=True)
pcd_errors = 0
while (not self.flag_exit and
(self.video is None or # Camera
(self.video and not self.video.is_eof()))): # Video
if self.video:
future_rgbd_frame = self.executor.submit(self.video.next_frame)
else:
future_rgbd_frame = self.executor.submit(
self.camera.capture_frame,
wait=True,
align_depth_to_color=True)
if self.flag_save_pcd:
self.save_pcd()
self.flag_save_pcd = False
try:
self.rgbd_frame = self.rgbd_frame.to(self.o3d_device)
self.pcd_frame = o3d.t.geometry.PointCloud.create_from_rgbd_image(
self.rgbd_frame, self.intrinsic_matrix, self.extrinsics,
self.rgbd_metadata.depth_scale, self.depth_max,
self.pcd_stride, self.flag_normals)
depth_in_color = self.rgbd_frame.depth.colorize_depth(
self.rgbd_metadata.depth_scale, 0, self.depth_max)
except RuntimeError:
pcd_errors += 1
if self.pcd_frame.is_empty():
log.warning(f"No valid depth data in frame {frame_id})")
continue
n_pts += self.pcd_frame.point['positions'].shape[0]
if frame_id % 60 == 0 and frame_id > 0:
t0, t1 = t1, time.perf_counter()
log.debug(f"\nframe_id = {frame_id}, \t {(t1-t0)*1000./60:0.2f}"
f"ms/frame \t {(t1-t0)*1e9/n_pts} ms/Mp\t")
n_pts = 0
frame_elements = {
'color': self.rgbd_frame.color.cpu(),
'depth': depth_in_color.cpu(),
'pcd': self.pcd_frame.cpu(),
'status_message': self.status_message
}
self.update_view(frame_elements)
if self.flag_save_rgbd:
self.save_rgbd()
self.flag_save_rgbd = False
self.rgbd_frame = future_rgbd_frame.result()
with self.cv_capture: # Wait for capture to be enabled
self.cv_capture.wait_for(
predicate=lambda: self.flag_capture or self.flag_exit)
self.toggle_record()
frame_id += 1
if self.camera:
self.camera.stop_capture()
else:
self.video.close()
self.executor.shutdown()
log.debug(f"create_from_depth_image() errors = {pcd_errors}")
def toggle_record(self):
if self.camera is not None:
if self.flag_record and not self.recording:
self.camera.resume_record()
self.recording = True
elif not self.flag_record and self.recording:
self.camera.pause_record()
self.recording = False
def save_pcd(self):
"""Save current point cloud."""
now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = f"{self.rgbd_metadata.serial_number}_pcd_{now}.ply"
# Convert colors to uint8 for compatibility
self.pcd_frame.point['colors'] = (self.pcd_frame.point['colors'] *
255).to(o3d.core.Dtype.UInt8)
self.executor.submit(o3d.t.io.write_point_cloud,
filename,
self.pcd_frame,
write_ascii=False,
compressed=True,
print_progress=False)
self.status_message = f"Saving point cloud to {filename}."
def save_rgbd(self):
"""Save current RGBD image pair."""
now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
filename = f"{self.rgbd_metadata.serial_number}_color_{now}.jpg"
self.executor.submit(o3d.t.io.write_image, filename,
self.rgbd_frame.color)
filename = f"{self.rgbd_metadata.serial_number}_depth_{now}.png"
self.executor.submit(o3d.t.io.write_image, filename,
self.rgbd_frame.depth)
self.status_message = (
f"Saving RGBD images to {filename[:-3]}.{{jpg,png}}.")
class PipelineView:
"""Controls display and user interface. All methods must run in the main thread."""
def __init__(self, vfov=60, max_pcd_vertices=1 << 20, **callbacks):
"""Initialize.
Args:
vfov (float): Vertical field of view for the 3D scene.
            max_pcd_vertices (int): Maximum point cloud vertices for which memory
                is allocated.
callbacks (dict of kwargs): Callbacks provided by the controller
for various operations.
"""
self.vfov = vfov
self.max_pcd_vertices = max_pcd_vertices
gui.Application.instance.initialize()
self.window = gui.Application.instance.create_window(
"Open3D || Online RGBD Video Processing", 1280, 960)
# Called on window layout (eg: resize)
self.window.set_on_layout(self.on_layout)
self.window.set_on_close(callbacks['on_window_close'])
self.pcd_material = o3d.visualization.rendering.Material()
self.pcd_material.shader = "defaultLit"
# Set n_pixels displayed for each 3D point, accounting for HiDPI scaling
self.pcd_material.point_size = int(4 * self.window.scaling)
# 3D scene
self.pcdview = gui.SceneWidget()
self.window.add_child(self.pcdview)
self.pcdview.enable_scene_caching(
True) # makes UI _much_ more responsive
self.pcdview.scene = rendering.Open3DScene(self.window.renderer)
self.pcdview.scene.set_background([1, 1, 1, 1]) # White background
self.pcdview.scene.set_lighting(
rendering.Open3DScene.LightingProfile.SOFT_SHADOWS, [0, -6, 0])
# Point cloud bounds, depends on the sensor range
self.pcd_bounds = o3d.geometry.AxisAlignedBoundingBox([-3, -3, 0],
[3, 3, 6])
self.camera_view() # Initially look from the camera
em = self.window.theme.font_size
# Options panel
self.panel = gui.Vert(em, gui.Margins(em, em, em, em))
self.panel.preferred_width = int(360 * self.window.scaling)
self.window.add_child(self.panel)
toggles = gui.Horiz(em)
self.panel.add_child(toggles)
toggle_capture = gui.ToggleSwitch("Capture / Play")
toggle_capture.is_on = False
toggle_capture.set_on_clicked(
callbacks['on_toggle_capture']) # callback
toggles.add_child(toggle_capture)
self.flag_normals = False
self.toggle_normals = gui.ToggleSwitch("Colors / Normals")
self.toggle_normals.is_on = False
self.toggle_normals.set_on_clicked(
callbacks['on_toggle_normals']) # callback
toggles.add_child(self.toggle_normals)
view_buttons = gui.Horiz(em)
self.panel.add_child(view_buttons)
view_buttons.add_stretch() # for centering
camera_view = gui.Button("Camera view")
camera_view.set_on_clicked(self.camera_view) # callback
view_buttons.add_child(camera_view)
birds_eye_view = gui.Button("Bird's eye view")
birds_eye_view.set_on_clicked(self.birds_eye_view) # callback
view_buttons.add_child(birds_eye_view)
view_buttons.add_stretch() # for centering
save_toggle = gui.Horiz(em)
self.panel.add_child(save_toggle)
save_toggle.add_child(gui.Label("Record / Save"))
self.toggle_record = None
if callbacks['on_toggle_record'] is not None:
save_toggle.add_fixed(1.5 * em)
self.toggle_record = gui.ToggleSwitch("Video")
self.toggle_record.is_on = False
self.toggle_record.set_on_clicked(callbacks['on_toggle_record'])
save_toggle.add_child(self.toggle_record)
save_buttons = gui.Horiz(em)
self.panel.add_child(save_buttons)
save_buttons.add_stretch() # for centering
save_pcd = gui.Button("Save Point cloud")
save_pcd.set_on_clicked(callbacks['on_save_pcd'])
save_buttons.add_child(save_pcd)
save_rgbd = gui.Button("Save RGBD frame")
save_rgbd.set_on_clicked(callbacks['on_save_rgbd'])
save_buttons.add_child(save_rgbd)
save_buttons.add_stretch() # for centering
self.video_size = (int(240 * self.window.scaling),
int(320 * self.window.scaling), 3)
self.show_color = gui.CollapsableVert("Color image")
self.show_color.set_is_open(False)
self.panel.add_child(self.show_color)
self.color_video = gui.ImageWidget(
o3d.geometry.Image(np.zeros(self.video_size, dtype=np.uint8)))
self.show_color.add_child(self.color_video)
self.show_depth = gui.CollapsableVert("Depth image")
self.show_depth.set_is_open(False)
self.panel.add_child(self.show_depth)
self.depth_video = gui.ImageWidget(
o3d.geometry.Image(np.zeros(self.video_size, dtype=np.uint8)))
self.show_depth.add_child(self.depth_video)
self.status_message = gui.Label("")
self.panel.add_child(self.status_message)
self.flag_exit = False
self.flag_gui_init = False
def update(self, frame_elements):
"""Update visualization with point cloud and images. Must run in main
thread since this makes GUI calls.
Args:
frame_elements: dict {element_type: geometry element}.
Dictionary of element types to geometry elements to be updated
in the GUI:
'pcd': point cloud,
'color': rgb image (3 channel, uint8),
'depth': depth image (uint8),
'status_message': message
"""
if not self.flag_gui_init:
# Set dummy point cloud to allocate graphics memory
dummy_pcd = o3d.t.geometry.PointCloud({
'positions':
o3d.core.Tensor.zeros((self.max_pcd_vertices, 3),
o3d.core.Dtype.Float32),
'colors':
o3d.core.Tensor.zeros((self.max_pcd_vertices, 3),
o3d.core.Dtype.Float32),
'normals':
o3d.core.Tensor.zeros((self.max_pcd_vertices, 3),
o3d.core.Dtype.Float32)
})
if self.pcdview.scene.has_geometry('pcd'):
self.pcdview.scene.remove_geometry('pcd')
self.pcd_material.shader = "normals" if self.flag_normals else "defaultLit"
self.pcdview.scene.add_geometry('pcd', dummy_pcd, self.pcd_material)
self.flag_gui_init = True
# TODO(ssheorey) Switch to update_geometry() after #3452 is fixed
if os.name == 'nt':
self.pcdview.scene.remove_geometry('pcd')
self.pcdview.scene.add_geometry('pcd', frame_elements['pcd'],
self.pcd_material)
else:
update_flags = (rendering.Scene.UPDATE_POINTS_FLAG |
rendering.Scene.UPDATE_COLORS_FLAG |
(rendering.Scene.UPDATE_NORMALS_FLAG
if self.flag_normals else 0))
self.pcdview.scene.scene.update_geometry('pcd',
frame_elements['pcd'],
update_flags)
# Update color and depth images
# TODO(ssheorey) Remove CPU transfer after we have CUDA -> OpenGL bridge
if self.show_color.get_is_open() and 'color' in frame_elements:
sampling_ratio | |
4',
u'95tChNVzodY',
u'completing-the-square-4'),
(u'Quadratic Formula 1',
u'iulx0z1lz8M',
u'quadratic-formula-1'),
(u'Quadratic Formula 2',
u'CLrImGKeuEI',
u'quadratic-formula-2'),
(u'Quadratic Formula 3',
u'XUvKjC21fYU',
u'quadratic-formula-3'),
(u'Applying Quadratic Functions 1',
u'Zoa485PqK_M',
u'applying-quadratic-functions-1'),
(u'Applying Quadratic Functions 2',
u'HtN86WyZ6zY',
u'applying-quadratic-functions-2'),
(u'Applying Quadratic Functions 3',
u'huZBkYgGrKo',
u'applying-quadratic-functions-3'),
(u'Non-Linear Systems of Equations 1',
u'hjigR_rHKDI',
u'non-linear-systems-of-equations-1'),
(u'Non-Linear Systems of Equations 2',
u'XPf8LMu7QSw',
u'non-linear-systems-of-equations-2'),
(u'Non-Linear Systems of Equations 3',
u'FksgVpM_iXs',
u'non-linear-systems-of-equations-3'),
(u'Simplifying Rational Expressions 1',
u'XChok8XlF90',
u'simplifying-rational-expressions-1'),
(u'Simplifying Rational Expressions 2',
u'dstNU7It-Ro',
u'simplifying-rational-expressions-2'),
(u'Simplifying Rational Expressions 3',
u'ey_b3aPsRl8',
u'simplifying-rational-expressions-3'),
(u'Multiplying and Dividing Rational Expressions 1',
u'3GL69IA2q4s',
u'multiplying-and-dividing-rational-expressions-1'),
(u'Multiplying and Dividing Rational Expressions 2',
u'f-wz_ZzSDdg',
u'multiplying-and-dividing-rational-expressions-2'),
(u'Multiplying and Dividing Rational Expressions 3',
u'gcnk8TnzsLc',
u'multiplying-and-dividing-rational-expressions-3'),
(u'Adding and Subtracting Rational Expressions 1',
u'c_N9G3N9Ubc',
u'adding-and-subtracting-rational-expressions-1'),
(u'Adding and Subtracting Rational Expressions 2',
u'3tmFTHOP6Pc',
u'adding-and-subtracting-rational-expressions-2'),
(u'Adding and Subtracting Rational Expressions 3',
u'IKsi-DQU2zo',
u'adding-and-subtracting-rational-expressions-3'),
(u'Solving Rational Equations 1',
u'Yaeze9u6Cv8',
u'solving-rational-equations-1'),
(u'Solving Rational Equations 2',
u'RdYA8ZpqdJE',
u'solving-rational-equations-2'),
(u'Solving Rational Equations 3',
u'5wUJLMWZ5Fw',
u'solving-rational-equations-3'),
(u'Applying Rational Equations 1',
u'gD7A1LA4jO8',
u'applying-rational-equations-1'),
(u'Applying Rational Equations 2',
u'E1j8W64NQ0Y',
u'applying-rational-equations-2'),
(u'Applying Rational Equations 3',
u'B0Z4s38YIgQ',
u'applying-rational-equations-3'),
(u'Number Sets 1',
u'aqsIWLqlDhE',
u'number-sets-1'),
(u'Number Sets 2',
u'qfQv8GzyjB4',
u'number-sets-2'),
(u'Number Sets 3',
u'psyWUUkI-aw',
u'number-sets-3'),
(u'Understanding Logical Statements 1',
u'I6hz8mhaRB0',
u'understanding-logical-statements-1'),
(u'Understanding Logical Statements 2',
u'0Sx5aJoAkW8',
u'understanding-logical-statements-2'),
(u'Understanding Logical Statements 3',
u'fs7fz3MLpC8',
u'understanding-logical-statements-3'),
(u'Understanding Logical Statements 4',
u'mLCD0ez-yO0',
u'understanding-logical-statements-4'),
(u'Understanding Logical Statements Commentary',
u'okNqf2NGnpk',
u'understanding-logical-statements-commentary'),
(u'U12_L1_T3_we1 Inductive Reasoning 1',
u'MnQ7Lizkpqk',
u'u12-l1-t3-we1-inductive-reasoning-1'),
(u'Inductive Reasoning 2',
u'ZSsBh4Ybbj8',
u'inductive-reasoning-2'),
(u'Inductive Reasoning 3',
u'pgfWkaySFGY',
u'inductive-reasoning-3'),
(u'Deductive Reasoning 1',
u'GEId0GonOZM',
u'deductive-reasoning-1'),
(u'Deductive Reasoning 2',
u'VMEV__2wW3E',
u'deductive-reasoning-2'),
(u'Deductive Reasoning 3',
u'lYLaaMObgkw',
u'deductive-reasoning-3'),
(u'Events and Outcomes 1',
u'cdRLBOnLTDk',
u'events-and-outcomes-1'),
(u'Events and Outcomes 2',
u'vB4FuGmfEGY',
u'events-and-outcomes-2'),
(u'Events and Outcomes 3',
u'3_otNr9kRuY',
u'events-and-outcomes-3'),
(u'U12_L2_T2_we1 Independent Events 1',
u'Za7G_eWKiF4',
u'u12-l2-t2-we1-independent-events-1'),
(u'Independent Events 2',
u'VWAfEbgf1Po',
u'independent-events-2'),
(u'Independent Events 3',
u'2MYA8Ba2PvM',
u'independent-events-3'),
(u'Permutations and Combinations 1',
u'oQpKtm5TtxU',
u'permutations-and-combinations-1'),
(u'Permutations and Combinations 2',
u'v9NLtiVt3XY',
u'permutations-and-combinations-2'),
(u'Permutations and Combinations 3',
u'SbpoyXTpC84',
u'permutations-and-combinations-3'),
(u'Permutations and Combinations 4',
u'DIjlllgq3dc',
u'permutations-and-combinations-4'),
(u'Probability of Dependent Events',
u'l9ft9jpriNA',
u'probability-of-dependent-events'),
(u'Probability of Dependent Events 2',
u'ccrYD6iX_SY',
u'probability-of-dependent-events-2'),
(u'U02_L2_T2_we1 Absolute Value Equations.avi',
u'ZEml96_kyN4',
u'u02-l2-t2-we1-absolute-value-equations-avi'),
(u'U06_L3_T1_we3 Graphing Systems of Inequalities',
u'BUmLw5m6F9s',
u'u06-l3-t1-we3-graphing-systems-of-inequalities'),
(u'U06_L1_T1_we1 Solving Systems by Graphing',
u'Li5XGPiLLAY',
u'u06-l1-t1-we1-solving-systems-by-graphing'),
(u'U05_L2_T1_WE2 Graphing Linear Inequalities in Two Variables',
u'FnrqBgot3jM',
u'u05-l2-t1-we2-graphing-linear-inequalities-in-two-variables'),
(u'U03_L2_T1_we2 Representing Functions as Graphs',
u'12w3qsF4xmE',
u'u03-l2-t1-we2-representing-functions-as-graphs'),
(u'U03_L1_T1_we2 Inductive Patterns',
u'GvbrtnEYRpY',
u'u03-l1-t1-we2-inductive-patterns')],
'Arithmetic': [(u'Basic Addition', u'AuX7nPBqDts', u'basic-addition'),
(u'Addition 2', u't2L3JFOqTEk', u'addition-2'),
(u'Level 2 Addition', u'27Kp7HJYj2c', u'level-2-addition'),
(u'Addition 3', u'e_SpXIw_Qts', u'addition-3'),
(u'Addition 4', u'fOXo4p4WDKM', u'addition-4'),
(u'Basic Subtraction', u'aNqG4ChKShI', u'basic-subtraction'),
(u'Subtraction 2', u'incKJchBCLo', u'subtraction-2'),
(u'Subtraction 3 Introduction to Borrowing or Regrouping',
u'ZaqOUE3H1mE',
u'subtraction-3---introduction-to-borrowing-or-regrouping'),
(u'Alternate mental subtraction method',
u'GBtcGO44e-A',
u'alternate-mental-subtraction-method'),
(u'Level 4 Subtraction',
u'omUfrXtHtN0',
u'level-4-subtraction'),
(u'Why borrowing works',
u'fWan_T0enj4',
u'why-borrowing-works'),
(u'Adding Decimals', u'SxZUFA2SGX8', u'adding-decimals'),
(u'Subtracting decimals',
u'0mOH-qNGM7M',
u'subtracting-decimals'),
(u'Basic Multiplication',
u'mvOkMYCygps',
u'basic-multiplication'),
(u'Multiplication 2 The Multiplication Tables',
u'xO_1bYgoQvA',
u'multiplication-2--the-multiplication-tables'),
(u'Multiplication 3 10.11.12 times tables',
u'qihoczo1Ujk',
u'multiplication-3--10-11-12-times-tables'),
(u'Multiplication 4 2-digit times 1-digit number',
u'OYYYc7ygd38',
u'multiplication-4--2-digit-times-1-digit-number'),
(u'Multiplication 5 2-digit times a 2-digit number',
u't8m0NalQtEk',
u'multiplication-5---2-digit-times-a-2-digit-number'),
(u'Multiplication 6 Multiple Digit Numbers',
u'-h3Oqhl8fPg',
u'multiplication-6--multiple-digit-numbers'),
(u'Multiplication 7 Old video giving more examples',
u'_k3aWF6_b4w',
u'multiplication-7--old-video-giving-more-examples'),
(u'Mulitplication 8 Multiplying decimals (Old video)',
u'm5z6pOsxF_8',
u'mulitplication-8--multiplying-decimals--old-video'),
(u'Lattice Multiplication',
u'gS6TfWUv97I',
u'lattice-multiplication'),
(u'Why Lattice Multiplication Works',
u'S3z4XqC_YSc',
u'why-lattice-multiplication-works'),
(u'Division 1', u'MTzTqvzWzm8', u'division-1'),
(u'Division 2', u'8Ft5iHhauJ0', u'division-2'),
(u'Division 3 More long division and remainder examples',
u'NcADzGz3bSI',
u'division-3--more-long-division-and-remainder-examples'),
(u'Level 4 division', u'gHTH6PKfpMc', u'level-4-division'),
(u'Converting fractions to decimals',
u'Gn2pdkvdbGQ',
u'converting-fractions-to-decimals'),
(u'Percent and decimals',
u'RvtdJnYFNhc',
u'percent-and-decimals'),
(u'Dividing decimal', u'S0uuK7SQcA8', u'dividing-decimal'),
(u'Ordering numeric expressions',
u'Llt-KkHugRQ',
u'ordering-numeric-expressions'),
(u'Greatest Common Divisor',
u'jFd-6EPfnec',
u'greatest-common-divisor'),
(u'Least Common Multiple',
u'cH-jaMCzIRk',
u'least-common-multiple'),
(u'Equivalent fractions',
u'U2ovEuEUxXQ',
u'equivalent-fractions'),
(u'Mixed numbers and improper fractions',
u'1xuf6ZKF1_I',
u'mixed-numbers-and-improper-fractions')],
'BankingandMoney': [(u'Banking 1', u'E-HOz8T6tAo', u'banking-1'),
(u"Banking 2 A bank's income statement",
u'h3lMANILkw0',
u'banking-2--a-bank-s-income-statement'),
(u'Banking 3 Fractional Reserve Banking',
u'nH2-37rTA8U',
u'banking-3--fractional-reserve-banking'),
(u'Banking 4 Multiplier effect and the money supply',
u'F7r7l1VG-Tw',
u'banking-4--multiplier-effect-and-the-money-supply'),
(u'Banking 5 Introduction to Bank Notes',
u'cNFLqhU4MN0',
u'banking-5--introduction-to-bank-notes'),
(u'Banking 6 Bank Notes and Checks',
u'IOzZVmgK3IM',
u'banking-6--bank-notes-and-checks'),
(u'Banking 7 Giving out loans without giving out gold',
u'On3c86V5A_E',
u'banking-7--giving-out-loans-without-giving-out-gold'),
(u'Banking 8 Reserve Ratios',
u'VP3nKDUw1jA',
u'banking-8--reserve-ratios'),
(u'Banking 9 More on Reserve Ratios (Bad sound)',
u'DFPBdbx0vFc',
u'banking-9--more-on-reserve-ratios--bad-sound'),
(u'Banking 10 Introduction to leverage (bad sound)',
u'8fxilNdEQTo',
u'banking-10---introduction-to-leverage--bad-sound'),
(u'Banking 11 A reserve bank',
u'M-4GWomLbpc',
u'banking-11--a-reserve-bank'),
(u'Banking 12 Treasuries (government debt)',
u'JBWdbzzYbtU',
u'banking-12---treasuries--government-debt'),
(u'Banking 13 Open Market Operations',
u'BTNarhvGX88',
u'banking-13--open-market-operations'),
(u'Banking 14 Fed Funds Rate',
u'IniG1KkPS2c',
u'banking-14--fed-funds-rate'),
(u'Banking 15 More on the Fed Funds Rate',
u'rgqFXkLAc-4',
u'banking-15--more-on-the-fed-funds-rate'),
(u'Banking 16 Why target rates vs. money supply',
u'yOgGhPIHnlA',
u'banking-16--why-target-rates-vs--money-supply'),
(u'Banking 17 What happened to the gold?',
u'NFDMXwwzyIM',
u'banking-17--what-happened-to-the-gold'),
(u'Banking 18 Big Picture Discussion',
u'T9byZBGtGuw',
u'banking-18--big-picture-discussion'),
(u'The Discount Rate',
u'FxkTSjctXdk',
u'the-discount-rate'),
(u'Repurchase Agreements (Repo transactions)',
u'QWninXOAMXE',
u'repurchase-agreements--repo-transactions'),
(u'Federal Reserve Balance Sheet',
u'MILF-9GeMDQ',
u'federal-reserve-balance-sheet'),
(u'Fractional Reserve Banking Commentary 1',
u'ZyyaE3DIxhc',
u'fractional-reserve-banking-commentary-1'),
(u'FRB Commentary 2 Deposit Insurance',
u'otstXFxMkl4',
u'frb-commentary-2--deposit-insurance'),
(u'FRB Commentary 3 Big Picture',
u'8SAMey9Gl5I',
u'frb-commentary-3--big-picture')],
'Biology': [(u'Introduction to Evolution and Natural Selection',
u'GcjgWov7mTM',
u'introduction-to-evolution-and-natural-selection'),
(u'Ape Clarification', u'oFGkYA_diDA', u'ape-clarification'),
(u'Intelligent Design and Evolution',
u'qxOEz9aPZNY',
u'intelligent-design-and-evolution'),
(u'Evolution Clarification',
u'nh1R-gyY7es',
u'evolution-clarification'),
(u'Natural Selection and the Owl Butterfly',
u'dR_BFmDMRaI',
u'natural-selection-and-the-owl-butterfly'),
(u'DNA', u'_-vZ_g7K6P0', u'dna'),
(u'Variation in a Species',
u'DuArVnT1i-E',
u'variation-in-a-species'),
(u'Chromosomes. Chromatids. Chromatin. etc.',
u's9HPNwXd9fk',
u'chromosomes--chromatids--chromatin--etc'),
(u'Mitosis. Meiosis and Sexual Reproduction',
u'kaSIjIzAtYA',
u'mitosis--meiosis-and-sexual-reproduction'),
(u'Phases of Mitosis', u'LLKX_4DHE3I', u'phases-of-mitosis'),
(u'Phases of Meiosis', u'ijLc52LmFQg', u'phases-of-meiosis'),
(u'Embryonic Stem Cells',
u'-yCIMk1x0Pk',
u'embryonic-stem-cells'),
(u'Cancer', u'RZhL7LDPk8w', u'cancer'),
(u'Introduction to Heredity',
u'eEUvRrhmcxM',
u'introduction-to-heredity'),
(u'Punnett Square Fun', u'D5ymMYcLtv0', u'punnett-square-fun'),
(u'Hardy-Weinberg Principle',
u'4Kbruik_LOo',
u'hardy-weinberg-principle'),
(u'Sex-Linked Traits', u'-ROhfKyxgCo', u'sex-linked-traits'),
(u'Bacteria', u'TDoGrbpJJ14', u'bacteria'),
(u'Viruses', u'0h5Jd7sgQWY', u'viruses'),
(u'ATP Adenosine Triphosphate',
u'YQfWiDlFEcA',
u'atp--adenosine-triphosphate'),
(u'Introduction to Cellular Respiration',
u'2f7YwCtHcgk',
u'introduction-to-cellular-respiration'),
(u'Oxidation and Reduction Review From Biological Point-of-View',
u'orI2m6IarJg',
u'oxidation-and-reduction-review-from-biological-point-of-view'),
(u'Oxidation and Reduction in Cellular Respiration',
u'_KyyVhlUDNU',
u'oxidation-and-reduction-in-cellular-respiration'),
(u'Glycolysis', u'FE2jfTXAJHg', u'glycolysis'),
(u'Krebs / Citric Acid Cycle',
u'juM2ROSLWfw',
u'krebs---citric-acid-cycle'),
(u'Electron Transport Chain',
u'mfgCcFXUZRk',
u'electron-transport-chain'),
(u'Oxidative Phosphorylation and Chemiosmosis',
u'W_Q17tqw_7A',
u'oxidative-phosphorylation-and-chemiosmosis'),
(u'Photosynthesis', u'-rsYk4eCKnA', u'photosynthesis'),
(u'Photosynthesis Light Reactions 1',
u'GR2GA7chA_c',
u'photosynthesis--light-reactions-1'),
(u'Photosynthesis Light Reactions and Photophosphorylation',
u'yfR36PMWegg',
u'photosynthesis---light-reactions-and-photophosphorylation'),
(u'Photosynthesis Calvin Cycle',
u'slm6D2VEXYs',
u'photosynthesis---calvin-cycle'),
(u'Photorespiration', u'EQvTEFCANTM', u'photorespiration'),
(u'C-4 Photosynthesis', u'7ynX_F-SwNY', u'c-4-photosynthesis'),
(u'CAM Plants', u'xp6Zj24h8uA', u'cam-plants'),
(u'Parts of a cell', u'Hmwvj9X4GNY', u'parts-of-a-cell'),
(u'Diffusion and Osmosis',
u'aubZU0iWtgI',
u'diffusion-and-osmosis'),
(u'The Lungs and Pulmonary System',
u'SPGRkexI_cs',
u'the-lungs-and-pulmonary-system'),
(u'Red blood cells', u'fLKOBQ6cZHA', u'red-blood-cells'),
(u'Circulatory System and the Heart',
u'QhiVnFvshZg',
u'circulatory-system-and-the-heart'),
(u'Hemoglobin', u'LWtXthfG9_M', u'hemoglobin'),
(u'Anatomy of a Neuron', u'ob5U8zPbAX4', u'anatomy-of-a-neuron'),
(u'Sodium Potassium Pump',
u'C_H-ONQFjpQ',
u'sodium-potassium-pump'),
(u'Correction to Sodium and Potassium Pump Video',
u'ye3rTjLCvAU',
u'correction-to-sodium-and-potassium-pump-video'),
(u'Electrotonic and Action Potentials',
u'gkQtRec2464',
u'electrotonic-and-action-potentials'),
(u'Saltatory Conduction in Neurons',
u'7wgb7ggzFNs',
u'saltatory-conduction-in-neurons'),
(u'Neuronal Synapses (Chemical)',
u'Tbq-KZaXiL4',
u'neuronal-synapses--chemical'),
(u'Myosin and Actin', u'zopoN2i7ALQ', u'myosin-and-actin'),
(u'Tropomyosin and troponin and their role in regulating muscle contraction',
u'LiOfeSsjrB8',
u'tropomyosin-and-troponin-and-their-role-in-regulating-muscle-contraction'),
(u'Role of the Sarcoplasmic Reticulum in Muscle Cells',
u'SauhB2fYQkM',
u'role-of-the-sarcoplasmic-reticulum-in-muscle-cells'),
(u'Anatomy of a muscle cell',
u'uY2ZOsCnXIA',
u'anatomy-of-a-muscle-cell'),
(u'Role of Phagocytes in Innate or Nonspecific Immunity',
u'O1N2rENXq_Y',
u'role-of-phagocytes-in-innate-or-nonspecific-immunity'),
(u'Types of immune responses Innate and Adaptive. Humoral vs. Cell-Mediated',
u'rp7T4IItbtM',
u'types-of-immune-responses--innate-and-adaptive---humoral-vs--cell-mediated'),
(u'B Lymphocytes (B cells)',
u'Z36dUduOk1Y',
u'b-lymphocytes--b-cells'),
(u'Professional Antigen Presenting Cells (APC) and MHC II complexes',
u'j_kSmmEpvQk',
u'professional-antigen-presenting-cells--apc--and-mhc-ii-complexes'),
(u'Helper T Cells', u'uwMYpTYsNZM', u'helper-t-cells'),
(u'Cytotoxic T Cells', u'oqI4skjr6lQ', u'cytotoxic-t-cells'),
(u'Review of B cells. CD4+ T cells and CD8+ T cells',
u'xaz5ftvZCyI',
u'review-of-b-cells---cd4--t-cells-and-cd8--t-cells'),
(u'Inflammatory Response',
u'FXSuEIMrPQk',
u'inflammatory-response'),
(u'The Kidney and Nephron',
u'cc8sUv2SuaY',
u'the-kidney-and-nephron'),
(u'Secondary Active Transport in the Nephron',
u'czY5nyvZ7cU',
u'secondary-active-transport-in-the-nephron')],
'BrainTeasers': [(u'Liar Truthteller Brain Teaser',
u'69rDtSpshAw',
u'liar-truthteller-brain-teaser'),
(u'Toggler Brain Teaser',
u'l3OkPYhDi9w',
u'toggler-brain-teaser'),
(u'Alien Abduction Brain Teaser',
u'K4pocYXOmTQ',
u'alien-abduction-brain-teaser'),
(u'Brain Teaser Blue Forehead Room',
u'rBaCDC52NOY',
u'brain-teaser--blue-forehead-room'),
(u'Blue Forehead Room Solution',
u'-xYkTJFbuM0',
u'blue-forehead-room-solution'),
(u'Forehead Numbers Brain Teaser',
u'jqpQPxWj8gE',
u'forehead-numbers-brain-teaser'),
(u'Light Bulb Switching Brain Teaser',
u'WNhxkpmVQYw',
u'light-bulb-switching-brain-teaser'),
(u'Path Counting Brain Teaser',
u'9QduzzW10uA',
u'path-counting-brain-teaser'),
(u'3-D Path Counting Brain Teaser',
u'wRxzDOloS3o',
u'3-d-path-counting-brain-teaser')],
'CAHSEEExampleProblems': [(u'CAHSEE Practice Problems 1-3',
u'_yv3BrPt-cs',
u'cahsee-practice--problems-1-3'),
(u'CAHSEE Practice Problems 4-9',
u'i1CSuZdIu98',
u'cahsee-practice--problems-4-9'),
(u'CAHSEE Practice Problems 10-12',
u'4PIn_9cxefI',
u'cahsee-practice--problems-10-12'),
(u'CAHSEE Practice Problems 13-14',
u'9xwAPXM0Flg',
u'cahsee-practice--problems-13-14'),
(u'CAHSEE Practice Problems 15-16',
u'MwdwuDy0rRs',
u'cahsee-practice--problems-15-16'),
(u'CAHSEE Practice Problems 17-19',
u'ah39bUxYDEg',
u'cahsee-practice--problems-17-19'),
(u'CAHSEE Practice Problems 20-22',
u'ceM2u6IABWU',
u'cahsee-practice--problems-20-22'),
(u'CAHSEE Practice Problems 23-27',
u'glumochcaw0',
u'cahsee-practice--problems-23-27'),
(u'CAHSEE Practice Problems 28-31',
u'6Sy4E_8Psos',
u'cahsee-practice--problems-28-31'),
(u'CAHSEE Practice Problems 32-34',
u'1k-jqxMuUaM',
u'cahsee-practice--problems-32-34'),
(u'CAHSEE Practice Problems 35-37',
u'3A305Mhtkug',
u'cahsee-practice--problems-35-37'),
(u'CAHSEE Practice Problems 38-42',
u'_qB9JCpr8Co',
u'cahsee-practice--problems-38-42'),
(u'CAHSEE Practice Problems 43-46',
u'_rof2ExUzOU',
u'cahsee-practice--problems-43-46'),
(u'CAHSEE Practice Problems 47-51',
u'ENg2ajvKYss',
u'cahsee-practice--problems-47-51'),
(u'CAHSEE Practice Problems 52-53',
u'FdD0Wu032R4',
u'cahsee-practice--problems-52-53')],
'Calculus': [(u'Introduction to Limits',
u'W0VWO4asgmk',
u'introduction-to-limits'),
(u'Limit Examples (part 1)',
u'GGQngIp0YGI',
u'limit-examples--part-1'),
(u'Limit Examples (part 2)',
u'YRw8udexH4o',
u'limit-examples--part-2'),
(u'Limit Examples (part3)',
u'gWSDDopD9sk',
u'limit-examples--part3'),
(u'Limit Examples w/ brain malfunction on first prob (part 4)',
u'xjkSE9cPqzo',
u'limit-examples-w--brain-malfunction-on-first-prob--part-4'),
(u'Squeeze Theorem', u'igJdDN-DPgA', u'squeeze-theorem'),
(u'Proof lim (sin x)/x',
u'Ve99biD1KtA',
u'proof--lim--sin-x--x'),
| |
way works, but need more work
names = ['libswipl.dylib', 'libpl.dylib']
path = os.environ.get('SWI_HOME_DIR')
if path is None:
path = os.environ.get('SWI_LIB_DIR')
if path is None:
path = os.environ.get('PLBASE')
if path is None:
swi_ver = get_swi_ver()
path = '/Applications/SWI-Prolog.app/Contents/swipl-' + swi_ver + '/lib/'
paths = [path]
    for name in names:
        for path in paths:
            found = walk(path, name)
            if found is not None:
                (path_res, back_path) = found
                os.environ['SWI_LIB_DIR'] = back_path
                return (path_res, None)
    return (None, None)
@staticmethod
def _findSwiplDar():
"""
This function uses several heuristics to guess where SWI-Prolog is
installed in MacOS.
:returns:
A tuple of (path to the swipl so, path to the resource file)
:returns type:
({str, None}, {str, None})
"""
# If the exec is in path
(path, swiHome) = SWIPl._findSwiplFromExec()
if path is not None:
return (path, swiHome)
# If it is not, use find_library
path = SWIPl._findSwiplPathFromFindLib()
if path is not None:
return (path, swiHome)
# Last guess, searching for the file
paths = ['.', './lib', '/usr/lib/', '/usr/local/lib', '/opt/local/lib']
names = ['libswipl.dylib', 'libpl.dylib']
for name in names:
for path in paths:
path = os.path.join(path, name)
if os.path.exists(path):
return (path, None)
return (None, None)
@staticmethod
def _findSwipl():
"""
This function makes a big effort to find the path to the SWI-Prolog shared
library. Since this is both OS dependent and installation dependent, we may
        not always succeed. If we do, we return a name/path that can be used by
CDLL(). Otherwise we raise an exception.
        :return: Tuple. First element is the name or path to the library that can be
used by CDLL. Second element is the path were SWI-Prolog resource
file may be found (this is needed in some Linuxes)
:rtype: Tuple of strings
:raises ImportError: If we cannot guess the name of the library
"""
# Now begins the guesswork
platform = sys.platform[:3]
if platform == "win": # In Windows, we have the default installer
# path and the registry to look
(path, swiHome) = SWIPl._findSwiplWin()
elif platform in ("lin", "cyg"):
(path, swiHome) = SWIPl._findSwiplLin()
elif platform == "dar": # Help with MacOS is welcome!!
(path, swiHome) = SWIPl._findSwiplDar()
if path is None:
(path, swiHome) = SWIPl._findSwiplMacOSHome()
else:
# This should work for other UNIX
(path, swiHome) = SWIPl._findSwiplLin()
# This is a catch all raise
if path is None:
raise ImportError('Could not find the SWI-Prolog library in this '
'platform. If you are sure it is installed, please '
'open an issue.')
else:
return (path, swiHome)
@staticmethod
def _fixWindowsPath(dll):
"""
When the path to the DLL is not in Windows search path, Windows will not be
able to find other DLLs on the same directory, so we have to add it to the
path. This function takes care of it.
:parameters:
- `dll` (str) - File name of the DLL
"""
if sys.platform[:3] != 'win':
return # Nothing to do here
pathToDll = os.path.dirname(dll)
currentWindowsPath = os.getenv('PATH')
if pathToDll not in currentWindowsPath:
# We will prepend the path, to avoid conflicts between DLLs
newPath = pathToDll + ';' + currentWindowsPath
os.putenv('PATH', newPath)
def walk(path, name):
    """
    Recursively search a directory tree for a file.
    :parameters:
      - `path` (str) - Directory to start the search from
      - `name` (str) - Name of the file we are looking for
    :returns:
        Tuple of (full path to the file, directory containing it), or None if
        the file cannot be found anywhere under `path`
    :returns type:
        ({(str, str), None})
    """
    back_path = path[:]
    path = os.path.join(path, name)
    if os.path.exists(path):
        return (path, back_path)
    for dir_ in os.listdir(back_path):
        sub_path = os.path.join(back_path, dir_)
        if os.path.isdir(sub_path):
            res = walk(sub_path, name)
            if res is not None:
                return res
    return None
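# Hypothetical usage sketch (the paths are invented for illustration, not real
# installation locations):
#   walk('/usr/lib', 'libswipl.so')
#   -> ('/usr/lib/swi-prolog/lib/x86_64/libswipl.so',
#       '/usr/lib/swi-prolog/lib/x86_64')
# or None when the file cannot be found anywhere under the given directory.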
def get_swi_ver():
    import re
    swi_ver = input(
        'Please enter your SWI-Prolog version in format "X.Y.Z": ')
    match = re.search(r'[0-9]\.[0-9]\.[0-9]', swi_ver)
    if match is None:
        raise ValueError('Invalid version; expected the format "X.Y.Z"')
    return swi_ver
_stringMap = {}
def str_to_bytes(string):
"""
Turns a string into a bytes if necessary (i.e. if it is not already a bytes
object or None).
If string is None, int or c_char_p it will be returned directly.
:param string: The string that shall be transformed
:type string: str, bytes or type(None)
:return: Transformed string
:rtype: c_char_p compatible object (bytes, c_char_p, int or None)
"""
if string is None or isinstance(string, (int, c_char_p)):
return string
if not isinstance(string, bytes):
if string not in _stringMap:
_stringMap[string] = string.encode()
string = _stringMap[string]
return string
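# Example behaviour (added note): str_to_bytes("consult") returns b"consult"
# (and caches it in _stringMap), while None, int, c_char_p and bytes inputs are
# returned unchanged.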
def list_to_bytes_list(strList):
"""
This function turns an array of strings into a pointer array
with pointers pointing to the encodings of those strings
Possibly contained bytes are kept as they are.
:param strList: List of strings that shall be converted
:type strList: List of strings
:returns: Pointer array with pointers pointing to bytes
:raises: TypeError if strList is not list, set or tuple
"""
pList = c_char_p * len(strList)
# if strList is already a pointerarray or None, there is nothing to do
if isinstance(strList, (pList, type(None))):
return strList
if not isinstance(strList, (list, set, tuple)):
raise TypeError("strList must be list, set or tuple, not " +
str(type(strList)))
pList = pList()
for i, elem in enumerate(strList):
pList[i] = str_to_bytes(elem)
return pList
# create a decorator that turns the incoming strings into c_char_p compatible
# bytes or pointer arrays
def check_strings(strings, arrays):
"""
Decorator function which can be used to automatically turn an incoming
string into a bytes object and an incoming list to a pointer array if
necessary.
:param strings: Indices of the arguments must be pointers to bytes
:type strings: List of integers
:param arrays: Indices of the arguments must be arrays of pointers to bytes
:type arrays: List of integers
"""
# if given a single element, turn it into a list
if isinstance(strings, int):
strings = [strings]
elif strings is None:
strings = []
# check if all entries are integers
for i,k in enumerate(strings):
if not isinstance(k, int):
raise TypeError(('Wrong type for index at {0} '+
'in strings. Must be int, not {1}!').format(i,k))
# if given a single element, turn it into a list
if isinstance(arrays, int):
arrays = [arrays]
elif arrays is None:
arrays = []
# check if all entries are integers
for i,k in enumerate(arrays):
if not isinstance(k, int):
raise TypeError(('Wrong type for index at {0} '+
'in arrays. Must be int, not {1}!').format(i,k))
# check if some index occurs in both
if set(strings).intersection(arrays):
raise ValueError('One or more elements occur in both arrays and ' +
' strings. One parameter cannot be both list and string!')
# create the checker that will check all arguments given by argsToCheck
# and turn them into the right datatype.
def checker(func):
def check_and_call(*args):
args = list(args)
for i in strings:
arg = args[i]
args[i] = str_to_bytes(arg)
for i in arrays:
arg = args[i]
args[i] = list_to_bytes_list(arg)
return func(*args)
return check_and_call
return checker
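# Minimal usage sketch (the decorated function is hypothetical, not part of the
# real SWI-Prolog bindings): argument 0 is converted with str_to_bytes and
# argument 1 with list_to_bytes_list before the wrapped function is called.
#
#   @check_strings(0, 1)
#   def fake_call(query, args):
#       return query, args
#
#   fake_call("consult", ["a", "b"])   # query arrives as b"consult",
#                                      # args as a c_char_p array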
# PySwip constants
PYSWIP_MAXSTR = 1024
c_int_p = c_void_p
c_long_p = c_void_p
c_double_p = c_void_p
c_uint_p = c_void_p
# constants (from SWI-Prolog.h)
# PL_unify_term() arguments
PL_VARIABLE = 1 # nothing
PL_ATOM = 2 # const char
PL_INTEGER = 3 # int
PL_FLOAT = 4 # double
PL_STRING = 5 # const char *
PL_TERM = 6 #
# PL_unify_term()
PL_FUNCTOR = 10 # functor_t, arg ...
PL_LIST = 11 # length, arg ...
PL_CHARS = 12 # const char *
PL_POINTER = 13 # void *
# /* PlArg::PlArg(text, type) */
#define PL_CODE_LIST (14) /* [ascii...] */
#define PL_CHAR_LIST (15) /* [h,e,l,l,o] */
#define PL_BOOL (16) /* PL_set_feature() */
#define PL_FUNCTOR_CHARS (17) /* PL_unify_term() */
#define _PL_PREDICATE_INDICATOR (18) /* predicate_t (Procedure) */
#define PL_SHORT (19) /* short */
#define PL_INT (20) /* int */
#define PL_LONG (21) /* long */
#define PL_DOUBLE (22) /* double */
#define PL_NCHARS (23) /* unsigned, const char * */
#define PL_UTF8_CHARS (24) /* const char * */
#define PL_UTF8_STRING (25) /* const char * */
#define PL_INT64 (26) /* int64_t */
#define PL_NUTF8_CHARS (27) /* unsigned, const char * */
#define PL_NUTF8_CODES (29) /* unsigned, const char * */
#define PL_NUTF8_STRING (30) /* unsigned, const char * */
#define PL_NWCHARS (31) /* unsigned, const wchar_t * */
#define PL_NWCODES (32) /* unsigned, const wchar_t * */
#define PL_NWSTRING (33) /* unsigned, | |
Encoding.SIGNED]:
raise ValueError("{} is not a recognised encoding.".format(encoding))
self.encoding = encoding
if self.verbose:
print(f"\tHilbert encoding: {Encoding.BINARY}")
if self.verbose:
print(f"\tPreparing basis information", end="...")
if not make_basis:
raise NotImplementedError("_HilbertRestricted must have make_basis=True.")
self._idx_basis_vec = self.to_idx_tensor([2 ** n for n in range(N)])
self.basis_states, self.basis_idxs, self.restricted2full_basis_idxs = self.__prepare_basis()
if self.N <= 30:
# Faster look up, but requires more memory.
self.use_full2restricted_lut_arr = True
full2restricted_basis_idxs = -1 * np.ones(2 ** self.N)
full2restricted_basis_idxs[self.restricted2full_basis_idxs] = np.arange(len(self.restricted2full_basis_idxs))
self.full2restricted_basis_idxs = self.to_idx_tensor(full2restricted_basis_idxs)
else:
self.use_full2restricted_lut_arr = False
self.full2restricted_basis_idxs = defaultdict(lambda: -1,
np.stack([self.restricted2full_basis_idxs,
np.arange(len(self.restricted2full_basis_idxs))]).T)
if self.verbose:
print("done.")
self.subspaces = {(None, None): (self.basis_states.clone(), self.basis_idxs.clone())}
def __prepare_basis(self):
alpha_set_bits = np.array(list(combinations(np.arange(0, self.N, step=2), self.N_alpha)))
beta_set_bits = np.array(list(combinations(np.arange(1, self.N, step=2), self.N_beta)))
# alphabeta_set_bits = np.array(list(product(alpha_set_bits, beta_set_bits))).reshape(-1, self.N_up)
alphabeta_set_bits = np.array([np.concatenate(x) for x in product(alpha_set_bits, beta_set_bits)])
restricted_basis = np.zeros((len(alphabeta_set_bits), self.N), dtype=self._state_np_dtype)
restricted_basis[
np.broadcast_to(np.arange(len(alphabeta_set_bits))[:, None], alphabeta_set_bits.shape),
alphabeta_set_bits
] = 1
alphabeta_set_bits = (2 ** alphabeta_set_bits).sum(-1)
restricted_hilbert_idxs = np.arange(len(restricted_basis))
if self.encoding == Encoding.SIGNED:
restricted_basis = 2 * restricted_basis - 1
return (self.to_state_tensor(restricted_basis),
self.to_idx_tensor(restricted_hilbert_idxs),
self.to_idx_tensor(alphabeta_set_bits))
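    # Small worked example (added for illustration): for N=4, N_alpha=1, N_beta=1
    # the alpha electron occupies site 0 or 2 and the beta electron site 1 or 3,
    # giving the occupation vectors [1,1,0,0], [1,0,0,1], [0,1,1,0] and [0,0,1,1]
    # with full-Hilbert indices (sum of 2**site over occupied sites) 3, 9, 6 and 12.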
def __check_config(self, N_up, N_alpha, N_beta, N_occ, N_exc_max):
if ((N_up is not None)
and (N_alpha is not None)
and (N_beta is not None)):
assert N_up == N_alpha + N_beta, f"N_up ({N_up}) must be the sum of N_alpha ({N_alpha}) and N_beta ({N_beta})"
elif ((N_alpha is not None) and (N_beta is not None)):
N_up = N_alpha + N_beta
elif ((N_up is not None) and (N_alpha is not None)):
N_beta = N_up - N_alpha
elif ((N_up is not None) and (N_beta is not None)):
N_alpha = N_up - N_beta
elif (N_alpha is not None):
N_up = N_alpha
N_beta = 0
elif (N_beta is not None):
N_up = N_beta
N_alpha = 0
elif (N_up is not None):
assert N_up <= self.N, f"N_up ({N_up}) must be <= N ({self.N})"
if (N_occ is not None):
if N_occ == self.N_occ:
N_occ = None
else:
N_occ -= self.N_occ
if (N_exc_max is not None):
assert N_exc_max <= N_up, f"Maximum number of excitations (N_exc) can not exceed total number of 1's (N_up)."
return N_up, N_alpha, N_beta, N_occ, N_exc_max
def get_subspace(self, N_up=None, N_alpha=None, N_beta=None,
N_occ=None, N_exc_max=None,
ret_states=True, ret_idxs=False, use_restricted_idxs=False):
if N_up is None:
N_up = self.N_up
if N_alpha is None:
N_alpha = self.N_alpha
if N_beta is None:
N_beta = self.N_beta
N_up, N_alpha, N_beta, N_occ, N_exc_max = self.__check_config(N_up,
N_alpha,
N_beta,
N_occ,
N_exc_max)
        key = (N_occ, N_exc_max)  # cache key; matches the (None, None) entry created in __init__
if key in self.subspaces:
space_states, space_idxs = self.subspaces[key]
else:
mask = None
space_states, space_idxs = self.basis_states.clone(), self.basis_idxs.clone()
if N_occ is not None:
mask_occ = (space_states[:, :N_occ] > 0).sum(1) == N_occ
if mask is not None:
mask = (mask & mask_occ)
else:
mask = mask_occ
if N_exc_max is not None:
mask_exc = (space_states[:, self.N_up:] > 0).sum(1) <= N_exc_max
if mask is not None:
mask = (mask & mask_exc)
else:
mask = mask_exc
if mask is not None:
space_states = space_states[mask]
                space_idxs = space_idxs[mask]
self.subspaces[key] = (space_states, space_idxs)
if not use_restricted_idxs:
space_idxs = self.restricted2full_basis_idxs[space_idxs.long()]
if ret_states and ret_idxs:
return space_states, space_idxs
elif ret_states:
return space_states
elif ret_idxs:
return space_idxs
def get_basis(self, ret_states=True, ret_idxs=False, use_restricted_idxs=False):
basis_states = self.basis_states.clone()
basis_idxs = self.basis_idxs.clone()
if not use_restricted_idxs:
basis_idxs = self.restricted2full_basis_idxs[basis_idxs.long()]
if ret_states and ret_idxs:
return self.to_state_tensor(basis_states), self.to_idx_tensor(basis_idxs)
elif ret_states:
return self.to_state_tensor(basis_states)
elif ret_idxs:
return self.to_idx_tensor(basis_idxs)
def state2idx(self, state, use_restricted_idxs=False):
if type(state) is np.ndarray:
state = torch.from_numpy(state)
state_clamped = state.clamp_min(0)
idxs = self.to_idx_tensor((state_clamped * self._idx_basis_vec).sum(dim=-1, keepdim=True))
if use_restricted_idxs:
# idxs = self.full2restricted_basis_idxs[idxs]
idxs = self.full2restricted_idx(idxs)
return self.to_idx_tensor(idxs)
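    # Example (added for illustration): with N=4 the index basis vector is
    # [1, 2, 4, 8], so the state [1, 0, 1, 0] (or [1, -1, 1, -1] in SIGNED
    # encoding, thanks to the clamp) maps to full-Hilbert index 1 + 4 = 5.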
def idx2state(self, idx, use_restricted_idxs=False):
if not torch.is_tensor(idx):
idx = torch.LongTensor(idx)
if not use_restricted_idxs:
# idx = self.full2restricted_basis_idxs[idx]
idx = self.full2restricted_idx(idx)
state = self.basis_states.index_select(0, idx.long())
return self.to_state_tensor(state)
def restricted2full_idx(self, idx):
np_out = False
if not torch.is_tensor(idx):
idx = torch.LongTensor(idx)
np_out = True
idx = self.restricted2full_basis_idxs[idx.long()]
if np_out:
idx = self.to_idx_array(idx)
else:
idx = self.to_idx_tensor(idx)
return idx
def full2restricted_idx(self, idx):
if self.use_full2restricted_lut_arr:
return self.__full2restricted_idx_arr_lut(idx)
else:
return self.__full2restricted_idx_dic_lut(idx)
def __full2restricted_idx_arr_lut(self, idx):
np_out = False
if not torch.is_tensor(idx):
idx = torch.LongTensor(idx)
np_out = True
idx = self.full2restricted_basis_idxs[idx.long()]
if np_out:
idx = self.to_idx_array(idx)
else:
idx = self.to_idx_tensor(idx)
return idx
def __full2restricted_idx_dic_lut(self, idx):
if torch.is_tensor(idx):
idx = idx.numpy()
np_out = False
else:
np_out = True
if len(idx) > 1:
idx = np.fromiter( itemgetter(*idx.squeeze())(self.full2restricted_basis_idxs), self._idx_np_dtype, count=len(idx) )
else:
idx = np.array([self.full2restricted_basis_idxs[idx[0]]])
if np_out:
idx = self.to_idx_array(idx)
else:
idx = self.to_idx_tensor(idx)
return idx
class _HilbertPartiallyRestricted(_HilbertBase):
def __init__(self, N, N_alpha, N_beta,
encoding=Encoding.BINARY, make_basis=True, verbose=False):
self.N = N
self.N_alpha = self.__to_arr(N_alpha)
self.N_beta = self.__to_arr(N_beta)
self.N_up = self.__to_arr(self.N_alpha + self.N_beta)
self.N_occ = 0
self.__check_config(self.N_up, self.N_alpha, self.N_beta, None, None)
self.size = sum( int(comb(math.ceil(self.N / 2), N_alpha) * comb(math.floor(self.N / 2), N_beta))
for N_alpha, N_beta in zip(self.N_alpha, self.N_beta) )
self.verbose = verbose
if self.verbose:
print("preparing _HilbertRestricted...")
# min_bits = math.ceil(math.log(self.size, 2))
self._state_torch_dtype, self._state_np_dtype = torch.int8, np.int8
# if N < 8:
# self._idx_torch_dtype, self._idx_np_dtype = torch.int8, np.int8
if N < 16:
self._idx_torch_dtype, self._idx_np_dtype = torch.int16, np.int16
elif N < 30:
self._idx_torch_dtype, self._idx_np_dtype = torch.int32, np.int32
else:
self._idx_torch_dtype, self._idx_np_dtype = torch.int64, np.int64
if encoding not in [Encoding.BINARY, Encoding.SIGNED]:
raise ValueError("{} is not a recognised encoding.".format(encoding))
self.encoding = encoding
if self.verbose:
print(f"\tHilbert encoding: {Encoding.BINARY}")
if self.verbose:
print(f"\tPreparing basis information", end="...")
if not make_basis:
raise NotImplementedError("_HilbertRestricted must have make_basis=True.")
self._idx_basis_vec = self.to_idx_tensor([2 ** n for n in range(N)])
self.basis_states, self.basis_idxs, self.restricted2full_basis_idxs = self.__prepare_basis()
if self.N <= 30:
# Faster look up, but requires more memory.
self.use_full2restricted_lut_arr = True
full2restricted_basis_idxs = -1 * np.ones(2 ** self.N)
full2restricted_basis_idxs[self.restricted2full_basis_idxs] = np.arange(len(self.restricted2full_basis_idxs))
self.full2restricted_basis_idxs = self.to_idx_tensor(full2restricted_basis_idxs)
else:
self.use_full2restricted_lut_arr = False
self.full2restricted_basis_idxs = defaultdict(lambda: -1,
np.stack([self.restricted2full_basis_idxs,
np.arange(len(self.restricted2full_basis_idxs))]).T)
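        # Illustrative scale of the trade-off (hypothetical size): at N = 20 the dense lookup
        # array branch above holds 2**20 (about a million) entries, whereas the defaultdict
        # branch only stores one entry per restricted basis state.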
if self.verbose:
print("done.")
self.subspaces = {(None, None): (self.basis_states.clone(), self.basis_idxs.clone())}
def __prepare_basis(self):
num_states = 0
restricted_basis, restricted_hilbert_idxs, alphabeta_set_bits = [], [], []
for n_alpha, n_beta in zip(self.N_alpha, self.N_beta):
alpha_set_bits = np.array(list(combinations(np.arange(0, self.N, step=2), n_alpha)))
beta_set_bits = np.array(list(combinations(np.arange(1, self.N, step=2), n_beta)))
# alphabeta_set_bits = np.array(list(product(alpha_set_bits, beta_set_bits))).reshape(-1, self.N_up)
_alphabeta_set_bits = np.array([np.concatenate(x) for x in product(alpha_set_bits, beta_set_bits)])
_restricted_basis = np.zeros((len(_alphabeta_set_bits), self.N), dtype=self._state_np_dtype)
_restricted_basis[
np.broadcast_to(np.arange(len(_alphabeta_set_bits))[:, None], _alphabeta_set_bits.shape),
_alphabeta_set_bits
] = 1
_alphabeta_set_bits = (2 ** _alphabeta_set_bits).sum(-1)
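            # Illustrative example (hypothetical values): with set bits at positions [0, 3],
            # the full-Hilbert integer index is 2**0 + 2**3 = 9, i.e. the occupation
            # pattern with orbitals 0 and 3 filled.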
_restricted_hilbert_idxs = num_states + np.arange(len(_restricted_basis))
num_states += len(_restricted_basis)
if self.encoding == Encoding.SIGNED:
_restricted_basis = 2 * _restricted_basis - 1
restricted_basis.append(_restricted_basis)
restricted_hilbert_idxs.append(_restricted_hilbert_idxs)
alphabeta_set_bits.append(_alphabeta_set_bits)
restricted_basis = np.concatenate(restricted_basis)
restricted_hilbert_idxs = np.concatenate(restricted_hilbert_idxs)
alphabeta_set_bits = np.concatenate(alphabeta_set_bits)
return (self.to_state_tensor(restricted_basis),
self.to_idx_tensor(restricted_hilbert_idxs),
self.to_idx_tensor(alphabeta_set_bits))
def __to_arr(self, N):
if N is None:
return None
if not isinstance(N, Iterable):
N = [N]
if not isinstance(N, np.ndarray):
N = np.array(N)
return N
def __check_config(self, N_up, N_alpha, N_beta, N_occ, N_exc_max):
N_up, N_alpha, N_beta, N_occ, N_exc_max = (self.__to_arr(N_up),
self.__to_arr(N_alpha),
self.__to_arr(N_beta),
self.__to_arr(N_occ),
self.__to_arr(N_exc_max))
if (N_up is None) and (N_alpha is not None) and (N_beta is not None):
N_up = N_alpha + N_beta
assert all(N_up == N_alpha + N_beta), f"N_up ({N_up}) must be the sum of N_alpha ({N_alpha}) and N_beta ({N_beta})"
if ((N_up is not None) and (N_alpha is not None)):
N_beta = N_up - N_alpha
elif ((N_up is not None) and (N_beta is not None)):
N_alpha = N_up - N_beta
elif (N_alpha is not None):
N_up = N_alpha
N_beta = 0
elif (N_beta is not None):
N_up = N_beta
N_alpha = 0
elif (N_up is not None):
assert all(N_up <= self.N), f"N_up ({N_up}) must be <= N ({self.N})"
if (N_occ is not None):
if N_occ == self.N_occ:
N_occ = None
else:
N_occ -= self.N_occ
if (N_exc_max is not None):
assert N_exc_max <= N_up, f"Maximum number of excitations (N_exc) can not exceed total number of 1's (N_up)."
return N_up, N_alpha, N_beta, N_occ, N_exc_max
def get_subspace(self, N_up=None, N_alpha=None, N_beta=None,
N_occ=None, N_exc_max=None,
ret_states=True, ret_idxs=False, use_restricted_idxs=False):
if N_occ is not None or N_exc_max is not None:
# legacy arguments not needed in the final experiments.
raise NotImplementedError()
key = (N_occ, N_exc_max)
if key in self.subspaces:
space_states, space_idxs = self.subspaces[key]
else:
space_states, space_idxs = self.basis_states.clone(), self.basis_idxs.clone()
self.subspaces[key] = (space_states, space_idxs)
if not use_restricted_idxs:
space_idxs = self.restricted2full_basis_idxs[space_idxs.long()]
if ret_states and ret_idxs:
return space_states, space_idxs
elif ret_states:
return space_states
elif ret_idxs:
return space_idxs
def get_basis(self, ret_states=True, ret_idxs=False, use_restricted_idxs=False):
basis_states = self.basis_states.clone()
basis_idxs = self.basis_idxs.clone()
if not use_restricted_idxs:
basis_idxs = self.restricted2full_basis_idxs[basis_idxs.long()]
if ret_states and ret_idxs:
return self.to_state_tensor(basis_states), self.to_idx_tensor(basis_idxs)
elif ret_states:
return self.to_state_tensor(basis_states)
elif ret_idxs:
return self.to_idx_tensor(basis_idxs)
def state2idx(self, state, use_restricted_idxs=False):
if type(state) is np.ndarray:
state = torch.from_numpy(state)
state_clamped = state.clamp_min(0)
        idxs = self.to_idx_tensor((state_clamped * self._idx_basis_vec).sum(dim=-1, keepdim=True))
        if use_restricted_idxs:
            idxs = self.full2restricted_idx(idxs)
        return self.to_idx_tensor(idxs)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 18:23:35 2018
This script contains the core for the linear Kramers-Kronig analysis
@author: <NAME> (<EMAIL> / <EMAIL>)
"""
import numpy as np
from lmfit import minimize, Minimizer, Parameters, Parameter, report_fit
### Simulation Functions
##
#
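# Illustrative note (not part of the original script): every KK_RC<n> function below
# evaluates Z(w) = Rs + sum_k R_k / (1 + 1j*w*t_k) for a fixed number of -RC- elements,
# so a hypothetical generic equivalent could be written as:
#
#     def KK_RC(w, Rs, R_values, t_values):
#         return Rs + sum(R / (1 + w * 1j * t) for R, t in zip(R_values, t_values))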
def KK_RC2(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
)
def KK_RC3(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
)
def KK_RC4(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
)
def KK_RC5(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
)
def KK_RC6(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
)
def KK_RC7(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
)
def KK_RC8(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
)
def KK_RC9(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
+ (R_values[8] / (1 + w * 1j * t_values[8]))
)
def KK_RC10(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
+ (R_values[8] / (1 + w * 1j * t_values[8]))
+ (R_values[9] / (1 + w * 1j * t_values[9]))
)
def KK_RC11(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
+ (R_values[8] / (1 + w * 1j * t_values[8]))
+ (R_values[9] / (1 + w * 1j * t_values[9]))
+ (R_values[10] / (1 + w * 1j * t_values[10]))
)
def KK_RC12(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
+ (R_values[8] / (1 + w * 1j * t_values[8]))
+ (R_values[9] / (1 + w * 1j * t_values[9]))
+ (R_values[10] / (1 + w * 1j * t_values[10]))
+ (R_values[11] / (1 + w * 1j * t_values[11]))
)
def KK_RC13(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
+ (R_values[8] / (1 + w * 1j * t_values[8]))
+ (R_values[9] / (1 + w * 1j * t_values[9]))
+ (R_values[10] / (1 + w * 1j * t_values[10]))
+ (R_values[11] / (1 + w * 1j * t_values[11]))
+ (R_values[12] / (1 + w * 1j * t_values[12]))
)
def KK_RC14(w, Rs, R_values, t_values):
"""
Kramers-Kronig Function: -RC-
<NAME> (<EMAIL> / <EMAIL>)
"""
return (
Rs
+ (R_values[0] / (1 + w * 1j * t_values[0]))
+ (R_values[1] / (1 + w * 1j * t_values[1]))
+ (R_values[2] / (1 + w * 1j * t_values[2]))
+ (R_values[3] / (1 + w * 1j * t_values[3]))
+ (R_values[4] / (1 + w * 1j * t_values[4]))
+ (R_values[5] / (1 + w * 1j * t_values[5]))
+ (R_values[6] / (1 + w * 1j * t_values[6]))
+ (R_values[7] / (1 + w * 1j * t_values[7]))
+ (R_values[8] / (1 + w * 1j * t_values[8]))
+ (R_values[9] / (1 + w * 1j * t_values[9]))
+ (R_values[10] / (1 + w * 1j * t_values[10]))
+ (R_values[11] / (1 + w * 1j * t_values[11]))
        + (R_values[12] / (1 + w * 1j * t_values[12]))
        + (R_values[13] / (1 + w * 1j * t_values[13]))
    )
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE INTEL CORPORATION OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Quark Microcontroller Firmware Update Module
This module provides classes to create and manipulate firmware images for Quark
Microcontrollers."""
from __future__ import print_function, division, absolute_import
import re
import struct
import hashlib
import hmac
_ENDIAN = "<" # Defines the endian for struct packing. ('<'=little, '>'=big)
# The possible types of extended header.
_QFU_EXT_HDR_NONE = 0
_QFU_EXT_HDR_SHA256 = 1
_QFU_EXT_HDR_HMAC256 = 2
class QFUException(Exception):
"""QFU Exception."""
def __init__(self, message):
super(QFUException, self).__init__(message)
class QFUDefineParser(object):
"""A simple parser for C header files to extract #define of integers
Note:
We only parse simple #define macros like::
#define DFU_ID_VENDOR (0x1200)"""
defines = {}
VENDOR_ID = "QFU_VENDOR_ID"
PRODUCT_ID_DFU = "QFU_DFU_PRODUCT_ID"
PRODUCT_ID_APP = "QFU_APP_PRODUCT_ID"
VERSION = "QFU_VERSION"
BLOCK_SIZE = "QFU_BLOCK_SIZE"
SVN = "QFU_SVN"
# Compiled regular expressions for `#define A int` or `#define A (int)`
_re_int_line = re.compile(
r"^\s*\#define\s+(\S+)\s+\(?(\d+)\)?")
# Compiled regular expressions for `#define A hex` or `#define A (hex)`
_re_hex_line = re.compile(
r"^\s*\#define\s+(\S+)\s+\(?0x([0-9,a-f,A-F]+)\)?")
def _check_line(self, line):
"""Search for valid defines in a line."""
match = self._re_hex_line.match(line)
if match:
grp = match.groups()
self.defines[grp[0]] = int(grp[1], 16)
return
match = self._re_int_line.match(line)
if match:
grp = match.groups()
self.defines[grp[0]] = int(grp[1])
return
def __init__(self, open_file):
"""Opens and parses a C header like file for integer defines."""
for line in open_file.readlines():
self._check_line(line)
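# Illustrative usage sketch (hypothetical file name and contents): a header containing
# the line "#define QFU_VERSION (2)" would be parsed as
#     with open("qfu_config.h") as config_file:
#         conf = QFUDefineParser(config_file)
#     conf.defines[QFUDefineParser.VERSION]   # -> 2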
class QFUImage(object):
"""Creates a QFU compatible file from a binary file."""
def __init__(self):
self.ext_headers = []
def make(self, header, image_data, key=None, add_sha256=False):
"""Assembles the QFU Header and the binary data.
Args:
header (QFUHeader): Header containing all relevant information to
create the image.
image_data (string): Input file data.
add_sha256 (Bool): Add a sha256 hash to the header.
Returns:
The newly constructed binary data."""
ext_header = QFUExtHeaderNone()
if add_sha256:
ext_header = QFUExtHeaderSHA256(image_data)
elif key:
ext_header = QFUExtHeaderHMAC256(image_data, header, key)
data_blocks = ((len(image_data) - 1) // header.block_size) + 1
header_blocks = ((header.SIZE + ext_header.size() - 1)
// header.block_size) + 1
header.num_blocks = data_blocks + header_blocks
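        # Illustrative check of the block arithmetic above (hypothetical numbers): a
        # 5000-byte image with block_size=2048 gives data_blocks = ((5000 - 1) // 2048) + 1 = 3,
        # i.e. the ceiling of the image length divided by the block size.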
header.add_extended_header(ext_header)
# Set QFU header and DFU suffix.
content = header.packed_qfu_header
content += image_data
return content
class QFUExtHeader(object):
"""Generic Extended header class."""
def __init__(self, ext_hdr_id):
self.content = ""
self.hdr_id = ext_hdr_id
def size(self):
"""Return the size of the extended header, which is a minimum of 4"""
return 4
def compute(self):
pass
class QFUExtHeaderNone(QFUExtHeader):
"""None-Extended Header class. This header contains of empty 32 bits."""
def __init__(self):
self._struct = struct.Struct("%sHH" % _ENDIAN)
super(QFUExtHeaderNone, self).__init__(_QFU_EXT_HDR_NONE)
def compute(self):
"""Compute extended header content."""
self.content = self._struct.pack(self.hdr_id, 0)
def size(self):
"""Return the size of the extended header (4 bytes)"""
return super(QFUExtHeaderNone, self).size()
class QFUExtHeaderSHA256(QFUExtHeader):
"""SHA256 extended header class.
Params:
data (`string`): Content of the binary file."""
def __init__(self, file_content):
self.data = file_content
self._struct = struct.Struct("%sHH32s" % _ENDIAN)
super(QFUExtHeaderSHA256, self).__init__(_QFU_EXT_HDR_SHA256)
def compute(self):
"""Compute extended header content."""
if not self.data:
raise QFUException("No data defined for SHA256 calculation.")
hasher = hashlib.sha256()
hasher.update(self.data)
self.content = self._struct.pack(self.hdr_id, 0, hasher.digest())
def size(self):
"""Return the size of the extended hdr (4bytes + 32bytes = 36bytes)"""
return 32 + super(QFUExtHeaderSHA256, self).size()
class QFUExtHeaderHMAC256(QFUExtHeader):
"""HMAC256 extended header class."""
def __init__(self, data, header, key):
self.data = data
self.key = key
self.svn = header.svn
self.header = header
self.data_blocks = ((len(data) - 1) // header.block_size) + 1
super(QFUExtHeaderHMAC256, self).__init__(_QFU_EXT_HDR_HMAC256)
def compute_blocks(self, block_size, block_cnt):
"""Compute the sha checksum for each block.
Args:
block_size (`int`): Size of each block.
block_cnt (`int`): Number of blocks."""
sha_blocks = ""
block_struct = struct.Struct("%s32s" % _ENDIAN)
        # Calculate hash for all blocks
nr_blocks = len(self.data) // block_size
start = 0
end = block_size
for i in range(0, nr_blocks):
hasher = hashlib.sha256()
hash_data = self.data[start:end]
hasher.update(hash_data)
sha_blocks += block_struct.pack(hasher.digest())
start += block_size
end += block_size
        # Handle the last block if present.
if(start < len(self.data)):
hasher = hashlib.sha256()
hash_data = self.data[start:len(self.data)]
hasher.update(hash_data)
sha_blocks += block_struct.pack(hasher.digest())
return sha_blocks
def compute(self):
"""Compute extended header content."""
header_struct = struct.Struct("%sHHI" % _ENDIAN)
if not self.data:
raise QFUException("No data defined for SHA256 calculation.")
if not self.key:
raise QFUException("No key defined for HMAC256 calculation.")
# if not self.svn:
# raise QFUException("No Security version number defined.")
self.content = header_struct.pack(self.hdr_id, 0, self.svn)
self.content += self.compute_blocks(self.header.block_size,
self.header.num_blocks)
# Sign the header
self.content += hmac.new(bytes(self.key),
(bytes(self.header.get_base_header()) +
bytes(self.content)),
digestmod = hashlib.sha256).digest()
def size(self):
"""Return the size of the extended header 4 bytes as usal + 4 bytes SVN
+ sha256 for each block + final hmac256."""
return (4 + (self.data_blocks * 32) + 32 +
super(QFUExtHeaderHMAC256, self).size())
class QFUHeader(object):
"""The class holding QFU Header and DFU Suffix information
Attributes:
id_vendor (int): The DFU/USB vendor id.
id_product (int): The DFU/USB product id.
id_product_dfu (int): The DFU specific product id.
partition_id (int): Target partition number.
version (int): Firmware version of this image.
block_size (int): The DFU block size.
num_blocks (int): The number of blocks in this image.
ext_headers(`list`): List of extended headers.
"""
SIZE = 20
id_vendor = 0
id_product = 0
id_product_dfu = 0
partition_id = 0
version = 0
block_size = None
num_blocks = 0
ext_headers = []
svn = 0
# Different structure formats. _ENDIAN defines little or big-endian.
# H stands for uint16, I for uint32 and c for a single character.
_header_struct = struct.Struct("%sHHHHIHH" % _ENDIAN)
def __init__(self):
self.ext_headers = []
pass
def add_extended_header(self, header):
"""Add an extended header.
Args:
header (`QFUExtHeader`): extended header."""
self.ext_headers.insert(-1, header)
def print_info(self, prefix=""):
"""Prints verbose QFU Header and information."""
inset = " " * len(prefix)
print("%sQFU-Header content:" % prefix)
print("%s Partition: %d" % (inset, self.partition_id))
print("%s VID: 0x%04x" % (inset, self.id_vendor))
print("%s PID: 0x%04x" % (inset, self.id_product))
print("%s DFU PID: 0x%04x" % (inset, self.id_product_dfu))
print("%s Version: %d" % (inset, self.version))
print("%s Block Size: %d" % (inset, self.block_size))
print("%s Blocks: %d" % (inset, self.num_blocks))
def overwrite_config_parameters(self, args):
"""Read arguments from the command line and overwrites the config
parameters
Args:
args: Command-line arguments.
"""
if args.vid is not None:
self.id_vendor = args.vid
if args.app_pid is not None:
self.id_product = args.app_pid
if args.app_version is not None:
self.version = args.app_version
if args.block_size is not None:
self.block_size = args.block_size
if args.svn is not None:
self.svn = args.svn
if args.dfu_pid is not None:
self.id_product_dfu = args.dfu_pid
if self.block_size is None:
if args.soc == "quark_se":
self.block_size = 4096
else:
self.block_size = 2048
def set_from_file(self, open_file):
"""Read configuration file (C-header format) and update header
information.
Args:
open_file (file): An open file with read permission. The file
needs to contain C-header style defines."""
conf = QFUDefineParser(open_file)
# Map configuration to class variables.
if QFUDefineParser.VENDOR_ID in conf.defines:
self.id_vendor = conf.defines[QFUDefineParser.VENDOR_ID]
if QFUDefineParser.PRODUCT_ID_APP in conf.defines:
self.id_product = conf.defines[QFUDefineParser.PRODUCT_ID_APP]
if QFUDefineParser.PRODUCT_ID_DFU in conf.defines:
self.id_product_dfu = conf.defines[QFUDefineParser.PRODUCT_ID_DFU]
if QFUDefineParser.VERSION in conf.defines:
self.version = conf.defines[QFUDefineParser.VERSION]
if QFUDefineParser.BLOCK_SIZE in conf.defines:
self.block_size = conf.defines[QFUDefineParser.BLOCK_SIZE]
if QFUDefineParser.SVN in conf.defines:
self.svn = conf.defines[QFUDefineParser.SVN]
def set_from_data(self, data):
"""Update header information from binary data string.
        """
# -*- coding: utf-8 -*-
"""Utilities for preprocessing sequence data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import random
from six.moves import range
from . import get_keras_submodule
keras_utils = get_keras_submodule('utils')
def pad_sequences(sequences, maxlen=None, dtype='int32',
padding='pre', truncating='pre', value=0.):
"""Pads sequences to the same length.
This function transforms a list of
`num_samples` sequences (lists of integers)
into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
`num_timesteps` is either the `maxlen` argument if provided,
or the length of the longest sequence otherwise.
Sequences that are shorter than `num_timesteps`
are padded with `value` at the end.
Sequences longer than `num_timesteps` are truncated
so that they fit the desired length.
The position where padding or truncation happens is determined by
the arguments `padding` and `truncating`, respectively.
Pre-padding is the default.
# Arguments
sequences: List of lists, where each element is a sequence.
maxlen: Int, maximum length of all sequences.
dtype: Type of the output sequences.
padding: String, 'pre' or 'post':
pad either before or after each sequence.
truncating: String, 'pre' or 'post':
remove values from sequences larger than
`maxlen`, either at the beginning or at the end of the sequences.
value: Float, padding value.
# Returns
x: Numpy array with shape `(len(sequences), maxlen)`
# Raises
ValueError: In case of invalid values for `truncating` or `padding`,
or in case of invalid shape for a `sequences` entry.
"""
if not hasattr(sequences, '__len__'):
raise ValueError('`sequences` must be iterable.')
lengths = []
for x in sequences:
if not hasattr(x, '__len__'):
raise ValueError('`sequences` must be a list of iterables. '
'Found non-iterable: ' + str(x))
lengths.append(len(x))
num_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((num_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if not len(s):
continue # empty list/array was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" '
'not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError('Shape of sample %s of sequence at position %s '
'is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape))
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x
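# Illustrative example of the default pre-padding behaviour:
# >>> pad_sequences([[1, 2, 3], [4, 5]], maxlen=4)
# array([[0, 1, 2, 3],
#        [0, 0, 4, 5]], dtype=int32)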
def make_sampling_table(size, sampling_factor=1e-5):
"""Generates a word rank-based probabilistic sampling table.
Used for generating the `sampling_table` argument for `skipgrams`.
`sampling_table[i]` is the probability of sampling
the word i-th most common word in a dataset
(more common words should be sampled less frequently, for balance).
The sampling probabilities are generated according
to the sampling distribution used in word2vec:
```
p(word) = (min(1, sqrt(word_frequency / sampling_factor) /
(word_frequency / sampling_factor)))
```
We assume that the word frequencies follow Zipf's law (s=1) to derive
a numerical approximation of frequency(rank):
`frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))`
where `gamma` is the Euler-Mascheroni constant.
# Arguments
size: Int, number of possible words to sample.
sampling_factor: The sampling factor in the word2vec formula.
# Returns
A 1D Numpy array of length `size` where the ith entry
is the probability that a word of rank i should be sampled.
"""
gamma = 0.577
rank = np.arange(size)
rank[0] = 1
inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1. / (12. * rank)
f = sampling_factor * inv_fq
return np.minimum(1., f / np.sqrt(f))
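# Illustrative note: with the default sampling_factor of 1e-5, make_sampling_table(10)[1]
# (the most common rank) evaluates to roughly 0.003, so the most frequent words are kept
# only rarely when building skipgram pairs.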
def skipgrams(sequence, vocabulary_size,
window_size=4, negative_samples=1., shuffle=True,
categorical=False, sampling_table=None, seed=None):
"""Generates skipgram word pairs.
This function transforms a sequence of word indexes (list of integers)
into tuples of words of the form:
- (word, word in the same window), with label 1 (positive samples).
- (word, random word from the vocabulary), with label 0 (negative samples).
Read more about Skipgram in this gnomic paper by Mikolov et al.:
[Efficient Estimation of Word Representations in
Vector Space](http://arxiv.org/pdf/1301.3781v3.pdf)
# Arguments
sequence: A word sequence (sentence), encoded as a list
of word indices (integers). If using a `sampling_table`,
word indices are expected to match the rank
of the words in a reference dataset (e.g. 10 would encode
the 10-th most frequently occurring token).
Note that index 0 is expected to be a non-word and will be skipped.
vocabulary_size: Int, maximum possible word index + 1
window_size: Int, size of sampling windows (technically half-window).
The window of a word `w_i` will be
`[i - window_size, i + window_size+1]`.
negative_samples: Float >= 0. 0 for no negative (i.e. random) samples.
1 for same number as positive samples.
shuffle: Whether to shuffle the word couples before returning them.
categorical: bool. if False, labels will be
integers (eg. `[0, 1, 1 .. ]`),
if `True`, labels will be categorical, e.g.
`[[1,0],[0,1],[0,1] .. ]`.
sampling_table: 1D array of size `vocabulary_size` where the entry i
encodes the probability to sample a word of rank i.
seed: Random seed.
# Returns
couples, labels: where `couples` are int pairs and
`labels` are either 0 or 1.
# Note
By convention, index 0 in the vocabulary is
a non-word and will be skipped.
"""
couples = []
labels = []
for i, wi in enumerate(sequence):
if not wi:
continue
if sampling_table is not None:
if sampling_table[wi] < random.random():
continue
window_start = max(0, i - window_size)
window_end = min(len(sequence), i + window_size + 1)
for j in range(window_start, window_end):
if j != i:
wj = sequence[j]
if not wj:
continue
couples.append([wi, wj])
if categorical:
labels.append([0, 1])
else:
labels.append(1)
if negative_samples > 0:
num_negative_samples = int(len(labels) * negative_samples)
words = [c[0] for c in couples]
random.shuffle(words)
couples += [[words[i % len(words)],
random.randint(1, vocabulary_size - 1)]
for i in range(num_negative_samples)]
if categorical:
labels += [[1, 0]] * num_negative_samples
else:
labels += [0] * num_negative_samples
if shuffle:
if seed is None:
seed = random.randint(0, 10e6)
random.seed(seed)
random.shuffle(couples)
random.seed(seed)
random.shuffle(labels)
return couples, labels
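# Illustrative example: skipgrams([1, 2, 3], vocabulary_size=4, window_size=1) yields the
# positive couples [1, 2], [2, 1], [2, 3] and [3, 2] with label 1, plus an equal number of
# randomly drawn negative couples with label 0 (negative_samples=1 by default).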
def _remove_long_seq(maxlen, seq, label):
"""Removes sequences that exceed the maximum length.
# Arguments
maxlen: Int, maximum length of the output sequences.
seq: List of lists, where each sublist is a sequence.
label: List where each element is an integer.
# Returns
new_seq, new_label: shortened lists for `seq` and `label`.
"""
new_seq, new_label = [], []
for x, y in zip(seq, label):
if len(x) < maxlen:
new_seq.append(x)
new_label.append(y)
return new_seq, new_label
class TimeseriesGenerator(keras_utils.Sequence):
"""Utility class for generating batches of temporal data.
This class takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as
stride, length of history, etc., to produce batches for
training/validation.
# Arguments
data: Indexable generator (such as list or Numpy array)
containing consecutive data points (timesteps).
            The data should be 2D, and axis 0 is expected
to be the time dimension.
targets: Targets corresponding to timesteps in `data`.
It should have same length as `data`.
length: Length of the output sequences (in number of timesteps).
sampling_rate: Period between successive individual timesteps
within sequences. For rate `r`, timesteps
`data[i]`, `data[i-r]`, ... `data[i - length]`
            are used to create a sample sequence.
stride: Period between successive output sequences.
For stride `s`, consecutive output samples would
be centered around `data[i]`, `data[i+s]`, `data[i+2*s]`, etc.
start_index: Data points earlier than `start_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
end_index: Data points later than `end_index` will not be used
in the output sequences. This is useful to reserve part of the
data for test or validation.
shuffle: Whether to shuffle output samples,
or instead draw them in chronological order.
reverse: Boolean: if `true`, timesteps in each output sample will be
in reverse chronological order.
batch_size: Number of timeseries samples in each batch
            (except maybe the last one).
y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MMeshSmoothOptions(object):
"""
Options for control of smooth mesh generation.
"""
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
boundaryRule = None
divisions = None
keepBorderEdge = None
keepHardEdge = None
propEdgeHardness = None
smoothUVs = None
smoothness = None
__new__ = None
kCreaseAll = 1
kCreaseEdge = 2
kInvalid = -1
kLast = 3
kLegacy = 0
class MPointArray(object):
"""
Array of MPoint values.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __contains__(*args, **kwargs):
"""
x.__contains__(y) <==> y in x
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __delslice__(*args, **kwargs):
"""
x.__delslice__(i, j) <==> del x[i:j]
Use of negative indices is not supported.
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __getslice__(*args, **kwargs):
"""
x.__getslice__(i, j) <==> x[i:j]
Use of negative indices is not supported.
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(n) <==> x*n
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(n) <==> n*x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __setslice__(*args, **kwargs):
"""
x.__setslice__(i, j, y) <==> x[i:j]=y
Use of negative indices is not supported.
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def append(*args, **kwargs):
"""
Add a value to the end of the array.
"""
pass
def clear(*args, **kwargs):
"""
Remove all elements from the array.
"""
pass
def copy(*args, **kwargs):
"""
Replace the array contents with that of another or of a compatible Python sequence.
"""
pass
def insert(*args, **kwargs):
"""
Insert a new value into the array at the given index.
"""
pass
def remove(*args, **kwargs):
"""
Remove an element from the array.
"""
pass
def setLength(*args, **kwargs):
"""
Grow or shrink the array to contain a specific number of elements.
"""
pass
sizeIncrement = None
__new__ = None
class MTypeId(object):
"""
Stores a Maya object type identifier.
"""
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def id(*args, **kwargs):
"""
Returns the type id as a long.
"""
pass
__new__ = None
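# Illustrative usage sketch (only meaningful inside a running Maya session where this
# module is maya.api.OpenMaya):
# tid = MTypeId(0x00012345)
# tid.id()   # -> 74565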
class MFloatPoint(object):
"""
3D point with single-precision coordinates.
"""
def __add__(*args, **kwargs):
"""
x.__add__(y) <==> x+y
"""
pass
def __delitem__(*args, **kwargs):
"""
x.__delitem__(y) <==> del x[y]
"""
pass
def __div__(*args, **kwargs):
"""
x.__div__(y) <==> x/y
"""
pass
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __getitem__(*args, **kwargs):
"""
x.__getitem__(y) <==> x[y]
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __iadd__(*args, **kwargs):
"""
x.__iadd__(y) <==> x+=y
"""
pass
def __imul__(*args, **kwargs):
"""
x.__imul__(y) <==> x*=y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __isub__(*args, **kwargs):
"""
x.__isub__(y) <==> x-=y
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __len__(*args, **kwargs):
"""
x.__len__() <==> len(x)
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __mul__(*args, **kwargs):
"""
x.__mul__(y) <==> x*y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __radd__(*args, **kwargs):
"""
x.__radd__(y) <==> y+x
"""
pass
def __rdiv__(*args, **kwargs):
"""
x.__rdiv__(y) <==> y/x
"""
pass
def __repr__(*args, **kwargs):
"""
x.__repr__() <==> repr(x)
"""
pass
def __rmul__(*args, **kwargs):
"""
x.__rmul__(y) <==> y*x
"""
pass
def __rsub__(*args, **kwargs):
"""
x.__rsub__(y) <==> y-x
"""
pass
def __setitem__(*args, **kwargs):
"""
x.__setitem__(i, y) <==> x[i]=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def __sub__(*args, **kwargs):
"""
x.__sub__(y) <==> x-y
"""
pass
def cartesianize(*args, **kwargs):
"""
Convert point to cartesian form.
"""
pass
def distanceTo(*args, **kwargs):
"""
Return distance between this point and another.
"""
pass
def homogenize(*args, **kwargs):
"""
Convert point to homogenous form.
"""
pass
def isEquivalent(*args, **kwargs):
"""
Test for equivalence of two points, within a tolerance.
"""
pass
def rationalize(*args, **kwargs):
"""
Convert point to rational form.
"""
pass
w = None
x = None
y = None
z = None
__new__ = None
kOrigin = None
kTolerance = 1e-10
class MPlug(object):
"""
Create and access dependency node plugs.
"""
def __eq__(*args, **kwargs):
"""
x.__eq__(y) <==> x==y
"""
pass
def __ge__(*args, **kwargs):
"""
x.__ge__(y) <==> x>=y
"""
pass
def __gt__(*args, **kwargs):
"""
x.__gt__(y) <==> x>y
"""
pass
def __init__(*args, **kwargs):
"""
x.__init__(...) initializes x; see help(type(x)) for signature
"""
pass
def __le__(*args, **kwargs):
"""
x.__le__(y) <==> x<=y
"""
pass
def __lt__(*args, **kwargs):
"""
x.__lt__(y) <==> x<y
"""
pass
def __ne__(*args, **kwargs):
"""
x.__ne__(y) <==> x!=y
"""
pass
def __str__(*args, **kwargs):
"""
x.__str__() <==> str(x)
"""
pass
def array(*args, **kwargs):
"""
Returns a plug for the array of plugs of which this plug is an element.
"""
pass
        Jb, sbsbt, Vb, rmv, keep = cepca_fft(OGD, OGB, xcoords, ycoords,
zcoords, cm, debug=0)
if debug==1:
print("Calculated Jbhelper and sbsbt with FFT.\n"
+ "Current Runtime: " + str(time.time() - starttime))
#set the values where we'd divide by zero to zero
sbsbt[:,:,rmv[0],rmv[1],rmv[2]]=0
#sbsbt[:,:,keep[0],keep[1],keep[2]]/=Vb[keep[0],keep[1],keep[2]]
# the above would work if I use // instead of /
#and for the nonzero values, calculate 1/Vb * sbsbt
sbsbt[:,:,keep[0],keep[1],keep[2]] = np.divide(sbsbt[:,:,keep[0],keep[1],
keep[2]],
Vb[keep[0],keep[1],keep[2]])
#calculate Jb = Jbhelper - 1/Vb * sbsbt w/o using
#np.reshape and dividing by zero (thank you Virginie for the trick)
Jb -= sbsbt
# Jb is 3x3xRest and for np.linalg.eigh we need a structure
# of Restx3x3
Jb = np.transpose(Jb, (2, 3, 4, 0, 1))
#calculate eigenvalues, where eigvals contains the eigvalues in
#ascending order.
eigvals, eigvects = np.linalg.eigh(Jb)
if debug==1:
print("Calculated all eigvals (and eigvects).\n"
+ "Current Runtime: " + str(time.time() - starttime))
jbev1 = eigvals[:,:,:,0]
jbev2 = eigvals[:,:,:,1]
jbev3 = eigvals[:,:,:,2]
jbevc1 = eigvects[:,:,:,0,:]
jbevc2 = eigvects[:,:,:,1,:]
jbevc3 = eigvects[:,:,:,2,:]
#order eigenvalues and eigenvectors
#order==0 means no ordering, order==1 means bigger eigenvalues are
#first, order==2 uses the error approximations from Pottmann07.
if order==1:
jbev1, jbev2, jbev3,\
jbevc1, jbevc2, jbevc3 = cepca_orderevals_size(jbev1, jbev2, jbev3,
jbevc1, jbevc2, jbevc3)
elif order==2:
jbev1, jbev2, jbev3,\
jbevc1, jbevc2, jbevc3 = cepca_orderevals_error(kr,jbev1, jbev2, jbev3,
jbevc1, jbevc2, jbevc3)
elif order==3:
OGD,jbev1, jbev2, jbev3,\
jbevc1, jbevc2, jbevc3 = cepca_orderevals_cdiffnrmls(OGD, cm, jbev1,
jbev2, jbev3,
jbevc1, jbevc2,
jbevc3)
elif order==4:
OGD,jbev1, jbev2, jbev3,\
jbevc1, jbevc2, jbevc3 = cepca_orderevals_errnrmlsmix(OGD, cm, kr,
jbev1, jbev2,
jbev3, jbevc1,
jbevc2, jbevc3)
#this ends the ordering stuff!
#now calculate principal curvatures
kappa1 = 6/(np.pi*kr**6)*(jbev2-3*jbev1)+8/(5*kr)
kappa2 = 6/(np.pi*kr**6)*(jbev1-3*jbev2)+8/(5*kr)
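    # Illustrative sanity check: if the two eigenvalues used here coincide (jbev1 == jbev2),
    # both formulas above reduce to the same value, so kappa1 == kappa2, as expected at
    # umbilical points such as plane or sphere patches.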
#now output the results!
if debug==1:
print("Success!.\n"
+ "Current Runtime: " + str(time.time() - starttime))
if debug==0:
if mask==0:
return kappa1, kappa2, jbevc1, jbevc2, jbevc3
else:
return kappa1, kappa2, jbevc1, jbevc2, jbevc3, OGD
#two principal curvatures, two principal directions, one surface normal
else:
print("Debug: Returning kappa1, kappa2, jbev1, jbev2, jbev3, jbevc1,\
jbevc2, jbevc3. Good luck!")
return kappa1, kappa2, jbev1, jbev2, jbev3, jbevc1, jbevc2, jbevc3
def cepca_ocg(inpoccgrid, kr=3, order=2, cm=1, debug=0):
"""
Estimate curvature of an object, given as an occupancy grid, using integral
invariants and pca. Returns two principal curvatures, two principal
directions, and the surface normal.
Input:
-'inp' is a strict or relaxed occupancy grid
-'kr' is the kernel radius.
    -'cm' stands for convolution mode. If cm==1, the fft with zero-padding is
used. If cm<1, then the discrete convolution with a certain kind of
padding is used. If cm==0, zero-padding, if cm==0.25, 'reflect',
if cm==0.50, 'nearest', if cm==0.75, 'mirror', if cm==0.95, 'wrap'.
-'order' is the parameter that specifies the order of the eigenvalues.
If order==0, then the order is not changed at all.
If order==1, then the eigenvalues and eigenvectors are ordered
according to the values of the eigenvalues. I.e. the biggest
eigenvalue is first, 2nd biggest is 2nd, etc etc.
If order==2, then we use the error approximations of Pottmann07 to
estimate which eigenvalues are the "first" and "second" eigenvalues
that are needed to calculate the principal curvatures.
If order ==3 and cm==1.5 then the reflect padding is used in the
central difference computation. If order==3,cm==1, then zero padding.
In all other cases for order==3, the same padding as in the
convolution in the separate cm modes is used.
-the debug parameter is just there to enable easier debugging, e.g.
by printing certain statements at a time.
Uses a ball kernel for the neighborhood intersection.
Returns the two principal curvatures 'kappa1', 'kappa2' and the two
principal directions jbevc1, jbevc2, and the surface normal jbevc3 for
every point on the occupancy grid. These vectores can be wrong in areas
where kappa1==kappa2.
"""
if debug==1:
starttime = time.time()
OGD, OGB, xcoords, \
ycoords, zcoords = og.constructpcagrids_ocg(inpoccgrid, kr=kr, variant=1,
debug=0)
if debug==1:
print("Got all the Occupancy Grids.\n"
+"Shape of Domain OccGrid: " + str(np.shape(OGD)) + ".\n"
+ "Current Runtime: " + str(time.time() - starttime))
"""
To calculate PCA in ball neighborhoods, we have to calculate 3 integrals:
1) Vb = 1D \ast 1B, where 1D=OGD, 1B=OGB,
2) sb = 1/Vb * (sb1, sb2, sb3)^T = 1/Vb * (1D \ast (-X*1B)
[1D \ast -x*1B]
= 1/Vb * [1D \ast -y*1B]
[1D \ast -z*1B] ,
where X=(x,y,z)^T, ogbx=-x*1B, ..., sbhelper1= 1D \ast -x*1B,
sbhelper2=...
3) Jb = (1D \ast (1B*X*X^T))- Vb*sb*sb^T
[1D \ast x^2*1B, 1D \ast xy*1B, 1D \ast xz*1B]
= [1D \ast xy*1B, 1D \ast y^2*1B, 1D \ast yz*1B]
[1D \ast xz*1B, 1D \ast yz*1B, 1D \ast z^2*1B]
[sb1^2, sb1*sb2, sb1*sb3]
- Vb * 1/Vb^2 [sb1*sb2, sb2^2, sb1*sb3]
[sb1*sb3, sb1*sb3, sb3^2 ],
where Jbhxx=1D \ast x^2*1B, Jbhxy=1D \ast xy*1B, ..., ogbxx=x^2*1B,
ogbxy=xy*1B, ...
"""
if cm<1:
Jb, sbsbt, Vb, rmv, keep = cepca_discreteconvolution(OGD, OGB, xcoords,
ycoords, zcoords,
cm, debug=0)
if debug==1:
print("Calculated Jbhelper and sbsbt.\n"
+ "Current Runtime: " + str(time.time() - starttime))
else:
Jb, sbsbt, Vb, rmv, keep = cepca_fft(OGD, OGB, xcoords, ycoords,
zcoords, cm, debug=0)
if debug==1:
print("Calculated Jbhelper and sbsbt with FFT.\n"
+ "Current Runtime: " + str(time.time() - starttime))
#set the values where we'd divide by zero to zero
sbsbt[:,:,rmv[0],rmv[1],rmv[2]]=0
#sbsbt[:,:,keep[0],keep[1],keep[2]]/=Vb[keep[0],keep[1],keep[2]]
# the above would work if I use // instead of /
#and for the nonzero values, calculate 1/Vb * sbsbt
sbsbt[:,:,keep[0],keep[1],keep[2]] = np.divide(sbsbt[:,:,keep[0],keep[1],
keep[2]],
Vb[keep[0],keep[1],keep[2]])
#calculate Jb = Jbhelper - 1/Vb * sbsbt w/o using
#np.reshape and dividing by zero (thank you Virginie for the trick)
Jb -= sbsbt
# Jb is 3x3xRest and for np.linalg.eigh we need a structure
# of Restx3x3
Jb = np.transpose(Jb, (2, 3, 4, 0, 1))
#calculate eigenvalues, where eigvals contains the eigvalues in
#ascending order.
eigvals, eigvects = np.linalg.eigh(Jb)
if debug==1:
print("Calculated all eigvals (and eigvects).\n"
+ "Current Runtime: " + str(time.time() - starttime))
jbev1 = eigvals[:,:,:,0]
jbev2 = eigvals[:,:,:,1]
jbev3 = eigvals[:,:,:,2]
jbevc1 = eigvects[:,:,:,0,:]
jbevc2 = eigvects[:,:,:,1,:]
jbevc3 = eigvects[:,:,:,2,:]
#order eigenvalues and eigenvectors
#order==0 means no ordering, order==1 means bigger eigenvalues are
#first, order==2 uses the error approximations from Pottmann07.
if order==1:
jbev1, jbev2, jbev3,\
jbevc1, jbevc2, jbevc3 = cepca_orderevals_size(jbev1, jbev2, jbev3,
jbevc1, jbevc2, jbevc3)
elif order==2:
jbev1, jbev2, jbev3,\
jbevc1, jbevc2, jbevc3 = cepca_orderevals_error(kr,jbev1, jbev2, jbev3,
jbevc1, jbevc2, jbevc3)
elif order==3:
OGD,jbev1, jbev2, jbev3,\
jbevc1, jbevc2, jbevc3 = cepca_orderevals_cdiffnrmls(OGD, cm, jbev1,
jbev2, jbev3,
jbevc1, jbevc2,
jbevc3)
elif order==4:
OGD,jbev1, jbev2, jbev3,\
jbevc1, jbevc2, jbevc3 = cepca_orderevals_errnrmlsmix(OGD, cm, kr,
jbev1, jbev2,
jbev3, jbevc1,
jbevc2, jbevc3)
#this ends the ordering stuff!
#now calculate principal curvatures
kappa1 = 6/(np.pi*kr**6)*(jbev2-3*jbev1)+8/(5*kr)
kappa2 = 6/(np.pi*kr**6)*(jbev1-3*jbev2)+8/(5*kr)
#now output the results!
if debug==1:
print("Success!.\n"
+ "Current Runtime: " + str(time.time() - starttime))
if debug==0:
return kappa1, kappa2, jbevc1, jbevc2, jbevc3
#two principal curvatures, two principal directions, one surface normal
else:
print("Debug: Returning kappa1, kappa2, jbev1, jbev2, jbev3, jbevc1,\
jbevc2, jbevc3. Good luck!")
return kappa1, kappa2, jbev1, jbev2, jbev3, jbevc1, jbevc2, jbevc3
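# Illustrative usage sketch (assumes `occ` is a 3D numpy occupancy grid of the object):
# kappa1, kappa2, dir1, dir2, normal = cepca_ocg(occ, kr=3, order=2, cm=1)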
def cepca_msavg_pointcloud(inp, rho, startscale=3, endscale=12, scaledist=3,
ocg="str", taulow=0, order=2, cm=1, debug=0):
"""
Curvature estimation on a pointcloud using integral invariants, pca,
and a multiscale averaging method.
Returns two principal curvatures, two principal
directions, and the surface normal.
Input:
-'inp' can be an already loaded pointcloud that consists of the three
coordinates x y z or a string that leads to the file that is in x y z
format with no header.
-'rho' controls the amount of cells in the occupancy grid (=rho+1).
    -'cm' stands for convolution mode. If cm==1, the fft with zero-padding is
used. If cm<1, then the discrete convolution with a certain kind of
padding is used. If cm==0, zero-padding, if cm==0.25, 'reflect',
      if cm==0.50, 'nearest', if cm==0.75, 'mirror', if cm==0.95, 'wrap'.
# Version: 5.1
# Architecture: i386
import vstruct
from vstruct.primitives import *
DEVICE_RELATION_TYPE = v_enum()
DEVICE_RELATION_TYPE.BusRelations = 0
DEVICE_RELATION_TYPE.EjectionRelations = 1
DEVICE_RELATION_TYPE.PowerRelations = 2
DEVICE_RELATION_TYPE.RemovalRelations = 3
DEVICE_RELATION_TYPE.TargetDeviceRelation = 4
DEVICE_RELATION_TYPE.SingleBusRelations = 5
IO_ALLOCATION_ACTION = v_enum()
IO_ALLOCATION_ACTION.KeepObject = 0
IO_ALLOCATION_ACTION.DeallocateObject = 1
IO_ALLOCATION_ACTION.DeallocateObjectKeepRegisters = 2
BUS_QUERY_ID_TYPE = v_enum()
BUS_QUERY_ID_TYPE.BusQueryDeviceID = 0
BUS_QUERY_ID_TYPE.BusQueryHardwareIDs = 1
BUS_QUERY_ID_TYPE.BusQueryCompatibleIDs = 2
BUS_QUERY_ID_TYPE.BusQueryInstanceID = 3
BUS_QUERY_ID_TYPE.BusQueryDeviceSerialNumber = 4
NT_PRODUCT_TYPE = v_enum()
NT_PRODUCT_TYPE.NtProductWinNt = 0
NT_PRODUCT_TYPE.NtProductLanManNt = 1
NT_PRODUCT_TYPE.NtProductServer = 2
DEVICE_POWER_STATE = v_enum()
DEVICE_POWER_STATE.PowerDeviceUnspecified = 0
DEVICE_POWER_STATE.PowerDeviceD0 = 1
DEVICE_POWER_STATE.PowerDeviceD1 = 2
DEVICE_POWER_STATE.PowerDeviceD2 = 3
DEVICE_POWER_STATE.PowerDeviceD3 = 4
DEVICE_POWER_STATE.PowerDeviceMaximum = 5
KSPIN_LOCK_QUEUE_NUMBER = v_enum()
KSPIN_LOCK_QUEUE_NUMBER.LockQueueDispatcherLock = 0
KSPIN_LOCK_QUEUE_NUMBER.LockQueueContextSwapLock = 1
KSPIN_LOCK_QUEUE_NUMBER.LockQueuePfnLock = 2
KSPIN_LOCK_QUEUE_NUMBER.LockQueueSystemSpaceLock = 3
KSPIN_LOCK_QUEUE_NUMBER.LockQueueVacbLock = 4
KSPIN_LOCK_QUEUE_NUMBER.LockQueueMasterLock = 5
KSPIN_LOCK_QUEUE_NUMBER.LockQueueNonPagedPoolLock = 6
KSPIN_LOCK_QUEUE_NUMBER.LockQueueIoCancelLock = 7
KSPIN_LOCK_QUEUE_NUMBER.LockQueueWorkQueueLock = 8
KSPIN_LOCK_QUEUE_NUMBER.LockQueueIoVpbLock = 9
KSPIN_LOCK_QUEUE_NUMBER.LockQueueIoDatabaseLock = 10
KSPIN_LOCK_QUEUE_NUMBER.LockQueueIoCompletionLock = 11
KSPIN_LOCK_QUEUE_NUMBER.LockQueueNtfsStructLock = 12
KSPIN_LOCK_QUEUE_NUMBER.LockQueueAfdWorkQueueLock = 13
KSPIN_LOCK_QUEUE_NUMBER.LockQueueBcbLock = 14
KSPIN_LOCK_QUEUE_NUMBER.LockQueueMaximumLock = 15
FSINFOCLASS = v_enum()
FSINFOCLASS.FileFsVolumeInformation = 0
FSINFOCLASS.FileFsLabelInformation = 1
FSINFOCLASS.FileFsSizeInformation = 2
FSINFOCLASS.FileFsDeviceInformation = 3
FSINFOCLASS.FileFsAttributeInformation = 4
FSINFOCLASS.FileFsControlInformation = 5
FSINFOCLASS.FileFsFullSizeInformation = 6
FSINFOCLASS.FileFsObjectIdInformation = 7
FSINFOCLASS.FileFsDriverPathInformation = 8
FSINFOCLASS.FileFsMaximumInformation = 9
POOL_TYPE = v_enum()
POOL_TYPE.NonPagedPool = 0
POOL_TYPE.PagedPool = 1
POOL_TYPE.NonPagedPoolMustSucceed = 2
POOL_TYPE.DontUseThisType = 3
POOL_TYPE.NonPagedPoolCacheAligned = 4
POOL_TYPE.PagedPoolCacheAligned = 5
POOL_TYPE.NonPagedPoolCacheAlignedMustS = 6
POOL_TYPE.MaxPoolType = 7
POOL_TYPE.NonPagedPoolSession = 8
POOL_TYPE.PagedPoolSession = 9
POOL_TYPE.NonPagedPoolMustSucceedSession = 10
POOL_TYPE.DontUseThisTypeSession = 11
POOL_TYPE.NonPagedPoolCacheAlignedSession = 12
POOL_TYPE.PagedPoolCacheAlignedSession = 13
POOL_TYPE.NonPagedPoolCacheAlignedMustSSession = 14
MODE = v_enum()
MODE.KernelMode = 0
MODE.UserMode = 1
MODE.MaximumMode = 2
FS_FILTER_SECTION_SYNC_TYPE = v_enum()
FS_FILTER_SECTION_SYNC_TYPE.SyncTypeOther = 0
FS_FILTER_SECTION_SYNC_TYPE.SyncTypeCreateSection = 1
OB_OPEN_REASON = v_enum()
OB_OPEN_REASON.ObCreateHandle = 0
OB_OPEN_REASON.ObOpenHandle = 1
OB_OPEN_REASON.ObDuplicateHandle = 2
OB_OPEN_REASON.ObInheritHandle = 3
OB_OPEN_REASON.ObMaxOpenReason = 4
DEVICE_TEXT_TYPE = v_enum()
DEVICE_TEXT_TYPE.DeviceTextDescription = 0
DEVICE_TEXT_TYPE.DeviceTextLocationInformation = 1
POWER_STATE_TYPE = v_enum()
POWER_STATE_TYPE.SystemPowerState = 0
POWER_STATE_TYPE.DevicePowerState = 1
FILE_INFORMATION_CLASS = v_enum()
FILE_INFORMATION_CLASS.FileDirectoryInformation = 0
FILE_INFORMATION_CLASS.FileFullDirectoryInformation = 1
FILE_INFORMATION_CLASS.FileBothDirectoryInformation = 2
FILE_INFORMATION_CLASS.FileBasicInformation = 3
FILE_INFORMATION_CLASS.FileStandardInformation = 4
FILE_INFORMATION_CLASS.FileInternalInformation = 5
FILE_INFORMATION_CLASS.FileEaInformation = 6
FILE_INFORMATION_CLASS.FileAccessInformation = 7
FILE_INFORMATION_CLASS.FileNameInformation = 8
FILE_INFORMATION_CLASS.FileRenameInformation = 9
FILE_INFORMATION_CLASS.FileLinkInformation = 10
FILE_INFORMATION_CLASS.FileNamesInformation = 11
FILE_INFORMATION_CLASS.FileDispositionInformation = 12
FILE_INFORMATION_CLASS.FilePositionInformation = 13
FILE_INFORMATION_CLASS.FileFullEaInformation = 14
FILE_INFORMATION_CLASS.FileModeInformation = 15
FILE_INFORMATION_CLASS.FileAlignmentInformation = 16
FILE_INFORMATION_CLASS.FileAllInformation = 17
FILE_INFORMATION_CLASS.FileAllocationInformation = 18
FILE_INFORMATION_CLASS.FileEndOfFileInformation = 19
FILE_INFORMATION_CLASS.FileAlternateNameInformation = 20
FILE_INFORMATION_CLASS.FileStreamInformation = 21
FILE_INFORMATION_CLASS.FilePipeInformation = 22
FILE_INFORMATION_CLASS.FilePipeLocalInformation = 23
FILE_INFORMATION_CLASS.FilePipeRemoteInformation = 24
FILE_INFORMATION_CLASS.FileMailslotQueryInformation = 25
FILE_INFORMATION_CLASS.FileMailslotSetInformation = 26
FILE_INFORMATION_CLASS.FileCompressionInformation = 27
FILE_INFORMATION_CLASS.FileObjectIdInformation = 28
FILE_INFORMATION_CLASS.FileCompletionInformation = 29
FILE_INFORMATION_CLASS.FileMoveClusterInformation = 30
FILE_INFORMATION_CLASS.FileQuotaInformation = 31
FILE_INFORMATION_CLASS.FileReparsePointInformation = 32
FILE_INFORMATION_CLASS.FileNetworkOpenInformation = 33
FILE_INFORMATION_CLASS.FileAttributeTagInformation = 34
FILE_INFORMATION_CLASS.FileTrackingInformation = 35
FILE_INFORMATION_CLASS.FileIdBothDirectoryInformation = 36
FILE_INFORMATION_CLASS.FileIdFullDirectoryInformation = 37
FILE_INFORMATION_CLASS.FileValidDataLengthInformation = 38
FILE_INFORMATION_CLASS.FileShortNameInformation = 39
FILE_INFORMATION_CLASS.FileMaximumInformation = 40
EXCEPTION_DISPOSITION = v_enum()
EXCEPTION_DISPOSITION.ExceptionContinueExecution = 0
EXCEPTION_DISPOSITION.ExceptionContinueSearch = 1
EXCEPTION_DISPOSITION.ExceptionNestedException = 2
EXCEPTION_DISPOSITION.ExceptionCollidedUnwind = 3
PF_SCENARIO_TYPE = v_enum()
PF_SCENARIO_TYPE.PfApplicationLaunchScenarioType = 0
PF_SCENARIO_TYPE.PfSystemBootScenarioType = 1
PF_SCENARIO_TYPE.PfMaxScenarioType = 2
SECURITY_OPERATION_CODE = v_enum()
SECURITY_OPERATION_CODE.SetSecurityDescriptor = 0
SECURITY_OPERATION_CODE.QuerySecurityDescriptor = 1
SECURITY_OPERATION_CODE.DeleteSecurityDescriptor = 2
SECURITY_OPERATION_CODE.AssignSecurityDescriptor = 3
PP_NPAGED_LOOKASIDE_NUMBER = v_enum()
PP_NPAGED_LOOKASIDE_NUMBER.LookasideSmallIrpList = 0
PP_NPAGED_LOOKASIDE_NUMBER.LookasideLargeIrpList = 1
PP_NPAGED_LOOKASIDE_NUMBER.LookasideMdlList = 2
PP_NPAGED_LOOKASIDE_NUMBER.LookasideCreateInfoList = 3
PP_NPAGED_LOOKASIDE_NUMBER.LookasideNameBufferList = 4
PP_NPAGED_LOOKASIDE_NUMBER.LookasideTwilightList = 5
PP_NPAGED_LOOKASIDE_NUMBER.LookasideCompletionList = 6
PP_NPAGED_LOOKASIDE_NUMBER.LookasideMaximumList = 7
SECURITY_IMPERSONATION_LEVEL = v_enum()
SECURITY_IMPERSONATION_LEVEL.SecurityAnonymous = 0
SECURITY_IMPERSONATION_LEVEL.SecurityIdentification = 1
SECURITY_IMPERSONATION_LEVEL.SecurityImpersonation = 2
SECURITY_IMPERSONATION_LEVEL.SecurityDelegation = 3
DEVICE_USAGE_NOTIFICATION_TYPE = v_enum()
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypeUndefined = 0
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypePaging = 1
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypeHibernation = 2
DEVICE_USAGE_NOTIFICATION_TYPE.DeviceUsageTypeDumpFile = 3
INTERFACE_TYPE = v_enum()
INTERFACE_TYPE.InterfaceTypeUndefined = 0
INTERFACE_TYPE.Internal = 1
INTERFACE_TYPE.Isa = 2
INTERFACE_TYPE.Eisa = 3
INTERFACE_TYPE.MicroChannel = 4
INTERFACE_TYPE.TurboChannel = 5
INTERFACE_TYPE.PCIBus = 6
INTERFACE_TYPE.VMEBus = 7
INTERFACE_TYPE.NuBus = 8
INTERFACE_TYPE.PCMCIABus = 9
INTERFACE_TYPE.CBus = 10
INTERFACE_TYPE.MPIBus = 11
INTERFACE_TYPE.MPSABus = 12
INTERFACE_TYPE.ProcessorInternal = 13
INTERFACE_TYPE.InternalPowerBus = 14
INTERFACE_TYPE.PNPISABus = 15
INTERFACE_TYPE.PNPBus = 16
INTERFACE_TYPE.MaximumInterfaceType = 17
KWAIT_REASON = v_enum()
KWAIT_REASON.Executive = 0
KWAIT_REASON.FreePage = 1
KWAIT_REASON.PageIn = 2
KWAIT_REASON.PoolAllocation = 3
KWAIT_REASON.DelayExecution = 4
KWAIT_REASON.Suspended = 5
KWAIT_REASON.UserRequest = 6
KWAIT_REASON.WrExecutive = 7
KWAIT_REASON.WrFreePage = 8
KWAIT_REASON.WrPageIn = 9
KWAIT_REASON.WrPoolAllocation = 10
KWAIT_REASON.WrDelayExecution = 11
KWAIT_REASON.WrSuspended = 12
KWAIT_REASON.WrUserRequest = 13
KWAIT_REASON.WrEventPair = 14
KWAIT_REASON.WrQueue = 15
KWAIT_REASON.WrLpcReceive = 16
KWAIT_REASON.WrLpcReply = 17
KWAIT_REASON.WrVirtualMemory = 18
KWAIT_REASON.WrPageOut = 19
KWAIT_REASON.WrRendezvous = 20
KWAIT_REASON.Spare2 = 21
KWAIT_REASON.Spare3 = 22
KWAIT_REASON.Spare4 = 23
KWAIT_REASON.Spare5 = 24
KWAIT_REASON.Spare6 = 25
KWAIT_REASON.WrKernel = 26
KWAIT_REASON.MaximumWaitReason = 27
ALTERNATIVE_ARCHITECTURE_TYPE = v_enum()
ALTERNATIVE_ARCHITECTURE_TYPE.StandardDesign = 0
ALTERNATIVE_ARCHITECTURE_TYPE.NEC98x86 = 1
ALTERNATIVE_ARCHITECTURE_TYPE.EndAlternatives = 2
MEMORY_TYPE = v_enum()
MEMORY_TYPE.MemoryExceptionBlock = 0
MEMORY_TYPE.MemorySystemBlock = 1
MEMORY_TYPE.MemoryFree = 2
MEMORY_TYPE.MemoryBad = 3
MEMORY_TYPE.MemoryLoadedProgram = 4
MEMORY_TYPE.MemoryFirmwareTemporary = 5
MEMORY_TYPE.MemoryFirmwarePermanent = 6
MEMORY_TYPE.MemoryFreeContiguous = 7
MEMORY_TYPE.MemorySpecialMemory = 8
MEMORY_TYPE.MemoryMaximum = 9
PS_QUOTA_TYPE = v_enum()
PS_QUOTA_TYPE.PsNonPagedPool = 0
PS_QUOTA_TYPE.PsPagedPool = 1
PS_QUOTA_TYPE.PsPageFile = 2
PS_QUOTA_TYPE.PsQuotaTypes = 3
ReplacesCorHdrNumericDefines = v_enum()
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_ILONLY = 0
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_32BITREQUIRED = 1
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_IL_LIBRARY = 2
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_STRONGNAMESIGNED = 3
ReplacesCorHdrNumericDefines.COMIMAGE_FLAGS_TRACKDEBUGDATA = 4
ReplacesCorHdrNumericDefines.COR_VERSION_MAJOR_V2 = 5
ReplacesCorHdrNumericDefines.COR_VERSION_MAJOR = 6
ReplacesCorHdrNumericDefines.COR_VERSION_MINOR = 7
ReplacesCorHdrNumericDefines.COR_DELETED_NAME_LENGTH = 8
ReplacesCorHdrNumericDefines.COR_VTABLEGAP_NAME_LENGTH = 9
ReplacesCorHdrNumericDefines.NATIVE_TYPE_MAX_CB = 10
ReplacesCorHdrNumericDefines.COR_ILMETHOD_SECT_SMALL_MAX_DATASIZE = 11
ReplacesCorHdrNumericDefines.IMAGE_COR_MIH_METHODRVA = 12
ReplacesCorHdrNumericDefines.IMAGE_COR_MIH_EHRVA = 13
ReplacesCorHdrNumericDefines.IMAGE_COR_MIH_BASICBLOCK = 14
ReplacesCorHdrNumericDefines.COR_VTABLE_32BIT = 15
ReplacesCorHdrNumericDefines.COR_VTABLE_64BIT = 16
ReplacesCorHdrNumericDefines.COR_VTABLE_FROM_UNMANAGED = 17
ReplacesCorHdrNumericDefines.COR_VTABLE_CALL_MOST_DERIVED = 18
ReplacesCorHdrNumericDefines.IMAGE_COR_EATJ_THUNK_SIZE = 19
ReplacesCorHdrNumericDefines.MAX_CLASS_NAME = 20
ReplacesCorHdrNumericDefines.MAX_PACKAGE_NAME = 21
SYSTEM_POWER_STATE = v_enum()
SYSTEM_POWER_STATE.PowerSystemUnspecified = 0
SYSTEM_POWER_STATE.PowerSystemWorking = 1
SYSTEM_POWER_STATE.PowerSystemSleeping1 = 2
SYSTEM_POWER_STATE.PowerSystemSleeping2 = 3
SYSTEM_POWER_STATE.PowerSystemSleeping3 = 4
SYSTEM_POWER_STATE.PowerSystemHibernate = 5
SYSTEM_POWER_STATE.PowerSystemShutdown = 6
SYSTEM_POWER_STATE.PowerSystemMaximum = 7
MEMORY_CACHING_TYPE_ORIG = v_enum()
MEMORY_CACHING_TYPE_ORIG.MmFrameBufferCached = 0
POWER_ACTION = v_enum()
POWER_ACTION.PowerActionNone = 0
POWER_ACTION.PowerActionReserved = 1
POWER_ACTION.PowerActionSleep = 2
POWER_ACTION.PowerActionHibernate = 3
POWER_ACTION.PowerActionShutdown = 4
POWER_ACTION.PowerActionShutdownReset = 5
POWER_ACTION.PowerActionShutdownOff = 6
POWER_ACTION.PowerActionWarmEject = 7
class KEXECUTE_OPTIONS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExecuteDisable = v_uint8()
class KPRCB(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.MinorVersion = v_uint16()
self.MajorVersion = v_uint16()
self.CurrentThread = v_ptr32()
self.NextThread = v_ptr32()
self.IdleThread = v_ptr32()
self.Number = v_uint8()
self.Reserved = v_uint8()
self.BuildType = v_uint16()
self.SetMember = v_uint32()
self.CpuType = v_uint8()
self.CpuID = v_uint8()
self.CpuStep = v_uint16()
self.ProcessorState = KPROCESSOR_STATE()
self.KernelReserved = v_bytes(size=64) # FIXME Unknown Array Type
self.HalReserved = v_bytes(size=64) # FIXME Unknown Array Type
self.PrcbPad0 = v_bytes(size=92) # FIXME Unknown Array Type
self.LockQueue = v_bytes(size=92) # FIXME Unknown Array Type
self.PrcbPad1 = v_bytes(size=8) # FIXME Unknown Array Type
self.NpxThread = v_ptr32()
self.InterruptCount = v_uint32()
self.KernelTime = v_uint32()
self.UserTime = v_uint32()
self.DpcTime = v_uint32()
self.DebugDpcTime = v_uint32()
self.InterruptTime = v_uint32()
self.AdjustDpcThreshold = v_uint32()
self.PageColor = v_uint32()
self.SkipTick = v_uint32()
self.MultiThreadSetBusy = v_uint8()
self.Spare2 = v_bytes(size=3) # FIXME Unknown Array Type
self.ParentNode = v_ptr32()
self.MultiThreadProcessorSet = v_uint32()
self.MultiThreadSetMaster = v_ptr32()
self.ThreadStartCount = v_bytes(size=8) # FIXME Unknown Array Type
self.CcFastReadNoWait = v_uint32()
self.CcFastReadWait = v_uint32()
self.CcFastReadNotPossible = v_uint32()
self.CcCopyReadNoWait = v_uint32()
self.CcCopyReadWait = v_uint32()
self.CcCopyReadNoWaitMiss = v_uint32()
self.KeAlignmentFixupCount = v_uint32()
self.KeContextSwitches = v_uint32()
self.KeDcacheFlushCount = v_uint32()
self.KeExceptionDispatchCount = v_uint32()
self.KeFirstLevelTbFills = v_uint32()
self.KeFloatingEmulationCount = v_uint32()
self.KeIcacheFlushCount = v_uint32()
self.KeSecondLevelTbFills = v_uint32()
self.KeSystemCalls = v_uint32()
self.SpareCounter0 = v_bytes(size=4) # FIXME Unknown Array Type
self.PPLookasideList = v_bytes(size=4) # FIXME Unknown Array Type
self.PPNPagedLookasideList = v_bytes(size=4) # FIXME Unknown Array Type
self.PPPagedLookasideList = v_bytes(size=4) # FIXME Unknown Array Type
self.PacketBarrier = v_uint32()
self.ReverseStall = v_uint32()
self.IpiFrame = v_ptr32()
self.PrcbPad2 = v_bytes(size=52) # FIXME Unknown Array Type
self.CurrentPacket = v_bytes(size=12) # FIXME Unknown Array Type
self.TargetSet = v_uint32()
self.WorkerRoutine = v_ptr32()
self.IpiFrozen = v_uint32()
self.PrcbPad3 = v_bytes(size=40) # FIXME Unknown Array Type
self.RequestSummary = v_uint32()
self.SignalDone = v_ptr32()
self.PrcbPad4 = v_bytes(size=56) # FIXME Unknown Array Type
self.DpcListHead = LIST_ENTRY()
self.DpcStack = v_ptr32()
self.DpcCount = v_uint32()
self.DpcQueueDepth = v_uint32()
self.DpcRoutineActive = v_uint32()
self.DpcInterruptRequested = v_uint32()
self.DpcLastCount = v_uint32()
self.DpcRequestRate = v_uint32()
self.MaximumDpcQueueDepth = v_uint32()
self.MinimumDpcRate = v_uint32()
self.QuantumEnd = v_uint32()
self.PrcbPad5 = v_bytes(size=16) # FIXME Unknown Array Type
self.DpcLock = v_uint32()
self.PrcbPad6 = v_bytes(size=28) # FIXME Unknown Array Type
self.CallDpc = KDPC()
self.ChainedInterruptList = v_ptr32()
self.LookasideIrpFloat = v_uint32()
self.SpareFields0 = v_bytes(size=24) # FIXME Unknown Array Type
self.VendorString = v_bytes(size=13) # FIXME Unknown Array Type
self.InitialApicId = v_uint8()
self.LogicalProcessorsPerPhysicalProcessor = v_uint8()
self._pad0910 = v_bytes(size=1)
self.MHz = v_uint32()
self.FeatureBits = v_uint32()
self.UpdateSignature = LARGE_INTEGER()
self.NpxSaveArea = FX_SAVE_AREA()
self.PowerState = PROCESSOR_POWER_STATE()
class OBJECT_ATTRIBUTES(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Length = v_uint32()
self.RootDirectory = v_ptr32()
self.ObjectName = v_ptr32()
self.Attributes = v_uint32()
self.SecurityDescriptor = v_ptr32()
self.SecurityQualityOfService = v_ptr32()
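# A hedged usage sketch, not part of the original listing: vivisect vstruct
# structures such as OBJECT_ATTRIBUTES above are typically populated from raw
# bytes with vsParse(). Kept as a comment so the definitions module stays
# side-effect free; the zeroed buffer is a placeholder, not real data.
#
#     oa = OBJECT_ATTRIBUTES()
#     oa.vsParse(b"\x00" * len(oa))      # fill fields from a byte buffer
#     print(oa.Length, hex(oa.ObjectName))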
class IO_COUNTERS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ReadOperationCount = v_uint64()
self.WriteOperationCount = v_uint64()
self.OtherOperationCount = v_uint64()
self.ReadTransferCount = v_uint64()
self.WriteTransferCount = v_uint64()
self.OtherTransferCount = v_uint64()
class KSYSTEM_TIME(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.LowPart = v_uint32()
self.High1Time = v_uint32()
self.High2Time = v_uint32()
class CM_FULL_RESOURCE_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.InterfaceType = v_uint32()
self.BusNumber = v_uint32()
self.PartialResourceList = CM_PARTIAL_RESOURCE_LIST()
class EXCEPTION_RECORD(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.ExceptionCode = v_uint32()
self.ExceptionFlags = v_uint32()
self.ExceptionRecord = v_ptr32()
self.ExceptionAddress = v_ptr32()
self.NumberParameters = v_uint32()
self.ExceptionInformation = v_bytes(size=60) # FIXME Unknown Array Type
class SID(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Revision = v_uint8()
self.SubAuthorityCount = v_uint8()
self.IdentifierAuthority = SID_IDENTIFIER_AUTHORITY()
self.SubAuthority = v_bytes(size=4) # FIXME Unknown Array Type
class PS_JOB_TOKEN_FILTER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.CapturedSidCount = v_uint32()
self.CapturedSids = v_ptr32()
self.CapturedSidsLength = v_uint32()
self.CapturedGroupCount = v_uint32()
self.CapturedGroups = v_ptr32()
self.CapturedGroupsLength = v_uint32()
self.CapturedPrivilegeCount = v_uint32()
self.CapturedPrivileges = v_ptr32()
self.CapturedPrivilegesLength = v_uint32()
class KSPIN_LOCK_QUEUE(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Next = v_ptr32()
self.Lock = v_ptr32()
class FAST_IO_DISPATCH(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfFastIoDispatch = v_uint32()
self.FastIoCheckIfPossible = v_ptr32()
self.FastIoRead = v_ptr32()
self.FastIoWrite = v_ptr32()
self.FastIoQueryBasicInfo = v_ptr32()
self.FastIoQueryStandardInfo = v_ptr32()
self.FastIoLock = v_ptr32()
self.FastIoUnlockSingle = v_ptr32()
self.FastIoUnlockAll = v_ptr32()
self.FastIoUnlockAllByKey = v_ptr32()
self.FastIoDeviceControl = v_ptr32()
self.AcquireFileForNtCreateSection = v_ptr32()
self.ReleaseFileForNtCreateSection = v_ptr32()
self.FastIoDetachDevice = v_ptr32()
self.FastIoQueryNetworkOpenInfo = v_ptr32()
self.AcquireForModWrite = v_ptr32()
self.MdlRead = v_ptr32()
self.MdlReadComplete = v_ptr32()
self.PrepareMdlWrite = v_ptr32()
self.MdlWriteComplete = v_ptr32()
self.FastIoReadCompressed = v_ptr32()
self.FastIoWriteCompressed = v_ptr32()
self.MdlReadCompleteCompressed = v_ptr32()
self.MdlWriteCompleteCompressed = v_ptr32()
self.FastIoQueryOpen = v_ptr32()
self.ReleaseForModWrite = v_ptr32()
self.AcquireForCcFlush = v_ptr32()
self.ReleaseForCcFlush = v_ptr32()
class FS_FILTER_CALLBACKS(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.SizeOfFsFilterCallbacks = v_uint32()
self.Reserved = v_uint32()
self.PreAcquireForSectionSynchronization = v_ptr32()
self.PostAcquireForSectionSynchronization = v_ptr32()
self.PreReleaseForSectionSynchronization = v_ptr32()
self.PostReleaseForSectionSynchronization = v_ptr32()
self.PreAcquireForCcFlush = v_ptr32()
self.PostAcquireForCcFlush = v_ptr32()
self.PreReleaseForCcFlush = v_ptr32()
self.PostReleaseForCcFlush = v_ptr32()
self.PreAcquireForModifiedPageWriter = v_ptr32()
self.PostAcquireForModifiedPageWriter = v_ptr32()
self.PreReleaseForModifiedPageWriter = v_ptr32()
self.PostReleaseForModifiedPageWriter = v_ptr32()
class IMAGE_FILE_HEADER(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Machine = v_uint16()
self.NumberOfSections = v_uint16()
self.TimeDateStamp = v_uint32()
self.PointerToSymbolTable = v_uint32()
self.NumberOfSymbols = v_uint32()
self.SizeOfOptionalHeader = v_uint16()
self.Characteristics = v_uint16()
class IO_RESOURCE_DESCRIPTOR(vstruct.VStruct):
def __init__(self):
vstruct.VStruct.__init__(self)
self.Option = v_uint8()
self.Type = v_uint8()
self.ShareDisposition = v_uint8()
self.Spare1 = v_uint8()
        self.Flags = v_uint16()  # USHORT per IO_RESOURCE_DESCRIPTOR; the remainder of this struct is truncated in the source
# Source repository: buildbuddy-io/rules_xcodeproj
"""Functions for creating `XcodeProjInfo` providers."""
load("@bazel_skylib//lib:paths.bzl", "paths")
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"AppleBundleInfo",
"AppleResourceBundleInfo",
)
load("@build_bazel_rules_swift//swift:swift.bzl", "SwiftInfo")
load(
":build_settings.bzl",
"get_product_module_name",
)
load(":collections.bzl", "set_if_true")
load("configuration.bzl", "calculate_configuration", "get_configuration")
load(
":files.bzl",
"join_paths_ignoring_empty",
"parsed_file_path",
)
load(":input_files.bzl", "input_files")
load(":linker_input_files.bzl", "linker_input_files")
load(":opts.bzl", "create_opts_search_paths", "process_opts")
load(":output_files.bzl", "output_files")
load(":platform.bzl", "process_platform")
load(
":providers.bzl",
"InputFileAttributesInfo",
"XcodeProjInfo",
"target_type",
)
load(
":processed_target.bzl",
"processed_target",
"xcode_target",
)
load(
":product.bzl",
"process_product",
)
load(":resource_bundle_products.bzl", "resource_bundle_products")
load(":search_paths.bzl", "process_search_paths")
load(":target_id.bzl", "get_id")
load(":targets.bzl", "targets")
load(
":target_properties.bzl",
"process_defines",
"process_dependencies",
"process_modulemaps",
"process_sdk_links",
"process_swiftmodules",
"should_bundle_resources",
"should_include_outputs",
)
load(
":top_level_targets.bzl",
"process_top_level_properties",
"process_top_level_target",
)
# Library targets
def _process_library_target(*, ctx, target, transitive_infos):
"""Gathers information about a library target.
Args:
ctx: The aspect context.
target: The `Target` to process.
transitive_infos: A `list` of `depset`s of `XcodeProjInfo`s from the
transitive dependencies of `target`.
Returns:
The value returned from `processed_target`.
"""
attrs_info = target[InputFileAttributesInfo]
configuration = get_configuration(ctx)
label = target.label
id = get_id(label = label, configuration = configuration)
build_settings = {}
package_bin_dir = join_paths_ignoring_empty(
ctx.bin_dir.path,
label.workspace_root,
label.package,
)
opts_search_paths = process_opts(
ctx = ctx,
target = target,
package_bin_dir = package_bin_dir,
build_settings = build_settings,
)
product_name = ctx.rule.attr.name
set_if_true(
build_settings,
"PRODUCT_MODULE_NAME",
get_product_module_name(ctx = ctx, target = target),
)
dependencies = process_dependencies(
attrs_info = attrs_info,
transitive_infos = transitive_infos,
)
objc = target[apple_common.Objc] if apple_common.Objc in target else None
linker_inputs = linker_input_files.collect_for_non_top_level(
cc_info = target[CcInfo],
objc = objc,
is_xcode_target = True,
)
cpp = ctx.fragments.cpp
# TODO: Get the value for device builds, even when active config is not for
# device, as Xcode only uses this value for device builds
build_settings["ENABLE_BITCODE"] = str(cpp.apple_bitcode_mode) != "none"
debug_format = "dwarf-with-dsym" if cpp.apple_generate_dsym else "dwarf"
build_settings["DEBUG_INFORMATION_FORMAT"] = debug_format
set_if_true(
build_settings,
"CLANG_ENABLE_MODULES",
getattr(ctx.rule.attr, "enable_modules", False),
)
set_if_true(
build_settings,
"ENABLE_TESTING_SEARCH_PATHS",
getattr(ctx.rule.attr, "testonly", False),
)
build_settings["OTHER_LDFLAGS"] = ["-ObjC"] + build_settings.get(
"OTHER_LDFLAGS",
[],
)
platform = process_platform(
ctx = ctx,
minimum_deployment_os_version = None,
build_settings = build_settings,
)
product = process_product(
target = target,
product_name = product_name,
product_type = "com.apple.product-type.library.static",
bundle_path = None,
linker_inputs = linker_inputs,
build_settings = build_settings,
)
bundle_resources = should_bundle_resources(ctx = ctx)
is_swift = SwiftInfo in target
swift_info = target[SwiftInfo] if is_swift else None
modulemaps = process_modulemaps(swift_info = swift_info)
resource_owner = str(target.label)
inputs = input_files.collect(
ctx = ctx,
target = target,
bundle_resources = bundle_resources,
attrs_info = attrs_info,
owner = resource_owner,
additional_files = modulemaps.files,
transitive_infos = transitive_infos,
)
outputs = output_files.collect(
bundle_info = None,
swift_info = swift_info,
id = id,
transitive_infos = transitive_infos,
should_produce_dto = should_include_outputs(ctx = ctx),
)
resource_bundles = resource_bundle_products.collect(
owner = resource_owner,
is_consuming_bundle = False,
bundle_resources = bundle_resources,
attrs_info = attrs_info,
transitive_infos = transitive_infos,
)
cc_info = target[CcInfo] if CcInfo in target else None
process_defines(
cc_info = cc_info,
build_settings = build_settings,
)
process_sdk_links(
objc = objc,
build_settings = build_settings,
)
search_paths = process_search_paths(
cc_info = cc_info,
objc = objc,
opts_search_paths = opts_search_paths,
)
return processed_target(
attrs_info = attrs_info,
dependencies = dependencies,
inputs = inputs,
linker_inputs = linker_inputs,
outputs = outputs,
potential_target_merges = None,
required_links = None,
resource_bundles = resource_bundles,
search_paths = search_paths,
target = struct(
id = id,
label = label,
is_bundle = False,
product_path = product.path,
),
xcode_target = xcode_target(
id = id,
name = ctx.rule.attr.name,
label = label,
configuration = configuration,
package_bin_dir = package_bin_dir,
platform = platform,
product = product,
is_bundle = False,
is_swift = is_swift,
test_host = None,
build_settings = build_settings,
search_paths = search_paths,
modulemaps = modulemaps,
swiftmodules = process_swiftmodules(swift_info = swift_info),
resource_bundles = resource_bundles,
inputs = inputs,
linker_inputs = linker_inputs,
info_plist = None,
entitlements = None,
dependencies = dependencies,
outputs = outputs,
),
)
# Resource targets
def _process_resource_target(*, ctx, target, transitive_infos):
"""Gathers information about a resource target.
Args:
ctx: The aspect context.
target: The `Target` to process.
transitive_infos: A `list` of `depset`s of `XcodeProjInfo`s from the
transitive dependencies of `target`.
Returns:
The value returned from `processed_target`.
"""
attrs_info = target[InputFileAttributesInfo]
configuration = get_configuration(ctx)
label = target.label
id = get_id(label = label, configuration = configuration)
build_settings = {}
set_if_true(
build_settings,
"PRODUCT_BUNDLE_IDENTIFIER",
ctx.rule.attr.bundle_id,
)
# TODO: Set Info.plist if one is set
build_settings["GENERATE_INFOPLIST_FILE"] = True
bundle_name = ctx.rule.attr.bundle_name or ctx.rule.attr.name
product_name = bundle_name
dependencies = process_dependencies(
attrs_info = attrs_info,
transitive_infos = transitive_infos,
)
package_bin_dir = join_paths_ignoring_empty(
ctx.bin_dir.path,
label.workspace_root,
label.package,
)
bundle_path = parsed_file_path(paths.join(
package_bin_dir,
"{}.bundle".format(bundle_name),
))
linker_inputs = linker_input_files.collect_for_non_top_level(
cc_info = None,
objc = None,
is_xcode_target = True,
)
platform = process_platform(
ctx = ctx,
minimum_deployment_os_version = None,
build_settings = build_settings,
)
product = process_product(
target = target,
product_name = product_name,
product_type = "com.apple.product-type.bundle",
bundle_path = bundle_path,
linker_inputs = linker_inputs,
build_settings = build_settings,
)
bundle_resources = should_bundle_resources(ctx = ctx)
resource_owner = str(label)
inputs = input_files.collect(
ctx = ctx,
target = target,
bundle_resources = bundle_resources,
attrs_info = attrs_info,
owner = resource_owner,
transitive_infos = transitive_infos,
)
outputs = output_files.collect(
bundle_info = None,
swift_info = None,
id = id,
transitive_infos = transitive_infos,
should_produce_dto = should_include_outputs(ctx = ctx),
)
resource_bundles = resource_bundle_products.collect(
bundle_path = bundle_path,
owner = resource_owner,
is_consuming_bundle = False,
bundle_resources = bundle_resources,
attrs_info = attrs_info,
transitive_infos = transitive_infos,
)
search_paths = process_search_paths(
cc_info = None,
objc = None,
opts_search_paths = create_opts_search_paths(
quote_includes = [],
includes = [],
system_includes = [],
),
)
if bundle_resources:
target = struct(
id = id,
label = label,
is_bundle = True,
product_path = product.path,
)
xctarget = xcode_target(
id = id,
name = ctx.rule.attr.name,
label = label,
configuration = configuration,
package_bin_dir = package_bin_dir,
platform = platform,
product = product,
is_bundle = True,
is_swift = False,
test_host = None,
build_settings = build_settings,
search_paths = search_paths,
modulemaps = process_modulemaps(swift_info = None),
swiftmodules = process_swiftmodules(swift_info = None),
resource_bundles = resource_bundles,
inputs = inputs,
linker_inputs = linker_inputs,
info_plist = None,
entitlements = None,
dependencies = dependencies,
outputs = outputs,
)
else:
target = None
xctarget = None
return processed_target(
attrs_info = attrs_info,
dependencies = dependencies,
inputs = inputs,
linker_inputs = linker_inputs,
outputs = outputs,
potential_target_merges = None,
required_links = None,
resource_bundles = resource_bundles,
search_paths = search_paths,
target = target,
xcode_target = xctarget,
)
# Non-Xcode targets
def _process_non_xcode_target(*, ctx, target, transitive_infos):
"""Gathers information about a non-Xcode target.
Args:
ctx: The aspect context.
target: The `Target` to process.
transitive_infos: A `list` of `depset`s of `XcodeProjInfo`s from the
transitive dependencies of `target`.
Returns:
The value returned from `processed_target`.
"""
cc_info = target[CcInfo] if CcInfo in target else None
objc = target[apple_common.Objc] if apple_common.Objc in target else None
attrs_info = target[InputFileAttributesInfo]
bundle_resources = should_bundle_resources(ctx = ctx)
resource_owner = None
return processed_target(
attrs_info = attrs_info,
dependencies = process_dependencies(
attrs_info = attrs_info,
transitive_infos = transitive_infos,
),
inputs = input_files.collect(
ctx = ctx,
target = target,
bundle_resources = bundle_resources,
attrs_info = attrs_info,
owner = resource_owner,
transitive_infos = transitive_infos,
),
linker_inputs = linker_input_files.collect_for_non_top_level(
cc_info = cc_info,
objc = objc,
is_xcode_target = False,
),
outputs = output_files.merge(
attrs_info = attrs_info,
transitive_infos = transitive_infos,
),
potential_target_merges = None,
required_links = None,
resource_bundles = resource_bundle_products.collect(
owner = resource_owner,
is_consuming_bundle = False,
bundle_resources = bundle_resources,
attrs_info = attrs_info,
transitive_infos = transitive_infos,
),
search_paths = process_search_paths(
cc_info = cc_info,
objc = objc,
opts_search_paths = create_opts_search_paths(
quote_includes = [],
includes = [],
system_includes = [],
),
),
target = None,
xcode_target = None,
)
# Creating `XcodeProjInfo`
def _should_skip_target(*, ctx, target):
"""Determines if the given target should be skipped for target generation.
There are some rules, like the test runners for iOS tests, that we want to
    ignore. Nothing from those rules is considered.
Args:
ctx: The aspect context.
target: The `Target` to check.
Returns:
`True` if `target` should be skipped for target generation.
"""
# TODO: Find a way to detect TestEnvironment instead
return targets.is_test_bundle(
target = target,
deps = getattr(ctx.rule.attr, "deps", None),
)
def _target_info_fields(
*,
dependencies,
inputs,
linker_inputs,
outputs,
potential_target_merges,
required_links,
resource_bundles,
search_paths,
target,
target_type,
xcode_targets):
"""Generates target specific fields for the `XcodeProjInfo`.
This should be merged with other fields to fully create an `XcodeProjInfo`.
Args:
dependencies: Maps to the `XcodeProjInfo.dependencies` field.
inputs: Maps to the `XcodeProjInfo.inputs` field.
linker_inputs: Maps to the `XcodeProjInfo.linker_inputs` field.
outputs: Maps to the `XcodeProjInfo.outputs` field.
potential_target_merges: Maps to the
`XcodeProjInfo.potential_target_merges` field.
required_links: Maps to the `XcodeProjInfo.required_links` field.
resource_bundles: Maps to the `XcodeProjInfo.resource_bundles` field.
search_paths: Maps to the `XcodeProjInfo.search_paths` field.
target: Maps to the `XcodeProjInfo.target` field.
target_type: Maps to the `XcodeProjInfo.target_type` field.
xcode_targets: Maps to the `XcodeProjInfo.xcode_targets` field.
Returns:
A `dict` containing the following fields:
* `dependencies`
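        * `inputs`
        * `linker_inputs`
        * `outputs`
        * `potential_target_merges`
        * `required_links`
        * `resource_bundles`
        * `search_paths`
        * `target`
        * `target_type`
        * `xcode_targets`
    """

    # The source listing is truncated at this point. The following is a hedged
    # sketch of the likely body -- it simply returns the documented fields
    # keyed by name, mirroring the Args above -- and is not the verbatim
    # upstream implementation.
    return {
        "dependencies": dependencies,
        "inputs": inputs,
        "linker_inputs": linker_inputs,
        "outputs": outputs,
        "potential_target_merges": potential_target_merges,
        "required_links": required_links,
        "resource_bundles": resource_bundles,
        "search_paths": search_paths,
        "target": target,
        "target_type": target_type,
        "xcode_targets": xcode_targets,
    }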
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.257922,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 3.97331,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0306731,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.226781,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.283316,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.212147,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.342185,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.172724,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.727056,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.199198,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.64444,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0535245,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0088984,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0710456,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0658091,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.12457,
'Execution Unit/Register Files/Runtime Dynamic': 0.0747075,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.157343,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.434073,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.86799,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00156203,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00156203,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00141761,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000580001,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000945353,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00548703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0129371,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.063264,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.02413,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.172051,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.214873,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.43795,
'Instruction Fetch Unit/Runtime Dynamic': 0.468612,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0126418,
'L2/Runtime Dynamic': 0.00261563,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.16241,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.924925,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0622878,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0622878,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 3.45655,
'Load Store Unit/Runtime Dynamic': 1.2944,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.153591,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.307182,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.05451,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0546994,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.250206,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0282061,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.499953,
'Memory Management Unit/Runtime Dynamic': 0.0829055,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 18.641,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.140798,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.011285,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.105874,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
  # (the remainder of this per-core power report dictionary is truncated in the source)
#!/usr/bin/env python
#
# fshack DS ChRIS plugin app (source repository: FNNDSC/pl-fshack)
#
# (c) 2016-2020 Fetal-Neonatal Neuroimaging & Developmental Science Center
# Boston Children's Hospital
#
# http://childrenshospital.org/FNNDSC/
# <EMAIL>
#
import os
import sys
import subprocess
import glob
sys.path.append(os.path.dirname(__file__))
# import the Chris app superclass
from chrisapp.base import ChrisApp
import pudb
Gstr_title = """
__ _ _
/ _| | | | |
| |_ ___| |__ __ _ ___| | __ _ __ _ _
| _/ __| '_ \ / _` |/ __| |/ / | '_ \| | | |
| | \__ \ | | | (_| | (__| < _| |_) | |_| |
|_| |___/_| |_|\__,_|\___|_|\_(_) .__/ \__, |
| | __/ |
|_| |___/
"""
Gstr_synopsis = """
NAME
fshack.py
SYNOPSIS
python fshack.py \\
[-i|--inputFile <inputFileWithinInputDir>] \\
[-o|--outputFile <outputFileWithinOutputDir>] \\
[-e|--exec <commandToExec>] \\
[-a|--args <argsPassedToExec> ] \\
[-h] [--help] \\
[--json] \\
[--man] \\
[--meta] \\
[--savejson <DIR>] \\
[-v|--verbosity <level>] \\
[--version] \\
<inputDir> \\
<outputDir>
DESCRIPTION
        This ChRIS DS plugin contains a complete FreeSurfer version 7
        (https://surfer.nmr.mgh.harvard.edu/fswiki/rel7downloads)
        distribution. Not all FreeSurfer internal applications are exposed at
the plugin level, however. At time of writing, the following FreeSurfer
applications are directly accessible from the plugin CLI:
* recon-all
* mri_convert
* mri_info
* mris_info
        This plugin is meant to demonstrate some design patterns as well
        as to provide some utility for running FreeSurfer within the context
        of ChRIS. It is neither meant nor intended to be a canonical FreeSurfer
ChRIS plugin -- as explicitly indicated by the name, FreeSurfer "hack",
`fshack`. Colloquially, however, this plugin is also known as `f-shack`.
ARGS
[-i|--inputFile <inputFileWithinInputDir>]
        Input file to process. This is typically a DICOM file or a NIfTI
        volume, but the expected type depends on the <FSapp> being run. This
        file exists within the explicitly provided CLI positional <inputDir>. If
specified as a string that starts with a period '.', then examine the
<inputDir> and assign the first ls-ordered file in the glob pattern:
'*' + <inputFileWithoutPeriod> + '*'
as the <inputFile>. So, an <inputFile> of '.0001' will assign the first
file that satisfies the glob
'*0001*'
as <inputFile>.
[-o|--outputFile <outputFileWithinOutputDir>]
        Output file/directory name to use within the <outputDir>. Note that
        the actual meaning of this value depends on the particular <FSapp>.
Note: In the case of `recon-all`, this argument maps to the
-s|--subjectID <subjID>
CLI flag. This file is specified relative to the explicitly provided
positional CLI <outputDir>.
        Also note that the <outputFile> string is used as the prefix for the
        generated -stdout, -stderr, and -returncode filenames.
[-e|--exec <commandToExec>]
Specifies the FreeSurfer command within the plugin/container to
execute.
Note that only a few of the FreeSurfer apps are currently exposed!
[-a|--args <argsPassedToExec>]
Optional string of additional arguments to "pass through" to the
FreeSurfer app.
        The design pattern of this plugin is to pass all the CLI args for
        the single app specified by `--exec` through somewhat blindly. To this
        end, all the args for a given internal FreeSurfer app are specified at
the plugin level with this flag. These args MUST be contained within
single quotes (to protect them from the shell) and the quoted string
MUST start with the required keyword 'ARGS: '.
If the `--exec <FSapp>` does not require additional CLI args, then
this `--args <args>` can be safely omitted.
[-h] [--help]
If specified, show help message.
[--json]
If specified, show json representation of app.
[--man]
If specified, print (this) man page.
[--meta]
If specified, print plugin meta data.
[--savejson <DIR>]
If specified, save json representation file to DIR.
[--version]
If specified, print version number.
"""
class Fshack(ChrisApp):
DESCRIPTION = '''
This app houses a complete FreeSurfer distro and exposes some
        FreeSurfer apps at the level of the plugin CLI.
'''
AUTHORS = 'FNNDSC (<EMAIL>)'
SELFPATH = os.path.dirname(os.path.abspath(__file__))
SELFEXEC = os.path.basename(__file__)
EXECSHELL = 'python3'
TITLE = 'A quick-n-dirty attempt at hacking a FreeSurfer ChRIS plugin'
CATEGORY = ''
TYPE = 'ds'
DOCUMENTATION = 'https://github.com/FNNDSC/pl-fshack'
VERSION = '1.2.0'
ICON = '' # url of an icon image
LICENSE = 'Opensource (MIT)'
MAX_NUMBER_OF_WORKERS = 1 # Override with integer value
MIN_NUMBER_OF_WORKERS = 1 # Override with integer value
MAX_CPU_LIMIT = '' # Override with millicore value as string, e.g. '2000m'
MIN_CPU_LIMIT = '2000m' # Override with millicore value as string, e.g. '2000m'
MAX_MEMORY_LIMIT = '' # Override with string, e.g. '1Gi', '2000Mi'
MIN_MEMORY_LIMIT = '2000Mi' # Override with string, e.g. '1Gi', '2000Mi'
MIN_GPU_LIMIT = 0 # Override with the minimum number of GPUs, as an integer, for your plugin
MAX_GPU_LIMIT = 0 # Override with the maximum number of GPUs, as an integer, for your plugin
# Use this dictionary structure to provide key-value output descriptive information
# that may be useful for the next downstream plugin. For example:
#
# {
# "finalOutputFile": "final/file.out",
# "viewer": "genericTextViewer",
# }
#
# The above dictionary is saved when plugin is called with a ``--saveoutputmeta``
# flag. Note also that all file paths are relative to the system specified
# output directory.
OUTPUT_META_DICT = {}
def define_parameters(self):
"""
Define the CLI arguments accepted by this plugin app.
"""
self.add_argument("-a", "--args",
help = "FS arguments to pass",
type = str,
dest = 'args',
optional=True,
default = "")
self.add_argument("-e", "--exec",
help = "FS app to run",
type = str,
dest = 'exec',
optional = True,
default = "recon-all")
self.add_argument("-i", "--inputFile",
help = "input file (use .<ext> to find and use the first file with that extension)",
type = str,
dest = 'inputFile',
optional = True,
default = "")
self.add_argument("-o", "--outputFile",
help = "output file",
type = str,
dest = 'outputFile',
optional = True,
default = "run")
def job_run(self, str_cmd):
"""
Running some CLI process via python is cumbersome. The typical/easy
path of
os.system(str_cmd)
is deprecated and prone to hidden complexity. The preferred
method is via subprocess, which has a cumbersome processing
syntax. Still, this method runs the `str_cmd` and returns the
stderr and stdout strings as well as a returncode.
        Providing realtime output of both stdout and stderr seems
problematic. The approach here is to provide realtime
output on stdout and only provide stderr on process completion.
"""
d_ret = {
'stdout': "",
'stderr': "",
'returncode': 0
}
localEnv = os.environ.copy()
localEnv["SUBJECTS_DIR"] = self.options.outputdir
p = subprocess.Popen(
str_cmd.split(),
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = localEnv
)
# Realtime output on stdout
str_stdoutLine = ""
str_stdout = ""
while True:
stdout = p.stdout.readline()
if p.poll() is not None:
break
if stdout:
str_stdoutLine = stdout.decode()
print(str_stdoutLine, end = '')
str_stdout += str_stdoutLine
d_ret['stdout'] = str_stdout
d_ret['stderr'] = p.stderr.read().decode()
d_ret['returncode'] = p.returncode
print('\nstderr: \n%s' % d_ret['stderr'])
return d_ret
def job_stdwrite(self, d_job, options):
"""
Capture the d_job entries to respective files.
"""
for key in d_job.keys():
with open(
'%s/%s-%s' % (options.outputdir, options.outputFile, key), "w"
) as f:
f.write(str(d_job[key]))
f.close()
return {
'status': True
}
def inputFileSpec_parse(self, options):
"""
        Parse the inputFile value and possibly trigger some contextual
        behaviour. Specifically, if the inputFile spec starts with a
period, '.', then search the inputDir for the first file with
that substring and assign that file as inputFile.
Modify the options variable in place.
"""
str_thisDir: str = ''
str_pattern: str = ''
l_files: list = []
if options.inputFile.startswith('.'):
str_pattern = options.inputFile[1:]
str_thisDir = os.getcwd()
os.chdir(options.inputdir)
l_files = glob.glob('*' + str_pattern + '*')
if len(l_files):
options.inputFile = l_files[0]
os.chdir(str_thisDir)
def run(self, options):
"""
Define the code to be run by this plugin app.
"""
global str_cmd
print(Gstr_title)
print('Version: %s' % self.get_version())
for k,v in options.__dict__.items():
print("%20s: -->%s<--" % (k, v))
self.options = options
self.inputFileSpec_parse(options)
str_args = ""
l_appargs = options.args.split('ARGS:')
if len(l_appargs) == 2:
str_args = l_appargs[1]
else:
str_args = l_appargs[0]
str_FSbinDir = '/usr/local/freesurfer/bin'
str_cmd = ""
if options.exec == 'recon-all':
str_cmd = '%s/%s -i %s/%s -subjid %s/%s %s ' % \
(str_FSbinDir,
options.exec, options.inputdir, options.inputFile,
options.outputdir, options.outputFile, str_args)
if options.exec == 'mri_convert':
str_cmd = '%s/%s %s/%s %s/%s %s ' % \
(str_FSbinDir,
options.exec, options.inputdir, options.inputFile,
options.outputdir, options.outputFile, str_args)
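        # The source listing is truncated above. A hedged sketch of how the
        # remaining branches and the final dispatch likely look, based on the
        # FS apps named in the synopsis (mri_info, mris_info) and on the
        # job_run()/job_stdwrite() helpers defined earlier; this is not the
        # verbatim upstream code.
        if options.exec in ('mri_info', 'mris_info'):
            str_cmd = '%s/%s %s/%s %s ' % \
                      (str_FSbinDir,
                       options.exec, options.inputdir, options.inputFile,
                       str_args)
        self.job_stdwrite(self.job_run(str_cmd), options)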