| Column | Type / range | Value (this record) |
|---|---|---|
| blob_id | stringlengths 40–40 | 0821cd819be9e85ea5c74fffa2859d8149cac83f |
| directory_id | stringlengths 40–40 | 012a09b3f76dd2d1854bd8b38ee5c1d0ea254e4e |
| path | stringlengths 3–616 | /src/script/create_user.py |
| content_id | stringlengths 40–40 | 209e441a4edb7e573b840aaf571b4b42dc776c18 |
| detected_licenses | sequencelengths 0–112 | [] |
| license_type | stringclasses 2 values | no_license |
| repo_name | stringlengths 5–115 | yezj/Paladin |
| snapshot_id | stringlengths 40–40 | 1215106c578cea239305ebe5d9666cf010e0a57b |
| revision_id | stringlengths 40–40 | f347f50d6491eac6c92f70af8c2a71720dcf7774 |
| branch_name | stringclasses 777 values | refs/heads/master |
| visit_date | timestamp[us] 2015-08-06 10:31:46 – 2023-09-06 10:44:38 | 2020-04-18T17:27:00.602660 |
| revision_date | timestamp[us] 1970-01-01 02:38:32 – 2037-05-03 13:00:00 | 2019-10-22T06:27:39 |
| committer_date | timestamp[us] 1970-01-01 02:38:32 – 2023-09-06 01:08:06 | 2019-10-22T06:27:39 |
| github_id | int64 4.92k–681M ⌀ | 167,655,028 |
| star_events_count | int64 0–209k | 0 |
| fork_events_count | int64 0–110k | 0 |
| gha_license_id | stringclasses 22 values | null |
| gha_event_created_at | timestamp[us] 2012-06-04 01:52:49 – 2023-09-14 21:59:50 ⌀ | null |
| gha_created_at | timestamp[us] 2008-05-22 07:58:19 – 2023-08-21 12:35:19 ⌀ | null |
| gha_language | stringclasses 149 values | null |
| src_encoding | stringclasses 26 values | UTF-8 |
| language | stringclasses 1 value | Python |
| is_vendor | bool 2 classes | false |
| is_generated | bool 2 classes | false |
| length_bytes | int64 3–10.2M | 163,620 |
| extension | stringclasses 188 values | py |
| content | stringlengths 3–10.2M | (file contents reproduced below) |
| authors | sequencelengths 1–1 | (not shown) |
| author_id | stringlengths 1–132 | (not shown) |

# -*- coding: utf-8 -*-
import uuid
import requests
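# NICKNAME below is a pool of candidate display names (surname + given-name strings);
# presumably the remainder of this create_user script samples from it when generating test accounts.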
NICKNAME = [
"赵 艺芯",
"赵 彦龙",
"赵 君昊",
"赵 子扬",
"赵 雨晴",
"赵 元起",
"赵 威璜",
"赵 梓云",
"赵 伯奢",
"赵 子伯",
"赵 公威",
"赵 曜岩",
"赵 昕阳",
"赵 晨欣",
"赵 世元",
"赵 子远",
"赵 元宗",
"赵 子义",
"赵 仲宣",
"赵 孔休",
"赵 义权",
"赵 文向",
"赵 佐治",
"赵 文则",
"赵 文谦",
"赵 子布",
"赵 文远",
"赵 康成",
"赵 士会",
"赵 正礼",
"赵 孝连",
"赵 彦靖",
"赵 玄风",
"赵 威彦",
"赵 子许",
"赵 文祥",
"赵 梦雯",
"赵 悦菲",
"赵 予馨",
"赵 婧馨",
"赵 婷儿",
"赵 圣楠",
"赵 芷欣",
"赵 心怡",
"赵 乐彤",
"赵 靖瑶",
"赵 艺婷",
"赵 樱璐",
"赵 婉琳",
"赵 婉儿",
"赵 倩儿",
"赵 蝶莺",
"赵 紫婉",
"赵 伯玉",
"赵 盛衡",
"赵 承伯",
"赵 子雍",
"赵 元伯",
"赵 元泰",
"赵 景兴",
"赵 子均",
"赵 文举",
"赵 子安",
"赵 仲达",
"赵 思然",
"赵 子昂",
"赵 子明",
"赵 子初",
"赵 文师",
"赵 世英",
"赵 敬达",
"赵 公昭",
"赵 文先",
"赵 文则",
"赵 温舒",
"赵 子正",
"赵 君肃",
"赵 彦英",
"赵 文进",
"赵 季宁",
"赵 孔璋",
"赵 元龙",
"赵 公台",
"赵 元悌",
"赵 文奥",
"赵 玄伯",
"赵 元方",
"赵 敬宗",
"赵 子烈",
"赵 元耀",
"赵 温伯",
"赵 公玮",
"赵 长文",
"赵 孔和",
"赵 文和",
"赵 恭夏",
"赵 文公",
"赵 曼倩",
"赵 文若",
"赵 景倩",
"赵 ",
"钱 艺芯",
"钱 彦龙",
"钱 君昊",
"钱 子扬",
"钱 雨晴",
"钱 元起",
"钱 威璜",
"钱 梓云",
"钱 伯奢",
"钱 子伯",
"钱 公威",
"钱 曜岩",
"钱 昕阳",
"钱 晨欣",
"钱 世元",
"钱 子远",
"钱 元宗",
"钱 子义",
"钱 仲宣",
"钱 孔休",
"钱 义权",
"钱 文向",
"钱 佐治",
"钱 文则",
"钱 文谦",
"钱 子布",
"钱 文远",
"钱 康成",
"钱 士会",
"钱 正礼",
"钱 孝连",
"钱 彦靖",
"钱 玄风",
"钱 威彦",
"钱 子许",
"钱 文祥",
"钱 梦雯",
"钱 悦菲",
"钱 予馨",
"钱 婧馨",
"钱 婷儿",
"钱 圣楠",
"钱 芷欣",
"钱 心怡",
"钱 乐彤",
"钱 靖瑶",
"钱 艺婷",
"钱 樱璐",
"钱 婉琳",
"钱 婉儿",
"钱 倩儿",
"钱 蝶莺",
"钱 紫婉",
"钱 伯玉",
"钱 盛衡",
"钱 承伯",
"钱 子雍",
"钱 元伯",
"钱 元泰",
"钱 景兴",
"钱 子均",
"钱 文举",
"钱 子安",
"钱 仲达",
"钱 思然",
"钱 子昂",
"钱 子明",
"钱 子初",
"钱 文师",
"钱 世英",
"钱 敬达",
"钱 公昭",
"钱 文先",
"钱 文则",
"钱 温舒",
"钱 子正",
"钱 君肃",
"钱 彦英",
"钱 文进",
"钱 季宁",
"钱 孔璋",
"钱 元龙",
"钱 公台",
"钱 元悌",
"钱 文奥",
"钱 玄伯",
"钱 元方",
"钱 敬宗",
"钱 子烈",
"钱 元耀",
"钱 温伯",
"钱 公玮",
"钱 长文",
"钱 孔和",
"钱 文和",
"钱 恭夏",
"钱 文公",
"钱 曼倩",
"钱 文若",
"钱 景倩",
"钱 ",
"孙 艺芯",
"孙 彦龙",
"孙 君昊",
"孙 子扬",
"孙 雨晴",
"孙 元起",
"孙 威璜",
"孙 梓云",
"孙 伯奢",
"孙 子伯",
"孙 公威",
"孙 曜岩",
"孙 昕阳",
"孙 晨欣",
"孙 世元",
"孙 子远",
"孙 元宗",
"孙 子义",
"孙 仲宣",
"孙 孔休",
"孙 义权",
"孙 文向",
"孙 佐治",
"孙 文则",
"孙 文谦",
"孙 子布",
"孙 文远",
"孙 康成",
"孙 士会",
"孙 正礼",
"孙 孝连",
"孙 彦靖",
"孙 玄风",
"孙 威彦",
"孙 子许",
"孙 文祥",
"孙 梦雯",
"孙 悦菲",
"孙 予馨",
"孙 婧馨",
"孙 婷儿",
"孙 圣楠",
"孙 芷欣",
"孙 心怡",
"孙 乐彤",
"孙 靖瑶",
"孙 艺婷",
"孙 樱璐",
"孙 婉琳",
"孙 婉儿",
"孙 倩儿",
"孙 蝶莺",
"孙 紫婉",
"孙 伯玉",
"孙 盛衡",
"孙 承伯",
"孙 子雍",
"孙 元伯",
"孙 元泰",
"孙 景兴",
"孙 子均",
"孙 文举",
"孙 子安",
"孙 仲达",
"孙 思然",
"孙 子昂",
"孙 子明",
"孙 子初",
"孙 文师",
"孙 世英",
"孙 敬达",
"孙 公昭",
"孙 文先",
"孙 文则",
"孙 温舒",
"孙 子正",
"孙 君肃",
"孙 彦英",
"孙 文进",
"孙 季宁",
"孙 孔璋",
"孙 元龙",
"孙 公台",
"孙 元悌",
"孙 文奥",
"孙 玄伯",
"孙 元方",
"孙 敬宗",
"孙 子烈",
"孙 元耀",
"孙 温伯",
"孙 公玮",
"孙 长文",
"孙 孔和",
"孙 文和",
"孙 恭夏",
"孙 文公",
"孙 曼倩",
"孙 文若",
"孙 景倩",
"孙 ",
"李 艺芯",
"李 彦龙",
"李 君昊",
"李 子扬",
"李 雨晴",
"李 元起",
"李 威璜",
"李 梓云",
"李 伯奢",
"李 子伯",
"李 公威",
"李 曜岩",
"李 昕阳",
"李 晨欣",
"李 世元",
"李 子远",
"李 元宗",
"李 子义",
"李 仲宣",
"李 孔休",
"李 义权",
"李 文向",
"李 佐治",
"李 文则",
"李 文谦",
"李 子布",
"李 文远",
"李 康成",
"李 士会",
"李 正礼",
"李 孝连",
"李 彦靖",
"李 玄风",
"李 威彦",
"李 子许",
"李 文祥",
"李 梦雯",
"李 悦菲",
"李 予馨",
"李 婧馨",
"李 婷儿",
"李 圣楠",
"李 芷欣",
"李 心怡",
"李 乐彤",
"李 靖瑶",
"李 艺婷",
"李 樱璐",
"李 婉琳",
"李 婉儿",
"李 倩儿",
"李 蝶莺",
"李 紫婉",
"李 伯玉",
"李 盛衡",
"李 承伯",
"李 子雍",
"李 元伯",
"李 元泰",
"李 景兴",
"李 子均",
"李 文举",
"李 子安",
"李 仲达",
"李 思然",
"李 子昂",
"李 子明",
"李 子初",
"李 文师",
"李 世英",
"李 敬达",
"李 公昭",
"李 文先",
"李 文则",
"李 温舒",
"李 子正",
"李 君肃",
"李 彦英",
"李 文进",
"李 季宁",
"李 孔璋",
"李 元龙",
"李 公台",
"李 元悌",
"李 文奥",
"李 玄伯",
"李 元方",
"李 敬宗",
"李 子烈",
"李 元耀",
"李 温伯",
"李 公玮",
"李 长文",
"李 孔和",
"李 文和",
"李 恭夏",
"李 文公",
"李 曼倩",
"李 文若",
"李 景倩",
"李 ",
"周 艺芯",
"周 彦龙",
"周 君昊",
"周 子扬",
"周 雨晴",
"周 元起",
"周 威璜",
"周 梓云",
"周 伯奢",
"周 子伯",
"周 公威",
"周 曜岩",
"周 昕阳",
"周 晨欣",
"周 世元",
"周 子远",
"周 元宗",
"周 子义",
"周 仲宣",
"周 孔休",
"周 义权",
"周 文向",
"周 佐治",
"周 文则",
"周 文谦",
"周 子布",
"周 文远",
"周 康成",
"周 士会",
"周 正礼",
"周 孝连",
"周 彦靖",
"周 玄风",
"周 威彦",
"周 子许",
"周 文祥",
"周 梦雯",
"周 悦菲",
"周 予馨",
"周 婧馨",
"周 婷儿",
"周 圣楠",
"周 芷欣",
"周 心怡",
"周 乐彤",
"周 靖瑶",
"周 艺婷",
"周 樱璐",
"周 婉琳",
"周 婉儿",
"周 倩儿",
"周 蝶莺",
"周 紫婉",
"周 伯玉",
"周 盛衡",
"周 承伯",
"周 子雍",
"周 元伯",
"周 元泰",
"周 景兴",
"周 子均",
"周 文举",
"周 子安",
"周 仲达",
"周 思然",
"周 子昂",
"周 子明",
"周 子初",
"周 文师",
"周 世英",
"周 敬达",
"周 公昭",
"周 文先",
"周 文则",
"周 温舒",
"周 子正",
"周 君肃",
"周 彦英",
"周 文进",
"周 季宁",
"周 孔璋",
"周 元龙",
"周 公台",
"周 元悌",
"周 文奥",
"周 玄伯",
"周 元方",
"周 敬宗",
"周 子烈",
"周 元耀",
"周 温伯",
"周 公玮",
"周 长文",
"周 孔和",
"周 文和",
"周 恭夏",
"周 文公",
"周 曼倩",
"周 文若",
"周 景倩",
"周 ",
"吴 艺芯",
"吴 彦龙",
"吴 君昊",
"吴 子扬",
"吴 雨晴",
"吴 元起",
"吴 威璜",
"吴 梓云",
"吴 伯奢",
"吴 子伯",
"吴 公威",
"吴 曜岩",
"吴 昕阳",
"吴 晨欣",
"吴 世元",
"吴 子远",
"吴 元宗",
"吴 子义",
"吴 仲宣",
"吴 孔休",
"吴 义权",
"吴 文向",
"吴 佐治",
"吴 文则",
"吴 文谦",
"吴 子布",
"吴 文远",
"吴 康成",
"吴 士会",
"吴 正礼",
"吴 孝连",
"吴 彦靖",
"吴 玄风",
"吴 威彦",
"吴 子许",
"吴 文祥",
"吴 梦雯",
"吴 悦菲",
"吴 予馨",
"吴 婧馨",
"吴 婷儿",
"吴 圣楠",
"吴 芷欣",
"吴 心怡",
"吴 乐彤",
"吴 靖瑶",
"吴 艺婷",
"吴 樱璐",
"吴 婉琳",
"吴 婉儿",
"吴 倩儿",
"吴 蝶莺",
"吴 紫婉",
"吴 伯玉",
"吴 盛衡",
"吴 承伯",
"吴 子雍",
"吴 元伯",
"吴 元泰",
"吴 景兴",
"吴 子均",
"吴 文举",
"吴 子安",
"吴 仲达",
"吴 思然",
"吴 子昂",
"吴 子明",
"吴 子初",
"吴 文师",
"吴 世英",
"吴 敬达",
"吴 公昭",
"吴 文先",
"吴 文则",
"吴 温舒",
"吴 子正",
"吴 君肃",
"吴 彦英",
"吴 文进",
"吴 季宁",
"吴 孔璋",
"吴 元龙",
"吴 公台",
"吴 元悌",
"吴 文奥",
"吴 玄伯",
"吴 元方",
"吴 敬宗",
"吴 子烈",
"吴 元耀",
"吴 温伯",
"吴 公玮",
"吴 长文",
"吴 孔和",
"吴 文和",
"吴 恭夏",
"吴 文公",
"吴 曼倩",
"吴 文若",
"吴 景倩",
"吴 ",
"郑 艺芯",
"郑 彦龙",
"郑 君昊",
"郑 子扬",
"郑 雨晴",
"郑 元起",
"郑 威璜",
"郑 梓云",
"郑 伯奢",
"郑 子伯",
"郑 公威",
"郑 曜岩",
"郑 昕阳",
"郑 晨欣",
"郑 世元",
"郑 子远",
"郑 元宗",
"郑 子义",
"郑 仲宣",
"郑 孔休",
"郑 义权",
"郑 文向",
"郑 佐治",
"郑 文则",
"郑 文谦",
"郑 子布",
"郑 文远",
"郑 康成",
"郑 士会",
"郑 正礼",
"郑 孝连",
"郑 彦靖",
"郑 玄风",
"郑 威彦",
"郑 子许",
"郑 文祥",
"郑 梦雯",
"郑 悦菲",
"郑 予馨",
"郑 婧馨",
"郑 婷儿",
"郑 圣楠",
"郑 芷欣",
"郑 心怡",
"郑 乐彤",
"郑 靖瑶",
"郑 艺婷",
"郑 樱璐",
"郑 婉琳",
"郑 婉儿",
"郑 倩儿",
"郑 蝶莺",
"郑 紫婉",
"郑 伯玉",
"郑 盛衡",
"郑 承伯",
"郑 子雍",
"郑 元伯",
"郑 元泰",
"郑 景兴",
"郑 子均",
"郑 文举",
"郑 子安",
"郑 仲达",
"郑 思然",
"郑 子昂",
"郑 子明",
"郑 子初",
"郑 文师",
"郑 世英",
"郑 敬达",
"郑 公昭",
"郑 文先",
"郑 文则",
"郑 温舒",
"郑 子正",
"郑 君肃",
"郑 彦英",
"郑 文进",
"郑 季宁",
"郑 孔璋",
"郑 元龙",
"郑 公台",
"郑 元悌",
"郑 文奥",
"郑 玄伯",
"郑 元方",
"郑 敬宗",
"郑 子烈",
"郑 元耀",
"郑 温伯",
"郑 公玮",
"郑 长文",
"郑 孔和",
"郑 文和",
"郑 恭夏",
"郑 文公",
"郑 曼倩",
"郑 文若",
"郑 景倩",
"郑 ",
"王 艺芯",
"王 彦龙",
"王 君昊",
"王 子扬",
"王 雨晴",
"王 元起",
"王 威璜",
"王 梓云",
"王 伯奢",
"王 子伯",
"王 公威",
"王 曜岩",
"王 昕阳",
"王 晨欣",
"王 世元",
"王 子远",
"王 元宗",
"王 子义",
"王 仲宣",
"王 孔休",
"王 义权",
"王 文向",
"王 佐治",
"王 文则",
"王 文谦",
"王 子布",
"王 文远",
"王 康成",
"王 士会",
"王 正礼",
"王 孝连",
"王 彦靖",
"王 玄风",
"王 威彦",
"王 子许",
"王 文祥",
"王 梦雯",
"王 悦菲",
"王 予馨",
"王 婧馨",
"王 婷儿",
"王 圣楠",
"王 芷欣",
"王 心怡",
"王 乐彤",
"王 靖瑶",
"王 艺婷",
"王 樱璐",
"王 婉琳",
"王 婉儿",
"王 倩儿",
"王 蝶莺",
"王 紫婉",
"王 伯玉",
"王 盛衡",
"王 承伯",
"王 子雍",
"王 元伯",
"王 元泰",
"王 景兴",
"王 子均",
"王 文举",
"王 子安",
"王 仲达",
"王 思然",
"王 子昂",
"王 子明",
"王 子初",
"王 文师",
"王 世英",
"王 敬达",
"王 公昭",
"王 文先",
"王 文则",
"王 温舒",
"王 子正",
"王 君肃",
"王 彦英",
"王 文进",
"王 季宁",
"王 孔璋",
"王 元龙",
"王 公台",
"王 元悌",
"王 文奥",
"王 玄伯",
"王 元方",
"王 敬宗",
"王 子烈",
"王 元耀",
"王 温伯",
"王 公玮",
"王 长文",
"王 孔和",
"王 文和",
"王 恭夏",
"王 文公",
"王 曼倩",
"王 文若",
"王 景倩",
"王 ",
"冯 艺芯",
"冯 彦龙",
"冯 君昊",
"冯 子扬",
"冯 雨晴",
"冯 元起",
"冯 威璜",
"冯 梓云",
"冯 伯奢",
"冯 子伯",
"冯 公威",
"冯 曜岩",
"冯 昕阳",
"冯 晨欣",
"冯 世元",
"冯 子远",
"冯 元宗",
"冯 子义",
"冯 仲宣",
"冯 孔休",
"冯 义权",
"冯 文向",
"冯 佐治",
"冯 文则",
"冯 文谦",
"冯 子布",
"冯 文远",
"冯 康成",
"冯 士会",
"冯 正礼",
"冯 孝连",
"冯 彦靖",
"冯 玄风",
"冯 威彦",
"冯 子许",
"冯 文祥",
"冯 梦雯",
"冯 悦菲",
"冯 予馨",
"冯 婧馨",
"冯 婷儿",
"冯 圣楠",
"冯 芷欣",
"冯 心怡",
"冯 乐彤",
"冯 靖瑶",
"冯 艺婷",
"冯 樱璐",
"冯 婉琳",
"冯 婉儿",
"冯 倩儿",
"冯 蝶莺",
"冯 紫婉",
"冯 伯玉",
"冯 盛衡",
"冯 承伯",
"冯 子雍",
"冯 元伯",
"冯 元泰",
"冯 景兴",
"冯 子均",
"冯 文举",
"冯 子安",
"冯 仲达",
"冯 思然",
"冯 子昂",
"冯 子明",
"冯 子初",
"冯 文师",
"冯 世英",
"冯 敬达",
"冯 公昭",
"冯 文先",
"冯 文则",
"冯 温舒",
"冯 子正",
"冯 君肃",
"冯 彦英",
"冯 文进",
"冯 季宁",
"冯 孔璋",
"冯 元龙",
"冯 公台",
"冯 元悌",
"冯 文奥",
"冯 玄伯",
"冯 元方",
"冯 敬宗",
"冯 子烈",
"冯 元耀",
"冯 温伯",
"冯 公玮",
"冯 长文",
"冯 孔和",
"冯 文和",
"冯 恭夏",
"冯 文公",
"冯 曼倩",
"冯 文若",
"冯 景倩",
"冯 ",
"陈 艺芯",
"陈 彦龙",
"陈 君昊",
"陈 子扬",
"陈 雨晴",
"陈 元起",
"陈 威璜",
"陈 梓云",
"陈 伯奢",
"陈 子伯",
"陈 公威",
"陈 曜岩",
"陈 昕阳",
"陈 晨欣",
"陈 世元",
"陈 子远",
"陈 元宗",
"陈 子义",
"陈 仲宣",
"陈 孔休",
"陈 义权",
"陈 文向",
"陈 佐治",
"陈 文则",
"陈 文谦",
"陈 子布",
"陈 文远",
"陈 康成",
"陈 士会",
"陈 正礼",
"陈 孝连",
"陈 彦靖",
"陈 玄风",
"陈 威彦",
"陈 子许",
"陈 文祥",
"陈 梦雯",
"陈 悦菲",
"陈 予馨",
"陈 婧馨",
"陈 婷儿",
"陈 圣楠",
"陈 芷欣",
"陈 心怡",
"陈 乐彤",
"陈 靖瑶",
"陈 艺婷",
"陈 樱璐",
"陈 婉琳",
"陈 婉儿",
"陈 倩儿",
"陈 蝶莺",
"陈 紫婉",
"陈 伯玉",
"陈 盛衡",
"陈 承伯",
"陈 子雍",
"陈 元伯",
"陈 元泰",
"陈 景兴",
"陈 子均",
"陈 文举",
"陈 子安",
"陈 仲达",
"陈 思然",
"陈 子昂",
"陈 子明",
"陈 子初",
"陈 文师",
"陈 世英",
"陈 敬达",
"陈 公昭",
"陈 文先",
"陈 文则",
"陈 温舒",
"陈 子正",
"陈 君肃",
"陈 彦英",
"陈 文进",
"陈 季宁",
"陈 孔璋",
"陈 元龙",
"陈 公台",
"陈 元悌",
"陈 文奥",
"陈 玄伯",
"陈 元方",
"陈 敬宗",
"陈 子烈",
"陈 元耀",
"陈 温伯",
"陈 公玮",
"陈 长文",
"陈 孔和",
"陈 文和",
"陈 恭夏",
"陈 文公",
"陈 曼倩",
"陈 文若",
"陈 景倩",
"陈 ",
"褚 艺芯",
"褚 彦龙",
"褚 君昊",
"褚 子扬",
"褚 雨晴",
"褚 元起",
"褚 威璜",
"褚 梓云",
"褚 伯奢",
"褚 子伯",
"褚 公威",
"褚 曜岩",
"褚 昕阳",
"褚 晨欣",
"褚 世元",
"褚 子远",
"褚 元宗",
"褚 子义",
"褚 仲宣",
"褚 孔休",
"褚 义权",
"褚 文向",
"褚 佐治",
"褚 文则",
"褚 文谦",
"褚 子布",
"褚 文远",
"褚 康成",
"褚 士会",
"褚 正礼",
"褚 孝连",
"褚 彦靖",
"褚 玄风",
"褚 威彦",
"褚 子许",
"褚 文祥",
"褚 梦雯",
"褚 悦菲",
"褚 予馨",
"褚 婧馨",
"褚 婷儿",
"褚 圣楠",
"褚 芷欣",
"褚 心怡",
"褚 乐彤",
"褚 靖瑶",
"褚 艺婷",
"褚 樱璐",
"褚 婉琳",
"褚 婉儿",
"褚 倩儿",
"褚 蝶莺",
"褚 紫婉",
"褚 伯玉",
"褚 盛衡",
"褚 承伯",
"褚 子雍",
"褚 元伯",
"褚 元泰",
"褚 景兴",
"褚 子均",
"褚 文举",
"褚 子安",
"褚 仲达",
"褚 思然",
"褚 子昂",
"褚 子明",
"褚 子初",
"褚 文师",
"褚 世英",
"褚 敬达",
"褚 公昭",
"褚 文先",
"褚 文则",
"褚 温舒",
"褚 子正",
"褚 君肃",
"褚 彦英",
"褚 文进",
"褚 季宁",
"褚 孔璋",
"褚 元龙",
"褚 公台",
"褚 元悌",
"褚 文奥",
"褚 玄伯",
"褚 元方",
"褚 敬宗",
"褚 子烈",
"褚 元耀",
"褚 温伯",
"褚 公玮",
"褚 长文",
"褚 孔和",
"褚 文和",
"褚 恭夏",
"褚 文公",
"褚 曼倩",
"褚 文若",
"褚 景倩",
"褚 ",
"卫 艺芯",
"卫 彦龙",
"卫 君昊",
"卫 子扬",
"卫 雨晴",
"卫 元起",
"卫 威璜",
"卫 梓云",
"卫 伯奢",
"卫 子伯",
"卫 公威",
"卫 曜岩",
"卫 昕阳",
"卫 晨欣",
"卫 世元",
"卫 子远",
"卫 元宗",
"卫 子义",
"卫 仲宣",
"卫 孔休",
"卫 义权",
"卫 文向",
"卫 佐治",
"卫 文则",
"卫 文谦",
"卫 子布",
"卫 文远",
"卫 康成",
"卫 士会",
"卫 正礼",
"卫 孝连",
"卫 彦靖",
"卫 玄风",
"卫 威彦",
"卫 子许",
"卫 文祥",
"卫 梦雯",
"卫 悦菲",
"卫 予馨",
"卫 婧馨",
"卫 婷儿",
"卫 圣楠",
"卫 芷欣",
"卫 心怡",
"卫 乐彤",
"卫 靖瑶",
"卫 艺婷",
"卫 樱璐",
"卫 婉琳",
"卫 婉儿",
"卫 倩儿",
"卫 蝶莺",
"卫 紫婉",
"卫 伯玉",
"卫 盛衡",
"卫 承伯",
"卫 子雍",
"卫 元伯",
"卫 元泰",
"卫 景兴",
"卫 子均",
"卫 文举",
"卫 子安",
"卫 仲达",
"卫 思然",
"卫 子昂",
"卫 子明",
"卫 子初",
"卫 文师",
"卫 世英",
"卫 敬达",
"卫 公昭",
"卫 文先",
"卫 文则",
"卫 温舒",
"卫 子正",
"卫 君肃",
"卫 彦英",
"卫 文进",
"卫 季宁",
"卫 孔璋",
"卫 元龙",
"卫 公台",
"卫 元悌",
"卫 文奥",
"卫 玄伯",
"卫 元方",
"卫 敬宗",
"卫 子烈",
"卫 元耀",
"卫 温伯",
"卫 公玮",
"卫 长文",
"卫 孔和",
"卫 文和",
"卫 恭夏",
"卫 文公",
"卫 曼倩",
"卫 文若",
"卫 景倩",
"卫 ",
"蒋 艺芯",
"蒋 彦龙",
"蒋 君昊",
"蒋 子扬",
"蒋 雨晴",
"蒋 元起",
"蒋 威璜",
"蒋 梓云",
"蒋 伯奢",
"蒋 子伯",
"蒋 公威",
"蒋 曜岩",
"蒋 昕阳",
"蒋 晨欣",
"蒋 世元",
"蒋 子远",
"蒋 元宗",
"蒋 子义",
"蒋 仲宣",
"蒋 孔休",
"蒋 义权",
"蒋 文向",
"蒋 佐治",
"蒋 文则",
"蒋 文谦",
"蒋 子布",
"蒋 文远",
"蒋 康成",
"蒋 士会",
"蒋 正礼",
"蒋 孝连",
"蒋 彦靖",
"蒋 玄风",
"蒋 威彦",
"蒋 子许",
"蒋 文祥",
"蒋 梦雯",
"蒋 悦菲",
"蒋 予馨",
"蒋 婧馨",
"蒋 婷儿",
"蒋 圣楠",
"蒋 芷欣",
"蒋 心怡",
"蒋 乐彤",
"蒋 靖瑶",
"蒋 艺婷",
"蒋 樱璐",
"蒋 婉琳",
"蒋 婉儿",
"蒋 倩儿",
"蒋 蝶莺",
"蒋 紫婉",
"蒋 伯玉",
"蒋 盛衡",
"蒋 承伯",
"蒋 子雍",
"蒋 元伯",
"蒋 元泰",
"蒋 景兴",
"蒋 子均",
"蒋 文举",
"蒋 子安",
"蒋 仲达",
"蒋 思然",
"蒋 子昂",
"蒋 子明",
"蒋 子初",
"蒋 文师",
"蒋 世英",
"蒋 敬达",
"蒋 公昭",
"蒋 文先",
"蒋 文则",
"蒋 温舒",
"蒋 子正",
"蒋 君肃",
"蒋 彦英",
"蒋 文进",
"蒋 季宁",
"蒋 孔璋",
"蒋 元龙",
"蒋 公台",
"蒋 元悌",
"蒋 文奥",
"蒋 玄伯",
"蒋 元方",
"蒋 敬宗",
"蒋 子烈",
"蒋 元耀",
"蒋 温伯",
"蒋 公玮",
"蒋 长文",
"蒋 孔和",
"蒋 文和",
"蒋 恭夏",
"蒋 文公",
"蒋 曼倩",
"蒋 文若",
"蒋 景倩",
"蒋 ",
"沈 艺芯",
"沈 彦龙",
"沈 君昊",
"沈 子扬",
"沈 雨晴",
"沈 元起",
"沈 威璜",
"沈 梓云",
"沈 伯奢",
"沈 子伯",
"沈 公威",
"沈 曜岩",
"沈 昕阳",
"沈 晨欣",
"沈 世元",
"沈 子远",
"沈 元宗",
"沈 子义",
"沈 仲宣",
"沈 孔休",
"沈 义权",
"沈 文向",
"沈 佐治",
"沈 文则",
"沈 文谦",
"沈 子布",
"沈 文远",
"沈 康成",
"沈 士会",
"沈 正礼",
"沈 孝连",
"沈 彦靖",
"沈 玄风",
"沈 威彦",
"沈 子许",
"沈 文祥",
"沈 梦雯",
"沈 悦菲",
"沈 予馨",
"沈 婧馨",
"沈 婷儿",
"沈 圣楠",
"沈 芷欣",
"沈 心怡",
"沈 乐彤",
"沈 靖瑶",
"沈 艺婷",
"沈 樱璐",
"沈 婉琳",
"沈 婉儿",
"沈 倩儿",
"沈 蝶莺",
"沈 紫婉",
"沈 伯玉",
"沈 盛衡",
"沈 承伯",
"沈 子雍",
"沈 元伯",
"沈 元泰",
"沈 景兴",
"沈 子均",
"沈 文举",
"沈 子安",
"沈 仲达",
"沈 思然",
"沈 子昂",
"沈 子明",
"沈 子初",
"沈 文师",
"沈 世英",
"沈 敬达",
"沈 公昭",
"沈 文先",
"沈 文则",
"沈 温舒",
"沈 子正",
"沈 君肃",
"沈 彦英",
"沈 文进",
"沈 季宁",
"沈 孔璋",
"沈 元龙",
"沈 公台",
"沈 元悌",
"沈 文奥",
"沈 玄伯",
"沈 元方",
"沈 敬宗",
"沈 子烈",
"沈 元耀",
"沈 温伯",
"沈 公玮",
"沈 长文",
"沈 孔和",
"沈 文和",
"沈 恭夏",
"沈 文公",
"沈 曼倩",
"沈 文若",
"沈 景倩",
"沈 ",
"韩 艺芯",
"韩 彦龙",
"韩 君昊",
"韩 子扬",
"韩 雨晴",
"韩 元起",
"韩 威璜",
"韩 梓云",
"韩 伯奢",
"韩 子伯",
"韩 公威",
"韩 曜岩",
"韩 昕阳",
"韩 晨欣",
"韩 世元",
"韩 子远",
"韩 元宗",
"韩 子义",
"韩 仲宣",
"韩 孔休",
"韩 义权",
"韩 文向",
"韩 佐治",
"韩 文则",
"韩 文谦",
"韩 子布",
"韩 文远",
"韩 康成",
"韩 士会",
"韩 正礼",
"韩 孝连",
"韩 彦靖",
"韩 玄风",
"韩 威彦",
"韩 子许",
"韩 文祥",
"韩 梦雯",
"韩 悦菲",
"韩 予馨",
"韩 婧馨",
"韩 婷儿",
"韩 圣楠",
"韩 芷欣",
"韩 心怡",
"韩 乐彤",
"韩 靖瑶",
"韩 艺婷",
"韩 樱璐",
"韩 婉琳",
"韩 婉儿",
"韩 倩儿",
"韩 蝶莺",
"韩 紫婉",
"韩 伯玉",
"韩 盛衡",
"韩 承伯",
"韩 子雍",
"韩 元伯",
"韩 元泰",
"韩 景兴",
"韩 子均",
"韩 文举",
"韩 子安",
"韩 仲达",
"韩 思然",
"韩 子昂",
"韩 子明",
"韩 子初",
"韩 文师",
"韩 世英",
"韩 敬达",
"韩 公昭",
"韩 文先",
"韩 文则",
"韩 温舒",
"韩 子正",
"韩 君肃",
"韩 彦英",
"韩 文进",
"韩 季宁",
"韩 孔璋",
"韩 元龙",
"韩 公台",
"韩 元悌",
"韩 文奥",
"韩 玄伯",
"韩 元方",
"韩 敬宗",
"韩 子烈",
"韩 元耀",
"韩 温伯",
"韩 公玮",
"韩 长文",
"韩 孔和",
"韩 文和",
"韩 恭夏",
"韩 文公",
"韩 曼倩",
"韩 文若",
"韩 景倩",
"韩 ",
"杨 艺芯",
"杨 彦龙",
"杨 君昊",
"杨 子扬",
"杨 雨晴",
"杨 元起",
"杨 威璜",
"杨 梓云",
"杨 伯奢",
"杨 子伯",
"杨 公威",
"杨 曜岩",
"杨 昕阳",
"杨 晨欣",
"杨 世元",
"杨 子远",
"杨 元宗",
"杨 子义",
"杨 仲宣",
"杨 孔休",
"杨 义权",
"杨 文向",
"杨 佐治",
"杨 文则",
"杨 文谦",
"杨 子布",
"杨 文远",
"杨 康成",
"杨 士会",
"杨 正礼",
"杨 孝连",
"杨 彦靖",
"杨 玄风",
"杨 威彦",
"杨 子许",
"杨 文祥",
"杨 梦雯",
"杨 悦菲",
"杨 予馨",
"杨 婧馨",
"杨 婷儿",
"杨 圣楠",
"杨 芷欣",
"杨 心怡",
"杨 乐彤",
"杨 靖瑶",
"杨 艺婷",
"杨 樱璐",
"杨 婉琳",
"杨 婉儿",
"杨 倩儿",
"杨 蝶莺",
"杨 紫婉",
"杨 伯玉",
"杨 盛衡",
"杨 承伯",
"杨 子雍",
"杨 元伯",
"杨 元泰",
"杨 景兴",
"杨 子均",
"杨 文举",
"杨 子安",
"杨 仲达",
"杨 思然",
"杨 子昂",
"杨 子明",
"杨 子初",
"杨 文师",
"杨 世英",
"杨 敬达",
"杨 公昭",
"杨 文先",
"杨 文则",
"杨 温舒",
"杨 子正",
"杨 君肃",
"杨 彦英",
"杨 文进",
"杨 季宁",
"杨 孔璋",
"杨 元龙",
"杨 公台",
"杨 元悌",
"杨 文奥",
"杨 玄伯",
"杨 元方",
"杨 敬宗",
"杨 子烈",
"杨 元耀",
"杨 温伯",
"杨 公玮",
"杨 长文",
"杨 孔和",
"杨 文和",
"杨 恭夏",
"杨 文公",
"杨 曼倩",
"杨 文若",
"杨 景倩",
"杨 ",
"朱 艺芯",
"朱 彦龙",
"朱 君昊",
"朱 子扬",
"朱 雨晴",
"朱 元起",
"朱 威璜",
"朱 梓云",
"朱 伯奢",
"朱 子伯",
"朱 公威",
"朱 曜岩",
"朱 昕阳",
"朱 晨欣",
"朱 世元",
"朱 子远",
"朱 元宗",
"朱 子义",
"朱 仲宣",
"朱 孔休",
"朱 义权",
"朱 文向",
"朱 佐治",
"朱 文则",
"朱 文谦",
"朱 子布",
"朱 文远",
"朱 康成",
"朱 士会",
"朱 正礼",
"朱 孝连",
"朱 彦靖",
"朱 玄风",
"朱 威彦",
"朱 子许",
"朱 文祥",
"朱 梦雯",
"朱 悦菲",
"朱 予馨",
"朱 婧馨",
"朱 婷儿",
"朱 圣楠",
"朱 芷欣",
"朱 心怡",
"朱 乐彤",
"朱 靖瑶",
"朱 艺婷",
"朱 樱璐",
"朱 婉琳",
"朱 婉儿",
"朱 倩儿",
"朱 蝶莺",
"朱 紫婉",
"朱 伯玉",
"朱 盛衡",
"朱 承伯",
"朱 子雍",
"朱 元伯",
"朱 元泰",
"朱 景兴",
"朱 子均",
"朱 文举",
"朱 子安",
"朱 仲达",
"朱 思然",
"朱 子昂",
"朱 子明",
"朱 子初",
"朱 文师",
"朱 世英",
"朱 敬达",
"朱 公昭",
"朱 文先",
"朱 文则",
"朱 温舒",
"朱 子正",
"朱 君肃",
"朱 彦英",
"朱 文进",
"朱 季宁",
"朱 孔璋",
"朱 元龙",
"朱 公台",
"朱 元悌",
"朱 文奥",
"朱 玄伯",
"朱 元方",
"朱 敬宗",
"朱 子烈",
"朱 元耀",
"朱 温伯",
"朱 公玮",
"朱 长文",
"朱 孔和",
"朱 文和",
"朱 恭夏",
"朱 文公",
"朱 曼倩",
"朱 文若",
"朱 景倩",
"朱 ",
"秦 艺芯",
"秦 彦龙",
"秦 君昊",
"秦 子扬",
"秦 雨晴",
"秦 元起",
"秦 威璜",
"秦 梓云",
"秦 伯奢",
"秦 子伯",
"秦 公威",
"秦 曜岩",
"秦 昕阳",
"秦 晨欣",
"秦 世元",
"秦 子远",
"秦 元宗",
"秦 子义",
"秦 仲宣",
"秦 孔休",
"秦 义权",
"秦 文向",
"秦 佐治",
"秦 文则",
"秦 文谦",
"秦 子布",
"秦 文远",
"秦 康成",
"秦 士会",
"秦 正礼",
"秦 孝连",
"秦 彦靖",
"秦 玄风",
"秦 威彦",
"秦 子许",
"秦 文祥",
"秦 梦雯",
"秦 悦菲",
"秦 予馨",
"秦 婧馨",
"秦 婷儿",
"秦 圣楠",
"秦 芷欣",
"秦 心怡",
"秦 乐彤",
"秦 靖瑶",
"秦 艺婷",
"秦 樱璐",
"秦 婉琳",
"秦 婉儿",
"秦 倩儿",
"秦 蝶莺",
"秦 紫婉",
"秦 伯玉",
"秦 盛衡",
"秦 承伯",
"秦 子雍",
"秦 元伯",
"秦 元泰",
"秦 景兴",
"秦 子均",
"秦 文举",
"秦 子安",
"秦 仲达",
"秦 思然",
"秦 子昂",
"秦 子明",
"秦 子初",
"秦 文师",
"秦 世英",
"秦 敬达",
"秦 公昭",
"秦 文先",
"秦 文则",
"秦 温舒",
"秦 子正",
"秦 君肃",
"秦 彦英",
"秦 文进",
"秦 季宁",
"秦 孔璋",
"秦 元龙",
"秦 公台",
"秦 元悌",
"秦 文奥",
"秦 玄伯",
"秦 元方",
"秦 敬宗",
"秦 子烈",
"秦 元耀",
"秦 温伯",
"秦 公玮",
"秦 长文",
"秦 孔和",
"秦 文和",
"秦 恭夏",
"秦 文公",
"秦 曼倩",
"秦 文若",
"秦 景倩",
"秦 ",
"许艺芯",
"许彦龙",
"许君昊",
"许子扬",
"许雨晴",
"许元起",
"许威璜",
"许梓云",
"许伯奢",
"许子伯",
"许公威",
"许曜岩",
"许昕阳",
"许晨欣",
"许世元",
"许子远",
"许元宗",
"许子义",
"许仲宣",
"许孔休",
"许义权",
"许文向",
"许佐治",
"许文则",
"许文谦",
"许子布",
"许文远",
"许康成",
"许士会",
"许正礼",
"许孝连",
"许彦靖",
"许玄风",
"许威彦",
"许子许",
"许文祥",
"许梦雯",
"许悦菲",
"许予馨",
"许婧馨",
"许婷儿",
"许圣楠",
"许芷欣",
"许心怡",
"许乐彤",
"许靖瑶",
"许艺婷",
"许樱璐",
"许婉琳",
"许婉儿",
"许倩儿",
"许蝶莺",
"许紫婉",
"许伯玉",
"许盛衡",
"许承伯",
"许子雍",
"许元伯",
"许元泰",
"许景兴",
"许子均",
"许文举",
"许子安",
"许仲达",
"许思然",
"许子昂",
"许子明",
"许子初",
"许文师",
"许世英",
"许敬达",
"许公昭",
"许文先",
"许文则",
"许温舒",
"许子正",
"许君肃",
"许彦英",
"许文进",
"许季宁",
"许孔璋",
"许元龙",
"许公台",
"许元悌",
"许文奥",
"许玄伯",
"许元方",
"许敬宗",
"许子烈",
"许元耀",
"许温伯",
"许公玮",
"许长文",
"许孔和",
"许文和",
"许恭夏",
"许文公",
"许曼倩",
"许文若",
"许景倩",
"许 ",
"何 艺芯",
"何 彦龙",
"何 君昊",
"何 子扬",
"何 雨晴",
"何 元起",
"何 威璜",
"何 梓云",
"何 伯奢",
"何 子伯",
"何 公威",
"何 曜岩",
"何 昕阳",
"何 晨欣",
"何 世元",
"何 子远",
"何 元宗",
"何 子义",
"何 仲宣",
"何 孔休",
"何 义权",
"何 文向",
"何 佐治",
"何 文则",
"何 文谦",
"何 子布",
"何 文远",
"何 康成",
"何 士会",
"何 正礼",
"何 孝连",
"何 彦靖",
"何 玄风",
"何 威彦",
"何 子许",
"何 文祥",
"何 梦雯",
"何 悦菲",
"何 予馨",
"何 婧馨",
"何 婷儿",
"何 圣楠",
"何 芷欣",
"何 心怡",
"何 乐彤",
"何 靖瑶",
"何 艺婷",
"何 樱璐",
"何 婉琳",
"何 婉儿",
"何 倩儿",
"何 蝶莺",
"何 紫婉",
"何 伯玉",
"何 盛衡",
"何 承伯",
"何 子雍",
"何 元伯",
"何 元泰",
"何 景兴",
"何 子均",
"何 文举",
"何 子安",
"何 仲达",
"何 思然",
"何 子昂",
"何 子明",
"何 子初",
"何 文师",
"何 世英",
"何 敬达",
"何 公昭",
"何 文先",
"何 文则",
"何 温舒",
"何 子正",
"何 君肃",
"何 彦英",
"何 文进",
"何 季宁",
"何 孔璋",
"何 元龙",
"何 公台",
"何 元悌",
"何 文奥",
"何 玄伯",
"何 元方",
"何 敬宗",
"何 子烈",
"何 元耀",
"何 温伯",
"何 公玮",
"何 长文",
"何 孔和",
"何 文和",
"何 恭夏",
"何 文公",
"何 曼倩",
"何 文若",
"何 景倩",
"何 ",
"吕 艺芯",
"吕 彦龙",
"吕 君昊",
"吕 子扬",
"吕 雨晴",
"吕 元起",
"吕 威璜",
"吕 梓云",
"吕 伯奢",
"吕 子伯",
"吕 公威",
"吕 曜岩",
"吕 昕阳",
"吕 晨欣",
"吕 世元",
"吕 子远",
"吕 元宗",
"吕 子义",
"吕 仲宣",
"吕 孔休",
"吕 义权",
"吕 文向",
"吕 佐治",
"吕 文则",
"吕 文谦",
"吕 子布",
"吕 文远",
"吕 康成",
"吕 士会",
"吕 正礼",
"吕 孝连",
"吕 彦靖",
"吕 玄风",
"吕 威彦",
"吕 子许",
"吕 文祥",
"吕 梦雯",
"吕 悦菲",
"吕 予馨",
"吕 婧馨",
"吕 婷儿",
"吕 圣楠",
"吕 芷欣",
"吕 心怡",
"吕 乐彤",
"吕 靖瑶",
"吕 艺婷",
"吕 樱璐",
"吕 婉琳",
"吕 婉儿",
"吕 倩儿",
"吕 蝶莺",
"吕 紫婉",
"吕 伯玉",
"吕 盛衡",
"吕 承伯",
"吕 子雍",
"吕 元伯",
"吕 元泰",
"吕 景兴",
"吕 子均",
"吕 文举",
"吕 子安",
"吕 仲达",
"吕 思然",
"吕 子昂",
"吕 子明",
"吕 子初",
"吕 文师",
"吕 世英",
"吕 敬达",
"吕 公昭",
"吕 文先",
"吕 文则",
"吕 温舒",
"吕 子正",
"吕 君肃",
"吕 彦英",
"吕 文进",
"吕 季宁",
"吕 孔璋",
"吕 元龙",
"吕 公台",
"吕 元悌",
"吕 文奥",
"吕 玄伯",
"吕 元方",
"吕 敬宗",
"吕 子烈",
"吕 元耀",
"吕 温伯",
"吕 公玮",
"吕 长文",
"吕 孔和",
"吕 文和",
"吕 恭夏",
"吕 文公",
"吕 曼倩",
"吕 文若",
"吕 景倩",
"吕 ",
"施 艺芯",
"施 彦龙",
"施 君昊",
"施 子扬",
"施 雨晴",
"施 元起",
"施 威璜",
"施 梓云",
"施 伯奢",
"施 子伯",
"施 公威",
"施 曜岩",
"施 昕阳",
"施 晨欣",
"施 世元",
"施 子远",
"施 元宗",
"施 子义",
"施 仲宣",
"施 孔休",
"施 义权",
"施 文向",
"施 佐治",
"施 文则",
"施 文谦",
"施 子布",
"施 文远",
"施 康成",
"施 士会",
"施 正礼",
"施 孝连",
"施 彦靖",
"施 玄风",
"施 威彦",
"施 子许",
"施 文祥",
"施 梦雯",
"施 悦菲",
"施 予馨",
"施 婧馨",
"施 婷儿",
"施 圣楠",
"施 芷欣",
"施 心怡",
"施 乐彤",
"施 靖瑶",
"施 艺婷",
"施 樱璐",
"施 婉琳",
"施 婉儿",
"施 倩儿",
"施 蝶莺",
"施 紫婉",
"施 伯玉",
"施 盛衡",
"施 承伯",
"施 子雍",
"施 元伯",
"施 元泰",
"施 景兴",
"施 子均",
"施 文举",
"施 子安",
"施 仲达",
"施 思然",
"施 子昂",
"施 子明",
"施 子初",
"施 文师",
"施 世英",
"施 敬达",
"施 公昭",
"施 文先",
"施 文则",
"施 温舒",
"施 子正",
"施 君肃",
"施 彦英",
"施 文进",
"施 季宁",
"施 孔璋",
"施 元龙",
"施 公台",
"施 元悌",
"施 文奥",
"施 玄伯",
"施 元方",
"施 敬宗",
"施 子烈",
"施 元耀",
"施 温伯",
"施 公玮",
"施 长文",
"施 孔和",
"施 文和",
"施 恭夏",
"施 文公",
"施 曼倩",
"施 文若",
"施 景倩",
"施 ",
"张 艺芯",
"张 彦龙",
"张 君昊",
"张 子扬",
"张 雨晴",
"张 元起",
"张 威璜",
"张 梓云",
"张 伯奢",
"张 子伯",
"张 公威",
"张 曜岩",
"张 昕阳",
"张 晨欣",
"张 世元",
"张 子远",
"张 元宗",
"张 子义",
"张 仲宣",
"张 孔休",
"张 义权",
"张 文向",
"张 佐治",
"张 文则",
"张 文谦",
"张 子布",
"张 文远",
"张 康成",
"张 士会",
"张 正礼",
"张 孝连",
"张 彦靖",
"张 玄风",
"张 威彦",
"张 子许",
"张 文祥",
"张 梦雯",
"张 悦菲",
"张 予馨",
"张 婧馨",
"张 婷儿",
"张 圣楠",
"张 芷欣",
"张 心怡",
"张 乐彤",
"张 靖瑶",
"张 艺婷",
"张 樱璐",
"张 婉琳",
"张 婉儿",
"张 倩儿",
"张 蝶莺",
"张 紫婉",
"张 伯玉",
"张 盛衡",
"张 承伯",
"张 子雍",
"张 元伯",
"张 元泰",
"张 景兴",
"张 子均",
"张 文举",
"张 子安",
"张 仲达",
"张 思然",
"张 子昂",
"张 子明",
"张 子初",
"张 文师",
"张 世英",
"张 敬达",
"张 公昭",
"张 文先",
"张 文则",
"张 温舒",
"张 子正",
"张 君肃",
"张 彦英",
"张 文进",
"张 季宁",
"张 孔璋",
"张 元龙",
"张 公台",
"张 元悌",
"张 文奥",
"张 玄伯",
"张 元方",
"张 敬宗",
"张 子烈",
"张 元耀",
"张 温伯",
"张 公玮",
"张 长文",
"张 孔和",
"张 文和",
"张 恭夏",
"张 文公",
"张 曼倩",
"张 文若",
"张 景倩",
"张 ",
"孔 艺芯",
"孔 彦龙",
"孔 君昊",
"孔 子扬",
"孔 雨晴",
"孔 元起",
"孔 威璜",
"孔 梓云",
"孔 伯奢",
"孔 子伯",
"孔 公威",
"孔 曜岩",
"孔 昕阳",
"孔 晨欣",
"孔 世元",
"孔 子远",
"孔 元宗",
"孔 子义",
"孔 仲宣",
"孔 孔休",
"孔 义权",
"孔 文向",
"孔 佐治",
"孔 文则",
"孔 文谦",
"孔 子布",
"孔 文远",
"孔 康成",
"孔 士会",
"孔 正礼",
"孔 孝连",
"孔 彦靖",
"孔 玄风",
"孔 威彦",
"孔 子许",
"孔 文祥",
"孔 梦雯",
"孔 悦菲",
"孔 予馨",
"孔 婧馨",
"孔 婷儿",
"孔 圣楠",
"孔 芷欣",
"孔 心怡",
"孔 乐彤",
"孔 靖瑶",
"孔 艺婷",
"孔 樱璐",
"孔 婉琳",
"孔 婉儿",
"孔 倩儿",
"孔 蝶莺",
"孔 紫婉",
"孔 伯玉",
"孔 盛衡",
"孔 承伯",
"孔 子雍",
"孔 元伯",
"孔 元泰",
"孔 景兴",
"孔 子均",
"孔 文举",
"孔 子安",
"孔 仲达",
"孔 思然",
"孔 子昂",
"孔 子明",
"孔 子初",
"孔 文师",
"孔 世英",
"孔 敬达",
"孔 公昭",
"孔 文先",
"孔 文则",
"孔 温舒",
"孔 子正",
"孔 君肃",
"孔 彦英",
"孔 文进",
"孔 季宁",
"孔 孔璋",
"孔 元龙",
"孔 公台",
"孔 元悌",
"孔 文奥",
"孔 玄伯",
"孔 元方",
"孔 敬宗",
"孔 子烈",
"孔 元耀",
"孔 温伯",
"孔 公玮",
"孔 长文",
"孔 孔和",
"孔 文和",
"孔 恭夏",
"孔 文公",
"孔 曼倩",
"孔 文若",
"孔 景倩",
"孔 ",
"曹 艺芯",
"曹 彦龙",
"曹 君昊",
"曹 子扬",
"曹 雨晴",
"曹 元起",
"曹 威璜",
"曹 梓云",
"曹 伯奢",
"曹 子伯",
"曹 公威",
"曹 曜岩",
"曹 昕阳",
"曹 晨欣",
"曹 世元",
"曹 子远",
"曹 元宗",
"曹 子义",
"曹 仲宣",
"曹 孔休",
"曹 义权",
"曹 文向",
"曹 佐治",
"曹 文则",
"曹 文谦",
"曹 子布",
"曹 文远",
"曹 康成",
"曹 士会",
"曹 正礼",
"曹 孝连",
"曹 彦靖",
"曹 玄风",
"曹 威彦",
"曹 子许",
"曹 文祥",
"曹 梦雯",
"曹 悦菲",
"曹 予馨",
"曹 婧馨",
"曹 婷儿",
"曹 圣楠",
"曹 芷欣",
"曹 心怡",
"曹 乐彤",
"曹 靖瑶",
"曹 艺婷",
"曹 樱璐",
"曹 婉琳",
"曹 婉儿",
"曹 倩儿",
"曹 蝶莺",
"曹 紫婉",
"曹 伯玉",
"曹 盛衡",
"曹 承伯",
"曹 子雍",
"曹 元伯",
"曹 元泰",
"曹 景兴",
"曹 子均",
"曹 文举",
"曹 子安",
"曹 仲达",
"曹 思然",
"曹 子昂",
"曹 子明",
"曹 子初",
"曹 文师",
"曹 世英",
"曹 敬达",
"曹 公昭",
"曹 文先",
"曹 文则",
"曹 温舒",
"曹 子正",
"曹 君肃",
"曹 彦英",
"曹 文进",
"曹 季宁",
"曹 孔璋",
"曹 元龙",
"曹 公台",
"曹 元悌",
"曹 文奥",
"曹 玄伯",
"曹 元方",
"曹 敬宗",
"曹 子烈",
"曹 元耀",
"曹 温伯",
"曹 公玮",
"曹 长文",
"曹 孔和",
"曹 文和",
"曹 恭夏",
"曹 文公",
"曹 曼倩",
"曹 文若",
"曹 景倩",
"曹 ",
"严 艺芯",
"严 彦龙",
"严 君昊",
"严 子扬",
"严 雨晴",
"严 元起",
"严 威璜",
"严 梓云",
"严 伯奢",
"严 子伯",
"严 公威",
"严 曜岩",
"严 昕阳",
"严 晨欣",
"严 世元",
"严 子远",
"严 元宗",
"严 子义",
"严 仲宣",
"严 孔休",
"严 义权",
"严 文向",
"严 佐治",
"严 文则",
"严 文谦",
"严 子布",
"严 文远",
"严 康成",
"严 士会",
"严 正礼",
"严 孝连",
"严 彦靖",
"严 玄风",
"严 威彦",
"严 子许",
"严 文祥",
"严 梦雯",
"严 悦菲",
"严 予馨",
"严 婧馨",
"严 婷儿",
"严 圣楠",
"严 芷欣",
"严 心怡",
"严 乐彤",
"严 靖瑶",
"严 艺婷",
"严 樱璐",
"严 婉琳",
"严 婉儿",
"严 倩儿",
"严 蝶莺",
"严 紫婉",
"严 伯玉",
"严 盛衡",
"严 承伯",
"严 子雍",
"严 元伯",
"严 元泰",
"严 景兴",
"严 子均",
"严 文举",
"严 子安",
"严 仲达",
"严 思然",
"严 子昂",
"严 子明",
"严 子初",
"严 文师",
"严 世英",
"严 敬达",
"严 公昭",
"严 文先",
"严 文则",
"严 温舒",
"严 子正",
"严 君肃",
"严 彦英",
"严 文进",
"严 季宁",
"严 孔璋",
"严 元龙",
"严 公台",
"严 元悌",
"严 文奥",
"严 玄伯",
"严 元方",
"严 敬宗",
"严 子烈",
"严 元耀",
"严 温伯",
"严 公玮",
"严 长文",
"严 孔和",
"严 文和",
"严 恭夏",
"严 文公",
"严 曼倩",
"严 文若",
"严 景倩",
"严 ",
"华 艺芯",
"华 彦龙",
"华 君昊",
"华 子扬",
"华 雨晴",
"华 元起",
"华 威璜",
"华 梓云",
"华 伯奢",
"华 子伯",
"华 公威",
"华 曜岩",
"华 昕阳",
"华 晨欣",
"华 世元",
"华 子远",
"华 元宗",
"华 子义",
"华 仲宣",
"华 孔休",
"华 义权",
"华 文向",
"华 佐治",
"华 文则",
"华 文谦",
"华 子布",
"华 文远",
"华 康成",
"华 士会",
"华 正礼",
"华 孝连",
"华 彦靖",
"华 玄风",
"华 威彦",
"华 子许",
"华 文祥",
"华 梦雯",
"华 悦菲",
"华 予馨",
"华 婧馨",
"华 婷儿",
"华 圣楠",
"华 芷欣",
"华 心怡",
"华 乐彤",
"华 靖瑶",
"华 艺婷",
"华 樱璐",
"华 婉琳",
"华 婉儿",
"华 倩儿",
"华 蝶莺",
"华 紫婉",
"华 伯玉",
"华 盛衡",
"华 承伯",
"华 子雍",
"华 元伯",
"华 元泰",
"华 景兴",
"华 子均",
"华 文举",
"华 子安",
"华 仲达",
"华 思然",
"华 子昂",
"华 子明",
"华 子初",
"华 文师",
"华 世英",
"华 敬达",
"华 公昭",
"华 文先",
"华 文则",
"华 温舒",
"华 子正",
"华 君肃",
"华 彦英",
"华 文进",
"华 季宁",
"华 孔璋",
"华 元龙",
"华 公台",
"华 元悌",
"华 文奥",
"华 玄伯",
"华 元方",
"华 敬宗",
"华 子烈",
"华 元耀",
"华 温伯",
"华 公玮",
"华 长文",
"华 孔和",
"华 文和",
"华 恭夏",
"华 文公",
"华 曼倩",
"华 文若",
"华 景倩",
"华 ",
"金 艺芯",
"金 彦龙",
"金 君昊",
"金 子扬",
"金 雨晴",
"金 元起",
"金 威璜",
"金 梓云",
"金 伯奢",
"金 子伯",
"金 公威",
"金 曜岩",
"金 昕阳",
"金 晨欣",
"金 世元",
"金 子远",
"金 元宗",
"金 子义",
"金 仲宣",
"金 孔休",
"金 义权",
"金 文向",
"金 佐治",
"金 文则",
"金 文谦",
"金 子布",
"金 文远",
"金 康成",
"金 士会",
"金 正礼",
"金 孝连",
"金 彦靖",
"金 玄风",
"金 威彦",
"金 子许",
"金 文祥",
"金 梦雯",
"金 悦菲",
"金 予馨",
"金 婧馨",
"金 婷儿",
"金 圣楠",
"金 芷欣",
"金 心怡",
"金 乐彤",
"金 靖瑶",
"金 艺婷",
"金 樱璐",
"金 婉琳",
"金 婉儿",
"金 倩儿",
"金 蝶莺",
"金 紫婉",
"金 伯玉",
"金 盛衡",
"金 承伯",
"金 子雍",
"金 元伯",
"金 元泰",
"金 景兴",
"金 子均",
"金 文举",
"金 子安",
"金 仲达",
"金 思然",
"金 子昂",
"金 子明",
"金 子初",
"金 文师",
"金 世英",
"金 敬达",
"金 公昭",
"金 文先",
"金 文则",
"金 温舒",
"金 子正",
"金 君肃",
"金 彦英",
"金 文进",
"金 季宁",
"金 孔璋",
"金 元龙",
"金 公台",
"金 元悌",
"金 文奥",
"金 玄伯",
"金 元方",
"金 敬宗",
"金 子烈",
"金 元耀",
"金 温伯",
"金 公玮",
"金 长文",
"金 孔和",
"金 文和",
"金 恭夏",
"金 文公",
"金 曼倩",
"金 文若",
"金 景倩",
"金 ",
"魏 艺芯",
"魏 彦龙",
"魏 君昊",
"魏 子扬",
"魏 雨晴",
"魏 元起",
"魏 威璜",
"魏 梓云",
"魏 伯奢",
"魏 子伯",
"魏 公威",
"魏 曜岩",
"魏 昕阳",
"魏 晨欣",
"魏 世元",
"魏 子远",
"魏 元宗",
"魏 子义",
"魏 仲宣",
"魏 孔休",
"魏 义权",
"魏 文向",
"魏 佐治",
"魏 文则",
"魏 文谦",
"魏 子布",
"魏 文远",
"魏 康成",
"魏 士会",
"魏 正礼",
"魏 孝连",
"魏 彦靖",
"魏 玄风",
"魏 威彦",
"魏 子许",
"魏 文祥",
"魏 梦雯",
"魏 悦菲",
"魏 予馨",
"魏 婧馨",
"魏 婷儿",
"魏 圣楠",
"魏 芷欣",
"魏 心怡",
"魏 乐彤",
"魏 靖瑶",
"魏 艺婷",
"魏 樱璐",
"魏 婉琳",
"魏 婉儿",
"魏 倩儿",
"魏 蝶莺",
"魏 紫婉",
"魏 伯玉",
"魏 盛衡",
"魏 承伯",
"魏 子雍",
"魏 元伯",
"魏 元泰",
"魏 景兴",
"魏 子均",
"魏 文举",
"魏 子安",
"魏 仲达",
"魏 思然",
"魏 子昂",
"魏 子明",
"魏 子初",
"魏 文师",
"魏 世英",
"魏 敬达",
"魏 公昭",
"魏 文先",
"魏 文则",
"魏 温舒",
"魏 子正",
"魏 君肃",
"魏 彦英",
"魏 文进",
"魏 季宁",
"魏 孔璋",
"魏 元龙",
"魏 公台",
"魏 元悌",
"魏 文奥",
"魏 玄伯",
"魏 元方",
"魏 敬宗",
"魏 子烈",
"魏 元耀",
"魏 温伯",
"魏 公玮",
"魏 长文",
"魏 孔和",
"魏 文和",
"魏 恭夏",
"魏 文公",
"魏 曼倩",
"魏 文若",
"魏 景倩",
"魏 ",
"陶 艺芯",
"陶 彦龙",
"陶 君昊",
"陶 子扬",
"陶 雨晴",
"陶 元起",
"陶 威璜",
"陶 梓云",
"陶 伯奢",
"陶 子伯",
"陶 公威",
"陶 曜岩",
"陶 昕阳",
"陶 晨欣",
"陶 世元",
"陶 子远",
"陶 元宗",
"陶 子义",
"陶 仲宣",
"陶 孔休",
"陶 义权",
"陶 文向",
"陶 佐治",
"陶 文则",
"陶 文谦",
"陶 子布",
"陶 文远",
"陶 康成",
"陶 士会",
"陶 正礼",
"陶 孝连",
"陶 彦靖",
"陶 玄风",
"陶 威彦",
"陶 子许",
"陶 文祥",
"陶 梦雯",
"陶 悦菲",
"陶 予馨",
"陶 婧馨",
"陶 婷儿",
"陶 圣楠",
"陶 芷欣",
"陶 心怡",
"陶 乐彤",
"陶 靖瑶",
"陶 艺婷",
"陶 樱璐",
"陶 婉琳",
"陶 婉儿",
"陶 倩儿",
"陶 蝶莺",
"陶 紫婉",
"陶 伯玉",
"陶 盛衡",
"陶 承伯",
"陶 子雍",
"陶 元伯",
"陶 元泰",
"陶 景兴",
"陶 子均",
"陶 文举",
"陶 子安",
"陶 仲达",
"陶 思然",
"陶 子昂",
"陶 子明",
"陶 子初",
"陶 文师",
"陶 世英",
"陶 敬达",
"陶 公昭",
"陶 文先",
"陶 文则",
"陶 温舒",
"陶 子正",
"陶 君肃",
"陶 彦英",
"陶 文进",
"陶 季宁",
"陶 孔璋",
"陶 元龙",
"陶 公台",
"陶 元悌",
"陶 文奥",
"陶 玄伯",
"陶 元方",
"陶 敬宗",
"陶 子烈",
"陶 元耀",
"陶 温伯",
"陶 公玮",
"陶 长文",
"陶 孔和",
"陶 文和",
"陶 恭夏",
"陶 文公",
"陶 曼倩",
"陶 文若",
"陶 景倩",
"陶 ",
"姜 艺芯",
"姜 彦龙",
"姜 君昊",
"姜 子扬",
"姜 雨晴",
"姜 元起",
"姜 威璜",
"姜 梓云",
"姜 伯奢",
"姜 子伯",
"姜 公威",
"姜 曜岩",
"姜 昕阳",
"姜 晨欣",
"姜 世元",
"姜 子远",
"姜 元宗",
"姜 子义",
"姜 仲宣",
"姜 孔休",
"姜 义权",
"姜 文向",
"姜 佐治",
"姜 文则",
"姜 文谦",
"姜 子布",
"姜 文远",
"姜 康成",
"姜 士会",
"姜 正礼",
"姜 孝连",
"姜 彦靖",
"姜 玄风",
"姜 威彦",
"姜 子许",
"姜 文祥",
"姜 梦雯",
"姜 悦菲",
"姜 予馨",
"姜 婧馨",
"姜 婷儿",
"姜 圣楠",
"姜 芷欣",
"姜 心怡",
"姜 乐彤",
"姜 靖瑶",
"姜 艺婷",
"姜 樱璐",
"姜 婉琳",
"姜 婉儿",
"姜 倩儿",
"姜 蝶莺",
"姜 紫婉",
"姜 伯玉",
"姜 盛衡",
"姜 承伯",
"姜 子雍",
"姜 元伯",
"姜 元泰",
"姜 景兴",
"姜 子均",
"姜 文举",
"姜 子安",
"姜 仲达",
"姜 思然",
"姜 子昂",
"姜 子明",
"姜 子初",
"姜 文师",
"姜 世英",
"姜 敬达",
"姜 公昭",
"姜 文先",
"姜 文则",
"姜 温舒",
"姜 子正",
"姜 君肃",
"姜 彦英",
"姜 文进",
"姜 季宁",
"姜 孔璋",
"姜 元龙",
"姜 公台",
"姜 元悌",
"姜 文奥",
"姜 玄伯",
"姜 元方",
"姜 敬宗",
"姜 子烈",
"姜 元耀",
"姜 温伯",
"姜 公玮",
"姜 长文",
"姜 孔和",
"姜 文和",
"姜 恭夏",
"姜 文公",
"姜 曼倩",
"姜 文若",
"姜 景倩",
"姜 ",
"谢 艺芯",
"谢 彦龙",
"谢 君昊",
"谢 子扬",
"谢 雨晴",
"谢 元起",
"谢 威璜",
"谢 梓云",
"谢 伯奢",
"谢 子伯",
"谢 公威",
"谢 曜岩",
"谢 昕阳",
"谢 晨欣",
"谢 世元",
"谢 子远",
"谢 元宗",
"谢 子义",
"谢 仲宣",
"谢 孔休",
"谢 义权",
"谢 文向",
"谢 佐治",
"谢 文则",
"谢 文谦",
"谢 子布",
"谢 文远",
"谢 康成",
"谢 士会",
"谢 正礼",
"谢 孝连",
"谢 彦靖",
"谢 玄风",
"谢 威彦",
"谢 子许",
"谢 文祥",
"谢 梦雯",
"谢 悦菲",
"谢 予馨",
"谢 婧馨",
"谢 婷儿",
"谢 圣楠",
"谢 芷欣",
"谢 心怡",
"谢 乐彤",
"谢 靖瑶",
"谢 艺婷",
"谢 樱璐",
"谢 婉琳",
"谢 婉儿",
"谢 倩儿",
"谢 蝶莺",
"谢 紫婉",
"谢 伯玉",
"谢 盛衡",
"谢 承伯",
"谢 子雍",
"谢 元伯",
"谢 元泰",
"谢 景兴",
"谢 子均",
"谢 文举",
"谢 子安",
"谢 仲达",
"谢 思然",
"谢 子昂",
"谢 子明",
"谢 子初",
"谢 文师",
"谢 世英",
"谢 敬达",
"谢 公昭",
"谢 文先",
"谢 文则",
"谢 温舒",
"谢 子正",
"谢 君肃",
"谢 彦英",
"谢 文进",
"谢 季宁",
"谢 孔璋",
"谢 元龙",
"谢 公台",
"谢 元悌",
"谢 文奥",
"谢 玄伯",
"谢 元方",
"谢 敬宗",
"谢 子烈",
"谢 元耀",
"谢 温伯",
"谢 公玮",
"谢 长文",
"谢 孔和",
"谢 文和",
"谢 恭夏",
"谢 文公",
"谢 曼倩",
"谢 文若",
"谢 景倩",
"谢 ",
"邹 艺芯",
"邹 彦龙",
"邹 君昊",
"邹 子扬",
"邹 雨晴",
"邹 元起",
"邹 威璜",
"邹 梓云",
"邹 伯奢",
"邹 子伯",
"邹 公威",
"邹 曜岩",
"邹 昕阳",
"邹 晨欣",
"邹 世元",
"邹 子远",
"邹 元宗",
"邹 子义",
"邹 仲宣",
"邹 孔休",
"邹 义权",
"邹 文向",
"邹 佐治",
"邹 文则",
"邹 文谦",
"邹 子布",
"邹 文远",
"邹 康成",
"邹 士会",
"邹 正礼",
"邹 孝连",
"邹 彦靖",
"邹 玄风",
"邹 威彦",
"邹 子许",
"邹 文祥",
"邹 梦雯",
"邹 悦菲",
"邹 予馨",
"邹 婧馨",
"邹 婷儿",
"邹 圣楠",
"邹 芷欣",
"邹 心怡",
"邹 乐彤",
"邹 靖瑶",
"邹 艺婷",
"邹 樱璐",
"邹 婉琳",
"邹 婉儿",
"邹 倩儿",
"邹 蝶莺",
"邹 紫婉",
"邹 伯玉",
"邹 盛衡",
"邹 承伯",
"邹 子雍",
"邹 元伯",
"邹 元泰",
"邹 景兴",
"邹 子均",
"邹 文举",
"邹 子安",
"邹 仲达",
"邹 思然",
"邹 子昂",
"邹 子明",
"邹 子初",
"邹 文师",
"邹 世英",
"邹 敬达",
"邹 公昭",
"邹 文先",
"邹 文则",
"邹 温舒",
"邹 子正",
"邹 君肃",
"邹 彦英",
"邹 文进",
"邹 季宁",
"邹 孔璋",
"邹 元龙",
"邹 公台",
"邹 元悌",
"邹 文奥",
"邹 玄伯",
"邹 元方",
"邹 敬宗",
"邹 子烈",
"邹 元耀",
"邹 温伯",
"邹 公玮",
"邹 长文",
"邹 孔和",
"邹 文和",
"邹 恭夏",
"邹 文公",
"邹 曼倩",
"邹 文若",
"邹 景倩",
"邹 ",
"水 艺芯",
"水 彦龙",
"水 君昊",
"水 子扬",
"水 雨晴",
"水 元起",
"水 威璜",
"水 梓云",
"水 伯奢",
"水 子伯",
"水 公威",
"水 曜岩",
"水 昕阳",
"水 晨欣",
"水 世元",
"水 子远",
"水 元宗",
"水 子义",
"水 仲宣",
"水 孔休",
"水 义权",
"水 文向",
"水 佐治",
"水 文则",
"水 文谦",
"水 子布",
"水 文远",
"水 康成",
"水 士会",
"水 正礼",
"水 孝连",
"水 彦靖",
"水 玄风",
"水 威彦",
"水 子许",
"水 文祥",
"水 梦雯",
"水 悦菲",
"水 予馨",
"水 婧馨",
"水 婷儿",
"水 圣楠",
"水 芷欣",
"水 心怡",
"水 乐彤",
"水 靖瑶",
"水 艺婷",
"水 樱璐",
"水 婉琳",
"水 婉儿",
"水 倩儿",
"水 蝶莺",
"水 紫婉",
"水 伯玉",
"水 盛衡",
"水 承伯",
"水 子雍",
"水 元伯",
"水 元泰",
"水 景兴",
"水 子均",
"水 文举",
"水 子安",
"水 仲达",
"水 思然",
"水 子昂",
"水 子明",
"水 子初",
"水 文师",
"水 世英",
"水 敬达",
"水 公昭",
"水 文先",
"水 文则",
"水 温舒",
"水 子正",
"水 君肃",
"水 彦英",
"水 文进",
"水 季宁",
"水 孔璋",
"水 元龙",
"水 公台",
"水 元悌",
"水 文奥",
"水 玄伯",
"水 元方",
"水 敬宗",
"水 子烈",
"水 元耀",
"水 温伯",
"水 公玮",
"水 长文",
"水 孔和",
"水 文和",
"水 恭夏",
"水 文公",
"水 曼倩",
"水 文若",
"水 景倩",
"水 ",
"章艺芯",
"章彦龙",
"章君昊",
"章子扬",
"章雨晴",
"章元起",
"章威璜",
"章梓云",
"章伯奢",
"章子伯",
"章公威",
"章曜岩",
"章昕阳",
"章晨欣",
"章世元",
"章子远",
"章元宗",
"章子义",
"章仲宣",
"章孔休",
"章义权",
"章文向",
"章佐治",
"章文则",
"章文谦",
"章子布",
"章文远",
"章康成",
"章士会",
"章正礼",
"章孝连",
"章彦靖",
"章玄风",
"章威彦",
"章子许",
"章文祥",
"章梦雯",
"章悦菲",
"章予馨",
"章婧馨",
"章婷儿",
"章圣楠",
"章芷欣",
"章心怡",
"章乐彤",
"章靖瑶",
"章艺婷",
"章樱璐",
"章婉琳",
"章婉儿",
"章倩儿",
"章蝶莺",
"章紫婉",
"章伯玉",
"章盛衡",
"章承伯",
"章子雍",
"章元伯",
"章元泰",
"章景兴",
"章子均",
"章文举",
"章子安",
"章仲达",
"章思然",
"章子昂",
"章子明",
"章子初",
"章文师",
"章世英",
"章敬达",
"章公昭",
"章文先",
"章文则",
"章温舒",
"章子正",
"章君肃",
"章彦英",
"章文进",
"章季宁",
"章孔璋",
"章元龙",
"章公台",
"章元悌",
"章文奥",
"章玄伯",
"章元方",
"章敬宗",
"章子烈",
"章元耀",
"章温伯",
"章公玮",
"章长文",
"章孔和",
"章文和",
"章恭夏",
"章文公",
"章曼倩",
"章文若",
"章景倩",
"章 ",
"云 艺芯",
"云 彦龙",
"云 君昊",
"云 子扬",
"云 雨晴",
"云 元起",
"云 威璜",
"云 梓云",
"云 伯奢",
"云 子伯",
"云 公威",
"云 曜岩",
"云 昕阳",
"云 晨欣",
"云 世元",
"云 子远",
"云 元宗",
"云 子义",
"云 仲宣",
"云 孔休",
"云 义权",
"云 文向",
"云 佐治",
"云 文则",
"云 文谦",
"云 子布",
"云 文远",
"云 康成",
"云 士会",
"云 正礼",
"云 孝连",
"云 彦靖",
"云 玄风",
"云 威彦",
"云 子许",
"云 文祥",
"云 梦雯",
"云 悦菲",
"云 予馨",
"云 婧馨",
"云 婷儿",
"云 圣楠",
"云 芷欣",
"云 心怡",
"云 乐彤",
"云 靖瑶",
"云 艺婷",
"云 樱璐",
"云 婉琳",
"云 婉儿",
"云 倩儿",
"云 蝶莺",
"云 紫婉",
"云 伯玉",
"云 盛衡",
"云 承伯",
"云 子雍",
"云 元伯",
"云 元泰",
"云 景兴",
"云 子均",
"云 文举",
"云 子安",
"云 仲达",
"云 思然",
"云 子昂",
"云 子明",
"云 子初",
"云 文师",
"云 世英",
"云 敬达",
"云 公昭",
"云 文先",
"云 文则",
"云 温舒",
"云 子正",
"云 君肃",
"云 彦英",
"云 文进",
"云 季宁",
"云 孔璋",
"云 元龙",
"云 公台",
"云 元悌",
"云 文奥",
"云 玄伯",
"云 元方",
"云 敬宗",
"云 子烈",
"云 元耀",
"云 温伯",
"云 公玮",
"云 长文",
"云 孔和",
"云 文和",
"云 恭夏",
"云 文公",
"云 曼倩",
"云 文若",
"云 景倩",
"云 ",
"苏 艺芯",
"苏 彦龙",
"苏 君昊",
"苏 子扬",
"苏 雨晴",
"苏 元起",
"苏 威璜",
"苏 梓云",
"苏 伯奢",
"苏 子伯",
"苏 公威",
"苏 曜岩",
"苏 昕阳",
"苏 晨欣",
"苏 世元",
"苏 子远",
"苏 元宗",
"苏 子义",
"苏 仲宣",
"苏 孔休",
"苏 义权",
"苏 文向",
"苏 佐治",
"苏 文则",
"苏 文谦",
"苏 子布",
"苏 文远",
"苏 康成",
"苏 士会",
"苏 正礼",
"苏 孝连",
"苏 彦靖",
"苏 玄风",
"苏 威彦",
"苏 子许",
"苏 文祥",
"苏 梦雯",
"苏 悦菲",
"苏 予馨",
"苏 婧馨",
"苏 婷儿",
"苏 圣楠",
"苏 芷欣",
"苏 心怡",
"苏 乐彤",
"苏 靖瑶",
"苏 艺婷",
"苏 樱璐",
"苏 婉琳",
"苏 婉儿",
"苏 倩儿",
"苏 蝶莺",
"苏 紫婉",
"苏 伯玉",
"苏 盛衡",
"苏 承伯",
"苏 子雍",
"苏 元伯",
"苏 元泰",
"苏 景兴",
"苏 子均",
"苏 文举",
"苏 子安",
"苏 仲达",
"苏 思然",
"苏 子昂",
"苏 子明",
"苏 子初",
"苏 文师",
"苏 世英",
"苏 敬达",
"苏 公昭",
"苏 文先",
"苏 文则",
"苏 温舒",
"苏 子正",
"苏 君肃",
"苏 彦英",
"苏 文进",
"苏 季宁",
"苏 孔璋",
"苏 元龙",
"苏 公台",
"苏 元悌",
"苏 文奥",
"苏 玄伯",
"苏 元方",
"苏 敬宗",
"苏 子烈",
"苏 元耀",
"苏 温伯",
"苏 公玮",
"苏 长文",
"苏 孔和",
"苏 文和",
"苏 恭夏",
"苏 文公",
"苏 曼倩",
"苏 文若",
"苏 景倩",
"苏 ",
"潘 艺芯",
"潘 彦龙",
"潘 君昊",
"潘 子扬",
"潘 雨晴",
"潘 元起",
"潘 威璜",
"潘 梓云",
"潘 伯奢",
"潘 子伯",
"潘 公威",
"潘 曜岩",
"潘 昕阳",
"潘 晨欣",
"潘 世元",
"潘 子远",
"潘 元宗",
"潘 子义",
"潘 仲宣",
"潘 孔休",
"潘 义权",
"潘 文向",
"潘 佐治",
"潘 文则",
"潘 文谦",
"潘 子布",
"潘 文远",
"潘 康成",
"潘 士会",
"潘 正礼",
"潘 孝连",
"潘 彦靖",
"潘 玄风",
"潘 威彦",
"潘 子许",
"潘 文祥",
"潘 梦雯",
"潘 悦菲",
"潘 予馨",
"潘 婧馨",
"潘 婷儿",
"潘 圣楠",
"潘 芷欣",
"潘 心怡",
"潘 乐彤",
"潘 靖瑶",
"潘 艺婷",
"潘 樱璐",
"潘 婉琳",
"潘 婉儿",
"潘 倩儿",
"潘 蝶莺",
"潘 紫婉",
"潘 伯玉",
"潘 盛衡",
"潘 承伯",
"潘 子雍",
"潘 元伯",
"潘 元泰",
"潘 景兴",
"潘 子均",
"潘 文举",
"潘 子安",
"潘 仲达",
"潘 思然",
"潘 子昂",
"潘 子明",
"潘 子初",
"潘 文师",
"潘 世英",
"潘 敬达",
"潘 公昭",
"潘 文先",
"潘 文则",
"潘 温舒",
"潘 子正",
"潘 君肃",
"潘 彦英",
"潘 文进",
"潘 季宁",
"潘 孔璋",
"潘 元龙",
"潘 公台",
"潘 元悌",
"潘 文奥",
"潘 玄伯",
"潘 元方",
"潘 敬宗",
"潘 子烈",
"潘 元耀",
"潘 温伯",
"潘 公玮",
"潘 长文",
"潘 孔和",
"潘 文和",
"潘 恭夏",
"潘 文公",
"潘 曼倩",
"潘 文若",
"潘 景倩",
"潘 ",
"范 艺芯",
"范 彦龙",
"范 君昊",
"范 子扬",
"范 雨晴",
"范 元起",
"范 威璜",
"范 梓云",
"范 伯奢",
"范 子伯",
"范 公威",
"范 曜岩",
"范 昕阳",
"范 晨欣",
"范 世元",
"范 子远",
"范 元宗",
"范 子义",
"范 仲宣",
"范 孔休",
"范 义权",
"范 文向",
"范 佐治",
"范 文则",
"范 文谦",
"范 子布",
"范 文远",
"范 康成",
"范 士会",
"范 正礼",
"范 孝连",
"范 彦靖",
"范 玄风",
"范 威彦",
"范 子许",
"范 文祥",
"范 梦雯",
"范 悦菲",
"范 予馨",
"范 婧馨",
"范 婷儿",
"范 圣楠",
"范 芷欣",
"范 心怡",
"范 乐彤",
"范 靖瑶",
"范 艺婷",
"范 樱璐",
"范 婉琳",
"范 婉儿",
"范 倩儿",
"范 蝶莺",
"范 紫婉",
"范 伯玉",
"范 盛衡",
"范 承伯",
"范 子雍",
"范 元伯",
"范 元泰",
"范 景兴",
"范 子均",
"范 文举",
"范 子安",
"范 仲达",
"范 思然",
"范 子昂",
"范 子明",
"范 子初",
"范 文师",
"范 世英",
"范 敬达",
"范 公昭",
"范 文先",
"范 文则",
"范 温舒",
"范 子正",
"范 君肃",
"范 彦英",
"范 文进",
"范 季宁",
"范 孔璋",
"范 元龙",
"范 公台",
"范 元悌",
"范 文奥",
"范 玄伯",
"范 元方",
"范 敬宗",
"范 子烈",
"范 元耀",
"范 温伯",
"范 公玮",
"范 长文",
"范 孔和",
"范 文和",
"范 恭夏",
"范 文公",
"范 曼倩",
"范 文若",
"范 景倩",
"范 ",
"彭 艺芯",
"彭 彦龙",
"彭 君昊",
"彭 子扬",
"彭 雨晴",
"彭 元起",
"彭 威璜",
"彭 梓云",
"彭 伯奢",
"彭 子伯",
"彭 公威",
"彭 曜岩",
"彭 昕阳",
"彭 晨欣",
"彭 世元",
"彭 子远",
"彭 元宗",
"彭 子义",
"彭 仲宣",
"彭 孔休",
"彭 义权",
"彭 文向",
"彭 佐治",
"彭 文则",
"彭 文谦",
"彭 子布",
"彭 文远",
"彭 康成",
"彭 士会",
"彭 正礼",
"彭 孝连",
"彭 彦靖",
"彭 玄风",
"彭 威彦",
"彭 子许",
"彭 文祥",
"彭 梦雯",
"彭 悦菲",
"彭 予馨",
"彭 婧馨",
"彭 婷儿",
"彭 圣楠",
"彭 芷欣",
"彭 心怡",
"彭 乐彤",
"彭 靖瑶",
"彭 艺婷",
"彭 樱璐",
"彭 婉琳",
"彭 婉儿",
"彭 倩儿",
"彭 蝶莺",
"彭 紫婉",
"彭 伯玉",
"彭 盛衡",
"彭 承伯",
"彭 子雍",
"彭 元伯",
"彭 元泰",
"彭 景兴",
"彭 子均",
"彭 文举",
"彭 子安",
"彭 仲达",
"彭 思然",
"彭 子昂",
"彭 子明",
"彭 子初",
"彭 文师",
"彭 世英",
"彭 敬达",
"彭 公昭",
"彭 文先",
"彭 文则",
"彭 温舒",
"彭 子正",
"彭 君肃",
"彭 彦英",
"彭 文进",
"彭 季宁",
"彭 孔璋",
"彭 元龙",
"彭 公台",
"彭 元悌",
"彭 文奥",
"彭 玄伯",
"彭 元方",
"彭 敬宗",
"彭 子烈",
"彭 元耀",
"彭 温伯",
"彭 公玮",
"彭 长文",
"彭 孔和",
"彭 文和",
"彭 恭夏",
"彭 文公",
"彭 曼倩",
"彭 文若",
"彭 景倩",
"彭 ",
"马 艺芯",
"马 彦龙",
"马 君昊",
"马 子扬",
"马 雨晴",
"马 元起",
"马 威璜",
"马 梓云",
"马 伯奢",
"马 子伯",
"马 公威",
"马 曜岩",
"马 昕阳",
"马 晨欣",
"马 世元",
"马 子远",
"马 元宗",
"马 子义",
"马 仲宣",
"马 孔休",
"马 义权",
"马 文向",
"马 佐治",
"马 文则",
"马 文谦",
"马 子布",
"马 文远",
"马 康成",
"马 士会",
"马 正礼",
"马 孝连",
"马 彦靖",
"马 玄风",
"马 威彦",
"马 子许",
"马 文祥",
"马 梦雯",
"马 悦菲",
"马 予馨",
"马 婧馨",
"马 婷儿",
"马 圣楠",
"马 芷欣",
"马 心怡",
"马 乐彤",
"马 靖瑶",
"马 艺婷",
"马 樱璐",
"马 婉琳",
"马 婉儿",
"马 倩儿",
"马 蝶莺",
"马 紫婉",
"马 伯玉",
"马 盛衡",
"马 承伯",
"马 子雍",
"马 元伯",
"马 元泰",
"马 景兴",
"马 子均",
"马 文举",
"马 子安",
"马 仲达",
"马 思然",
"马 子昂",
"马 子明",
"马 子初",
"马 文师",
"马 世英",
"马 敬达",
"马 公昭",
"马 文先",
"马 文则",
"马 温舒",
"马 子正",
"马 君肃",
"马 彦英",
"马 文进",
"马 季宁",
"马 孔璋",
"马 元龙",
"马 公台",
"马 元悌",
"马 文奥",
"马 玄伯",
"马 元方",
"马 敬宗",
"马 子烈",
"马 元耀",
"马 温伯",
"马 公玮",
"马 长文",
"马 孔和",
"马 文和",
"马 恭夏",
"马 文公",
"马 曼倩",
"马 文若",
"马 景倩",
"马 ",
"苗 艺芯",
"苗 彦龙",
"苗 君昊",
"苗 子扬",
"苗 雨晴",
"苗 元起",
"苗 威璜",
"苗 梓云",
"苗 伯奢",
"苗 子伯",
"苗 公威",
"苗 曜岩",
"苗 昕阳",
"苗 晨欣",
"苗 世元",
"苗 子远",
"苗 元宗",
"苗 子义",
"苗 仲宣",
"苗 孔休",
"苗 义权",
"苗 文向",
"苗 佐治",
"苗 文则",
"苗 文谦",
"苗 子布",
"苗 文远",
"苗 康成",
"苗 士会",
"苗 正礼",
"苗 孝连",
"苗 彦靖",
"苗 玄风",
"苗 威彦",
"苗 子许",
"苗 文祥",
"苗 梦雯",
"苗 悦菲",
"苗 予馨",
"苗 婧馨",
"苗 婷儿",
"苗 圣楠",
"苗 芷欣",
"苗 心怡",
"苗 乐彤",
"苗 靖瑶",
"苗 艺婷",
"苗 樱璐",
"苗 婉琳",
"苗 婉儿",
"苗 倩儿",
"苗 蝶莺",
"苗 紫婉",
"苗 伯玉",
"苗 盛衡",
"苗 承伯",
"苗 子雍",
"苗 元伯",
"苗 元泰",
"苗 景兴",
"苗 子均",
"苗 文举",
"苗 子安",
"苗 仲达",
"苗 思然",
"苗 子昂",
"苗 子明",
"苗 子初",
"苗 文师",
"苗 世英",
"苗 敬达",
"苗 公昭",
"苗 文先",
"苗 文则",
"苗 温舒",
"苗 子正",
"苗 君肃",
"苗 彦英",
"苗 文进",
"苗 季宁",
"苗 孔璋",
"苗 元龙",
"苗 公台",
"苗 元悌",
"苗 文奥",
"苗 玄伯",
"苗 元方",
"苗 敬宗",
"苗 子烈",
"苗 元耀",
"苗 温伯",
"苗 公玮",
"苗 长文",
"苗 孔和",
"苗 文和",
"苗 恭夏",
"苗 文公",
"苗 曼倩",
"苗 文若",
"苗 景倩",
"苗 ",
"凤 艺芯",
"凤 彦龙",
"凤 君昊",
"凤 子扬",
"凤 雨晴",
"凤 元起",
"凤 威璜",
"凤 梓云",
"凤 伯奢",
"凤 子伯",
"凤 公威",
"凤 曜岩",
"凤 昕阳",
"凤 晨欣",
"凤 世元",
"凤 子远",
"凤 元宗",
"凤 子义",
"凤 仲宣",
"凤 孔休",
"凤 义权",
"凤 文向",
"凤 佐治",
"凤 文则",
"凤 文谦",
"凤 子布",
"凤 文远",
"凤 康成",
"凤 士会",
"凤 正礼",
"凤 孝连",
"凤 彦靖",
"凤 玄风",
"凤 威彦",
"凤 子许",
"凤 文祥",
"凤 梦雯",
"凤 悦菲",
"凤 予馨",
"凤 婧馨",
"凤 婷儿",
"凤 圣楠",
"凤 芷欣",
"凤 心怡",
"凤 乐彤",
"凤 靖瑶",
"凤 艺婷",
"凤 樱璐",
"凤 婉琳",
"凤 婉儿",
"凤 倩儿",
"凤 蝶莺",
"凤 紫婉",
"凤 伯玉",
"凤 盛衡",
"凤 承伯",
"凤 子雍",
"凤 元伯",
"凤 元泰",
"凤 景兴",
"凤 子均",
"凤 文举",
"凤 子安",
"凤 仲达",
"凤 思然",
"凤 子昂",
"凤 子明",
"凤 子初",
"凤 文师",
"凤 世英",
"凤 敬达",
"凤 公昭",
"凤 文先",
"凤 文则",
"凤 温舒",
"凤 子正",
"凤 君肃",
"凤 彦英",
"凤 文进",
"凤 季宁",
"凤 孔璋",
"凤 元龙",
"凤 公台",
"凤 元悌",
"凤 文奥",
"凤 玄伯",
"凤 元方",
"凤 敬宗",
"凤 子烈",
"凤 元耀",
"凤 温伯",
"凤 公玮",
"凤 长文",
"凤 孔和",
"凤 文和",
"凤 恭夏",
"凤 文公",
"凤 曼倩",
"凤 文若",
"凤 景倩",
"凤 ",
"方 艺芯",
"方 彦龙",
"方 君昊",
"方 子扬",
"方 雨晴",
"方 元起",
"方 威璜",
"方 梓云",
"方 伯奢",
"方 子伯",
"方 公威",
"方 曜岩",
"方 昕阳",
"方 晨欣",
"方 世元",
"方 子远",
"方 元宗",
"方 子义",
"方 仲宣",
"方 孔休",
"方 义权",
"方 文向",
"方 佐治",
"方 文则",
"方 文谦",
"方 子布",
"方 文远",
"方 康成",
"方 士会",
"方 正礼",
"方 孝连",
"方 彦靖",
"方 玄风",
"方 威彦",
"方 子许",
"方 文祥",
"方 梦雯",
"方 悦菲",
"方 予馨",
"方 婧馨",
"方 婷儿",
"方 圣楠",
"方 芷欣",
"方 心怡",
"方 乐彤",
"方 靖瑶",
"方 艺婷",
"方 樱璐",
"方 婉琳",
"方 婉儿",
"方 倩儿",
"方 蝶莺",
"方 紫婉",
"方 伯玉",
"方 盛衡",
"方 承伯",
"方 子雍",
"方 元伯",
"方 元泰",
"方 景兴",
"方 子均",
"方 文举",
"方 子安",
"方 仲达",
"方 思然",
"方 子昂",
"方 子明",
"方 子初",
"方 文师",
"方 世英",
"方 敬达",
"方 公昭",
"方 文先",
"方 文则",
"方 温舒",
"方 子正",
"方 君肃",
"方 彦英",
"方 文进",
"方 季宁",
"方 孔璋",
"方 元龙",
"方 公台",
"方 元悌",
"方 文奥",
"方 玄伯",
"方 元方",
"方 敬宗",
"方 子烈",
"方 元耀",
"方 温伯",
"方 公玮",
"方 长文",
"方 孔和",
"方 文和",
"方 恭夏",
"方 文公",
"方 曼倩",
"方 文若",
"方 景倩",
"方 ",
"任 艺芯",
"任 彦龙",
"任 君昊",
"任 子扬",
"任 雨晴",
"任 元起",
"任 威璜",
"任 梓云",
"任 伯奢",
"任 子伯",
"任 公威",
"任 曜岩",
"任 昕阳",
"任 晨欣",
"任 世元",
"任 子远",
"任 元宗",
"任 子义",
"任 仲宣",
"任 孔休",
"任 义权",
"任 文向",
"任 佐治",
"任 文则",
"任 文谦",
"任 子布",
"任 文远",
"任 康成",
"任 士会",
"任 正礼",
"任 孝连",
"任 彦靖",
"任 玄风",
"任 威彦",
"任 子许",
"任 文祥",
"任 梦雯",
"任 悦菲",
"任 予馨",
"任 婧馨",
"任 婷儿",
"任 圣楠",
"任 芷欣",
"任 心怡",
"任 乐彤",
"任 靖瑶",
"任 艺婷",
"任 樱璐",
"任 婉琳",
"任 婉儿",
"任 倩儿",
"任 蝶莺",
"任 紫婉",
"任 伯玉",
"任 盛衡",
"任 承伯",
"任 子雍",
"任 元伯",
"任 元泰",
"任 景兴",
"任 子均",
"任 文举",
"任 子安",
"任 仲达",
"任 思然",
"任 子昂",
"任 子明",
"任 子初",
"任 文师",
"任 世英",
"任 敬达",
"任 公昭",
"任 文先",
"任 文则",
"任 温舒",
"任 子正",
"任 君肃",
"任 彦英",
"任 文进",
"任 季宁",
"任 孔璋",
"任 元龙",
"任 公台",
"任 元悌",
"任 文奥",
"任 玄伯",
"任 元方",
"任 敬宗",
"任 子烈",
"任 元耀",
"任 温伯",
"任 公玮",
"任 长文",
"任 孔和",
"任 文和",
"任 恭夏",
"任 文公",
"任 曼倩",
"任 文若",
"任 景倩",
"任 ",
"袁 艺芯",
"袁 彦龙",
"袁 君昊",
"袁 子扬",
"袁 雨晴",
"袁 元起",
"袁 威璜",
"袁 梓云",
"袁 伯奢",
"袁 子伯",
"袁 公威",
"袁 曜岩",
"袁 昕阳",
"袁 晨欣",
"袁 世元",
"袁 子远",
"袁 元宗",
"袁 子义",
"袁 仲宣",
"袁 孔休",
"袁 义权",
"袁 文向",
"袁 佐治",
"袁 文则",
"袁 文谦",
"袁 子布",
"袁 文远",
"袁 康成",
"袁 士会",
"袁 正礼",
"袁 孝连",
"袁 彦靖",
"袁 玄风",
"袁 威彦",
"袁 子许",
"袁 文祥",
"袁 梦雯",
"袁 悦菲",
"袁 予馨",
"袁 婧馨",
"袁 婷儿",
"袁 圣楠",
"袁 芷欣",
"袁 心怡",
"袁 乐彤",
"袁 靖瑶",
"袁 艺婷",
"袁 樱璐",
"袁 婉琳",
"袁 婉儿",
"袁 倩儿",
"袁 蝶莺",
"袁 紫婉",
"袁 伯玉",
"袁 盛衡",
"袁 承伯",
"袁 子雍",
"袁 元伯",
"袁 元泰",
"袁 景兴",
"袁 子均",
"袁 文举",
"袁 子安",
"袁 仲达",
"袁 思然",
"袁 子昂",
"袁 子明",
"袁 子初",
"袁 文师",
"袁 世英",
"袁 敬达",
"袁 公昭",
"袁 文先",
"袁 文则",
"袁 温舒",
"袁 子正",
"袁 君肃",
"袁 彦英",
"袁 文进",
"袁 季宁",
"袁 孔璋",
"袁 元龙",
"袁 公台",
"袁 元悌",
"袁 文奥",
"袁 玄伯",
"袁 元方",
"袁 敬宗",
"袁 子烈",
"袁 元耀",
"袁 温伯",
"袁 公玮",
"袁 长文",
"袁 孔和",
"袁 文和",
"袁 恭夏",
"袁 文公",
"袁 曼倩",
"袁 文若",
"袁 景倩",
"袁 ",
"柳艺芯",
"柳彦龙",
"柳君昊",
"柳子扬",
"柳雨晴",
"柳元起",
"柳威璜",
"柳梓云",
"柳伯奢",
"柳子伯",
"柳公威",
"柳曜岩",
"柳昕阳",
"柳晨欣",
"柳世元",
"柳子远",
"柳元宗",
"柳子义",
"柳仲宣",
"柳孔休",
"柳义权",
"柳文向",
"柳佐治",
"柳文则",
"柳文谦",
"柳子布",
"柳文远",
"柳康成",
"柳士会",
"柳正礼",
"柳孝连",
"柳彦靖",
"柳玄风",
"柳威彦",
"柳子许",
"柳文祥",
"柳梦雯",
"柳悦菲",
"柳予馨",
"柳婧馨",
"柳婷儿",
"柳圣楠",
"柳芷欣",
"柳心怡",
"柳乐彤",
"柳靖瑶",
"柳艺婷",
"柳樱璐",
"柳婉琳",
"柳婉儿",
"柳倩儿",
"柳蝶莺",
"柳紫婉",
"柳伯玉",
"柳盛衡",
"柳承伯",
"柳子雍",
"柳元伯",
"柳元泰",
"柳景兴",
"柳子均",
"柳文举",
"柳子安",
"柳仲达",
"柳思然",
"柳子昂",
"柳子明",
"柳子初",
"柳文师",
"柳世英",
"柳敬达",
"柳公昭",
"柳文先",
"柳文则",
"柳温舒",
"柳子正",
"柳君肃",
"柳彦英",
"柳文进",
"柳季宁",
"柳孔璋",
"柳元龙",
"柳公台",
"柳元悌",
"柳文奥",
"柳玄伯",
"柳元方",
"柳敬宗",
"柳子烈",
"柳元耀",
"柳温伯",
"柳公玮",
"柳长文",
"柳孔和",
"柳文和",
"柳恭夏",
"柳文公",
"柳曼倩",
"柳文若",
"柳景倩",
"柳 ",
"唐 艺芯",
"唐 彦龙",
"唐 君昊",
"唐 子扬",
"唐 雨晴",
"唐 元起",
"唐 威璜",
"唐 梓云",
"唐 伯奢",
"唐 子伯",
"唐 公威",
"唐 曜岩",
"唐 昕阳",
"唐 晨欣",
"唐 世元",
"唐 子远",
"唐 元宗",
"唐 子义",
"唐 仲宣",
"唐 孔休",
"唐 义权",
"唐 文向",
"唐 佐治",
"唐 文则",
"唐 文谦",
"唐 子布",
"唐 文远",
"唐 康成",
"唐 士会",
"唐 正礼",
"唐 孝连",
"唐 彦靖",
"唐 玄风",
"唐 威彦",
"唐 子许",
"唐 文祥",
"唐 梦雯",
"唐 悦菲",
"唐 予馨",
"唐 婧馨",
"唐 婷儿",
"唐 圣楠",
"唐 芷欣",
"唐 心怡",
"唐 乐彤",
"唐 靖瑶",
"唐 艺婷",
"唐 樱璐",
"唐 婉琳",
"唐 婉儿",
"唐 倩儿",
"唐 蝶莺",
"唐 紫婉",
"唐 伯玉",
"唐 盛衡",
"唐 承伯",
"唐 子雍",
"唐 元伯",
"唐 元泰",
"唐 景兴",
"唐 子均",
"唐 文举",
"唐 子安",
"唐 仲达",
"唐 思然",
"唐 子昂",
"唐 子明",
"唐 子初",
"唐 文师",
"唐 世英",
"唐 敬达",
"唐 公昭",
"唐 文先",
"唐 文则",
"唐 温舒",
"唐 子正",
"唐 君肃",
"唐 彦英",
"唐 文进",
"唐 季宁",
"唐 孔璋",
"唐 元龙",
"唐 公台",
"唐 元悌",
"唐 文奥",
"唐 玄伯",
"唐 元方",
"唐 敬宗",
"唐 子烈",
"唐 元耀",
"唐 温伯",
"唐 公玮",
"唐 长文",
"唐 孔和",
"唐 文和",
"唐 恭夏",
"唐 文公",
"唐 曼倩",
"唐 文若",
"唐 景倩",
"唐 ",
"薛 艺芯",
"薛 彦龙",
"薛 君昊",
"薛 子扬",
"薛 雨晴",
"薛 元起",
"薛 威璜",
"薛 梓云",
"薛 伯奢",
"薛 子伯",
"薛 公威",
"薛 曜岩",
"薛 昕阳",
"薛 晨欣",
"薛 世元",
"薛 子远",
"薛 元宗",
"薛 子义",
"薛 仲宣",
"薛 孔休",
"薛 义权",
"薛 文向",
"薛 佐治",
"薛 文则",
"薛 文谦",
"薛 子布",
"薛 文远",
"薛 康成",
"薛 士会",
"薛 正礼",
"薛 孝连",
"薛 彦靖",
"薛 玄风",
"薛 威彦",
"薛 子许",
"薛 文祥",
"薛 梦雯",
"薛 悦菲",
"薛 予馨",
"薛 婧馨",
"薛 婷儿",
"薛 圣楠",
"薛 芷欣",
"薛 心怡",
"薛 乐彤",
"薛 靖瑶",
"薛 艺婷",
"薛 樱璐",
"薛 婉琳",
"薛 婉儿",
"薛 倩儿",
"薛 蝶莺",
"薛 紫婉",
"薛 伯玉",
"薛 盛衡",
"薛 承伯",
"薛 子雍",
"薛 元伯",
"薛 元泰",
"薛 景兴",
"薛 子均",
"薛 文举",
"薛 子安",
"薛 仲达",
"薛 思然",
"薛 子昂",
"薛 子明",
"薛 子初",
"薛 文师",
"薛 世英",
"薛 敬达",
"薛 公昭",
"薛 文先",
"薛 文则",
"薛 温舒",
"薛 子正",
"薛 君肃",
"薛 彦英",
"薛 文进",
"薛 季宁",
"薛 孔璋",
"薛 元龙",
"薛 公台",
"薛 元悌",
"薛 文奥",
"薛 玄伯",
"薛 元方",
"薛 敬宗",
"薛 子烈",
"薛 元耀",
"薛 温伯",
"薛 公玮",
"薛 长文",
"薛 孔和",
"薛 文和",
"薛 恭夏",
"薛 文公",
"薛 曼倩",
"薛 文若",
"薛 景倩",
"薛 ",
"贺 艺芯",
"贺 彦龙",
"贺 君昊",
"贺 子扬",
"贺 雨晴",
"贺 元起",
"贺 威璜",
"贺 梓云",
"贺 伯奢",
"贺 子伯",
"贺 公威",
"贺 曜岩",
"贺 昕阳",
"贺 晨欣",
"贺 世元",
"贺 子远",
"贺 元宗",
"贺 子义",
"贺 仲宣",
"贺 孔休",
"贺 义权",
"贺 文向",
"贺 佐治",
"贺 文则",
"贺 文谦",
"贺 子布",
"贺 文远",
"贺 康成",
"贺 士会",
"贺 正礼",
"贺 孝连",
"贺 彦靖",
"贺 玄风",
"贺 威彦",
"贺 子许",
"贺 文祥",
"贺 梦雯",
"贺 悦菲",
"贺 予馨",
"贺 婧馨",
"贺 婷儿",
"贺 圣楠",
"贺 芷欣",
"贺 心怡",
"贺 乐彤",
"贺 靖瑶",
"贺 艺婷",
"贺 樱璐",
"贺 婉琳",
"贺 婉儿",
"贺 倩儿",
"贺 蝶莺",
"贺 紫婉",
"贺 伯玉",
"贺 盛衡",
"贺 承伯",
"贺 子雍",
"贺 元伯",
"贺 元泰",
"贺 景兴",
"贺 子均",
"贺 文举",
"贺 子安",
"贺 仲达",
"贺 思然",
"贺 子昂",
"贺 子明",
"贺 子初",
"贺 文师",
"贺 世英",
"贺 敬达",
"贺 公昭",
"贺 文先",
"贺 文则",
"贺 温舒",
"贺 子正",
"贺 君肃",
"贺 彦英",
"贺 文进",
"贺 季宁",
"贺 孔璋",
"贺 元龙",
"贺 公台",
"贺 元悌",
"贺 文奥",
"贺 玄伯",
"贺 元方",
"贺 敬宗",
"贺 子烈",
"贺 元耀",
"贺 温伯",
"贺 公玮",
"贺 长文",
"贺 孔和",
"贺 文和",
"贺 恭夏",
"贺 文公",
"贺 曼倩",
"贺 文若",
"贺 景倩",
"贺 ",
"倪 艺芯",
"倪 彦龙",
"倪 君昊",
"倪 子扬",
"倪 雨晴",
"倪 元起",
"倪 威璜",
"倪 梓云",
"倪 伯奢",
"倪 子伯",
"倪 公威",
"倪 曜岩",
"倪 昕阳",
"倪 晨欣",
"倪 世元",
"倪 子远",
"倪 元宗",
"倪 子义",
"倪 仲宣",
"倪 孔休",
"倪 义权",
"倪 文向",
"倪 佐治",
"倪 文则",
"倪 文谦",
"倪 子布",
"倪 文远",
"倪 康成",
"倪 士会",
"倪 正礼",
"倪 孝连",
"倪 彦靖",
"倪 玄风",
"倪 威彦",
"倪 子许",
"倪 文祥",
"倪 梦雯",
"倪 悦菲",
"倪 予馨",
"倪 婧馨",
"倪 婷儿",
"倪 圣楠",
"倪 芷欣",
"倪 心怡",
"倪 乐彤",
"倪 靖瑶",
"倪 艺婷",
"倪 樱璐",
"倪 婉琳",
"倪 婉儿",
"倪 倩儿",
"倪 蝶莺",
"倪 紫婉",
"倪 伯玉",
"倪 盛衡",
"倪 承伯",
"倪 子雍",
"倪 元伯",
"倪 元泰",
"倪 景兴",
"倪 子均",
"倪 文举",
"倪 子安",
"倪 仲达",
"倪 思然",
"倪 子昂",
"倪 子明",
"倪 子初",
"倪 文师",
"倪 世英",
"倪 敬达",
"倪 公昭",
"倪 文先",
"倪 文则",
"倪 温舒",
"倪 子正",
"倪 君肃",
"倪 彦英",
"倪 文进",
"倪 季宁",
"倪 孔璋",
"倪 元龙",
"倪 公台",
"倪 元悌",
"倪 文奥",
"倪 玄伯",
"倪 元方",
"倪 敬宗",
"倪 子烈",
"倪 元耀",
"倪 温伯",
"倪 公玮",
"倪 长文",
"倪 孔和",
"倪 文和",
"倪 恭夏",
"倪 文公",
"倪 曼倩",
"倪 文若",
"倪 景倩",
"倪 ",
"汤 艺芯",
"汤 彦龙",
"汤 君昊",
"汤 子扬",
"汤 雨晴",
"汤 元起",
"汤 威璜",
"汤 梓云",
"汤 伯奢",
"汤 子伯",
"汤 公威",
"汤 曜岩",
"汤 昕阳",
"汤 晨欣",
"汤 世元",
"汤 子远",
"汤 元宗",
"汤 子义",
"汤 仲宣",
"汤 孔休",
"汤 义权",
"汤 文向",
"汤 佐治",
"汤 文则",
"汤 文谦",
"汤 子布",
"汤 文远",
"汤 康成",
"汤 士会",
"汤 正礼",
"汤 孝连",
"汤 彦靖",
"汤 玄风",
"汤 威彦",
"汤 子许",
"汤 文祥",
"汤 梦雯",
"汤 悦菲",
"汤 予馨",
"汤 婧馨",
"汤 婷儿",
"汤 圣楠",
"汤 芷欣",
"汤 心怡",
"汤 乐彤",
"汤 靖瑶",
"汤 艺婷",
"汤 樱璐",
"汤 婉琳",
"汤 婉儿",
"汤 倩儿",
"汤 蝶莺",
"汤 紫婉",
"汤 伯玉",
"汤 盛衡",
"汤 承伯",
"汤 子雍",
"汤 元伯",
"汤 元泰",
"汤 景兴",
"汤 子均",
"汤 文举",
"汤 子安",
"汤 仲达",
"汤 思然",
"汤 子昂",
"汤 子明",
"汤 子初",
"汤 文师",
"汤 世英",
"汤 敬达",
"汤 公昭",
"汤 文先",
"汤 文则",
"汤 温舒",
"汤 子正",
"汤 君肃",
"汤 彦英",
"汤 文进",
"汤 季宁",
"汤 孔璋",
"汤 元龙",
"汤 公台",
"汤 元悌",
"汤 文奥",
"汤 玄伯",
"汤 元方",
"汤 敬宗",
"汤 子烈",
"汤 元耀",
"汤 温伯",
"汤 公玮",
"汤 长文",
"汤 孔和",
"汤 文和",
"汤 恭夏",
"汤 文公",
"汤 曼倩",
"汤 文若",
"汤 景倩",
"汤 ",
"滕 艺芯",
"滕 彦龙",
"滕 君昊",
"滕 子扬",
"滕 雨晴",
"滕 元起",
"滕 威璜",
"滕 梓云",
"滕 伯奢",
"滕 子伯",
"滕 公威",
"滕 曜岩",
"滕 昕阳",
"滕 晨欣",
"滕 世元",
"滕 子远",
"滕 元宗",
"滕 子义",
"滕 仲宣",
"滕 孔休",
"滕 义权",
"滕 文向",
"滕 佐治",
"滕 文则",
"滕 文谦",
"滕 子布",
"滕 文远",
"滕 康成",
"滕 士会",
"滕 正礼",
"滕 孝连",
"滕 彦靖",
"滕 玄风",
"滕 威彦",
"滕 子许",
"滕 文祥",
"滕 梦雯",
"滕 悦菲",
"滕 予馨",
"滕 婧馨",
"滕 婷儿",
"滕 圣楠",
"滕 芷欣",
"滕 心怡",
"滕 乐彤",
"滕 靖瑶",
"滕 艺婷",
"滕 樱璐",
"滕 婉琳",
"滕 婉儿",
"滕 倩儿",
"滕 蝶莺",
"滕 紫婉",
"滕 伯玉",
"滕 盛衡",
"滕 承伯",
"滕 子雍",
"滕 元伯",
"滕 元泰",
"滕 景兴",
"滕 子均",
"滕 文举",
"滕 子安",
"滕 仲达",
"滕 思然",
"滕 子昂",
"滕 子明",
"滕 子初",
"滕 文师",
"滕 世英",
"滕 敬达",
"滕 公昭",
"滕 文先",
"滕 文则",
"滕 温舒",
"滕 子正",
"滕 君肃",
"滕 彦英",
"滕 文进",
"滕 季宁",
"滕 孔璋",
"滕 元龙",
"滕 公台",
"滕 元悌",
"滕 文奥",
"滕 玄伯",
"滕 元方",
"滕 敬宗",
"滕 子烈",
"滕 元耀",
"滕 温伯",
"滕 公玮",
"滕 长文",
"滕 孔和",
"滕 文和",
"滕 恭夏",
"滕 文公",
"滕 曼倩",
"滕 文若",
"滕 景倩",
"滕 ",
"罗 艺芯",
"罗 彦龙",
"罗 君昊",
"罗 子扬",
"罗 雨晴",
"罗 元起",
"罗 威璜",
"罗 梓云",
"罗 伯奢",
"罗 子伯",
"罗 公威",
"罗 曜岩",
"罗 昕阳",
"罗 晨欣",
"罗 世元",
"罗 子远",
"罗 元宗",
"罗 子义",
"罗 仲宣",
"罗 孔休",
"罗 义权",
"罗 文向",
"罗 佐治",
"罗 文则",
"罗 文谦",
"罗 子布",
"罗 文远",
"罗 康成",
"罗 士会",
"罗 正礼",
"罗 孝连",
"罗 彦靖",
"罗 玄风",
"罗 威彦",
"罗 子许",
"罗 文祥",
"罗 梦雯",
"罗 悦菲",
"罗 予馨",
"罗 婧馨",
"罗 婷儿",
"罗 圣楠",
"罗 芷欣",
"罗 心怡",
"罗 乐彤",
"罗 靖瑶",
"罗 艺婷",
"罗 樱璐",
"罗 婉琳",
"罗 婉儿",
"罗 倩儿",
"罗 蝶莺",
"罗 紫婉",
"罗 伯玉",
"罗 盛衡",
"罗 承伯",
"罗 子雍",
"罗 元伯",
"罗 元泰",
"罗 景兴",
"罗 子均",
"罗 文举",
"罗 子安",
"罗 仲达",
"罗 思然",
"罗 子昂",
"罗 子明",
"罗 子初",
"罗 文师",
"罗 世英",
"罗 敬达",
"罗 公昭",
"罗 文先",
"罗 文则",
"罗 温舒",
"罗 子正",
"罗 君肃",
"罗 彦英",
"罗 文进",
"罗 季宁",
"罗 孔璋",
"罗 元龙",
"罗 公台",
"罗 元悌",
"罗 文奥",
"罗 玄伯",
"罗 元方",
"罗 敬宗",
"罗 子烈",
"罗 元耀",
"罗 温伯",
"罗 公玮",
"罗 长文",
"罗 孔和",
"罗 文和",
"罗 恭夏",
"罗 文公",
"罗 曼倩",
"罗 文若",
"罗 景倩",
"罗 ",
"郝 艺芯",
"郝 彦龙",
"郝 君昊",
"郝 子扬",
"郝 雨晴",
"郝 元起",
"郝 威璜",
"郝 梓云",
"郝 伯奢",
"郝 子伯",
"郝 公威",
"郝 曜岩",
"郝 昕阳",
"郝 晨欣",
"郝 世元",
"郝 子远",
"郝 元宗",
"郝 子义",
"郝 仲宣",
"郝 孔休",
"郝 义权",
"郝 文向",
"郝 佐治",
"郝 文则",
"郝 文谦",
"郝 子布",
"郝 文远",
"郝 康成",
"郝 士会",
"郝 正礼",
"郝 孝连",
"郝 彦靖",
"郝 玄风",
"郝 威彦",
"郝 子许",
"郝 文祥",
"郝 梦雯",
"郝 悦菲",
"郝 予馨",
"郝 婧馨",
"郝 婷儿",
"郝 圣楠",
"郝 芷欣",
"郝 心怡",
"郝 乐彤",
"郝 靖瑶",
"郝 艺婷",
"郝 樱璐",
"郝 婉琳",
"郝 婉儿",
"郝 倩儿",
"郝 蝶莺",
"郝 紫婉",
"郝 伯玉",
"郝 盛衡",
"郝 承伯",
"郝 子雍",
"郝 元伯",
"郝 元泰",
"郝 景兴",
"郝 子均",
"郝 文举",
"郝 子安",
"郝 仲达",
"郝 思然",
"郝 子昂",
"郝 子明",
"郝 子初",
"郝 文师",
"郝 世英",
"郝 敬达",
"郝 公昭",
"郝 文先",
"郝 文则",
"郝 温舒",
"郝 子正",
"郝 君肃",
"郝 彦英",
"郝 文进",
"郝 季宁",
"郝 孔璋",
"郝 元龙",
"郝 公台",
"郝 元悌",
"郝 文奥",
"郝 玄伯",
"郝 元方",
"郝 敬宗",
"郝 子烈",
"郝 元耀",
"郝 温伯",
"郝 公玮",
"郝 长文",
"郝 孔和",
"郝 文和",
"郝 恭夏",
"郝 文公",
"郝 曼倩",
"郝 文若",
"郝 景倩",
"郝 ",
"安 艺芯",
"安 彦龙",
"安 君昊",
"安 子扬",
"安 雨晴",
"安 元起",
"安 威璜",
"安 梓云",
"安 伯奢",
"安 子伯",
"安 公威",
"安 曜岩",
"安 昕阳",
"安 晨欣",
"安 世元",
"安 子远",
"安 元宗",
"安 子义",
"安 仲宣",
"安 孔休",
"安 义权",
"安 文向",
"安 佐治",
"安 文则",
"安 文谦",
"安 子布",
"安 文远",
"安 康成",
"安 士会",
"安 正礼",
"安 孝连",
"安 彦靖",
"安 玄风",
"安 威彦",
"安 子许",
"安 文祥",
"安 梦雯",
"安 悦菲",
"安 予馨",
"安 婧馨",
"安 婷儿",
"安 圣楠",
"安 芷欣",
"安 心怡",
"安 乐彤",
"安 靖瑶",
"安 艺婷",
"安 樱璐",
"安 婉琳",
"安 婉儿",
"安 倩儿",
"安 蝶莺",
"安 紫婉",
"安 伯玉",
"安 盛衡",
"安 承伯",
"安 子雍",
"安 元伯",
"安 元泰",
"安 景兴",
"安 子均",
"安 文举",
"安 子安",
"安 仲达",
"安 思然",
"安 子昂",
"安 子明",
"安 子初",
"安 文师",
"安 世英",
"安 敬达",
"安 公昭",
"安 文先",
"安 文则",
"安 温舒",
"安 子正",
"安 君肃",
"安 彦英",
"安 文进",
"安 季宁",
"安 孔璋",
"安 元龙",
"安 公台",
"安 元悌",
"安 文奥",
"安 玄伯",
"安 元方",
"安 敬宗",
"安 子烈",
"安 元耀",
"安 温伯",
"安 公玮",
"安 长文",
"安 孔和",
"安 文和",
"安 恭夏",
"安 文公",
"安 曼倩",
"安 文若",
"安 景倩",
"安 ",
"常艺芯",
"常彦龙",
"常君昊",
"常子扬",
"常雨晴",
"常元起",
"常威璜",
"常梓云",
"常伯奢",
"常子伯",
"常公威",
"常曜岩",
"常昕阳",
"常晨欣",
"常世元",
"常子远",
"常元宗",
"常子义",
"常仲宣",
"常孔休",
"常义权",
"常文向",
"常佐治",
"常文则",
"常文谦",
"常子布",
"常文远",
"常康成",
"常士会",
"常正礼",
"常孝连",
"常彦靖",
"常玄风",
"常威彦",
"常子许",
"常文祥",
"常梦雯",
"常悦菲",
"常予馨",
"常婧馨",
"常婷儿",
"常圣楠",
"常芷欣",
"常心怡",
"常乐彤",
"常靖瑶",
"常艺婷",
"常樱璐",
"常婉琳",
"常婉儿",
"常倩儿",
"常蝶莺",
"常紫婉",
"常伯玉",
"常盛衡",
"常承伯",
"常子雍",
"常元伯",
"常元泰",
"常景兴",
"常子均",
"常文举",
"常子安",
"常仲达",
"常思然",
"常子昂",
"常子明",
"常子初",
"常文师",
"常世英",
"常敬达",
"常公昭",
"常文先",
"常文则",
"常温舒",
"常子正",
"常君肃",
"常彦英",
"常文进",
"常季宁",
"常孔璋",
"常元龙",
"常公台",
"常元悌",
"常文奥",
"常玄伯",
"常元方",
"常敬宗",
"常子烈",
"常元耀",
"常温伯",
"常公玮",
"常长文",
"常孔和",
"常文和",
"常恭夏",
"常文公",
"常曼倩",
"常文若",
"常景倩",
"常 ",
"乐 艺芯",
"乐 彦龙",
"乐 君昊",
"乐 子扬",
"乐 雨晴",
"乐 元起",
"乐 威璜",
"乐 梓云",
"乐 伯奢",
"乐 子伯",
"乐 公威",
"乐 曜岩",
"乐 昕阳",
"乐 晨欣",
"乐 世元",
"乐 子远",
"乐 元宗",
"乐 子义",
"乐 仲宣",
"乐 孔休",
"乐 义权",
"乐 文向",
"乐 佐治",
"乐 文则",
"乐 文谦",
"乐 子布",
"乐 文远",
"乐 康成",
"乐 士会",
"乐 正礼",
"乐 孝连",
"乐 彦靖",
"乐 玄风",
"乐 威彦",
"乐 子许",
"乐 文祥",
"乐 梦雯",
"乐 悦菲",
"乐 予馨",
"乐 婧馨",
"乐 婷儿",
"乐 圣楠",
"乐 芷欣",
"乐 心怡",
"乐 乐彤",
"乐 靖瑶",
"乐 艺婷",
"乐 樱璐",
"乐 婉琳",
"乐 婉儿",
"乐 倩儿",
"乐 蝶莺",
"乐 紫婉",
"乐 伯玉",
"乐 盛衡",
"乐 承伯",
"乐 子雍",
"乐 元伯",
"乐 元泰",
"乐 景兴",
"乐 子均",
"乐 文举",
"乐 子安",
"乐 仲达",
"乐 思然",
"乐 子昂",
"乐 子明",
"乐 子初",
"乐 文师",
"乐 世英",
"乐 敬达",
"乐 公昭",
"乐 文先",
"乐 文则",
"乐 温舒",
"乐 子正",
"乐 君肃",
"乐 彦英",
"乐 文进",
"乐 季宁",
"乐 孔璋",
"乐 元龙",
"乐 公台",
"乐 元悌",
"乐 文奥",
"乐 玄伯",
"乐 元方",
"乐 敬宗",
"乐 子烈",
"乐 元耀",
"乐 温伯",
"乐 公玮",
"乐 长文",
"乐 孔和",
"乐 文和",
"乐 恭夏",
"乐 文公",
"乐 曼倩",
"乐 文若",
"乐 景倩",
"乐 ",
"于 艺芯",
"于 彦龙",
"于 君昊",
"于 子扬",
"于 雨晴",
"于 元起",
"于 威璜",
"于 梓云",
"于 伯奢",
"于 子伯",
"于 公威",
"于 曜岩",
"于 昕阳",
"于 晨欣",
"于 世元",
"于 子远",
"于 元宗",
"于 子义",
"于 仲宣",
"于 孔休",
"于 义权",
"于 文向",
"于 佐治",
"于 文则",
"于 文谦",
"于 子布",
"于 文远",
"于 康成",
"于 士会",
"于 正礼",
"于 孝连",
"于 彦靖",
"于 玄风",
"于 威彦",
"于 子许",
"于 文祥",
"于 梦雯",
"于 悦菲",
"于 予馨",
"于 婧馨",
"于 婷儿",
"于 圣楠",
"于 芷欣",
"于 心怡",
"于 乐彤",
"于 靖瑶",
"于 艺婷",
"于 樱璐",
"于 婉琳",
"于 婉儿",
"于 倩儿",
"于 蝶莺",
"于 紫婉",
"于 伯玉",
"于 盛衡",
"于 承伯",
"于 子雍",
"于 元伯",
"于 元泰",
"于 景兴",
"于 子均",
"于 文举",
"于 子安",
"于 仲达",
"于 思然",
"于 子昂",
"于 子明",
"于 子初",
"于 文师",
"于 世英",
"于 敬达",
"于 公昭",
"于 文先",
"于 文则",
"于 温舒",
"于 子正",
"于 君肃",
"于 彦英",
"于 文进",
"于 季宁",
"于 孔璋",
"于 元龙",
"于 公台",
"于 元悌",
"于 文奥",
"于 玄伯",
"于 元方",
"于 敬宗",
"于 子烈",
"于 元耀",
"于 温伯",
"于 公玮",
"于 长文",
"于 孔和",
"于 文和",
"于 恭夏",
"于 文公",
"于 曼倩",
"于 文若",
"于 景倩",
"于 ",
"穆 艺芯",
"穆 彦龙",
"穆 君昊",
"穆 子扬",
"穆 雨晴",
"穆 元起",
"穆 威璜",
"穆 梓云",
"穆 伯奢",
"穆 子伯",
"穆 公威",
"穆 曜岩",
"穆 昕阳",
"穆 晨欣",
"穆 世元",
"穆 子远",
"穆 元宗",
"穆 子义",
"穆 仲宣",
"穆 孔休",
"穆 义权",
"穆 文向",
"穆 佐治",
"穆 文则",
"穆 文谦",
"穆 子布",
"穆 文远",
"穆 康成",
"穆 士会",
"穆 正礼",
"穆 孝连",
"穆 彦靖",
"穆 玄风",
"穆 威彦",
"穆 子许",
"穆 文祥",
"穆 梦雯",
"穆 悦菲",
"穆 予馨",
"穆 婧馨",
"穆 婷儿",
"穆 圣楠",
"穆 芷欣",
"穆 心怡",
"穆 乐彤",
"穆 靖瑶",
"穆 艺婷",
"穆 樱璐",
"穆 婉琳",
"穆 婉儿",
"穆 倩儿",
"穆 蝶莺",
"穆 紫婉",
"穆 伯玉",
"穆 盛衡",
"穆 承伯",
"穆 子雍",
"穆 元伯",
"穆 元泰",
"穆 景兴",
"穆 子均",
"穆 文举",
"穆 子安",
"穆 仲达",
"穆 思然",
"穆 子昂",
"穆 子明",
"穆 子初",
"穆 文师",
"穆 世英",
"穆 敬达",
"穆 公昭",
"穆 文先",
"穆 文则",
"穆 温舒",
"穆 子正",
"穆 君肃",
"穆 彦英",
"穆 文进",
"穆 季宁",
"穆 孔璋",
"穆 元龙",
"穆 公台",
"穆 元悌",
"穆 文奥",
"穆 玄伯",
"穆 元方",
"穆 敬宗",
"穆 子烈",
"穆 元耀",
"穆 温伯",
"穆 公玮",
"穆 长文",
"穆 孔和",
"穆 文和",
"穆 恭夏",
"穆 文公",
"穆 曼倩",
"穆 文若",
"穆 景倩",
"穆 ",
"萧 艺芯",
"萧 彦龙",
"萧 君昊",
"萧 子扬",
"萧 雨晴",
"萧 元起",
"萧 威璜",
"萧 梓云",
"萧 伯奢",
"萧 子伯",
"萧 公威",
"萧 曜岩",
"萧 昕阳",
"萧 晨欣",
"萧 世元",
"萧 子远",
"萧 元宗",
"萧 子义",
"萧 仲宣",
"萧 孔休",
"萧 义权",
"萧 文向",
"萧 佐治",
"萧 文则",
"萧 文谦",
"萧 子布",
"萧 文远",
"萧 康成",
"萧 士会",
"萧 正礼",
"萧 孝连",
"萧 彦靖",
"萧 玄风",
"萧 威彦",
"萧 子许",
"萧 文祥",
"萧 梦雯",
"萧 悦菲",
"萧 予馨",
"萧 婧馨",
"萧 婷儿",
"萧 圣楠",
"萧 芷欣",
"萧 心怡",
"萧 乐彤",
"萧 靖瑶",
"萧 艺婷",
"萧 樱璐",
"萧 婉琳",
"萧 婉儿",
"萧 倩儿",
"萧 蝶莺",
"萧 紫婉",
"萧 伯玉",
"萧 盛衡",
"萧 承伯",
"萧 子雍",
"萧 元伯",
"萧 元泰",
"萧 景兴",
"萧 子均",
"萧 文举",
"萧 子安",
"萧 仲达",
"萧 思然",
"萧 子昂",
"萧 子明",
"萧 子初",
"萧 文师",
"萧 世英",
"萧 敬达",
"萧 公昭",
"萧 文先",
"萧 文则",
"萧 温舒",
"萧 子正",
"萧 君肃",
"萧 彦英",
"萧 文进",
"萧 季宁",
"萧 孔璋",
"萧 元龙",
"萧 公台",
"萧 元悌",
"萧 文奥",
"萧 玄伯",
"萧 元方",
"萧 敬宗",
"萧 子烈",
"萧 元耀",
"萧 温伯",
"萧 公玮",
"萧 长文",
"萧 孔和",
"萧 文和",
"萧 恭夏",
"萧 文公",
"萧 曼倩",
"萧 文若",
"萧 景倩",
"萧 ",
"姚 艺芯",
"姚 彦龙",
"姚 君昊",
"姚 子扬",
"姚 雨晴",
"姚 元起",
"姚 威璜",
"姚 梓云",
"姚 伯奢",
"姚 子伯",
"姚 公威",
"姚 曜岩",
"姚 昕阳",
"姚 晨欣",
"姚 世元",
"姚 子远",
"姚 元宗",
"姚 子义",
"姚 仲宣",
"姚 孔休",
"姚 义权",
"姚 文向",
"姚 佐治",
"姚 文则",
"姚 文谦",
"姚 子布",
"姚 文远",
"姚 康成",
"姚 士会",
"姚 正礼",
"姚 孝连",
"姚 彦靖",
"姚 玄风",
"姚 威彦",
"姚 子许",
"姚 文祥",
"姚 梦雯",
"姚 悦菲",
"姚 予馨",
"姚 婧馨",
"姚 婷儿",
"姚 圣楠",
"姚 芷欣",
"姚 心怡",
"姚 乐彤",
"姚 靖瑶",
"姚 艺婷",
"姚 樱璐",
"姚 婉琳",
"姚 婉儿",
"姚 倩儿",
"姚 蝶莺",
"姚 紫婉",
"姚 伯玉",
"姚 盛衡",
"姚 承伯",
"姚 子雍",
"姚 元伯",
"姚 元泰",
"姚 景兴",
"姚 子均",
"姚 文举",
"姚 子安",
"姚 仲达",
"姚 思然",
"姚 子昂",
"姚 子明",
"姚 子初",
"姚 文师",
"姚 世英",
"姚 敬达",
"姚 公昭",
"姚 文先",
"姚 文则",
"姚 温舒",
"姚 子正",
"姚 君肃",
"姚 彦英",
"姚 文进",
"姚 季宁",
"姚 孔璋",
"姚 元龙",
"姚 公台",
"姚 元悌",
"姚 文奥",
"姚 玄伯",
"姚 元方",
"姚 敬宗",
"姚 子烈",
"姚 元耀",
"姚 温伯",
"姚 公玮",
"姚 长文",
"姚 孔和",
"姚 文和",
"姚 恭夏",
"姚 文公",
"姚 曼倩",
"姚 文若",
"姚 景倩",
"姚 ",
"邓艺芯",
"邓彦龙",
"邓君昊",
"邓子扬",
"邓雨晴",
"邓元起",
"邓威璜",
"邓梓云",
"邓伯奢",
"邓子伯",
"邓公威",
"邓曜岩",
"邓昕阳",
"邓晨欣",
"邓世元",
"邓子远",
"邓元宗",
"邓子义",
"邓仲宣",
"邓孔休",
"邓义权",
"邓文向",
"邓佐治",
"邓文则",
"邓文谦",
"邓子布",
"邓文远",
"邓康成",
"邓士会",
"邓正礼",
"邓孝连",
"邓彦靖",
"邓玄风",
"邓威彦",
"邓子许",
"邓文祥",
"邓梦雯",
"邓悦菲",
"邓予馨",
"邓婧馨",
"邓婷儿",
"邓圣楠",
"邓芷欣",
"邓心怡",
"邓乐彤",
"邓靖瑶",
"邓艺婷",
"邓樱璐",
"邓婉琳",
"邓婉儿",
"邓倩儿",
"邓蝶莺",
"邓紫婉",
"邓伯玉",
"邓盛衡",
"邓承伯",
"邓子雍",
"邓元伯",
"邓元泰",
"邓景兴",
"邓子均",
"邓文举",
"邓子安",
"邓仲达",
"邓思然",
"邓子昂",
"邓子明",
"邓子初",
"邓文师",
"邓世英",
"邓敬达",
"邓公昭",
"邓文先",
"邓文则",
"邓温舒",
"邓子正",
"邓君肃",
"邓彦英",
"邓文进",
"邓季宁",
"邓孔璋",
"邓元龙",
"邓公台",
"邓元悌",
"邓文奥",
"邓玄伯",
"邓元方",
"邓敬宗",
"邓子烈",
"邓元耀",
"邓温伯",
"邓公玮",
"邓长文",
"邓孔和",
"邓文和",
"邓恭夏",
"邓文公",
"邓曼倩",
"邓文若",
"邓景倩",
"邓 ",
"牛艺芯",
"牛彦龙",
"牛君昊",
"牛子扬",
"牛雨晴",
"牛元起",
"牛威璜",
"牛梓云",
"牛伯奢",
"牛子伯",
"牛公威",
"牛曜岩",
"牛昕阳",
"牛晨欣",
"牛世元",
"牛子远",
"牛元宗",
"牛子义",
"牛仲宣",
"牛孔休",
"牛义权",
"牛文向",
"牛佐治",
"牛文则",
"牛文谦",
"牛子布",
"牛文远",
"牛康成",
"牛士会",
"牛正礼",
"牛孝连",
"牛彦靖",
"牛玄风",
"牛威彦",
"牛子许",
"牛文祥",
"牛梦雯",
"牛悦菲",
"牛予馨",
"牛婧馨",
"牛婷儿",
"牛圣楠",
"牛芷欣",
"牛心怡",
"牛乐彤",
"牛靖瑶",
"牛艺婷",
"牛樱璐",
"牛婉琳",
"牛婉儿",
"牛倩儿",
"牛蝶莺",
"牛紫婉",
"牛伯玉",
"牛盛衡",
"牛承伯",
"牛子雍",
"牛元伯",
"牛元泰",
"牛景兴",
"牛子均",
"牛文举",
"牛子安",
"牛仲达",
"牛思然",
"牛子昂",
"牛子明",
"牛子初",
"牛文师",
"牛世英",
"牛敬达",
"牛公昭",
"牛文先",
"牛文则",
"牛温舒",
"牛子正",
"牛君肃",
"牛彦英",
"牛文进",
"牛季宁",
"牛孔璋",
"牛元龙",
"牛公台",
"牛元悌",
"牛文奥",
"牛玄伯",
"牛元方",
"牛敬宗",
"牛子烈",
"牛元耀",
"牛温伯",
"牛公玮",
"牛长文",
"牛孔和",
"牛文和",
"牛恭夏",
"牛文公",
"牛曼倩",
"牛文若",
"牛景倩",
"牛 ",
"牧艺芯",
"牧彦龙",
"牧君昊",
"牧子扬",
"牧雨晴",
"牧元起",
"牧威璜",
"牧梓云",
"牧伯奢",
"牧子伯",
"牧公威",
"牧曜岩",
"牧昕阳",
"牧晨欣",
"牧世元",
"牧子远",
"牧元宗",
"牧子义",
"牧仲宣",
"牧孔休",
"牧义权",
"牧文向",
"牧佐治",
"牧文则",
"牧文谦",
"牧子布",
"牧文远",
"牧康成",
"牧士会",
"牧正礼",
"牧孝连",
"牧彦靖",
"牧玄风",
"牧威彦",
"牧子许",
"牧文祥",
"牧梦雯",
"牧悦菲",
"牧予馨",
"牧婧馨",
"牧婷儿",
"牧圣楠",
"牧芷欣",
"牧心怡",
"牧乐彤",
"牧靖瑶",
"牧艺婷",
"牧樱璐",
"牧婉琳",
"牧婉儿",
"牧倩儿",
"牧蝶莺",
"牧紫婉",
"牧伯玉",
"牧盛衡",
"牧承伯",
"牧子雍",
"牧元伯",
"牧元泰",
"牧景兴",
"牧子均",
"牧文举",
"牧子安",
"牧仲达",
"牧思然",
"牧子昂",
"牧子明",
"牧子初",
"牧文师",
"牧世英",
"牧敬达",
"牧公昭",
"牧文先",
"牧文则",
"牧温舒",
"牧子正",
"牧君肃",
"牧彦英",
"牧文进",
"牧季宁",
"牧孔璋",
"牧元龙",
"牧公台",
"牧元悌",
"牧文奥",
"牧玄伯",
"牧元方",
"牧敬宗",
"牧子烈",
"牧元耀",
"牧温伯",
"牧公玮",
"牧长文",
"牧孔和",
"牧文和",
"牧恭夏",
"牧文公",
"牧曼倩",
"牧文若",
"牧景倩",
"牧 ",
"龙艺芯",
"龙彦龙",
"龙君昊",
"龙子扬",
"龙雨晴",
"龙元起",
"龙威璜",
"龙梓云",
"龙伯奢",
"龙子伯",
"龙公威",
"龙曜岩",
"龙昕阳",
"龙晨欣",
"龙世元",
"龙子远",
"龙元宗",
"龙子义",
"龙仲宣",
"龙孔休",
"龙义权",
"龙文向",
"龙佐治",
"龙文则",
"龙文谦",
"龙子布",
"龙文远",
"龙康成",
"龙士会",
"龙正礼",
"龙孝连",
"龙彦靖",
"龙玄风",
"龙威彦",
"龙子许",
"龙文祥",
"龙梦雯",
"龙悦菲",
"龙予馨",
"龙婧馨",
"龙婷儿",
"龙圣楠",
"龙芷欣",
"龙心怡",
"龙乐彤",
"龙靖瑶",
"龙艺婷",
"龙樱璐",
"龙婉琳",
"龙婉儿",
"龙倩儿",
"龙蝶莺",
"龙紫婉",
"龙伯玉",
"龙盛衡",
"龙承伯",
"龙子雍",
"龙元伯",
"龙元泰",
"龙景兴",
"龙子均",
"龙文举",
"龙子安",
"龙仲达",
"龙思然",
"龙子昂",
"龙子明",
"龙子初",
"龙文师",
"龙世英",
"龙敬达",
"龙公昭",
"龙文先",
"龙文则",
"龙温舒",
"龙子正",
"龙君肃",
"龙彦英",
"龙文进",
"龙季宁",
"龙孔璋",
"龙元龙",
"龙公台",
"龙元悌",
"龙文奥",
"龙玄伯",
"龙元方",
"龙敬宗",
"龙子烈",
"龙元耀",
"龙温伯",
"龙公玮",
"龙长文",
"龙孔和",
"龙文和",
"龙恭夏",
"龙文公",
"龙曼倩",
"龙文若",
"龙景倩",
"龙 ",
"文艺芯",
"文彦龙",
"文君昊",
"文子扬",
"文雨晴",
"文元起",
"文威璜",
"文梓云",
"文伯奢",
"文子伯",
"文公威",
"文曜岩",
"文昕阳",
"文晨欣",
"文世元",
"文子远",
"文元宗",
"文子义",
"文仲宣",
"文孔休",
"文义权",
"文文向",
"文佐治",
"文文则",
"文文谦",
"文子布",
"文文远",
"文康成",
"文士会",
"文正礼",
"文孝连",
"文彦靖",
"文玄风",
"文威彦",
"文子许",
"文文祥",
"文梦雯",
"文悦菲",
"文予馨",
"文婧馨",
"文婷儿",
"文圣楠",
"文芷欣",
"文心怡",
"文乐彤",
"文靖瑶",
"文艺婷",
"文樱璐",
"文婉琳",
"文婉儿",
"文倩儿",
"文蝶莺",
"文紫婉",
"文伯玉",
"文盛衡",
"文承伯",
"文子雍",
"文元伯",
"文元泰",
"文景兴",
"文子均",
"文文举",
"文子安",
"文仲达",
"文思然",
"文子昂",
"文子明",
"文子初",
"文文师",
"文世英",
"文敬达",
"文公昭",
"文文先",
"文文则",
"文温舒",
"文子正",
"文君肃",
"文彦英",
"文文进",
"文季宁",
"文孔璋",
"文元龙",
"文公台",
"文元悌",
"文文奥",
"文玄伯",
"文元方",
"文敬宗",
"文子烈",
"文元耀",
"文温伯",
"文公玮",
"文长文",
"文孔和",
"文文和",
"文恭夏",
"文文公",
"文曼倩",
"文文若",
"文景倩",
"文 ",
"杜艺芯",
"杜彦龙",
"杜君昊",
"杜子扬",
"杜雨晴",
"杜元起",
"杜威璜",
"杜梓云",
"杜伯奢",
"杜子伯",
"杜公威",
"杜曜岩",
"杜昕阳",
"杜晨欣",
"杜世元",
"杜子远",
"杜元宗",
"杜子义",
"杜仲宣",
"杜孔休",
"杜义权",
"杜文向",
"杜佐治",
"杜文则",
"杜文谦",
"杜子布",
"杜文远",
"杜康成",
"杜士会",
"杜正礼",
"杜孝连",
"杜彦靖",
"杜玄风",
"杜威彦",
"杜子许",
"杜文祥",
"杜梦雯",
"杜悦菲",
"杜予馨",
"杜婧馨",
"杜婷儿",
"杜圣楠",
"杜芷欣",
"杜心怡",
"杜乐彤",
"杜靖瑶",
"杜艺婷",
"杜樱璐",
"杜婉琳",
"杜婉儿",
"杜倩儿",
"杜蝶莺",
"杜紫婉",
"杜伯玉",
"杜盛衡",
"杜承伯",
"杜子雍",
"杜元伯",
"杜元泰",
"杜景兴",
"杜子均",
"杜文举",
"杜子安",
"杜仲达",
"杜思然",
"杜子昂",
"杜子明",
"杜子初",
"杜文师",
"杜世英",
"杜敬达",
"杜公昭",
"杜文先",
"杜文则",
"杜温舒",
"杜子正",
"杜君肃",
"杜彦英",
"杜文进",
"杜季宁",
"杜孔璋",
"杜元龙",
"杜公台",
"杜元悌",
"杜文奥",
"杜玄伯",
"杜元方",
"杜敬宗",
"杜子烈",
"杜元耀",
"杜温伯",
"杜公玮",
"杜长文",
"杜孔和",
"杜文和",
"杜恭夏",
"杜文公",
"杜曼倩",
"杜文若",
"杜景倩",
"杜 ",
"乔艺芯",
"乔彦龙",
"乔君昊",
"乔子扬",
"乔雨晴",
"乔元起",
"乔威璜",
"乔梓云",
"乔伯奢",
"乔子伯",
"乔公威",
"乔曜岩",
"乔昕阳",
"乔晨欣",
"乔世元",
"乔子远",
"乔元宗",
"乔子义",
"乔仲宣",
"乔孔休",
"乔义权",
"乔文向",
"乔佐治",
"乔文则",
"乔文谦",
"乔子布",
"乔文远",
"乔康成",
"乔士会",
"乔正礼",
"乔孝连",
"乔彦靖",
"乔玄风",
"乔威彦",
"乔子许",
"乔文祥",
"乔梦雯",
"乔悦菲",
"乔予馨",
"乔婧馨",
"乔婷儿",
"乔圣楠",
"乔芷欣",
"乔心怡",
"乔乐彤",
"乔靖瑶",
"乔艺婷",
"乔樱璐",
"乔婉琳",
"乔婉儿",
"乔倩儿",
"乔蝶莺",
"乔紫婉",
"乔伯玉",
"乔盛衡",
"乔承伯",
"乔子雍",
"乔元伯",
"乔元泰",
"乔景兴",
"乔子均",
"乔文举",
"乔子安",
"乔仲达",
"乔思然",
"乔子昂",
"乔子明",
"乔子初",
"乔文师",
"乔世英",
"乔敬达",
"乔公昭",
"乔文先",
"乔文则",
"乔温舒",
"乔子正",
"乔君肃",
"乔彦英",
"乔文进",
"乔季宁",
"乔孔璋",
"乔元龙",
"乔公台",
"乔元悌",
"乔文奥",
"乔玄伯",
"乔元方",
"乔敬宗",
"乔子烈",
"乔元耀",
"乔温伯",
"乔公玮",
"乔长文",
"乔孔和",
"乔文和",
"乔恭夏",
"乔文公",
"乔曼倩",
"乔文若",
"乔景倩",
"乔 ",
"白艺芯",
"白彦龙",
"白君昊",
"白子扬",
"白雨晴",
"白元起",
"白威璜",
"白梓云",
"白伯奢",
"白子伯",
"白公威",
"白曜岩",
"白昕阳",
"白晨欣",
"白世元",
"白子远",
"白元宗",
"白子义",
"白仲宣",
"白孔休",
"白义权",
"白文向",
"白佐治",
"白文则",
"白文谦",
"白子布",
"白文远",
"白康成",
"白士会",
"白正礼",
"白孝连",
"白彦靖",
"白玄风",
"白威彦",
"白子许",
"白文祥",
"白梦雯",
"白悦菲",
"白予馨",
"白婧馨",
"白婷儿",
"白圣楠",
"白芷欣",
"白心怡",
"白乐彤",
"白靖瑶",
"白艺婷",
"白樱璐",
"白婉琳",
"白婉儿",
"白倩儿",
"白蝶莺",
"白紫婉",
"白伯玉",
"白盛衡",
"白承伯",
"白子雍",
"白元伯",
"白元泰",
"白景兴",
"白子均",
"白文举",
"白子安",
"白仲达",
"白思然",
"白子昂",
"白子明",
"白子初",
"白文师",
"白世英",
"白敬达",
"白公昭",
"白文先",
"白文则",
"白温舒",
"白子正",
"白君肃",
"白彦英",
"白文进",
"白季宁",
"白孔璋",
"白元龙",
"白公台",
"白元悌",
"白文奥",
"白玄伯",
"白元方",
"白敬宗",
"白子烈",
"白元耀",
"白温伯",
"白公玮",
"白长文",
"白孔和",
"白文和",
"白恭夏",
"白文公",
"白曼倩",
"白文若",
"白景倩",
"白 ",
"邵 艺芯",
"邵 彦龙",
"邵 君昊",
"邵 子扬",
"邵 雨晴",
"邵 元起",
"邵 威璜",
"邵 梓云",
"邵 伯奢",
"邵 子伯",
"邵 公威",
"邵 曜岩",
"邵 昕阳",
"邵 晨欣",
"邵 世元",
"邵 子远",
"邵 元宗",
"邵 子义",
"邵 仲宣",
"邵 孔休",
"邵 义权",
"邵 文向",
"邵 佐治",
"邵 文则",
"邵 文谦",
"邵 子布",
"邵 文远",
"邵 康成",
"邵 士会",
"邵 正礼",
"邵 孝连",
"邵 彦靖",
"邵 玄风",
"邵 威彦",
"邵 子许",
"邵 文祥",
"邵 梦雯",
"邵 悦菲",
"邵 予馨",
"邵 婧馨",
"邵 婷儿",
"邵 圣楠",
"邵 芷欣",
"邵 心怡",
"邵 乐彤",
"邵 靖瑶",
"邵 艺婷",
"邵 樱璐",
"邵 婉琳",
"邵 婉儿",
"邵 倩儿",
"邵 蝶莺",
"邵 紫婉",
"邵 伯玉",
"邵 盛衡",
"邵 承伯",
"邵 子雍",
"邵 元伯",
"邵 元泰",
"邵 景兴",
"邵 子均",
"邵 文举",
"邵 子安",
"邵 仲达",
"邵 思然",
"邵 子昂",
"邵 子明",
"邵 子初",
"邵 文师",
"邵 世英",
"邵 敬达",
"邵 公昭",
"邵 文先",
"邵 文则",
"邵 温舒",
"邵 子正",
"邵 君肃",
"邵 彦英",
"邵 文进",
"邵 季宁",
"邵 孔璋",
"邵 元龙",
"邵 公台",
"邵 元悌",
"邵 文奥",
"邵 玄伯",
"邵 元方",
"邵 敬宗",
"邵 子烈",
"邵 元耀",
"邵 温伯",
"邵 公玮",
"邵 长文",
"邵 孔和",
"邵 文和",
"邵 恭夏",
"邵 文公",
"邵 曼倩",
"邵 文若",
"邵 景倩",
"邵 ",
"江 艺芯",
"江 彦龙",
"江 君昊",
"江 子扬",
"江 雨晴",
"江 元起",
"江 威璜",
"江 梓云",
"江 伯奢",
"江 子伯",
"江 公威",
"江 曜岩",
"江 昕阳",
"江 晨欣",
"江 世元",
"江 子远",
"江 元宗",
"江 子义",
"江 仲宣",
"江 孔休",
"江 义权",
"江 文向",
"江 佐治",
"江 文则",
"江 文谦",
"江 子布",
"江 文远",
"江 康成",
"江 士会",
"江 正礼",
"江 孝连",
"江 彦靖",
"江 玄风",
"江 威彦",
"江 子许",
"江 文祥",
"江 梦雯",
"江 悦菲",
"江 予馨",
"江 婧馨",
"江 婷儿",
"江 圣楠",
"江 芷欣",
"江 心怡",
"江 乐彤",
"江 靖瑶",
"江 艺婷",
"江 樱璐",
"江 婉琳",
"江 婉儿",
"江 倩儿",
"江 蝶莺",
"江 紫婉",
"江 伯玉",
"江 盛衡",
"江 承伯",
"江 子雍",
"江 元伯",
"江 元泰",
"江 景兴",
"江 子均",
"江 文举",
"江 子安",
"江 仲达",
"江 思然",
"江 子昂",
"江 子明",
"江 子初",
"江 文师",
"江 世英",
"江 敬达",
"江 公昭",
"江 文先",
"江 文则",
"江 温舒",
"江 子正",
"江 君肃",
"江 彦英",
"江 文进",
"江 季宁",
"江 孔璋",
"江 元龙",
"江 公台",
"江 元悌",
"江 文奥",
"江 玄伯",
"江 元方",
"江 敬宗",
"江 子烈",
"江 元耀",
"江 温伯",
"江 公玮",
"江 长文",
"江 孔和",
"江 文和",
"江 恭夏",
"江 文公",
"江 曼倩",
"江 文若",
"江 景倩",
"江 ",
"花艺芯",
"花彦龙",
"花君昊",
"花子扬",
"花雨晴",
"花元起",
"花威璜",
"花梓云",
"花伯奢",
"花子伯",
"花公威",
"花曜岩",
"花昕阳",
"花晨欣",
"花世元",
"花子远",
"花元宗",
"花子义",
"花仲宣",
"花孔休",
"花义权",
"花文向",
"花佐治",
"花文则",
"花文谦",
"花子布",
"花文远",
"花康成",
"花士会",
"花正礼",
"花孝连",
"花彦靖",
"花玄风",
"花威彦",
"花子许",
"花文祥",
"花梦雯",
"花悦菲",
"花予馨",
"花婧馨",
"花婷儿",
"花圣楠",
"花芷欣",
"花心怡",
"花乐彤",
"花靖瑶",
"花艺婷",
"花樱璐",
"花婉琳",
"花婉儿",
"花倩儿",
"花蝶莺",
"花紫婉",
"花伯玉",
"花盛衡",
"花承伯",
"花子雍",
"花元伯",
"花元泰",
"花景兴",
"花子均",
"花文举",
"花子安",
"花仲达",
"花思然",
"花子昂",
"花子明",
"花子初",
"花文师",
"花世英",
"花敬达",
"花公昭",
"花文先",
"花文则",
"花温舒",
"花子正",
"花君肃",
"花彦英",
"花文进",
"花季宁",
"花孔璋",
"花元龙",
"花公台",
"花元悌",
"花文奥",
"花玄伯",
"花元方",
"花敬宗",
"花子烈",
"花元耀",
"花温伯",
"花公玮",
"花长文",
"花孔和",
"花文和",
"花恭夏",
"花文公",
"花曼倩",
"花文若",
"花景倩",
"花 ",
"易艺芯",
"易彦龙",
"易君昊",
"易子扬",
"易雨晴",
"易元起",
"易威璜",
"易梓云",
"易伯奢",
"易子伯",
"易公威",
"易曜岩",
"易昕阳",
"易晨欣",
"易世元",
"易子远",
"易元宗",
"易子义",
"易仲宣",
"易孔休",
"易义权",
"易文向",
"易佐治",
"易文则",
"易文谦",
"易子布",
"易文远",
"易康成",
"易士会",
"易正礼",
"易孝连",
"易彦靖",
"易玄风",
"易威彦",
"易子许",
"易文祥",
"易梦雯",
"易悦菲",
"易予馨",
"易婧馨",
"易婷儿",
"易圣楠",
"易芷欣",
"易心怡",
"易乐彤",
"易靖瑶",
"易艺婷",
"易樱璐",
"易婉琳",
"易婉儿",
"易倩儿",
"易蝶莺",
"易紫婉",
"易伯玉",
"易盛衡",
"易承伯",
"易子雍",
"易元伯",
"易元泰",
"易景兴",
"易子均",
"易文举",
"易子安",
"易仲达",
"易思然",
"易子昂",
"易子明",
"易子初",
"易文师",
"易世英",
"易敬达",
"易公昭",
"易文先",
"易文则",
"易温舒",
"易子正",
"易君肃",
"易彦英",
"易文进",
"易季宁",
"易孔璋",
"易元龙",
"易公台",
"易元悌",
"易文奥",
"易玄伯",
"易元方",
"易敬宗",
"易子烈",
"易元耀",
"易温伯",
"易公玮",
"易长文",
"易孔和",
"易文和",
"易恭夏",
"易文公",
"易曼倩",
"易文若",
"易景倩",
"易 ",
"尚艺芯",
"尚彦龙",
"尚君昊",
"尚子扬",
"尚雨晴",
"尚元起",
"尚威璜",
"尚梓云",
"尚伯奢",
"尚子伯",
"尚公威",
"尚曜岩",
"尚昕阳",
"尚晨欣",
"尚世元",
"尚子远",
"尚元宗",
"尚子义",
"尚仲宣",
"尚孔休",
"尚义权",
"尚文向",
"尚佐治",
"尚文则",
"尚文谦",
"尚子布",
"尚文远",
"尚康成",
"尚士会",
"尚正礼",
"尚孝连",
"尚彦靖",
"尚玄风",
"尚威彦",
"尚子许",
"尚文祥",
"尚梦雯",
"尚悦菲",
"尚予馨",
"尚婧馨",
"尚婷儿",
"尚圣楠",
"尚芷欣",
"尚心怡",
"尚乐彤",
"尚靖瑶",
"尚艺婷",
"尚樱璐",
"尚婉琳",
"尚婉儿",
"尚倩儿",
"尚蝶莺",
"尚紫婉",
"尚伯玉",
"尚盛衡",
"尚承伯",
"尚子雍",
"尚元伯",
"尚元泰",
"尚景兴",
"尚子均",
"尚文举",
"尚子安",
"尚仲达",
"尚思然",
"尚子昂",
"尚子明",
"尚子初",
"尚文师",
"尚世英",
"尚敬达",
"尚公昭",
"尚文先",
"尚文则",
"尚温舒",
"尚子正",
"尚君肃",
"尚彦英",
"尚文进",
"尚季宁",
"尚孔璋",
"尚元龙",
"尚公台",
"尚元悌",
"尚文奥",
"尚玄伯",
"尚元方",
"尚敬宗",
"尚子烈",
"尚元耀",
"尚温伯",
"尚公玮",
"尚长文",
"尚孔和",
"尚文和",
"尚恭夏",
"尚文公",
"尚曼倩",
"尚文若",
"尚景倩",
"尚 ",
"刘艺芯",
"刘彦龙",
"刘君昊",
"刘子扬",
"刘雨晴",
"刘元起",
"刘威璜",
"刘梓云",
"刘伯奢",
"刘子伯",
"刘公威",
"刘曜岩",
"刘昕阳",
"刘晨欣",
"刘世元",
"刘子远",
"刘元宗",
"刘子义",
"刘仲宣",
"刘孔休",
"刘义权",
"刘文向",
"刘佐治",
"刘文则",
"刘文谦",
"刘子布",
"刘文远",
"刘康成",
"刘士会",
"刘正礼",
"刘孝连",
"刘彦靖",
"刘玄风",
"刘威彦",
"刘子许",
"刘文祥",
"刘梦雯",
"刘悦菲",
"刘予馨",
"刘婧馨",
"刘婷儿",
"刘圣楠",
"刘芷欣",
"刘心怡",
"刘乐彤",
"刘靖瑶",
"刘艺婷",
"刘樱璐",
"刘婉琳",
"刘婉儿",
"刘倩儿",
"刘蝶莺",
"刘紫婉",
"刘伯玉",
"刘盛衡",
"刘承伯",
"刘子雍",
"刘元伯",
"刘元泰",
"刘景兴",
"刘子均",
"刘文举",
"刘子安",
"刘仲达",
"刘思然",
"刘子昂",
"刘子明",
"刘子初",
"刘文师",
"刘世英",
"刘敬达",
"刘公昭",
"刘文先",
"刘文则",
"刘温舒",
"刘子正",
"刘君肃",
"刘彦英",
"刘文进",
"刘季宁",
"刘孔璋",
"刘元龙",
"刘公台",
"刘元悌",
"刘文奥",
"刘玄伯",
"刘元方",
"刘敬宗",
"刘子烈",
"刘元耀",
"刘温伯",
"刘公玮",
"刘长文",
"刘孔和",
"刘文和",
"刘恭夏",
"刘文公",
"刘曼倩",
"刘文若",
"刘景倩",
"刘 ",
"巩艺芯",
"巩彦龙",
"巩君昊",
"巩子扬",
"巩雨晴",
"巩元起",
"巩威璜",
"巩梓云",
"巩伯奢",
"巩子伯",
"巩公威",
"巩曜岩",
"巩昕阳",
"巩晨欣",
"巩世元",
"巩子远",
"巩元宗",
"巩子义",
"巩仲宣",
"巩孔休",
"巩义权",
"巩文向",
"巩佐治",
"巩文则",
"巩文谦",
"巩子布",
"巩文远",
"巩康成",
"巩士会",
"巩正礼",
"巩孝连",
"巩彦靖",
"巩玄风",
"巩威彦",
"巩子许",
"巩文祥",
"巩梦雯",
"巩悦菲",
"巩予馨",
"巩婧馨",
"巩婷儿",
"巩圣楠",
"巩芷欣",
"巩心怡",
"巩乐彤",
"巩靖瑶",
"巩艺婷",
"巩樱璐",
"巩婉琳",
"巩婉儿",
"巩倩儿",
"巩蝶莺",
"巩紫婉",
"巩伯玉",
"巩盛衡",
"巩承伯",
"巩子雍",
"巩元伯",
"巩元泰",
"巩景兴",
"巩子均",
"巩文举",
"巩子安",
"巩仲达",
"巩思然",
"巩子昂",
"巩子明",
"巩子初",
"巩文师",
"巩世英",
"巩敬达",
"巩公昭",
"巩文先",
"巩文则",
"巩温舒",
"巩子正",
"巩君肃",
"巩彦英",
"巩文进",
"巩季宁",
"巩孔璋",
"巩元龙",
"巩公台",
"巩元悌",
"巩文奥",
"巩玄伯",
"巩元方",
"巩敬宗",
"巩子烈",
"巩元耀",
"巩温伯",
"巩公玮",
"巩长文",
"巩孔和",
"巩文和",
"巩恭夏",
"巩文公",
"巩曼倩",
"巩文若",
"巩景倩",
"巩 ",
"聂艺芯",
"聂彦龙",
"聂君昊",
"聂子扬",
"聂雨晴",
"聂元起",
"聂威璜",
"聂梓云",
"聂伯奢",
"聂子伯",
"聂公威",
"聂曜岩",
"聂昕阳",
"聂晨欣",
"聂世元",
"聂子远",
"聂元宗",
"聂子义",
"聂仲宣",
"聂孔休",
"聂义权",
"聂文向",
"聂佐治",
"聂文则",
"聂文谦",
"聂子布",
"聂文远",
"聂康成",
"聂士会",
"聂正礼",
"聂孝连",
"聂彦靖",
"聂玄风",
"聂威彦",
"聂子许",
"聂文祥",
"聂梦雯",
"聂悦菲",
"聂予馨",
"聂婧馨",
"聂婷儿",
"聂圣楠",
"聂芷欣",
"聂心怡",
"聂乐彤",
"聂靖瑶",
"聂艺婷",
"聂樱璐",
"聂婉琳",
"聂婉儿",
"聂倩儿",
"聂蝶莺",
"聂紫婉",
"聂伯玉",
"聂盛衡",
"聂承伯",
"聂子雍",
"聂元伯",
"聂元泰",
"聂景兴",
"聂子均",
"聂文举",
"聂子安",
"聂仲达",
"聂思然",
"聂子昂",
"聂子明",
"聂子初",
"聂文师",
"聂世英",
"聂敬达",
"聂公昭",
"聂文先",
"聂文则",
"聂温舒",
"聂子正",
"聂君肃",
"聂彦英",
"聂文进",
"聂季宁",
"聂孔璋",
"聂元龙",
"聂公台",
"聂元悌",
"聂文奥",
"聂玄伯",
"聂元方",
"聂敬宗",
"聂子烈",
"聂元耀",
"聂温伯",
"聂公玮",
"聂长文",
"聂孔和",
"聂文和",
"聂恭夏",
"聂文公",
"聂曼倩",
"聂文若",
"聂景倩",
"聂 ",
"武艺芯",
"武彦龙",
"武君昊",
"武子扬",
"武雨晴",
"武元起",
"武威璜",
"武梓云",
"武伯奢",
"武子伯",
"武公威",
"武曜岩",
"武昕阳",
"武晨欣",
"武世元",
"武子远",
"武元宗",
"武子义",
"武仲宣",
"武孔休",
"武义权",
"武文向",
"武佐治",
"武文则",
"武文谦",
"武子布",
"武文远",
"武康成",
"武士会",
"武正礼",
"武孝连",
"武彦靖",
"武玄风",
"武威彦",
"武子许",
"武文祥",
"武梦雯",
"武悦菲",
"武予馨",
"武婧馨",
"武婷儿",
"武圣楠",
"武芷欣",
"武心怡",
"武乐彤",
"武靖瑶",
"武艺婷",
"武樱璐",
"武婉琳",
"武婉儿",
"武倩儿",
"武蝶莺",
"武紫婉",
"武伯玉",
"武盛衡",
"武承伯",
"武子雍",
"武元伯",
"武元泰",
"武景兴",
"武子均",
"武文举",
"武子安",
"武仲达",
"武思然",
"武子昂",
"武子明",
"武子初",
"武文师",
"武世英",
"武敬达",
"武公昭",
"武文先",
"武文则",
"武温舒",
"武子正",
"武君肃",
"武彦英",
"武文进",
"武季宁",
"武孔璋",
"武元龙",
"武公台",
"武元悌",
"武文奥",
"武玄伯",
"武元方",
"武敬宗",
"武子烈",
"武元耀",
"武温伯",
"武公玮",
"武长文",
"武孔和",
"武文和",
"武恭夏",
"武文公",
"武曼倩",
"武文若",
"武景倩",
"武 ",
"甘艺芯",
"甘彦龙",
"甘君昊",
"甘子扬",
"甘雨晴",
"甘元起",
"甘威璜",
"甘梓云",
"甘伯奢",
"甘子伯",
"甘公威",
"甘曜岩",
"甘昕阳",
"甘晨欣",
"甘世元",
"甘子远",
"甘元宗",
"甘子义",
"甘仲宣",
"甘孔休",
"甘义权",
"甘文向",
"甘佐治",
"甘文则",
"甘文谦",
"甘子布",
"甘文远",
"甘康成",
"甘士会",
"甘正礼",
"甘孝连",
"甘彦靖",
"甘玄风",
"甘威彦",
"甘子许",
"甘文祥",
"甘梦雯",
"甘悦菲",
"甘予馨",
"甘婧馨",
"甘婷儿",
"甘圣楠",
"甘芷欣",
"甘心怡",
"甘乐彤",
"甘靖瑶",
"甘艺婷",
"甘樱璐",
"甘婉琳",
"甘婉儿",
"甘倩儿",
"甘蝶莺",
"甘紫婉",
"甘伯玉",
"甘盛衡",
"甘承伯",
"甘子雍",
"甘元伯",
"甘元泰",
"甘景兴",
"甘子均",
"甘文举",
"甘子安",
"甘仲达",
"甘思然",
"甘子昂",
"甘子明",
"甘子初",
"甘文师",
"甘世英",
"甘敬达",
"甘公昭",
"甘文先",
"甘文则",
"甘温舒",
"甘子正",
"甘君肃",
"甘彦英",
"甘文进",
"甘季宁",
"甘孔璋",
"甘元龙",
"甘公台",
"甘元悌",
"甘文奥",
"甘玄伯",
"甘元方",
"甘敬宗",
"甘子烈",
"甘元耀",
"甘温伯",
"甘公玮",
"甘长文",
"甘孔和",
"甘文和",
"甘恭夏",
"甘文公",
"甘曼倩",
"甘文若",
"甘景倩",
"甘 ",
"冷艺芯",
"冷彦龙",
"冷君昊",
"冷子扬",
"冷雨晴",
"冷元起",
"冷威璜",
"冷梓云",
"冷伯奢",
"冷子伯",
"冷公威",
"冷曜岩",
"冷昕阳",
"冷晨欣",
"冷世元",
"冷子远",
"冷元宗",
"冷子义",
"冷仲宣",
"冷孔休",
"冷义权",
"冷文向",
"冷佐治",
"冷文则",
"冷文谦",
"冷子布",
"冷文远",
"冷康成",
"冷士会",
"冷正礼",
"冷孝连",
"冷彦靖",
"冷玄风",
"冷威彦",
"冷子许",
"冷文祥",
"冷梦雯",
"冷悦菲",
"冷予馨",
"冷婧馨",
"冷婷儿",
"冷圣楠",
"冷芷欣",
"冷心怡",
"冷乐彤",
"冷靖瑶",
"冷艺婷",
"冷樱璐",
"冷婉琳",
"冷婉儿",
"冷倩儿",
"冷蝶莺",
"冷紫婉",
"冷伯玉",
"冷盛衡",
"冷承伯",
"冷子雍",
"冷元伯",
"冷元泰",
"冷景兴",
"冷子均",
"冷文举",
"冷子安",
"冷仲达",
"冷思然",
"冷子昂",
"冷子明",
"冷子初",
"冷文师",
"冷世英",
"冷敬达",
"冷公昭",
"冷文先",
"冷文则",
"冷温舒",
"冷子正",
"冷君肃",
"冷彦英",
"冷文进",
"冷季宁",
"冷孔璋",
"冷元龙",
"冷公台",
"冷元悌",
"冷文奥",
"冷玄伯",
"冷元方",
"冷敬宗",
"冷子烈",
"冷元耀",
"冷温伯",
"冷公玮",
"冷长文",
"冷孔和",
"冷文和",
"冷恭夏",
"冷文公",
"冷曼倩",
"冷文若",
"冷景倩",
"冷 ",
"乐正艺芯",
"乐正彦龙",
"乐正君昊",
"乐正子扬",
"乐正雨晴",
"乐正元起",
"乐正威璜",
"乐正梓云",
"乐正伯奢",
"乐正子伯",
"乐正公威",
"乐正曜岩",
"乐正昕阳",
"乐正晨欣",
"乐正世元",
"乐正子远",
"乐正元宗",
"乐正子义",
"乐正仲宣",
"乐正孔休",
"乐正义权",
"乐正文向",
"乐正佐治",
"乐正文则",
"乐正文谦",
"乐正子布",
"乐正文远",
"乐正康成",
"乐正士会",
"乐正正礼",
"乐正孝连",
"乐正彦靖",
"乐正玄风",
"乐正威彦",
"乐正子许",
"乐正文祥",
"乐正梦雯",
"乐正悦菲",
"乐正予馨",
"乐正婧馨",
"乐正婷儿",
"乐正圣楠",
"乐正芷欣",
"乐正心怡",
"乐正乐彤",
"乐正靖瑶",
"乐正艺婷",
"乐正樱璐",
"乐正婉琳",
"乐正婉儿",
"乐正倩儿",
"乐正蝶莺",
"乐正紫婉",
"乐正伯玉",
"乐正盛衡",
"乐正承伯",
"乐正子雍",
"乐正元伯",
"乐正元泰",
"乐正景兴",
"乐正子均",
"乐正文举",
"乐正子安",
"乐正仲达",
"乐正思然",
"乐正子昂",
"乐正子明",
"乐正子初",
"乐正文师",
"乐正世英",
"乐正敬达",
"乐正公昭",
"乐正文先",
"乐正文则",
"乐正温舒",
"乐正子正",
"乐正君肃",
"乐正彦英",
"乐正文进",
"乐正季宁",
"乐正孔璋",
"乐正元龙",
"乐正公台",
"乐正元悌",
"乐正文奥",
"乐正玄伯",
"乐正元方",
"乐正敬宗",
"乐正子烈",
"乐正元耀",
"乐正温伯",
"乐正公玮",
"乐正长文",
"乐正孔和",
"乐正文和",
"乐正恭夏",
"乐正文公",
"乐正曼倩",
"乐正文若",
"乐正景倩",
"乐正 ",
"欧阳艺芯",
"欧阳彦龙",
"欧阳君昊",
"欧阳子扬",
"欧阳雨晴",
"欧阳元起",
"欧阳威璜",
"欧阳梓云",
"欧阳伯奢",
"欧阳子伯",
"欧阳公威",
"欧阳曜岩",
"欧阳昕阳",
"欧阳晨欣",
"欧阳世元",
"欧阳子远",
"欧阳元宗",
"欧阳子义",
"欧阳仲宣",
"欧阳孔休",
"欧阳义权",
"欧阳文向",
"欧阳佐治",
"欧阳文则",
"欧阳文谦",
"欧阳子布",
"欧阳文远",
"欧阳康成",
"欧阳士会",
"欧阳正礼",
"欧阳孝连",
"欧阳彦靖",
"欧阳玄风",
"欧阳威彦",
"欧阳子许",
"欧阳文祥",
"欧阳梦雯",
"欧阳悦菲",
"欧阳予馨",
"欧阳婧馨",
"欧阳婷儿",
"欧阳圣楠",
"欧阳芷欣",
"欧阳心怡",
"欧阳乐彤",
"欧阳靖瑶",
"欧阳艺婷",
"欧阳樱璐",
"欧阳婉琳",
"欧阳婉儿",
"欧阳倩儿",
"欧阳蝶莺",
"欧阳紫婉",
"欧阳伯玉",
"欧阳盛衡",
"欧阳承伯",
"欧阳子雍",
"欧阳元伯",
"欧阳元泰",
"欧阳景兴",
"欧阳子均",
"欧阳文举",
"欧阳子安",
"欧阳仲达",
"欧阳思然",
"欧阳子昂",
"欧阳子明",
"欧阳子初",
"欧阳文师",
"欧阳世英",
"欧阳敬达",
"欧阳公昭",
"欧阳文先",
"欧阳文则",
"欧阳温舒",
"欧阳子正",
"欧阳君肃",
"欧阳彦英",
"欧阳文进",
"欧阳季宁",
"欧阳孔璋",
"欧阳元龙",
"欧阳公台",
"欧阳元悌",
"欧阳文奥",
"欧阳玄伯",
"欧阳元方",
"欧阳敬宗",
"欧阳子烈",
"欧阳元耀",
"欧阳温伯",
"欧阳公玮",
"欧阳长文",
"欧阳孔和",
"欧阳文和",
"欧阳恭夏",
"欧阳文公",
"欧阳曼倩",
"欧阳文若",
"欧阳景倩",
"欧阳 ",
"端木艺芯",
"端木彦龙",
"端木君昊",
"端木子扬",
"端木雨晴",
"端木元起",
"端木威璜",
"端木梓云",
"端木伯奢",
"端木子伯",
"端木公威",
"端木曜岩",
"端木昕阳",
"端木晨欣",
"端木世元",
"端木子远",
"端木元宗",
"端木子义",
"端木仲宣",
"端木孔休",
"端木义权",
"端木文向",
"端木佐治",
"端木文则",
"端木文谦",
"端木子布",
"端木文远",
"端木康成",
"端木士会",
"端木正礼",
"端木孝连",
"端木彦靖",
"端木玄风",
"端木威彦",
"端木子许",
"端木文祥",
"端木梦雯",
"端木悦菲",
"端木予馨",
"端木婧馨",
"端木婷儿",
"端木圣楠",
"端木芷欣",
"端木心怡",
"端木乐彤",
"端木靖瑶",
"端木艺婷",
"端木樱璐",
"端木婉琳",
"端木婉儿",
"端木倩儿",
"端木蝶莺",
"端木紫婉",
"端木伯玉",
"端木盛衡",
"端木承伯",
"端木子雍",
"端木元伯",
"端木元泰",
"端木景兴",
"端木子均",
"端木文举",
"端木子安",
"端木仲达",
"端木思然",
"端木子昂",
"端木子明",
"端木子初",
"端木文师",
"端木世英",
"端木敬达",
"端木公昭",
"端木文先",
"端木文则",
"端木温舒",
"端木子正",
"端木君肃",
"端木彦英",
"端木文进",
"端木季宁",
"端木孔璋",
"端木元龙",
"端木公台",
"端木元悌",
"端木文奥",
"端木玄伯",
"端木元方",
"端木敬宗",
"端木子烈",
"端木元耀",
"端木温伯",
"端木公玮",
"端木长文",
"端木孔和",
"端木文和",
"端木恭夏",
"端木文公",
"端木曼倩",
"端木文若",
"端木景倩",
"端木 ",
"上官艺芯",
"上官彦龙",
"上官君昊",
"上官子扬",
"上官雨晴",
"上官元起",
"上官威璜",
"上官梓云",
"上官伯奢",
"上官子伯",
"上官公威",
"上官曜岩",
"上官昕阳",
"上官晨欣",
"上官世元",
"上官子远",
"上官元宗",
"上官子义",
"上官仲宣",
"上官孔休",
"上官义权",
"上官文向",
"上官佐治",
"上官文则",
"上官文谦",
"上官子布",
"上官文远",
"上官康成",
"上官士会",
"上官正礼",
"上官孝连",
"上官彦靖",
"上官玄风",
"上官威彦",
"上官子许",
"上官文祥",
"上官梦雯",
"上官悦菲",
"上官予馨",
"上官婧馨",
"上官婷儿",
"上官圣楠",
"上官芷欣",
"上官心怡",
"上官乐彤",
"上官靖瑶",
"上官艺婷",
"上官樱璐",
"上官婉琳",
"上官婉儿",
"上官倩儿",
"上官蝶莺",
"上官紫婉",
"上官伯玉",
"上官盛衡",
"上官承伯",
"上官子雍",
"上官元伯",
"上官元泰",
"上官景兴",
"上官子均",
"上官文举",
"上官子安",
"上官仲达",
"上官思然",
"上官子昂",
"上官子明",
"上官子初",
"上官文师",
"上官世英",
"上官敬达",
"上官公昭",
"上官文先",
"上官文则",
"上官温舒",
"上官子正",
"上官君肃",
"上官彦英",
"上官文进",
"上官季宁",
"上官孔璋",
"上官元龙",
"上官公台",
"上官元悌",
"上官文奥",
"上官玄伯",
"上官元方",
"上官敬宗",
"上官子烈",
"上官元耀",
"上官温伯",
"上官公玮",
"上官长文",
"上官孔和",
"上官文和",
"上官恭夏",
"上官文公",
"上官曼倩",
"上官文若",
"上官景倩",
"上官 ",
"司马艺芯",
"司马彦龙",
"司马君昊",
"司马子扬",
"司马雨晴",
"司马元起",
"司马威璜",
"司马梓云",
"司马伯奢",
"司马子伯",
"司马公威",
"司马曜岩",
"司马昕阳",
"司马晨欣",
"司马世元",
"司马子远",
"司马元宗",
"司马子义",
"司马仲宣",
"司马孔休",
"司马义权",
"司马文向",
"司马佐治",
"司马文则",
"司马文谦",
"司马子布",
"司马文远",
"司马康成",
"司马士会",
"司马正礼",
"司马孝连",
"司马彦靖",
"司马玄风",
"司马威彦",
"司马子许",
"司马文祥",
"司马梦雯",
"司马悦菲",
"司马予馨",
"司马婧馨",
"司马婷儿",
"司马圣楠",
"司马芷欣",
"司马心怡",
"司马乐彤",
"司马靖瑶",
"司马艺婷",
"司马樱璐",
"司马婉琳",
"司马婉儿",
"司马倩儿",
"司马蝶莺",
"司马紫婉",
"司马伯玉",
"司马盛衡",
"司马承伯",
"司马子雍",
"司马元伯",
"司马元泰",
"司马景兴",
"司马子均",
"司马文举",
"司马子安",
"司马仲达",
"司马思然",
"司马子昂",
"司马子明",
"司马子初",
"司马文师",
"司马世英",
"司马敬达",
"司马公昭",
"司马文先",
"司马文则",
"司马温舒",
"司马子正",
"司马君肃",
"司马彦英",
"司马文进",
"司马季宁",
"司马孔璋",
"司马元龙",
"司马公台",
"司马元悌",
"司马文奥",
"司马玄伯",
"司马元方",
"司马敬宗",
"司马子烈",
"司马元耀",
"司马温伯",
"司马公玮",
"司马长文",
"司马孔和",
"司马文和",
"司马恭夏",
"司马文公",
"司马曼倩",
"司马文若",
"司马景倩",
"司马 ",
"东方艺芯",
"东方彦龙",
"东方君昊",
"东方子扬",
"东方雨晴",
"东方元起",
"东方威璜",
"东方梓云",
"东方伯奢",
"东方子伯",
"东方公威",
"东方曜岩",
"东方昕阳",
"东方晨欣",
"东方世元",
"东方子远",
"东方元宗",
"东方子义",
"东方仲宣",
"东方孔休",
"东方义权",
"东方文向",
"东方佐治",
"东方文则",
"东方文谦",
"东方子布",
"东方文远",
"东方康成",
"东方士会",
"东方正礼",
"东方孝连",
"东方彦靖",
"东方玄风",
"东方威彦",
"东方子许",
"东方文祥",
"东方梦雯",
"东方悦菲",
"东方予馨",
"东方婧馨",
"东方婷儿",
"东方圣楠",
"东方芷欣",
"东方心怡",
"东方乐彤",
"东方靖瑶",
"东方艺婷",
"东方樱璐",
"东方婉琳",
"东方婉儿",
"东方倩儿",
"东方蝶莺",
"东方紫婉",
"东方伯玉",
"东方盛衡",
"东方承伯",
"东方子雍",
"东方元伯",
"东方元泰",
"东方景兴",
"东方子均",
"东方文举",
"东方子安",
"东方仲达",
"东方思然",
"东方子昂",
"东方子明",
"东方子初",
"东方文师",
"东方世英",
"东方敬达",
"东方公昭",
"东方文先",
"东方文则",
"东方温舒",
"东方子正",
"东方君肃",
"东方彦英",
"东方文进",
"东方季宁",
"东方孔璋",
"东方元龙",
"东方公台",
"东方元悌",
"东方文奥",
"东方玄伯",
"东方元方",
"东方敬宗",
"东方子烈",
"东方元耀",
"东方温伯",
"东方公玮",
"东方长文",
"东方孔和",
"东方文和",
"东方恭夏",
"东方文公",
"东方曼倩",
"东方文若",
"东方景倩",
"东方 ",
"独孤艺芯",
"独孤彦龙",
"独孤君昊",
"独孤子扬",
"独孤雨晴",
"独孤元起",
"独孤威璜",
"独孤梓云",
"独孤伯奢",
"独孤子伯",
"独孤公威",
"独孤曜岩",
"独孤昕阳",
"独孤晨欣",
"独孤世元",
"独孤子远",
"独孤元宗",
"独孤子义",
"独孤仲宣",
"独孤孔休",
"独孤义权",
"独孤文向",
"独孤佐治",
"独孤文则",
"独孤文谦",
"独孤子布",
"独孤文远",
"独孤康成",
"独孤士会",
"独孤正礼",
"独孤孝连",
"独孤彦靖",
"独孤玄风",
"独孤威彦",
"独孤子许",
"独孤文祥",
"独孤梦雯",
"独孤悦菲",
"独孤予馨",
"独孤婧馨",
"独孤婷儿",
"独孤圣楠",
"独孤芷欣",
"独孤心怡",
"独孤乐彤",
"独孤靖瑶",
"独孤艺婷",
"独孤樱璐",
"独孤婉琳",
"独孤婉儿",
"独孤倩儿",
"独孤蝶莺",
"独孤紫婉",
"独孤伯玉",
"独孤盛衡",
"独孤承伯",
"独孤子雍",
"独孤元伯",
"独孤元泰",
"独孤景兴",
"独孤子均",
"独孤文举",
"独孤子安",
"独孤仲达",
"独孤思然",
"独孤子昂",
"独孤子明",
"独孤子初",
"独孤文师",
"独孤世英",
"独孤敬达",
"独孤公昭",
"独孤文先",
"独孤文则",
"独孤温舒",
"独孤子正",
"独孤君肃",
"独孤彦英",
"独孤文进",
"独孤季宁",
"独孤孔璋",
"独孤元龙",
"独孤公台",
"独孤元悌",
"独孤文奥",
"独孤玄伯",
"独孤元方",
"独孤敬宗",
"独孤子烈",
"独孤元耀",
"独孤温伯",
"独孤公玮",
"独孤长文",
"独孤孔和",
"独孤文和",
"独孤恭夏",
"独孤文公",
"独孤曼倩",
"独孤文若",
"独孤景倩",
"独孤 ",
"南宫艺芯",
"南宫彦龙",
"南宫君昊",
"南宫子扬",
"南宫雨晴",
"南宫元起",
"南宫威璜",
"南宫梓云",
"南宫伯奢",
"南宫子伯",
"南宫公威",
"南宫曜岩",
"南宫昕阳",
"南宫晨欣",
"南宫世元",
"南宫子远",
"南宫元宗",
"南宫子义",
"南宫仲宣",
"南宫孔休",
"南宫义权",
"南宫文向",
"南宫佐治",
"南宫文则",
"南宫文谦",
"南宫子布",
"南宫文远",
"南宫康成",
"南宫士会",
"南宫正礼",
"南宫孝连",
"南宫彦靖",
"南宫玄风",
"南宫威彦",
"南宫子许",
"南宫文祥",
"南宫梦雯",
"南宫悦菲",
"南宫予馨",
"南宫婧馨",
"南宫婷儿",
"南宫圣楠",
"南宫芷欣",
"南宫心怡",
"南宫乐彤",
"南宫靖瑶",
"南宫艺婷",
"南宫樱璐",
"南宫婉琳",
"南宫婉儿",
"南宫倩儿",
"南宫蝶莺",
"南宫紫婉",
"南宫伯玉",
"南宫盛衡",
"南宫承伯",
"南宫子雍",
"南宫元伯",
"南宫元泰",
"南宫景兴",
"南宫子均",
"南宫文举",
"南宫子安",
"南宫仲达",
"南宫思然",
"南宫子昂",
"南宫子明",
"南宫子初",
"南宫文师",
"南宫世英",
"南宫敬达",
"南宫公昭",
"南宫文先",
"南宫文则",
"南宫温舒",
"南宫子正",
"南宫君肃",
"南宫彦英",
"南宫文进",
"南宫季宁",
"南宫孔璋",
"南宫元龙",
"南宫公台",
"南宫元悌",
"南宫文奥",
"南宫玄伯",
"南宫元方",
"南宫敬宗",
"南宫子烈",
"南宫元耀",
"南宫温伯",
"南宫公玮",
"南宫长文",
"南宫孔和",
"南宫文和",
"南宫恭夏",
"南宫文公",
"南宫曼倩",
"南宫文若",
"南宫景倩",
"南宫 ",
"夏侯艺芯",
"夏侯彦龙",
"夏侯君昊",
"夏侯子扬",
"夏侯雨晴",
"夏侯元起",
"夏侯威璜",
"夏侯梓云",
"夏侯伯奢",
"夏侯子伯",
"夏侯公威",
"夏侯曜岩",
"夏侯昕阳",
"夏侯晨欣",
"夏侯世元",
"夏侯子远",
"夏侯元宗",
"夏侯子义",
"夏侯仲宣",
"夏侯孔休",
"夏侯义权",
"夏侯文向",
"夏侯佐治",
"夏侯文则",
"夏侯文谦",
"夏侯子布",
"夏侯文远",
"夏侯康成",
"夏侯士会",
"夏侯正礼",
"夏侯孝连",
"夏侯彦靖",
"夏侯玄风",
"夏侯威彦",
"夏侯子许",
"夏侯文祥",
"夏侯梦雯",
"夏侯悦菲",
"夏侯予馨",
"夏侯婧馨",
"夏侯婷儿",
"夏侯圣楠",
"夏侯芷欣",
"夏侯心怡",
"夏侯乐彤",
"夏侯靖瑶",
"夏侯艺婷",
"夏侯樱璐",
"夏侯婉琳",
"夏侯婉儿",
"夏侯倩儿",
"夏侯蝶莺",
"夏侯紫婉",
"夏侯伯玉",
"夏侯盛衡",
"夏侯承伯",
"夏侯子雍",
"夏侯元伯",
"夏侯元泰",
"夏侯景兴",
"夏侯子均",
"夏侯文举",
"夏侯子安",
"夏侯仲达",
"夏侯思然",
"夏侯子昂",
"夏侯子明",
"夏侯子初",
"夏侯文师",
"夏侯世英",
"夏侯敬达",
"夏侯公昭",
"夏侯文先",
"夏侯文则",
"夏侯温舒",
"夏侯子正",
"夏侯君肃",
"夏侯彦英",
"夏侯文进",
"夏侯季宁",
"夏侯孔璋",
"夏侯元龙",
"夏侯公台",
"夏侯元悌",
"夏侯文奥",
"夏侯玄伯",
"夏侯元方",
"夏侯敬宗",
"夏侯子烈",
"夏侯元耀",
"夏侯温伯",
"夏侯公玮",
"夏侯长文",
"夏侯孔和",
"夏侯文和",
"夏侯恭夏",
"夏侯文公",
"夏侯曼倩",
"夏侯文若",
"夏侯景倩",
"夏侯 ",
"诸葛艺芯",
"诸葛彦龙",
"诸葛君昊",
"诸葛子扬",
"诸葛雨晴",
"诸葛元起",
"诸葛威璜",
"诸葛梓云",
"诸葛伯奢",
"诸葛子伯",
"诸葛公威",
"诸葛曜岩",
"诸葛昕阳",
"诸葛晨欣",
"诸葛世元",
"诸葛子远",
"诸葛元宗",
"诸葛子义",
"诸葛仲宣",
"诸葛孔休",
"诸葛义权",
"诸葛文向",
"诸葛佐治",
"诸葛文则",
"诸葛文谦",
"诸葛子布",
"诸葛文远",
"诸葛康成",
"诸葛士会",
"诸葛正礼",
"诸葛孝连",
"诸葛彦靖",
"诸葛玄风",
"诸葛威彦",
"诸葛子许",
"诸葛文祥",
"诸葛梦雯",
"诸葛悦菲",
"诸葛予馨",
"诸葛婧馨",
"诸葛婷儿",
"诸葛圣楠",
"诸葛芷欣",
"诸葛心怡",
"诸葛乐彤",
"诸葛靖瑶",
"诸葛艺婷",
"诸葛樱璐",
"诸葛婉琳",
"诸葛婉儿",
"诸葛倩儿",
"诸葛蝶莺",
"诸葛紫婉",
"诸葛伯玉",
"诸葛盛衡",
"诸葛承伯",
"诸葛子雍",
"诸葛元伯",
"诸葛元泰",
"诸葛景兴",
"诸葛子均",
"诸葛文举",
"诸葛子安",
"诸葛仲达",
"诸葛思然",
"诸葛子昂",
"诸葛子明",
"诸葛子初",
"诸葛文师",
"诸葛世英",
"诸葛敬达",
"诸葛公昭",
"诸葛文先",
"诸葛文则",
"诸葛温舒",
"诸葛子正",
"诸葛君肃",
"诸葛彦英",
"诸葛文进",
"诸葛季宁",
"诸葛孔璋",
"诸葛元龙",
"诸葛公台",
"诸葛元悌",
"诸葛文奥",
"诸葛玄伯",
"诸葛元方",
"诸葛敬宗",
"诸葛子烈",
"诸葛元耀",
"诸葛温伯",
"诸葛公玮",
"诸葛长文",
"诸葛孔和",
"诸葛文和",
"诸葛恭夏",
"诸葛文公",
"诸葛曼倩",
"诸葛文若",
"诸葛景倩",
"诸葛 ",
"尉迟艺芯",
"尉迟彦龙",
"尉迟君昊",
"尉迟子扬",
"尉迟雨晴",
"尉迟元起",
"尉迟威璜",
"尉迟梓云",
"尉迟伯奢",
"尉迟子伯",
"尉迟公威",
"尉迟曜岩",
"尉迟昕阳",
"尉迟晨欣",
"尉迟世元",
"尉迟子远",
"尉迟元宗",
"尉迟子义",
"尉迟仲宣",
"尉迟孔休",
"尉迟义权",
"尉迟文向",
"尉迟佐治",
"尉迟文则",
"尉迟文谦",
"尉迟子布",
"尉迟文远",
"尉迟康成",
"尉迟士会",
"尉迟正礼",
"尉迟孝连",
"尉迟彦靖",
"尉迟玄风",
"尉迟威彦",
"尉迟子许",
"尉迟文祥",
"尉迟梦雯",
"尉迟悦菲",
"尉迟予馨",
"尉迟婧馨",
"尉迟婷儿",
"尉迟圣楠",
"尉迟芷欣",
"尉迟心怡",
"尉迟乐彤",
"尉迟靖瑶",
"尉迟艺婷",
"尉迟樱璐",
"尉迟婉琳",
"尉迟婉儿",
"尉迟倩儿",
"尉迟蝶莺",
"尉迟紫婉",
"尉迟伯玉",
"尉迟盛衡",
"尉迟承伯",
"尉迟子雍",
"尉迟元伯",
"尉迟元泰",
"尉迟景兴",
"尉迟子均",
"尉迟文举",
"尉迟子安",
"尉迟仲达",
"尉迟思然",
"尉迟子昂",
"尉迟子明",
"尉迟子初",
"尉迟文师",
"尉迟世英",
"尉迟敬达",
"尉迟公昭",
"尉迟文先",
"尉迟文则",
"尉迟温舒",
"尉迟子正",
"尉迟君肃",
"尉迟彦英",
"尉迟文进",
"尉迟季宁",
"尉迟孔璋",
"尉迟元龙",
"尉迟公台",
"尉迟元悌",
"尉迟文奥",
"尉迟玄伯",
"尉迟元方",
"尉迟敬宗",
"尉迟子烈",
"尉迟元耀",
"尉迟温伯",
"尉迟公玮",
"尉迟长文",
"尉迟孔和",
"尉迟文和",
"尉迟恭夏",
"尉迟文公",
"尉迟曼倩",
"尉迟文若",
"尉迟景倩",
"尉迟 ",
"轩辕艺芯",
"轩辕彦龙",
"轩辕君昊",
"轩辕子扬",
"轩辕雨晴",
"轩辕元起",
"轩辕威璜",
"轩辕梓云",
"轩辕伯奢",
"轩辕子伯",
"轩辕公威",
"轩辕曜岩",
"轩辕昕阳",
"轩辕晨欣",
"轩辕世元",
"轩辕子远",
"轩辕元宗",
"轩辕子义",
"轩辕仲宣",
"轩辕孔休",
"轩辕义权",
"轩辕文向",
"轩辕佐治",
"轩辕文则",
"轩辕文谦",
"轩辕子布",
"轩辕文远",
"轩辕康成",
"轩辕士会",
"轩辕正礼",
"轩辕孝连",
"轩辕彦靖",
"轩辕玄风",
"轩辕威彦",
"轩辕子许",
"轩辕文祥",
"轩辕梦雯",
"轩辕悦菲",
"轩辕予馨",
"轩辕婧馨",
"轩辕婷儿",
"轩辕圣楠",
"轩辕芷欣",
"轩辕心怡",
"轩辕乐彤",
"轩辕靖瑶",
"轩辕艺婷",
"轩辕樱璐",
"轩辕婉琳",
"轩辕婉儿",
"轩辕倩儿",
"轩辕蝶莺",
"轩辕紫婉",
"轩辕伯玉",
"轩辕盛衡",
"轩辕承伯",
"轩辕子雍",
"轩辕元伯",
"轩辕元泰",
"轩辕景兴",
"轩辕子均",
"轩辕文举",
"轩辕子安",
"轩辕仲达",
"轩辕思然",
"轩辕子昂",
"轩辕子明",
"轩辕子初",
"轩辕文师",
"轩辕世英",
"轩辕敬达",
"轩辕公昭",
"轩辕文先",
"轩辕文则",
"轩辕温舒",
"轩辕子正",
"轩辕君肃",
"轩辕彦英",
"轩辕文进",
"轩辕季宁",
"轩辕孔璋",
"轩辕元龙",
"轩辕公台",
"轩辕元悌",
"轩辕文奥",
"轩辕玄伯",
"轩辕元方",
"轩辕敬宗",
"轩辕子烈",
"轩辕元耀",
"轩辕温伯",
"轩辕公玮",
"轩辕长文",
"轩辕孔和",
"轩辕文和",
"轩辕恭夏",
"轩辕文公",
"轩辕曼倩",
"轩辕文若",
"轩辕景倩",
"轩辕 ",
"令狐艺芯",
"令狐彦龙",
"令狐君昊",
"令狐子扬",
"令狐雨晴",
"令狐元起",
"令狐威璜",
"令狐梓云",
"令狐伯奢",
"令狐子伯",
"令狐公威",
"令狐曜岩",
"令狐昕阳",
"令狐晨欣",
"令狐世元",
"令狐子远",
"令狐元宗",
"令狐子义",
"令狐仲宣",
"令狐孔休",
"令狐义权",
"令狐文向",
"令狐佐治",
"令狐文则",
"令狐文谦",
"令狐子布",
"令狐文远",
"令狐康成",
"令狐士会",
"令狐正礼",
"令狐孝连",
"令狐彦靖",
"令狐玄风",
"令狐威彦",
"令狐子许",
"令狐文祥",
"令狐梦雯",
"令狐悦菲",
"令狐予馨",
"令狐婧馨",
"令狐婷儿",
"令狐圣楠",
"令狐芷欣",
"令狐心怡",
"令狐乐彤",
"令狐靖瑶",
"令狐艺婷",
"令狐樱璐",
"令狐婉琳",
"令狐婉儿",
"令狐倩儿",
"令狐蝶莺",
"令狐紫婉",
"令狐伯玉",
"令狐盛衡",
"令狐承伯",
"令狐子雍",
"令狐元伯",
"令狐元泰",
"令狐景兴",
"令狐子均",
"令狐文举",
"令狐子安",
"令狐仲达",
"令狐思然",
"令狐子昂",
"令狐子明",
"令狐子初",
"令狐文师",
"令狐世英",
"令狐敬达",
"令狐公昭",
"令狐文先",
"令狐文则",
"令狐温舒",
"令狐子正",
"令狐君肃",
"令狐彦英",
"令狐文进",
"令狐季宁",
"令狐孔璋",
"令狐元龙",
"令狐公台",
"令狐元悌",
"令狐文奥",
"令狐玄伯",
"令狐元方",
"令狐敬宗",
"令狐子烈",
"令狐元耀",
"令狐温伯",
"令狐公玮",
"令狐长文",
"令狐孔和",
"令狐文和",
"令狐恭夏",
"令狐文公",
"令狐曼倩",
"令狐文若",
"令狐景倩",
"令狐 ",
"公孙艺芯",
"公孙彦龙",
"公孙君昊",
"公孙子扬",
"公孙雨晴",
"公孙元起",
"公孙威璜",
"公孙梓云",
"公孙伯奢",
"公孙子伯",
"公孙公威",
"公孙曜岩",
"公孙昕阳",
"公孙晨欣",
"公孙世元",
"公孙子远",
"公孙元宗",
"公孙子义",
"公孙仲宣",
"公孙孔休",
"公孙义权",
"公孙文向",
"公孙佐治",
"公孙文则",
"公孙文谦",
"公孙子布",
"公孙文远",
"公孙康成",
"公孙士会",
"公孙正礼",
"公孙孝连",
"公孙彦靖",
"公孙玄风",
"公孙威彦",
"公孙子许",
"公孙文祥",
"公孙梦雯",
"公孙悦菲",
"公孙予馨",
"公孙婧馨",
"公孙婷儿",
"公孙圣楠",
"公孙芷欣",
"公孙心怡",
"公孙乐彤",
"公孙靖瑶",
"公孙艺婷",
"公孙樱璐",
"公孙婉琳",
"公孙婉儿",
"公孙倩儿",
"公孙蝶莺",
"公孙紫婉",
"公孙伯玉",
"公孙盛衡",
"公孙承伯",
"公孙子雍",
"公孙元伯",
"公孙元泰",
"公孙景兴",
"公孙子均",
"公孙文举",
"公孙子安",
"公孙仲达",
"公孙思然",
"公孙子昂",
"公孙子明",
"公孙子初",
"公孙文师",
"公孙世英",
"公孙敬达",
"公孙公昭",
"公孙文先",
"公孙文则",
"公孙温舒",
"公孙子正",
"公孙君肃",
"公孙彦英",
"公孙文进",
"公孙季宁",
"公孙孔璋",
"公孙元龙",
"公孙公台",
"公孙元悌",
"公孙文奥",
"公孙玄伯",
"公孙元方",
"公孙敬宗",
"公孙子烈",
"公孙元耀",
"公孙温伯",
"公孙公玮",
"公孙长文",
"公孙孔和",
"公孙文和",
"公孙恭夏",
"公孙文公",
"公孙曼倩",
"公孙文若",
"公孙景倩",
"公孙 ",
"慕容艺芯",
"慕容彦龙",
"慕容君昊",
"慕容子扬",
"慕容雨晴",
"慕容元起",
"慕容威璜",
"慕容梓云",
"慕容伯奢",
"慕容子伯",
"慕容公威",
"慕容曜岩",
"慕容昕阳",
"慕容晨欣",
"慕容世元",
"慕容子远",
"慕容元宗",
"慕容子义",
"慕容仲宣",
"慕容孔休",
"慕容义权",
"慕容文向",
"慕容佐治",
"慕容文则",
"慕容文谦",
"慕容子布",
"慕容文远",
"慕容康成",
"慕容士会",
"慕容正礼",
"慕容孝连",
"慕容彦靖",
"慕容玄风",
"慕容威彦",
"慕容子许",
"慕容文祥",
"慕容梦雯",
"慕容悦菲",
"慕容予馨",
"慕容婧馨",
"慕容婷儿",
"慕容圣楠",
"慕容芷欣",
"慕容心怡",
"慕容乐彤",
"慕容靖瑶",
"慕容艺婷",
"慕容樱璐",
"慕容婉琳",
"慕容婉儿",
"慕容倩儿",
"慕容蝶莺",
"慕容紫婉",
"慕容伯玉",
"慕容盛衡",
"慕容承伯",
"慕容子雍",
"慕容元伯",
"慕容元泰",
"慕容景兴",
"慕容子均",
"慕容文举",
"慕容子安",
"慕容仲达",
"慕容思然",
"慕容子昂",
"慕容子明",
"慕容子初",
"慕容文师",
"慕容世英",
"慕容敬达",
"慕容公昭",
"慕容文先",
"慕容文则",
"慕容温舒",
"慕容子正",
"慕容君肃",
"慕容彦英",
"慕容文进",
"慕容季宁",
"慕容孔璋",
"慕容元龙",
"慕容公台",
"慕容元悌",
"慕容文奥",
"慕容玄伯",
"慕容元方",
"慕容敬宗",
"慕容子烈",
"慕容元耀",
"慕容温伯",
"慕容公玮",
"慕容长文",
"慕容孔和",
"慕容文和",
"慕容恭夏",
"慕容文公",
"慕容曼倩",
"慕容文若",
"慕容景倩",
"慕容 ",
"东郭艺芯",
"东郭彦龙",
"东郭君昊",
"东郭子扬",
"东郭雨晴",
"东郭元起",
"东郭威璜",
"东郭梓云",
"东郭伯奢",
"东郭子伯",
"东郭公威",
"东郭曜岩",
"东郭昕阳",
"东郭晨欣",
"东郭世元",
"东郭子远",
"东郭元宗",
"东郭子义",
"东郭仲宣",
"东郭孔休",
"东郭义权",
"东郭文向",
"东郭佐治",
"东郭文则",
"东郭文谦",
"东郭子布",
"东郭文远",
"东郭康成",
"东郭士会",
"东郭正礼",
"东郭孝连",
"东郭彦靖",
"东郭玄风",
"东郭威彦",
"东郭子许",
"东郭文祥",
"东郭梦雯",
"东郭悦菲",
"东郭予馨",
"东郭婧馨",
"东郭婷儿",
"东郭圣楠",
"东郭芷欣",
"东郭心怡",
"东郭乐彤",
"东郭靖瑶",
"东郭艺婷",
"东郭樱璐",
"东郭婉琳",
"东郭婉儿",
"东郭倩儿",
"东郭蝶莺",
"东郭紫婉",
"东郭伯玉",
"东郭盛衡",
"东郭承伯",
"东郭子雍",
"东郭元伯",
"东郭元泰",
"东郭景兴",
"东郭子均",
"东郭文举",
"东郭子安",
"东郭仲达",
"东郭思然",
"东郭子昂",
"东郭子明",
"东郭子初",
"东郭文师",
"东郭世英",
"东郭敬达",
"东郭公昭",
"东郭文先",
"东郭文则",
"东郭温舒",
"东郭子正",
"东郭君肃",
"东郭彦英",
"东郭文进",
"东郭季宁",
"东郭孔璋",
"东郭元龙",
"东郭公台",
"东郭元悌",
"东郭文奥",
"东郭玄伯",
"东郭元方",
"东郭敬宗",
"东郭子烈",
"东郭元耀",
"东郭温伯",
"东郭公玮",
"东郭长文",
"东郭孔和",
"东郭文和",
"东郭恭夏",
"东郭文公",
"东郭曼倩",
"东郭文若",
"东郭景倩",
"东郭 ",
"司空艺芯",
"司空彦龙",
"司空君昊",
"司空子扬",
"司空雨晴",
"司空元起",
"司空威璜",
"司空梓云",
"司空伯奢",
"司空子伯",
"司空公威",
"司空曜岩",
"司空昕阳",
"司空晨欣",
"司空世元",
"司空子远",
"司空元宗",
"司空子义",
"司空仲宣",
"司空孔休",
"司空义权",
"司空文向",
"司空佐治",
"司空文则",
"司空文谦",
"司空子布",
"司空文远",
"司空康成",
"司空士会",
"司空正礼",
"司空孝连",
"司空彦靖",
"司空玄风",
"司空威彦",
"司空子许",
"司空文祥",
"司空梦雯",
"司空悦菲",
"司空予馨",
"司空婧馨",
"司空婷儿",
"司空圣楠",
"司空芷欣",
"司空心怡",
"司空乐彤",
"司空靖瑶",
"司空艺婷",
"司空樱璐",
"司空婉琳",
"司空婉儿",
"司空倩儿",
"司空蝶莺",
"司空紫婉",
"司空伯玉",
"司空盛衡",
"司空承伯",
"司空子雍",
"司空元伯",
"司空元泰",
"司空景兴",
"司空子均",
"司空文举",
"司空子安",
"司空仲达",
"司空思然",
"司空子昂",
"司空子明",
"司空子初",
"司空文师",
"司空世英",
"司空敬达",
"司空公昭",
"司空文先",
"司空文则",
"司空温舒",
"司空子正",
"司空君肃",
"司空彦英",
"司空文进",
"司空季宁",
"司空孔璋",
"司空元龙",
"司空公台",
"司空元悌",
"司空文奥",
"司空玄伯",
"司空元方",
"司空敬宗",
"司空子烈",
"司空元耀",
"司空温伯",
"司空公玮",
"司空长文",
"司空孔和",
"司空文和",
"司空恭夏",
"司空文公",
"司空曼倩",
"司空文若",
"司空景倩",
"司空 ",
"仲长艺芯",
"仲长彦龙",
"仲长君昊",
"仲长子扬",
"仲长雨晴",
"仲长元起",
"仲长威璜",
"仲长梓云",
"仲长伯奢",
"仲长子伯",
"仲长公威",
"仲长曜岩",
"仲长昕阳",
"仲长晨欣",
"仲长世元",
"仲长子远",
"仲长元宗",
"仲长子义",
"仲长仲宣",
"仲长孔休",
"仲长义权",
"仲长文向",
"仲长佐治",
"仲长文则",
"仲长文谦",
"仲长子布",
"仲长文远",
"仲长康成",
"仲长士会",
"仲长正礼",
"仲长孝连",
"仲长彦靖",
"仲长玄风",
"仲长威彦",
"仲长子许",
"仲长文祥",
"仲长梦雯",
"仲长悦菲",
"仲长予馨",
"仲长婧馨",
"仲长婷儿",
"仲长圣楠",
"仲长芷欣",
"仲长心怡",
"仲长乐彤",
"仲长靖瑶",
"仲长艺婷",
"仲长樱璐",
"仲长婉琳",
"仲长婉儿",
"仲长倩儿",
"仲长蝶莺",
"仲长紫婉",
"仲长伯玉",
"仲长盛衡",
"仲长承伯",
"仲长子雍",
"仲长元伯",
"仲长元泰",
"仲长景兴",
"仲长子均",
"仲长文举",
"仲长子安",
"仲长仲达",
"仲长思然",
"仲长子昂",
"仲长子明",
"仲长子初",
"仲长文师",
"仲长世英",
"仲长敬达",
"仲长公昭",
"仲长文先",
"仲长文则",
"仲长温舒",
"仲长子正",
"仲长君肃",
"仲长彦英",
"仲长文进",
"仲长季宁",
"仲长孔璋",
"仲长元龙",
"仲长公台",
"仲长元悌",
"仲长文奥",
"仲长玄伯",
"仲长元方",
"仲长敬宗",
"仲长子烈",
"仲长元耀",
"仲长温伯",
"仲长公玮",
"仲长长文",
"仲长孔和",
"仲长文和",
"仲长恭夏",
"仲长文公",
"仲长曼倩",
"仲长文若",
"仲长景倩",
"仲长 ",
"子书艺芯",
"子书彦龙",
"子书君昊",
"子书子扬",
"子书雨晴",
"子书元起",
"子书威璜",
"子书梓云",
"子书伯奢",
"子书子伯",
"子书公威",
"子书曜岩",
"子书昕阳",
"子书晨欣",
"子书世元",
"子书子远",
"子书元宗",
"子书子义",
"子书仲宣",
"子书孔休",
"子书义权",
"子书文向",
"子书佐治",
"子书文则",
"子书文谦",
"子书子布",
"子书文远",
"子书康成",
"子书士会",
"子书正礼",
"子书孝连",
"子书彦靖",
"子书玄风",
"子书威彦",
"子书子许",
"子书文祥",
"子书梦雯",
"子书悦菲",
"子书予馨",
"子书婧馨",
"子书婷儿",
"子书圣楠",
"子书芷欣",
"子书心怡",
"子书乐彤",
"子书靖瑶",
"子书艺婷",
"子书樱璐",
"子书婉琳",
"子书婉儿",
"子书倩儿",
"子书蝶莺",
"子书紫婉",
"子书伯玉",
"子书盛衡",
"子书承伯",
"子书子雍",
"子书元伯",
"子书元泰",
"子书景兴",
"子书子均",
"子书文举",
"子书子安",
"子书仲达",
"子书思然",
"子书子昂",
"子书子明",
"子书子初",
"子书文师",
"子书世英",
"子书敬达",
"子书公昭",
"子书文先",
"子书文则",
"子书温舒",
"子书子正",
"子书君肃",
"子书彦英",
"子书文进",
"子书季宁",
"子书孔璋",
"子书元龙",
"子书公台",
"子书元悌",
"子书文奥",
"子书玄伯",
"子书元方",
"子书敬宗",
"子书子烈",
"子书元耀",
"子书温伯",
"子书公玮",
"子书长文",
"子书孔和",
"子书文和",
"子书恭夏",
"子书文公",
"子书曼倩",
"子书文若",
"子书景倩",
"子书 "
]
# Create 2000 test users against a locally running server (Python 2: xrange).
for i in xrange(0, 2000):
    # Step 1: register a fake device with random model/serial identifiers.
    payload = dict(model=uuid.uuid4().hex, serial=uuid.uuid4().hex)
    startup = 'http://127.0.0.1:8888/startup/'
    r = requests.get(startup, params=payload)
    # Step 2: activate the device, echoing the startup response back as params.
    active = 'http://127.0.0.1:8888/active/'
    r = requests.get(active, params=r.json())
    # Step 3: set the nickname, signed with the 'sign' value returned by /active/.
    payload = dict(_sign=r.json()['sign'], nickname=NICKNAME[i])
    userset = 'http://127.0.0.1:8888/user/set/'
    r = requests.get(userset, params=payload)
"[email protected]"
] | |
803ff5ac5b0cdf521eddb1c05b2ba9f5a0625e88 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2518/60667/257400.py | 5a34f96a54c8f08657b63d8da8e6099b6338fb0a | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | import math
houses = list(map(int, input().split(',')))
heaters = list(map(int, input().split(',')))
# Start each house's distance at infinity, then shrink it to the nearest heater.
dist = []
for i in range(len(houses)):
    dist.append(float('inf'))
for heater in heaters:
    for i in range(len(houses)):
        dist[i] = min(dist[i], math.fabs(houses[i] - heater))
# The required heater radius is the largest house-to-nearest-heater distance.
print(int(max(dist)))
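# Worked example (hypothetical input, matching the comma-separated format read
# above):
#   houses  line: "1,2,3"
#   heaters line: "2"
# The distances to the nearest heater are 1, 0 and 1, so the script prints 1,
# i.e. the smallest heater radius that covers every house.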
"[email protected]"
] | |
3b36ccf527d7f2266c29d77220345506c88dfb84 | 4a8bfa3407aa98a04ede3162f85467b1b5012fe7 | /aiogram/api/types/input_venue_message_content.py | ae89bf991e07eb8b472c705371a17a215d91e73c | [] | no_license | aiogram/tg-codegen | 07ec80814eec46f464d2490fd27b7b6b27257f1b | ba3c2f893591d45dda418dd16e0646e260afdf14 | refs/heads/master | 2022-12-09T10:44:10.781570 | 2021-11-07T23:33:25 | 2021-11-07T23:33:25 | 218,523,371 | 24 | 5 | null | 2022-12-08T08:47:43 | 2019-10-30T12:33:21 | Python | UTF-8 | Python | false | false | 1,278 | py | from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from .input_message_content import InputMessageContent
if TYPE_CHECKING:
pass
class InputVenueMessageContent(InputMessageContent):
"""
Represents the `content <https://core.telegram.org/bots/api#inputmessagecontent>`_ of a venue message to be sent as the result of an inline query.
Source: https://core.telegram.org/bots/api#inputvenuemessagecontent
"""
latitude: float
"""Latitude of the venue in degrees"""
longitude: float
"""Longitude of the venue in degrees"""
title: str
"""Name of the venue"""
address: str
"""Address of the venue"""
foursquare_id: Optional[str] = None
"""*Optional*. Foursquare identifier of the venue, if known"""
foursquare_type: Optional[str] = None
"""*Optional*. Foursquare type of the venue, if known. (For example, 'arts_entertainment/default', 'arts_entertainment/aquarium' or 'food/icecream'.)"""
google_place_id: Optional[str] = None
"""*Optional*. Google Places identifier of the venue"""
google_place_type: Optional[str] = None
"""*Optional*. Google Places type of the venue. (See `supported types <https://developers.google.com/places/web-service/supported_types>`_.)"""
| [
"[email protected]"
] | |
2aebcd46bf5395a3b680783cacda6f9c826dd5e0 | ac085e82a957da4e59beefe52fae06e8e0077e09 | /gen3va/endpoints/api/download_api.py | 9979505563b22fad1ad79b4ccb1bdbb559c60975 | [] | no_license | MaayanLab/gen3va | 9cb670f971690c911a09f633c49c2e8a35d69afc | b08755993633f5bfc710be088da327813b8df1fc | refs/heads/master | 2022-05-13T13:13:35.151752 | 2022-04-15T15:04:13 | 2022-04-15T15:04:13 | 67,866,413 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,028 | py | """Manages downloads.
"""
import os
import zipfile
import StringIO
from flask import Blueprint, Response
from substrate import Tag
from gen3va import database
from gen3va.config import Config
download_api = Blueprint('download_api',
__name__,
url_prefix='%s/download' % Config.BASE_URL)
DOWNLOAD_DIR = '%s/static/downloads' % Config.SERVER_FILE_ROOT
@download_api.route('/<tag_name>', methods=['GET'])
def test(tag_name):
"""Returns a zipped directory with one plain text file for each signature.
"""
tag = database.get(Tag, tag_name, 'name')
# Write the contents of the signatures to file and get the filenames.
filenames = _get_signature_files(tag)
# Folder name in ZIP archive which contains the above files
zip_subdir = tag.name
zip_filename = '%s.zip' % zip_subdir
# Open StringIO to grab in-memory ZIP contents
s = StringIO.StringIO()
# The zip compressor
zf = zipfile.ZipFile(s, 'w')
for fpath in filenames:
# Calculate path for file in zip
fdir, fname = os.path.split(fpath)
zip_path = os.path.join(zip_subdir, fname)
# Add file, at correct path
zf.write(fpath, zip_path)
zf.close()
# Grab ZIP file from in-memory, make response with correct MIME-type
resp = Response(s.getvalue(), mimetype='application/x-zip-compressed')
resp.headers['Content-Disposition'] = 'attachment; filename=%s' % zip_filename
# Remove files from disc
for f in filenames:
os.remove(f)
return resp
def _get_signature_files(tag):
"""Returns a list of filenames where each file has the contents of a gene
signature.
"""
filenames = []
for idx, sig in enumerate(tag.report.gene_signatures):
fname = _write_signature_to_file(idx, sig)
filenames.append(fname)
return filenames
def _write_signature_to_file(idx, gene_signature):
"""Returns the name of a file with the contents of a gene signature.
"""
name = gene_signature.name.replace('/', '')
path = '%s/%s_%s.txt' % (DOWNLOAD_DIR, idx, name)
with open(path, 'w+') as f:
rm = gene_signature.required_metadata
_write_metadata(f, 'diff_exp_method', rm.diff_exp_method)
_write_metadata(f, 'ttest_correction_method', rm.ttest_correction_method)
_write_metadata(f, 'cutoff', rm.cutoff)
_write_metadata(f, 'threshold', rm.threshold)
for om in gene_signature.filtered_optional_metadata:
_write_metadata(f, om.name, om.value)
f.write('!end_metadata\n\n')
for rg in gene_signature.combined_genes:
line = '%s\t%s\n' % (rg.gene.name, rg.value)
f.write(line)
return path
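# Sketch of the file produced by _write_signature_to_file (names and values are
# illustrative only, not taken from real data): metadata lines prefixed with
# '!', an '!end_metadata' sentinel, then one tab-separated gene/value pair per
# line:
#
#   !diff_exp_method<TAB>chdir
#   !cutoff<TAB>0.05
#   !end_metadata
#
#   STAT3<TAB>0.42
#   TP53<TAB>-0.13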
def _write_metadata(f, key, value):
"""Writes metadata key-value pair to file, encoding to UTF-8.
"""
try:
line = '!%s\t%s\n' % (key, value)
line = line.encode('utf-8')
f.write(line)
except UnicodeEncodeError as e:
print(e)
| [
"[email protected]"
] | |
a68246a09171e570c3cc0605b6f72b9b8bf385af | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/engagementfabric/v20180901preview/channel.py | d75bf027941cf1de54160c62d8d0642667c65226 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,820 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['ChannelArgs', 'Channel']
@pulumi.input_type
class ChannelArgs:
def __init__(__self__, *,
account_name: pulumi.Input[str],
channel_type: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
channel_functions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
channel_name: Optional[pulumi.Input[str]] = None,
credentials: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Channel resource.
:param pulumi.Input[str] account_name: Account Name
:param pulumi.Input[str] channel_type: The channel type
:param pulumi.Input[str] resource_group_name: Resource Group Name
:param pulumi.Input[Sequence[pulumi.Input[str]]] channel_functions: The functions to be enabled for the channel
:param pulumi.Input[str] channel_name: Channel Name
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] credentials: The channel credentials
"""
pulumi.set(__self__, "account_name", account_name)
pulumi.set(__self__, "channel_type", channel_type)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if channel_functions is not None:
pulumi.set(__self__, "channel_functions", channel_functions)
if channel_name is not None:
pulumi.set(__self__, "channel_name", channel_name)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> pulumi.Input[str]:
"""
Account Name
"""
return pulumi.get(self, "account_name")
@account_name.setter
def account_name(self, value: pulumi.Input[str]):
pulumi.set(self, "account_name", value)
@property
@pulumi.getter(name="channelType")
def channel_type(self) -> pulumi.Input[str]:
"""
The channel type
"""
return pulumi.get(self, "channel_type")
@channel_type.setter
def channel_type(self, value: pulumi.Input[str]):
pulumi.set(self, "channel_type", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Resource Group Name
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="channelFunctions")
def channel_functions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The functions to be enabled for the channel
"""
return pulumi.get(self, "channel_functions")
@channel_functions.setter
def channel_functions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "channel_functions", value)
@property
@pulumi.getter(name="channelName")
def channel_name(self) -> Optional[pulumi.Input[str]]:
"""
Channel Name
"""
return pulumi.get(self, "channel_name")
@channel_name.setter
def channel_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "channel_name", value)
@property
@pulumi.getter
def credentials(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
The channel credentials
"""
return pulumi.get(self, "credentials")
@credentials.setter
def credentials(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "credentials", value)
class Channel(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
channel_functions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
channel_name: Optional[pulumi.Input[str]] = None,
channel_type: Optional[pulumi.Input[str]] = None,
credentials: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
The EngagementFabric channel
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] account_name: Account Name
:param pulumi.Input[Sequence[pulumi.Input[str]]] channel_functions: The functions to be enabled for the channel
:param pulumi.Input[str] channel_name: Channel Name
:param pulumi.Input[str] channel_type: The channel type
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] credentials: The channel credentials
:param pulumi.Input[str] resource_group_name: Resource Group Name
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ChannelArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
The EngagementFabric channel
:param str resource_name: The name of the resource.
:param ChannelArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ChannelArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
account_name: Optional[pulumi.Input[str]] = None,
channel_functions: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
channel_name: Optional[pulumi.Input[str]] = None,
channel_type: Optional[pulumi.Input[str]] = None,
credentials: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ChannelArgs.__new__(ChannelArgs)
if account_name is None and not opts.urn:
raise TypeError("Missing required property 'account_name'")
__props__.__dict__["account_name"] = account_name
__props__.__dict__["channel_functions"] = channel_functions
__props__.__dict__["channel_name"] = channel_name
if channel_type is None and not opts.urn:
raise TypeError("Missing required property 'channel_type'")
__props__.__dict__["channel_type"] = channel_type
__props__.__dict__["credentials"] = credentials
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:engagementfabric:Channel")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Channel, __self__).__init__(
'azure-native:engagementfabric/v20180901preview:Channel',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Channel':
"""
Get an existing Channel resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ChannelArgs.__new__(ChannelArgs)
__props__.__dict__["channel_functions"] = None
__props__.__dict__["channel_type"] = None
__props__.__dict__["credentials"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
return Channel(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="channelFunctions")
def channel_functions(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
The functions to be enabled for the channel
"""
return pulumi.get(self, "channel_functions")
@property
@pulumi.getter(name="channelType")
def channel_type(self) -> pulumi.Output[str]:
"""
The channel type
"""
return pulumi.get(self, "channel_type")
@property
@pulumi.getter
def credentials(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The channel credentials
"""
return pulumi.get(self, "credentials")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The fully qualified type of the resource
"""
return pulumi.get(self, "type")
| [
"[email protected]"
] | |
8f367326fc506820a4dc0f6116e2f02a0c3a3d1e | 9d84fa1d80c4248ad304f4b1ac32cb6adfa7a0f1 | /mk_dti_report.py | d07b0c8c0884dd676b8f1e4a0fc95068c4a97fd6 | [
"MIT"
] | permissive | poldrack/dtiqa | 6a504848af736eb2c585a337568c41a11812c0b6 | 3b44e3dbec4cace8fb4bb2fcdf43e4f64a649898 | refs/heads/master | 2021-01-10T21:01:33.025637 | 2015-07-13T21:20:31 | 2015-07-13T21:20:31 | 39,037,793 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,707 | py | """
make report for QA using reportlab module
"""
from reportlab.pdfgen import canvas
import numpy as N
import time
import os
def mk_dti_report(infile,dtidir,datavars):
#imgsnr,meansfnr,spikes,badvols):
timestamp=time.strftime('%B %d, %Y: %H:%M:%S')
report_header=[]
report_header.append('QA Report: %s'%timestamp)
report_header.append('directory: %s'%os.path.dirname(infile))
report_header.append('filename: %s'%os.path.basename(infile))
report_header.append('Mean SNR: %f'%N.mean(datavars['imgsnr']))
badvols=['%d'%i for i in datavars['badvols']]
report_header.append('# potentially bad gradients: %d (%s)'%(len(datavars['badvols']),' '.join(badvols)))
c = canvas.Canvas(os.path.join(dtidir,"QA_report.pdf"))
yloc=820
stepsize=16
for line in report_header:
c.drawString(10,yloc,line)
yloc=yloc-stepsize
timeseries_to_draw=['snr.png','fd.png','interleavecorr.png','slicecorr.png']
tsfiles=[os.path.join(dtidir,t) for t in timeseries_to_draw]
ts_img_size=[467,140]
yloc=yloc-ts_img_size[1]
for imfile in tsfiles:
c.drawImage(imfile, 45,yloc,width=ts_img_size[0],height=ts_img_size[1])
yloc=yloc-ts_img_size[1]
c.showPage()
# yloc=650
# c.drawImage(os.path.join(qadir,'spike.png'),20,yloc,width=500,height=133)
yloc=330
images_to_draw=['FA.png','worst_gradient.png']
imfiles=[os.path.join(dtidir,t) for t in images_to_draw]
c.drawImage(imfiles[0],0,yloc,width=300,height=300)
c.drawImage(imfiles[1],300,yloc,width=300,height=300)
# yloc=20
# c.drawImage(imfiles[2],0,yloc,width=325,height=325)
c.save() | [
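# Hypothetical call, illustrating the contract implied by the code above:
# `datavars` must carry 'imgsnr' and 'badvols', and `dtidir` must already
# contain the PNGs referenced while the report is assembled (snr.png, fd.png,
# interleavecorr.png, slicecorr.png, FA.png, worst_gradient.png).
#
# mk_dti_report('/data/sub01/dti.nii.gz', '/data/sub01/dtiqa',
#               {'imgsnr': snr_values, 'badvols': [3, 17]})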
"[email protected]"
] | |
5bd5a62e7c57b3658b7b1fd7f54e5fffc4ca9fc9 | a14ec6e367e6a471bfc74c066fb958ef585bc269 | /2019/04/common.py | 89c29d75ecf7bdfd829c92b56ca0d45e14d2a616 | [] | no_license | jimhendy/AoC | 90641814ed431f46a8500ff0f022c6c957567563 | a1727f88bc2e6f739d65902dce188377966b3fb4 | refs/heads/master | 2023-09-02T14:48:39.860352 | 2023-08-28T08:09:19 | 2023-08-28T08:09:19 | 225,152,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | import numpy as np
import pandas as pd
def possibles(inputs):
Min, Max = (int(i) for i in inputs.split("-"))
return pd.DataFrame([list(str(i)) for i in np.arange(Min, Max)]).astype(int)
def diffs(possibles):
return possibles.diff(axis=1).fillna(0)
def counts(possibles):
counts = {
i: possibles.eq(i).sum(axis=1) for i in np.unique(possibles.values.ravel())
}
return pd.concat(counts, axis=1, sort=False)
| [
"[email protected]"
] | |
714cd772a4ca796aa9b65938d2bdb2ed0f9c22da | 5509d3b5bbcc393684f7d2fc7fc11bb12ed1911a | /env/lib/python2.7/site-packages/venusian/compat/pkgutil_26.py | 706e048c703ea6b48dc2bbb28484c11057eb85f2 | [] | no_license | jundong/CRManager | 99fd6c0eda084354d9237e11d07ef82124c22e1e | 4306bf4d2b29b19d4b3092aab152192f7d623a19 | refs/heads/master | 2021-01-21T04:47:26.125045 | 2016-07-29T15:07:04 | 2016-07-29T15:07:04 | 50,995,792 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,962 | py | """Utilities to support packages."""
# NOTE: This module must remain compatible with Python 2.3, as it is shared
# by setuptools for distribution with Python 2.3 and up.
if 1: # pragma: no cover
import os
import sys
import imp
import os.path
from types import ModuleType
__all__ = [
'get_importer', 'iter_importers', 'get_loader', 'find_loader',
'walk_packages', 'iter_modules',
'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
]
def read_code(stream):
# This helper is needed in order for the PEP 302 emulation to
# correctly handle compiled files
import marshal
magic = stream.read(4)
if magic != imp.get_magic():
return None
stream.read(4) # Skip timestamp
return marshal.load(stream)
def simplegeneric(func):
"""Make a trivial single-dispatch generic function"""
registry = {}
def wrapper(*args, **kw):
ob = args[0]
try:
cls = ob.__class__
except AttributeError:
cls = type(ob)
try:
mro = cls.__mro__
except AttributeError:
try:
class cls(cls, object):
pass
mro = cls.__mro__[1:]
except TypeError:
mro = object, # must be an ExtensionClass or some such :(
for t in mro:
if t in registry:
return registry[t](*args, **kw)
else:
return func(*args, **kw)
try:
wrapper.__name__ = func.__name__
except (TypeError, AttributeError):
pass # Python 2.3 doesn't allow functions to be renamed
def register(typ, func=None):
if func is None:
return lambda f: register(typ, f)
registry[typ] = func
return func
wrapper.__dict__ = func.__dict__
wrapper.__doc__ = func.__doc__
wrapper.register = register
return wrapper
def walk_packages(path=None, prefix='', onerror=None):
"""Yields (module_loader, name, ispkg) for all modules recursively
on path, or, if path is None, all accessible modules.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
Note that this function must import all *packages* (NOT all
modules!) on the given path, in order to access the __path__
attribute to find submodules.
'onerror' is a function which gets called with one argument (the
name of the package which was being imported) if any exception
occurs while trying to import a package. If no onerror function is
supplied, ImportErrors are caught and ignored, while all other
exceptions are propagated, terminating the search.
Examples:
# list all modules python can access
walk_packages()
# list all submodules of ctypes
walk_packages(ctypes.__path__, ctypes.__name__+'.')
"""
def seen(p, m={}):
if p in m:
return True
m[p] = True
for importer, name, ispkg in iter_modules(path, prefix):
yield importer, name, ispkg
if ispkg:
try:
__import__(name)
except ImportError:
if onerror is not None:
onerror(name)
except Exception:
if onerror is not None:
onerror(name)
else:
raise
else:
path = getattr(sys.modules[name], '__path__', None) or []
# don't traverse path items we've seen before
path = [p for p in path if not seen(p)]
for item in walk_packages(path, name+'.', onerror):
yield item
def iter_modules(path=None, prefix=''):
"""Yields (module_loader, name, ispkg) for all submodules on path,
or, if path is None, all top-level modules on sys.path.
'path' should be either None or a list of paths to look for
modules in.
'prefix' is a string to output on the front of every module name
on output.
"""
if path is None:
importers = iter_importers()
else:
importers = map(get_importer, path)
yielded = {}
for i in importers:
for name, ispkg in iter_importer_modules(i, prefix):
if name not in yielded:
yielded[name] = 1
yield i, name, ispkg
#@simplegeneric
def iter_importer_modules(importer, prefix=''):
if not hasattr(importer, 'iter_modules'):
return []
return importer.iter_modules(prefix)
iter_importer_modules = simplegeneric(iter_importer_modules)
class ImpImporter:
"""PEP 302 Importer that wraps Python's "classic" import algorithm
ImpImporter(dirname) produces a PEP 302 importer that searches that
directory. ImpImporter(None) produces a PEP 302 importer that searches
the current sys.path, plus any modules that are frozen or built-in.
Note that ImpImporter does not currently support being used by placement
on sys.meta_path.
"""
def __init__(self, path=None):
self.path = path
def find_module(self, fullname, path=None):
# Note: we ignore 'path' argument since it is only used via meta_path
subname = fullname.split(".")[-1]
if subname != fullname and self.path is None:
return None
if self.path is None:
path = None
else:
path = [os.path.realpath(self.path)]
try:
file, filename, etc = imp.find_module(subname, path)
except ImportError:
return None
return ImpLoader(fullname, file, filename, etc)
def iter_modules(self, prefix=''):
if self.path is None or not os.path.isdir(self.path):
return
yielded = {}
import inspect
filenames = os.listdir(self.path)
filenames.sort() # handle packages before same-named modules
for fn in filenames:
modname = inspect.getmodulename(fn)
if modname=='__init__' or modname in yielded:
continue
path = os.path.join(self.path, fn)
ispkg = False
if not modname and os.path.isdir(path) and '.' not in fn:
modname = fn
for fn in os.listdir(path):
subname = inspect.getmodulename(fn)
if subname=='__init__':
ispkg = True
break
else:
continue # not a package
if modname and '.' not in modname:
yielded[modname] = 1
yield prefix + modname, ispkg
class ImpLoader:
"""PEP 302 Loader that wraps Python's "classic" import algorithm
"""
code = source = None
def __init__(self, fullname, file, filename, etc):
self.file = file
self.filename = filename
self.fullname = fullname
self.etc = etc
def load_module(self, fullname):
self._reopen()
try:
mod = imp.load_module(fullname, self.file, self.filename, self.etc)
finally:
if self.file:
self.file.close()
# Note: we don't set __loader__ because we want the module to look
# normal; i.e. this is just a wrapper for standard import machinery
return mod
def get_data(self, pathname):
return open(pathname, "rb").read()
def _reopen(self):
if self.file and self.file.closed:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self.file = open(self.filename, 'rU')
elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
self.file = open(self.filename, 'rb')
def _fix_name(self, fullname):
if fullname is None:
fullname = self.fullname
elif fullname != self.fullname:
raise ImportError("Loader for module %s cannot handle "
"module %s" % (self.fullname, fullname))
return fullname
def is_package(self, fullname):
fullname = self._fix_name(fullname)
return self.etc[2]==imp.PKG_DIRECTORY
def get_code(self, fullname=None):
fullname = self._fix_name(fullname)
if self.code is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
source = self.get_source(fullname)
self.code = compile(source, self.filename, 'exec')
elif mod_type==imp.PY_COMPILED:
self._reopen()
try:
self.code = read_code(self.file)
finally:
self.file.close()
elif mod_type==imp.PKG_DIRECTORY:
self.code = self._get_delegate().get_code()
return self.code
def get_source(self, fullname=None):
fullname = self._fix_name(fullname)
if self.source is None:
mod_type = self.etc[2]
if mod_type==imp.PY_SOURCE:
self._reopen()
try:
self.source = self.file.read()
finally:
self.file.close()
elif mod_type==imp.PY_COMPILED:
if os.path.exists(self.filename[:-1]):
f = open(self.filename[:-1], 'rU')
self.source = f.read()
f.close()
elif mod_type==imp.PKG_DIRECTORY:
self.source = self._get_delegate().get_source()
return self.source
def _get_delegate(self):
return ImpImporter(self.filename).find_module('__init__')
def get_filename(self, fullname=None):
fullname = self._fix_name(fullname)
mod_type = self.etc[2]
if self.etc[2]==imp.PKG_DIRECTORY:
return self._get_delegate().get_filename()
elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
return self.filename
return None
try:
import zipimport
from zipimport import zipimporter
def iter_zipimport_modules(importer, prefix=''):
dirlist = zipimport._zip_directory_cache[importer.archive].keys()
dirlist.sort()
_prefix = importer.prefix
plen = len(_prefix)
yielded = {}
import inspect
for fn in dirlist:
if not fn.startswith(_prefix):
continue
fn = fn[plen:].split(os.sep)
if len(fn)==2 and fn[1].startswith('__init__.py'):
if fn[0] not in yielded:
yielded[fn[0]] = 1
yield fn[0], True
if len(fn)!=1:
continue
modname = inspect.getmodulename(fn[0])
if modname=='__init__':
continue
if modname and '.' not in modname and modname not in yielded:
yielded[modname] = 1
yield prefix + modname, False
iter_importer_modules.register(zipimporter, iter_zipimport_modules)
except ImportError:
pass
def get_importer(path_item):
"""Retrieve a PEP 302 importer for the given path item
The returned importer is cached in sys.path_importer_cache
if it was newly created by a path hook.
If there is no importer, a wrapper around the basic import
machinery is returned. This wrapper is never inserted into
the importer cache (None is inserted instead).
The cache (or part of it) can be cleared manually if a
rescan of sys.path_hooks is necessary.
"""
try:
importer = sys.path_importer_cache[path_item]
except KeyError:
for path_hook in sys.path_hooks:
try:
importer = path_hook(path_item)
break
except ImportError:
pass
else:
importer = None
sys.path_importer_cache.setdefault(path_item, importer)
if importer is None:
try:
importer = ImpImporter(path_item)
except ImportError:
importer = None
return importer
def iter_importers(fullname=""):
"""Yield PEP 302 importers for the given module name
If fullname contains a '.', the importers will be for the package
containing fullname, otherwise they will be importers for sys.meta_path,
sys.path, and Python's "classic" import machinery, in that order. If
the named module is in a package, that package is imported as a side
effect of invoking this function.
Non PEP 302 mechanisms (e.g. the Windows registry) used by the
standard import machinery to find files in alternative locations
are partially supported, but are searched AFTER sys.path. Normally,
these locations are searched BEFORE sys.path, preventing sys.path
entries from shadowing them.
For this to cause a visible difference in behaviour, there must
be a module or package name that is accessible via both sys.path
and one of the non PEP 302 file system mechanisms. In this case,
the emulation will find the former version, while the builtin
import mechanism will find the latter.
Items of the following types can be affected by this discrepancy:
imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
"""
if fullname.startswith('.'):
raise ImportError("Relative module names not supported")
if '.' in fullname:
# Get the containing package's __path__
pkg = '.'.join(fullname.split('.')[:-1])
if pkg not in sys.modules:
__import__(pkg)
path = getattr(sys.modules[pkg], '__path__', None) or []
else:
for importer in sys.meta_path:
yield importer
path = sys.path
for item in path:
yield get_importer(item)
if '.' not in fullname:
yield ImpImporter()
def get_loader(module_or_name):
"""Get a PEP 302 "loader" object for module_or_name
If the module or package is accessible via the normal import
mechanism, a wrapper around the relevant part of that machinery
is returned. Returns None if the module cannot be found or imported.
If the named module is not already imported, its containing package
(if any) is imported, in order to establish the package __path__.
This function uses iter_importers(), and is thus subject to the same
limitations regarding platform-specific special import locations such
as the Windows registry.
"""
if module_or_name in sys.modules:
module_or_name = sys.modules[module_or_name]
if isinstance(module_or_name, ModuleType):
module = module_or_name
loader = getattr(module, '__loader__', None)
if loader is not None:
return loader
fullname = module.__name__
else:
fullname = module_or_name
return find_loader(fullname)
def find_loader(fullname):
"""Find a PEP 302 "loader" object for fullname
If fullname contains dots, path must be the containing package's __path__.
Returns None if the module cannot be found or imported. This function uses
iter_importers(), and is thus subject to the same limitations regarding
platform-specific special import locations such as the Windows registry.
"""
for importer in iter_importers(fullname):
loader = importer.find_module(fullname)
if loader is not None:
return loader
return None
def extend_path(path, name):
"""Extend a package's path.
Intended use is to place the following code in a package's __init__.py:
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
This will add to the package's __path__ all subdirectories of
directories on sys.path named after the package. This is useful
if one wants to distribute different parts of a single logical
package as multiple directories.
It also looks for *.pkg files beginning where * matches the name
argument. This feature is similar to *.pth files (see site.py),
except that it doesn't special-case lines starting with 'import'.
A *.pkg file is trusted at face value: apart from checking for
duplicates, all entries found in a *.pkg file are added to the
path, regardless of whether they are exist the filesystem. (This
is a feature.)
If the input path is not a list (as is the case for frozen
packages) it is returned unchanged. The input path is not
modified; an extended copy is returned. Items are only appended
to the copy at the end.
It is assumed that sys.path is a sequence. Items of sys.path that
are not (unicode or 8-bit) strings referring to existing
directories are ignored. Unicode items of sys.path that cause
errors when used as filenames may cause this function to raise an
exception (in line with os.path.isdir() behavior).
"""
if not isinstance(path, list):
# This could happen e.g. when this is called from inside a
# frozen package. Return the path unchanged in that case.
return path
pname = os.path.join(*name.split('.')) # Reconstitute as relative path
# Just in case os.extsep != '.'
sname = os.extsep.join(name.split('.'))
sname_pkg = sname + os.extsep + "pkg"
init_py = "__init__" + os.extsep + "py"
path = path[:] # Start with a copy of the existing path
for dir in sys.path:
if not isinstance(dir, basestring) or not os.path.isdir(dir):
continue
subdir = os.path.join(dir, pname)
# XXX This may still add duplicate entries to path on
# case-insensitive filesystems
initfile = os.path.join(subdir, init_py)
if subdir not in path and os.path.isfile(initfile):
path.append(subdir)
# XXX Is this the right thing for subpackages like zope.app?
# It looks for a file named "zope.app.pkg"
pkgfile = os.path.join(dir, sname_pkg)
if os.path.isfile(pkgfile):
try:
f = open(pkgfile)
except IOError:
msg = sys.exc_info()[1]
sys.stderr.write("Can't open %s: %s\n" %
(pkgfile, msg.args[0]))
else:
for line in f:
line = line.rstrip('\n')
if not line or line.startswith('#'):
continue
path.append(line) # Don't check for existence!
f.close()
return path
def get_data(package, resource):
"""Get a resource from a package.
This is a wrapper round the PEP 302 loader get_data API. The package
argument should be the name of a package, in standard module format
(foo.bar). The resource argument should be in the form of a relative
filename, using '/' as the path separator. The parent directory name '..'
is not allowed, and nor is a rooted name (starting with a '/').
The function returns a binary string, which is the contents of the
specified resource.
For packages located in the filesystem, which have already been imported,
this is the rough equivalent of
d = os.path.dirname(sys.modules[package].__file__)
data = open(os.path.join(d, resource), 'rb').read()
If the package cannot be located or loaded, or it uses a PEP 302 loader
which does not support get_data(), then None is returned.
"""
loader = get_loader(package)
if loader is None or not hasattr(loader, 'get_data'):
return None
mod = sys.modules.get(package) or loader.load_module(package)
if mod is None or not hasattr(mod, '__file__'):
return None
# Modify the resource name to be compatible with the loader.get_data
# signature - an os.path format "filename" starting with the dirname of
# the package's __file__
parts = resource.split('/')
parts.insert(0, os.path.dirname(mod.__file__))
resource_name = os.path.join(*parts)
return loader.get_data(resource_name)
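# Illustrative usage sketch (not part of the original module): the package and resource
# names below are placeholders; they only show the expected call form, i.e. a dotted
# package name plus a '/'-separated relative resource path.
#
#     contents = get_data('mypackage.subpackage', 'data/config.txt')
#     if contents is not None:
#         print contents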
| [
"[email protected]"
] | |
82b357f3e1c30701a2183d9943222fe1aee5969e | 7b44c55ea74a4a407251117dbbc7ac09b4cb3a4c | /experiments/run_scripts/policy_plasticity/ModelMAML/model_batch_maml_polopt.py | 379ce2cf0e0063beb8da037750ce8a4bbb8f8e29 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | AaronStone24/model_ensemble_meta_learning | 1c48b039a8c533fd1802bff50988dec635288206 | 51d522cc68e4685135c4770fe43175c4b3a63122 | refs/heads/master | 2023-05-27T15:47:32.862283 | 2023-05-10T07:04:44 | 2023-05-10T07:04:44 | 636,159,846 | 0 | 0 | null | 2023-05-04T08:47:34 | 2023-05-04T08:47:33 | null | UTF-8 | Python | false | false | 27,115 | py | import rllab.misc.logger as logger
import tensorflow as tf
import time
import numpy as np
import os
import joblib
from rllab_maml.algos.base import RLAlgorithm
from rllab_maml.sampler.stateful_pool import singleton_pool
from sandbox.ours.sampler import RandomVectorizedSampler, MAMLModelVectorizedSampler, MAMLVectorizedSampler
from sandbox.ours.sampler.MAML_sampler.maml_batch_sampler import BatchSampler
class ModelBatchMAMLPolopt(RLAlgorithm):
"""
Base class for batch sampling-based policy optimization methods, with maml.
This includes various policy gradient methods like vpg, npg, ppo, trpo, etc.
"""
def __init__(
self,
env,
policy,
dynamics_model,
baseline,
scope=None,
n_itr=20,
start_itr=0,
            # Note that the number of trajectories for grad update = batch_size
# Defaults are 10 trajectories of length 500 for gradient update
batch_size_env_samples=10,
batch_size_dynamics_samples=100,
meta_batch_size=None,
initial_random_samples=None,
max_path_length_env=100,
max_path_length_dyn=None,
num_grad_updates=1,
discount=0.99,
entropy_bonus=0,
gae_lambda=1,
dynamic_model_max_epochs=(1000, 1000),
num_maml_steps_per_iter=10,
reset_from_env_traj=False,
dynamics_data_buffer_size=1e5,
retrain_model_when_reward_decreases=True,
reset_policy_std=False,
reinit_model_cycle=0,
center_adv=True,
positive_adv=False,
store_paths=False,
sampler_cls=None,
sampler_args=None,
load_policy=None,
frac_gpu=0.85,
log_real_performance=True,
clip_obs=False,
**kwargs
):
"""
:param env: Environment
:param policy: Policy
:param dynamics_model: Dynamics model ensemble
:param baseline: Baseline
:param scope: Scope for identifying the algorithm. Must be specified if running multiple algorithms
simultaneously, each using different environments and policies
:param n_itr: Number of iterations.
:param start_itr: Starting iteration.
:param batch_size_env_samples: Number of policy rollouts for each model/policy
:param batch_size_dynamics_samples: Number of (imaginary) policy rollouts with each dynamics model
        :param meta_batch_size: Number of meta-tasks (defaults to dynamics_model.num_models)
:param initial_random_samples: either None -> use initial policy to sample from env
or int: number of random samples at first iteration to train dynamics model
if provided, in the first iteration no samples from the env are generated
with the policy
:param max_path_length_env: Maximum length of a single rollout in the environment
:param max_path_length_dyn: Maximum path length of a single (imaginary) rollout with the dynamics model
:param num_grad_updates: Number of fast gradient updates
:param discount: Discount.
        :param entropy_bonus: Entropy bonus coefficient
:param gae_lambda: Lambda used for generalized advantage estimation.
:param dynamic_model_max_epochs: (int) maximum number of epochs for training the dynamics model
        :param num_maml_steps_per_iter: number of policy gradient steps before retraining the dynamics model
:param reset_from_env_traj: (boolean) whether to use the real environment observations for resetting the imaginary dynamics model rollouts
:param dynamics_data_buffer_size: (int) size of the queue/buffer that holds data for the model training
:param retrain_model_when_reward_decreases: (boolean) if true - stop inner gradient steps when performance decreases
:param reset_policy_std: whether to reset the policy std after each iteration
:param reinit_model_cycle: number of iterations before re-initializing the dynamics model (if 0 the dynamic model is not re-initialized at all)
:param store_paths: Whether to save all paths data to the snapshot.
:param frac_gpu: memory fraction of the gpu that shall be used for this task
:param log_real_performance: (boolean) if true the pre-update and post-update performance in the real env is evaluated and logged
:param clip_obs: (boolean) whether to clip the predicted next observations of the dynamics model in order to avoid numerical instabilities
"""
self.env = env
self.policy = policy
self.dynamics_model = dynamics_model
self.load_policy = load_policy
self.baseline = baseline
self.scope = scope
self.n_itr = n_itr
self.start_itr = start_itr
# meta batch size and number of dynamics models
self.num_models = dynamics_model.num_models
if meta_batch_size is None:
self.meta_batch_size = self.num_models # set meta_batch_size to number of dynamic models
else:
            assert meta_batch_size % self.num_models == 0, "meta_batch_size must be a multiple of the number of models in the dynamics ensemble"
self.meta_batch_size = meta_batch_size
self.max_path_length = max_path_length_env
self.max_path_length_dyn = max_path_length_dyn if max_path_length_dyn is not None else max_path_length_env
# batch_size is the number of trajectories for one fast grad update.
self.batch_size = batch_size_env_samples * max_path_length_env * self.meta_batch_size # batch_size for env sampling
self.batch_size_dynamics_samples = batch_size_dynamics_samples * self.max_path_length_dyn * self.meta_batch_size # batch_size for model sampling
if initial_random_samples is None:
self.initial_random_samples = self.batch_size
else:
self.initial_random_samples = initial_random_samples
self.discount = discount
self.entropy_bonus = entropy_bonus
self.gae_lambda = gae_lambda
# dynamics model config
self.dynamic_model_epochs = dynamic_model_max_epochs
self.num_maml_steps_per_iter = num_maml_steps_per_iter
self.reset_from_env_traj = reset_from_env_traj
self.dynamics_data_buffer_size = dynamics_data_buffer_size
self.retrain_model_when_reward_decreases = retrain_model_when_reward_decreases
self.reset_policy_std = reset_policy_std
self.reinit_model = reinit_model_cycle
self.log_real_performance = log_real_performance
self.center_adv = center_adv
self.positive_adv = positive_adv
self.store_paths = store_paths
self.num_grad_updates = num_grad_updates # number of gradient steps during training
self.frac_gpu = frac_gpu
''' setup sampler classes '''
# env sampler - get samples from environment using the policy
if sampler_cls is None:
sampler_cls = MAMLVectorizedSampler
sampler_args = dict(n_tasks=self.meta_batch_size, n_envs=self.meta_batch_size * batch_size_env_samples, parallel=False)
self.env_sampler = sampler_cls(self, **sampler_args)
# model sampler - makes (imaginary) rollouts with the estimated dynamics model ensemble
self.model_sampler = MAMLModelVectorizedSampler(self, max_path_length=max_path_length_dyn, clip_obs=clip_obs)
# random sampler - (initially) collects random samples from the environment to train the dynamics model
if self.initial_random_samples:
self.random_sampler = RandomVectorizedSampler(self)
else:
self.random_sampler = None
def start_worker(self):
self.env_sampler.start_worker()
self.model_sampler.start_worker()
if self.initial_random_samples:
self.random_sampler.start_worker()
def shutdown_worker(self):
self.env_sampler.shutdown_worker()
self.model_sampler.shutdown_worker()
def obtain_env_samples(self, itr, reset_args=None, log_prefix=''):
paths = self.env_sampler.obtain_samples(itr, reset_args, return_dict=True, log_prefix=log_prefix)
assert type(paths) == dict
return paths
def obtain_model_samples(self, itr, log=False, traj_starting_obs=None, traj_starting_ts=None):
return self.model_sampler.obtain_samples(itr, log=log, return_dict=True, traj_starting_obs=traj_starting_obs,
traj_starting_ts=traj_starting_ts)
def obtain_random_samples(self, itr, log=False):
assert self.random_sampler is not None
assert self.initial_random_samples is not None
return self.random_sampler.obtain_samples(itr, num_samples=self.initial_random_samples, log=log,
log_prefix='EnvSampler-')
def process_samples_for_dynamics(self, itr, paths):
return self.model_sampler.process_samples(itr, paths, log=False)
def process_samples_for_policy(self, itr, paths, log=True, log_prefix='DynTrajs-', return_reward=False):
return self.env_sampler.process_samples(itr, paths, log=log, log_prefix=log_prefix, return_reward=return_reward)
def train(self):
# TODO - make this a util
flatten_list = lambda l: [item for sublist in l for item in sublist]
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = self.frac_gpu
with tf.Session(config=config) as sess:
            # Code for loading a previous policy. Somewhat hacky because it needs to be inside the sess.
if self.load_policy is not None:
self.policy = joblib.load(self.load_policy)['policy']
self.init_opt()
self.initialize_uninitialized_variables(sess)
self.all_paths = []
self.start_worker()
start_time = time.time()
n_env_timesteps = 0
""" ----- prepare stuff for kl heatplots --------- """
resolution = 50
linspace = np.linspace(-1.8, 1.8, resolution)
x_points, y_points = np.meshgrid(linspace, linspace)
obs_grid_points = np.stack([x_points.flatten(), y_points.flatten()], axis=1)
assert obs_grid_points.shape == (resolution**2, 2)
if logger._snapshot_dir:
DUMP_DIR = logger._snapshot_dir
else:
DUMP_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'plots')
for itr in range(self.start_itr, self.n_itr):
itr_start_time = time.time()
with logger.prefix('itr #%d | ' % itr):
logger.record_tabular("mean_inner_stepsize", self.policy.get_mean_step_size())
''' sample environment configuration '''
env = self.env
while not ('sample_env_params' in dir(env) or 'sample_goals' in dir(env)):
env = env._wrapped_env
if 'sample_goals' in dir(env):
learner_env_params = env.sample_goals(self.meta_batch_size)
                elif 'sample_env_params' in dir(env):
learner_env_params = env.sample_env_params(self.meta_batch_size)
''' get rollouts from the environment'''
time_env_sampling_start = time.time()
if self.initial_random_samples and itr == 0:
logger.log("Obtaining random samples from the environment...")
new_env_paths = self.obtain_random_samples(itr, log=True)
n_env_timesteps += self.initial_random_samples
logger.record_tabular("n_timesteps", n_env_timesteps)
self.all_paths.extend(new_env_paths)
samples_data_dynamics = self.random_sampler.process_samples(itr, self.all_paths,
log=True,
log_prefix='EnvTrajs-') # must log in the same way as the model sampler below
else:
if self.reset_policy_std:
logger.log("Resetting policy std")
self.policy.set_std()
logger.log("Obtaining samples from the environment using the policy...")
new_env_paths = self.obtain_env_samples(itr, reset_args=learner_env_params,
log_prefix='EnvSampler-')
n_env_timesteps += self.batch_size
logger.record_tabular("n_timesteps", n_env_timesteps)
# flatten dict of paths per task/mode --> list of paths
new_env_paths = [path for task_paths in new_env_paths.values() for path in task_paths]
# self.all_paths.extend(new_env_paths)
logger.log("Processing environment samples...")
# first processing just for logging purposes
self.model_sampler.process_samples(itr, new_env_paths, log=True, log_prefix='EnvTrajs-')
new_samples_data_dynamics = self.process_samples_for_dynamics(itr, new_env_paths)
for k, v in samples_data_dynamics.items():
samples_data_dynamics[k] = np.concatenate([v, new_samples_data_dynamics[k]], axis=0)[-int(self.dynamics_data_buffer_size):]
logger.record_tabular('Time-EnvSampling', time.time() - time_env_sampling_start)
epochs = self.dynamic_model_epochs[min(itr, len(self.dynamic_model_epochs) - 1)]
logger.log("Training dynamics model for %i epochs ..." % (epochs))
self.dynamics_model.fit(samples_data_dynamics['observations_dynamics'],
samples_data_dynamics['actions_dynamics'],
samples_data_dynamics['next_observations_dynamics'],
epochs=epochs, verbose=True)
''' ------------- Making Plots ------------------ '''
logger.log("Evaluating the performance of the real policy")
self.policy.switch_to_init_dist()
env_paths_pre = self.obtain_env_samples(itr, reset_args=learner_env_params,
log_prefix='PrePolicy-')
samples_data = {}
for key in env_paths_pre.keys():
samples_data[key] = self.process_samples_for_policy(itr, env_paths_pre[key], log=False)
_ = self.process_samples_for_policy(itr, flatten_list(env_paths_pre.values()), log_prefix='PrePolicy-')
_ , agent_infos_pre = self.policy.get_actions_batch([obs_grid_points for _ in range(self.meta_batch_size)])
self.policy.compute_updated_dists(samples_data)
env_paths_post = self.obtain_env_samples(itr, reset_args=learner_env_params, log_prefix='PostPolicy-',)
_ = self.process_samples_for_policy(itr, flatten_list(env_paths_post.values()), log_prefix='PostPolicy-')
_, agent_infos_post = self.policy.get_actions_batch(
[obs_grid_points for _ in range(self.meta_batch_size)])
# compute KL divergence between pre and post update policy
kl_pre_post = self.policy.distribution.kl(agent_infos_pre, agent_infos_post)
kl_pre_post_grid = kl_pre_post.reshape((self.meta_batch_size, resolution**2)).mean(axis=0).reshape((resolution,resolution))
model_std_grid = self.dynamics_model.predict_std(obs_grid_points, - 0.05 * obs_grid_points).mean(axis=1).reshape((resolution,resolution))
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
img_filename = os.path.join(DUMP_DIR, 'kl_vs_model_std_plot_iter_%i' % itr)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 4))
fig.tight_layout(pad=3)
ax1.set_title('KL-divergence')
ax1.set_ylabel('y')
ax1.set_xlabel('x')
# determine range of plot
point_env = self.env._wrapped_env._wrapped_env
env_center = (point_env.init_sampling_boundaries[0] + point_env.init_sampling_boundaries[1]) / 2
distance = np.abs(
point_env.init_sampling_boundaries[0] - point_env.init_sampling_boundaries[1]) / 2
extent = (env_center - 0.9*distance, env_center + 0.9*distance, env_center - 0.9*distance, env_center + 0.9*distance)
im1 = ax1.imshow(kl_pre_post_grid, extent=extent)
fig.colorbar(ax=ax1, mappable=im1, shrink=0.8, orientation='vertical')
ax1.grid(False)
ax2.set_title('Ensemble variance')
ax2.set_ylabel('y')
ax2.set_xlabel('x')
im2 = ax2.imshow(model_std_grid, extent=extent)
fig.colorbar(ax=ax2, mappable=im2, shrink=0.8, orientation='vertical')
ax2.grid(False)
# save plot
fig.savefig(img_filename)
# save plot data
plot_data={
'kl': kl_pre_post_grid,
'std': model_std_grid,
'extent': extent
}
plot_data_file = os.path.join(DUMP_DIR, 'kl_vs_model_std_plot_iter_%i.pkl' % itr)
joblib.dump(plot_data, plot_data_file)
''' --------------- fit dynamics model --------------- '''
time_fit_start = time.time()
epochs = self.dynamic_model_epochs[min(itr, len(self.dynamic_model_epochs) - 1)]
if self.reinit_model and itr % self.reinit_model == 0:
self.dynamics_model.reinit_model()
epochs = self.dynamic_model_epochs[0]
logger.log("Training dynamics model for %i epochs ..." % (epochs))
self.dynamics_model.fit(samples_data_dynamics['observations_dynamics'],
samples_data_dynamics['actions_dynamics'],
samples_data_dynamics['next_observations_dynamics'],
epochs=epochs, verbose=True, log_tabular=True)
logger.record_tabular('Time-ModelFit', time.time() - time_fit_start)
''' --------------- MAML steps --------------- '''
times_dyn_sampling = []
times_dyn_sample_processing = []
times_inner_step = []
times_outer_step = []
time_maml_steps_start = time.time()
for maml_itr in range(self.num_maml_steps_per_iter):
self.policy.switch_to_init_dist() # Switch to pre-update policy
all_samples_data_maml_iter, all_paths_maml_iter = [], []
for step in range(self.num_grad_updates + 1):
''' --------------- Sampling from Dynamics Models --------------- '''
logger.log("MAML Step %i%s of %i - Obtaining samples from the dynamics model..." % (
maml_itr + 1, chr(97 + step), self.num_maml_steps_per_iter))
time_dyn_sampling_start = time.time()
if self.reset_from_env_traj:
new_model_paths = self.obtain_model_samples(itr, traj_starting_obs=samples_data_dynamics['observations_dynamics'],
traj_starting_ts=samples_data_dynamics['timesteps_dynamics'])
else:
new_model_paths = self.obtain_model_samples(itr)
assert type(new_model_paths) == dict and len(new_model_paths) == self.meta_batch_size
all_paths_maml_iter.append(new_model_paths)
times_dyn_sampling.append(time.time() - time_dyn_sampling_start)
''' --------------- Processing Dynamics Samples --------------- '''
logger.log("Processing samples...")
time_dyn_sample_processing_start = time.time()
samples_data = {}
for key in new_model_paths.keys(): # the keys are the tasks
                        # don't log because this will spam the console with every task.
samples_data[key] = self.process_samples_for_policy(itr, new_model_paths[key], log=False)
all_samples_data_maml_iter.append(samples_data)
# for logging purposes
_, mean_reward = self.process_samples_for_policy(itr,
flatten_list(new_model_paths.values()),
log='reward',
log_prefix="DynTrajs%i%s-" % (
maml_itr + 1, chr(97 + step)),
return_reward=True)
times_dyn_sample_processing.append(time.time() - time_dyn_sample_processing_start)
''' --------------- Inner Policy Update --------------- '''
time_inner_step_start = time.time()
if step < self.num_grad_updates:
logger.log("Computing policy updates...")
self.policy.compute_updated_dists(samples_data)
times_inner_step.append(time.time() - time_inner_step_start)
if maml_itr == 0:
prev_rolling_reward_mean = mean_reward
rolling_reward_mean = mean_reward
else:
prev_rolling_reward_mean = rolling_reward_mean
rolling_reward_mean = 0.8 * rolling_reward_mean + 0.2 * mean_reward
# stop gradient steps when mean_reward decreases
if self.retrain_model_when_reward_decreases and rolling_reward_mean < prev_rolling_reward_mean:
logger.log(
"Stopping policy gradients steps since rolling mean reward decreased from %.2f to %.2f" % (
prev_rolling_reward_mean, rolling_reward_mean))
# complete some logging stuff
for i in range(maml_itr + 1, self.num_maml_steps_per_iter):
logger.record_tabular('DynTrajs%ia-AverageReturn' % (i+1), 0.0)
logger.record_tabular('DynTrajs%ib-AverageReturn' % (i+1), 0.0)
break
''' --------------- Meta Policy Update --------------- '''
logger.log("MAML Step %i of %i - Optimizing policy..." % (maml_itr + 1, self.num_maml_steps_per_iter))
time_outer_step_start = time.time()
# This needs to take all samples_data so that it can construct graph for meta-optimization.
self.optimize_policy(itr, all_samples_data_maml_iter, log=False)
if itr == 0: sess.graph.finalize()
times_outer_step.append(time.time() - time_outer_step_start)
''' --------------- Logging Stuff --------------- '''
logger.record_tabular('Time-MAMLSteps', time.time() - time_maml_steps_start)
logger.record_tabular('Time-DynSampling', np.mean(times_dyn_sampling))
logger.record_tabular('Time-DynSampleProc', np.mean(times_dyn_sample_processing))
logger.record_tabular('Time-InnerStep', np.mean(times_inner_step))
logger.record_tabular('Time-OuterStep', np.mean(times_outer_step))
logger.log("Saving snapshot...")
params = self.get_itr_snapshot(itr, all_samples_data_maml_iter[-1]) # , **kwargs)
if self.store_paths:
params["paths"] = all_samples_data_maml_iter[-1]["paths"]
logger.save_itr_params(itr, params)
logger.log("Saved")
logger.record_tabular('Time-Overall', time.time() - start_time)
logger.record_tabular('Time-Itr', time.time() - itr_start_time)
logger.dump_tabular(with_prefix=False)
self.shutdown_worker()
def log_diagnostics(self, paths, prefix):
self.env.log_diagnostics(paths, prefix)
self.policy.log_diagnostics(paths, prefix)
self.baseline.log_diagnostics(paths)
def init_opt(self):
"""
Initialize the optimization procedure. If using tensorflow, this may
include declaring all the variables and compiling functions
"""
raise NotImplementedError
def get_itr_snapshot(self, itr, samples_data):
"""
Returns all the data that should be saved in the snapshot for this
iteration.
"""
raise NotImplementedError
def optimize_policy(self, itr, samples_data, log=True):
raise NotImplementedError
def initialize_uninitialized_variables(self, sess):
uninit_vars = []
for var in tf.global_variables():
            # note - this is hacky, there may be a better way to do this in newer TF.
try:
sess.run(var)
except tf.errors.FailedPreconditionError:
uninit_vars.append(var)
sess.run(tf.variables_initializer(uninit_vars)) | [
"[email protected]"
] | |
7e5988d2abc9de3328191dcf4a48925e1114b43a | 1dbc955c3d717476fa75a48cc87a05e2eceb0002 | /easy/min_depth.py | bc408df46fa538cc996dbb3442ff9a4ebdd9d9e3 | [] | no_license | gregorysimpson13/leetcode | e68eaee2ba38a1edff119eda1ccdeacc0c400d26 | ae88b9f9979a5643497cb2dfeb90d19a1bcdb137 | refs/heads/master | 2023-03-31T23:58:58.940234 | 2021-04-11T14:37:31 | 2021-04-11T14:37:31 | 258,632,884 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | py | # 111. Minimum Depth of Binary Tree - EASY
# https://leetcode.com/problems/minimum-depth-of-binary-tree/submissions/
# Given a binary tree, find its minimum depth.
# The minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.
# Note: A leaf is a node with no children.
# Example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its minimum depth = 2.
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def minDepth(self, root: TreeNode) -> int:
        if root is None:
            return 0
        def getDepth(node, depth=1):
            if node is None:
                return float('inf')
            if node.left is None and node.right is None:
                return depth
            return min(getDepth(node.left, depth + 1), getDepth(node.right, depth + 1))
        return getDepth(root, 1) | [
"[email protected]"
] | |
51ea9c030f677af42fd38725debf843d3c69b260 | 8bcf973008b1d7549f59501a1667909848ea87dd | /Day0822/ORM2/app01/views.py | 3ad1248d70ac91299eaef1087bd98c2d4840af27 | [] | no_license | simplesmall/Python-FullStack | 74ffeb2119eecb7fcb21a136d01aaaf2bcc2c24c | 210844ef6443a5543d49a20dbec2db9a9b960230 | refs/heads/master | 2022-12-17T00:56:40.515335 | 2019-11-15T02:07:57 | 2019-11-15T02:07:57 | 221,816,447 | 0 | 1 | null | 2022-12-13T19:22:26 | 2019-11-15T01:10:55 | Python | UTF-8 | Python | false | false | 10,201 | py | from django.shortcuts import render, HttpResponse, redirect
# Create your views here.
from app01.models import Book, Publish, Author, AuthorDetail,Emp
def index(request):
return HttpResponse("OKK")
def addrecord(request):
'''
    Add a record
'''
pub_obj = Publish.objects.filter(name="南京出版社").first()
    # The inserted book is itself an object; it can be assigned to a variable for further operations
book=Book.objects.create(
title="Python",
price=122,
pub_date="2017-12-23",
        # One-to-many, option 1: set the foreign key id field and its value directly
        # publish_id=1
        # Option 2: pass the related object itself
publish=pub_obj
)
    # Ways to bind a many-to-many relation
    # Option 1
alex=Author.objects.filter(name="alex").first()
egon=Author.objects.filter(name="egon").first()
    # First query the records to relate and assign them to variables (they are objects), then add them to the relation table
book.authors.add(alex,egon)
    # Primary keys can also be passed directly; 1 and 2 are ids of records in the author table,
    # but the drawback is that it is unclear what each id actually refers to
    # Option 2
    # book.authors.add(1,2)
    # Option 3
    # book.authors.add(*[1,2])
return HttpResponse("添加成功")
def delrecord(request):
book = Book.objects.filter(nid=4).first()
    # Unbind specific related objects
alex = Author.objects.filter(name="alex").first()
egon = Author.objects.filter(name="egon").first()
# book.authors.remove(alex)
    book.authors.set([1])
return HttpResponse("删除成功")
def query(request):
    ########################## Object-based cross-table queries #############################
    '''
    Forward query
    Reverse query
        book.publish
    Book ----- Publish
    Reverse query: lowercase model name + _set.all() : pub_obj.book_set.all()
    '''
    # 1. Query the name and email of the publisher of this book
# book=Book.objects.filter(title="JAVA").first()
# pub_obj=Publish.objects.filter(nid=book.publish_id).first()
# print(pub_obj.name)
# print(pub_obj.email)
# return HttpResponse("查询成功")
# book=Book.objects.filter(title="JAVA").first()
# print(book.publish) # 与book这本书关联的传射对象
# print(book.publish.name)
# print(book.publish.email)
# return HttpResponse("再次成功返回")
# 2.查询某个出版社出版的所有书籍的名称
# pub_obj = Publish.objects.get(name="南京出版社")
# print(pub_obj.book_set.all().values_list("title"))
# return HttpResponse("查询倒是成功了,但是并没有返回任何东西")
################多对多######################
    '''
    Forward query
        book.authors.all()
    BOOK ----------------------Author
    Reverse query
        lowercase table name + _set.all()
    '''
    # Query which authors co-wrote a given book
book = Book.objects.filter(nid=4).first()
ret=book.authors.all().values("name")
print(ret)
    # Query the books a given author has co-written
author=Author.objects.filter(name="egon").first()
print(author.book_set.all())
    ################ One-to-one #######
    # Forward query by field: alex.ad.tel
    # Query alex's phone number
alex = Author.objects.filter(name="alex").first()
print(alex.ad.tel)
    # Reverse query by lowercase table name: ad.author
    # Query the name of the author whose phone number is 112
ad = AuthorDetail.objects.filter(tel=112).first()
print(ad.author.name)
return HttpResponse("返回一些东西啦")
def book_view(request):
book_list = Book.objects.all()
return render(request,"book_view.html",{"book_list":book_list})
def book_add(request):
if request.method=="GET":
publish_list=Publish.objects.all()
author_list=Author.objects.all()
return render(request,"book_add.html",{"publish_list":publish_list,"author_list":author_list})
else:
title=request.POST.get("title")
price=request.POST.get("price")
pub_date=request.POST.get("pub_date")
publish_id=request.POST.get("publish_id")
authors=request.POST.getlist("authors")
print(request.POST)
print(authors)
book=Book.objects.create(title=title,price=price,pub_date=pub_date,publish_id=publish_id)
book.authors.add(*authors)
return redirect("/books/")
def book_edit(request,edit_book_id):
edit_book = Book.objects.filter(pk=edit_book_id).first()
if request.method=="GET":
publish_list = Publish.objects.all()
author_list = Author.objects.all()
return render(request,"book_edit.html",{"edit_book":edit_book,"publish_list":publish_list,"author_list":author_list})
else:
title = request.POST.get("title")
price = request.POST.get("price")
pub_date = request.POST.get("pub_date")
publish_id = request.POST.get("publish_id")
authors = request.POST.getlist("authors")
print(request.POST)
print(authors)
Book.objects.filter(pk=edit_book_id).update(title=title,price=price,pub_date=pub_date,publish_id=publish_id)
edit_book.authors.set(authors)
return redirect("/books/")
def book_del(request,book_del_id):
Book.objects.filter(pk=book_del_id).delete()
return redirect("/books/")
def query2(request):
    '''
    Cross-table queries based on double underscores (implemented via SQL joins)
    Forward query by field name; reverse query by lowercase table name + double underscore + field name
    :param request:
    :return:
    '''
    # 1=1 1. Query the name and email of the publisher of this book
ret = Book.objects.filter(title="python").values("publish__name")
pub=Publish.objects.filter(book__title="JAVA").values("name")
print(pub)
    # 1=many 2. Query the titles of all books published by a given publisher
bknm=Publish.objects.filter(name="南京出版社").values("book__title")
Book.objects.filter(publish__name="北京出版社").values("title")
print(bknm)
    # 1=many 3. Query the ages of the authors of a given book
auage=Book.objects.filter(title="python").values("authors__age")
Author.objects.filter(book__title="JAVA").values("age")
print(auage)
    # 1=many 4. Query the books a given author has published
Book.objects.filter(authors__name="alex").values("title")
Author.objects.filter(name="egon").values("book__title")
    # 1=1 5. Query alex's phone number
Author.objects.filter(name="alex").values("ad__tel")
AuthorDetail.objects.filter(author__name="alex").values("tel")
    # 1=1 6. Query the name of the author whose phone number is 110
AuthorDetail.objects.filter(tel=110).values("author__name")
Author.objects.filter(ad__tel=110).values("name")
    ############################## Chained cross-table queries ##################
    # Query the titles of all books published by 人民出版社 and the names of their authors
res1=Publish.objects.filter(name="南京出版社").values("book__title","book__authors__name")
res12=Book.objects.filter(publish__name="北京出版社").values("title","authors__name")
print('RESULT',res1,'\r\n',res12)
    # Titles and publisher names of all books published by authors whose phone number starts with 110
    # Option 1
ano1=Author.objects.filter(ad__tel__startswith=110).values("book__title","book__publish__name")
# print('---------------->')
# print(ano1)
    # Option 2
AuthorDetail.objects.filter(tel__startswith=110).values("author__book__title","author__book__publish__name")
    # Option 3
Book.objects.filter(authors__ad__tel__startswith=110).values("title","publish__name")
    ######################## Aggregation and grouping ###########################
from django.db.models import Avg, Max, Sum, Min, Count
avgPrice = Book.objects.all().aggregate(Avg("price"))
print(avgPrice)
    # Single-table group-by query
    # Query each publisher id in the book table and the corresponding number of books
    # select app01_book.nid,count(1) from app01_book group by publish_id;
    # key: whichever field is in values() before annotate() is the field grouped by
ret=Book.objects.values("publish_id").annotate(c=Count(1))
print(ret)
    # Query each department name and the average salary of its employees
ret=Emp.objects.values("dep").annotate(avg_salary=Avg("sal"))
print(ret)
    # Query each province name and the corresponding number of employees
ret=Emp.objects.values("addr").annotate(num_person=Count(1))
print(ret)
    ######### Cross-table group-by queries
    # Query each publisher's name and the average price of its books
    # Equivalent SQL: select app01_publish.name,count(1) from app01_book inner join app01_publish on app01_book.publish_id = app01_publish.nid
# group by app01_publish.nid
ret=Publish.objects.values("name").annotate(avg_price=Avg("book__price"))
print(ret)
    # Query each author's name and the highest price among the books they published
ret=Author.objects.values("nid","name").annotate(max_price=Max("book__price"))
print(ret)
    # Query each book's title and the number of its authors
ret = Book.objects.values("pk","title").annotate(total=Count("authors"))
print(ret)
    # Query the titles and author counts of books that have more than one author
ret=Book.objects.annotate(c=Count("authors")).filter(c__gt=1).values("title","c")
print(ret)
'''
SELECT app01_book.title,COUNT(app01_author.nid) as c FROM app01_book
INNER JOIN app01_book_authors on app01_book.nid=app01_book_authors.book_id
INNER JOIN app01_author on app01_author.nid=app01_book_authors.author_id
GROUP BY app01_book.nid,app01_book.price,app01_book.publish_id,app01_book.pub_date,app01_book.title
HAVING c>1
'''
from django.db.models import F,Q
ret=Book.objects.filter(price__gt=100).values("title")
ret = Book.objects.filter(price=F("ad__pub_date")*2)
return HttpResponse("__(Join)查询") | [
"[email protected]"
] | |
2d0c20dbdb1d0b69c0e130f7c0b4fb5917f56aed | e2bbbf67487fddfd4648c3f37d84849efee5bfaa | /backend/hazardest/game/tests/test_trick.py | 644cfa0ee7485c64b02fe2197757d46af891215b | [] | no_license | JordanSlaman/hazardest | 2a6f813e86c58fdee2a4b6cde0b9634c2750b57a | 07d055d9cc8423f5fc45fc2992f8a8798d054cf2 | refs/heads/master | 2022-09-01T17:35:22.414359 | 2022-08-07T22:47:23 | 2022-08-07T22:47:23 | 98,753,092 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,107 | py | from django.test import TestCase
from ..models.game import Game
from ..models.hand import Hand
from ..models.trick import Trick
from ..utils.create_cards import create_cards
from .fixtures import create_game_with_players
class TrickModelTests(TestCase):
def setUp(self):
create_cards()
self.test_game = create_game_with_players()
self.test_game.start_game()
# trump turn stuff
self.test_hand = self.test_game.active_hand
self.trick = Trick.objects.create(hand=self.test_hand)
# self.trick.save()
def test_play_card(self):
player = self.test_hand.active_player
card = player.cards.last()
self.trick.play_card(player=player, card=card)
x = 3
# assert player does not have card.
# Assert card in trick
# card winning?
# def test_deals_5_cards(self):
# game = Game.objects.get(pk=1)
#
# a = game.player_set.get(user__username='alice')
# new_hand = Hand(game=game, dealer=a)
# new_hand.deal()
#
# self.assertIs(a.cards.count(), 5)
| [
"[email protected]"
] | |
2623b29fb1f656b795b705e7ca7f4f8ed1255c7b | 0e25dc15ae9efce8bfd716d4d2041da07767968b | /qbench/benchmarks/QLib/OPENQL_converted/benstein_vazirani_41b_secret_16.py | fc9be6f9ad7d194f78cb38cb05d15e7415b54a28 | [] | no_license | alxhotel/crossbar-bench | f608fc0062b4f8a5162ec33d61c0204aaf27b6ff | 3bf7536e7697d29c3089b0ba564ba22d39698b88 | refs/heads/master | 2021-07-13T16:06:50.085838 | 2020-10-04T23:39:05 | 2020-10-04T23:39:05 | 213,409,122 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,090 | py | from openql import openql as ql
import os
import argparse
def circuit(config_file, new_scheduler='yes', scheduler='ASAP', uniform_sched= 'no', sched_commute = 'yes', mapper='base', moves='no', maptiebreak='random', initial_placement='no', output_dir_name='test_output', optimize='no', measurement=True, log_level='LOG_WARNING'):
curdir = os.path.dirname(__file__)
output_dir = os.path.join(curdir, output_dir_name)
ql.set_option('output_dir', output_dir)
ql.set_option('optimize', optimize)
ql.set_option('scheduler', scheduler)
ql.set_option('scheduler_uniform', uniform_sched)
ql.set_option('mapper', mapper)
ql.set_option('initialplace', initial_placement)
ql.set_option('log_level', log_level)
ql.set_option('scheduler_post179', new_scheduler)
ql.set_option('scheduler_commute', sched_commute)
ql.set_option('mapusemoves', moves)
ql.set_option('maptiebreak', maptiebreak)
config_fn = os.path.join(curdir, config_file)
# platform = ql.Platform('platform_none', config_fn)
platform = ql.Platform('starmon', config_fn)
sweep_points = [1,2]
num_circuits = 1
num_qubits = 43
p = ql.Program('benstein_vazirani_41b_secret_16', platform, num_qubits)
p.set_sweep_points(sweep_points, num_circuits)
k = ql.Kernel('benstein_vazirani_41b_secret_16', platform, num_qubits)
k.gate('prepz',[41])
k.gate('x',[41])
k.gate('h',[0])
k.gate('h',[1])
k.gate('h',[2])
k.gate('h',[3])
k.gate('h',[4])
k.gate('h',[5])
k.gate('h',[6])
k.gate('h',[7])
k.gate('h',[8])
k.gate('h',[9])
k.gate('h',[10])
k.gate('h',[11])
k.gate('h',[12])
k.gate('h',[13])
k.gate('h',[14])
k.gate('h',[15])
k.gate('h',[16])
k.gate('h',[17])
k.gate('h',[18])
k.gate('h',[19])
k.gate('h',[20])
k.gate('h',[21])
k.gate('h',[22])
k.gate('h',[23])
k.gate('h',[24])
k.gate('h',[25])
k.gate('h',[26])
k.gate('h',[27])
k.gate('h',[28])
k.gate('h',[29])
k.gate('h',[30])
k.gate('h',[31])
k.gate('h',[32])
k.gate('h',[33])
k.gate('h',[34])
k.gate('h',[35])
k.gate('h',[36])
k.gate('h',[37])
k.gate('h',[38])
k.gate('h',[39])
k.gate('h',[40])
k.gate('h',[41])
k.gate('cnot',[4,41])
k.gate('h',[0])
k.gate('h',[1])
k.gate('h',[2])
k.gate('h',[3])
k.gate('h',[4])
k.gate('h',[5])
k.gate('h',[6])
k.gate('h',[7])
k.gate('h',[8])
k.gate('h',[9])
k.gate('h',[10])
k.gate('h',[11])
k.gate('h',[12])
k.gate('h',[13])
k.gate('h',[14])
k.gate('h',[15])
k.gate('h',[16])
k.gate('h',[17])
k.gate('h',[18])
k.gate('h',[19])
k.gate('h',[20])
k.gate('h',[21])
k.gate('h',[22])
k.gate('h',[23])
k.gate('h',[24])
k.gate('h',[25])
k.gate('h',[26])
k.gate('h',[27])
k.gate('h',[28])
k.gate('h',[29])
k.gate('h',[30])
k.gate('h',[31])
k.gate('h',[32])
k.gate('h',[33])
k.gate('h',[34])
k.gate('h',[35])
k.gate('h',[36])
k.gate('h',[37])
k.gate('h',[38])
k.gate('h',[39])
k.gate('h',[40])
k.gate('h',[41])
if measurement:
for q in range(num_qubits):
k.gate('measure', [q])
p.add_kernel(k)
p.compile()
ql.set_option('mapper', 'no')
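# Illustrative programmatic call (sketch only, in addition to the CLI entry point below):
# the platform configuration file name is a placeholder and must point to a valid
# OpenQL platform JSON file.
#
#     circuit('hardware_config_cc_light.json', scheduler='ALAP', mapper='minextend')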
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='OpenQL compilation of a Quantum Algorithm')
parser.add_argument('config_file', help='Path to the OpenQL configuration file to compile this algorithm')
parser.add_argument('--new_scheduler', nargs='?', default='yes', help='Scheduler defined by Hans')
parser.add_argument('--scheduler', nargs='?', default='ASAP', help='Scheduler specification (ASAP (default), ALAP, ...)')
    parser.add_argument('--uniform_sched', nargs='?', default='no', help='Uniform scheduler activation (yes or no)')
parser.add_argument('--sched_commute', nargs='?', default='yes', help='Permits two-qubit gates to be commutable')
parser.add_argument('--mapper', nargs='?', default='base', help='Mapper specification (base, minextend, minextendrc)')
parser.add_argument('--moves', nargs='?', default='no', help='Let the use of moves')
parser.add_argument('--maptiebreak', nargs='?', default='random', help='')
parser.add_argument('--initial_placement', nargs='?', default='no', help='Initial placement specification (yes or no)')
parser.add_argument('--out_dir', nargs='?', default='test_output', help='Folder name to store the compilation')
    parser.add_argument('--measurement', nargs='?', default=True, help='Add measurement to all the qubits at the end of the algorithm')
args = parser.parse_args()
try:
circuit(args.config_file, args.new_scheduler, args.scheduler, args.uniform_sched, args.sched_commute, args.mapper, args.moves, args.maptiebreak, args.initial_placement, args.out_dir)
except TypeError:
        print('\nCompiled, but some gate is not defined in the configuration file. \nThe gate will be invoked as it is.')
raise | [
"[email protected]"
] | |
2b70ed9dffd63b20564f11772482485422f9afc9 | 18508cea9458b2879017b44e6f18520cd8cf4f6c | /UCMDBPython/src/vmware_vim_utils.py | f9c6f4dbc0a79a0b86699ef93132b0a6c6dd4272 | [] | no_license | kvt11/dd-git | 7d4935962e06d835ad0023c4abb185876a5a9e77 | 49aafa7081b861c5f6d0e1753b425e78948116d0 | refs/heads/master | 2022-11-23T19:03:19.763423 | 2016-04-04T14:54:18 | 2016-04-04T14:54:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74,849 | py | #coding=utf-8
from modeling import HostBuilder
import logger
import modeling
import errormessages
import netutils
import re
from appilog.common.system.types import ObjectStateHolder
from com.hp.ucmdb.discovery.library.scope import DomainScopeManager
from com.hp.ucmdb.discovery.library.clients import BaseClient
from com.hp.ucmdb.discovery.library.clients.vmware import NoPermissionException
from com.hp.ucmdb.discovery.library.clients.vmware import NotSupportedException
from java.net import InetSocketAddress
from java.lang import Exception
from java.util import HashSet
from java.net import InetAddress
from java.net import UnknownHostException
from org.apache.axis import AxisFault
from java.util import Properties
VMWARE_PROTOCOL = 'vmwareprotocol'
VMWARE_PROTOCOL_SHORT = 'vmware'
VMWARE_PROTOCOL_NAME = 'VMware VIM'
PATTERN_PARAM_REPORT_POWEREDOFF_VMS = 'reportPoweredOffVMs'
VMWARE_PROTOCOL_VERSION_20 = "2.0"
VMWARE_PROTOCOL_VERSION_25 = "2.5"
PROP_CONNECTION_URL = "connection_url"
PROP_PROTOCOL_VERSION = "protocol_version"
class VimGlobalDiscoveryConfig:
"""
Class represents global discovery configuration.
Parameter 'reportPoweredOffVms':
- default value is false
- if it's false powered-off VMs won't be reported
- if it's true powered-off VMs will be reported unless there is a powered-on machine
with the same host key
"""
def __init__(self, framework):
self.reportPoweredOffVms = 0
reportPoweredOffVmsValue = framework.getParameter(PATTERN_PARAM_REPORT_POWEREDOFF_VMS)
if reportPoweredOffVmsValue and reportPoweredOffVmsValue.lower() == 'true':
logger.debug("Powered-off Virtual Machines will be reported")
self.reportPoweredOffVms = 1
self.ucmdbVersion = logger.Version().getVersion(framework)
def getUcmdbVersion(self):
return self.ucmdbVersion
class VimClientFactory:
"""
    Factory that creates clients for a particular connection.
    Tries to create a 2.5 client first; if that fails, falls back to a 2.0 client.
"""
def __init__(self, framework, urlString, credentialsId):
self.framework = framework
self.urlString = urlString
self.credentialsId = credentialsId
def createClient(self):
try:
client = self.createClientOfVersion(VMWARE_PROTOCOL_VERSION_25)
return client
except AxisFault, fault:
faultString = fault.getFaultString()
if faultString.lower().find('unsupported namespace') != -1:
logger.debug('There is a namespace problem in SOAP response for version 2.5, trying version 2.0')
client = self.createClientOfVersion(VMWARE_PROTOCOL_VERSION_20)
return client
else:
raise fault
def createClientOfVersion(self, clientVersion):
properties = Properties()
properties.setProperty(BaseClient.CREDENTIALS_ID, self.credentialsId)
properties.setProperty(PROP_CONNECTION_URL, self.urlString)
properties.setProperty(PROP_PROTOCOL_VERSION, clientVersion)
return self.framework.createClient(properties)
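# Illustrative usage sketch (not part of the original flow): the URL and credentials id
# below are placeholders, and Framework is the probe framework object passed to the
# job's DiscoveryMain. Connection errors (e.g. AxisFault) propagate to the caller.
#
#     factory = VimClientFactory(Framework, 'https://vcenter.example.com/sdk', credentialsId)
#     client = factory.createClient()
#     logger.debug('Connected, VIM API version: %s' % client.getVersionString())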
class BaseDiscoverer:
"""
Class represents a base discoverer.
    A discoverer is an object that performs some discovery activities and
returns results. This abstract discoverer has methods related to handling and reporting
the error messages.
"""
def __init__(self, client, framework, discoveryConfig):
self.client = client
self.framework = framework
self.discoveryConfig = discoveryConfig
self.errors = []
self.warnings = []
def discover(self):
""" Template method to perform further discovery based on all data this discoverer has """
pass
def addResultsToVector(self, vector):
""" Template method, adds all OSH objects created during discovery to results vector """
pass
def processMessages(self):
self.__handleMessagesArray(self.errors)
self.__handleMessagesArray(self.warnings)
self.__reportMessages()
def __handleMessagesArray(self, msgArray):
for i in range(len(msgArray)):
msg = msgArray[i]
msgArray[i] = self.handleMessage(msg)
def handleMessage(self, message):
""" Template method, is used to inject values for named parameters in particular message """
pass
def __reportMessages(self):
for msg in self.errors:
self.framework.reportError(msg)
logger.debug(msg)
for msg in self.warnings:
self.framework.reportWarning(msg)
logger.debug(msg)
class ManagedEntityDiscoverer(BaseDiscoverer):
"""
Class represents a base discoverer for Managed Entity
[http://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.ManagedEntity.html]
    Each Managed Entity has a name and a configStatus; this class defines template handlers for them.
"""
PROP_NAME = 'name'
PROP_STATUS = 'configStatus'
supportedProperties = [PROP_NAME, PROP_STATUS]
def __init__(self, client, framework, discoveryConfig):
BaseDiscoverer.__init__(self, client, framework, discoveryConfig)
self.handlers = {}
self.handlers[ManagedEntityDiscoverer.PROP_NAME] = self.handleEscapedName
self.handlers[ManagedEntityDiscoverer.PROP_STATUS] = self.handleStatus
def handle(self, selfRef, propertiesSet):
"""
Common method for all Managed Entity discoverers to handle the Managed Object reference and all
accompanying properties that were discovered. Properties are coming as a list of propName:propValue
pairs. We walk over this list and call a handler method assigned to this property.
The order in which the properties will be handled is unpredictable
"""
self.selfRef = selfRef
for property in propertiesSet:
name = property.getName()
value = property.getVal()
if self.handlers.has_key(name):
handler = self.handlers[name]
handler(value)
self._afterHandle()
def handleEscapedName(self, escapedName):
# SDK returns name of Managed Entity with 3 special chars escaped (% as %25, \ as %5c, / as %2f)
decodedName = unescapeString(escapedName)
self.handleName(decodedName)
def handleName(self, name):
""" Template method to handle a name of ManagedEntity """
pass
def handleStatus(self, status):
""" Template method to handle a status of ManagedEntity """
pass
def _afterHandle(self):
""" Method is called after all properties were handled, subclasses may override in case some post-processing is required"""
pass
class VirtualCenterDiscoverer(BaseDiscoverer):
"""
Class represents a discoverer that discovers a VirtualCenter server. There is no ManagedEntity
corresponding to VC in VMware API.
"""
def __init__(self, client, framework, discoveryConfig, vcOSH):
BaseDiscoverer.__init__(self, client, framework, discoveryConfig)
self.osh = vcOSH
self.datacenterDiscoverers = []
self.licensesDiscoverer = None
def discover(self):
self.discoverDatacenters()
self.discoverLicenses()
def discoverDatacenters(self):
contents = self.__retrieveDatacenters()
if contents is not None:
for objectContent in contents:
ref = objectContent.getObj()
props = objectContent.getPropSet()
dcHandler = DatacenterDiscoverer(self.client, self.framework, self.discoveryConfig, self.osh)
dcHandler.handle(ref, props)
dcHandler.discover()
dcHandler.processMessages()
self.datacenterDiscoverers.append(dcHandler)
else:
logger.debug('No datacenters found')
def __retrieveDatacenters(self):
propertySpec = self.client.createPropertySpec()
propertySpec.setType('Datacenter')
propertySpec.setPathSet(DatacenterDiscoverer.supportedProperties)
recurseFoldersSelectionSpec = self.client.createSelectionSpec()
recurseFoldersSelectionSpec.setName('folder2childEntity')
folderTraversalSpec = self.client.createTraversalSpec()
folderTraversalSpec.setType('Folder')
folderTraversalSpec.setPath('childEntity')
folderTraversalSpec.setName(recurseFoldersSelectionSpec.getName())
folderTraversalSpec.setSelectSet([recurseFoldersSelectionSpec])
objectSpec = self.client.createObjectSpec()
rootFolderRef = self.client.getRootFolder()
objectSpec.setObj(rootFolderRef)
objectSpec.setSkip(1)
objectSpec.setSelectSet([folderTraversalSpec])
propertyFilterSpec = self.client.createPropertyFilterSpec()
propertyFilterSpec.setPropSet([propertySpec])
propertyFilterSpec.setObjectSet([objectSpec])
return self.client.getService().retrieveProperties(self.client.getPropertyCollector(), [propertyFilterSpec])
def discoverLicenses(self):
self.licensesDiscoverer = VirtualCenterLicensesDiscoverer(self.client, self.framework, self.discoveryConfig, None, self.osh)
self.licensesDiscoverer.discover()
def addResultsToVector(self, vector):
for dcHandler in self.datacenterDiscoverers:
dcHandler.addResultsToVector(vector)
self.licensesDiscoverer.addResultsToVector(vector)
class DatacenterDiscoverer(ManagedEntityDiscoverer):
"""
Class represents a discoverer for Datacenter
[http://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.Datacenter.html]
"""
PROP_VM_FOLDER = 'vmFolder'
PROP_HOST_FOLDER = 'hostFolder'
supportedProperties = ManagedEntityDiscoverer.supportedProperties + [PROP_VM_FOLDER, PROP_HOST_FOLDER]
def __init__(self, client, framework, discoveryConfig, vcOSH):
ManagedEntityDiscoverer.__init__(self, client, framework, discoveryConfig)
self.vcOSH = vcOSH
self.handlers[DatacenterDiscoverer.PROP_VM_FOLDER] = self.handleVmFolder
self.handlers[DatacenterDiscoverer.PROP_HOST_FOLDER] = self.handleHostFolder
self.computeResourceDiscoverers = []
self.createDatacenterOSH()
def handleName(self, name):
self.osh.setAttribute('data_name', name)
def handleStatus(self, status):
self.osh.setStringAttribute('datacenter_status', status.getValue())
def handleVmFolder(self, vmFolderRef):
self.vmFolderRef = vmFolderRef
def handleHostFolder(self, hostFolderRef):
self.hostFolderRef = hostFolderRef
def createDatacenterOSH(self):
self.osh = ObjectStateHolder('datacenter')
def discover(self):
contents = self.__retrieveComputeResources()
dcName = self.osh.getAttribute('data_name').getValue()
if contents is not None:
for objectContent in contents:
ref = objectContent.getObj()
props = objectContent.getPropSet()
computeResourceDiscoverer = None
# it's not possible to query ComputeResources separately from ClusterComputeResources
                # so I'm retrieving all ComputeResources in one traversal
# check below distinguishes whether it's a cluster or not
if ref.getType() == 'ClusterComputeResource':
# cluster
# here I have to create separate discoverers for each version of protocol
# since for 2.5 we need to fetch 'configurationEx' property
# and for 2.0 we need to fetch 'configuration' property
if self.client.getVersionString() == VMWARE_PROTOCOL_VERSION_25:
computeResourceDiscoverer = ClusterComputeResourceDiscoverer25(self.client, self.framework, self.discoveryConfig, self.osh)
elif self.client.getVersionString() == VMWARE_PROTOCOL_VERSION_20:
computeResourceDiscoverer = ClusterComputeResourceDiscoverer20(self.client, self.framework, self.discoveryConfig, self.osh)
else:
raise ValueError, "Unknown protocol version"
else:
# non-clustered managed ESX
computeResourceDiscoverer = NonclusteredEsxComputeResourceDiscoverer(self.client, self.framework, self.discoveryConfig, self.osh)
computeResourceDiscoverer.handle(ref, props)
computeResourceDiscoverer.discover()
computeResourceDiscoverer.processMessages()
self.computeResourceDiscoverers.append(computeResourceDiscoverer)
else:
logger.debug("No ComputeResources found in datacenter '%s'" % dcName)
def __retrieveComputeResources(self):
propertySpec = self.client.createPropertySpec()
propertySpec.setType('ComputeResource')
propertySpec.setPathSet(BaseComputeResourceDiscoverer.supportedProperties)
recurseFoldersSelectionSpec = self.client.createSelectionSpec()
recurseFoldersSelectionSpec.setName('folder2childEntity')
folderTraversalSpec = self.client.createTraversalSpec()
folderTraversalSpec.setType('Folder')
folderTraversalSpec.setPath('childEntity')
folderTraversalSpec.setName(recurseFoldersSelectionSpec.getName())
folderTraversalSpec.setSelectSet([recurseFoldersSelectionSpec])
objectSpec = self.client.createObjectSpec()
objectSpec.setObj(self.hostFolderRef)
objectSpec.setSkip(1)
objectSpec.setSelectSet([folderTraversalSpec])
propertyFilterSpec = self.client.createPropertyFilterSpec()
propertyFilterSpec.setPropSet([propertySpec])
propertyFilterSpec.setObjectSet([objectSpec])
return self.client.getService().retrieveProperties(self.client.getPropertyCollector(), [propertyFilterSpec])
def addResultsToVector(self, vector):
linkOSH = modeling.createLinkOSH('manage', self.vcOSH, self.osh)
vector.add(self.osh)
vector.add(linkOSH)
for computeResourceDiscoverer in self.computeResourceDiscoverers:
computeResourceDiscoverer.addResultsToVector(vector)
class BaseComputeResourceDiscoverer(ManagedEntityDiscoverer):
"""
Class represents a base discoverer for all ComputeResources (clusters and non-clusters)
[http://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.ComputeResource.html]
[http://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.ClusterComputeResource.html]
ComputeResources are not mapped to any CI in current Class Model for VI.
Important: to reduce the number of calls to server we retrieve all descendant objects at once:
- all resource pools are retrieved in one traversal
- all virtual machines are retrieved in one traversal
- all ESX servers are retrieved in one traversal
Afterwards we restore the hierarchy of resource pools, align VMs to resource pools they are assigned to,
align VMs to ESXes where they are running
    Important: any ComputeResource always has a root Resource Pool, which is not customizable and not visible from
regular UI. Such root Resource Pool does not have any representation in our Class Model, so child pools
are linked directly to parent cluster/ESX.
"""
PROP_RESOURCE_POOL = 'resourcePool'
supportedProperties = ManagedEntityDiscoverer.supportedProperties + [PROP_RESOURCE_POOL]
def __init__(self, client, framework, discoveryConfig, datacenterOSH):
ManagedEntityDiscoverer.__init__(self, client, framework, discoveryConfig)
self.handlers[BaseComputeResourceDiscoverer.PROP_RESOURCE_POOL] = self.handleResourcePool
self.datacenterOSH = datacenterOSH
self.poolRefToPoolDiscoverer = {}
self.parentPoolToChildPools = {}
self.esxRefToEsxDiscoverer = {}
self.vmRefToVmDiscoverer = {}
self.vmHostKeyToVmRef = {}
self.vms = None
def handleResourcePool(self, resourcePoolRef):
self.rootResourcePoolRef = resourcePoolRef
def discover(self):
self.discoverResourcePools()
self.discoverEsxServers()
self.discoverVirtualMachines()
def discoverResourcePools(self):
resourcePoolsContents = self.__retrieveResourcePools()
if resourcePoolsContents is not None:
for objectContent in resourcePoolsContents:
ref = objectContent.getObj()
props = objectContent.getPropSet()
resourcePoolDiscoverer = ResourcePoolDiscoverer(self.client, self.framework, self.discoveryConfig)
resourcePoolDiscoverer.handle(ref, props)
if ref.equals(self.rootResourcePoolRef):
#this a root pool, do not include in regular hierarchy
self.vms = resourcePoolDiscoverer.vms
else:
resourcePoolDiscoverer.discover()
resourcePoolDiscoverer.processMessages()
self.poolRefToPoolDiscoverer[ref] = resourcePoolDiscoverer
self.__linkParentAndChildPools(resourcePoolDiscoverer.parentRef, ref)
else:
logger.debug('No resource pools were found')
def discoverEsxServers(self):
esxContents = self.__retrieveEsxServers()
if esxContents is not None:
for objectContent in esxContents:
ref = objectContent.getObj()
props = objectContent.getPropSet()
esxDiscoverer = EsxDiscoverer(self.client, self.framework, self.discoveryConfig)
esxDiscoverer.handle(ref, props)
esxDiscoverer.discover()
esxDiscoverer.processMessages()
self.esxRefToEsxDiscoverer[ref] = esxDiscoverer
else:
logger.debug('No ESX Servers were found')
def discoverVirtualMachines(self):
vmContents = self.__retrieveVirtualMachines()
if vmContents is not None:
for objectContent in vmContents:
ref = objectContent.getObj()
props = objectContent.getPropSet()
vmDiscoverer = VirtualMachineDiscoverer(self.client, self.framework, self.discoveryConfig)
vmDiscoverer.handle(ref, props)
vmDiscoverer.discover()
vmDiscoverer.processMessages()
if vmDiscoverer.hostKey:
self.addVmWithFiltering(vmDiscoverer)
else:
logger.debug('No Virtual Machines found')
def addVmWithFiltering(self, vmDiscoverer):
if vmDiscoverer.vmIsPowered:
if self.vmHostKeyToVmRef.has_key(vmDiscoverer.hostKey):
secondCandidateRef = self.vmHostKeyToVmRef[vmDiscoverer.hostKey]
secondCandidate = self.vmRefToVmDiscoverer[secondCandidateRef]
if secondCandidate.vmIsPowered:
msg = "There are two machines with the same host key '%s', both are powered on, keeping the first one" % vmDiscoverer.hostKey
logger.debug(msg)
else:
msg = "There are two machines with the same host key '%s', keeping powered-on one" % vmDiscoverer.hostKey
logger.debug(msg)
self.__addVm(vmDiscoverer)
del(self.vmRefToVmDiscoverer[secondCandidateRef])
else:
self.__addVm(vmDiscoverer)
else:
if self.discoveryConfig.reportPoweredOffVms:
if self.vmHostKeyToVmRef.has_key(vmDiscoverer.hostKey):
secondCandidateRef = self.vmHostKeyToVmRef[vmDiscoverer.hostKey]
secondCandidate = self.vmRefToVmDiscoverer[secondCandidateRef]
if secondCandidate.vmIsPowered:
msg = "There are two machines with the same host key '%s', keeping powered-on one" % vmDiscoverer.hostKey
logger.debug(msg)
else:
msg = "There are two machines with the same host key '%s', both are powered off, keeping the first one" % vmDiscoverer.hostKey
logger.debug(msg)
else:
self.__addVm(vmDiscoverer)
def __addVm(self, vmDiscoverer):
self.vmRefToVmDiscoverer[vmDiscoverer.selfRef] = vmDiscoverer
self.vmHostKeyToVmRef[vmDiscoverer.hostKey] = vmDiscoverer.selfRef
def __retrieveResourcePools(self):
propertySpec = self.client.createPropertySpec()
propertySpec.setType('ResourcePool')
propertySpec.setPathSet(ResourcePoolDiscoverer.supportedProperties)
recursePoolsSelectionSpec = self.client.createSelectionSpec()
recursePoolsSelectionSpec.setName('pool2childPools')
poolTraversalSpec = self.client.createTraversalSpec()
poolTraversalSpec.setType('ResourcePool')
poolTraversalSpec.setPath('resourcePool')
poolTraversalSpec.setName(recursePoolsSelectionSpec.getName())
poolTraversalSpec.setSelectSet([recursePoolsSelectionSpec])
objectSpec = self.client.createObjectSpec()
objectSpec.setObj(self.rootResourcePoolRef)
# we do not skip the root here but we will not include it regular hierarchy later
objectSpec.setSkip(0)
objectSpec.setSelectSet([poolTraversalSpec])
propertyFilterSpec = self.client.createPropertyFilterSpec()
propertyFilterSpec.setPropSet([propertySpec])
propertyFilterSpec.setObjectSet([objectSpec])
return self.client.getService().retrieveProperties(self.client.getPropertyCollector(), [propertyFilterSpec])
def __retrieveEsxServers(self):
propertySpec = self.client.createPropertySpec()
propertySpec.setType('HostSystem')
propertySpec.setPathSet(EsxDiscoverer.supportedProperties)
computeResourceSelectionSpec = self.client.createSelectionSpec()
computeResourceSelectionSpec.setName('computeResource2hosts')
computeResourceTraversalSpec = self.client.createTraversalSpec()
computeResourceTraversalSpec.setType('ComputeResource')
computeResourceTraversalSpec.setPath('host')
computeResourceTraversalSpec.setName(computeResourceSelectionSpec.getName())
computeResourceTraversalSpec.setSelectSet([computeResourceSelectionSpec])
objectSpec = self.client.createObjectSpec()
objectSpec.setObj(self.selfRef)
objectSpec.setSkip(1)
objectSpec.setSelectSet([computeResourceTraversalSpec])
propertyFilterSpec = self.client.createPropertyFilterSpec()
propertyFilterSpec.setPropSet([propertySpec])
propertyFilterSpec.setObjectSet([objectSpec])
return self.client.getService().retrieveProperties(self.client.getPropertyCollector(), [propertyFilterSpec])
def __retrieveVirtualMachines(self):
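        # Two chained traversal specs are used: 'resourcePool2Vms' follows the 'vm' property of a pool
        # and 'resourcePool2ChildPools' recurses into child pools, so VMs of the entire pool
        # hierarchy under the root pool are retrieved in a single query.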
propertySpec = self.client.createPropertySpec()
propertySpec.setType('VirtualMachine')
propertySpec.setPathSet(VirtualMachineDiscoverer.supportedProperties)
vmInPoolSelectionSpec = self.client.createSelectionSpec()
vmInPoolSelectionSpec.setName('resourcePool2Vms')
vmInPoolTraversalSpec = self.client.createTraversalSpec()
vmInPoolTraversalSpec.setType('ResourcePool')
vmInPoolTraversalSpec.setPath('vm')
vmInPoolTraversalSpec.setName(vmInPoolSelectionSpec.getName())
vmInPoolTraversalSpec.setSelectSet([vmInPoolSelectionSpec])
recursePoolSelectionSpec = self.client.createSelectionSpec()
recursePoolSelectionSpec.setName('resourcePool2ChildPools')
recursePoolTraversalSpec = self.client.createTraversalSpec()
recursePoolTraversalSpec.setType('ResourcePool')
recursePoolTraversalSpec.setPath('resourcePool')
recursePoolTraversalSpec.setName(recursePoolSelectionSpec.getName())
recursePoolTraversalSpec.setSelectSet([recursePoolSelectionSpec, vmInPoolSelectionSpec])
objectSpec = self.client.createObjectSpec()
objectSpec.setObj(self.rootResourcePoolRef)
objectSpec.setSkip(0)
objectSpec.setSelectSet([vmInPoolTraversalSpec, recursePoolTraversalSpec])
propertyFilterSpec = self.client.createPropertyFilterSpec()
propertyFilterSpec.setPropSet([propertySpec])
propertyFilterSpec.setObjectSet([objectSpec])
return self.client.getService().retrieveProperties(self.client.getPropertyCollector(), [propertyFilterSpec])
def __linkParentAndChildPools(self, parentPoolRef, childPoolRef):
childSet = None
if self.parentPoolToChildPools.has_key(parentPoolRef):
childSet = self.parentPoolToChildPools[parentPoolRef]
else:
childSet = HashSet()
self.parentPoolToChildPools[parentPoolRef] = childSet
childSet.add(childPoolRef)
def addAllPoolsToVector(self, vector, parentOSH):
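        # The root resource pool itself is intentionally not reported (see __retrieveResourcePools);
        # its descendant pools are added recursively and the VMs associated with the root pool are
        # attached directly to parentOSH.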
self.addDescendantsOfPoolToVector(vector, parentOSH, self.rootResourcePoolRef)
self.addVmsToVector(vector, parentOSH, self.vms)
def addPoolToVector(self, vector, parentOSH, ref):
if self.poolRefToPoolDiscoverer.has_key(ref):
poolDiscoverer = self.poolRefToPoolDiscoverer[ref]
poolDiscoverer.osh.setContainer(parentOSH)
vector.add(poolDiscoverer.osh)
self.addVmsToVector(vector, poolDiscoverer.osh, poolDiscoverer.vms)
self.addDescendantsOfPoolToVector(vector, poolDiscoverer.osh, ref)
def addDescendantsOfPoolToVector(self, vector, parentOSH, ref):
if self.parentPoolToChildPools.has_key(ref):
childSetIterator = self.parentPoolToChildPools[ref].iterator()
while childSetIterator.hasNext():
childRef = childSetIterator.next()
self.addPoolToVector(vector, parentOSH, childRef)
def addVmsToVector(self, vector, parentOSH, arrayOfMoRef):
if arrayOfMoRef:
vms = arrayOfMoRef.getManagedObjectReference()
if vms is not None:
for vmRef in vms:
if self.vmRefToVmDiscoverer.has_key(vmRef):
vmDiscoverer = self.vmRefToVmDiscoverer[vmRef]
if vmDiscoverer.hostOSH is not None:
vmDiscoverer.addResultsToVector(vector)
if parentOSH is not None:
linkOSH = modeling.createLinkOSH('contains', parentOSH, vmDiscoverer.hostOSH.build())
vector.add(linkOSH)
def addLinksForServersWithRunningVmsToVector(self, vector):
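        # For every discovered ESX server create a 'run' link between its virtualization layer OSH and
        # the host OSH of each VM it reports, showing on which ESX server the VM resides.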
for esxDiscoverer in self.esxRefToEsxDiscoverer.values():
esxOsh = esxDiscoverer.osh
vms = esxDiscoverer.vms.getManagedObjectReference()
if vms:
for vmRef in vms:
if self.vmRefToVmDiscoverer.has_key(vmRef):
vmDiscoverer = self.vmRefToVmDiscoverer[vmRef]
hostOsh = vmDiscoverer.hostOSH
if hostOsh is not None:
hostOsh = hostOsh.build()
runLink = modeling.createLinkOSH('run', esxOsh, hostOsh)
vector.add(runLink)
class BaseClusterComputeResourceDiscoverer(BaseComputeResourceDiscoverer):
"""
Class represents a base discoverer for cluster (ClusterComputeResource)
[http://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.ClusterComputeResource.html]
"""
PROP_SUMMARY = 'summary'
versionDependentProperties = [PROP_SUMMARY]
def __init__(self, client, framework, discoveryConfig, datacenterOSH):
BaseComputeResourceDiscoverer.__init__(self, client, framework, discoveryConfig, datacenterOSH)
self.handlers[BaseClusterComputeResourceDiscoverer.PROP_SUMMARY] = self.handleSummary
self.createClusterOSH()
def discoverVersionDependentProperties(self, properties):
contents = self.__retrieveVersionDependentProperties(properties)
props = None
if contents:
objectContent = contents[0]
if objectContent:
props = objectContent.getPropSet()
if props:
self.handle(self.selfRef, props)
else:
msg = "Failed to retrieve cluster properties, verify the connected user has sufficient permissions to query clusters information."
self.framework.reportWarning(msg)
def __retrieveVersionDependentProperties(self, properties):
propertySpec = self.client.createPropertySpec()
propertySpec.setType('ClusterComputeResource')
propertySpec.setPathSet(properties)
objectSpec = self.client.createObjectSpec()
objectSpec.setObj(self.selfRef)
objectSpec.setSkip(0)
propertyFilterSpec = self.client.createPropertyFilterSpec()
propertyFilterSpec.setPropSet([propertySpec])
propertyFilterSpec.setObjectSet([objectSpec])
return self.client.getService().retrieveProperties(self.client.getPropertyCollector(), [propertyFilterSpec])
def handleName(self, name):
self.osh.setAttribute('data_name', name)
def handleStatus(self, status):
self.osh.setStringAttribute('cluster_status', status.getValue())
def handleDasConfig(self, dasConfig):
dasEnabled = dasConfig.getEnabled()
if dasEnabled:
self.osh.setBoolAttribute('das_enabled', dasEnabled)
dasAdmissionControlEnabled = dasConfig.getAdmissionControlEnabled()
if dasAdmissionControlEnabled:
self.osh.setBoolAttribute('das_admission_control_enabled', dasAdmissionControlEnabled)
failoverLevel = dasConfig.getFailoverLevel()
if failoverLevel:
self.osh.setIntegerAttribute('das_failover_level', failoverLevel)
def handleDrsConfig(self, drsConfig):
drsEnabled = drsConfig.getEnabled()
if drsEnabled:
self.osh.setBoolAttribute('drs_enabled', drsEnabled)
vmotionRate = drsConfig.getVmotionRate()
if vmotionRate:
self.osh.setIntegerAttribute('drs_vmotion_rate', vmotionRate)
drsBehavior = drsConfig.getDefaultVmBehavior()
drsBehavior = drsBehavior and drsBehavior.getValue() or None
if drsBehavior:
self.osh.setStringAttribute('drs_behavior', drsBehavior)
def handleSummary(self, summary):
totalCpu = summary.getTotalCpu()
if totalCpu:
self.osh.setIntegerAttribute('total_cpu', totalCpu)
totalMemory = summary.getTotalMemory()
if totalMemory:
self.osh.setLongAttribute('total_memory', totalMemory)
def createClusterOSH(self):
self.osh = ObjectStateHolder('vmware_cluster')
modeling.setAppSystemVendor(self.osh)
def addResultsToVector(self, vector):
self.osh.setContainer(self.datacenterOSH)
vector.add(self.osh)
self.addAllPoolsToVector(vector, self.osh)
for esxDiscoverer in self.esxRefToEsxDiscoverer.values():
memberLink = modeling.createLinkOSH('member', self.osh, esxDiscoverer.osh)
esxDiscoverer.addResultsToVector(vector)
vector.add(memberLink)
self.addLinksForServersWithRunningVmsToVector(vector)
class ClusterComputeResourceDiscoverer25(BaseClusterComputeResourceDiscoverer):
"""
    Class represents a subclass of the Cluster discoverer specific to protocol version 2.5.
    Here we perform an additional query (for each cluster) for the cluster properties that appear in API 2.5 only.
"""
PROP_CONFIG_EX = 'configurationEx'
versionDependentProperties = BaseClusterComputeResourceDiscoverer.versionDependentProperties + [PROP_CONFIG_EX]
def __init__(self, client, framework, discoveryConfig, datacenterOSH):
BaseClusterComputeResourceDiscoverer.__init__(self, client, framework, discoveryConfig, datacenterOSH)
self.handlers[ClusterComputeResourceDiscoverer25.PROP_CONFIG_EX] = self.handleConfigurationEx
def discover(self):
self.discoverVersionDependentProperties(ClusterComputeResourceDiscoverer25.versionDependentProperties)
BaseComputeResourceDiscoverer.discover(self)
def handleConfigurationEx(self, configurationExObject):
dasConfig = configurationExObject.getDasConfig()
if dasConfig is not None:
self.handleDasConfig(dasConfig)
drsConfig = configurationExObject.getDrsConfig()
if drsConfig is not None:
self.handleDrsConfig(drsConfig)
dpmConfig = configurationExObject.getDpmConfigInfo()
if dpmConfig is not None:
self.handleDpmConfig(dpmConfig)
def handleDasConfig(self, dasConfig):
BaseClusterComputeResourceDiscoverer.handleDasConfig(self, dasConfig)
defaultVmSettings = dasConfig.getDefaultVmSettings()
if defaultVmSettings is not None:
self.handleDasDefaultVmSettings(defaultVmSettings)
def handleDasDefaultVmSettings(self, defaultVmSettings):
restartPriority = defaultVmSettings.getRestartPriority()
if restartPriority:
self.osh.setStringAttribute('das_restart_priority', restartPriority)
isolationResponse = defaultVmSettings.getIsolationResponse()
if isolationResponse:
self.osh.setStringAttribute('das_isolation_response', isolationResponse)
def handleDpmConfig(self, dpmConfig):
dpmEnabled = dpmConfig.getEnabled()
if dpmEnabled:
self.osh.setBoolAttribute('dpm_enabled', dpmEnabled)
dpmBehavior = dpmConfig.getDefaultDpmBehavior()
dpmBehavior = dpmBehavior and dpmBehavior.getValue() or None
if dpmBehavior:
self.osh.setStringAttribute('dpm_behavior', dpmBehavior)
class ClusterComputeResourceDiscoverer20(BaseClusterComputeResourceDiscoverer):
"""
    Class represents a subclass of the Cluster discoverer specific to protocol version 2.0.
    Here we perform an additional query (for each cluster) for the cluster properties that are available in API 2.0.
"""
PROP_CONFIG = 'configuration'
versionDependentProperties = BaseClusterComputeResourceDiscoverer.versionDependentProperties + [PROP_CONFIG]
def __init__(self, client, framework, discoveryConfig, datacenterOSH):
BaseClusterComputeResourceDiscoverer.__init__(self, client, framework, discoveryConfig, datacenterOSH)
self.handlers[ClusterComputeResourceDiscoverer20.PROP_CONFIG] = self.handleConfiguration
def discover(self):
self.discoverVersionDependentProperties(ClusterComputeResourceDiscoverer20.versionDependentProperties)
BaseComputeResourceDiscoverer.discover(self)
def handleConfiguration(self, configurationObject):
dasConfig = configurationObject.getDasConfig()
if dasConfig is not None:
self.handleDasConfig(dasConfig)
drsConfig = configurationObject.getDrsConfig()
if drsConfig is not None:
self.handleDrsConfig(drsConfig)
class NonclusteredEsxComputeResourceDiscoverer(BaseComputeResourceDiscoverer):
"""
    Class represents a discoverer for a non-clustered ComputeResource.
Since ComputeResource is a ManagedEntity that is not mapped to any CI in our Class Model,
we need to push the ESX CI up one level and make it a parent for all descendant objects.
"""
def __init__(self, client, framework, discoveryConfig, datacenterOSH):
BaseComputeResourceDiscoverer.__init__(self, client, framework, discoveryConfig, datacenterOSH)
def addResultsToVector(self, vector):
esxesCount = len(self.esxRefToEsxDiscoverer)
if esxesCount == 1:
esxDiscoverer = self.esxRefToEsxDiscoverer.values()[0]
containsLink = modeling.createLinkOSH('contains', self.datacenterOSH, esxDiscoverer.osh)
esxDiscoverer.addResultsToVector(vector)
vector.add(containsLink)
#add VMs associated with root pool to vector, setting parent to None will not create 'contains' link
#all other VMs linked to pools will be added by 'addDescendantsOfPoolToVector'
self.addVmsToVector(vector, None, self.vms)
self.addDescendantsOfPoolToVector(vector, esxDiscoverer.osh, self.rootResourcePoolRef)
self.addLinksForServersWithRunningVmsToVector(vector)
else:
if esxesCount == 0:
logger.debug('No ESX Server was found in ComputeResource')
else:
logger.debug('ERROR: ComputeResource contains more than one ESX Server (expected 1)')
class ResourcePoolDiscoverer(ManagedEntityDiscoverer):
"""
Class represents a discoverer for Resource Pool
[http://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.ResourcePool.html]
"""
PROP_PARENT = 'parent'
PROP_VM = 'vm'
PROP_CONFIG = 'config'
supportedProperties = ManagedEntityDiscoverer.supportedProperties + [PROP_PARENT, PROP_VM, PROP_CONFIG]
def __init__(self, client, framework, discoveryConfig):
ManagedEntityDiscoverer.__init__(self, client, framework, discoveryConfig)
self.handlers[ResourcePoolDiscoverer.PROP_PARENT] = self.handleParent
self.handlers[ResourcePoolDiscoverer.PROP_VM] = self.handleVms
self.handlers[ResourcePoolDiscoverer.PROP_CONFIG] = self.handleConfig
self.createPoolOSH()
self.vms = []
def handleParent(self, parentRef):
self.parentRef = parentRef
def handleVms(self, vms):
self.vms = vms
def handleName(self, name):
self.osh.setAttribute('data_name', name)
def handleStatus(self, status):
self.osh.setStringAttribute('resource_pool_status', status.getValue())
def handleConfig(self, configObject):
cpuAllocation = configObject.getCpuAllocation()
if cpuAllocation is not None:
self.handleCpuAllocation(cpuAllocation)
memoryAllocation = configObject.getMemoryAllocation()
if memoryAllocation is not None:
self.handleMemoryAllocation(memoryAllocation)
def handleCpuAllocation(self, cpuAllocation):
self.osh.setLongAttribute('cpu_reservation', cpuAllocation.getReservation())
self.osh.setLongAttribute('cpu_limit', cpuAllocation.getLimit())
self.osh.setBoolAttribute('cpu_expandable_reservation', cpuAllocation.getExpandableReservation())
sharesInfo = cpuAllocation.getShares()
self.osh.setIntegerAttribute('cpu_shares', sharesInfo.getShares())
self.osh.setStringAttribute('cpu_shares_level', sharesInfo.getLevel().getValue())
def handleMemoryAllocation(self, memoryAllocation):
self.osh.setLongAttribute('memory_reservation', memoryAllocation.getReservation())
self.osh.setLongAttribute('memory_limit', memoryAllocation.getLimit())
self.osh.setBoolAttribute('memory_expandable_reservation', memoryAllocation.getExpandableReservation())
sharesInfo = memoryAllocation.getShares()
self.osh.setIntegerAttribute('memory_shares', sharesInfo.getShares())
self.osh.setStringAttribute('memory_shares_level', sharesInfo.getLevel().getValue())
def createPoolOSH(self):
self.osh = ObjectStateHolder('vmware_resource_pool')
class EsxDiscoverer(ManagedEntityDiscoverer):
"""
Class represents a discoverer for ESX server
[http://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.HostSystem.html]
"""
PROP_SUMMARY = 'summary'
PROP_PRODUCT = 'config.product'
PROP_DNS_CONFIG = 'config.network.dnsConfig'
PROP_VMS = 'vm'
PROP_CONNECTION_STATE = 'runtime.connectionState'
supportedProperties = ManagedEntityDiscoverer.supportedProperties + [PROP_SUMMARY, PROP_PRODUCT, PROP_DNS_CONFIG, PROP_VMS, PROP_CONNECTION_STATE]
def __init__(self, client, framework, discoveryConfig):
ManagedEntityDiscoverer.__init__(self, client, framework, discoveryConfig)
self.handlers[EsxDiscoverer.PROP_SUMMARY] = self.handleSummary
self.handlers[EsxDiscoverer.PROP_PRODUCT] = self.handleProductInfo
self.handlers[EsxDiscoverer.PROP_DNS_CONFIG] = self.handleDnsConfig
self.handlers[EsxDiscoverer.PROP_VMS] = self.handleVms
self.handlers[EsxDiscoverer.PROP_CONNECTION_STATE] = self.handleConnectionState
self.createLayerOsh()
self.createEsxOsh()
self.ip = None
self.licensesDiscoverer = None
self.connectionState = None
def handleName(self, name):
self.osh.setAttribute('hypervisor_name', name)
self.esxOsh.setAttribute('data_name', name)
def handleStatus(self, status):
self.osh.setStringAttribute('status', status.getValue())
def handleVms(self, vms):
self.vms = vms
def handleSummary(self, summaryObject):
configSummary = summaryObject.getConfig()
if configSummary is not None:
self.handleConfigSummary(configSummary)
runtimeInfo = summaryObject.getRuntime()
if runtimeInfo is not None:
self.handleRuntimeInfo(runtimeInfo)
hardwareSummary = summaryObject.getHardware()
if hardwareSummary is not None:
self.handleHardwareSummary(hardwareSummary)
def handleConfigSummary(self, configSummary):
vmotionEnabled = configSummary.isVmotionEnabled()
self.osh.setBoolAttribute('vmotion_enabled', vmotionEnabled)
def handleHardwareSummary(self, hardwareSummary):
#cpuModel = hardwareSummary.getCpuModel()
#cpuMhz = hardwareSummary.getCpuMhz()
#numberOfCpus = hardwareSummary.getNumCpuPkgs()
hostModel = hardwareSummary.getModel()
self.esxOsh.setStringAttribute('host_model', hostModel)
hostVendor = hardwareSummary.getVendor()
self.esxOsh.setStringAttribute('host_vendor', hostVendor)
#memorySize = hardwareSummary.getMemorySize() #in bytes
uuid = hardwareSummary.getUuid()
self.esxOsh.setStringAttribute('host_key', uuid)
modeling.setHostBiosUuid(self.esxOsh, uuid)
self.esxOsh.setBoolAttribute('host_iscomplete', 1)
def handleRuntimeInfo(self, runtimeInfo):
inMaintenanceMode = runtimeInfo.isInMaintenanceMode()
self.osh.setBoolAttribute('maintenance_mode', inMaintenanceMode)
bootTime = runtimeInfo.getBootTime()
if bootTime is not None:
bootTimeMillis = bootTime.getTime()
self.esxOsh.setDateAttribute('host_last_boot_time', bootTimeMillis)
def handleProductInfo(self, productInfo):
if productInfo is not None:
fullName = productInfo.getFullName()
version = productInfo.getVersion()
build = productInfo.getBuild()
fullVersion = "%s.%s" % (version, build)
self.osh.setStringAttribute('data_description', fullName)
self.osh.setStringAttribute('application_version', fullVersion)
def handleDnsConfig(self, dnsConfig):
hostName = dnsConfig.getHostName()
self.esxOsh.setStringAttribute('host_hostname', hostName)
domainName = dnsConfig.getDomainName()
fullHostName = hostName
if domainName:
fullHostName = "%s.%s" % (hostName, domainName)
self.ip = resolveHostIp(fullHostName)
def handleConnectionState(self, state):
self.connectionState = state.getValue()
self.osh.setStringAttribute('connection_state', self.connectionState)
def createLayerOsh(self):
self.osh = ObjectStateHolder('virtualization_layer')
self.osh.setStringAttribute('data_name', 'Virtualization Layer Software')
def createEsxOsh(self):
self.esxOsh = HostBuilder.fromClassName('vmware_esx_server')
modeling.setHostOsFamily(self.esxOsh, 'baremetal_hypervisor')
def createConsoleOsHost(self, ip):
cosOsh = modeling.createHostOSH(ip)
return HostBuilder(cosOsh).setAsVirtual(1).build()
def discover(self):
if self.connectionState == 'connected':
self.discoverLicenses()
def discoverLicenses(self):
self.licensesDiscoverer = LicensesDiscoverer(self.client, self.framework, self.discoveryConfig, self.selfRef, self.osh)
self.licensesDiscoverer.discover()
def addResultsToVector(self, vector):
esxOsh = self.esxOsh.build()
self.osh.setContainer(esxOsh)
vector.add(self.osh)
vector.add(esxOsh)
if self.ip is not None:
cosOsh = self.createConsoleOsHost(self.ip)
ipOsh = modeling.createIpOSH(self.ip)
containedLink = modeling.createLinkOSH('contained', cosOsh, ipOsh)
runLink = modeling.createLinkOSH('run', self.osh, cosOsh)
vector.add(cosOsh)
vector.add(ipOsh)
vector.add(containedLink)
vector.add(runLink)
if self.licensesDiscoverer is not None:
self.licensesDiscoverer.addResultsToVector(vector)
class VirtualMachineDiscoverer(ManagedEntityDiscoverer):
"""
Class represents a discoverer for Virtual Machine
[http://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.VirtualMachine.html]
"""
PROP_CPU_ALLOCATION = 'config.cpuAllocation'
PROP_MEMORY_ALLOCATION = 'config.memoryAllocation'
PROP_GUEST = 'guest'
PROP_IS_TEMPLATE = 'config.template'
PROP_UUID = 'config.uuid'
PROP_MEMORY_SIZE = 'config.hardware.memoryMB'
PROP_NUM_CPUS = 'config.hardware.numCPU'
PROP_BOOT_TIME = 'runtime.bootTime'
PROP_POWER_STATE = 'runtime.powerState'
MESSAGE_PARAM_VM_NAME = 'vmName'
MESSAGE_PARAM_TOOLS_STATUS = 'toolsStatus'
MESSAGE_PARAM_GUEST_STATE = 'guestState'
supportedProperties = ManagedEntityDiscoverer.supportedProperties + [
PROP_CPU_ALLOCATION,
PROP_MEMORY_ALLOCATION,
PROP_GUEST,
PROP_IS_TEMPLATE,
PROP_UUID,
PROP_MEMORY_SIZE,
PROP_NUM_CPUS,
PROP_BOOT_TIME,
PROP_POWER_STATE
]
def __init__(self, client, framework, discoveryConfig):
ManagedEntityDiscoverer.__init__(self, client, framework, discoveryConfig)
self.handlers[VirtualMachineDiscoverer.PROP_CPU_ALLOCATION] = self.handleCpuAllocation
self.handlers[VirtualMachineDiscoverer.PROP_MEMORY_ALLOCATION] = self.handleMemoryAllocation
self.handlers[VirtualMachineDiscoverer.PROP_IS_TEMPLATE] = self.handleTemplate
self.handlers[VirtualMachineDiscoverer.PROP_UUID] = self.handleUuid
self.handlers[VirtualMachineDiscoverer.PROP_MEMORY_SIZE] = self.handleMemorySize
self.handlers[VirtualMachineDiscoverer.PROP_NUM_CPUS] = self.handleNumberOfCpus
self.handlers[VirtualMachineDiscoverer.PROP_BOOT_TIME] = self.handleBootTime
self.handlers[VirtualMachineDiscoverer.PROP_POWER_STATE] = self.handlePowerState
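        # Guest info handling differs by UCMDB version: on 9.x a VM without VMware Tools can still be
        # keyed by its BIOS UUID, while on earlier versions a running Tools (MAC or IP) is required.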
if self.discoveryConfig.getUcmdbVersion() >= 9:
self.handlers[VirtualMachineDiscoverer.PROP_GUEST] = self.handleGuest90
else:
self.handlers[VirtualMachineDiscoverer.PROP_GUEST] = self.handleGuest80
self.createVirtualHostResourceOSH()
self.bootTime = None
self.hostOSH = None
self.vmName = None
self.guestState = None
self.toolsStatus = None
self.powerState = None
self.hostKey = None
self.hostIsComplete = 0
self.vmIsPowered = 0
self.ipAddress = None
self.uuid = None
self.hostName = None
self.fullName = None
self._hostClass = 'host'
self._lowestMac = None
def handleName(self, name):
self.hostResourceOSH.setAttribute('data_name', name)
self.vmName = name
def handleStatus(self, status):
self.hostResourceOSH.setStringAttribute('vm_status', status.getValue())
def handleCpuAllocation(self, cpuAllocation):
self.hostResourceOSH.setLongAttribute('vm_cpu_reservation', cpuAllocation.getReservation())
self.hostResourceOSH.setLongAttribute('vm_cpu_limit', cpuAllocation.getLimit())
sharesInfo = cpuAllocation.getShares()
self.hostResourceOSH.setIntegerAttribute('vm_cpu_shares', sharesInfo.getShares())
self.hostResourceOSH.setStringAttribute('vm_cpu_shares_level', sharesInfo.getLevel().getValue())
def handleMemoryAllocation(self, memoryAllocation):
self.hostResourceOSH.setLongAttribute('vm_memory_reservation', memoryAllocation.getReservation())
self.hostResourceOSH.setLongAttribute('vm_memory_limit', memoryAllocation.getLimit())
sharesInfo = memoryAllocation.getShares()
self.hostResourceOSH.setIntegerAttribute('vm_memory_shares', sharesInfo.getShares())
self.hostResourceOSH.setStringAttribute('vm_memory_shares_level', sharesInfo.getLevel().getValue())
def handleGuest80(self, guestInfo):
self._getHostAttributes(guestInfo)
toolsStatusObject = guestInfo.getToolsStatus()
if toolsStatusObject is not None:
self.toolsStatus = toolsStatusObject.getValue()
if (self.toolsStatus == 'toolsOk' or self.toolsStatus == 'toolsOld'):
self.hostResourceOSH.setStringAttribute('vm_tools_status', self.toolsStatus)
self._lowestMac = self._getLowestMac(guestInfo)
if not self._lowestMac and not self.ipAddress:
msg = "Cannot determine the IP or MAC address of virtual machine '%(" + VirtualMachineDiscoverer.MESSAGE_PARAM_VM_NAME + ")s', CI will not be reported"
self.warnings.append(msg)
else:
msg = "Virtual machine '%(" + VirtualMachineDiscoverer.MESSAGE_PARAM_VM_NAME + ")s' does not have a VMware Tools running (status is '%(" + VirtualMachineDiscoverer.MESSAGE_PARAM_TOOLS_STATUS + ")s'), CI will not be reported"
self.warnings.append(msg)
else:
msg = "Virtual machine '%(" + VirtualMachineDiscoverer.MESSAGE_PARAM_VM_NAME + ")s' does not have a VMware Tools running (status is unknown), CI will not be reported"
self.warnings.append(msg)
def handleGuest90(self, guestInfo):
self._getHostAttributes(guestInfo)
toolsStatusObject = guestInfo.getToolsStatus()
if toolsStatusObject is not None:
self.toolsStatus = toolsStatusObject.getValue()
if self.toolsStatus == 'toolsOk' or self.toolsStatus == 'toolsOld':
self.hostResourceOSH.setStringAttribute('vm_tools_status', self.toolsStatus)
self._lowestMac = self._getLowestMac(guestInfo)
def handleTemplate(self, template):
self.hostResourceOSH.setBoolAttribute('vm_is_template', template)
def handleUuid(self, uuid):
self.uuid = uuid
self.hostResourceOSH.setStringAttribute('vm_uuid', uuid)
def handleMemorySize(self, memorySize):
self.hostResourceOSH.setIntegerAttribute('vm_memory_size', memorySize)
def handleNumberOfCpus(self, numOfCpus):
self.hostResourceOSH.setIntegerAttribute('vm_num_cpus', numOfCpus)
def handleBootTime(self, bootTime):
self.bootTime = bootTime.getTime()
def handlePowerState(self, powerState):
self.powerState = powerState.getValue()
    def _getLowestMac(self, guestInfo):
        lowestMac = None
        nics = guestInfo.getNet()
        if nics:
            # try to find the lowest normalized MAC address among all reported NICs
            for nic in nics:
                rawMac = nic.getMacAddress()
                parsedMac = None
                try:
                    parsedMac = netutils.parseMac(rawMac)
                except:
                    pass
                if parsedMac:
                    if (lowestMac is None) or (parsedMac < lowestMac):
                        lowestMac = parsedMac
        return lowestMac
def _getHostAttributes(self, guestInfo):
self.ipAddress = guestInfo.getIpAddress()
self.guestState = guestInfo.getGuestState()
self.hostName = guestInfo.getHostName()
self.fullName = guestInfo.getGuestFullName()
family = guestInfo.getGuestFamily()
if family == 'windowsGuest':
self._hostClass = 'nt'
elif family == 'linuxGuest':
self._hostClass = 'unix'
def _afterHandle(self):
ManagedEntityDiscoverer._afterHandle(self)
self._findHostKey()
self._findIfVmIsPowered()
if self.hostKey:
self._createHostOsh()
def _findHostKey(self):
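        # Host key preference: lowest MAC (complete host), then IP + probe domain (incomplete host),
        # then BIOS UUID (complete host, UCMDB 9.x and later only).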
if self._lowestMac:
self.hostKey = self._lowestMac
self.hostIsComplete = 1
return
if self.ipAddress:
# try to use the IP for weak key
probeDomain = DomainScopeManager.getDomainByIp(self.ipAddress)
self.hostKey = "%s %s" % (self.ipAddress, probeDomain)
return
if self.uuid and self.discoveryConfig.getUcmdbVersion() >= 9:
self.hostKey = self.uuid
self.hostIsComplete = 1
def _findIfVmIsPowered(self):
if self.powerState:
if self.powerState == 'poweredOn':
self.vmIsPowered = 1
else:
if self.guestState and self.guestState == 'running':
self.vmIsPowered = 1
def _createHostOsh(self):
self.hostOSH = HostBuilder.fromClassName(self._hostClass)
self.hostOSH.setAsVirtual(1)
self.hostOSH.setStringAttribute('host_key', self.hostKey)
if self.hostIsComplete:
self.hostOSH.setBoolAttribute('host_iscomplete', 1)
self._setHostName(self.hostOSH, self.hostName)
if self.fullName:
self.hostOSH.setStringAttribute('data_description', self.fullName)
if self.bootTime:
self.hostOSH.setDateAttribute('host_last_boot_time', self.bootTime)
if self.uuid:
self.hostOSH.setStringAttribute('host_biosuuid', self.uuid.upper())
def _setHostName(self, hostOsh, hostNameStr):
hostname = hostNameStr and hostNameStr.strip().lower() or None
if not hostname: return
domain = None
tokens = re.split(r"\.", hostname)
if len(tokens) > 1:
hostname = tokens[0]
domain = ".".join(tokens[1:])
if hostname:
hostOsh.setStringAttribute('host_hostname', hostname)
if domain:
hostOsh.setStringAttribute('host_osdomain', domain)
def createVirtualHostResourceOSH(self):
self.hostResourceOSH = ObjectStateHolder('vmware_host_resource')
def handleMessage(self, message):
ManagedEntityDiscoverer.handleMessage(self, message)
namedParams = {}
namedParams[VirtualMachineDiscoverer.MESSAGE_PARAM_VM_NAME] = self.vmName
namedParams[VirtualMachineDiscoverer.MESSAGE_PARAM_GUEST_STATE] = self.guestState
namedParams[VirtualMachineDiscoverer.MESSAGE_PARAM_TOOLS_STATUS] = self.toolsStatus
return message % namedParams
def addResultsToVector(self, vector):
if not self.hostKey or not self.hostOSH: return
builtHostOsh = self.hostOSH.build()
self.hostResourceOSH.setContainer(builtHostOsh)
vector.add(builtHostOsh)
vector.add(self.hostResourceOSH)
if self.ipAddress is not None:
ipOsh = modeling.createIpOSH(self.ipAddress)
containedLink = modeling.createLinkOSH('contained', builtHostOsh, ipOsh)
vector.add(containedLink)
class LicensesDiscoverer(BaseDiscoverer):
"""
Class represents a discoverer for licensing information for either VirtualCenter or for ESX server.
We use LicenseManager to get all information:
[http://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.LicenseManager.html]
    Currently we build a new hierarchy (server - feature - license source) from scratch for each server.
    We do not keep track of the created OSHs for license servers, features etc.;
    we rely on the probe to merge the resulting trees.
"""
def __init__(self, client, framework, discoveryConfig, parentRef, parentOsh):
BaseDiscoverer.__init__(self, client, framework, discoveryConfig)
self.parentRef = parentRef
self.parentOsh = parentOsh
self.keyToFeatureOsh = {}
self.sourceOsh = None
self.additionalOshs = []
        # Some features that are part of an edition have different values reported depending on the server version.
        # For example, ESX Server 3.0 can report 'available' for 'nas' as 997 (the same number as for the 'esx' edition license,
        # which includes 'nas') while ESX 3.5 can report 'available' as 0 (of 1).
        # Because of these differences we skip total/available for these features.
self.ignoreAvailabilityForFeaturesSet = HashSet()
self.ignoreAvailabilityForFeaturesSet.add('nas')
self.ignoreAvailabilityForFeaturesSet.add('san')
self.ignoreAvailabilityForFeaturesSet.add('iscsi')
self.ignoreAvailabilityForFeaturesSet.add('vsmp')
def discover(self):
try:
self.discoverAvailability()
self.discoverUsageAndSource()
except NotSupportedException, ex:
msg = "Licensing information discovery is not supported by server with current protocol"
self.framework.reportWarning(msg)
if self.parentRef is not None:
name = self.parentRef.get_value()
if name:
msg = "%s for '%s'" % (msg, name)
logger.warn(msg)
except:
logger.warnException('Failed to discover licensing information')
def discoverAvailability(self):
try:
licenseAvailabilityInfoArray = self.client.queryLicenseAvailability(self.parentRef)
if licenseAvailabilityInfoArray:
for lai in licenseAvailabilityInfoArray:
total = lai.getTotal()
available = lai.getAvailable()
featureInfo = lai.getFeature()
key = featureInfo.getKey()
featureOsh = self.__makeFeatureOsh(featureInfo)
if not self.ignoreAvailabilityForFeaturesSet.contains(key):
featureOsh.setIntegerAttribute('licenses_total', total)
featureOsh.setIntegerAttribute('licenses_available', available)
self.keyToFeatureOsh[key] = featureOsh
except NoPermissionException, ex:
priviledgeId = ex.getMessage()
msg = "User does not have required '%s' permission, features availability information won't be reported" % priviledgeId
self.framework.reportWarning(msg)
def __makeFeatureOsh(self, featureInfo):
featureOsh = ObjectStateHolder('license_feature')
key = featureInfo.getKey()
featureOsh.setStringAttribute('data_name', key)
costUnit = featureInfo.getCostUnit()
featureOsh.setStringAttribute('license_cost_unit', costUnit)
featureName = featureInfo.getFeatureName()
featureOsh.setStringAttribute('feature_name', featureName)
# isEdition and description are available only in 2.5
if self.client.getVersionString() == VMWARE_PROTOCOL_VERSION_25:
description = featureInfo.getFeatureDescription()
featureOsh.setStringAttribute('data_description', description)
isEdition = featureInfo.getEdition()
if key != 'esxFull':
featureOsh.setBoolAttribute('feature_is_edition', isEdition)
return featureOsh
def discoverUsageAndSource(self):
try:
licenseUsageInfo = self.client.queryLicenseUsage(self.parentRef)
if licenseUsageInfo:
features = licenseUsageInfo.getFeatureInfo()
if features:
for feature in features:
key = feature.getKey()
if not self.keyToFeatureOsh.has_key(key):
featureOsh = self.__makeFeatureOsh(feature)
self.keyToFeatureOsh[key] = featureOsh
source = licenseUsageInfo.getSource()
self.__makeSourceOsh(source)
reservations = licenseUsageInfo.getReservationInfo()
if reservations:
for reservation in reservations:
key = reservation.getKey()
reservationLink = self.__makeReservationLink(reservation)
if reservationLink is not None:
self.additionalOshs.append(reservationLink)
except NoPermissionException, ex:
priviledgeId = ex.getMessage()
msg = "User does not have required '%s' permission, features usage information won't be reported" % priviledgeId
self.framework.reportWarning(msg)
def __makeSourceOsh(self, licenseSource):
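        # Only a LicenseServerSource produces an OSH; local and evaluation license sources are ignored.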
sourceType = licenseSource.getTypeDesc().getXmlType().getLocalPart()
if sourceType == 'LicenseServerSource':
return self.__makeLicenseServerOsh(licenseSource)
elif sourceType == 'LocalLicenseSource':
return self.__makeLocalLicenseOsh(licenseSource)
elif sourceType == 'EvaluationLicenseSource':
return self.__makeEvaluationLicenseOsh(licenseSource)
else:
raise ValueError, "Unsupported license source type '%s'" % sourceType
def __makeLicenseServerOsh(self, licenseSource):
server = licenseSource.getLicenseServer()
matcher = re.match('(\d+)@(\S+)$', server)
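        # the license server is reported as '<port>@<host>', e.g. '27000@flexlm-host' (host name here is illustrative)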
if matcher:
port = matcher.group(1)
host = matcher.group(2)
ip = resolveHostIp(host)
if ip is not None:
hostOsh = modeling.createHostOSH(ip)
licenseServerOsh = modeling.createApplicationOSH('license_server', server, hostOsh)
licenseServerOsh.setIntegerAttribute('application_port', port)
self.sourceOsh = licenseServerOsh
self.additionalOshs.append(hostOsh)
return licenseServerOsh
def __makeLocalLicenseOsh(self, licenseSource):
logger.debug("Local license was ignored")
def __makeEvaluationLicenseOsh(self, licenseSource):
logger.debug("Evaluation license source was ignored")
def __makeReservationLink(self, reservation):
key = reservation.getKey()
reserve = reservation.getRequired()
state = reservation.getState().getValue()
if self.keyToFeatureOsh.has_key(key):
featureOsh = self.keyToFeatureOsh[key]
reservationLink = modeling.createLinkOSH('license_reservation', self.parentOsh, featureOsh)
reservationLink.setIntegerAttribute('reserved', reserve)
reservationLink.setStringAttribute('state', state)
return reservationLink
else:
logger.debug("Warn: there is no feature for reservation with key '%s'" % key)
def addResultsToVector(self, vector):
if self.sourceOsh is not None:
vector.add(self.sourceOsh)
for featureOsh in self.keyToFeatureOsh.values():
featureOsh.setContainer(self.sourceOsh)
vector.add(featureOsh)
for osh in self.additionalOshs:
vector.add(osh)
class VirtualCenterLicensesDiscoverer(LicensesDiscoverer):
"""
    Class represents a discoverer for licensing information specific to VirtualCenter, where we add
    an additional 'use' link between the license server and the VirtualCenter server.
"""
def __init__(self, client, framework, discoveryConfig, parentRef, parentOsh):
LicensesDiscoverer.__init__(self, client, framework, discoveryConfig, parentRef, parentOsh)
def addResultsToVector(self, vector):
LicensesDiscoverer.addResultsToVector(self, vector)
if self.sourceOsh is not None:
useLink = modeling.createLinkOSH('use', self.parentOsh, self.sourceOsh)
vector.add(useLink)
class StandaloneEsxDiscoverer(BaseDiscoverer):
"""
    Class represents a discoverer for an ESX server when connecting to it directly rather than
    discovering it via VirtualCenter.
    On the ESX server's side the connection is handled by the Host Agent,
    which has almost the same API as VC. So, in order to get to the ComputeResource (under which we
    have resource pools, VMs and the HostSystem object itself) we need to traverse the hierarchy of
    folders/datacenters.
"""
def __init__(self, client, framework, discoveryConfig):
BaseDiscoverer.__init__(self, client, framework, discoveryConfig)
self.standaloneEsxComputeResourceDiscoverer = None
def discover(self):
self.discoverStandaloneEsxComputeResource()
self.discoverLicenses()
def discoverStandaloneEsxComputeResource(self):
contents = self.__retrieveStandaloneEsxComputeResource()
if contents is not None:
computeResourcesCount = len(contents)
if computeResourcesCount == 1:
objectContent = contents[0]
ref = objectContent.getObj()
props = objectContent.getPropSet()
esxComputeResourceHandler = StandaloneEsxComputeResourceDiscoverer(self.client, self.framework, self.discoveryConfig)
esxComputeResourceHandler.handle(ref, props)
esxComputeResourceHandler.discover()
esxComputeResourceHandler.processMessages()
self.standaloneEsxComputeResourceDiscoverer = esxComputeResourceHandler
else:
logger.debug('ERROR: standalone ESX has %d ComputeResources (expected 1)' % computeResourcesCount)
else:
logger.debug('No ComputeResources found')
def __retrieveStandaloneEsxComputeResource(self):
propertySpec = self.client.createPropertySpec()
propertySpec.setType('ComputeResource')
propertySpec.setPathSet(BaseComputeResourceDiscoverer.supportedProperties)
recurseFoldersSelectionSpec = self.client.createSelectionSpec()
recurseFoldersSelectionSpec.setName('folder2childEntity')
datacenterToHostFolderSelectionSpec = self.client.createSelectionSpec()
datacenterToHostFolderSelectionSpec.setName('datacenter2hostFolder')
folderTraversalSpec = self.client.createTraversalSpec()
folderTraversalSpec.setType('Folder')
folderTraversalSpec.setPath('childEntity')
folderTraversalSpec.setName(recurseFoldersSelectionSpec.getName())
folderTraversalSpec.setSelectSet([datacenterToHostFolderSelectionSpec, recurseFoldersSelectionSpec])
datacenterTraversalSpec = self.client.createTraversalSpec()
datacenterTraversalSpec.setType('Datacenter')
datacenterTraversalSpec.setPath('hostFolder')
datacenterTraversalSpec.setName(datacenterToHostFolderSelectionSpec.getName())
datacenterTraversalSpec.setSelectSet([recurseFoldersSelectionSpec])
objectSpec = self.client.createObjectSpec()
rootFolderRef = self.client.getRootFolder()
objectSpec.setObj(rootFolderRef)
objectSpec.setSkip(1)
objectSpec.setSelectSet([folderTraversalSpec, datacenterTraversalSpec])
propertyFilterSpec = self.client.createPropertyFilterSpec()
propertyFilterSpec.setPropSet([propertySpec])
propertyFilterSpec.setObjectSet([objectSpec])
return self.client.getService().retrieveProperties(self.client.getPropertyCollector(), [propertyFilterSpec])
def extractEsxOsh(self):
esxesCount = len(self.standaloneEsxComputeResourceDiscoverer.esxRefToEsxDiscoverer)
if esxesCount == 1:
esxDiscoverer = self.standaloneEsxComputeResourceDiscoverer.esxRefToEsxDiscoverer.values()[0]
return esxDiscoverer.osh
def discoverLicenses(self):
esxOsh = self.extractEsxOsh()
if esxOsh is not None:
# setting parentref to None makes all licensing queries relative to current host
self.licensesDiscoverer = LicensesDiscoverer(self.client, self.framework, self.discoveryConfig, None, esxOsh)
self.licensesDiscoverer.discover()
def addResultsToVector(self, vector):
self.standaloneEsxComputeResourceDiscoverer.addResultsToVector(vector)
if self.licensesDiscoverer is not None:
self.licensesDiscoverer.addResultsToVector(vector)
class StandaloneEsxComputeResourceDiscoverer(BaseComputeResourceDiscoverer):
"""
    Class represents a discoverer for the ComputeResource of a standalone ESX server.
    Here we do not have a parent Cluster or Datacenter to link to.
"""
def __init__(self, client, framework, discoveryConfig):
BaseComputeResourceDiscoverer.__init__(self, client, framework, discoveryConfig, None)
def addResultsToVector(self, vector):
esxesCount = len(self.esxRefToEsxDiscoverer)
if esxesCount == 1:
esxDiscoverer = self.esxRefToEsxDiscoverer.values()[0]
esxDiscoverer.addResultsToVector(vector)
#add VMs associated with root pool to vector, setting parent to None will not create 'contains' link
#all other VMs linked to pools will be added by 'addDescendantsOfPoolToVector'
self.addVmsToVector(vector, None, self.vms)
self.addDescendantsOfPoolToVector(vector, esxDiscoverer.osh, self.rootResourcePoolRef)
self.addLinksForServersWithRunningVmsToVector(vector)
else:
if esxesCount == 0:
logger.debug('No ESX Server was found in ComputeResource')
else:
logger.debug('ERROR: ComputeResource contains more than one ESX Server (expected 1)')
class VmwareServerConnectionDiscoverer(BaseDiscoverer):
"""
    Class represents a discoverer for server connections. If the connection is successful,
    we determine the type of server we connected to and return the appropriate server CI:
    VirtualCenter with Host or ESX with Virtualization Layer.
    The connection URL and credentialsId are saved as attributes.
"""
VIM_API_VC = 'VirtualCenter'
VIM_API_ESX = 'HostAgent'
def __init__(self, client, framework, urlString, credentialsId, serverIp):
BaseDiscoverer.__init__(self, client, framework, None)
self.urlString = urlString
self.credentialsId = credentialsId
self.apiType = None
self.serverIp = serverIp
def discover(self):
about = self.client.getServiceContent().getAbout()
self.apiType = about.getApiType()
if self.apiType == VmwareServerConnectionDiscoverer.VIM_API_VC:
self.hostOsh = modeling.createHostOSH(self.serverIp)
self.osh = modeling.createApplicationOSH('vmware_virtual_center', 'VMware VirtualCenter', self.hostOsh)
elif self.apiType == VmwareServerConnectionDiscoverer.VIM_API_ESX:
self.retrieveEsxRequiredAttributes()
self.hostOsh = modeling.createCompleteHostOSH('vmware_esx_server', self.esxUuid)
modeling.setHostBiosUuid(self.hostOsh, self.esxUuid)
self.osh = modeling.createApplicationOSH('virtualization_layer', 'Virtualization Layer Software', self.hostOsh)
else:
raise ValueError, "Failed to retrieve VMware Server details, unknown API type %s" % self.apiType
version = about.getVersion()
fullName = about.getFullName()
buildNumber = about.getBuild()
fullVersion = "%s.%s" % (version, buildNumber)
self.osh.setAttribute('data_description', fullName)
self.osh.setAttribute('application_version', fullVersion)
self.osh.setAttribute('application_ip', self.serverIp)
self.osh.setAttribute('credentials_id', self.credentialsId)
self.osh.setAttribute('connection_url', self.urlString)
def retrieveEsxRequiredAttributes(self):
self.retrieveEsxUuid()
if self.esxUuid is None:
raise ValueError, "Failed to get ESX UUID"
def retrieveEsxUuid(self):
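        # Retrieves 'summary.hardware.uuid' of the single HostSystem by traversing
        # rootFolder -> Folder.childEntity -> Datacenter.hostFolder -> ComputeResource.host.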
propertySpec = self.client.createPropertySpec()
propertySpec.setType('HostSystem')
propertySpec.setPathSet(['summary.hardware.uuid'])
recurseFoldersSelectionSpec = self.client.createSelectionSpec()
recurseFoldersSelectionSpec.setName('folder2childEntity')
datacenterToHostFolderSelectionSpec = self.client.createSelectionSpec()
datacenterToHostFolderSelectionSpec.setName('datacenter2hostFolder')
computeResourceSelectionSpec = self.client.createSelectionSpec()
computeResourceSelectionSpec.setName('computeResource2hosts')
folderTraversalSpec = self.client.createTraversalSpec()
folderTraversalSpec.setType('Folder')
folderTraversalSpec.setPath('childEntity')
folderTraversalSpec.setName(recurseFoldersSelectionSpec.getName())
folderTraversalSpec.setSelectSet([datacenterToHostFolderSelectionSpec, recurseFoldersSelectionSpec, computeResourceSelectionSpec])
datacenterTraversalSpec = self.client.createTraversalSpec()
datacenterTraversalSpec.setType('Datacenter')
datacenterTraversalSpec.setPath('hostFolder')
datacenterTraversalSpec.setName(datacenterToHostFolderSelectionSpec.getName())
datacenterTraversalSpec.setSelectSet([recurseFoldersSelectionSpec, computeResourceSelectionSpec])
computeResourceTraversalSpec = self.client.createTraversalSpec()
computeResourceTraversalSpec.setType('ComputeResource')
computeResourceTraversalSpec.setPath('host')
computeResourceTraversalSpec.setName(computeResourceSelectionSpec.getName())
computeResourceTraversalSpec.setSelectSet([])
objectSpec = self.client.createObjectSpec()
rootFolderRef = self.client.getRootFolder()
objectSpec.setObj(rootFolderRef)
objectSpec.setSkip(1)
objectSpec.setSelectSet([folderTraversalSpec, datacenterTraversalSpec, computeResourceTraversalSpec])
propertyFilterSpec = self.client.createPropertyFilterSpec()
propertyFilterSpec.setPropSet([propertySpec])
propertyFilterSpec.setObjectSet([objectSpec])
contents = self.client.getService().retrieveProperties(self.client.getPropertyCollector(), [propertyFilterSpec])
if contents is not None:
hostsCount = len(contents)
if hostsCount == 1:
objectContent = contents[0]
props = objectContent.getPropSet()
if props:
self.esxUuid = props[0].getVal()
else:
raise ValueError, 'ERROR: standalone ESX has %d HostSystem (expected 1)' % hostsCount
else:
raise ValueError, 'Failed to retrieve ESX details'
def addResultsToVector(self, vector):
vector.add(self.osh)
vector.add(self.hostOsh)
if self.apiType != VmwareServerConnectionDiscoverer.VIM_API_ESX and self.serverIp is not None:
ipOsh = modeling.createIpOSH(self.serverIp)
containedLink = modeling.createLinkOSH('contained', self.hostOsh, ipOsh)
vector.add(ipOsh)
vector.add(containedLink)
def unescapeString(str):
"""
    Convert any occurrence of %<hexnumber> in the string to its ASCII symbol.
    Almost like URL decode, but we do not convert '+' to a space.
"""
if str is not None:
words = str.split('%')
resultList = []
resultList.append(words[0])
for word in words[1:]:
if word:
hex = word[:2]
code = 0
try:
code = int(hex, 16)
except ValueError:
resultList.append('%')
resultList.append(word)
else:
converted = chr(code)
remaining = word[2:]
resultList.append(converted)
resultList.append(remaining)
return ''.join(resultList)
def restoreVirtualCenterOSH(vcIdString):
virtualCenterOSH = modeling.createOshByCmdbIdString('vmware_virtual_center', vcIdString)
return virtualCenterOSH
def getFaultType(axisFault):
faultType = None
if hasattr(axisFault, 'getTypeDesc'):
typeDesc = axisFault.getTypeDesc()
if typeDesc is not None:
xmlType = typeDesc.getXmlType()
if xmlType is not None:
faultType = xmlType.getLocalPart()
return faultType
def getIpFromUrlObject(urlObject):
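    # returns the host part as-is when it is already an IP address, otherwise resolves it via DNS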
portResolveMap = {'http':80, 'https':443 }
hostname = urlObject.getHost()
if netutils.isValidIp(hostname):
return hostname
else:
port = urlObject.getPort()
if (port <= 0):
proto = urlObject.getProtocol()
if portResolveMap.has_key(proto):
port = portResolveMap[proto]
inetAddress = InetSocketAddress(hostname, port).getAddress()
if inetAddress:
return inetAddress.getHostAddress()
def resolveHostIp(hostName):
try:
return InetAddress.getByName(hostName).getHostAddress()
except UnknownHostException:
logger.debug("Failed to resolve IP for host '%s'" % hostName)
class UrlGenerator:
""" Abstract URL Generator - strategy for obtaining the connection URL by protocol"""
def getUrl(self, credentialsId, errorList, warningList):
pass
class ConstantUrlGenerator(UrlGenerator):
def __init__(self, urlConstant):
self.urlConstant = urlConstant
def getUrl(self, credentialsId, errorList, warningList):
return self.urlConstant
class UrlFromProtocolGenerator(UrlGenerator):
PROTOCOL_PARAM_PORT = 'protocol_port'
PROTOCOL_PARAM_USE_SSL = 'vmwareprotocol_use_ssl'
URL_PATTERN = "%s://%s:%d/sdk"
URL_PATTERN_NO_PORT = "%s://%s/sdk"
def __init__(self, ipAddress, framework):
self.ipAddress = ipAddress
self.framework = framework
def getUrl(self, credentialsId, errorList, warningsList):
port = self.framework.getProtocolProperty(credentialsId, UrlFromProtocolGenerator.PROTOCOL_PARAM_PORT, "")
useSsl = self.framework.getProtocolProperty(credentialsId, UrlFromProtocolGenerator.PROTOCOL_PARAM_USE_SSL, "1")
if useSsl:
prefix = 'https'
else:
prefix = 'http'
urlString = None
if port:
            urlString = UrlFromProtocolGenerator.URL_PATTERN % (prefix, self.ipAddress, int(port))  # port may come back as a string; %d needs an int
else:
urlString = UrlFromProtocolGenerator.URL_PATTERN_NO_PORT % (prefix, self.ipAddress)
return urlString
def connectByUrlAndCredentialsId(framework, urlString, credentialsId):
clientFactory = VimClientFactory(framework, urlString, credentialsId)
client = clientFactory.createClient()
if client is not None:
return client
else:
raise ValueError, "Failed to create client"
def executeConnectionPattern(ipAddress, urlGenerator, resultsVector, Framework):
"""
    Method that performs a general VMware server connection discovery. It goes over all defined
    credentials and uses the passed urlGenerator object to get a URL for the connection. Credentials
    are tried one by one while accumulating all errors until a successful connection is made.
    Once we have successfully connected, all accumulated errors are cleared.
    @return: TRUE if one of the protocols succeeded in connecting, else FALSE
"""
credentialsIdList = Framework.getAvailableProtocols(ipAddress, VMWARE_PROTOCOL_SHORT)
isConnected = 0
isOneOfProtocolsSucceed = 0
if credentialsIdList:
errorsList = []
warningsList = []
for credentialsId in credentialsIdList:
if not isConnected:
try:
urlString = urlGenerator.getUrl(credentialsId, errorsList, warningsList)
client = connectByUrlAndCredentialsId(Framework, urlString, credentialsId)
# no exception at this point means the connection was successful, we need to clear the
# errors and warnings
logger.debug('Connection is successful')
isConnected = 1
errorsList = []
warningsList = []
try:
serverDiscoverer = VmwareServerConnectionDiscoverer(client.getAgent(), Framework, urlString, credentialsId, ipAddress)
serverDiscoverer.discover()
serverDiscoverer.addResultsToVector(resultsVector)
isOneOfProtocolsSucceed = 1
finally:
if client is not None:
client.close()
except AxisFault, axisFault:
faultType = getFaultType(axisFault)
if faultType == 'InvalidLogin':
msg = errormessages.makeErrorMessage(VMWARE_PROTOCOL_NAME, None, errormessages.ERROR_INVALID_USERNAME_PASSWORD)
logger.debug(msg)
errorsList.append(msg)
elif faultType == 'NoPermission':
priviledgeId = axisFault.getPrivilegeId()
msg = "User does not have required '%s' permission" % priviledgeId
logger.debug(msg)
shouldStop = errormessages.resolveAndAddToCollections(msg, VMWARE_PROTOCOL_NAME, warningsList, errorsList)
if shouldStop:
break
else:
faultString = axisFault.getFaultString()
dump = axisFault.dumpToString()
logger.debug(dump)
shouldStop = errormessages.resolveAndAddToCollections(faultString, VMWARE_PROTOCOL_NAME, warningsList, errorsList)
if shouldStop:
break
except Exception, ex:
msg = ex.getMessage()
logger.debug(msg)
shouldStop = errormessages.resolveAndAddToCollections(msg, VMWARE_PROTOCOL_NAME, warningsList, errorsList)
if shouldStop:
break
except:
msg = logger.prepareJythonStackTrace('')
logger.debug(msg)
shouldStop = errormessages.resolveAndAddToCollections(msg, VMWARE_PROTOCOL_NAME, warningsList, errorsList)
if shouldStop:
break
for errorMsg in errorsList:
Framework.reportError(errorMsg)
for warningMsg in warningsList:
Framework.reportWarning(warningMsg)
else:
msg = errormessages.makeErrorMessage(VMWARE_PROTOCOL_NAME, None, errormessages.ERROR_NO_CREDENTIALS)
Framework.reportWarning(msg)
return isOneOfProtocolsSucceed
| [
"[email protected]"
] | |
a5c147f2b13a9262b93ad5d48796d82c3012edfb | 04e5b6df2ee3bcfb7005d8ec91aab8e380333ac4 | /Lib/objc/StoreKit.py | 4b919a9b01a65ef35f5ae3e2e78d36703567db64 | [
"MIT"
] | permissive | ColdGrub1384/Pyto | 64e2a593957fd640907f0e4698d430ea7754a73e | 7557485a733dd7e17ba0366b92794931bdb39975 | refs/heads/main | 2023-08-01T03:48:35.694832 | 2022-07-20T14:38:45 | 2022-07-20T14:38:45 | 148,944,721 | 884 | 157 | MIT | 2023-02-26T21:34:04 | 2018-09-15T22:29:07 | C | UTF-8 | Python | false | false | 5,635 | py | """
Classes from the 'StoreKit' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
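# Each name below resolves to the corresponding Objective-C class at runtime,
# or to None when the ObjC runtime (rubicon.objc) is not available.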
SKCloudServiceController = _Class("SKCloudServiceController")
SKOverlay = _Class("SKOverlay")
_SKStoreProductActivityAnimationController = _Class(
"_SKStoreProductActivityAnimationController"
)
SKAccountPageSpecifierProvider = _Class("SKAccountPageSpecifierProvider")
SKPurchaseIntent = _Class("SKPurchaseIntent")
SKPaymentQueueClient = _Class("SKPaymentQueueClient")
SKPaymentDiscount = _Class("SKPaymentDiscount")
SKPaymentDiscountInternal = _Class("SKPaymentDiscountInternal")
SKStorePageRequest = _Class("SKStorePageRequest")
SKStoreReviewController = _Class("SKStoreReviewController")
SKServiceProxy = _Class("SKServiceProxy")
SKInvocationQueueProxy = _Class("SKInvocationQueueProxy")
SKInternalProductStorePromotionController = _Class(
"SKInternalProductStorePromotionController"
)
SKStorefront = _Class("SKStorefront")
SKCloudServiceSetupExtension = _Class("SKCloudServiceSetupExtension")
SKXPCConnection = _Class("SKXPCConnection")
SKCloudServiceSetupConfiguration = _Class("SKCloudServiceSetupConfiguration")
SKScrollDetector = _Class("SKScrollDetector")
SKWeakContainer = _Class("SKWeakContainer")
SKEntitlementChecker = _Class("SKEntitlementChecker")
SKAdNetwork = _Class("SKAdNetwork")
SKArcadeService = _Class("SKArcadeService")
SKProductStorePromotionController = _Class("SKProductStorePromotionController")
SKPrivacyController = _Class("SKPrivacyController")
SKOverlayConfiguration = _Class("SKOverlayConfiguration")
SKOverlayAppClipConfiguration = _Class("SKOverlayAppClipConfiguration")
SKOverlayAppConfiguration = _Class("SKOverlayAppConfiguration")
SKURLParserBagContract = _Class("SKURLParserBagContract")
SKProductSubscriptionPeriod = _Class("SKProductSubscriptionPeriod")
SKProductSubscriptionPeriodInternal = _Class("SKProductSubscriptionPeriodInternal")
SKPaymentTransactionInternal = _Class("SKPaymentTransactionInternal")
SKPaymentTransaction = _Class("SKPaymentTransaction")
SKRemoteDismissingTransition = _Class("SKRemoteDismissingTransition")
SKInGameAnalytics = _Class("SKInGameAnalytics")
SKOverlayTransitionContext = _Class("SKOverlayTransitionContext")
SKPaymentInternal = _Class("SKPaymentInternal")
SKPayment = _Class("SKPayment")
SKMutablePayment = _Class("SKMutablePayment")
SKCloudServiceSetupReloadContext = _Class("SKCloudServiceSetupReloadContext")
SKDownloadChangeset = _Class("SKDownloadChangeset")
SKDownload = _Class("SKDownload")
SKDownloadInternal = _Class("SKDownloadInternal")
SKProductDiscount = _Class("SKProductDiscount")
SKProductDiscountInternal = _Class("SKProductDiscountInternal")
SKProductInternal = _Class("SKProductInternal")
SKProduct = _Class("SKProduct")
SKProductsResponseInternal = _Class("SKProductsResponseInternal")
SKProductsResponse = _Class("SKProductsResponse")
SKProductsRequestInternal = _Class("SKProductsRequestInternal")
SKRequestInternal = _Class("SKRequestInternal")
SKRequest = _Class("SKRequest")
SKInstallSheetStatusUpdateRequest = _Class("SKInstallSheetStatusUpdateRequest")
SKPromotedIAPGetInfoInternalRequest = _Class("SKPromotedIAPGetInfoInternalRequest")
SKHandleInvalidReceiptRequest = _Class("SKHandleInvalidReceiptRequest")
SKReceiptRefreshRequest = _Class("SKReceiptRefreshRequest")
SKPromotedIAPSetOrderRequest = _Class("SKPromotedIAPSetOrderRequest")
SKPromotedIAPGetVisibilityRequest = _Class("SKPromotedIAPGetVisibilityRequest")
SKPromotedIAPSetVisibilityRequest = _Class("SKPromotedIAPSetVisibilityRequest")
SKPromotedIAPGetOrderRequest = _Class("SKPromotedIAPGetOrderRequest")
SKProductsRequest = _Class("SKProductsRequest")
SKWeakReference = _Class("SKWeakReference")
SKDefaultsManager = _Class("SKDefaultsManager")
SKPaymentQueueInternal = _Class("SKPaymentQueueInternal")
SKPaymentQueue = _Class("SKPaymentQueue")
SKSpecifierWithSubtitleCell = _Class("SKSpecifierWithSubtitleCell")
SKStoreReviewPresentationWindow = _Class("SKStoreReviewPresentationWindow")
SKStarRatingControl = _Class("SKStarRatingControl")
SKStorePageViewController = _Class("SKStorePageViewController")
SKStoreProductActivityViewController = _Class("SKStoreProductActivityViewController")
SKComposeReviewViewController = _Class("SKComposeReviewViewController")
SKCloudServiceSetupViewController = _Class("SKCloudServiceSetupViewController")
SKProductPageExtension = _Class("SKProductPageExtension")
SKStoreProductViewController = _Class("SKStoreProductViewController")
SKTermsPageViewController = _Class("SKTermsPageViewController")
SKAccountPageViewController = _Class("SKAccountPageViewController")
SKStoreReviewViewController = _Class("SKStoreReviewViewController")
SKArcadeSubscribeViewController = _Class("SKArcadeSubscribeViewController")
SKStoreExtension = _Class("SKStoreExtension")
SKRemoteProductActivityViewController = _Class("SKRemoteProductActivityViewController")
SKRemoteStorePageViewController = _Class("SKRemoteStorePageViewController")
SKRemoteComposeReviewViewController = _Class("SKRemoteComposeReviewViewController")
SKRemoteReviewViewController = _Class("SKRemoteReviewViewController")
SKRemoteProductViewController = _Class("SKRemoteProductViewController")
SKStoreRemoteViewController = _Class("SKStoreRemoteViewController")
SKCloudServiceSetupRemoteViewController = _Class(
"SKCloudServiceSetupRemoteViewController"
)
SKRemoteAccountPageViewController = _Class("SKRemoteAccountPageViewController")
SKStarRatingAlertController = _Class("SKStarRatingAlertController")
| [
"[email protected]"
] | |
23a9af8c3a7546c53ad2b85b5f514b566b53d151 | c36679186f669c6e3bd1c106c96d4a17be1f5ab1 | /Practice_Telusko/121.py | ddb01801081aec49788165b80cbbbe017000e1b4 | [] | no_license | touhiduzzaman-tuhin/python-code-university-life | 60a3d671b200a6f5222c6d176c13c5f20f013509 | 6d2e3d90d430faa5c83fe79e7fb1ebe516994762 | refs/heads/master | 2023-03-22T15:18:10.636203 | 2021-03-06T18:52:04 | 2021-03-06T18:52:04 | 332,467,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | x = int(input("Enter A Number : "))
if x == 0:
print("Zero Number")
elif x >= 0:
print("Positive Number")
if x % 2 == 0:
print("Even Number")
else:
print("Odd Number")
else:
print("Negative Number") | [
"[email protected]"
] | |
f0457a3c6eba2170502ff89f45eef7cd3dcb14d9 | 36bfa8c212270b3c1eaab77210a525f0bbef6874 | /podoc/ast/__init__.py | 2bb4bd02683ba6fae13666a16f9937b333aa96eb | [
"BSD-3-Clause"
] | permissive | podoc/podoc | ce7b22571251ae90b56d272eff0277ec6090ea75 | 1868e7f82a521b1722dca528802acedf9010b11a | refs/heads/master | 2021-01-17T15:18:12.158127 | 2018-02-12T15:16:10 | 2018-02-12T15:16:10 | 41,724,522 | 54 | 9 | BSD-3-Clause | 2018-02-04T15:08:13 | 2015-09-01T07:44:50 | Python | UTF-8 | Python | false | false | 338 | py | # -*- coding: utf-8 -*-
# flake8: noqa
"""JSON plugin."""
#-------------------------------------------------------------------------------------------------
# Imports
#-------------------------------------------------------------------------------------------------
from ._ast import ASTNode, ASTPlugin, PandocPlugin, ast_from_pandoc
| [
"[email protected]"
] | |
6f337a988d6c798586c741832f986ede08989829 | a964f0f3f93a84d5195042d3c1bb2288e8b62161 | /muddery/server/dao/event_data.py | 3bec35efc460fb681d575e820864148aa197845a | [
"BSD-3-Clause"
] | permissive | nobodxbodon/muddery | 474433791b75d2f2130e6b758fb3126e2d56230b | 4b4c6c0dc5cc237a5df012a05ed260fad1a793a7 | refs/heads/master | 2023-06-19T19:28:39.252340 | 2021-07-14T15:07:47 | 2021-07-14T15:07:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 436 | py | """
Query and deal common tables.
"""
from muddery.server.dao.base_query import BaseQuery
from muddery.server.dao.worlddata import WorldData
class EventData(BaseQuery):
"""
Object's event data.
"""
table_name = "event_data"
@classmethod
def get_object_event(cls, object_key):
"""
Get object's event.
"""
return WorldData.get_table_data(cls.table_name, trigger_obj=object_key)
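# Usage sketch (assumes the world data tables are already loaded; the object key is hypothetical):
#   events = EventData.get_object_event("npc_blacksmith")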
| [
"[email protected]"
] | |
bb6a8c6681d3591410ec011af26b33133b0dec73 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_2376.py | d781f9e9cec34f15d0ca267954c8f71c05c73516 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30 | py | # Cookies with urllib
Cookie:
| [
"[email protected]"
] | |
2192bb616634a4295e7ef0cd9808e3bbab101988 | e629795e54c7f0bf79c9128adc8bc9154bebfb19 | /dynamic_programming/leetcode/python/leet_code_1289.py | 4b2f08063b0352438dfb7adf0f552bae3817f7f6 | [] | no_license | sm2774us/leetcode_hackerrank_codesignal_practice | 1aff675b8b3e6b58e3bb2f81c46c8646da50589f | bdc2407f391a8bc08e3a119227c384f3e15bb693 | refs/heads/main | 2023-07-19T21:59:31.638152 | 2021-08-25T12:29:59 | 2021-08-25T12:29:59 | 392,862,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | from typing import List
class Solution:
    def minFallingPathSum(self, A: List[List[int]]) -> int:
rows = len(A)
cols = len(A[0])
prv_row_min1 = prv_row_min2 = 0
prev_pos1 = -1
for i in range(rows):
curr_row_min1 = curr_row_min2 = float('inf')
for j in range(cols):
if prev_pos1 != j:
min_val = prv_row_min1
else:
min_val = prv_row_min2
if min_val + A[i][j] < curr_row_min1:
curr_row_min2 = curr_row_min1
curr_row_min1 = min_val + A[i][j]
curr_pos = j
else:
curr_row_min2 = min(curr_row_min2, min_val+A[i][j])
prv_row_min1, prv_row_min2 = curr_row_min1, curr_row_min2
prev_pos1 = curr_pos
return prv_row_min1 | [
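# Quick check (LeetCode 1289 example): Solution().minFallingPathSum([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# should return 13, i.e. picking 1, 5 and 7 so no two consecutive rows reuse a column.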
"[email protected]"
] | |
bdb032f9ec951da8035b9e788b89c0624320bc26 | e1243b212be599a801e8d1fb5fd00e0ab0db974d | /models/EDSR_freq.py | fa798b278b49185c4ded544307b278cd3d6168e4 | [] | no_license | hyungminr/PyTorch_SISR | d17358ca94ccf7a82223a3b383b7529f1bfff0cd | 5aa22c3f68d262eb7ff80618cb3a75a7b488dacc | refs/heads/master | 2023-04-17T04:15:58.591604 | 2021-04-27T01:02:20 | 2021-04-27T01:02:20 | 328,836,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | from models.EDSR import EDSR
from models.EDSR import MeanShift
import math
import torch.nn as nn
import torch
class EDSR_freq(nn.Module):
def __init__(self, scale=2, num_feats=64, kernel=3, padding=1, bias=True):
super().__init__()
self.model_image = EDSR(scale=scale, num_feats=32)
self.model_high = EDSR(scale=scale, num_feats=24)
self.model_low = EDSR(scale=scale, num_feats= 8)
layers = []
if (scale & (scale - 1)) == 0: # Is scale = 2^n?
for _ in range(int(math.log(scale, 2))):
layers += [nn.Conv2d(in_channels=num_feats, out_channels=num_feats*4, kernel_size=kernel, padding=padding, bias=bias)]
layers += [nn.PixelShuffle(2)]
layers += [nn.Conv2d(in_channels=num_feats, out_channels=3, kernel_size=kernel, padding=padding, bias=bias)]
self.tail = nn.Sequential(*layers)
self.add_mean = MeanShift(mode='add')
def forward(self, img, high, low):
sr_image, deep_image = self.model_image(img)
sr_high, deep_high = self.model_high(high)
sr_low, deep_low = self.model_low(low)
deep = torch.cat((deep_image[0] + deep_image[-1],
deep_high[0] + deep_high[-1],
deep_low[0] + deep_low[-1]), dim=1)
x_up = self.tail(deep)
out = self.add_mean(x_up)
return out, [sr_image, sr_high, sr_low]
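# Minimal usage sketch (assumptions: 2x super-resolution, and `high`/`low` are frequency-band
# decompositions of `img` with the same spatial size, all shaped [N, 3, H, W]):
#   model = EDSR_freq(scale=2)
#   sr, (sr_image, sr_high, sr_low) = model(img, high, low)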
| [
"[email protected]"
] | |
2c7b84585721c3bf19fe1bdbef14fec4e8afedae | 743da4642ac376e5c4e1a3b63c079533a5e56587 | /examples/noisychannel/rerank_generate.py | fd1d26b3ebfdf86311c6764d22205b691d21210b | [
"MIT"
] | permissive | tmtmaj/Exploiting-PrLM-for-NLG-tasks | cdae1b6e451b594b11d8ecef3c1cd4e12fe51c9b | e8752593d3ee881cf9c0fb5ed26d26fcb02e6dd5 | refs/heads/main | 2023-06-16T08:26:32.560746 | 2021-07-14T17:50:19 | 2021-07-14T17:50:19 | 371,899,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,442 | py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Generate n-best translations using a trained model.
"""
from contextlib import redirect_stdout
import os
import subprocess
from fairseq import options
from fairseq_cli import generate, preprocess
from . import rerank_options, rerank_utils
def gen_and_reprocess_nbest(args):
if args.score_dict_dir is None:
args.score_dict_dir = args.data
if args.prefix_len is not None:
assert args.right_to_left1 is False, "prefix length not compatible with right to left models"
assert args.right_to_left2 is False, "prefix length not compatible with right to left models"
if args.nbest_list is not None:
assert args.score_model2 is None
if args.backwards1:
scorer1_src = args.target_lang
scorer1_tgt = args.source_lang
else:
scorer1_src = args.source_lang
scorer1_tgt = args.target_lang
store_data = os.path.join(os.path.dirname(__file__))+"/rerank_data/"+args.data_dir_name
if not os.path.exists(store_data):
os.makedirs(store_data)
pre_gen, left_to_right_preprocessed_dir, right_to_left_preprocessed_dir, \
backwards_preprocessed_dir, lm_preprocessed_dir = \
rerank_utils.get_directories(args.data_dir_name, args.num_rescore, args.gen_subset,
args.gen_model_name, args.shard_id, args.num_shards,
args.sampling, args.prefix_len, args.target_prefix_frac,
args.source_prefix_frac)
assert not (args.right_to_left1 and args.backwards1), "backwards right to left not supported"
assert not (args.right_to_left2 and args.backwards2), "backwards right to left not supported"
assert not (args.prefix_len is not None and args.target_prefix_frac is not None), \
"target prefix frac and target prefix len incompatible"
# make directory to store generation results
if not os.path.exists(pre_gen):
os.makedirs(pre_gen)
rerank1_is_gen = args.gen_model == args.score_model1 and args.source_prefix_frac is None
rerank2_is_gen = args.gen_model == args.score_model2 and args.source_prefix_frac is None
if args.nbest_list is not None:
rerank2_is_gen = True
# make directories to store preprossed nbest list for reranking
if not os.path.exists(left_to_right_preprocessed_dir):
os.makedirs(left_to_right_preprocessed_dir)
if not os.path.exists(right_to_left_preprocessed_dir):
os.makedirs(right_to_left_preprocessed_dir)
if not os.path.exists(lm_preprocessed_dir):
os.makedirs(lm_preprocessed_dir)
if not os.path.exists(backwards_preprocessed_dir):
os.makedirs(backwards_preprocessed_dir)
score1_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model1_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards1)
if args.score_model2 is not None:
score2_file = rerank_utils.rescore_file_name(pre_gen, args.prefix_len, args.model2_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards2)
predictions_bpe_file = pre_gen+"/generate_output_bpe.txt"
using_nbest = args.nbest_list is not None
if using_nbest:
print("Using predefined n-best list from interactive.py")
predictions_bpe_file = args.nbest_list
else:
if not os.path.isfile(predictions_bpe_file):
print("STEP 1: generate predictions using the p(T|S) model with bpe")
print(args.data)
param1 = [args.data,
"--path", args.gen_model,
"--shard-id", str(args.shard_id),
"--num-shards", str(args.num_shards),
"--nbest", str(args.num_rescore),
"--batch-size", str(args.batch_size),
"--beam", str(args.num_rescore),
"--max-sentences", str(args.num_rescore),
"--gen-subset", args.gen_subset,
"--source-lang", args.source_lang,
"--target-lang", args.target_lang]
if args.sampling:
param1 += ["--sampling"]
gen_parser = options.get_generation_parser()
input_args = options.parse_args_and_arch(gen_parser, param1)
print(input_args)
with open(predictions_bpe_file, 'w') as f:
with redirect_stdout(f):
generate.main(input_args)
gen_output = rerank_utils.BitextOutputFromGen(predictions_bpe_file, bpe_symbol=args.remove_bpe,
nbest=using_nbest, prefix_len=args.prefix_len,
target_prefix_frac=args.target_prefix_frac)
if args.diff_bpe:
rerank_utils.write_reprocessed(gen_output.no_bpe_source, gen_output.no_bpe_hypo,
gen_output.no_bpe_target, pre_gen+"/source_gen_bpe."+args.source_lang,
pre_gen+"/target_gen_bpe."+args.target_lang,
pre_gen+"/reference_gen_bpe."+args.target_lang)
bitext_bpe = args.rescore_bpe_code
bpe_src_param = ["-c", bitext_bpe,
"--input", pre_gen+"/source_gen_bpe."+args.source_lang,
"--output", pre_gen+"/rescore_data."+args.source_lang]
bpe_tgt_param = ["-c", bitext_bpe,
"--input", pre_gen+"/target_gen_bpe."+args.target_lang,
"--output", pre_gen+"/rescore_data."+args.target_lang]
subprocess.call(["python",
os.path.join(os.path.dirname(__file__),
"subword-nmt/subword_nmt/apply_bpe.py")] + bpe_src_param,
shell=False)
subprocess.call(["python",
os.path.join(os.path.dirname(__file__),
"subword-nmt/subword_nmt/apply_bpe.py")] + bpe_tgt_param,
shell=False)
if (not os.path.isfile(score1_file) and not rerank1_is_gen) or \
(args.score_model2 is not None and not os.path.isfile(score2_file) and not rerank2_is_gen):
print("STEP 2: process the output of generate.py so we have clean text files with the translations")
rescore_file = "/rescore_data"
if args.prefix_len is not None:
prefix_len_rescore_file = rescore_file + "prefix"+str(args.prefix_len)
if args.target_prefix_frac is not None:
target_prefix_frac_rescore_file = rescore_file + "target_prefix_frac"+str(args.target_prefix_frac)
if args.source_prefix_frac is not None:
source_prefix_frac_rescore_file = rescore_file + "source_prefix_frac"+str(args.source_prefix_frac)
if not args.right_to_left1 or not args.right_to_left2:
if not args.diff_bpe:
rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
pre_gen+rescore_file+"."+args.source_lang,
pre_gen+rescore_file+"."+args.target_lang,
pre_gen+"/reference_file", bpe_symbol=args.remove_bpe)
if args.prefix_len is not None:
bw_rescore_file = prefix_len_rescore_file
rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
pre_gen+prefix_len_rescore_file+"."+args.source_lang,
pre_gen+prefix_len_rescore_file+"."+args.target_lang,
pre_gen+"/reference_file", prefix_len=args.prefix_len,
bpe_symbol=args.remove_bpe)
elif args.target_prefix_frac is not None:
bw_rescore_file = target_prefix_frac_rescore_file
rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
pre_gen+target_prefix_frac_rescore_file+"."+args.source_lang,
pre_gen+target_prefix_frac_rescore_file+"."+args.target_lang,
pre_gen+"/reference_file", bpe_symbol=args.remove_bpe,
target_prefix_frac=args.target_prefix_frac)
else:
bw_rescore_file = rescore_file
if args.source_prefix_frac is not None:
fw_rescore_file = source_prefix_frac_rescore_file
rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
pre_gen+source_prefix_frac_rescore_file+"."+args.source_lang,
pre_gen+source_prefix_frac_rescore_file+"."+args.target_lang,
pre_gen+"/reference_file", bpe_symbol=args.remove_bpe,
source_prefix_frac=args.source_prefix_frac)
else:
fw_rescore_file = rescore_file
if args.right_to_left1 or args.right_to_left2:
rerank_utils.write_reprocessed(gen_output.source, gen_output.hypo, gen_output.target,
pre_gen+"/right_to_left_rescore_data."+args.source_lang,
pre_gen+"/right_to_left_rescore_data."+args.target_lang,
pre_gen+"/right_to_left_reference_file",
right_to_left=True, bpe_symbol=args.remove_bpe)
print("STEP 3: binarize the translations")
if not args.right_to_left1 or args.score_model2 is not None and not args.right_to_left2 or not rerank1_is_gen:
if args.backwards1 or args.backwards2:
if args.backwards_score_dict_dir is not None:
bw_dict = args.backwards_score_dict_dir
else:
bw_dict = args.score_dict_dir
bw_preprocess_param = ["--source-lang", scorer1_src,
"--target-lang", scorer1_tgt,
"--trainpref", pre_gen+bw_rescore_file,
"--srcdict", bw_dict + "/dict." + scorer1_src + ".txt",
"--tgtdict", bw_dict + "/dict." + scorer1_tgt + ".txt",
"--destdir", backwards_preprocessed_dir]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(bw_preprocess_param)
preprocess.main(input_args)
preprocess_param = ["--source-lang", scorer1_src,
"--target-lang", scorer1_tgt,
"--trainpref", pre_gen+fw_rescore_file,
"--srcdict", args.score_dict_dir+"/dict."+scorer1_src+".txt",
"--tgtdict", args.score_dict_dir+"/dict."+scorer1_tgt+".txt",
"--destdir", left_to_right_preprocessed_dir]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_param)
preprocess.main(input_args)
if args.right_to_left1 or args.right_to_left2:
preprocess_param = ["--source-lang", scorer1_src,
"--target-lang", scorer1_tgt,
"--trainpref", pre_gen+"/right_to_left_rescore_data",
"--srcdict", args.score_dict_dir+"/dict."+scorer1_src+".txt",
"--tgtdict", args.score_dict_dir+"/dict."+scorer1_tgt+".txt",
"--destdir", right_to_left_preprocessed_dir]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_param)
preprocess.main(input_args)
return gen_output
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
gen_and_reprocess_nbest(args)
if __name__ == '__main__':
cli_main()
| [
"[email protected]"
] | |
a9ee77da3ed775af437905f8a77e31c68934d890 | b47c136e077f5100478338280495193a8ab81801 | /Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/motorkit_dc_test.py | acee534bc1f2a7feb18935e5d4caa5d3f8df88a2 | [
"Apache-2.0"
] | permissive | IanSMoyes/SpiderPi | 22cd8747cc389f674cc8d95f32b4d86f9b7b2d8e | cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1 | refs/heads/master | 2023-03-20T22:30:23.362137 | 2021-03-12T17:37:33 | 2021-03-12T17:37:33 | 339,555,949 | 16 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,059 | py | # SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
import time
import board
from adafruit_motorkit import MotorKit
kit = MotorKit(i2c=board.I2C())
kit.motor1.throttle = 0
while True:
print("Forward!")
kit.motor1.throttle = 0.5
time.sleep(1)
print("Speed up...")
for i in range(0, 101):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Slow down...")
for i in range(100, -1, -1):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Backward!")
kit.motor1.throttle = -0.5
time.sleep(1)
print("Speed up...")
for i in range(0, -101, -1):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Slow down...")
for i in range(-100, 1):
speed = i * 0.01
kit.motor1.throttle = speed
time.sleep(0.01)
print("Stop!")
kit.motor1.throttle = 0
time.sleep(1)
| [
"[email protected]"
] | |
50e73f4718d3047767bd32a558235cc11004ff42 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/56b0c712ecb8ed28e63886a27ac24cb72334d327-<get_device>-fix.py | 7e5ac765baf394538173d3b221ff38bdbfa43683 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py |
def get_device(*args):
    """Gets the device from a device object, an ID integer or an array object.

    .. note::

        This API is deprecated. Please use
        :func:`~chainer.cuda.get_device_from_id`
        or :func:`~chainer.cuda.get_device_from_array` instead.

    This is a convenient utility to select a correct device if the type of
    ``arg`` is unknown (i.e., one can use this function on arrays that may be
    on CPU or GPU). The returned device object supports the context management
    protocol of Python for the *with* statement.

    Args:
        args: Values to specify a GPU device. The first device object, integer
            or :class:`cupy.ndarray` object is used to select a device.
            If it is a device object, it is returned. If it is an integer,
            the corresponding device is returned. If it is a CuPy array,
            the device on which this array reside is returned. If any
            arguments are neither integers nor CuPy arrays, a dummy device
            object representing CPU is returned.

    Returns:
        Device object specified by given ``args``.

    .. seealso::
        See :class:`cupy.cuda.Device` for the device selection not by arrays.

    """
warnings.warn('get_device is deprecated. Please use get_device_from_id or get_device_from_array instead.', DeprecationWarning)
return _get_device(*args)
| [
"[email protected]"
] | |
75d822e59f72d9e13076734236f48c4e1dafc564 | 12112cdb7efa8a7026925f3eea9bf096b098ed3b | /yy_api/dao/address.py | 6517613dac58761ab644934790528a0fc6197e79 | [] | no_license | smallstrong0/Earth | c377f43216515b21191778c8a529c1c073bd8954 | c81c2e0e0ea18af62ef8f81b2ea84d2cc0faed8d | refs/heads/master | 2021-05-16T00:08:12.047520 | 2019-07-27T01:39:48 | 2019-07-27T01:39:48 | 106,982,264 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import core.mongo as db_core
import tool.c_utils
import tool.t_utils
collection = db_core.DbUtils().db.address
"""
Field rules:
ctime        creation timestamp
mtime        last-modified timestamp
address_id   unique address id
user_id      globally unique user ID (generated as a UUID)
name         recipient name
phone        phone number
qq           QQ number
we_chat      WeChat ID
address      address
place        detailed unit / door number
"""
def create(dic={}):
dic['ctime'] = tool.t_utils.get_ts()
dic['mtime'] = tool.t_utils.get_ts()
code = collection.insert(dic)
if code:
return dic
else:
return None
def update(old={}, new={}):
return True if collection.update(old, {'$set': new}) else False
def select(where={}):
cursor = collection.find(where)
if cursor.count() > 0:
return list(cursor)
else:
return []
def delete(field={}):
return True if collection.remove(field) else False
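# Usage sketch (assumes a reachable MongoDB behind core.mongo.DbUtils; all values are illustrative):
#   doc = create({'address_id': 'a-001', 'user_id': 'u-001', 'name': 'Zhang San', 'phone': '13800000000'})
#   update({'address_id': 'a-001'}, {'phone': '13900000000'})
#   rows = select({'user_id': 'u-001'})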
| [
"[email protected]"
] | |
212fd46f2e5f7911e6424e3b7c2bc3491bab7a3b | f8b9e5de8823ff810ec445b6fa6d0e34f7b6319f | /Django/Users_project/apps/Users_app/migrations/0001_initial.py | 01e46249e25ed13984f49c592311b5448c9212d9 | [] | no_license | amalfushi/Python | 6c042443a8aeae15fc96a41a692abdbea05db863 | 067c2cef722457e884833f77baf9f44f45a4a165 | refs/heads/master | 2021-01-24T04:08:21.278071 | 2018-02-26T06:25:59 | 2018-02-26T06:25:59 | 122,923,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-18 14:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=255)),
('last_name', models.CharField(max_length=255)),
('email_address', models.CharField(max_length=255)),
('age', models.IntegerField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
2f7a6efeb9a5d3c242058dcae19f31ce8c0190c8 | b4f5055ab8c61098a66cfb10095d835acbb373b7 | /dataaccess/basedao.py | 3075a4cdbe3bb3d571997f4822803471207d9fc9 | [] | no_license | shmyhero/option | f9f01f98144e5f58023ddacfd133647a019b63a2 | 436027c8b33a7bc25cebcf16daa6962eb079c220 | refs/heads/master | 2021-03-16T09:03:04.229299 | 2017-11-29T08:15:02 | 2017-11-29T08:15:02 | 111,675,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,013 | py | import traceback
import datetime
import mysql.connector
from utils.logger import Logger
from common.pathmgr import PathMgr
from common.configmgr import ConfigMgr
class BaseDAO(object):
def __init__(self):
self.logger = Logger(self.__class__.__name__, PathMgr.get_log_path())
@staticmethod
def get_connection():
db_config = ConfigMgr.get_db_config()
return mysql.connector.connect(host=db_config['host'], user=db_config['user'], password=db_config['password'], database=db_config['database'])
@staticmethod
def python_value_to_sql_value(val):
if val is not None:
if type(val) is float:
return '{:.5f}'.format(val)
else:
return str(val)
else:
return 'null'
@staticmethod
def mysql_format(template, *args):
mysql_args = map(BaseDAO.python_value_to_sql_value, args)
return template.format(*mysql_args)
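    # For example (values illustrative): BaseDAO.mysql_format("update t set price={} where id={}", 1.5, 7)
    # yields "update t set price=1.50000 where id=7"; floats keep five decimals and None becomes 'null'.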
def select(self, query, cursor=None):
#self.logger.info('query:%s' % query)
conn = None
if cursor is None:
conn = BaseDAO.get_connection()
cursor = conn.cursor()
try:
cursor.execute(query)
rows = cursor.fetchall()
return rows
except Exception as e:
error_message = "Query:{}, error message: {}, Stack Trace: {}".format(query, str(e), traceback.format_exc())
self.logger.exception(error_message)
finally:
if conn:
conn.close()
def execute_query(self, query, cursor=None):
#self.logger.info('query:%s' % query)
conn = None
if cursor is None:
conn = BaseDAO.get_connection()
cursor = conn.cursor()
try:
cursor.execute(query)
if conn:
conn.commit()
except mysql.connector.IntegrityError:
pass
except Exception as e:
error_message = "Query:{}, error message: {}, Stack Trace: {}".format(query, str(e), traceback.format_exc())
self.logger.exception(error_message)
finally:
if conn:
conn.close()
def execute_query_list(self, query_list):
conn = BaseDAO.get_connection()
cursor = conn.cursor()
try:
query_for_log_exception = None
for query in query_list:
#self.logger.info('query:%s' % query)
query_for_log_exception = query
cursor.execute(query)
conn.commit()
except Exception as e:
error_message = "Query:{}, error message: {}, Stack Trace: {}".format(query_for_log_exception, str(e), traceback.format_exc())
self.logger.exception(error_message)
finally:
conn.close()
if __name__ == '__main__':
#print BaseDAO.mysql_format('insert into table (field1, field2) values ({}, {})', None, None)
print BaseDAO.python_value_to_sql_value(0.0)
| [
"[email protected]"
] | |
610705888444eecd4a25200c9dd46b8ac12f5337 | 6eb207074705bacb36457d713e1dc06555192380 | /plot_ppi_blockage_map.py | ff52f40610dd5525e69260d3983ba9dba9563685 | [] | no_license | ritvje/lidar-xband-article-scripts | 22574adb29d2645fab31003ae8cd654363f6cb0a | ad6ec0997b09609c494316e2ae285296ffdde0eb | refs/heads/main | 2023-04-07T01:31:49.337350 | 2023-02-07T07:55:01 | 2023-02-07T07:55:01 | 491,413,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,484 | py | """Plot a 2-panel figure of radar and lidar availability PPIs.
Looks for files in directory `inpath` called
- xband_obs_pct_{startdate}_{enddate}_pct.txt
- xband_obs_pct_{startdate}_{enddate}_range.txt
- xband_obs_pct_{startdate}_{enddate}_azimuth.txt
- lidar_obs_pct_{startdate}_{enddate}_pct.txt
- lidar_obs_pct_{startdate}_{enddate}_range.txt
- lidar_obs_pct_{startdate}_{enddate}_azimuth.txt
Author: Jenna Ritvanen <[email protected]>
"""
import os
import sys
import argparse
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib as mlt
mlt.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import cartopy.crs as ccrs
plt.style.use("./presentation.mplstyle")
from radar_plotting import plotting
import contextily as ctx
from pathlib import Path
centerpoint = (24.87608, 60.28233)
airport_aws = (24.95675, 60.32670)
COPYRIGHT_TEXT = "Map tiles by Stamen Design, under CC BY 3.0. \nMap data by OpenStreetMap, under ODbL."
@mlt.ticker.FuncFormatter
def m2km_formatter(x, pos):
return f"{x / 1000:.0f}"
if __name__ == "__main__":
argparser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
argparser.add_argument("startdate", type=str, help="the startdate (YYYYmm)")
argparser.add_argument("enddate", type=str, help="the enddate (YYYYmm)")
argparser.add_argument(
"inpath", type=str, help="Path where the input files are located"
)
argparser.add_argument(
"--ext",
type=str,
default="png",
choices=["pdf", "png"],
help="Output plot file format.",
)
argparser.add_argument("--outpath", type=str, default=".", help="Output path")
argparser.add_argument(
"--maxdist",
type=float,
default=15,
help="Maximum distance plotted in figures in km",
)
argparser.add_argument(
"--dpi", type=int, default=300, help="Dots per inch in figure"
)
args = argparser.parse_args()
outpath = Path(args.outpath)
outpath.mkdir(parents=True, exist_ok=True)
inpath = Path(args.inpath).resolve()
startdate = datetime.strptime(args.startdate, "%Y%m")
enddate = (
datetime.strptime(args.enddate, "%Y%m") + pd.offsets.MonthEnd(0)
).to_pydatetime()
pct_xband = np.loadtxt(
inpath / f"xband_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_pct.txt"
)
xband_rr = np.loadtxt(
inpath / f"xband_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_range.txt"
)
xband_az = np.loadtxt(
inpath / f"xband_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_azimuth.txt"
)
pct_lidar = np.loadtxt(
inpath / f"lidar_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_pct.txt"
)
lidar_rr = np.loadtxt(
inpath / f"lidar_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_range.txt"
)
lidar_az = np.loadtxt(
inpath / f"lidar_obs_pct_{startdate:%Y%m%d}_{enddate:%Y%m%d}_azimuth.txt"
)
outfn = os.path.join(outpath, f"meas_pct_map.{args.ext}")
cbar_ax_kws = {
"width": "5%", # width = 5% of parent_bbox width
"height": "100%",
"loc": "lower left",
"bbox_to_anchor": (1.01, 0.0, 1, 1),
"borderpad": 0,
}
fig = plt.figure(figsize=(12, 10))
ax_lidar, fig, aeqd, ext = plotting.axes_with_background_map(
centerpoint, 15, 10, fig=fig, no_map=True, map="toner-line", ncols=2, index=1
)
ctx.add_basemap(
ax_lidar, crs=aeqd, zorder=9, zoom=11, source=ctx.providers.Stamen.TonerLite
)
p = plotting.plot_ppi(
ax_lidar,
pct_lidar,
lidar_az,
lidar_rr,
rasterized=True,
vmin=0,
vmax=1,
cmap="viridis",
zorder=100,
alpha=0.7,
linewidth=0,
antialiased=True,
edgecolor="none",
)
ax_lidar.scatter(
*airport_aws,
s=75,
transform=ccrs.PlateCarree(),
zorder=110,
label="Helsinki Airport",
marker="X",
color="k",
)
ax_radar, fig, aeqd, ext = plotting.axes_with_background_map(
centerpoint,
15,
10,
fig=fig,
no_map=True,
map="toner-line",
sharey=None,
ncols=2,
index=2,
)
ctx.add_basemap(
ax_radar, crs=aeqd, zorder=9, zoom=11, source=ctx.providers.Stamen.TonerLite
)
p = plotting.plot_ppi(
ax_radar,
pct_xband,
xband_az,
xband_rr,
rasterized=True,
vmin=0,
vmax=1,
cmap="viridis",
zorder=100,
alpha=0.7,
linewidth=0,
antialiased=True,
edgecolor="none",
)
ax_radar.scatter(
*airport_aws,
s=75,
transform=ccrs.PlateCarree(),
zorder=110,
label="Helsinki Airport",
marker="X",
color="k",
)
cax = inset_axes(ax_radar, bbox_transform=ax_radar.transAxes, **cbar_ax_kws)
cbar = plt.colorbar(p, orientation="vertical", cax=cax, ax=None)
cbar.set_label("Fraction", weight="bold")
cbar.ax.tick_params(labelsize=12)
for ax, title in zip([ax_lidar, ax_radar], ["(a) Lidar", "(b) X-band radar"]):
plotting.set_ticks_km(
ax,
[
-args.maxdist * 1e3,
args.maxdist * 1e3,
-args.maxdist * 1e3,
args.maxdist * 1e3,
],
16,
16,
)
# x-axis
ax.set_xlabel("Distance from site [km]", weight="bold", size="medium")
ax.set_title(title, y=-0.15, size="large")
ax.xaxis.set_major_formatter(m2km_formatter)
# y-axis
ax.set_ylabel("Distance from site [km]", weight="bold", size="medium")
ax.yaxis.set_major_formatter(m2km_formatter)
ax.set_xlim([-args.maxdist * 1e3, args.maxdist * 1e3])
ax.set_ylim([-args.maxdist * 1e3, args.maxdist * 1e3])
ax.tick_params(axis="both", which="major", labelsize="small")
ax.set_aspect(1)
ax.text(
0.75, 0.01, COPYRIGHT_TEXT, fontsize=4, zorder=100, transform=ax.transAxes
)
ax_radar.set_yticks([])
ax_radar.set_yticklabels([])
ax_radar.set_ylabel("")
fig.savefig(outfn, dpi=args.dpi, bbox_inches="tight")
| [
"[email protected]"
] | |
638543371020bc4e2ed3cc60c96f65e4fd382168 | 3292017df3ff6c7190d5c5a60ecf5f8936cb7b90 | /checkio/Dropbox/Break Rings/test_break_rings.py | 6901be0675a8ff2873ac7103b90f6f5e11117248 | [
"MIT"
] | permissive | KenMercusLai/checkio | 1e9cdfe70ccaf5315db36391c4710533d99cf9aa | 5082ab0c6a7ae2d97963568a6f41589332e88029 | refs/heads/master | 2022-05-12T18:22:22.604531 | 2022-05-11T09:00:28 | 2022-05-11T09:00:28 | 22,260,056 | 39 | 22 | NOASSERTION | 2022-05-11T08:42:05 | 2014-07-25T14:40:06 | Python | UTF-8 | Python | false | false | 11,561 | py | import unittest
from break_rings import break_rings
class Tests(unittest.TestCase):
TESTS = {
"Basics": [
{"input": [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [4, 6]], "answer": 3},
{"input": [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]], "answer": 3},
{"input": [[5, 6], [4, 5], [3, 4], [3, 2], [2, 1], [1, 6]], "answer": 3},
{
"input": [
[8, 9],
[1, 9],
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 6],
[6, 7],
[8, 7],
],
"answer": 5,
},
],
"Extra": [
{
"input": [
[8, 7],
[1, 9],
[2, 7],
[3, 6],
[1, 7],
[5, 7],
[3, 4],
[9, 5],
[9, 6],
[3, 5],
],
"answer": 3,
},
{
"input": [
[3, 4],
[1, 6],
[1, 2],
[9, 5],
[2, 5],
[9, 2],
[8, 3],
[2, 4],
[8, 4],
[1, 3],
[8, 1],
[1, 7],
[6, 7],
],
"answer": 6,
},
{
"input": [
[5, 7],
[9, 4],
[1, 2],
[9, 5],
[1, 3],
[9, 3],
[9, 6],
[1, 5],
[2, 3],
[3, 7],
[9, 7],
[8, 6],
[3, 4],
],
"answer": 5,
},
{
"input": [
[1, 9],
[1, 2],
[8, 5],
[4, 6],
[5, 6],
[8, 1],
[3, 4],
[2, 6],
[9, 6],
[8, 4],
[8, 3],
[5, 7],
[9, 7],
[2, 3],
[1, 7],
],
"answer": 5,
},
{
"input": [
[1, 3],
[3, 4],
[3, 5],
[4, 6],
[6, 7],
[8, 3],
[8, 1],
[2, 6],
[8, 4],
[9, 5],
[4, 5],
[1, 7],
],
"answer": 5,
},
{
"input": [
[9, 5],
[5, 6],
[2, 6],
[4, 5],
[8, 2],
[1, 3],
[1, 4],
[9, 4],
[1, 2],
[9, 2],
[8, 7],
[8, 3],
[8, 6],
[2, 3],
[8, 9],
],
"answer": 5,
},
{
"input": [
[9, 7],
[9, 6],
[8, 5],
[8, 3],
[8, 9],
[5, 7],
[4, 5],
[8, 4],
[1, 7],
[9, 4],
[1, 5],
[2, 5],
[4, 6],
[8, 2],
[1, 2],
[2, 4],
[8, 7],
[8, 1],
],
"answer": 5,
},
{
"input": [
[3, 4],
[5, 6],
[2, 7],
[1, 5],
[2, 6],
[8, 4],
[1, 7],
[4, 5],
[9, 5],
[2, 3],
[8, 2],
[2, 4],
[9, 6],
[5, 7],
[3, 6],
[1, 3],
],
"answer": 5,
},
{
"input": [
[2, 5],
[3, 7],
[5, 6],
[6, 7],
[9, 6],
[8, 9],
[9, 7],
[1, 4],
[1, 9],
[9, 5],
[2, 4],
[2, 6],
[2, 3],
[9, 2],
[3, 6],
[4, 5],
[1, 2],
],
"answer": 5,
},
{
"input": [
[1, 4],
[4, 7],
[9, 3],
[8, 2],
[4, 6],
[3, 4],
[2, 3],
[8, 9],
[5, 7],
[9, 5],
],
"answer": 4,
},
{
"input": [
[1, 3],
[8, 4],
[4, 6],
[3, 7],
[8, 2],
[1, 2],
[8, 9],
[4, 5],
[8, 1],
[1, 9],
[1, 7],
[1, 6],
[2, 5],
[9, 6],
[2, 4],
[9, 2],
],
"answer": 5,
},
{
"input": [
[9, 7],
[9, 4],
[9, 3],
[2, 6],
[2, 5],
[3, 7],
[4, 6],
[1, 3],
[1, 4],
[8, 9],
[3, 5],
[5, 7],
],
"answer": 5,
},
{
"input": [
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 2],
[1, 6],
[6, 7],
[7, 8],
[8, 9],
[9, 6],
[1, 10],
[10, 11],
[11, 12],
[12, 13],
[13, 10],
[1, 14],
[14, 15],
[15, 16],
[16, 17],
[17, 14],
],
"answer": 8,
},
{
"input": [
[1, 4],
[4, 7],
[9, 2],
[2, 6],
[5, 6],
[8, 1],
[3, 7],
[9, 3],
[3, 6],
[8, 6],
[1, 7],
[2, 4],
[1, 9],
[8, 3],
[9, 6],
],
"answer": 5,
},
{
"input": [
[1, 2],
[3, 7],
[2, 3],
[3, 5],
[1, 4],
[2, 5],
[9, 3],
[5, 7],
[1, 9],
[8, 4],
[1, 3],
[2, 6],
[9, 4],
],
"answer": 5,
},
{
"input": [
[4, 6],
[2, 5],
[1, 6],
[6, 7],
[2, 6],
[8, 7],
[2, 4],
[4, 7],
[9, 3],
[3, 7],
[8, 3],
[2, 7],
[9, 6],
[4, 5],
],
"answer": 5,
},
{
"input": [
[11, 7],
[10, 5],
[4, 6],
[3, 4],
[19, 14],
[1, 17],
[8, 4],
[18, 3],
[17, 12],
[16, 11],
[9, 11],
[2, 6],
[11, 4],
[17, 3],
[13, 6],
[11, 20],
[11, 15],
[8, 3],
[5, 7],
],
"answer": 7,
},
{
"input": [
[4, 6],
[4, 12],
[2, 4],
[12, 5],
[12, 14],
[12, 7],
[9, 13],
[1, 10],
[9, 18],
[17, 19],
[4, 13],
[2, 20],
[10, 14],
[11, 12],
[11, 15],
[16, 2],
[8, 5],
[3, 12],
[17, 11],
[10, 19],
],
"answer": 8,
},
{
"input": [
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 6],
[6, 7],
[8, 7],
[8, 9],
[9, 1],
],
"answer": 5,
},
{
"input": [
[1, 2],
[2, 3],
[3, 4],
[4, 5],
[5, 6],
[6, 7],
[8, 7],
[8, 9],
[9, 7],
[10, 4],
[10, 11],
[11, 12],
[12, 13],
[12, 14],
],
"answer": 7,
},
{
"input": [[1, 2], [1, 3], [1, 5], [2, 3], [2, 4], [4, 6], [5, 6]],
"answer": 3,
},
],
}
def test_Basics(self):
for i in self.TESTS['Basics']:
assert break_rings(i['input']) == i['answer'], i['input']
def test_Extra(self):
for i in self.TESTS['Extra']:
assert break_rings(i['input']) == i['answer'], i['input']
if __name__ == "__main__": # pragma: no cover
unittest.main()
| [
"[email protected]"
] | |
59c5e626e9a7066e4b46f04bc311d2d0bb6b243e | 13f78c34e80a52442d72e0aa609666163233e7e0 | /Other/ICPC Live Archive/7526/input_gen.py | e02dee7416064f5e4eaed6e55e6ef4f49cc2d0f0 | [] | no_license | Giantpizzahead/comp-programming | 0d16babe49064aee525d78a70641ca154927af20 | 232a19fdd06ecef7be845c92db38772240a33e41 | refs/heads/master | 2023-08-17T20:23:28.693280 | 2023-08-11T22:18:26 | 2023-08-11T22:18:26 | 252,904,746 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | import random
def genTestCase(out):
ROWS = random.randint(200, 300)
COLS = random.randint(200, 300)
SPARE_ROWS = random.randint(0, int(ROWS / 5))
SPARE_DIODES = random.randint(0, int(ROWS * COLS / 7))
out.write("{} {} {} {}\n".format(ROWS, COLS, SPARE_ROWS, SPARE_DIODES))
ZERO_DENSITY = random.randint(0, 10)
choice_arr = ['0' for _ in range(ZERO_DENSITY)]
choice_arr.append('1')
for i in range(ROWS):
out.write(' '.join([random.choice(choice_arr) for _ in range(COLS)]))
out.write('\n')
with open("input.txt", 'w') as out:
for i in range(30):
genTestCase(out)
out.write("0 0 0 0\n")
| [
"[email protected]"
] | |
cb7b18e29e9988f4058c89a85ff27762dd7ea458 | eb1a2e24ecdbe9be8a6aac153fe1980b19dcbe4a | /sameline.py | db7711b6a73bb522e178afdfcfdba7a0d80d7476 | [] | no_license | sushmithasushi/playerlevel | 1e7363e63bd75deba79b96a659db7736c93ed6a2 | 612ea4483a5c5d7c3c3a8564e0b7ce0df08a686a | refs/heads/master | 2020-06-19T07:03:06.457910 | 2019-07-24T04:28:31 | 2019-07-24T04:28:31 | 196,608,802 | 0 | 0 | null | 2019-07-12T16:08:51 | 2019-07-12T16:08:51 | null | UTF-8 | Python | false | false | 206 | py | num1=input().split()
n2=input().split()
n3=input().split()
if(num1[0]==n2[0]==n3[0] or num1[1]==n2[1]==n3[1] or (num1[0]==num1[1] and n2[0]==n2[1] and n3[0]==n3[1])):
print('yes')
else:
print('no')
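# e.g. the three inputs "1 5", "1 7" and "1 9" share x == 1, so the script prints 'yes'.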
| [
"[email protected]"
] | |
7de4e0db9696917074e2cac47616c5de30351365 | 1f33bcbd545b82bc64e7f7b5ef2052798b02b279 | /Quiz:Program1/quiz.py | f90e3289eeff403a20c20b0baeae41159bfc7583 | [] | no_license | jlevy44/LearningScripts | 107369798605641a24a5a29c9f8f249d5396e0b6 | fe7ce46a995cd6b7e40099a4b2db558a549de446 | refs/heads/master | 2021-01-01T19:19:11.849540 | 2017-09-27T00:19:54 | 2017-09-27T00:19:54 | 98,566,879 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | def is_true(i):
if i==2:
return True
else:
return False
print(is_true(2))
def lalala(i):
return lambda y: i**2
print(lalala(5))
print('alpha' > 'beta')
d='hello'
for p in d: print(p)
print(d[-2:2])
def add(x,y):
    print(x + y)
add(2,6)
def make_scalar(c):
return lambda n: c*(n)
tripleAdd=make_scalar(3)
print(tripleAdd(2))
"[email protected]"
] | |
21120623aaae1cf3c80e160d93a4a917c30ebcfc | ece0d321e48f182832252b23db1df0c21b78f20c | /engine/2.80/scripts/templates_py/ui_previews_dynamic_enum.py | 39a3750bfee6404d1a45d3c448143682e5ad1b80 | [
"GPL-3.0-only",
"Font-exception-2.0",
"GPL-3.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain-disclaimer",
"Bitstream-Vera",
"LicenseRef-scancode-blender-2010",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"PSF-2.0",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-2-Clause",
"Unlicense"
] | permissive | byteinc/Phasor | 47d4e48a52fa562dfa1a2dbe493f8ec9e94625b9 | f7d23a489c2b4bcc3c1961ac955926484ff8b8d9 | refs/heads/master | 2022-10-25T17:05:01.585032 | 2019-03-16T19:24:22 | 2019-03-16T19:24:22 | 175,723,233 | 3 | 1 | Unlicense | 2022-10-21T07:02:37 | 2019-03-15T00:58:08 | Python | UTF-8 | Python | false | false | 4,092 | py | # This sample script demonstrates a dynamic EnumProperty with custom icons.
# The EnumProperty is populated dynamically with thumbnails of the contents of
# a chosen directory in 'enum_previews_from_directory_items'.
# Then, the same enum is displayed with different interfaces. Note that the
# generated icon previews do not have Blender IDs, which means that they can
# not be used with UILayout templates that require IDs,
# such as template_list and template_ID_preview.
#
# Other use cases:
# - make a fixed list of enum_items instead of calculating them in a function
# - generate isolated thumbnails to use as custom icons in buttons
# and menu items
#
# For custom icons, see the template "ui_previews_custom_icon.py".
#
# For distributable scripts, it is recommended to place the icons inside the
# script directory and access it relative to the py script file for portability:
#
# os.path.join(os.path.dirname(__file__), "images")
import os
import bpy
def enum_previews_from_directory_items(self, context):
"""EnumProperty callback"""
enum_items = []
if context is None:
return enum_items
wm = context.window_manager
directory = wm.my_previews_dir
# Get the preview collection (defined in register func).
pcoll = preview_collections["main"]
if directory == pcoll.my_previews_dir:
return pcoll.my_previews
print("Scanning directory: %s" % directory)
if directory and os.path.exists(directory):
# Scan the directory for png files
image_paths = []
for fn in os.listdir(directory):
if fn.lower().endswith(".png"):
image_paths.append(fn)
for i, name in enumerate(image_paths):
# generates a thumbnail preview for a file.
filepath = os.path.join(directory, name)
thumb = pcoll.load(filepath, filepath, 'IMAGE')
enum_items.append((name, name, "", thumb.icon_id, i))
pcoll.my_previews = enum_items
pcoll.my_previews_dir = directory
return pcoll.my_previews
class PreviewsExamplePanel(bpy.types.Panel):
"""Creates a Panel in the Object properties window"""
bl_label = "Previews Example Panel"
bl_idname = "OBJECT_PT_previews"
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "object"
def draw(self, context):
layout = self.layout
wm = context.window_manager
row = layout.row()
row.prop(wm, "my_previews_dir")
row = layout.row()
row.template_icon_view(wm, "my_previews")
row = layout.row()
row.prop(wm, "my_previews")
# We can store multiple preview collections here,
# however in this example we only store "main"
preview_collections = {}
def register():
from bpy.types import WindowManager
from bpy.props import (
StringProperty,
EnumProperty,
)
WindowManager.my_previews_dir = StringProperty(
name="Folder Path",
subtype='DIR_PATH',
default=""
)
WindowManager.my_previews = EnumProperty(
items=enum_previews_from_directory_items,
)
# Note that preview collections returned by bpy.utils.previews
# are regular Python objects - you can use them to store custom data.
#
# This is especially useful here, since:
# - It avoids us regenerating the whole enum over and over.
# - It can store enum_items' strings
# (remember you have to keep those strings somewhere in py,
# else they get freed and Blender references invalid memory!).
import bpy.utils.previews
pcoll = bpy.utils.previews.new()
pcoll.my_previews_dir = ""
pcoll.my_previews = ()
preview_collections["main"] = pcoll
bpy.utils.register_class(PreviewsExamplePanel)
def unregister():
from bpy.types import WindowManager
del WindowManager.my_previews
for pcoll in preview_collections.values():
bpy.utils.previews.remove(pcoll)
preview_collections.clear()
bpy.utils.unregister_class(PreviewsExamplePanel)
if __name__ == "__main__":
register()
| [
"[email protected]"
] | |
9fd7c3ec34570c9b399747c13d244594b46a245a | 26aa4bdbc12ee99d187c94226af732d7487e30fa | /backend/weathered_shape_26383/settings.py | 6f52b6bc0ee24fd25fd10775d3917d2cc9a51ac4 | [] | no_license | crowdbotics-apps/weathered-shape-26383 | 25e722172a9e0c9305e96962c010f81b06692df4 | 86c617b71b1789e1b49212ee3f83859e526d8740 | refs/heads/master | 2023-04-17T01:20:00.812312 | 2021-05-08T13:33:10 | 2021-05-08T13:33:10 | 365,522,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,129 | py | """
Django settings for weathered_shape_26383 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'weathered_shape_26383.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'weathered_shape_26383.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
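# Example environment for local development (values are placeholders):
#   DEBUG=True
#   SECRET_KEY=dev-secret
#   DATABASE_URL=postgres://user:pass@localhost:5432/app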
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
d40bb63a0564eb02be11362b9c6a638abb65d150 | e5e2b7da41fda915cb849f031a0223e2ac354066 | /sdk/python/pulumi_azure_native/containerservice/v20200701/agent_pool.py | 8251a0a40d2242bfa0e0bf39684334d89914e23d | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | johnbirdau/pulumi-azure-native | b7d3bdddeb7c4b319a7e43a892ddc6e25e3bfb25 | d676cc331caa0694d8be99cb90b93fa231e3c705 | refs/heads/master | 2023-05-06T06:48:05.040357 | 2021-06-01T20:42:38 | 2021-06-01T20:42:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,405 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['AgentPoolArgs', 'AgentPool']
@pulumi.input_type
class AgentPoolArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
resource_name: pulumi.Input[str],
agent_pool_name: Optional[pulumi.Input[str]] = None,
availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
count: Optional[pulumi.Input[int]] = None,
enable_auto_scaling: Optional[pulumi.Input[bool]] = None,
enable_node_public_ip: Optional[pulumi.Input[bool]] = None,
max_count: Optional[pulumi.Input[int]] = None,
max_pods: Optional[pulumi.Input[int]] = None,
min_count: Optional[pulumi.Input[int]] = None,
mode: Optional[pulumi.Input[Union[str, 'AgentPoolMode']]] = None,
node_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
orchestrator_version: Optional[pulumi.Input[str]] = None,
os_disk_size_gb: Optional[pulumi.Input[int]] = None,
os_type: Optional[pulumi.Input[Union[str, 'OSType']]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
scale_set_eviction_policy: Optional[pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']]] = None,
scale_set_priority: Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]] = None,
spot_max_price: Optional[pulumi.Input[float]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[Union[str, 'AgentPoolType']]] = None,
upgrade_settings: Optional[pulumi.Input['AgentPoolUpgradeSettingsArgs']] = None,
vm_size: Optional[pulumi.Input[Union[str, 'ContainerServiceVMSizeTypes']]] = None,
vnet_subnet_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a AgentPool resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name: The name of the managed cluster resource.
:param pulumi.Input[str] agent_pool_name: The name of the agent pool.
:param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType.
:param pulumi.Input[int] count: Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for system pools. The default value is 1.
:param pulumi.Input[bool] enable_auto_scaling: Whether to enable auto-scaler
:param pulumi.Input[bool] enable_node_public_ip: Enable public IP for nodes
:param pulumi.Input[int] max_count: Maximum number of nodes for auto-scaling
:param pulumi.Input[int] max_pods: Maximum number of pods that can run on a node.
:param pulumi.Input[int] min_count: Minimum number of nodes for auto-scaling
:param pulumi.Input[Union[str, 'AgentPoolMode']] mode: AgentPoolMode represents mode of an agent pool
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:param pulumi.Input[Sequence[pulumi.Input[str]]] node_taints: Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
:param pulumi.Input[str] orchestrator_version: Version of orchestrator specified when creating the managed cluster.
:param pulumi.Input[int] os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
:param pulumi.Input[Union[str, 'OSType']] os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
:param pulumi.Input[str] proximity_placement_group_id: The ID for Proximity Placement Group.
:param pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']] scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction policy for Spot virtual machine scale set. Default to Delete.
:param pulumi.Input[Union[str, 'ScaleSetPriority']] scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular.
:param pulumi.Input[float] spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which indicates default price to be up-to on-demand.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Agent pool tags to be persisted on the agent pool virtual machine scale set.
:param pulumi.Input[Union[str, 'AgentPoolType']] type: AgentPoolType represents types of an agent pool
:param pulumi.Input['AgentPoolUpgradeSettingsArgs'] upgrade_settings: Settings for upgrading the agentpool
:param pulumi.Input[Union[str, 'ContainerServiceVMSizeTypes']] vm_size: Size of agent VMs.
:param pulumi.Input[str] vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "resource_name", resource_name)
if agent_pool_name is not None:
pulumi.set(__self__, "agent_pool_name", agent_pool_name)
if availability_zones is not None:
pulumi.set(__self__, "availability_zones", availability_zones)
if count is not None:
pulumi.set(__self__, "count", count)
if enable_auto_scaling is not None:
pulumi.set(__self__, "enable_auto_scaling", enable_auto_scaling)
if enable_node_public_ip is not None:
pulumi.set(__self__, "enable_node_public_ip", enable_node_public_ip)
if max_count is not None:
pulumi.set(__self__, "max_count", max_count)
if max_pods is not None:
pulumi.set(__self__, "max_pods", max_pods)
if min_count is not None:
pulumi.set(__self__, "min_count", min_count)
if mode is not None:
pulumi.set(__self__, "mode", mode)
if node_labels is not None:
pulumi.set(__self__, "node_labels", node_labels)
if node_taints is not None:
pulumi.set(__self__, "node_taints", node_taints)
if orchestrator_version is not None:
pulumi.set(__self__, "orchestrator_version", orchestrator_version)
if os_disk_size_gb is not None:
pulumi.set(__self__, "os_disk_size_gb", os_disk_size_gb)
if os_type is not None:
pulumi.set(__self__, "os_type", os_type)
if proximity_placement_group_id is not None:
pulumi.set(__self__, "proximity_placement_group_id", proximity_placement_group_id)
if scale_set_eviction_policy is not None:
pulumi.set(__self__, "scale_set_eviction_policy", scale_set_eviction_policy)
if scale_set_priority is not None:
pulumi.set(__self__, "scale_set_priority", scale_set_priority)
if spot_max_price is not None:
pulumi.set(__self__, "spot_max_price", spot_max_price)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if type is not None:
pulumi.set(__self__, "type", type)
if upgrade_settings is not None:
pulumi.set(__self__, "upgrade_settings", upgrade_settings)
if vm_size is not None:
pulumi.set(__self__, "vm_size", vm_size)
if vnet_subnet_id is not None:
pulumi.set(__self__, "vnet_subnet_id", vnet_subnet_id)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> pulumi.Input[str]:
"""
The name of the managed cluster resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="agentPoolName")
def agent_pool_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the agent pool.
"""
return pulumi.get(self, "agent_pool_name")
@agent_pool_name.setter
def agent_pool_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "agent_pool_name", value)
@property
@pulumi.getter(name="availabilityZones")
def availability_zones(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType.
"""
return pulumi.get(self, "availability_zones")
@availability_zones.setter
def availability_zones(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "availability_zones", value)
@property
@pulumi.getter
def count(self) -> Optional[pulumi.Input[int]]:
"""
Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for system pools. The default value is 1.
"""
return pulumi.get(self, "count")
@count.setter
def count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "count", value)
@property
@pulumi.getter(name="enableAutoScaling")
def enable_auto_scaling(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to enable auto-scaler
"""
return pulumi.get(self, "enable_auto_scaling")
@enable_auto_scaling.setter
def enable_auto_scaling(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_auto_scaling", value)
@property
@pulumi.getter(name="enableNodePublicIP")
def enable_node_public_ip(self) -> Optional[pulumi.Input[bool]]:
"""
Enable public IP for nodes
"""
return pulumi.get(self, "enable_node_public_ip")
@enable_node_public_ip.setter
def enable_node_public_ip(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_node_public_ip", value)
@property
@pulumi.getter(name="maxCount")
def max_count(self) -> Optional[pulumi.Input[int]]:
"""
Maximum number of nodes for auto-scaling
"""
return pulumi.get(self, "max_count")
@max_count.setter
def max_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_count", value)
@property
@pulumi.getter(name="maxPods")
def max_pods(self) -> Optional[pulumi.Input[int]]:
"""
Maximum number of pods that can run on a node.
"""
return pulumi.get(self, "max_pods")
@max_pods.setter
def max_pods(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_pods", value)
@property
@pulumi.getter(name="minCount")
def min_count(self) -> Optional[pulumi.Input[int]]:
"""
Minimum number of nodes for auto-scaling
"""
return pulumi.get(self, "min_count")
@min_count.setter
def min_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_count", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[Union[str, 'AgentPoolMode']]]:
"""
AgentPoolMode represents mode of an agent pool
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[Union[str, 'AgentPoolMode']]]):
pulumi.set(self, "mode", value)
@property
@pulumi.getter(name="nodeLabels")
def node_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Agent pool node labels to be persisted across all nodes in agent pool.
"""
return pulumi.get(self, "node_labels")
@node_labels.setter
def node_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "node_labels", value)
@property
@pulumi.getter(name="nodeTaints")
def node_taints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
"""
return pulumi.get(self, "node_taints")
@node_taints.setter
def node_taints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "node_taints", value)
@property
@pulumi.getter(name="orchestratorVersion")
def orchestrator_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of orchestrator specified when creating the managed cluster.
"""
return pulumi.get(self, "orchestrator_version")
@orchestrator_version.setter
def orchestrator_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "orchestrator_version", value)
@property
@pulumi.getter(name="osDiskSizeGB")
def os_disk_size_gb(self) -> Optional[pulumi.Input[int]]:
"""
OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
"""
return pulumi.get(self, "os_disk_size_gb")
@os_disk_size_gb.setter
def os_disk_size_gb(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "os_disk_size_gb", value)
@property
@pulumi.getter(name="osType")
def os_type(self) -> Optional[pulumi.Input[Union[str, 'OSType']]]:
"""
OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
"""
return pulumi.get(self, "os_type")
@os_type.setter
def os_type(self, value: Optional[pulumi.Input[Union[str, 'OSType']]]):
pulumi.set(self, "os_type", value)
@property
@pulumi.getter(name="proximityPlacementGroupID")
def proximity_placement_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID for Proximity Placement Group.
"""
return pulumi.get(self, "proximity_placement_group_id")
@proximity_placement_group_id.setter
def proximity_placement_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "proximity_placement_group_id", value)
@property
@pulumi.getter(name="scaleSetEvictionPolicy")
def scale_set_eviction_policy(self) -> Optional[pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']]]:
"""
ScaleSetEvictionPolicy to be used to specify eviction policy for Spot virtual machine scale set. Default to Delete.
"""
return pulumi.get(self, "scale_set_eviction_policy")
@scale_set_eviction_policy.setter
def scale_set_eviction_policy(self, value: Optional[pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']]]):
pulumi.set(self, "scale_set_eviction_policy", value)
@property
@pulumi.getter(name="scaleSetPriority")
def scale_set_priority(self) -> Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]]:
"""
ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular.
"""
return pulumi.get(self, "scale_set_priority")
@scale_set_priority.setter
def scale_set_priority(self, value: Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]]):
pulumi.set(self, "scale_set_priority", value)
@property
@pulumi.getter(name="spotMaxPrice")
def spot_max_price(self) -> Optional[pulumi.Input[float]]:
"""
SpotMaxPrice to be used to specify the maximum price you are willing to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which indicates default price to be up-to on-demand.
"""
return pulumi.get(self, "spot_max_price")
@spot_max_price.setter
def spot_max_price(self, value: Optional[pulumi.Input[float]]):
pulumi.set(self, "spot_max_price", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Agent pool tags to be persisted on the agent pool virtual machine scale set.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[Union[str, 'AgentPoolType']]]:
"""
AgentPoolType represents types of an agent pool
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[Union[str, 'AgentPoolType']]]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="upgradeSettings")
def upgrade_settings(self) -> Optional[pulumi.Input['AgentPoolUpgradeSettingsArgs']]:
"""
Settings for upgrading the agentpool
"""
return pulumi.get(self, "upgrade_settings")
@upgrade_settings.setter
def upgrade_settings(self, value: Optional[pulumi.Input['AgentPoolUpgradeSettingsArgs']]):
pulumi.set(self, "upgrade_settings", value)
@property
@pulumi.getter(name="vmSize")
def vm_size(self) -> Optional[pulumi.Input[Union[str, 'ContainerServiceVMSizeTypes']]]:
"""
Size of agent VMs.
"""
return pulumi.get(self, "vm_size")
@vm_size.setter
def vm_size(self, value: Optional[pulumi.Input[Union[str, 'ContainerServiceVMSizeTypes']]]):
pulumi.set(self, "vm_size", value)
@property
@pulumi.getter(name="vnetSubnetID")
def vnet_subnet_id(self) -> Optional[pulumi.Input[str]]:
"""
VNet SubnetID specifies the VNet's subnet identifier.
"""
return pulumi.get(self, "vnet_subnet_id")
@vnet_subnet_id.setter
def vnet_subnet_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vnet_subnet_id", value)
class AgentPool(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_pool_name: Optional[pulumi.Input[str]] = None,
availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
count: Optional[pulumi.Input[int]] = None,
enable_auto_scaling: Optional[pulumi.Input[bool]] = None,
enable_node_public_ip: Optional[pulumi.Input[bool]] = None,
max_count: Optional[pulumi.Input[int]] = None,
max_pods: Optional[pulumi.Input[int]] = None,
min_count: Optional[pulumi.Input[int]] = None,
mode: Optional[pulumi.Input[Union[str, 'AgentPoolMode']]] = None,
node_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
orchestrator_version: Optional[pulumi.Input[str]] = None,
os_disk_size_gb: Optional[pulumi.Input[int]] = None,
os_type: Optional[pulumi.Input[Union[str, 'OSType']]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
scale_set_eviction_policy: Optional[pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']]] = None,
scale_set_priority: Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]] = None,
spot_max_price: Optional[pulumi.Input[float]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[Union[str, 'AgentPoolType']]] = None,
upgrade_settings: Optional[pulumi.Input[pulumi.InputType['AgentPoolUpgradeSettingsArgs']]] = None,
vm_size: Optional[pulumi.Input[Union[str, 'ContainerServiceVMSizeTypes']]] = None,
vnet_subnet_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Agent Pool.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] agent_pool_name: The name of the agent pool.
:param pulumi.Input[Sequence[pulumi.Input[str]]] availability_zones: Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType.
:param pulumi.Input[int] count: Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for system pools. The default value is 1.
:param pulumi.Input[bool] enable_auto_scaling: Whether to enable auto-scaler
:param pulumi.Input[bool] enable_node_public_ip: Enable public IP for nodes
:param pulumi.Input[int] max_count: Maximum number of nodes for auto-scaling
:param pulumi.Input[int] max_pods: Maximum number of pods that can run on a node.
:param pulumi.Input[int] min_count: Minimum number of nodes for auto-scaling
:param pulumi.Input[Union[str, 'AgentPoolMode']] mode: AgentPoolMode represents mode of an agent pool
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] node_labels: Agent pool node labels to be persisted across all nodes in agent pool.
:param pulumi.Input[Sequence[pulumi.Input[str]]] node_taints: Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
:param pulumi.Input[str] orchestrator_version: Version of orchestrator specified when creating the managed cluster.
:param pulumi.Input[int] os_disk_size_gb: OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
:param pulumi.Input[Union[str, 'OSType']] os_type: OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
:param pulumi.Input[str] proximity_placement_group_id: The ID for Proximity Placement Group.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_name_: The name of the managed cluster resource.
:param pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']] scale_set_eviction_policy: ScaleSetEvictionPolicy to be used to specify eviction policy for Spot virtual machine scale set. Default to Delete.
:param pulumi.Input[Union[str, 'ScaleSetPriority']] scale_set_priority: ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular.
:param pulumi.Input[float] spot_max_price: SpotMaxPrice to be used to specify the maximum price you are willing to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which indicates default price to be up-to on-demand.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Agent pool tags to be persisted on the agent pool virtual machine scale set.
:param pulumi.Input[Union[str, 'AgentPoolType']] type: AgentPoolType represents types of an agent pool
:param pulumi.Input[pulumi.InputType['AgentPoolUpgradeSettingsArgs']] upgrade_settings: Settings for upgrading the agentpool
:param pulumi.Input[Union[str, 'ContainerServiceVMSizeTypes']] vm_size: Size of agent VMs.
:param pulumi.Input[str] vnet_subnet_id: VNet SubnetID specifies the VNet's subnet identifier.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: AgentPoolArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Agent Pool.
:param str resource_name: The name of the resource.
:param AgentPoolArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
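    # Illustrative sketch (hypothetical names): the keyword-argument form of
    # the first overload above; equivalent to passing an AgentPoolArgs
    # instance through the second overload.
    #
    #   pool = AgentPool("examplepool",
    #                    resource_group_name="example-rg",
    #                    resource_name_="example-managed-cluster",
    #                    count=3,
    #                    vm_size="Standard_DS2_v2")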
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AgentPoolArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_pool_name: Optional[pulumi.Input[str]] = None,
availability_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
count: Optional[pulumi.Input[int]] = None,
enable_auto_scaling: Optional[pulumi.Input[bool]] = None,
enable_node_public_ip: Optional[pulumi.Input[bool]] = None,
max_count: Optional[pulumi.Input[int]] = None,
max_pods: Optional[pulumi.Input[int]] = None,
min_count: Optional[pulumi.Input[int]] = None,
mode: Optional[pulumi.Input[Union[str, 'AgentPoolMode']]] = None,
node_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
node_taints: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
orchestrator_version: Optional[pulumi.Input[str]] = None,
os_disk_size_gb: Optional[pulumi.Input[int]] = None,
os_type: Optional[pulumi.Input[Union[str, 'OSType']]] = None,
proximity_placement_group_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
scale_set_eviction_policy: Optional[pulumi.Input[Union[str, 'ScaleSetEvictionPolicy']]] = None,
scale_set_priority: Optional[pulumi.Input[Union[str, 'ScaleSetPriority']]] = None,
spot_max_price: Optional[pulumi.Input[float]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[Union[str, 'AgentPoolType']]] = None,
upgrade_settings: Optional[pulumi.Input[pulumi.InputType['AgentPoolUpgradeSettingsArgs']]] = None,
vm_size: Optional[pulumi.Input[Union[str, 'ContainerServiceVMSizeTypes']]] = None,
vnet_subnet_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AgentPoolArgs.__new__(AgentPoolArgs)
__props__.__dict__["agent_pool_name"] = agent_pool_name
__props__.__dict__["availability_zones"] = availability_zones
__props__.__dict__["count"] = count
__props__.__dict__["enable_auto_scaling"] = enable_auto_scaling
__props__.__dict__["enable_node_public_ip"] = enable_node_public_ip
__props__.__dict__["max_count"] = max_count
__props__.__dict__["max_pods"] = max_pods
__props__.__dict__["min_count"] = min_count
__props__.__dict__["mode"] = mode
__props__.__dict__["node_labels"] = node_labels
__props__.__dict__["node_taints"] = node_taints
__props__.__dict__["orchestrator_version"] = orchestrator_version
__props__.__dict__["os_disk_size_gb"] = os_disk_size_gb
__props__.__dict__["os_type"] = os_type
__props__.__dict__["proximity_placement_group_id"] = proximity_placement_group_id
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["scale_set_eviction_policy"] = scale_set_eviction_policy
__props__.__dict__["scale_set_priority"] = scale_set_priority
__props__.__dict__["spot_max_price"] = spot_max_price
__props__.__dict__["tags"] = tags
__props__.__dict__["type"] = type
__props__.__dict__["upgrade_settings"] = upgrade_settings
__props__.__dict__["vm_size"] = vm_size
__props__.__dict__["vnet_subnet_id"] = vnet_subnet_id
__props__.__dict__["name"] = None
__props__.__dict__["node_image_version"] = None
__props__.__dict__["provisioning_state"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerservice/v20200701:AgentPool"), pulumi.Alias(type_="azure-native:containerservice:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20190201:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190201:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20190401:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190401:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20190601:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190601:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20190801:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190801:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20191001:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20191001:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20191101:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20191101:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20200101:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200101:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20200201:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200201:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20200301:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200301:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20200401:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200401:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20200601:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200601:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20200901:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20200901:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20201101:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20201101:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20201201:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20201201:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20210201:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210201:AgentPool"), pulumi.Alias(type_="azure-native:containerservice/v20210301:AgentPool"), pulumi.Alias(type_="azure-nextgen:containerservice/v20210301:AgentPool")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(AgentPool, __self__).__init__(
'azure-native:containerservice/v20200701:AgentPool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'AgentPool':
"""
Get an existing AgentPool resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AgentPoolArgs.__new__(AgentPoolArgs)
__props__.__dict__["availability_zones"] = None
__props__.__dict__["count"] = None
__props__.__dict__["enable_auto_scaling"] = None
__props__.__dict__["enable_node_public_ip"] = None
__props__.__dict__["max_count"] = None
__props__.__dict__["max_pods"] = None
__props__.__dict__["min_count"] = None
__props__.__dict__["mode"] = None
__props__.__dict__["name"] = None
__props__.__dict__["node_image_version"] = None
__props__.__dict__["node_labels"] = None
__props__.__dict__["node_taints"] = None
__props__.__dict__["orchestrator_version"] = None
__props__.__dict__["os_disk_size_gb"] = None
__props__.__dict__["os_type"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["proximity_placement_group_id"] = None
__props__.__dict__["scale_set_eviction_policy"] = None
__props__.__dict__["scale_set_priority"] = None
__props__.__dict__["spot_max_price"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["upgrade_settings"] = None
__props__.__dict__["vm_size"] = None
__props__.__dict__["vnet_subnet_id"] = None
return AgentPool(resource_name, opts=opts, __props__=__props__)
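    # Illustrative sketch (hypothetical ID): adopting an existing agent pool
    # with AgentPool.get() instead of creating a new one.
    #
    #   existing = AgentPool.get(
    #       "imported-pool",
    #       id="/subscriptions/<sub>/resourceGroups/example-rg/providers/"
    #          "Microsoft.ContainerService/managedClusters/example-cluster/"
    #          "agentPools/examplepool")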
@property
@pulumi.getter(name="availabilityZones")
def availability_zones(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Availability zones for nodes. Must use VirtualMachineScaleSets AgentPoolType.
"""
return pulumi.get(self, "availability_zones")
@property
@pulumi.getter
def count(self) -> pulumi.Output[Optional[int]]:
"""
Number of agents (VMs) to host docker containers. Allowed values must be in the range of 0 to 100 (inclusive) for user pools and in the range of 1 to 100 (inclusive) for system pools. The default value is 1.
"""
return pulumi.get(self, "count")
@property
@pulumi.getter(name="enableAutoScaling")
def enable_auto_scaling(self) -> pulumi.Output[Optional[bool]]:
"""
Whether to enable auto-scaler
"""
return pulumi.get(self, "enable_auto_scaling")
@property
@pulumi.getter(name="enableNodePublicIP")
def enable_node_public_ip(self) -> pulumi.Output[Optional[bool]]:
"""
Enable public IP for nodes
"""
return pulumi.get(self, "enable_node_public_ip")
@property
@pulumi.getter(name="maxCount")
def max_count(self) -> pulumi.Output[Optional[int]]:
"""
Maximum number of nodes for auto-scaling
"""
return pulumi.get(self, "max_count")
@property
@pulumi.getter(name="maxPods")
def max_pods(self) -> pulumi.Output[Optional[int]]:
"""
Maximum number of pods that can run on a node.
"""
return pulumi.get(self, "max_pods")
@property
@pulumi.getter(name="minCount")
def min_count(self) -> pulumi.Output[Optional[int]]:
"""
Minimum number of nodes for auto-scaling
"""
return pulumi.get(self, "min_count")
@property
@pulumi.getter
def mode(self) -> pulumi.Output[Optional[str]]:
"""
AgentPoolMode represents mode of an agent pool
"""
return pulumi.get(self, "mode")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="nodeImageVersion")
def node_image_version(self) -> pulumi.Output[str]:
"""
Version of node image
"""
return pulumi.get(self, "node_image_version")
@property
@pulumi.getter(name="nodeLabels")
def node_labels(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Agent pool node labels to be persisted across all nodes in agent pool.
"""
return pulumi.get(self, "node_labels")
@property
@pulumi.getter(name="nodeTaints")
def node_taints(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
Taints added to new nodes during node pool create and scale. For example, key=value:NoSchedule.
"""
return pulumi.get(self, "node_taints")
@property
@pulumi.getter(name="orchestratorVersion")
def orchestrator_version(self) -> pulumi.Output[Optional[str]]:
"""
Version of orchestrator specified when creating the managed cluster.
"""
return pulumi.get(self, "orchestrator_version")
@property
@pulumi.getter(name="osDiskSizeGB")
def os_disk_size_gb(self) -> pulumi.Output[Optional[int]]:
"""
OS Disk Size in GB to be used to specify the disk size for every machine in this master/agent pool. If you specify 0, it will apply the default osDisk size according to the vmSize specified.
"""
return pulumi.get(self, "os_disk_size_gb")
@property
@pulumi.getter(name="osType")
def os_type(self) -> pulumi.Output[Optional[str]]:
"""
OsType to be used to specify os type. Choose from Linux and Windows. Default to Linux.
"""
return pulumi.get(self, "os_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current deployment or provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="proximityPlacementGroupID")
def proximity_placement_group_id(self) -> pulumi.Output[Optional[str]]:
"""
The ID for Proximity Placement Group.
"""
return pulumi.get(self, "proximity_placement_group_id")
@property
@pulumi.getter(name="scaleSetEvictionPolicy")
def scale_set_eviction_policy(self) -> pulumi.Output[Optional[str]]:
"""
ScaleSetEvictionPolicy to be used to specify eviction policy for Spot virtual machine scale set. Default to Delete.
"""
return pulumi.get(self, "scale_set_eviction_policy")
@property
@pulumi.getter(name="scaleSetPriority")
def scale_set_priority(self) -> pulumi.Output[Optional[str]]:
"""
ScaleSetPriority to be used to specify virtual machine scale set priority. Default to regular.
"""
return pulumi.get(self, "scale_set_priority")
@property
@pulumi.getter(name="spotMaxPrice")
def spot_max_price(self) -> pulumi.Output[Optional[float]]:
"""
SpotMaxPrice to be used to specify the maximum price you are willing to pay in US Dollars. Possible values are any decimal value greater than zero or -1 which indicates default price to be up-to on-demand.
"""
return pulumi.get(self, "spot_max_price")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Agent pool tags to be persisted on the agent pool virtual machine scale set.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
AgentPoolType represents types of an agent pool
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="upgradeSettings")
def upgrade_settings(self) -> pulumi.Output[Optional['outputs.AgentPoolUpgradeSettingsResponse']]:
"""
Settings for upgrading the agentpool
"""
return pulumi.get(self, "upgrade_settings")
@property
@pulumi.getter(name="vmSize")
def vm_size(self) -> pulumi.Output[Optional[str]]:
"""
Size of agent VMs.
"""
return pulumi.get(self, "vm_size")
@property
@pulumi.getter(name="vnetSubnetID")
def vnet_subnet_id(self) -> pulumi.Output[Optional[str]]:
"""
VNet SubnetID specifies the VNet's subnet identifier.
"""
return pulumi.get(self, "vnet_subnet_id")
| [
"[email protected]"
] | |
4b6a6d58de4e10eca11deabb352f84f61028a2b2 | 5fb3db282628d15567c11e8f99abb5259fb30c24 | /codemod/base.py | bf80a9f5e1d13bf802f8abe67079eab729c680be | [
"Apache-2.0"
] | permissive | solcolin/codemod | 66c17e1f3e0c6f2117260cb2550301864bafdcf8 | df92fb09d9b5d1b5434d3f383466ad77e2965259 | refs/heads/master | 2021-01-21T18:43:25.537933 | 2015-12-21T13:43:26 | 2015-12-21T13:43:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,317 | py | #!/usr/bin/env python2
# Copyright (c) 2007-2008 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See accompanying file LICENSE.
#
# @author Justin Rosenstein
import argparse
import os
import re
import sys
import textwrap
from math import ceil
def is_extensionless(path):
"""
Returns True if path has no extension.
>>> is_extensionless("./www/test")
True
>>> is_extensionless("./www/.profile")
True
>>> is_extensionless("./www/.dir/README")
True
>>> is_extensionless("./scripts/menu.js")
False
>>> is_extensionless("./LICENSE")
True
"""
_, ext = os.path.splitext(path)
return ext == ''
def matches_extension(path, extension):
"""
Returns True if path has the given extension, or if
the last path component matches the extension.
>>> matches_extension("./www/profile.php", "php")
True
>>> matches_extension("./scripts/menu.js", "html")
False
>>> matches_extension("./LICENSE", "LICENSE")
True
"""
_, ext = os.path.splitext(path)
if ext == '':
# If there is no extension, grab the file name and
# compare it to the given extension.
return os.path.basename(path) == extension
else:
        # If there is an extension, drop the leading period and
# compare it to the extension.
return ext[1:] == extension
def path_filter(extensions=None, exclude_paths=None):
"""
Returns a function (useful as the path_filter field of a Query instance)
that returns True iff the path it is given has an extension one of the
file extensions specified in `extensions`, an array of strings.
>>> map(path_filter(extensions=['js', 'php']),
... ['./profile.php', './q.jjs'])
[True, False]
>>> map(path_filter(exclude_paths=['html']),
... ['./html/x.php', './lib/y.js'])
[False, True]
>>> map(path_filter(extensions=['js', 'BUILD']),
... ['./a.js', './BUILD', './profile.php'])
[True, True, False]
"""
exclude_paths = exclude_paths or []
def the_filter(path):
if extensions:
if not any(
matches_extension(path, extension) for extension in extensions
):
return False
if exclude_paths:
for excluded in exclude_paths:
if path.startswith(
excluded
) or path.startswith('./' + excluded):
return False
return True
return the_filter
_default_path_filter = path_filter(
extensions=['php', 'phpt', 'js', 'css', 'rb', 'erb']
)
def run_interactive(query, editor=None, just_count=False, default_no=False):
"""
Asks the user about each patch suggested by the result of the query.
    @param query       An instance of the Query class.
    @param editor      Name of editor to use for manual intervention, e.g.
                       'vim' or 'emacs'. If omitted/None, defaults to $EDITOR
                       environment variable.
    @param just_count  If true: don't run normally. Just print out number of
                       places in the codebase where the query matches.
"""
global yes_to_all # noqa
# Load start from bookmark, if appropriate.
bookmark = _load_bookmark()
if bookmark:
print 'Resume where you left off, at %s (y/n)? ' % str(bookmark),
if (_prompt(default='y') == 'y'):
query.start_position = bookmark
# Okay, enough of this foolishness of computing start and end.
# Let's ask the user about some one line diffs!
print 'Searching for first instance...'
suggestions = query.generate_patches()
if just_count:
for count, _ in enumerate(suggestions):
terminal_move_to_beginning_of_line()
print count,
sys.stdout.flush() # since print statement ends in comma
print
return
for patch in suggestions:
_save_bookmark(patch.start_position)
_ask_about_patch(patch, editor, default_no)
print 'Searching...'
_delete_bookmark()
if yes_to_all:
terminal_clear()
print (
"You MUST indicate in your code review:"
" \"codemod with 'Yes to all'\"."
"Make sure you and other people review the changes.\n\n"
"With great power, comes great responsibility."
)
def line_transformation_suggestor(line_transformation, line_filter=None):
"""
Returns a suggestor (a function that takes a list of lines and yields
patches) where suggestions are the result of line-by-line transformations.
    @param line_transformation  Function that, given a line, returns another
                                line with which to replace the given one. If
                                the output line is different from the input
                                line, the user will be prompted about whether
                                to make the change. If the output is None,
                                this means "I don't have a suggestion, but the
                                user should still be asked if zhe wants to
                                edit the line."
    @param line_filter          Given a line, returns True or False. If False,
                                a line is ignored (as if line_transformation
                                returned the line itself for that line).
"""
def suggestor(lines):
for line_number, line in enumerate(lines):
if line_filter and not line_filter(line):
continue
candidate = line_transformation(line)
if candidate is None:
yield Patch(line_number)
else:
yield Patch(line_number, new_lines=[candidate])
return suggestor
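# Illustrative example (not part of the original module): a suggestor built
# from a simple line transformation that replaces tabs with four spaces.
# Passing it to Query(...).run_interactive() would prompt the user about
# every line containing a tab.
#
#   detab_suggestor = line_transformation_suggestor(
#       lambda line: line.replace('\t', '    '))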
def regex_suggestor(regex, substitution=None, ignore_case=False,
line_filter=None):
if isinstance(regex, str):
if ignore_case is False:
regex = re.compile(regex)
else:
regex = re.compile(regex, re.IGNORECASE)
if substitution is None:
line_transformation = lambda line: None if regex.search(line) else line
else:
line_transformation = lambda line: regex.sub(substitution, line)
return line_transformation_suggestor(line_transformation, line_filter)
def multiline_regex_suggestor(regex, substitution=None, ignore_case=False):
"""
Return a suggestor function which, given a list of lines, generates patches
to substitute matches of the given regex with (if provided) the given
substitution.
@param regex Either a regex object or a string describing a regex.
@param substitution Either None (meaning that we should flag the matches
without suggesting an alternative), or a string (using
\1 notation to backreference match groups) or a
function (that takes a match object as input).
"""
if isinstance(regex, str):
if ignore_case is False:
regex = re.compile(regex, re.DOTALL)
else:
regex = re.compile(regex, re.DOTALL | re.IGNORECASE)
if isinstance(substitution, str):
substitution_func = lambda match: match.expand(substitution)
else:
substitution_func = substitution
def suggestor(lines):
pos = 0
while True:
match = regex.search(''.join(lines), pos)
if not match:
break
start_row, start_col = _index_to_row_col(lines, match.start())
end_row, end_col = _index_to_row_col(lines, match.end() - 1)
if substitution is None:
new_lines = None
else:
# TODO: ugh, this is hacky. Clearly I need to rewrite
# this to use
# character-level patches, rather than line-level patches.
new_lines = substitution_func(match)
if new_lines is not None:
new_lines = ''.join((
lines[start_row][:start_col],
new_lines,
lines[end_row][end_col + 1:]
))
yield Patch(
start_line_number=start_row,
end_line_number=end_row + 1,
new_lines=new_lines
)
pos = match.start() + 1
return suggestor
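# Illustrative example: a multiline suggestor mirroring the <font>-to-<span>
# rewrite shown in the command-line help text near the end of this file.
#
#   font_suggestor = multiline_regex_suggestor(
#       r'<font *color="?(.*?)"?>(.*?)</font>',
#       r'<span style="color: \1;">\2</span>')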
def _index_to_row_col(lines, index):
r"""
>>> lines = ['hello\n', 'world\n']
>>> _index_to_row_col(lines, 0)
(0, 0)
>>> _index_to_row_col(lines, 7)
(1, 1)
"""
if index < 0:
raise IndexError('negative index')
current_index = 0
for line_number, line in enumerate(lines):
line_length = len(line)
if current_index + line_length > index:
return line_number, index - current_index
current_index += line_length
raise IndexError('index %d out of range' % index)
class Query(object):
"""
Represents a suggestor, along with a set of constraints on which files
should be fed to that suggestor.
>>> Query(lambda x: None, start='profile.php:20').start_position
Position('profile.php', 20)
"""
def __init__(self,
suggestor,
start=None,
end=None,
root_directory='.',
path_filter=_default_path_filter,
inc_extensionless=False):
"""
        @param suggestor            A function that takes a list of lines and
                                    generates instances of Patch to suggest.
                                    (Patches should not specify paths.)
        @param start                One of:
                                    - an instance of Position (indicating the
                                      place in the file hierarchy at which to
                                      resume),
                                    - a path:line_number-formatted string
                                      representing a position,
                                    - a string formatted like "25%"
                                      (indicating we should start 25% of the
                                      way through the process), or
                                    - None (indicating that we should start
                                      at the beginning).
        @param end                  An indicator of the position just *before*
                                    which to stop exploring, using one of the
                                    same formats used for start (where None
                                    means 'traverse to the end of the
                                    hierarchy').
        @param root_directory       The path whose descendant files are to be
                                    explored.
        @param path_filter          Given a path, returns True or False. If
                                    False, the entire file is ignored.
        @param inc_extensionless    If True, will include all files without an
                                    extension when checking against the
                                    path_filter
"""
self.suggestor = suggestor
self._start = start
self._end = end
self.root_directory = root_directory
self.path_filter = path_filter
self.inc_extensionless = inc_extensionless
self._all_patches_cache = None
def clone(self):
import copy
return copy.copy(self)
def _get_position(self, attr_name):
attr_value = getattr(self, attr_name)
if attr_value is None:
return None
if isinstance(attr_value, str) and attr_value.endswith('%'):
attr_value = self.compute_percentile(int(attr_value[:-1]))
setattr(self, attr_name, attr_value)
return Position(attr_value)
def get_start_position(self):
return self._get_position('_start')
start_position = property(get_start_position)
def get_end_position(self):
return self._get_position('_end')
end_position = property(get_end_position)
def get_all_patches(self, dont_use_cache=False):
"""
        Computes a list of all patches matching this query, though ignoring
        self.start_position and self.end_position.
        @param dont_use_cache If False, and get_all_patches has been called
                              before, return the list computed last time.
"""
if not dont_use_cache and self._all_patches_cache is not None:
return self._all_patches_cache
print (
'Computing full change list (since you specified a percentage)...'
),
sys.stdout.flush() # since print statement ends in comma
endless_query = self.clone()
endless_query.start_position = endless_query.end_position = None
self._all_patches_cache = list(endless_query.generate_patches())
return self._all_patches_cache
def compute_percentile(self, percentage):
"""
Returns a Position object that represents percentage%-far-of-the-way
through the larger task, as specified by this query.
@param percentage a number between 0 and 100.
"""
all_patches = self.get_all_patches()
return all_patches[
int(len(all_patches) * percentage / 100)
].start_position
def generate_patches(self):
"""
        Generates a list of patches for each file underneath
        self.root_directory that satisfies the query conditions, where
        patches for each file are suggested by self.suggestor.
"""
start_pos = self.start_position or Position(None, None)
end_pos = self.end_position or Position(None, None)
path_list = Query._walk_directory(self.root_directory)
path_list = Query._sublist(path_list, start_pos.path, end_pos.path)
path_list = (
path for path in path_list if
Query._path_looks_like_code(path) and
(self.path_filter(path)) or
(self.inc_extensionless and is_extensionless(path))
)
for path in path_list:
try:
lines = list(open(path))
except IOError:
# If we can't open the file--perhaps it's a symlink whose
                # destination no longer exists--then short-circuit.
continue
for patch in self.suggestor(lines):
if path == start_pos.path:
if patch.start_line_number < start_pos.line_number:
continue # suggestion is pre-start_pos
if path == end_pos.path:
if patch.end_line_number >= end_pos.line_number:
break # suggestion is post-end_pos
old_lines = lines[
patch.start_line_number:patch.end_line_number]
if patch.new_lines is None or patch.new_lines != old_lines:
patch.path = path
yield patch
# re-open file, in case contents changed
lines[:] = list(open(path))
def run_interactive(self, **kargs):
run_interactive(self, **kargs)
@staticmethod
def _walk_directory(root_directory):
"""
        Generates the paths of all files that are descendants
        of `root_directory`.
"""
paths = [os.path.join(root, name)
for root, dirs, files in os.walk(root_directory) # noqa
for name in files]
paths.sort()
return paths
@staticmethod
def _sublist(items, starting_value, ending_value=None):
"""
>>> list(Query._sublist((x*x for x in xrange(1, 100)), 16, 64))
[16, 25, 36, 49, 64]
"""
have_started = starting_value is None
for x in items:
have_started = have_started or x == starting_value
if have_started:
yield x
if ending_value is not None and x == ending_value:
break
@staticmethod
def _path_looks_like_code(path):
"""
>>> Query._path_looks_like_code('/home/jrosenstein/www/profile.php')
True
>>> Query._path_looks_like_code('./tags')
False
>>> Query._path_looks_like_code('/home/jrosenstein/www/profile.php~')
False
>>> Query._path_looks_like_code('/home/jrosenstein/www/.git/HEAD')
False
"""
return (
'/.' not in path and
path[-1] != '~' and
not path.endswith('tags') and
not path.endswith('TAGS')
)
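# Illustrative example (hypothetical paths): using Query from library code,
# as suggested in the epilog of the command-line help below.
#
#   query = Query(regex_suggestor(r'\bcolour\b', 'color'),
#                 root_directory='./www',
#                 path_filter=path_filter(extensions=['php', 'js']))
#   query.run_interactive()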
class Position(object):
"""
>>> p1, p2 = Position('./hi.php', 20), Position('./hi.php:20')
>>> p1.path == p2.path and p1.line_number == p2.line_number
True
>>> p1
Position('./hi.php', 20)
>>> print p1
./hi.php:20
>>> Position(p1)
Position('./hi.php', 20)
"""
def __init__(self, *path_and_line_number):
"""
You can use the two parameter version, and pass a
path and line number, or
you can use the one parameter version, and
pass a $path:$line_number string,
or another instance of Position to copy.
"""
if len(path_and_line_number) == 2:
self.path, self.line_number = path_and_line_number
elif len(path_and_line_number) == 1:
arg = path_and_line_number[0]
if isinstance(arg, Position):
self.path, self.line_number = arg.path, arg.line_number
else:
try:
self.path, line_number_s = arg.split(':')
self.line_number = int(line_number_s)
except ValueError:
raise ValueError(
'inappropriately formatted Position string: %s'
% path_and_line_number[0]
)
else:
raise TypeError('Position takes 1 or 2 arguments')
def __repr__(self):
return 'Position(%s, %d)' % (repr(self.path), self.line_number)
def __str__(self):
return '%s:%d' % (self.path, self.line_number)
class Patch(object):
"""
Represents a range of a file and (optionally) a list of lines with which to
replace that range.
>>> p = Patch(2, 4, ['X', 'Y', 'Z'], 'x.php')
>>> print p.render_range()
x.php:2-3
>>> p.start_position
Position('x.php', 2)
>>> l = ['a', 'b', 'c', 'd', 'e', 'f']
>>> p.apply_to(l)
>>> l
['a', 'b', 'X', 'Y', 'Z', 'e', 'f']
"""
def __init__(self, start_line_number, end_line_number=None, new_lines=None,
path=None): # noqa
"""
Constructs a Patch object.
        @param end_line_number    The line number just *after* the end of the
                                  range. Defaults to start_line_number + 1,
                                  i.e. a one-line diff.
        @param new_lines          The set of lines with which to replace the
                                  range specified, or a newline-delimited
                                  string. Omitting this means that this
                                  "patch" doesn't actually suggest a change.
        @param path               Path is optional only so that suggestors
                                  that have been passed a list of lines don't
                                  have to set the path explicitly.
                                  (It'll get set by the suggestor's caller.)
"""
self.path = path
self.start_line_number = start_line_number
self.end_line_number = end_line_number
self.new_lines = new_lines
if self.end_line_number is None:
self.end_line_number = self.start_line_number + 1
if isinstance(self.new_lines, str):
self.new_lines = self.new_lines.splitlines(True)
def __repr__(self):
        return 'Patch(%s)' % ', '.join(map(repr, [
self.path,
self.start_line_number,
self.end_line_number,
self.new_lines
]))
def apply_to(self, lines):
if self.new_lines is None:
raise ValueError('Can\'t apply patch without suggested new lines.')
lines[self.start_line_number:self.end_line_number] = self.new_lines
def render_range(self):
path = self.path or '<unknown>'
if self.start_line_number == self.end_line_number - 1:
return '%s:%d' % (path, self.start_line_number)
else:
return '%s:%d-%d' % (
path,
self.start_line_number, self.end_line_number - 1
)
def get_start_position(self):
return Position(self.path, self.start_line_number)
start_position = property(get_start_position)
def print_patch(patch, lines_to_print, file_lines=None):
if file_lines is None:
file_lines = list(open(patch.path))
size_of_old = patch.end_line_number - patch.start_line_number
size_of_new = len(patch.new_lines) if patch.new_lines else 0
size_of_diff = size_of_old + size_of_new
size_of_context = max(0, lines_to_print - size_of_diff)
size_of_up_context = int(size_of_context / 2)
size_of_down_context = int(ceil(size_of_context / 2))
start_context_line_number = patch.start_line_number - size_of_up_context
end_context_line_number = patch.end_line_number + size_of_down_context
    def print_file_line(line_number):  # noqa
        # Print the given line of the file as context, or a '~' placeholder
        # when the line number falls outside the file.
        print (' %s' % file_lines[line_number]) if (
            0 <= line_number < len(file_lines)) else '~\n',
for i in xrange(start_context_line_number, patch.start_line_number):
print_file_line(i)
for i in xrange(patch.start_line_number, patch.end_line_number):
if patch.new_lines is not None:
terminal_print('- %s' % file_lines[i], color='RED')
else:
terminal_print('* %s' % file_lines[i], color='YELLOW')
if patch.new_lines is not None:
for line in patch.new_lines:
terminal_print('+ %s' % line, color='GREEN')
for i in xrange(patch.end_line_number, end_context_line_number):
print_file_line(i)
yes_to_all = False
def _ask_about_patch(patch, editor, default_no):
global yes_to_all
default_action = 'n' if default_no else 'y'
terminal_clear()
terminal_print('%s\n' % patch.render_range(), color='WHITE')
print
lines = list(open(patch.path))
print_patch(patch, terminal_get_size()[0] - 20, lines)
print
if patch.new_lines is not None:
if not yes_to_all:
if default_no:
print ('Accept change (y = yes, n = no [default], e = edit, ' +
'A = yes to all, E = yes+edit)? '),
else:
print ('Accept change (y = yes [default], n = no, e = edit, ' +
'A = yes to all, E = yes+edit)? '),
p = _prompt('yneEA', default=default_action)
else:
p = 'y'
else:
print '(e = edit [default], n = skip line)? ',
p = _prompt('en', default='e')
if p in 'A':
yes_to_all = True
p = 'y'
if p in 'yE':
patch.apply_to(lines)
_save(patch.path, lines)
if p in 'eE':
run_editor(patch.start_position, editor)
def _prompt(letters='yn', default=None):
"""
Wait for the user to type a character (and hit Enter). If the user enters
one of the characters in `letters`, return that character. If the user
hits Enter without entering a character, and `default` is specified,
returns `default`. Otherwise, asks the user to enter a character again.
"""
while True:
try:
input_text = sys.stdin.readline().strip()
except KeyboardInterrupt:
sys.exit(0)
if input_text and input_text in letters:
return input_text
if default is not None and input_text == '':
return default
print 'Come again?'
def _save(path, lines):
file_w = open(path, 'w')
for line in lines:
file_w.write(line)
file_w.close()
def run_editor(position, editor=None):
editor = editor or os.environ.get('EDITOR') or 'vim'
os.system('%s +%d %s' % (editor, position.line_number + 1, position.path))
#
# Bookmarking functions. codemod saves a file called .codemod.bookmark to
# keep track of where you were the last time you exited in the middle of
# an interactive sesh.
#
def _save_bookmark(position):
file_w = open('.codemod.bookmark', 'w')
file_w.write(str(position))
file_w.close()
def _load_bookmark():
try:
bookmark_file = open('.codemod.bookmark')
except IOError:
return None
contents = bookmark_file.readline().strip()
bookmark_file.close()
return Position(contents)
def _delete_bookmark():
try:
os.remove('.codemod.bookmark')
except OSError:
pass # file didn't exist
#
# Functions for working with the terminal. Should probably be moved to a
# standalone library.
#
def terminal_get_size(default_size=(25, 80)):
"""
Return (number of rows, number of columns) for the terminal,
if they can be determined, or `default_size` if they can't.
"""
def ioctl_gwinsz(fd): # TABULATION FUNCTIONS
try: # Discover terminal width
import fcntl
import termios
import struct
return struct.unpack(
'hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
)
except Exception:
return None
# try open fds
size = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
if not size:
# ...then ctty
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
size = ioctl_gwinsz(fd)
os.close(fd)
except Exception:
pass
if not size:
# env vars or finally defaults
try:
size = (os.environ.get('LINES'), os.environ.get('COLUMNS'))
except Exception:
return default_size
return map(int, size)
def terminal_clear():
"""
Like calling the `clear` UNIX command. If that fails, just prints a bunch
of newlines :-P
"""
if not _terminal_use_capability('clear'):
print '\n' * 8
def terminal_move_to_beginning_of_line():
"""
Jumps the cursor back to the beginning of the current line of text.
"""
if not _terminal_use_capability('cr'):
print
def _terminal_use_capability(capability_name):
"""
If the terminal supports the given capability, output it. Return whether
it was output.
"""
import curses
curses.setupterm()
capability = curses.tigetstr(capability_name)
if capability:
sys.stdout.write(capability)
return bool(capability)
def terminal_print(text, color):
"""Print text in the specified color, without a terminating newline."""
_terminal_set_color(color)
print text,
_terminal_restore_color()
def _terminal_set_color(color):
import curses
def color_code(set_capability, possible_colors):
try:
color_index = possible_colors.split(' ').index(color)
except ValueError:
return None
set_code = curses.tigetstr(set_capability)
if not set_code:
return None
return curses.tparm(set_code, color_index)
code = (
color_code(
'setaf', 'BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE'
) or color_code(
'setf', 'BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE'
)
)
if code:
sys.stdout.write(code)
def _terminal_restore_color():
import curses
sys.stdout.write(curses.tigetstr('sgr0'))
#
# Code to make this run as an executable from the command line.
#
def _parse_command_line():
global yes_to_all
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(r"""
codemod.py is a tool/library to assist you with large-scale
codebase refactors
that can be partially automated but still require
human oversight and
            occasional intervention.
Example: Let's say you're deprecating your use
of the <font> tag. From the
command line, you might make progress by running:
codemod.py -m -d /home/jrosenstein/www --extensions php,html \
'<font *color="?(.*?)"?>(.*?)</font>' \
'<span style="color: \1;">\2</span>'
For each match of the regex, you'll be shown a colored diff,
and asked if you
want to accept the change (the replacement of
the <font> tag with a <span>
tag), reject it, or edit the line in question
in your $EDITOR of choice.
"""),
epilog=textwrap.dedent(r"""
You can also use codemod for transformations that are much
more sophisticated
than regular expression substitution. Rather than using
the command line, you
write Python code that looks like:
import codemod
codemod.Query(...).run_interactive()
See the documentation for the Query class for details.
@author Justin Rosenstein
""")
)
parser.add_argument('-m', action='store_true',
help='Have regex work over multiple lines '
'(e.g. have dot match newlines). '
'By default, codemod applies the regex one '
'line at a time.')
parser.add_argument('-d', action='store', type=str, default='.',
help='The path whose descendent files '
'are to be explored. '
'Defaults to current dir.')
parser.add_argument('-i', action='store_true',
help='Perform case-insensitive search.')
parser.add_argument('--start', action='store', type=str,
                        help='A path:line_number-formatted position somewhere'
                             ' in the hierarchy from which to begin exploring,'
                             ' or a percentage (e.g. "--start 25%%") of '
                             'the way through to start. '
                             'Useful if you\'re divvying up the '
                             'substitution task across multiple people.')
parser.add_argument('--end', action='store', type=str,
help='A path:line_number-formatted position '
'somewhere in the hierarchy just *before* '
'which we should stop exploring, '
'or a percentage of the way through, '
'just before which to end.')
parser.add_argument('--extensions', action='store', type=str,
help='A comma-delimited list of file extensions '
'to process.')
parser.add_argument('--include-extensionless', action='store_true',
help='If set, this will check files without '
'an extension, along with any matching file '
'extensions passed in --extensions')
parser.add_argument('--exclude-paths', action='store', type=str,
help='A comma-delimited list of paths to exclude.')
parser.add_argument('--accept-all', action='store_true',
help='Automatically accept all '
'changes (use with caution).')
parser.add_argument('--default-no', action='store_true',
help='If set, this will make the default '
'option to not accept the change.')
parser.add_argument('--editor', action='store', type=str,
                        help='Specify an editor, e.g. "vim" or "emacs". '
'If omitted, defaults to $EDITOR environment '
'variable.')
parser.add_argument('--count', action='store_true',
help='Don\'t run normally. Instead, just print '
                             'out number of places in the codebase '
'where the \'query\' matches.')
parser.add_argument('match', nargs='?', action='store', type=str,
help='Regular expression to match.')
parser.add_argument('subst', nargs='?', action='store', type=str,
help='Substitution to replace with.')
arguments = parser.parse_args()
if (
arguments.extensions is None
) and (arguments.include_extensionless is False):
parser.print_usage()
sys.exit(0)
yes_to_all = arguments.accept_all
query_options = {}
query_options['suggestor'] = (
multiline_regex_suggestor if arguments.m else regex_suggestor
)(arguments.match, arguments.subst, arguments.i)
query_options['start'] = arguments.start
query_options['end'] = arguments.end
query_options['root_directory'] = arguments.d
query_options['inc_extensionless'] = arguments.include_extensionless
if arguments.extensions is not None or arguments.exclude_paths is not None:
query_options['path_filter'] = (
path_filter(extensions=arguments.extensions.split(',')
if arguments.extensions is not None else None,
exclude_paths=arguments.exclude_paths.split(',')
if arguments.exclude_paths is not None else None))
options = {}
options['query'] = Query(**query_options)
if arguments.editor is not None:
options['editor'] = arguments.editor
options['just_count'] = arguments.count
options['default_no'] = arguments.default_no
return options
def main():
options = _parse_command_line()
run_interactive(**options)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3f5aafbcf9931ad72e65de3c2d356b6fddbf2328 | 55e11b61b3230f7442dd2c8f2c16754ad50dcaf6 | /code/SLFN/training.py | 801ed20b9e22b128eae974493e8cffee9b990256 | [] | no_license | manuwhs/B-ADSNs | f70f534ebf1389f2fe51c4d46978ca1c24bbb671 | 6fe0c8456f71dbeb72b172baccdbf98caaa86d7c | refs/heads/master | 2021-04-28T03:06:26.873816 | 2018-04-18T21:54:08 | 2018-04-18T21:54:08 | 122,131,782 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,058 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 24 01:31:58 2015
@author: montoya
"""
import numpy as np
from sklearn import cross_validation
import paramClasses as paC
from time import time # For the random seed
def train (self):
# Adapt the labels so that they are correct (-1 or 0 and transform multivariate if needed)
self.Ytrain = paC.adapt_labels(self.Ytrain, mode = self.outMode)
self.Yval = paC.adapt_labels(self.Yval, mode = self.outMode )
if (self.Xtest != []): # If there is a test dataset.
self.Ytest = paC.adapt_labels(self.Ytest, mode = self.outMode )
# Values that will be stored in Exec_list for later processing
self.TrError = np.zeros((self.Nruns,1))
self.ValError = np.zeros((self.Nruns,1))
self.TstError = np.zeros((self.Nruns,1))
for r in range (self.Nruns):
self.train_CV(r = r)
# print self.TrError[r], self.ValError[r] , self.TstError[r]
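# Illustrative sketch (names taken from this module, usage assumed): these
# functions are written as methods of a learner object, so a typical call
# sequence would be
#
#   learner.set_Train(Xtrain, Ytrain)   # training split
#   learner.set_Val(Xval, Yval)         # validation split
#   learner.train()                     # Nruns repetitions of train_CV()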
def train_once (self):
# print "train_once"
# print self.D
# print self
self.init_Weights() # Initialize
    # Check which training algorithm is configured and call it with its parameters.
    # D is the de-emphasis vector: the distribution of sample probabilities.
if (self.trainingAlg.trAlg == "ELM"):
self.ELM_train(self.trainingAlg.param)
if (self.trainingAlg.trAlg == "BP"):
self.BP_train(self.trainingAlg.param)
if (self.trainingAlg.trAlg == "BMBP"):
self.BMBP_train(self.trainingAlg.param)
if (self.trainingAlg.trAlg == "ELMT"):
self.ELMT_train(self.trainingAlg.param)
if (self.trainingAlg.trAlg == "LDAT"):
self.LDAT_train(self.trainingAlg.param)
if (self.trainingAlg.trAlg == "LDA"):
self.LDA_train(self.trainingAlg.param)
def train_CV (self, r):
# Trains the learner CV times using cross validation
total_Xtrain = self.Xtrain
total_Ytrain = self.Ytrain
## Get the random seed and use it
if (self.InitRandomSeed == -1): # If no seed is specified
self.RandomSeed[r] = int((time()%1 * 100000))
np.random.seed(self.RandomSeed[r])
else:
self.RandomSeed[r] = self.InitRandomSeed
np.random.seed(self.RandomSeed[r])
TrError = 0;
ValError = 0;
TstError = 0;
# print "train_CV"
# print self.CV
# print self.D
if (self.CV == 1):
# If the validation is performed with just the training set
# Then the validation set is the original self.Xval.
""" Why you may ask ?? """
# In other aggregate solutions, like Boosting, the CV is done
# over the whole structure, not layer by layer. In this cases,
# the CV of the SLFN will be 1 always and its the Boosting "train"
# the one in charge for changing the Validation set and training set.
self.train_once()
TrError += self.score(self.Xtrain, self.Ytrain)
ValError += self.score(self.Xval, self.Yval)
if (self.Xtest != []): # If there is a test dataset.
TstError += self.score(self.Xtest, self.Ytest)
if (self.CV > 1):
stkfold = cross_validation.StratifiedKFold(total_Ytrain.ravel(), n_folds = self.CV)
for train_index, val_index in stkfold:
# print train_index
self.set_Train(total_Xtrain[train_index],total_Ytrain[train_index])
self.set_Val(total_Xtrain[val_index],total_Ytrain[val_index])
self.train_once()
TrError += self.score(self.Xtrain, self.Ytrain)
ValError += self.score(self.Xval, self.Yval)
if (self.Xtest != []): # If there is a test dataset.
TstError += self.score(self.Xtest, self.Ytest)
self.TrError[r] = TrError / self.CV
self.ValError[r] = ValError / self.CV
self.TstError[r] = TstError / self.CV
self.Xtrain = total_Xtrain # Restore the original Xtrain
self.Ytrain = total_Ytrain
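# Illustrative sketch (assumed call order; `learner` and the set_* methods are the
# ones referenced above, not defined in this file): the functions here are written
# as methods of a learner object, so a typical run would look roughly like
#   learner.set_Train(Xtrain, Ytrain)
#   learner.set_Val(Xval, Yval)
#   train(learner)   # Nruns repetitions, each averaged over the CV folds
#   print(learner.TrError.mean(), learner.ValError.mean(), learner.TstError.mean())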
| [
"[email protected]"
] | |
654d7d6f7c0da9cba7eba0bd180f21513014fd75 | 684dcc0478abc504e32059efac049fe0bf3ca7e0 | /build/android/apk_operations.py | bfd5f54c05842ded53218d65485b174850d58790 | [
"BSD-3-Clause"
] | permissive | Agi-Developer/chromium | 86f751fb858b5eae535c589b26d3e8b353459dee | 8466ad258c8499e315df51d42f3b387bc327a7b6 | refs/heads/master | 2023-02-27T00:14:34.120092 | 2018-06-25T22:38:08 | 2018-06-25T22:38:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 47,414 | py | #!/usr/bin/env python
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Using colorama.Fore/Back/Style members
# pylint: disable=no-member
import argparse
import collections
import json
import logging
import os
import pipes
import posixpath
import random
import re
import shlex
import sys
import tempfile
import textwrap
import devil_chromium
from devil import devil_env
from devil.android import apk_helper
from devil.android import device_errors
from devil.android import device_utils
from devil.android import flag_changer
from devil.android.sdk import adb_wrapper
from devil.android.sdk import intent
from devil.android.sdk import version_codes
from devil.utils import run_tests_helper
with devil_env.SysPath(os.path.join(os.path.dirname(__file__), '..', '..',
'third_party', 'colorama', 'src')):
import colorama
from incremental_install import installer
from pylib import constants
from pylib.symbols import deobfuscator
from pylib.utils import simpleperf
# Matches messages only on pre-L (Dalvik) that are spammy and unimportant.
_DALVIK_IGNORE_PATTERN = re.compile('|'.join([
r'^Added shared lib',
r'^Could not find ',
r'^DexOpt:',
r'^GC_',
r'^Late-enabling CheckJNI',
r'^Link of class',
r'^No JNI_OnLoad found in',
r'^Trying to load lib',
r'^Unable to resolve superclass',
r'^VFY:',
r'^WAIT_',
]))
def _Colorize(text, style=''):
return (style
+ text
+ colorama.Style.RESET_ALL)
def _InstallApk(devices, apk, install_dict):
def install(device):
if install_dict:
installer.Install(device, install_dict, apk=apk)
else:
device.Install(apk, reinstall=True)
logging.info('Installing %sincremental apk.', '' if install_dict else 'non-')
device_utils.DeviceUtils.parallel(devices).pMap(install)
def _UninstallApk(devices, install_dict, package_name):
def uninstall(device):
if install_dict:
installer.Uninstall(device, package_name)
else:
device.Uninstall(package_name)
device_utils.DeviceUtils.parallel(devices).pMap(uninstall)
def _NormalizeProcessName(debug_process_name, package_name):
if not debug_process_name:
debug_process_name = package_name
elif debug_process_name.startswith(':'):
debug_process_name = package_name + debug_process_name
elif '.' not in debug_process_name:
debug_process_name = package_name + ':' + debug_process_name
return debug_process_name
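# Worked examples for the normalization above (the package name is a placeholder):
#   _NormalizeProcessName(None, 'com.example.app')              -> 'com.example.app'
#   _NormalizeProcessName(':privileged_process0', 'com.example.app')
#                                            -> 'com.example.app:privileged_process0'
#   _NormalizeProcessName('baz', 'com.example.app')             -> 'com.example.app:baz'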
def _LaunchUrl(devices, package_name, argv=None, command_line_flags_file=None,
url=None, apk=None, wait_for_java_debugger=False,
debug_process_name=None, nokill=None):
if argv and command_line_flags_file is None:
raise Exception('This apk does not support any flags.')
if url:
# TODO(agrieve): Launch could be changed to require only package name by
# parsing "dumpsys package" rather than relying on the apk.
if not apk:
raise Exception('Launching with URL is not supported when using '
'--package-name. Use --apk-path instead.')
view_activity = apk.GetViewActivityName()
if not view_activity:
raise Exception('APK does not support launching with URLs.')
debug_process_name = _NormalizeProcessName(debug_process_name, package_name)
def launch(device):
# --persistent is required to have Settings.Global.DEBUG_APP be set, which
# we currently use to allow reading of flags. https://crbug.com/784947
if not nokill:
cmd = ['am', 'set-debug-app', '--persistent', debug_process_name]
if wait_for_java_debugger:
cmd[-1:-1] = ['-w']
# Ignore error since it will fail if apk is not debuggable.
device.RunShellCommand(cmd, check_return=False)
# The flags are first updated with input args.
if command_line_flags_file:
changer = flag_changer.FlagChanger(device, command_line_flags_file)
flags = []
if argv:
flags = shlex.split(argv)
try:
changer.ReplaceFlags(flags)
except device_errors.AdbShellCommandFailedError:
logging.exception('Failed to set flags')
if url is None:
# Simulate app icon click if no url is present.
cmd = ['monkey', '-p', package_name, '-c',
'android.intent.category.LAUNCHER', '1']
device.RunShellCommand(cmd, check_return=True)
else:
launch_intent = intent.Intent(action='android.intent.action.VIEW',
activity=view_activity, data=url,
package=package_name)
device.StartActivity(launch_intent)
device_utils.DeviceUtils.parallel(devices).pMap(launch)
if wait_for_java_debugger:
print ('Waiting for debugger to attach to process: ' +
_Colorize(debug_process_name, colorama.Fore.YELLOW))
def _ChangeFlags(devices, argv, command_line_flags_file):
if argv is None:
_DisplayArgs(devices, command_line_flags_file)
else:
flags = shlex.split(argv)
def update(device):
changer = flag_changer.FlagChanger(device, command_line_flags_file)
changer.ReplaceFlags(flags)
device_utils.DeviceUtils.parallel(devices).pMap(update)
def _TargetCpuToTargetArch(target_cpu):
if target_cpu == 'x64':
return 'x86_64'
if target_cpu == 'mipsel':
return 'mips'
return target_cpu
def _RunGdb(device, package_name, debug_process_name, pid, output_directory,
target_cpu, extra_args, verbose):
if not pid:
debug_process_name = _NormalizeProcessName(debug_process_name, package_name)
pid = device.GetApplicationPids(debug_process_name, at_most_one=True)
if not pid:
logging.warning('App not running. Sending launch intent.')
_LaunchUrl([device], package_name)
pid = device.GetApplicationPids(debug_process_name, at_most_one=True)
if not pid:
raise Exception('Unable to find process "%s"' % debug_process_name)
gdb_script_path = os.path.dirname(__file__) + '/adb_gdb'
cmd = [
gdb_script_path,
'--package-name=%s' % package_name,
'--output-directory=%s' % output_directory,
'--adb=%s' % adb_wrapper.AdbWrapper.GetAdbPath(),
'--device=%s' % device.serial,
'--pid=%s' % pid,
      # Use one lib dir per device so that changing between devices does not
      # require refetching the device libs.
'--pull-libs-dir=/tmp/adb-gdb-libs-%s' % device.serial,
]
# Enable verbose output of adb_gdb if it's set for this script.
if verbose:
cmd.append('--verbose')
if target_cpu:
cmd.append('--target-arch=%s' % _TargetCpuToTargetArch(target_cpu))
cmd.extend(extra_args)
logging.warning('Running: %s', ' '.join(pipes.quote(x) for x in cmd))
print _Colorize(
'All subsequent output is from adb_gdb script.', colorama.Fore.YELLOW)
os.execv(gdb_script_path, cmd)
def _PrintPerDeviceOutput(devices, results, single_line=False):
for d, result in zip(devices, results):
if not single_line and d is not devices[0]:
sys.stdout.write('\n')
sys.stdout.write(
_Colorize('{} ({}):'.format(d, d.build_description),
colorama.Fore.YELLOW))
sys.stdout.write(' ' if single_line else '\n')
yield result
def _RunMemUsage(devices, package_name, query_app=False):
cmd_args = ['dumpsys', 'meminfo']
if not query_app:
cmd_args.append('--local')
def mem_usage_helper(d):
ret = []
for process in sorted(_GetPackageProcesses(d, package_name)):
meminfo = d.RunShellCommand(cmd_args + [str(process.pid)])
ret.append((process.name, '\n'.join(meminfo)))
return ret
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_results = parallel_devices.pMap(mem_usage_helper).pGet(None)
for result in _PrintPerDeviceOutput(devices, all_results):
if not result:
print 'No processes found.'
else:
for name, usage in sorted(result):
print _Colorize(
'==== Output of "dumpsys meminfo %s" ====' % name,
colorama.Fore.GREEN)
print usage
def _DuHelper(device, path_spec, run_as=None):
"""Runs "du -s -k |path_spec|" on |device| and returns parsed result.
Args:
device: A DeviceUtils instance.
path_spec: The list of paths to run du on. May contain shell expansions
(will not be escaped).
run_as: Package name to run as, or None to run as shell user. If not None
and app is not android:debuggable (run-as fails), then command will be
run as root.
Returns:
A dict of path->size in kb containing all paths in |path_spec| that exist on
device. Paths that do not exist are silently ignored.
"""
# Example output for: du -s -k /data/data/org.chromium.chrome/{*,.*}
# 144 /data/data/org.chromium.chrome/cache
# 8 /data/data/org.chromium.chrome/files
# <snip>
# du: .*: No such file or directory
# The -d flag works differently across android version, so use -s instead.
# Without the explicit 2>&1, stderr and stdout get combined at random :(.
cmd_str = 'du -s -k ' + path_spec + ' 2>&1'
lines = device.RunShellCommand(cmd_str, run_as=run_as, shell=True,
check_return=False)
output = '\n'.join(lines)
# run-as: Package 'com.android.chrome' is not debuggable
if output.startswith('run-as:'):
# check_return=False needed for when some paths in path_spec do not exist.
lines = device.RunShellCommand(cmd_str, as_root=True, shell=True,
check_return=False)
ret = {}
try:
for line in lines:
# du: .*: No such file or directory
if line.startswith('du:'):
continue
size, subpath = line.split(None, 1)
ret[subpath] = int(size)
return ret
except ValueError:
logging.error('du command was: %s', cmd_str)
logging.error('Failed to parse du output:\n%s', output)
raise
def _RunDiskUsage(devices, package_name, verbose):
# Measuring dex size is a bit complicated:
# https://source.android.com/devices/tech/dalvik/jit-compiler
#
# For KitKat and below:
# dumpsys package contains:
# dataDir=/data/data/org.chromium.chrome
# codePath=/data/app/org.chromium.chrome-1.apk
# resourcePath=/data/app/org.chromium.chrome-1.apk
# nativeLibraryPath=/data/app-lib/org.chromium.chrome-1
# To measure odex:
# ls -l /data/dalvik-cache/data@[email protected]@classes.dex
#
# For Android L and M (and maybe for N+ system apps):
# dumpsys package contains:
# codePath=/data/app/org.chromium.chrome-1
# resourcePath=/data/app/org.chromium.chrome-1
# legacyNativeLibraryDir=/data/app/org.chromium.chrome-1/lib
# To measure odex:
# # Option 1:
# /data/dalvik-cache/arm/data@[email protected]@[email protected]
# /data/dalvik-cache/arm/data@[email protected]@[email protected]
# ls -l /data/dalvik-cache/profiles/org.chromium.chrome
# (these profiles all appear to be 0 bytes)
# # Option 2:
# ls -l /data/app/org.chromium.chrome-1/oat/arm/base.odex
#
# For Android N+:
# dumpsys package contains:
# dataDir=/data/user/0/org.chromium.chrome
# codePath=/data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==
# resourcePath=/data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==
# legacyNativeLibraryDir=/data/app/org.chromium.chrome-GUID/lib
# Instruction Set: arm
# path: /data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==/base.apk
# status: /data/.../oat/arm/base.odex[status=kOatUpToDate, compilation_f
# ilter=quicken]
# Instruction Set: arm64
# path: /data/app/org.chromium.chrome-UuCZ71IE-i5sZgHAkU49_w==/base.apk
# status: /data/.../oat/arm64/base.odex[status=..., compilation_filter=q
# uicken]
# To measure odex:
# ls -l /data/app/.../oat/arm/base.odex
# ls -l /data/app/.../oat/arm/base.vdex (optional)
# To measure the correct odex size:
# cmd package compile -m speed org.chromium.chrome # For webview
# cmd package compile -m speed-profile org.chromium.chrome # For others
def disk_usage_helper(d):
package_output = '\n'.join(d.RunShellCommand(
['dumpsys', 'package', package_name], check_return=True))
# Prints a message but does not return error when apk is not installed.
if 'Unable to find package:' in package_output:
return None
# Ignore system apks.
idx = package_output.find('Hidden system packages:')
if idx != -1:
package_output = package_output[:idx]
try:
data_dir = re.search(r'dataDir=(.*)', package_output).group(1)
code_path = re.search(r'codePath=(.*)', package_output).group(1)
lib_path = re.search(r'(?:legacyN|n)ativeLibrary(?:Dir|Path)=(.*)',
package_output).group(1)
except AttributeError:
raise Exception('Error parsing dumpsys output: ' + package_output)
compilation_filters = set()
# Match "compilation_filter=value", where a line break can occur at any spot
# (refer to examples above).
awful_wrapping = r'\s*'.join('compilation_filter=')
for m in re.finditer(awful_wrapping + r'([\s\S]+?)[\],]', package_output):
compilation_filters.add(re.sub(r'\s+', '', m.group(1)))
compilation_filter = ','.join(sorted(compilation_filters))
data_dir_sizes = _DuHelper(d, '%s/{*,.*}' % data_dir, run_as=package_name)
# Measure code_cache separately since it can be large.
code_cache_sizes = {}
code_cache_dir = next(
(k for k in data_dir_sizes if k.endswith('/code_cache')), None)
if code_cache_dir:
data_dir_sizes.pop(code_cache_dir)
code_cache_sizes = _DuHelper(d, '%s/{*,.*}' % code_cache_dir,
run_as=package_name)
apk_path_spec = code_path
if not apk_path_spec.endswith('.apk'):
apk_path_spec += '/*.apk'
apk_sizes = _DuHelper(d, apk_path_spec)
if lib_path.endswith('/lib'):
# Shows architecture subdirectory.
lib_sizes = _DuHelper(d, '%s/{*,.*}' % lib_path)
else:
lib_sizes = _DuHelper(d, lib_path)
# Look at all possible locations for odex files.
odex_paths = []
for apk_path in apk_sizes:
mangled_apk_path = apk_path[1:].replace('/', '@')
apk_basename = posixpath.basename(apk_path)[:-4]
for ext in ('dex', 'odex', 'vdex', 'art'):
# Easier to check all architectures than to determine active ones.
for arch in ('arm', 'arm64', 'x86', 'x86_64', 'mips', 'mips64'):
odex_paths.append(
'%s/oat/%s/%s.%s' % (code_path, arch, apk_basename, ext))
# No app could possibly have more than 6 dex files.
for suffix in ('', '2', '3', '4', '5'):
odex_paths.append('/data/dalvik-cache/%s/%s@classes%s.%s' % (
arch, mangled_apk_path, suffix, ext))
# This path does not have |arch|, so don't repeat it for every arch.
if arch == 'arm':
odex_paths.append('/data/dalvik-cache/%s@classes%s.dex' % (
mangled_apk_path, suffix))
odex_sizes = _DuHelper(d, ' '.join(pipes.quote(p) for p in odex_paths))
return (data_dir_sizes, code_cache_sizes, apk_sizes, lib_sizes, odex_sizes,
compilation_filter)
def print_sizes(desc, sizes):
print '%s: %dkb' % (desc, sum(sizes.itervalues()))
if verbose:
for path, size in sorted(sizes.iteritems()):
print ' %s: %skb' % (path, size)
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_results = parallel_devices.pMap(disk_usage_helper).pGet(None)
for result in _PrintPerDeviceOutput(devices, all_results):
if not result:
print 'APK is not installed.'
continue
(data_dir_sizes, code_cache_sizes, apk_sizes, lib_sizes, odex_sizes,
compilation_filter) = result
total = sum(sum(sizes.itervalues()) for sizes in result[:-1])
print_sizes('Apk', apk_sizes)
print_sizes('App Data (non-code cache)', data_dir_sizes)
print_sizes('App Data (code cache)', code_cache_sizes)
print_sizes('Native Libs', lib_sizes)
show_warning = compilation_filter and 'speed' not in compilation_filter
compilation_filter = compilation_filter or 'n/a'
print_sizes('odex (compilation_filter=%s)' % compilation_filter, odex_sizes)
if show_warning:
logging.warning('For a more realistic odex size, run:')
logging.warning(' %s compile-dex [speed|speed-profile]', sys.argv[0])
print 'Total: %skb (%.1fmb)' % (total, total / 1024.0)
class _LogcatProcessor(object):
ParsedLine = collections.namedtuple(
'ParsedLine',
      ['date', 'invocation_time', 'pid', 'tid', 'priority', 'tag', 'message'])
def __init__(self, device, package_name, deobfuscate=None, verbose=False):
self._device = device
self._package_name = package_name
self._verbose = verbose
self._deobfuscator = deobfuscate
self._primary_pid = None
self._my_pids = set()
self._seen_pids = set()
self._UpdateMyPids()
def _UpdateMyPids(self):
# We intentionally do not clear self._my_pids to make sure that the
# ProcessLine method below also includes lines from processes which may
# have already exited.
self._primary_pid = None
for process in _GetPackageProcesses(self._device, self._package_name):
# We take only the first "main" process found in order to account for
# possibly forked() processes.
if ':' not in process.name and self._primary_pid is None:
self._primary_pid = process.pid
self._my_pids.add(process.pid)
def _GetPidStyle(self, pid, dim=False):
if pid == self._primary_pid:
return colorama.Fore.WHITE
elif pid in self._my_pids:
# TODO(wnwen): Use one separate persistent color per process, pop LRU
return colorama.Fore.YELLOW
elif dim:
return colorama.Style.DIM
return ''
def _GetPriorityStyle(self, priority, dim=False):
# pylint:disable=no-self-use
if dim:
return ''
style = ''
if priority == 'E' or priority == 'F':
style = colorama.Back.RED
elif priority == 'W':
style = colorama.Back.YELLOW
elif priority == 'I':
style = colorama.Back.GREEN
elif priority == 'D':
style = colorama.Back.BLUE
return style + colorama.Fore.BLACK
def _ParseLine(self, line):
tokens = line.split(None, 6)
date = tokens[0]
    invocation_time = tokens[1]
pid = int(tokens[2])
tid = int(tokens[3])
priority = tokens[4]
tag = tokens[5]
if len(tokens) > 6:
original_message = tokens[6]
else: # Empty log message
original_message = ''
# Example:
# 09-19 06:35:51.113 9060 9154 W GCoreFlp: No location...
# 09-19 06:01:26.174 9060 10617 I Auth : [ReflectiveChannelBinder]...
# Parsing "GCoreFlp:" vs "Auth :", we only want tag to contain the word,
# and we don't want to keep the colon for the message.
if tag[-1] == ':':
tag = tag[:-1]
else:
original_message = original_message[2:]
return self.ParsedLine(
        date, invocation_time, pid, tid, priority, tag, original_message)
def _PrintParsedLine(self, parsed_line, dim=False):
tid_style = ''
# Make the main thread bright.
if not dim and parsed_line.pid == parsed_line.tid:
tid_style = colorama.Style.BRIGHT
pid_style = self._GetPidStyle(parsed_line.pid, dim)
# We have to pad before adding color as that changes the width of the tag.
pid_str = _Colorize('{:5}'.format(parsed_line.pid), pid_style)
tid_str = _Colorize('{:5}'.format(parsed_line.tid), tid_style)
tag = _Colorize('{:8}'.format(parsed_line.tag),
pid_style + ('' if dim else colorama.Style.BRIGHT))
priority = _Colorize(parsed_line.priority,
self._GetPriorityStyle(parsed_line.priority))
messages = [parsed_line.message]
if self._deobfuscator:
messages = self._deobfuscator.TransformLines(messages)
for message in messages:
message = _Colorize(message, pid_style)
sys.stdout.write('{} {} {} {} {} {}: {}\n'.format(
        parsed_line.date, parsed_line.invocation_time, pid_str, tid_str,
priority, tag, message))
def ProcessLine(self, line, fast=False):
if not line or line.startswith('------'):
return
log = self._ParseLine(line)
if log.pid not in self._seen_pids:
self._seen_pids.add(log.pid)
if not fast:
self._UpdateMyPids()
owned_pid = log.pid in self._my_pids
if fast and not owned_pid:
return
if owned_pid and not self._verbose and log.tag == 'dalvikvm':
if _DALVIK_IGNORE_PATTERN.match(log.message):
return
if owned_pid or self._verbose or (
log.priority == 'F' or # Java crash dump
log.tag == 'ActivityManager' or # Android system
log.tag == 'DEBUG'): # Native crash dump
self._PrintParsedLine(log, not owned_pid)
def _RunLogcat(device, package_name, mapping_path, verbose):
deobfuscate = None
if mapping_path:
try:
deobfuscate = deobfuscator.Deobfuscator(mapping_path)
except OSError:
sys.stderr.write('Error executing "bin/java_deobfuscate". '
'Did you forget to build it?\n')
sys.exit(1)
try:
logcat_processor = _LogcatProcessor(
device, package_name, deobfuscate, verbose)
nonce = 'apk_wrappers.py nonce={}'.format(random.random())
device.RunShellCommand(['log', nonce])
fast = True
for line in device.adb.Logcat(logcat_format='threadtime'):
try:
logcat_processor.ProcessLine(line, fast)
except:
sys.stderr.write('Failed to process line: ' + line)
raise
if fast and nonce in line:
fast = False
except KeyboardInterrupt:
pass # Don't show stack trace upon Ctrl-C
finally:
if mapping_path:
deobfuscate.Close()
def _GetPackageProcesses(device, package_name):
return [
p for p in device.ListProcesses(package_name)
if p.name == package_name or p.name.startswith(package_name + ':')]
def _RunPs(devices, package_name):
parallel_devices = device_utils.DeviceUtils.parallel(devices)
all_processes = parallel_devices.pMap(
lambda d: _GetPackageProcesses(d, package_name)).pGet(None)
for processes in _PrintPerDeviceOutput(devices, all_processes):
if not processes:
print 'No processes found.'
else:
proc_map = collections.defaultdict(list)
for p in processes:
proc_map[p.name].append(str(p.pid))
for name, pids in sorted(proc_map.items()):
print name, ','.join(pids)
def _RunShell(devices, package_name, cmd):
if cmd:
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.RunShellCommand(
cmd, run_as=package_name).pGet(None)
for output in _PrintPerDeviceOutput(devices, outputs):
for line in output:
print line
else:
adb_path = adb_wrapper.AdbWrapper.GetAdbPath()
cmd = [adb_path, '-s', devices[0].serial, 'shell']
# Pre-N devices do not support -t flag.
if devices[0].build_version_sdk >= version_codes.NOUGAT:
cmd += ['-t', 'run-as', package_name]
else:
print 'Upon entering the shell, run:'
print 'run-as', package_name
print
os.execv(adb_path, cmd)
def _RunCompileDex(devices, package_name, compilation_filter):
cmd = ['cmd', 'package', 'compile', '-f', '-m', compilation_filter,
package_name]
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.RunShellCommand(cmd).pGet(None)
for output in _PrintPerDeviceOutput(devices, outputs):
for line in output:
print line
def _RunProfile(device, package_name, host_build_directory, pprof_out_path,
process_specifier, thread_specifier, extra_args):
simpleperf.PrepareDevice(device)
device_simpleperf_path = simpleperf.InstallSimpleperf(device, package_name)
with tempfile.NamedTemporaryFile() as fh:
host_simpleperf_out_path = fh.name
with simpleperf.RunSimpleperf(device, device_simpleperf_path, package_name,
process_specifier, thread_specifier,
extra_args, host_simpleperf_out_path):
sys.stdout.write('Profiler is running; press Enter to stop...')
sys.stdin.read(1)
sys.stdout.write('Post-processing data...')
sys.stdout.flush()
simpleperf.ConvertSimpleperfToPprof(host_simpleperf_out_path,
host_build_directory, pprof_out_path)
print textwrap.dedent("""
Profile data written to %(s)s.
To view profile as a call graph in browser:
pprof -web %(s)s
To print the hottest methods:
pprof -top %(s)s
pprof has many useful customization options; `pprof --help` for details.
""" % {'s': pprof_out_path})
def _GenerateAvailableDevicesMessage(devices):
devices_obj = device_utils.DeviceUtils.parallel(devices)
descriptions = devices_obj.pMap(lambda d: d.build_description).pGet(None)
msg = 'Available devices:\n'
for d, desc in zip(devices, descriptions):
msg += ' %s (%s)\n' % (d, desc)
return msg
# TODO(agrieve): add "--all" in the MultipleDevicesError message and use it here.
def _GenerateMissingAllFlagMessage(devices):
return ('More than one device available. Use --all to select all devices, ' +
'or use --device to select a device by serial.\n\n' +
_GenerateAvailableDevicesMessage(devices))
def _DisplayArgs(devices, command_line_flags_file):
def flags_helper(d):
changer = flag_changer.FlagChanger(d, command_line_flags_file)
return changer.GetCurrentFlags()
parallel_devices = device_utils.DeviceUtils.parallel(devices)
outputs = parallel_devices.pMap(flags_helper).pGet(None)
print 'Existing flags per-device (via /data/local/tmp/{}):'.format(
command_line_flags_file)
for flags in _PrintPerDeviceOutput(devices, outputs, single_line=True):
quoted_flags = ' '.join(pipes.quote(f) for f in flags)
print quoted_flags or 'No flags set.'
def _DeviceCachePath(device, output_directory):
file_name = 'device_cache_%s.json' % device.serial
return os.path.join(output_directory, file_name)
def _LoadDeviceCaches(devices, output_directory):
if not output_directory:
return
for d in devices:
cache_path = _DeviceCachePath(d, output_directory)
if os.path.exists(cache_path):
logging.debug('Using device cache: %s', cache_path)
with open(cache_path) as f:
d.LoadCacheData(f.read())
# Delete the cached file so that any exceptions cause it to be cleared.
os.unlink(cache_path)
else:
logging.debug('No cache present for device: %s', d)
def _SaveDeviceCaches(devices, output_directory):
if not output_directory:
return
for d in devices:
cache_path = _DeviceCachePath(d, output_directory)
with open(cache_path, 'w') as f:
f.write(d.DumpCacheData())
logging.info('Wrote device cache: %s', cache_path)
class _Command(object):
name = None
description = None
long_description = None
needs_package_name = False
needs_output_directory = False
needs_apk_path = False
supports_incremental = False
accepts_command_line_flags = False
accepts_args = False
all_devices_by_default = False
calls_exec = False
supports_multiple_devices = True
def __init__(self, from_wrapper_script):
self._parser = None
self._from_wrapper_script = from_wrapper_script
self.args = None
self.apk_helper = None
self.install_dict = None
self.devices = None
# Do not support incremental install outside the context of wrapper scripts.
if not from_wrapper_script:
self.supports_incremental = False
def _RegisterExtraArgs(self, subp):
pass
def RegisterArgs(self, parser):
subp = parser.add_parser(
self.name, help=self.description,
description=self.long_description or self.description,
formatter_class=argparse.RawDescriptionHelpFormatter)
self._parser = subp
subp.set_defaults(command=self)
subp.add_argument('--all',
action='store_true',
default=self.all_devices_by_default,
help='Operate on all connected devices.',)
subp.add_argument('-d',
'--device',
action='append',
default=[],
dest='devices',
help='Target device for script to work on. Enter '
'multiple times for multiple devices.')
subp.add_argument('-v',
'--verbose',
action='count',
default=0,
dest='verbose_count',
help='Verbose level (multiple times for more)')
group = subp.add_argument_group('%s arguments' % self.name)
if self.needs_package_name:
# Always gleaned from apk when using wrapper scripts.
group.add_argument('--package-name',
help=argparse.SUPPRESS if self._from_wrapper_script else (
"App's package name."))
if self.needs_apk_path or self.needs_package_name:
# Adding this argument to the subparser would override the set_defaults()
      # value set on the parent parser (even if None).
if not self._from_wrapper_script:
group.add_argument('--apk-path',
required=self.needs_apk_path,
help='Path to .apk')
if self.supports_incremental:
group.add_argument('--incremental',
action='store_true',
default=False,
help='Always install an incremental apk.')
group.add_argument('--non-incremental',
action='store_true',
default=False,
help='Always install a non-incremental apk.')
# accepts_command_line_flags and accepts_args are mutually exclusive.
# argparse will throw if they are both set.
if self.accepts_command_line_flags:
group.add_argument(
'--args', help='Command-line flags. Use = to assign args.')
if self.accepts_args:
group.add_argument(
'--args', help='Extra arguments. Use = to assign args')
if not self._from_wrapper_script and self.accepts_command_line_flags:
# Provided by wrapper scripts.
group.add_argument(
'--command-line-flags-file',
help='Name of the command-line flags file')
self._RegisterExtraArgs(group)
def ProcessArgs(self, args):
devices = device_utils.DeviceUtils.HealthyDevices(
device_arg=args.devices,
enable_device_files_cache=bool(args.output_directory),
default_retries=0)
self.args = args
self.devices = devices
# TODO(agrieve): Device cache should not depend on output directory.
    # Maybe put it in /tmp?
_LoadDeviceCaches(devices, args.output_directory)
# Ensure these keys always exist. They are set by wrapper scripts, but not
# always added when not using wrapper scripts.
args.__dict__.setdefault('apk_path', None)
args.__dict__.setdefault('incremental_json', None)
try:
if len(devices) > 1:
if not self.supports_multiple_devices:
self._parser.error(device_errors.MultipleDevicesError(devices))
if not args.all and not args.devices:
self._parser.error(_GenerateMissingAllFlagMessage(devices))
if self.supports_incremental:
if args.incremental_json:
with open(args.incremental_json) as f:
install_dict = json.load(f)
apk_path = os.path.join(args.output_directory,
install_dict['apk_path'])
incremental_apk_exists = os.path.exists(apk_path)
if args.incremental and args.non_incremental:
self._parser.error('Must use only one of --incremental and '
'--non-incremental')
elif args.non_incremental:
if not args.apk_path:
self._parser.error('Apk has not been built.')
args.incremental_json = None
elif args.incremental:
if not (args.incremental_json and incremental_apk_exists):
self._parser.error('Incremental apk has not been built.')
args.apk_path = None
if args.apk_path and args.incremental_json and incremental_apk_exists:
self._parser.error('Both incremental and non-incremental apks exist. '
'Select using --incremental or --non-incremental')
if self.needs_apk_path or args.apk_path or args.incremental_json:
if args.incremental_json:
if incremental_apk_exists:
self.install_dict = install_dict
self.apk_helper = apk_helper.ToHelper(
os.path.join(args.output_directory,
self.install_dict['apk_path']))
if not self.apk_helper and args.apk_path:
self.apk_helper = apk_helper.ToHelper(args.apk_path)
if not self.apk_helper:
self._parser.error(
'Neither incremental nor non-incremental apk is built.')
if self.needs_package_name and not args.package_name:
if self.apk_helper:
args.package_name = self.apk_helper.GetPackageName()
elif self._from_wrapper_script:
self._parser.error(
'Neither incremental nor non-incremental apk is built.')
else:
self._parser.error('One of --package-name or --apk-path is required.')
# Save cache now if command will not get a chance to afterwards.
if self.calls_exec:
_SaveDeviceCaches(devices, args.output_directory)
except:
_SaveDeviceCaches(devices, args.output_directory)
raise
class _DevicesCommand(_Command):
name = 'devices'
description = 'Describe attached devices.'
all_devices_by_default = True
def Run(self):
print _GenerateAvailableDevicesMessage(self.devices)
class _InstallCommand(_Command):
name = 'install'
description = 'Installs the APK to one or more devices.'
needs_apk_path = True
supports_incremental = True
def Run(self):
_InstallApk(self.devices, self.apk_helper, self.install_dict)
class _UninstallCommand(_Command):
name = 'uninstall'
  description = 'Removes the APK from one or more devices.'
needs_package_name = True
def Run(self):
_UninstallApk(self.devices, self.install_dict, self.args.package_name)
class _LaunchCommand(_Command):
name = 'launch'
description = ('Sends a launch intent for the APK after first writing the '
'command-line flags file.')
needs_package_name = True
accepts_command_line_flags = True
all_devices_by_default = True
def _RegisterExtraArgs(self, group):
group.add_argument('-w', '--wait-for-java-debugger', action='store_true',
help='Pause execution until debugger attaches. Applies '
'only to the main process. To have renderers wait, '
'use --args="--renderer-wait-for-java-debugger"')
group.add_argument('--debug-process-name',
help='Name of the process to debug. '
'E.g. "privileged_process0", or "foo.bar:baz"')
group.add_argument('--nokill', action='store_true',
help='Do not set the debug-app, nor set command-line '
'flags. Useful to load a URL without having the '
'app restart.')
group.add_argument('url', nargs='?', help='A URL to launch with.')
def Run(self):
_LaunchUrl(self.devices, self.args.package_name, argv=self.args.args,
command_line_flags_file=self.args.command_line_flags_file,
url=self.args.url, apk=self.apk_helper,
wait_for_java_debugger=self.args.wait_for_java_debugger,
debug_process_name=self.args.debug_process_name,
nokill=self.args.nokill)
class _StopCommand(_Command):
name = 'stop'
description = 'Force-stops the app.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
device_utils.DeviceUtils.parallel(self.devices).ForceStop(
self.args.package_name)
class _ClearDataCommand(_Command):
name = 'clear-data'
  description = 'Clears all app data.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
device_utils.DeviceUtils.parallel(self.devices).ClearApplicationState(
self.args.package_name)
class _ArgvCommand(_Command):
name = 'argv'
description = 'Display and optionally update command-line flags file.'
needs_package_name = True
accepts_command_line_flags = True
all_devices_by_default = True
def Run(self):
_ChangeFlags(self.devices, self.args.args,
self.args.command_line_flags_file)
class _GdbCommand(_Command):
name = 'gdb'
description = 'Runs //build/android/adb_gdb with apk-specific args.'
long_description = description + """
To attach to a process other than the APK's main process, use --pid=1234.
To list all PIDs, use the "ps" command.
If no apk process is currently running, sends a launch intent.
"""
needs_package_name = True
needs_output_directory = True
accepts_args = True
calls_exec = True
supports_multiple_devices = False
def Run(self):
extra_args = shlex.split(self.args.args or '')
_RunGdb(self.devices[0], self.args.package_name,
self.args.debug_process_name, self.args.pid,
self.args.output_directory, self.args.target_cpu, extra_args,
bool(self.args.verbose_count))
def _RegisterExtraArgs(self, group):
pid_group = group.add_mutually_exclusive_group()
pid_group.add_argument('--debug-process-name',
help='Name of the process to attach to. '
'E.g. "privileged_process0", or "foo.bar:baz"')
pid_group.add_argument('--pid',
help='The process ID to attach to. Defaults to '
'the main process for the package.')
class _LogcatCommand(_Command):
name = 'logcat'
  description = 'Runs "adb logcat" with filters relevant to the current APK.'
long_description = description + """
"Relevant filters" means:
* Log messages from processes belonging to the apk,
* Plus log messages from log tags: ActivityManager|DEBUG,
* Plus fatal logs from any process,
  * Minus spammy dalvikvm logs (for pre-L devices).
Colors:
* Primary process is white
* Other processes (gpu, renderer) are yellow
* Non-apk processes are grey
* UI thread has a bolded Thread-ID
Java stack traces are detected and deobfuscated (for release builds).
To disable filtering, (but keep coloring), use --verbose.
"""
needs_package_name = True
supports_multiple_devices = False
def Run(self):
mapping = self.args.proguard_mapping_path
if self.args.no_deobfuscate:
mapping = None
_RunLogcat(self.devices[0], self.args.package_name, mapping,
bool(self.args.verbose_count))
def _RegisterExtraArgs(self, group):
if self._from_wrapper_script:
group.add_argument('--no-deobfuscate', action='store_true',
help='Disables ProGuard deobfuscation of logcat.')
else:
group.set_defaults(no_deobfuscate=False)
group.add_argument('--proguard-mapping-path',
help='Path to ProGuard map (enables deobfuscation)')
class _PsCommand(_Command):
name = 'ps'
description = 'Show PIDs of any APK processes currently running.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
_RunPs(self.devices, self.args.package_name)
class _DiskUsageCommand(_Command):
name = 'disk-usage'
description = 'Show how much device storage is being consumed by the app.'
needs_package_name = True
all_devices_by_default = True
def Run(self):
_RunDiskUsage(self.devices, self.args.package_name,
bool(self.args.verbose_count))
class _MemUsageCommand(_Command):
name = 'mem-usage'
description = 'Show memory usage of currently running APK processes.'
needs_package_name = True
all_devices_by_default = True
def _RegisterExtraArgs(self, group):
group.add_argument('--query-app', action='store_true',
help='Do not add --local to "dumpsys meminfo". This will output '
'additional metrics (e.g. Context count), but also cause memory '
'to be used in order to gather the metrics.')
def Run(self):
_RunMemUsage(self.devices, self.args.package_name,
query_app=self.args.query_app)
class _ShellCommand(_Command):
name = 'shell'
description = ('Same as "adb shell <command>", but runs as the apk\'s uid '
'(via run-as). Useful for inspecting the app\'s data '
'directory.')
needs_package_name = True
@property
def calls_exec(self):
return not self.args.cmd
@property
def supports_multiple_devices(self):
return not self.args.cmd
def _RegisterExtraArgs(self, group):
group.add_argument(
'cmd', nargs=argparse.REMAINDER, help='Command to run.')
def Run(self):
_RunShell(self.devices, self.args.package_name, self.args.cmd)
class _CompileDexCommand(_Command):
name = 'compile-dex'
description = ('Applicable only for Android N+. Forces .odex files to be '
'compiled with the given compilation filter. To see existing '
'filter, use "disk-usage" command.')
needs_package_name = True
all_devices_by_default = True
def _RegisterExtraArgs(self, group):
group.add_argument(
'compilation_filter',
choices=['verify', 'quicken', 'space-profile', 'space',
'speed-profile', 'speed'],
help='For WebView/Monochrome, use "speed". For other apks, use '
'"speed-profile".')
def Run(self):
_RunCompileDex(self.devices, self.args.package_name,
self.args.compilation_filter)
class _ProfileCommand(_Command):
name = 'profile'
description = ('Run the simpleperf sampling CPU profiler on the currently-'
'running APK. If --args is used, the extra arguments will be '
'passed on to simpleperf; otherwise, the following default '
'arguments are used: -g -f 1000 -o /data/local/tmp/perf.data')
needs_package_name = True
needs_output_directory = True
supports_multiple_devices = False
accepts_args = True
def _RegisterExtraArgs(self, group):
group.add_argument(
'--profile-process', default='browser',
help=('Which process to profile. This may be a process name or pid '
'such as you would get from running `%s ps`; or '
'it can be one of (browser, renderer, gpu).' % sys.argv[0]))
group.add_argument(
'--profile-thread', default=None,
help=('(Optional) Profile only a single thread. This may be either a '
'thread ID such as you would get by running `adb shell ps -t` '
'(pre-Oreo) or `adb shell ps -e -T` (Oreo and later); or it may '
'be one of (io, compositor, main, render), in which case '
'--profile-process is also required. (Note that "render" thread '
'refers to a thread in the browser process that manages a '
'renderer; to profile the main thread of the renderer process, '
'use --profile-thread=main).'))
group.add_argument('--profile-output', default='profile.pb',
help='Output file for profiling data')
def Run(self):
extra_args = shlex.split(self.args.args or '')
_RunProfile(self.devices[0], self.args.package_name,
self.args.output_directory, self.args.profile_output,
self.args.profile_process, self.args.profile_thread,
extra_args)
class _RunCommand(_InstallCommand, _LaunchCommand, _LogcatCommand):
name = 'run'
description = 'Install, launch, and show logcat (when targeting one device).'
all_devices_by_default = False
supports_multiple_devices = True
def _RegisterExtraArgs(self, group):
_InstallCommand._RegisterExtraArgs(self, group)
_LaunchCommand._RegisterExtraArgs(self, group)
_LogcatCommand._RegisterExtraArgs(self, group)
group.add_argument('--no-logcat', action='store_true',
help='Install and launch, but do not enter logcat.')
def Run(self):
logging.warning('Installing...')
_InstallCommand.Run(self)
logging.warning('Sending launch intent...')
_LaunchCommand.Run(self)
if len(self.devices) == 1 and not self.args.no_logcat:
logging.warning('Entering logcat...')
_LogcatCommand.Run(self)
_COMMANDS = [
_DevicesCommand,
_InstallCommand,
_UninstallCommand,
_LaunchCommand,
_StopCommand,
_ClearDataCommand,
_ArgvCommand,
_GdbCommand,
_LogcatCommand,
_PsCommand,
_DiskUsageCommand,
_MemUsageCommand,
_ShellCommand,
_CompileDexCommand,
_ProfileCommand,
_RunCommand,
]
def _ParseArgs(parser, from_wrapper_script):
subparsers = parser.add_subparsers()
commands = [clazz(from_wrapper_script) for clazz in _COMMANDS]
for command in commands:
if from_wrapper_script or not command.needs_output_directory:
command.RegisterArgs(subparsers)
# Show extended help when no command is passed.
argv = sys.argv[1:]
if not argv:
argv = ['--help']
return parser.parse_args(argv)
def _RunInternal(parser, output_directory=None):
colorama.init()
parser.set_defaults(output_directory=output_directory)
from_wrapper_script = bool(output_directory)
args = _ParseArgs(parser, from_wrapper_script)
run_tests_helper.SetLogLevel(args.verbose_count)
args.command.ProcessArgs(args)
args.command.Run()
# Incremental install depends on the cache being cleared when uninstalling.
if args.command.name != 'uninstall':
_SaveDeviceCaches(args.command.devices, output_directory)
# TODO(agrieve): Remove =None from target_cpu on or after October 2017.
# It exists only so that stale wrapper scripts continue to work.
def Run(output_directory, apk_path, incremental_json, command_line_flags_file,
target_cpu, proguard_mapping_path):
"""Entry point for generated wrapper scripts."""
constants.SetOutputDirectory(output_directory)
devil_chromium.Initialize(output_directory=output_directory)
parser = argparse.ArgumentParser()
exists_or_none = lambda p: p if p and os.path.exists(p) else None
parser.set_defaults(
command_line_flags_file=command_line_flags_file,
target_cpu=target_cpu,
apk_path=exists_or_none(apk_path),
incremental_json=exists_or_none(incremental_json),
proguard_mapping_path=proguard_mapping_path)
_RunInternal(parser, output_directory=output_directory)
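# Illustrative sketch (paths and values are hypothetical): a generated wrapper script
# imports this module and calls Run() roughly as follows.
#   Run(output_directory='out/Default',
#       apk_path='out/Default/apks/Example.apk',
#       incremental_json=None,
#       command_line_flags_file='example-command-line',
#       target_cpu='arm64',
#       proguard_mapping_path=None)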
def main():
devil_chromium.Initialize()
_RunInternal(argparse.ArgumentParser(), output_directory=None)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
c700458f0091d7453e741f1c4bd9c0cf550770d1 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2022_02_01_preview/models/_models_py3.py | 7d13ec5c1d65b60a1cb7e121af13f4f876fde286 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-python-cwi",
"PSF-2.0",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"Python-2.0",
"MPL-2.0",
"LicenseRef-scancode-other-copyleft",
"HPND",
"ODbL-1.0",
"GPL-3.0-only",
"ZPL-2.1",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 698,083 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._azure_machine_learning_workspaces_enums import *
class DatastoreCredentials(msrest.serialization.Model):
"""Base definition for datastore credentials.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AccountKeyDatastoreCredentials, CertificateDatastoreCredentials, KerberosKeytabCredentials, KerberosPasswordCredentials, NoneDatastoreCredentials, SasDatastoreCredentials, ServicePrincipalDatastoreCredentials.
All required parameters must be populated in order to send to Azure.
:ivar credentials_type: Required. [Required] Credential type used to authentication with
storage.Constant filled by server. Possible values include: "AccountKey", "Certificate",
"None", "Sas", "ServicePrincipal", "KerberosKeytab", "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
"""
_validation = {
'credentials_type': {'required': True},
}
_attribute_map = {
'credentials_type': {'key': 'credentialsType', 'type': 'str'},
}
_subtype_map = {
'credentials_type': {'AccountKey': 'AccountKeyDatastoreCredentials', 'Certificate': 'CertificateDatastoreCredentials', 'KerberosKeytab': 'KerberosKeytabCredentials', 'KerberosPassword': 'KerberosPasswordCredentials', 'None': 'NoneDatastoreCredentials', 'Sas': 'SasDatastoreCredentials', 'ServicePrincipal': 'ServicePrincipalDatastoreCredentials'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(DatastoreCredentials, self).__init__(**kwargs)
self.credentials_type = None # type: Optional[str]
class AccountKeyDatastoreCredentials(DatastoreCredentials):
"""Account key datastore credentials configuration.
All required parameters must be populated in order to send to Azure.
:ivar credentials_type: Required. [Required] Credential type used to authentication with
storage.Constant filled by server. Possible values include: "AccountKey", "Certificate",
"None", "Sas", "ServicePrincipal", "KerberosKeytab", "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
:ivar secrets: Required. [Required] Storage account secrets.
:vartype secrets: ~azure.mgmt.machinelearningservices.models.AccountKeyDatastoreSecrets
"""
_validation = {
'credentials_type': {'required': True},
'secrets': {'required': True},
}
_attribute_map = {
'credentials_type': {'key': 'credentialsType', 'type': 'str'},
'secrets': {'key': 'secrets', 'type': 'AccountKeyDatastoreSecrets'},
}
def __init__(
self,
*,
secrets: "AccountKeyDatastoreSecrets",
**kwargs
):
"""
:keyword secrets: Required. [Required] Storage account secrets.
:paramtype secrets: ~azure.mgmt.machinelearningservices.models.AccountKeyDatastoreSecrets
"""
super(AccountKeyDatastoreCredentials, self).__init__(**kwargs)
self.credentials_type = 'AccountKey' # type: str
self.secrets = secrets
class DatastoreSecrets(msrest.serialization.Model):
"""Base definition for datastore secrets.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AccountKeyDatastoreSecrets, CertificateDatastoreSecrets, KerberosKeytabSecrets, KerberosPasswordSecrets, SasDatastoreSecrets, ServicePrincipalDatastoreSecrets.
All required parameters must be populated in order to send to Azure.
:ivar secrets_type: Required. [Required] Credential type used to authentication with
storage.Constant filled by server. Possible values include: "AccountKey", "Certificate", "Sas",
"ServicePrincipal", "KerberosPassword", "KerberosKeytab".
:vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
"""
_validation = {
'secrets_type': {'required': True},
}
_attribute_map = {
'secrets_type': {'key': 'secretsType', 'type': 'str'},
}
_subtype_map = {
'secrets_type': {'AccountKey': 'AccountKeyDatastoreSecrets', 'Certificate': 'CertificateDatastoreSecrets', 'KerberosKeytab': 'KerberosKeytabSecrets', 'KerberosPassword': 'KerberosPasswordSecrets', 'Sas': 'SasDatastoreSecrets', 'ServicePrincipal': 'ServicePrincipalDatastoreSecrets'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(DatastoreSecrets, self).__init__(**kwargs)
self.secrets_type = None # type: Optional[str]
class AccountKeyDatastoreSecrets(DatastoreSecrets):
"""Datastore account key secrets.
All required parameters must be populated in order to send to Azure.
:ivar secrets_type: Required. [Required] Credential type used to authentication with
storage.Constant filled by server. Possible values include: "AccountKey", "Certificate", "Sas",
"ServicePrincipal", "KerberosPassword", "KerberosKeytab".
:vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
:ivar key: Storage account key.
:vartype key: str
"""
_validation = {
'secrets_type': {'required': True},
}
_attribute_map = {
'secrets_type': {'key': 'secretsType', 'type': 'str'},
'key': {'key': 'key', 'type': 'str'},
}
def __init__(
self,
*,
key: Optional[str] = None,
**kwargs
):
"""
:keyword key: Storage account key.
:paramtype key: str
"""
super(AccountKeyDatastoreSecrets, self).__init__(**kwargs)
self.secrets_type = 'AccountKey' # type: str
self.key = key
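# Illustrative sketch (not generated code; the key value is a placeholder): building
# the account-key credential models defined above. The `credentials_type` /
# `secrets_type` discriminators are set by the constructors, which is also how the
# `_subtype_map` on the base classes resolves the concrete model on deserialization.
#   example_secrets = AccountKeyDatastoreSecrets(key="<storage-account-key>")
#   example_credentials = AccountKeyDatastoreCredentials(secrets=example_secrets)
#   assert example_credentials.credentials_type == "AccountKey"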
class IdentityConfiguration(msrest.serialization.Model):
"""Base definition for identity configuration.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AmlToken, ManagedIdentity, UserIdentity.
All required parameters must be populated in order to send to Azure.
:ivar identity_type: Required. [Required] Specifies the type of identity framework.Constant
filled by server. Possible values include: "Managed", "AMLToken", "UserIdentity".
:vartype identity_type: str or
~azure.mgmt.machinelearningservices.models.IdentityConfigurationType
"""
_validation = {
'identity_type': {'required': True},
}
_attribute_map = {
'identity_type': {'key': 'identityType', 'type': 'str'},
}
_subtype_map = {
'identity_type': {'AMLToken': 'AmlToken', 'Managed': 'ManagedIdentity', 'UserIdentity': 'UserIdentity'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(IdentityConfiguration, self).__init__(**kwargs)
self.identity_type = None # type: Optional[str]
class AmlToken(IdentityConfiguration):
"""AML Token identity configuration.
All required parameters must be populated in order to send to Azure.
:ivar identity_type: Required. [Required] Specifies the type of identity framework.Constant
filled by server. Possible values include: "Managed", "AMLToken", "UserIdentity".
:vartype identity_type: str or
~azure.mgmt.machinelearningservices.models.IdentityConfigurationType
"""
_validation = {
'identity_type': {'required': True},
}
_attribute_map = {
'identity_type': {'key': 'identityType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(AmlToken, self).__init__(**kwargs)
self.identity_type = 'AMLToken' # type: str
class ResourceBase(msrest.serialization.Model):
"""ResourceBase.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
"""
super(ResourceBase, self).__init__(**kwargs)
self.description = description
self.properties = properties
self.tags = tags
class AssetBase(ResourceBase):
"""AssetBase.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_anonymous: If the name version are system generated (anonymous registration).
:vartype is_anonymous: bool
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_anonymous': {'key': 'isAnonymous', 'type': 'bool'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_anonymous: Optional[bool] = False,
is_archived: Optional[bool] = False,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_anonymous: If the name version are system generated (anonymous registration).
:paramtype is_anonymous: bool
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
"""
super(AssetBase, self).__init__(description=description, properties=properties, tags=tags, **kwargs)
self.is_anonymous = is_anonymous
self.is_archived = is_archived
class AssetContainer(ResourceBase):
"""AssetContainer.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar latest_version: The latest version inside this container.
:vartype latest_version: str
:ivar next_version: The next auto incremental version.
:vartype next_version: str
"""
_validation = {
'latest_version': {'readonly': True},
'next_version': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'latest_version': {'key': 'latestVersion', 'type': 'str'},
'next_version': {'key': 'nextVersion', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_archived: Optional[bool] = False,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
"""
super(AssetContainer, self).__init__(description=description, properties=properties, tags=tags, **kwargs)
self.is_archived = is_archived
self.latest_version = None
self.next_version = None
class AssetJobInput(msrest.serialization.Model):
"""Asset input type.
All required parameters must be populated in order to send to Azure.
:ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:ivar uri: Required. [Required] Input Asset URI.
:vartype uri: str
"""
_validation = {
'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
}
def __init__(
self,
*,
uri: str,
mode: Optional[Union[str, "InputDeliveryMode"]] = None,
**kwargs
):
"""
:keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:keyword uri: Required. [Required] Input Asset URI.
:paramtype uri: str
"""
super(AssetJobInput, self).__init__(**kwargs)
self.mode = mode
self.uri = uri
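# Minimal sketch (illustrative only; the URI is a hypothetical placeholder, not a real
# datastore path): AssetJobInput requires a non-empty "uri", while "mode" accepts either
# an InputDeliveryMode enum member or one of its documented string values.
def _example_asset_job_input() -> AssetJobInput:
    return AssetJobInput(
        uri="azureml://datastores/workspaceblobstore/paths/data/train.csv",  # placeholder
        mode="ReadOnlyMount",  # one of the documented InputDeliveryMode values
    )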
class AssetJobOutput(msrest.serialization.Model):
"""Asset output type.
:ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
"""
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
}
def __init__(
self,
*,
mode: Optional[Union[str, "OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
"""
super(AssetJobOutput, self).__init__(**kwargs)
self.mode = mode
self.uri = uri
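# Companion sketch to the input example above (illustrative only): both fields of an
# output binding are optional, so a bare AssetJobOutput() is also valid; here an
# upload-mode binding is shown with a hypothetical placeholder URI.
def _example_asset_job_output() -> AssetJobOutput:
    return AssetJobOutput(
        uri="azureml://datastores/workspaceblobstore/paths/outputs/",  # placeholder
        mode="Upload",  # one of the documented OutputDeliveryMode values
    )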
class AssetReferenceBase(msrest.serialization.Model):
"""Base definition for asset references.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: DataPathAssetReference, IdAssetReference, OutputPathAssetReference.
All required parameters must be populated in order to send to Azure.
:ivar reference_type: Required. [Required] Specifies the type of asset reference. Constant
filled by server. Possible values include: "Id", "DataPath", "OutputPath".
:vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
"""
_validation = {
'reference_type': {'required': True},
}
_attribute_map = {
'reference_type': {'key': 'referenceType', 'type': 'str'},
}
_subtype_map = {
'reference_type': {'DataPath': 'DataPathAssetReference', 'Id': 'IdAssetReference', 'OutputPath': 'OutputPathAssetReference'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(AssetReferenceBase, self).__init__(**kwargs)
self.reference_type = None # type: Optional[str]
class ForecastHorizon(msrest.serialization.Model):
"""The desired maximum forecast horizon in units of time-series frequency.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AutoForecastHorizon, CustomForecastHorizon.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Set forecast horizon value selection mode. Constant filled by
server. Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.ForecastHorizonMode
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
}
_subtype_map = {
'mode': {'Auto': 'AutoForecastHorizon', 'Custom': 'CustomForecastHorizon'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(ForecastHorizon, self).__init__(**kwargs)
self.mode = None # type: Optional[str]
class AutoForecastHorizon(ForecastHorizon):
"""Forecast horizon determined automatically by system.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Set forecast horizon value selection mode. Constant filled by
server. Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.ForecastHorizonMode
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(AutoForecastHorizon, self).__init__(**kwargs)
self.mode = 'Auto' # type: str
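# Sketch of how the discriminator wiring above is meant to behave (assuming standard
# msrest polymorphic deserialization; not part of the generated surface): the base class
# resolves the wire value of "mode" through _subtype_map and instantiates the matching
# subclass, and AutoForecastHorizon's constructor pins mode to 'Auto'.
def _example_forecast_horizon_dispatch() -> ForecastHorizon:
    horizon = ForecastHorizon.deserialize({"mode": "Auto"})
    assert isinstance(horizon, AutoForecastHorizon)  # dispatched via _subtype_map
    return horizon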
class JobBaseDetails(ResourceBase):
"""Base definition for a job.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AutoMLJob, CommandJob, PipelineJob, SweepJob.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar compute_id: ARM resource ID of the compute resource.
:vartype compute_id: str
:ivar display_name: Display name of job.
:vartype display_name: str
:ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:vartype experiment_name: str
:ivar identity: Identity configuration. If set, this should be one of AmlToken,
ManagedIdentity, UserIdentity or null.
Defaults to AmlToken if null.
:vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:ivar is_archived: Is the asset archived?
:vartype is_archived: bool
:ivar job_type: Required. [Required] Specifies the type of job. Constant filled by server.
Possible values include: "AutoML", "Command", "Sweep", "Pipeline".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
:ivar schedule: Schedule definition of job.
If no schedule is provided, the job is run once, immediately after submission.
:vartype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:ivar status: Status of the job. Possible values include: "NotStarted", "Starting",
"Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
"Failed", "Canceled", "NotResponding", "Paused", "Unknown", "Scheduled".
:vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
"""
_validation = {
'job_type': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'compute_id': {'key': 'computeId', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'experiment_name': {'key': 'experimentName', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'IdentityConfiguration'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'job_type': {'key': 'jobType', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': 'ScheduleBase'},
'services': {'key': 'services', 'type': '{JobService}'},
'status': {'key': 'status', 'type': 'str'},
}
_subtype_map = {
'job_type': {'AutoML': 'AutoMLJob', 'Command': 'CommandJob', 'Pipeline': 'PipelineJob', 'Sweep': 'SweepJob'}
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
compute_id: Optional[str] = None,
display_name: Optional[str] = None,
experiment_name: Optional[str] = "Default",
identity: Optional["IdentityConfiguration"] = None,
is_archived: Optional[bool] = False,
schedule: Optional["ScheduleBase"] = None,
services: Optional[Dict[str, "JobService"]] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword compute_id: ARM resource ID of the compute resource.
:paramtype compute_id: str
:keyword display_name: Display name of job.
:paramtype display_name: str
:keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:paramtype experiment_name: str
:keyword identity: Identity configuration. If set, this should be one of AmlToken,
ManagedIdentity, UserIdentity or null.
Defaults to AmlToken if null.
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?
:paramtype is_archived: bool
:keyword schedule: Schedule definition of job.
If no schedule is provided, the job is run once, immediately after submission.
:paramtype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
"""
super(JobBaseDetails, self).__init__(description=description, properties=properties, tags=tags, **kwargs)
self.compute_id = compute_id
self.display_name = display_name
self.experiment_name = experiment_name
self.identity = identity
self.is_archived = is_archived
self.job_type = 'JobBaseDetails' # type: str
self.schedule = schedule
self.services = services
self.status = None
class AutoMLJob(JobBaseDetails):
"""AutoMLJob class.
Use this class for executing AutoML tasks like Classification/Regression etc.
See TaskType enum for all the tasks supported.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar compute_id: ARM resource ID of the compute resource.
:vartype compute_id: str
:ivar display_name: Display name of job.
:vartype display_name: str
:ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:vartype experiment_name: str
:ivar identity: Identity configuration. If set, this should be one of AmlToken,
ManagedIdentity, UserIdentity or null.
Defaults to AmlToken if null.
:vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:ivar is_archived: Is the asset archived?
:vartype is_archived: bool
:ivar job_type: Required. [Required] Specifies the type of job. Constant filled by server.
Possible values include: "AutoML", "Command", "Sweep", "Pipeline".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
:ivar schedule: Schedule definition of job.
If no schedule is provided, the job is run once, immediately after submission.
:vartype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:ivar status: Status of the job. Possible values include: "NotStarted", "Starting",
"Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
"Failed", "Canceled", "NotResponding", "Paused", "Unknown", "Scheduled".
:vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
:ivar environment_id: The ARM resource ID of the Environment specification for the job.
This is an optional value to provide; if not provided, AutoML will default this to Production
AutoML curated environment version when running the job.
:vartype environment_id: str
:ivar environment_variables: Environment variables included in the job.
:vartype environment_variables: dict[str, str]
:ivar outputs: Mapping of output data bindings used in the job.
:vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
:ivar resources: Compute Resource configuration for the job.
:vartype resources: ~azure.mgmt.machinelearningservices.models.ResourceConfiguration
:ivar task_details: Required. [Required] This represents a scenario which can be one of
Tables/NLP/Image.
:vartype task_details: ~azure.mgmt.machinelearningservices.models.AutoMLVertical
"""
_validation = {
'job_type': {'required': True},
'status': {'readonly': True},
'task_details': {'required': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'compute_id': {'key': 'computeId', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'experiment_name': {'key': 'experimentName', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'IdentityConfiguration'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'job_type': {'key': 'jobType', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': 'ScheduleBase'},
'services': {'key': 'services', 'type': '{JobService}'},
'status': {'key': 'status', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'outputs': {'key': 'outputs', 'type': '{JobOutput}'},
'resources': {'key': 'resources', 'type': 'ResourceConfiguration'},
'task_details': {'key': 'taskDetails', 'type': 'AutoMLVertical'},
}
def __init__(
self,
*,
task_details: "AutoMLVertical",
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
compute_id: Optional[str] = None,
display_name: Optional[str] = None,
experiment_name: Optional[str] = "Default",
identity: Optional["IdentityConfiguration"] = None,
is_archived: Optional[bool] = False,
schedule: Optional["ScheduleBase"] = None,
services: Optional[Dict[str, "JobService"]] = None,
environment_id: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
outputs: Optional[Dict[str, "JobOutput"]] = None,
resources: Optional["ResourceConfiguration"] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword compute_id: ARM resource ID of the compute resource.
:paramtype compute_id: str
:keyword display_name: Display name of job.
:paramtype display_name: str
:keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:paramtype experiment_name: str
:keyword identity: Identity configuration. If set, this should be one of AmlToken,
ManagedIdentity, UserIdentity or null.
Defaults to AmlToken if null.
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?
:paramtype is_archived: bool
:keyword schedule: Schedule definition of job.
If no schedule is provided, the job is run once, immediately after submission.
:paramtype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:keyword environment_id: The ARM resource ID of the Environment specification for the job.
This is an optional value to provide; if not provided, AutoML will default this to Production
AutoML curated environment version when running the job.
:paramtype environment_id: str
:keyword environment_variables: Environment variables included in the job.
:paramtype environment_variables: dict[str, str]
:keyword outputs: Mapping of output data bindings used in the job.
:paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
:keyword resources: Compute Resource configuration for the job.
:paramtype resources: ~azure.mgmt.machinelearningservices.models.ResourceConfiguration
:keyword task_details: Required. [Required] This represents a scenario which can be one of
Tables/NLP/Image.
:paramtype task_details: ~azure.mgmt.machinelearningservices.models.AutoMLVertical
"""
super(AutoMLJob, self).__init__(description=description, properties=properties, tags=tags, compute_id=compute_id, display_name=display_name, experiment_name=experiment_name, identity=identity, is_archived=is_archived, schedule=schedule, services=services, **kwargs)
self.job_type = 'AutoML' # type: str
self.environment_id = environment_id
self.environment_variables = environment_variables
self.outputs = outputs
self.resources = resources
self.task_details = task_details
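# Hedged sketch (illustrative only): AutoMLJob only requires task_details; everything
# else is optional. The task object is taken as a parameter because the AutoMLVertical
# subclasses (Classification, Forecasting, ...) are defined elsewhere in this module and
# their constructors are not shown here. The compute ID and tag values are hypothetical.
def _example_automl_job(task_details: "AutoMLVertical") -> AutoMLJob:
    return AutoMLJob(
        task_details=task_details,
        compute_id="/subscriptions/.../computes/cpu-cluster",  # hypothetical ARM resource ID
        experiment_name="automl-demo",
        tags={"purpose": "example"},
    )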
class AutoMLVertical(msrest.serialization.Model):
"""AutoML vertical class.
Base class for AutoML verticals - TableVertical/ImageVertical/NLPVertical.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Classification, Forecasting, ImageClassification, ImageClassificationMultilabel, ImageInstanceSegmentation, ImageObjectDetection, Regression, TextClassification, TextClassificationMultilabel, TextNer.
All required parameters must be populated in order to send to Azure.
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:ivar task_type: Required. [Required] Task type for AutoMLJob. Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
"""
_validation = {
'task_type': {'required': True},
}
_attribute_map = {
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
}
_subtype_map = {
'task_type': {'Classification': 'Classification', 'Forecasting': 'Forecasting', 'ImageClassification': 'ImageClassification', 'ImageClassificationMultilabel': 'ImageClassificationMultilabel', 'ImageInstanceSegmentation': 'ImageInstanceSegmentation', 'ImageObjectDetection': 'ImageObjectDetection', 'Regression': 'Regression', 'TextClassification': 'TextClassification', 'TextClassificationMultilabel': 'TextClassificationMultilabel', 'TextNER': 'TextNer'}
}
def __init__(
self,
*,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
**kwargs
):
"""
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
"""
super(AutoMLVertical, self).__init__(**kwargs)
self.log_verbosity = log_verbosity
self.task_type = None # type: Optional[str]
class NCrossValidations(msrest.serialization.Model):
"""N-Cross validations value.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AutoNCrossValidations, CustomNCrossValidations.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Mode for determining N-Cross validations. Constant filled by
server. Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.NCrossValidationsMode
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
}
_subtype_map = {
'mode': {'Auto': 'AutoNCrossValidations', 'Custom': 'CustomNCrossValidations'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(NCrossValidations, self).__init__(**kwargs)
self.mode = None # type: Optional[str]
class AutoNCrossValidations(NCrossValidations):
"""N-Cross validations determined automatically.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Mode for determining N-Cross validations. Constant filled by
server. Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.NCrossValidationsMode
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(AutoNCrossValidations, self).__init__(**kwargs)
self.mode = 'Auto' # type: str
class Seasonality(msrest.serialization.Model):
"""Forecasting seasonality.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AutoSeasonality, CustomSeasonality.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Seasonality mode. Constant filled by server. Possible values
include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.SeasonalityMode
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
}
_subtype_map = {
'mode': {'Auto': 'AutoSeasonality', 'Custom': 'CustomSeasonality'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(Seasonality, self).__init__(**kwargs)
self.mode = None # type: Optional[str]
class AutoSeasonality(Seasonality):
"""AutoSeasonality.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Seasonality mode. Constant filled by server. Possible values
include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.SeasonalityMode
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(AutoSeasonality, self).__init__(**kwargs)
self.mode = 'Auto' # type: str
class TargetLags(msrest.serialization.Model):
"""The number of past periods to lag from the target column.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AutoTargetLags, CustomTargetLags.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Set target lags mode - Auto/Custom. Constant filled by server.
Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.TargetLagsMode
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
}
_subtype_map = {
'mode': {'Auto': 'AutoTargetLags', 'Custom': 'CustomTargetLags'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(TargetLags, self).__init__(**kwargs)
self.mode = None # type: Optional[str]
class AutoTargetLags(TargetLags):
"""AutoTargetLags.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Set target lags mode - Auto/Custom. Constant filled by server.
Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.TargetLagsMode
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(AutoTargetLags, self).__init__(**kwargs)
self.mode = 'Auto' # type: str
class TargetRollingWindowSize(msrest.serialization.Model):
"""Forecasting target rolling window size.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AutoTargetRollingWindowSize, CustomTargetRollingWindowSize.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] TargetRollingWindowSize detection mode. Constant filled by
server. Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSizeMode
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
}
_subtype_map = {
'mode': {'Auto': 'AutoTargetRollingWindowSize', 'Custom': 'CustomTargetRollingWindowSize'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(TargetRollingWindowSize, self).__init__(**kwargs)
self.mode = None # type: Optional[str]
class AutoTargetRollingWindowSize(TargetRollingWindowSize):
"""Target lags rolling window determined automatically.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] TargetRollingWindowSize detection mode. Constant filled by
server. Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSizeMode
"""
_validation = {
'mode': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(AutoTargetRollingWindowSize, self).__init__(**kwargs)
self.mode = 'Auto' # type: str
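# Illustrative sketch (not part of the generated surface): the forecasting-related
# settings above (forecast horizon, N-cross validations, seasonality, target lags,
# target rolling window size) all follow the same Auto/Custom pattern. The "Auto"
# variants take no arguments and pin their discriminator to 'Auto'; a forecasting
# settings object defined elsewhere in this module would typically consume them.
def _example_auto_forecasting_settings() -> Dict[str, object]:
    return {
        "forecast_horizon": AutoForecastHorizon(),
        "n_cross_validations": AutoNCrossValidations(),
        "seasonality": AutoSeasonality(),
        "target_lags": AutoTargetLags(),
        "target_rolling_window_size": AutoTargetRollingWindowSize(),
    }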
class DatastoreDetails(ResourceBase):
"""Base definition for datastore contents configuration.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureBlobDatastore, AzureDataLakeGen1Datastore, AzureDataLakeGen2Datastore, AzureFileDatastore, HdfsDatastore.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar credentials: Required. [Required] Account credentials.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: Required. [Required] Storage type backing the datastore. Constant filled
by server. Possible values include: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2",
"AzureFile", "Hdfs".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
"""
_validation = {
'credentials': {'required': True},
'datastore_type': {'required': True},
'is_default': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'credentials': {'key': 'credentials', 'type': 'DatastoreCredentials'},
'datastore_type': {'key': 'datastoreType', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
}
_subtype_map = {
'datastore_type': {'AzureBlob': 'AzureBlobDatastore', 'AzureDataLakeGen1': 'AzureDataLakeGen1Datastore', 'AzureDataLakeGen2': 'AzureDataLakeGen2Datastore', 'AzureFile': 'AzureFileDatastore', 'Hdfs': 'HdfsDatastore'}
}
def __init__(
self,
*,
credentials: "DatastoreCredentials",
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword credentials: Required. [Required] Account credentials.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
"""
super(DatastoreDetails, self).__init__(description=description, properties=properties, tags=tags, **kwargs)
self.credentials = credentials
self.datastore_type = 'DatastoreDetails' # type: str
self.is_default = None
class AzureBlobDatastore(DatastoreDetails):
"""Azure Blob datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar credentials: Required. [Required] Account credentials.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: Required. [Required] Storage type backing the datastore. Constant filled
by server. Possible values include: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2",
"AzureFile", "Hdfs".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
:ivar account_name: Storage account name.
:vartype account_name: str
:ivar container_name: Storage account container name.
:vartype container_name: str
:ivar endpoint: Azure cloud endpoint for the storage account.
:vartype endpoint: str
:ivar protocol: Protocol used to communicate with the storage account.
:vartype protocol: str
:ivar service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Possible values include: "None",
"WorkspaceSystemAssignedIdentity", "WorkspaceUserAssignedIdentity".
:vartype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
_validation = {
'credentials': {'required': True},
'datastore_type': {'required': True},
'is_default': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'credentials': {'key': 'credentials', 'type': 'DatastoreCredentials'},
'datastore_type': {'key': 'datastoreType', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'account_name': {'key': 'accountName', 'type': 'str'},
'container_name': {'key': 'containerName', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
'service_data_access_auth_identity': {'key': 'serviceDataAccessAuthIdentity', 'type': 'str'},
}
def __init__(
self,
*,
credentials: "DatastoreCredentials",
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
account_name: Optional[str] = None,
container_name: Optional[str] = None,
endpoint: Optional[str] = None,
protocol: Optional[str] = None,
service_data_access_auth_identity: Optional[Union[str, "ServiceDataAccessAuthIdentity"]] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword credentials: Required. [Required] Account credentials.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:keyword account_name: Storage account name.
:paramtype account_name: str
:keyword container_name: Storage account container name.
:paramtype container_name: str
:keyword endpoint: Azure cloud endpoint for the storage account.
:paramtype endpoint: str
:keyword protocol: Protocol used to communicate with the storage account.
:paramtype protocol: str
:keyword service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Possible values include: "None",
"WorkspaceSystemAssignedIdentity", "WorkspaceUserAssignedIdentity".
:paramtype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
super(AzureBlobDatastore, self).__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
self.datastore_type = 'AzureBlob' # type: str
self.account_name = account_name
self.container_name = container_name
self.endpoint = endpoint
self.protocol = protocol
self.service_data_access_auth_identity = service_data_access_auth_identity
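# Minimal sketch (illustrative only): AzureBlobDatastore needs credentials plus the
# blob-specific fields. The DatastoreCredentials subclasses (account key, SAS, ...) are
# defined elsewhere in this module, so the credentials object is taken as a parameter
# rather than constructed here; the account, container, and endpoint values are
# hypothetical placeholders.
def _example_blob_datastore(credentials: "DatastoreCredentials") -> AzureBlobDatastore:
    return AzureBlobDatastore(
        credentials=credentials,
        account_name="examplestorageacct",  # placeholder storage account name
        container_name="training-data",     # placeholder container name
        endpoint="core.windows.net",
        protocol="https",
    )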
class AzureDataLakeGen1Datastore(DatastoreDetails):
"""Azure Data Lake Gen1 datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar credentials: Required. [Required] Account credentials.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: Required. [Required] Storage type backing the datastore. Constant filled
by server. Possible values include: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2",
"AzureFile", "Hdfs".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
:ivar service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Possible values include: "None",
"WorkspaceSystemAssignedIdentity", "WorkspaceUserAssignedIdentity".
:vartype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
:ivar store_name: Required. [Required] Azure Data Lake store name.
:vartype store_name: str
"""
_validation = {
'credentials': {'required': True},
'datastore_type': {'required': True},
'is_default': {'readonly': True},
'store_name': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'credentials': {'key': 'credentials', 'type': 'DatastoreCredentials'},
'datastore_type': {'key': 'datastoreType', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'service_data_access_auth_identity': {'key': 'serviceDataAccessAuthIdentity', 'type': 'str'},
'store_name': {'key': 'storeName', 'type': 'str'},
}
def __init__(
self,
*,
credentials: "DatastoreCredentials",
store_name: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
service_data_access_auth_identity: Optional[Union[str, "ServiceDataAccessAuthIdentity"]] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword credentials: Required. [Required] Account credentials.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:keyword service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Possible values include: "None",
"WorkspaceSystemAssignedIdentity", "WorkspaceUserAssignedIdentity".
:paramtype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
:keyword store_name: Required. [Required] Azure Data Lake store name.
:paramtype store_name: str
"""
super(AzureDataLakeGen1Datastore, self).__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
self.datastore_type = 'AzureDataLakeGen1' # type: str
self.service_data_access_auth_identity = service_data_access_auth_identity
self.store_name = store_name
class AzureDataLakeGen2Datastore(DatastoreDetails):
"""Azure Data Lake Gen2 datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar credentials: Required. [Required] Account credentials.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: Required. [Required] Storage type backing the datastore. Constant filled
by server. Possible values include: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2",
"AzureFile", "Hdfs".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
:ivar account_name: Required. [Required] Storage account name.
:vartype account_name: str
:ivar endpoint: Azure cloud endpoint for the storage account.
:vartype endpoint: str
:ivar filesystem: Required. [Required] The name of the Data Lake Gen2 filesystem.
:vartype filesystem: str
:ivar protocol: Protocol used to communicate with the storage account.
:vartype protocol: str
:ivar service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Possible values include: "None",
"WorkspaceSystemAssignedIdentity", "WorkspaceUserAssignedIdentity".
:vartype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
_validation = {
'credentials': {'required': True},
'datastore_type': {'required': True},
'is_default': {'readonly': True},
'account_name': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'filesystem': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'credentials': {'key': 'credentials', 'type': 'DatastoreCredentials'},
'datastore_type': {'key': 'datastoreType', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'account_name': {'key': 'accountName', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
'filesystem': {'key': 'filesystem', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
'service_data_access_auth_identity': {'key': 'serviceDataAccessAuthIdentity', 'type': 'str'},
}
def __init__(
self,
*,
credentials: "DatastoreCredentials",
account_name: str,
filesystem: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
endpoint: Optional[str] = None,
protocol: Optional[str] = None,
service_data_access_auth_identity: Optional[Union[str, "ServiceDataAccessAuthIdentity"]] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword credentials: Required. [Required] Account credentials.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:keyword account_name: Required. [Required] Storage account name.
:paramtype account_name: str
:keyword endpoint: Azure cloud endpoint for the storage account.
:paramtype endpoint: str
:keyword filesystem: Required. [Required] The name of the Data Lake Gen2 filesystem.
:paramtype filesystem: str
:keyword protocol: Protocol used to communicate with the storage account.
:paramtype protocol: str
:keyword service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Possible values include: "None",
"WorkspaceSystemAssignedIdentity", "WorkspaceUserAssignedIdentity".
:paramtype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
super(AzureDataLakeGen2Datastore, self).__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
self.datastore_type = 'AzureDataLakeGen2' # type: str
self.account_name = account_name
self.endpoint = endpoint
self.filesystem = filesystem
self.protocol = protocol
self.service_data_access_auth_identity = service_data_access_auth_identity
class AzureFileDatastore(DatastoreDetails):
"""Azure File datastore configuration.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar credentials: Required. [Required] Account credentials.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: Required. [Required] Storage type backing the datastore. Constant filled
by server. Possible values include: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2",
"AzureFile", "Hdfs".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
:ivar account_name: Required. [Required] Storage account name.
:vartype account_name: str
:ivar endpoint: Azure cloud endpoint for the storage account.
:vartype endpoint: str
:ivar file_share_name: Required. [Required] The name of the Azure file share that the datastore
points to.
:vartype file_share_name: str
:ivar protocol: Protocol used to communicate with the storage account.
:vartype protocol: str
:ivar service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Possible values include: "None",
"WorkspaceSystemAssignedIdentity", "WorkspaceUserAssignedIdentity".
:vartype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
_validation = {
'credentials': {'required': True},
'datastore_type': {'required': True},
'is_default': {'readonly': True},
'account_name': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'file_share_name': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'credentials': {'key': 'credentials', 'type': 'DatastoreCredentials'},
'datastore_type': {'key': 'datastoreType', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'account_name': {'key': 'accountName', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'str'},
'file_share_name': {'key': 'fileShareName', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
'service_data_access_auth_identity': {'key': 'serviceDataAccessAuthIdentity', 'type': 'str'},
}
def __init__(
self,
*,
credentials: "DatastoreCredentials",
account_name: str,
file_share_name: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
endpoint: Optional[str] = None,
protocol: Optional[str] = None,
service_data_access_auth_identity: Optional[Union[str, "ServiceDataAccessAuthIdentity"]] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword credentials: Required. [Required] Account credentials.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:keyword account_name: Required. [Required] Storage account name.
:paramtype account_name: str
:keyword endpoint: Azure cloud endpoint for the storage account.
:paramtype endpoint: str
:keyword file_share_name: Required. [Required] The name of the Azure file share that the
datastore points to.
:paramtype file_share_name: str
:keyword protocol: Protocol used to communicate with the storage account.
:paramtype protocol: str
:keyword service_data_access_auth_identity: Indicates which identity to use to authenticate
service data access to customer's storage. Possible values include: "None",
"WorkspaceSystemAssignedIdentity", "WorkspaceUserAssignedIdentity".
:paramtype service_data_access_auth_identity: str or
~azure.mgmt.machinelearningservices.models.ServiceDataAccessAuthIdentity
"""
super(AzureFileDatastore, self).__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
self.datastore_type = 'AzureFile' # type: str
self.account_name = account_name
self.endpoint = endpoint
self.file_share_name = file_share_name
self.protocol = protocol
self.service_data_access_auth_identity = service_data_access_auth_identity
class EarlyTerminationPolicy(msrest.serialization.Model):
"""Early termination policies enable canceling poor-performing runs before they complete.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: BanditPolicy, MedianStoppingPolicy, TruncationSelectionPolicy.
All required parameters must be populated in order to send to Azure.
:ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
:vartype delay_evaluation: int
:ivar evaluation_interval: Interval (number of runs) between policy evaluations.
:vartype evaluation_interval: int
:ivar policy_type: Required. [Required] Name of policy configuration. Constant filled by server.
Possible values include: "Bandit", "MedianStopping", "TruncationSelection".
:vartype policy_type: str or
~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicyType
"""
_validation = {
'policy_type': {'required': True},
}
_attribute_map = {
'delay_evaluation': {'key': 'delayEvaluation', 'type': 'int'},
'evaluation_interval': {'key': 'evaluationInterval', 'type': 'int'},
'policy_type': {'key': 'policyType', 'type': 'str'},
}
_subtype_map = {
'policy_type': {'Bandit': 'BanditPolicy', 'MedianStopping': 'MedianStoppingPolicy', 'TruncationSelection': 'TruncationSelectionPolicy'}
}
def __init__(
self,
*,
delay_evaluation: Optional[int] = 0,
evaluation_interval: Optional[int] = 0,
**kwargs
):
"""
:keyword delay_evaluation: Number of intervals by which to delay the first evaluation.
:paramtype delay_evaluation: int
:keyword evaluation_interval: Interval (number of runs) between policy evaluations.
:paramtype evaluation_interval: int
"""
super(EarlyTerminationPolicy, self).__init__(**kwargs)
self.delay_evaluation = delay_evaluation
self.evaluation_interval = evaluation_interval
self.policy_type = None # type: Optional[str]
class BanditPolicy(EarlyTerminationPolicy):
"""Defines an early termination policy based on slack criteria, and a frequency and delay interval for evaluation.
All required parameters must be populated in order to send to Azure.
:ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
:vartype delay_evaluation: int
:ivar evaluation_interval: Interval (number of runs) between policy evaluations.
:vartype evaluation_interval: int
:ivar policy_type: Required. [Required] Name of policy configuration. Constant filled by server.
Possible values include: "Bandit", "MedianStopping", "TruncationSelection".
:vartype policy_type: str or
~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicyType
:ivar slack_amount: Absolute distance allowed from the best performing run.
:vartype slack_amount: float
:ivar slack_factor: Ratio of the allowed distance from the best performing run.
:vartype slack_factor: float
"""
_validation = {
'policy_type': {'required': True},
}
_attribute_map = {
'delay_evaluation': {'key': 'delayEvaluation', 'type': 'int'},
'evaluation_interval': {'key': 'evaluationInterval', 'type': 'int'},
'policy_type': {'key': 'policyType', 'type': 'str'},
'slack_amount': {'key': 'slackAmount', 'type': 'float'},
'slack_factor': {'key': 'slackFactor', 'type': 'float'},
}
def __init__(
self,
*,
delay_evaluation: Optional[int] = 0,
evaluation_interval: Optional[int] = 0,
slack_amount: Optional[float] = 0,
slack_factor: Optional[float] = 0,
**kwargs
):
"""
:keyword delay_evaluation: Number of intervals by which to delay the first evaluation.
:paramtype delay_evaluation: int
:keyword evaluation_interval: Interval (number of runs) between policy evaluations.
:paramtype evaluation_interval: int
:keyword slack_amount: Absolute distance allowed from the best performing run.
:paramtype slack_amount: float
:keyword slack_factor: Ratio of the allowed distance from the best performing run.
:paramtype slack_factor: float
"""
super(BanditPolicy, self).__init__(delay_evaluation=delay_evaluation, evaluation_interval=evaluation_interval, **kwargs)
self.policy_type = 'Bandit' # type: str
self.slack_amount = slack_amount
self.slack_factor = slack_factor
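# Illustrative sketch of a sweep early-termination policy: a BanditPolicy that starts
# evaluating after 5 intervals, re-evaluates every interval, and cancels runs whose
# primary metric falls more than 10% behind the best run (slack_factor=0.1). The numbers
# are example values, not recommendations.
def _example_bandit_policy() -> BanditPolicy:
    return BanditPolicy(
        delay_evaluation=5,
        evaluation_interval=1,
        slack_factor=0.1,
    )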
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar location: Required. The geo-location where the resource lives.
:vartype location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
*,
location: str,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
:keyword location: Required. The geo-location where the resource lives.
:paramtype location: str
"""
super(TrackedResource, self).__init__(**kwargs)
self.tags = tags
self.location = location
class BatchDeploymentData(TrackedResource):
"""BatchDeploymentData.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar location: Required. The geo-location where the resource lives.
:vartype location: str
:ivar identity: Managed service identity (system assigned and/or user assigned identities).
:vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
:ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:vartype kind: str
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.BatchDeploymentDetails
:ivar sku: Sku details required for ARM contract for Autoscaling.
:vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'BatchDeploymentDetails'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
location: str,
properties: "BatchDeploymentDetails",
tags: Optional[Dict[str, str]] = None,
identity: Optional["ManagedServiceIdentity"] = None,
kind: Optional[str] = None,
sku: Optional["Sku"] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
:keyword location: Required. The geo-location where the resource lives.
:paramtype location: str
:keyword identity: Managed service identity (system assigned and/or user assigned identities).
:paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
:keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:paramtype kind: str
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.BatchDeploymentDetails
:keyword sku: Sku details required for ARM contract for Autoscaling.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
super(BatchDeploymentData, self).__init__(tags=tags, location=location, **kwargs)
self.identity = identity
self.kind = kind
self.properties = properties
self.sku = sku
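# Hedged sketch (illustrative only): BatchDeploymentData wraps the deployment payload in
# the ARM tracked-resource envelope, so "location" and "properties" are required while
# identity, kind, and sku stay optional. The BatchDeploymentDetails constructor appears
# further down in this module, so the payload is taken as a parameter here; the region
# and tag values are hypothetical placeholders.
def _example_batch_deployment_data(details: "BatchDeploymentDetails") -> BatchDeploymentData:
    return BatchDeploymentData(
        location="westus2",          # placeholder region
        properties=details,
        tags={"env": "example"},
    )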
class EndpointDeploymentPropertiesBase(msrest.serialization.Model):
"""Base definition for endpoint deployment.
:ivar code_configuration: Code configuration for the endpoint deployment.
:vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:ivar description: Description of the endpoint deployment.
:vartype description: str
:ivar environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:vartype environment_id: str
:ivar environment_variables: Environment variables configuration for the deployment.
:vartype environment_variables: dict[str, str]
:ivar properties: Property dictionary. Properties can be added, but not removed or altered.
:vartype properties: dict[str, str]
"""
_attribute_map = {
'code_configuration': {'key': 'codeConfiguration', 'type': 'CodeConfiguration'},
'description': {'key': 'description', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'properties': {'key': 'properties', 'type': '{str}'},
}
def __init__(
self,
*,
code_configuration: Optional["CodeConfiguration"] = None,
description: Optional[str] = None,
environment_id: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword code_configuration: Code configuration for the endpoint deployment.
:paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:keyword description: Description of the endpoint deployment.
:paramtype description: str
:keyword environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:paramtype environment_id: str
:keyword environment_variables: Environment variables configuration for the deployment.
:paramtype environment_variables: dict[str, str]
:keyword properties: Property dictionary. Properties can be added, but not removed or altered.
:paramtype properties: dict[str, str]
"""
super(EndpointDeploymentPropertiesBase, self).__init__(**kwargs)
self.code_configuration = code_configuration
self.description = description
self.environment_id = environment_id
self.environment_variables = environment_variables
self.properties = properties
class BatchDeploymentDetails(EndpointDeploymentPropertiesBase):
"""Batch inference settings per deployment.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code_configuration: Code configuration for the endpoint deployment.
:vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:ivar description: Description of the endpoint deployment.
:vartype description: str
:ivar environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:vartype environment_id: str
:ivar environment_variables: Environment variables configuration for the deployment.
:vartype environment_variables: dict[str, str]
:ivar properties: Property dictionary. Properties can be added, but not removed or altered.
:vartype properties: dict[str, str]
:ivar compute: Compute target for batch inference operation.
:vartype compute: str
:ivar error_threshold: If the error count for the entire input goes above this value,
 the batch inference will be aborted. Range is [-1, int.MaxValue].
For FileDataset, this value is the count of file failures.
For TabularDataset, this value is the count of record failures.
If set to -1 (the lower bound), all failures during batch inference will be ignored.
:vartype error_threshold: int
:ivar logging_level: Logging level for batch inference operation. Possible values include:
"Info", "Warning", "Debug".
:vartype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
:ivar max_concurrency_per_instance: Indicates the maximum degree of parallelism per instance.
:vartype max_concurrency_per_instance: int
:ivar mini_batch_size: Size of the mini-batch passed to each batch invocation.
For FileDataset, this is the number of files per mini-batch.
For TabularDataset, this is the size of the records in bytes, per mini-batch.
:vartype mini_batch_size: long
:ivar model: Reference to the model asset for the endpoint deployment.
:vartype model: ~azure.mgmt.machinelearningservices.models.AssetReferenceBase
:ivar output_action: Indicates how the output will be organized. Possible values include:
"SummaryOnly", "AppendRow".
:vartype output_action: str or ~azure.mgmt.machinelearningservices.models.BatchOutputAction
:ivar output_file_name: Customized output file name for append_row output action.
:vartype output_file_name: str
:ivar provisioning_state: Provisioning state for the endpoint deployment. Possible values
include: "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
:ivar resources: Indicates compute configuration for the job.
If not provided, will default to the defaults defined in ResourceConfiguration.
:vartype resources: ~azure.mgmt.machinelearningservices.models.ResourceConfiguration
:ivar retry_settings: Retry Settings for the batch inference operation.
If not provided, will default to the defaults defined in BatchRetrySettings.
:vartype retry_settings: ~azure.mgmt.machinelearningservices.models.BatchRetrySettings
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'code_configuration': {'key': 'codeConfiguration', 'type': 'CodeConfiguration'},
'description': {'key': 'description', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'properties': {'key': 'properties', 'type': '{str}'},
'compute': {'key': 'compute', 'type': 'str'},
'error_threshold': {'key': 'errorThreshold', 'type': 'int'},
'logging_level': {'key': 'loggingLevel', 'type': 'str'},
'max_concurrency_per_instance': {'key': 'maxConcurrencyPerInstance', 'type': 'int'},
'mini_batch_size': {'key': 'miniBatchSize', 'type': 'long'},
'model': {'key': 'model', 'type': 'AssetReferenceBase'},
'output_action': {'key': 'outputAction', 'type': 'str'},
'output_file_name': {'key': 'outputFileName', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'resources': {'key': 'resources', 'type': 'ResourceConfiguration'},
'retry_settings': {'key': 'retrySettings', 'type': 'BatchRetrySettings'},
}
def __init__(
self,
*,
code_configuration: Optional["CodeConfiguration"] = None,
description: Optional[str] = None,
environment_id: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
properties: Optional[Dict[str, str]] = None,
compute: Optional[str] = None,
error_threshold: Optional[int] = -1,
logging_level: Optional[Union[str, "BatchLoggingLevel"]] = None,
max_concurrency_per_instance: Optional[int] = 1,
mini_batch_size: Optional[int] = 10,
model: Optional["AssetReferenceBase"] = None,
output_action: Optional[Union[str, "BatchOutputAction"]] = None,
output_file_name: Optional[str] = "predictions.csv",
resources: Optional["ResourceConfiguration"] = None,
retry_settings: Optional["BatchRetrySettings"] = None,
**kwargs
):
"""
:keyword code_configuration: Code configuration for the endpoint deployment.
:paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:keyword description: Description of the endpoint deployment.
:paramtype description: str
:keyword environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:paramtype environment_id: str
:keyword environment_variables: Environment variables configuration for the deployment.
:paramtype environment_variables: dict[str, str]
:keyword properties: Property dictionary. Properties can be added, but not removed or altered.
:paramtype properties: dict[str, str]
:keyword compute: Compute target for batch inference operation.
:paramtype compute: str
:keyword error_threshold: If the error count for the entire input goes above this value,
 the batch inference will be aborted. Range is [-1, int.MaxValue].
 For FileDataset, this value is the count of file failures.
 For TabularDataset, this value is the count of record failures.
 If set to -1 (the lower bound), all failures during batch inference will be ignored.
:paramtype error_threshold: int
:keyword logging_level: Logging level for batch inference operation. Possible values include:
"Info", "Warning", "Debug".
:paramtype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
:keyword max_concurrency_per_instance: Indicates the maximum degree of parallelism per instance.
:paramtype max_concurrency_per_instance: int
:keyword mini_batch_size: Size of the mini-batch passed to each batch invocation.
For FileDataset, this is the number of files per mini-batch.
For TabularDataset, this is the size of the records in bytes, per mini-batch.
:paramtype mini_batch_size: long
:keyword model: Reference to the model asset for the endpoint deployment.
:paramtype model: ~azure.mgmt.machinelearningservices.models.AssetReferenceBase
:keyword output_action: Indicates how the output will be organized. Possible values include:
"SummaryOnly", "AppendRow".
:paramtype output_action: str or ~azure.mgmt.machinelearningservices.models.BatchOutputAction
:keyword output_file_name: Customized output file name for append_row output action.
:paramtype output_file_name: str
:keyword resources: Indicates compute configuration for the job.
If not provided, will default to the defaults defined in ResourceConfiguration.
:paramtype resources: ~azure.mgmt.machinelearningservices.models.ResourceConfiguration
:keyword retry_settings: Retry Settings for the batch inference operation.
If not provided, will default to the defaults defined in BatchRetrySettings.
:paramtype retry_settings: ~azure.mgmt.machinelearningservices.models.BatchRetrySettings
"""
super(BatchDeploymentDetails, self).__init__(code_configuration=code_configuration, description=description, environment_id=environment_id, environment_variables=environment_variables, properties=properties, **kwargs)
self.compute = compute
self.error_threshold = error_threshold
self.logging_level = logging_level
self.max_concurrency_per_instance = max_concurrency_per_instance
self.mini_batch_size = mini_batch_size
self.model = model
self.output_action = output_action
self.output_file_name = output_file_name
self.provisioning_state = None
self.resources = resources
self.retry_settings = retry_settings
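# Hand-written usage sketch (not produced by the code generator): how a caller might
# assemble a batch deployment payload from these models. The compute name, region, and
# numeric settings below are hypothetical placeholders.
#
#   details = BatchDeploymentDetails(
#       compute="cpu-cluster",
#       error_threshold=10,
#       mini_batch_size=5,
#       output_action="AppendRow",
#       retry_settings=BatchRetrySettings(max_retries=3),
#   )
#   deployment = BatchDeploymentData(location="eastus", properties=details)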
class BatchDeploymentTrackedResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of BatchDeployment entities.
:ivar next_link: The link to the next page of BatchDeployment objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type BatchDeployment.
:vartype value: list[~azure.mgmt.machinelearningservices.models.BatchDeploymentData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[BatchDeploymentData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["BatchDeploymentData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of BatchDeployment objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type BatchDeployment.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.BatchDeploymentData]
"""
super(BatchDeploymentTrackedResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class BatchEndpointData(TrackedResource):
"""BatchEndpointData.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar location: Required. The geo-location where the resource lives.
:vartype location: str
:ivar identity: Managed service identity (system assigned and/or user assigned identities).
:vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
:ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:vartype kind: str
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.BatchEndpointDetails
:ivar sku: Sku details required for ARM contract for Autoscaling.
:vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'BatchEndpointDetails'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
location: str,
properties: "BatchEndpointDetails",
tags: Optional[Dict[str, str]] = None,
identity: Optional["ManagedServiceIdentity"] = None,
kind: Optional[str] = None,
sku: Optional["Sku"] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
:keyword location: Required. The geo-location where the resource lives.
:paramtype location: str
:keyword identity: Managed service identity (system assigned and/or user assigned identities).
:paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
:keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:paramtype kind: str
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.BatchEndpointDetails
:keyword sku: Sku details required for ARM contract for Autoscaling.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
super(BatchEndpointData, self).__init__(tags=tags, location=location, **kwargs)
self.identity = identity
self.kind = kind
self.properties = properties
self.sku = sku
class BatchEndpointDefaults(msrest.serialization.Model):
"""Batch endpoint default values.
:ivar deployment_name: Name of the deployment that will be default for the endpoint.
This deployment will end up getting 100% traffic when the endpoint scoring URL is invoked.
:vartype deployment_name: str
"""
_attribute_map = {
'deployment_name': {'key': 'deploymentName', 'type': 'str'},
}
def __init__(
self,
*,
deployment_name: Optional[str] = None,
**kwargs
):
"""
:keyword deployment_name: Name of the deployment that will be default for the endpoint.
This deployment will end up getting 100% traffic when the endpoint scoring URL is invoked.
:paramtype deployment_name: str
"""
super(BatchEndpointDefaults, self).__init__(**kwargs)
self.deployment_name = deployment_name
class EndpointPropertiesBase(msrest.serialization.Model):
"""Inference Endpoint base definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar auth_mode: Required. [Required] Use 'Key' for key based authentication and 'AMLToken' for
Azure Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
Possible values include: "AMLToken", "Key", "AADToken".
:vartype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
:ivar description: Description of the inference endpoint.
:vartype description: str
:ivar keys: EndpointAuthKeys to set initially on an Endpoint.
This property will always be returned as null. AuthKey values must be retrieved using the
ListKeys API.
:vartype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
:ivar properties: Property dictionary. Properties can be added, but not removed or altered.
:vartype properties: dict[str, str]
:ivar scoring_uri: Endpoint URI.
:vartype scoring_uri: str
:ivar swagger_uri: Endpoint Swagger URI.
:vartype swagger_uri: str
"""
_validation = {
'auth_mode': {'required': True},
'scoring_uri': {'readonly': True},
'swagger_uri': {'readonly': True},
}
_attribute_map = {
'auth_mode': {'key': 'authMode', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'keys': {'key': 'keys', 'type': 'EndpointAuthKeys'},
'properties': {'key': 'properties', 'type': '{str}'},
'scoring_uri': {'key': 'scoringUri', 'type': 'str'},
'swagger_uri': {'key': 'swaggerUri', 'type': 'str'},
}
def __init__(
self,
*,
auth_mode: Union[str, "EndpointAuthMode"],
description: Optional[str] = None,
keys: Optional["EndpointAuthKeys"] = None,
properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword auth_mode: Required. [Required] Use 'Key' for key based authentication and 'AMLToken'
for Azure Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken'
does. Possible values include: "AMLToken", "Key", "AADToken".
:paramtype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
:keyword description: Description of the inference endpoint.
:paramtype description: str
:keyword keys: EndpointAuthKeys to set initially on an Endpoint.
This property will always be returned as null. AuthKey values must be retrieved using the
ListKeys API.
:paramtype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
:keyword properties: Property dictionary. Properties can be added, but not removed or altered.
:paramtype properties: dict[str, str]
"""
super(EndpointPropertiesBase, self).__init__(**kwargs)
self.auth_mode = auth_mode
self.description = description
self.keys = keys
self.properties = properties
self.scoring_uri = None
self.swagger_uri = None
class BatchEndpointDetails(EndpointPropertiesBase):
"""Batch endpoint configuration.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar auth_mode: Required. [Required] Use 'Key' for key based authentication and 'AMLToken' for
Azure Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
Possible values include: "AMLToken", "Key", "AADToken".
:vartype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
:ivar description: Description of the inference endpoint.
:vartype description: str
:ivar keys: EndpointAuthKeys to set initially on an Endpoint.
This property will always be returned as null. AuthKey values must be retrieved using the
ListKeys API.
:vartype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
:ivar properties: Property dictionary. Properties can be added, but not removed or altered.
:vartype properties: dict[str, str]
:ivar scoring_uri: Endpoint URI.
:vartype scoring_uri: str
:ivar swagger_uri: Endpoint Swagger URI.
:vartype swagger_uri: str
:ivar defaults: Default values for Batch Endpoint.
:vartype defaults: ~azure.mgmt.machinelearningservices.models.BatchEndpointDefaults
:ivar provisioning_state: Provisioning state for the endpoint. Possible values include:
"Creating", "Deleting", "Succeeded", "Failed", "Updating", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.EndpointProvisioningState
"""
_validation = {
'auth_mode': {'required': True},
'scoring_uri': {'readonly': True},
'swagger_uri': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'auth_mode': {'key': 'authMode', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'keys': {'key': 'keys', 'type': 'EndpointAuthKeys'},
'properties': {'key': 'properties', 'type': '{str}'},
'scoring_uri': {'key': 'scoringUri', 'type': 'str'},
'swagger_uri': {'key': 'swaggerUri', 'type': 'str'},
'defaults': {'key': 'defaults', 'type': 'BatchEndpointDefaults'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
}
def __init__(
self,
*,
auth_mode: Union[str, "EndpointAuthMode"],
description: Optional[str] = None,
keys: Optional["EndpointAuthKeys"] = None,
properties: Optional[Dict[str, str]] = None,
defaults: Optional["BatchEndpointDefaults"] = None,
**kwargs
):
"""
:keyword auth_mode: Required. [Required] Use 'Key' for key based authentication and 'AMLToken'
for Azure Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken'
does. Possible values include: "AMLToken", "Key", "AADToken".
:paramtype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
:keyword description: Description of the inference endpoint.
:paramtype description: str
:keyword keys: EndpointAuthKeys to set initially on an Endpoint.
This property will always be returned as null. AuthKey values must be retrieved using the
ListKeys API.
:paramtype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
:keyword properties: Property dictionary. Properties can be added, but not removed or altered.
:paramtype properties: dict[str, str]
:keyword defaults: Default values for Batch Endpoint.
:paramtype defaults: ~azure.mgmt.machinelearningservices.models.BatchEndpointDefaults
"""
super(BatchEndpointDetails, self).__init__(auth_mode=auth_mode, description=description, keys=keys, properties=properties, **kwargs)
self.defaults = defaults
self.provisioning_state = None
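# Hand-written usage sketch (not produced by the code generator): a batch endpoint with
# key-based authentication and a default deployment. The deployment name and region are
# hypothetical placeholders.
#
#   endpoint_details = BatchEndpointDetails(
#       auth_mode="Key",
#       defaults=BatchEndpointDefaults(deployment_name="blue"),
#   )
#   endpoint = BatchEndpointData(location="eastus", properties=endpoint_details)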
class BatchEndpointTrackedResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of BatchEndpoint entities.
:ivar next_link: The link to the next page of BatchEndpoint objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type BatchEndpoint.
:vartype value: list[~azure.mgmt.machinelearningservices.models.BatchEndpointData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[BatchEndpointData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["BatchEndpointData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of BatchEndpoint objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type BatchEndpoint.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.BatchEndpointData]
"""
super(BatchEndpointTrackedResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class BatchRetrySettings(msrest.serialization.Model):
"""Retry settings for a batch inference operation.
:ivar max_retries: Maximum retry count for a mini-batch.
:vartype max_retries: int
:ivar timeout: Invocation timeout for a mini-batch, in ISO 8601 format.
:vartype timeout: ~datetime.timedelta
"""
_attribute_map = {
'max_retries': {'key': 'maxRetries', 'type': 'int'},
'timeout': {'key': 'timeout', 'type': 'duration'},
}
def __init__(
self,
*,
max_retries: Optional[int] = 3,
timeout: Optional[datetime.timedelta] = "PT30S",  # default is the ISO 8601 duration string for 30 seconds
**kwargs
):
"""
:keyword max_retries: Maximum retry count for a mini-batch.
:paramtype max_retries: int
:keyword timeout: Invocation timeout for a mini-batch, in ISO 8601 format.
:paramtype timeout: ~datetime.timedelta
"""
super(BatchRetrySettings, self).__init__(**kwargs)
self.max_retries = max_retries
self.timeout = timeout
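# Hand-written usage sketch (not produced by the code generator): overriding the retry
# defaults. The 'duration' serializer accepts a datetime.timedelta and emits it as an
# ISO 8601 duration (five minutes becomes "PT5M"); the values below are examples only.
#
#   retry = BatchRetrySettings(
#       max_retries=5,
#       timeout=datetime.timedelta(minutes=5),
#   )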
class SamplingAlgorithm(msrest.serialization.Model):
"""The Sampling Algorithm used to generate hyperparameter values, along with properties to
configure the algorithm.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: BayesianSamplingAlgorithm, GridSamplingAlgorithm, RandomSamplingAlgorithm.
All required parameters must be populated in order to send to Azure.
:ivar sampling_algorithm_type: Required. [Required] The algorithm used for generating
hyperparameter values, along with configuration properties. Constant filled by server. Possible
values include: "Grid", "Random", "Bayesian".
:vartype sampling_algorithm_type: str or
~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
"""
_validation = {
'sampling_algorithm_type': {'required': True},
}
_attribute_map = {
'sampling_algorithm_type': {'key': 'samplingAlgorithmType', 'type': 'str'},
}
_subtype_map = {
'sampling_algorithm_type': {'Bayesian': 'BayesianSamplingAlgorithm', 'Grid': 'GridSamplingAlgorithm', 'Random': 'RandomSamplingAlgorithm'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(SamplingAlgorithm, self).__init__(**kwargs)
self.sampling_algorithm_type = None # type: Optional[str]
class BayesianSamplingAlgorithm(SamplingAlgorithm):
"""Defines a Sampling Algorithm that generates values based on previous values.
All required parameters must be populated in order to send to Azure.
:ivar sampling_algorithm_type: Required. [Required] The algorithm used for generating
hyperparameter values, along with configuration properties. Constant filled by server. Possible
values include: "Grid", "Random", "Bayesian".
:vartype sampling_algorithm_type: str or
~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
"""
_validation = {
'sampling_algorithm_type': {'required': True},
}
_attribute_map = {
'sampling_algorithm_type': {'key': 'samplingAlgorithmType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(BayesianSamplingAlgorithm, self).__init__(**kwargs)
self.sampling_algorithm_type = 'Bayesian' # type: str
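# Hand-written usage sketch (not produced by the code generator): the sampling algorithm
# is polymorphic on sampling_algorithm_type, so constructing a subclass fixes the
# discriminator that the service uses during (de)serialization.
#
#   algorithm = BayesianSamplingAlgorithm()
#   assert algorithm.sampling_algorithm_type == 'Bayesian'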
class BuildContext(msrest.serialization.Model):
"""Configuration settings for Docker build context.
All required parameters must be populated in order to send to Azure.
:ivar context_uri: Required. [Required] URI of the Docker build context used to build the
image. Supports blob URIs on environment creation and may return blob or Git URIs.
.. raw:: html
<seealso
href="https://docs.docker.com/engine/reference/commandline/build/#extended-description" />.
:vartype context_uri: str
:ivar dockerfile_path: Path to the Dockerfile in the build context.
.. raw:: html
<seealso href="https://docs.docker.com/engine/reference/builder/" />.
:vartype dockerfile_path: str
"""
_validation = {
'context_uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'context_uri': {'key': 'contextUri', 'type': 'str'},
'dockerfile_path': {'key': 'dockerfilePath', 'type': 'str'},
}
def __init__(
self,
*,
context_uri: str,
dockerfile_path: Optional[str] = "Dockerfile",
**kwargs
):
"""
:keyword context_uri: Required. [Required] URI of the Docker build context used to build the
image. Supports blob URIs on environment creation and may return blob or Git URIs.
.. raw:: html
<seealso
href="https://docs.docker.com/engine/reference/commandline/build/#extended-description" />.
:paramtype context_uri: str
:keyword dockerfile_path: Path to the Dockerfile in the build context.
.. raw:: html
<seealso href="https://docs.docker.com/engine/reference/builder/" />.
:paramtype dockerfile_path: str
"""
super(BuildContext, self).__init__(**kwargs)
self.context_uri = context_uri
self.dockerfile_path = dockerfile_path
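# Hand-written usage sketch (not produced by the code generator): a Docker build context
# stored in blob storage. The storage account, container, and Dockerfile path are
# hypothetical placeholders.
#
#   build = BuildContext(
#       context_uri="https://examplestorage.blob.core.windows.net/builds/context.zip",
#       dockerfile_path="docker/Dockerfile",
#   )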
class CertificateDatastoreCredentials(DatastoreCredentials):
"""Certificate datastore credentials configuration.
All required parameters must be populated in order to send to Azure.
:ivar credentials_type: Required. [Required] Credential type used to authenticate with
 storage. Constant filled by server. Possible values include: "AccountKey", "Certificate",
"None", "Sas", "ServicePrincipal", "KerberosKeytab", "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
:ivar authority_url: Authority URL used for authentication.
:vartype authority_url: str
:ivar client_id: Required. [Required] Service principal client ID.
:vartype client_id: str
:ivar resource_url: Resource the service principal has access to.
:vartype resource_url: str
:ivar secrets: Required. [Required] Service principal secrets.
:vartype secrets: ~azure.mgmt.machinelearningservices.models.CertificateDatastoreSecrets
:ivar tenant_id: Required. [Required] ID of the tenant to which the service principal belongs.
:vartype tenant_id: str
:ivar thumbprint: Required. [Required] Thumbprint of the certificate used for authentication.
:vartype thumbprint: str
"""
_validation = {
'credentials_type': {'required': True},
'client_id': {'required': True},
'secrets': {'required': True},
'tenant_id': {'required': True},
'thumbprint': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'credentials_type': {'key': 'credentialsType', 'type': 'str'},
'authority_url': {'key': 'authorityUrl', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'resource_url': {'key': 'resourceUrl', 'type': 'str'},
'secrets': {'key': 'secrets', 'type': 'CertificateDatastoreSecrets'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
}
def __init__(
self,
*,
client_id: str,
secrets: "CertificateDatastoreSecrets",
tenant_id: str,
thumbprint: str,
authority_url: Optional[str] = None,
resource_url: Optional[str] = None,
**kwargs
):
"""
:keyword authority_url: Authority URL used for authentication.
:paramtype authority_url: str
:keyword client_id: Required. [Required] Service principal client ID.
:paramtype client_id: str
:keyword resource_url: Resource the service principal has access to.
:paramtype resource_url: str
:keyword secrets: Required. [Required] Service principal secrets.
:paramtype secrets: ~azure.mgmt.machinelearningservices.models.CertificateDatastoreSecrets
:keyword tenant_id: Required. [Required] ID of the tenant to which the service principal
belongs.
:paramtype tenant_id: str
:keyword thumbprint: Required. [Required] Thumbprint of the certificate used for
authentication.
:paramtype thumbprint: str
"""
super(CertificateDatastoreCredentials, self).__init__(**kwargs)
self.credentials_type = 'Certificate' # type: str
self.authority_url = authority_url
self.client_id = client_id
self.resource_url = resource_url
self.secrets = secrets
self.tenant_id = tenant_id
self.thumbprint = thumbprint
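# Hand-written usage sketch (not produced by the code generator): certificate-based
# datastore credentials paired with their secrets object. Every identifier below is a
# hypothetical placeholder.
#
#   credentials = CertificateDatastoreCredentials(
#       client_id="00000000-0000-0000-0000-000000000000",
#       tenant_id="00000000-0000-0000-0000-000000000000",
#       thumbprint="F0E1D2C3B4A5968778695A4B3C2D1E0F12345678",
#       secrets=CertificateDatastoreSecrets(certificate="<base64-encoded certificate>"),
#   )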
class CertificateDatastoreSecrets(DatastoreSecrets):
"""Datastore certificate secrets.
All required parameters must be populated in order to send to Azure.
:ivar secrets_type: Required. [Required] Credential type used to authenticate with
 storage. Constant filled by server. Possible values include: "AccountKey", "Certificate", "Sas",
"ServicePrincipal", "KerberosPassword", "KerberosKeytab".
:vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
:ivar certificate: Service principal certificate.
:vartype certificate: str
"""
_validation = {
'secrets_type': {'required': True},
}
_attribute_map = {
'secrets_type': {'key': 'secretsType', 'type': 'str'},
'certificate': {'key': 'certificate', 'type': 'str'},
}
def __init__(
self,
*,
certificate: Optional[str] = None,
**kwargs
):
"""
:keyword certificate: Service principal certificate.
:paramtype certificate: str
"""
super(CertificateDatastoreSecrets, self).__init__(**kwargs)
self.secrets_type = 'Certificate' # type: str
self.certificate = certificate
class TableVertical(msrest.serialization.Model):
"""Abstract class for AutoML tasks that use table dataset as input - such as Classification/Regression/Forecasting.
:ivar data_settings: Data inputs for AutoMLJob.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalDataSettings
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
:ivar training_settings: Inputs for training phase for an AutoML Job.
:vartype training_settings: ~azure.mgmt.machinelearningservices.models.TrainingSettings
"""
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'TableVerticalDataSettings'},
'featurization_settings': {'key': 'featurizationSettings', 'type': 'TableVerticalFeaturizationSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'TableVerticalLimitSettings'},
'training_settings': {'key': 'trainingSettings', 'type': 'TrainingSettings'},
}
def __init__(
self,
*,
data_settings: Optional["TableVerticalDataSettings"] = None,
featurization_settings: Optional["TableVerticalFeaturizationSettings"] = None,
limit_settings: Optional["TableVerticalLimitSettings"] = None,
training_settings: Optional["TrainingSettings"] = None,
**kwargs
):
"""
:keyword data_settings: Data inputs for AutoMLJob.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalDataSettings
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
:keyword training_settings: Inputs for training phase for an AutoML Job.
:paramtype training_settings: ~azure.mgmt.machinelearningservices.models.TrainingSettings
"""
super(TableVertical, self).__init__(**kwargs)
self.data_settings = data_settings
self.featurization_settings = featurization_settings
self.limit_settings = limit_settings
self.training_settings = training_settings
class Classification(AutoMLVertical, TableVertical):
"""Classification task in AutoML Table vertical.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Data inputs for AutoMLJob.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalDataSettings
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
:ivar training_settings: Inputs for training phase for an AutoML Job.
:vartype training_settings: ~azure.mgmt.machinelearningservices.models.TrainingSettings
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:ivar task_type: Required. [Required] Task type for AutoMLJob. Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar allowed_models: Allowed models for classification task.
:vartype allowed_models: list[str or
~azure.mgmt.machinelearningservices.models.ClassificationModels]
:ivar blocked_models: Blocked models for classification task.
:vartype blocked_models: list[str or
~azure.mgmt.machinelearningservices.models.ClassificationModels]
:ivar primary_metric: Primary metric for the task. Possible values include: "AUCWeighted",
"Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", "PrecisionScoreWeighted".
:vartype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
"""
_validation = {
'task_type': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'TableVerticalDataSettings'},
'featurization_settings': {'key': 'featurizationSettings', 'type': 'TableVerticalFeaturizationSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'TableVerticalLimitSettings'},
'training_settings': {'key': 'trainingSettings', 'type': 'TrainingSettings'},
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
'allowed_models': {'key': 'allowedModels', 'type': '[str]'},
'blocked_models': {'key': 'blockedModels', 'type': '[str]'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
data_settings: Optional["TableVerticalDataSettings"] = None,
featurization_settings: Optional["TableVerticalFeaturizationSettings"] = None,
limit_settings: Optional["TableVerticalLimitSettings"] = None,
training_settings: Optional["TrainingSettings"] = None,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
allowed_models: Optional[List[Union[str, "ClassificationModels"]]] = None,
blocked_models: Optional[List[Union[str, "ClassificationModels"]]] = None,
primary_metric: Optional[Union[str, "ClassificationPrimaryMetrics"]] = None,
**kwargs
):
"""
:keyword data_settings: Data inputs for AutoMLJob.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalDataSettings
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
:keyword training_settings: Inputs for training phase for an AutoML Job.
:paramtype training_settings: ~azure.mgmt.machinelearningservices.models.TrainingSettings
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:keyword allowed_models: Allowed models for classification task.
:paramtype allowed_models: list[str or
~azure.mgmt.machinelearningservices.models.ClassificationModels]
:keyword blocked_models: Blocked models for classification task.
:paramtype blocked_models: list[str or
~azure.mgmt.machinelearningservices.models.ClassificationModels]
:keyword primary_metric: Primary metric for the task. Possible values include: "AUCWeighted",
"Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted", "PrecisionScoreWeighted".
:paramtype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
"""
super(Classification, self).__init__(log_verbosity=log_verbosity, data_settings=data_settings, featurization_settings=featurization_settings, limit_settings=limit_settings, training_settings=training_settings, **kwargs)
self.data_settings = data_settings
self.featurization_settings = featurization_settings
self.limit_settings = limit_settings
self.training_settings = training_settings
self.task_type = 'Classification' # type: str
self.allowed_models = allowed_models
self.blocked_models = blocked_models
self.primary_metric = primary_metric
self.log_verbosity = log_verbosity
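# Hand-written usage sketch (not produced by the code generator): a classification task
# definition for an AutoML job. "AUCWeighted" is one of the metrics documented above; the
# model names are assumed members of the ClassificationModels enum, shown for illustration.
#
#   task = Classification(
#       primary_metric="AUCWeighted",
#       allowed_models=["LightGBM", "XGBoostClassifier"],
#   )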
class CodeConfiguration(msrest.serialization.Model):
"""Configuration for a scoring code asset.
All required parameters must be populated in order to send to Azure.
:ivar code_id: ARM resource ID of the code asset.
:vartype code_id: str
:ivar scoring_script: Required. [Required] The script to execute on startup, e.g. "score.py".
:vartype scoring_script: str
"""
_validation = {
'scoring_script': {'required': True, 'min_length': 1, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'code_id': {'key': 'codeId', 'type': 'str'},
'scoring_script': {'key': 'scoringScript', 'type': 'str'},
}
def __init__(
self,
*,
scoring_script: str,
code_id: Optional[str] = None,
**kwargs
):
"""
:keyword code_id: ARM resource ID of the code asset.
:paramtype code_id: str
:keyword scoring_script: Required. [Required] The script to execute on startup, e.g. "score.py".
:paramtype scoring_script: str
"""
super(CodeConfiguration, self).__init__(**kwargs)
self.code_id = code_id
self.scoring_script = scoring_script
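# Hand-written usage sketch (not produced by the code generator): pointing a deployment
# at a registered code asset and its entry script. The ARM resource ID is a hypothetical
# placeholder.
#
#   code_config = CodeConfiguration(
#       code_id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.MachineLearningServices"
#               "/workspaces/<ws>/codes/score/versions/1",
#       scoring_script="score.py",
#   )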
class CodeContainerData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.CodeContainerDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'CodeContainerDetails'},
}
def __init__(
self,
*,
properties: "CodeContainerDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.CodeContainerDetails
"""
super(CodeContainerData, self).__init__(**kwargs)
self.properties = properties
class CodeContainerDetails(AssetContainer):
"""Container for code asset versions.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_archived: Is the asset archived?
:vartype is_archived: bool
:ivar latest_version: The latest version inside this container.
:vartype latest_version: str
:ivar next_version: The next auto incremental version.
:vartype next_version: str
"""
_validation = {
'latest_version': {'readonly': True},
'next_version': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'latest_version': {'key': 'latestVersion', 'type': 'str'},
'next_version': {'key': 'nextVersion', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_archived: Optional[bool] = False,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_archived: Is the asset archived?
:paramtype is_archived: bool
"""
super(CodeContainerDetails, self).__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
class CodeContainerResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of CodeContainer entities.
:ivar next_link: The link to the next page of CodeContainer objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type CodeContainer.
:vartype value: list[~azure.mgmt.machinelearningservices.models.CodeContainerData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[CodeContainerData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["CodeContainerData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of CodeContainer objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type CodeContainer.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.CodeContainerData]
"""
super(CodeContainerResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class CodeVersionData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.CodeVersionDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'CodeVersionDetails'},
}
def __init__(
self,
*,
properties: "CodeVersionDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.CodeVersionDetails
"""
super(CodeVersionData, self).__init__(**kwargs)
self.properties = properties
class CodeVersionDetails(AssetBase):
"""Code asset version details.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_anonymous: Whether the name and version are system generated (anonymous registration).
:vartype is_anonymous: bool
:ivar is_archived: Is the asset archived?
:vartype is_archived: bool
:ivar code_uri: Uri where code is located.
:vartype code_uri: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_anonymous': {'key': 'isAnonymous', 'type': 'bool'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'code_uri': {'key': 'codeUri', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_anonymous: Optional[bool] = False,
is_archived: Optional[bool] = False,
code_uri: Optional[str] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_anonymous: Whether the name and version are system generated (anonymous registration).
:paramtype is_anonymous: bool
:keyword is_archived: Is the asset archived?
:paramtype is_archived: bool
:keyword code_uri: Uri where code is located.
:paramtype code_uri: str
"""
super(CodeVersionDetails, self).__init__(description=description, properties=properties, tags=tags, is_anonymous=is_anonymous, is_archived=is_archived, **kwargs)
self.code_uri = code_uri
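# Hand-written usage sketch (not produced by the code generator): registering a code
# version that points at an uploaded snapshot. The URI is a hypothetical placeholder.
#
#   version_details = CodeVersionDetails(
#       code_uri="https://examplestorage.blob.core.windows.net/snapshots/train-code",
#       description="Training entry scripts",
#   )
#   code_version = CodeVersionData(properties=version_details)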
class CodeVersionResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of CodeVersion entities.
:ivar next_link: The link to the next page of CodeVersion objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type CodeVersion.
:vartype value: list[~azure.mgmt.machinelearningservices.models.CodeVersionData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[CodeVersionData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["CodeVersionData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of CodeVersion objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type CodeVersion.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.CodeVersionData]
"""
super(CodeVersionResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class ColumnTransformer(msrest.serialization.Model):
"""Column transformer parameters.
:ivar fields: Fields to apply transformer logic on.
:vartype fields: list[str]
:ivar parameters: Different properties to be passed to transformer.
Input expected is dictionary of key,value pairs in JSON format.
:vartype parameters: any
"""
_attribute_map = {
'fields': {'key': 'fields', 'type': '[str]'},
'parameters': {'key': 'parameters', 'type': 'object'},
}
def __init__(
self,
*,
fields: Optional[List[str]] = None,
parameters: Optional[Any] = None,
**kwargs
):
"""
:keyword fields: Fields to apply transformer logic on.
:paramtype fields: list[str]
:keyword parameters: Different properties to be passed to transformer.
Input expected is dictionary of key,value pairs in JSON format.
:paramtype parameters: any
"""
super(ColumnTransformer, self).__init__(**kwargs)
self.fields = fields
self.parameters = parameters
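# Hand-written usage sketch (not produced by the code generator): a transformer applied
# to selected columns, with free-form parameters passed through as a JSON-compatible
# dictionary. The column names and parameters are hypothetical.
#
#   transformer = ColumnTransformer(
#       fields=["age", "income"],
#       parameters={"strategy": "median"},
#   )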
class CommandJob(JobBaseDetails):
"""Command job definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar compute_id: ARM resource ID of the compute resource.
:vartype compute_id: str
:ivar display_name: Display name of job.
:vartype display_name: str
:ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:vartype experiment_name: str
:ivar identity: Identity configuration. If set, this should be one of AmlToken,
ManagedIdentity, UserIdentity or null.
Defaults to AmlToken if null.
:vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:ivar is_archived: Is the asset archived?
:vartype is_archived: bool
:ivar job_type: Required. [Required] Specifies the type of job. Constant filled by server.
Possible values include: "AutoML", "Command", "Sweep", "Pipeline".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
:ivar schedule: Schedule definition of job.
If no schedule is provided, the job is run once and immediately after submission.
:vartype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:ivar status: Status of the job. Possible values include: "NotStarted", "Starting",
"Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
"Failed", "Canceled", "NotResponding", "Paused", "Unknown", "Scheduled".
:vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
:ivar code_id: ARM resource ID of the code asset.
:vartype code_id: str
:ivar command: Required. [Required] The command to execute on startup of the job, e.g. "python
train.py".
:vartype command: str
:ivar distribution: Distribution configuration of the job. If set, this should be one of Mpi,
Tensorflow, PyTorch, or null.
:vartype distribution: ~azure.mgmt.machinelearningservices.models.DistributionConfiguration
:ivar environment_id: Required. [Required] The ARM resource ID of the Environment specification
for the job.
:vartype environment_id: str
:ivar environment_variables: Environment variables included in the job.
:vartype environment_variables: dict[str, str]
:ivar inputs: Mapping of input data bindings used in the job.
:vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
:ivar limits: Command Job limit.
:vartype limits: ~azure.mgmt.machinelearningservices.models.CommandJobLimits
:ivar outputs: Mapping of output data bindings used in the job.
:vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
:ivar parameters: Input parameters.
:vartype parameters: any
:ivar resources: Compute Resource configuration for the job.
:vartype resources: ~azure.mgmt.machinelearningservices.models.ResourceConfiguration
"""
_validation = {
'job_type': {'required': True},
'status': {'readonly': True},
'command': {'required': True, 'min_length': 1, 'pattern': r'[a-zA-Z0-9_]'},
'environment_id': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'parameters': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'compute_id': {'key': 'computeId', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'experiment_name': {'key': 'experimentName', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'IdentityConfiguration'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'job_type': {'key': 'jobType', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': 'ScheduleBase'},
'services': {'key': 'services', 'type': '{JobService}'},
'status': {'key': 'status', 'type': 'str'},
'code_id': {'key': 'codeId', 'type': 'str'},
'command': {'key': 'command', 'type': 'str'},
'distribution': {'key': 'distribution', 'type': 'DistributionConfiguration'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'inputs': {'key': 'inputs', 'type': '{JobInput}'},
'limits': {'key': 'limits', 'type': 'CommandJobLimits'},
'outputs': {'key': 'outputs', 'type': '{JobOutput}'},
'parameters': {'key': 'parameters', 'type': 'object'},
'resources': {'key': 'resources', 'type': 'ResourceConfiguration'},
}
def __init__(
self,
*,
command: str,
environment_id: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
compute_id: Optional[str] = None,
display_name: Optional[str] = None,
experiment_name: Optional[str] = "Default",
identity: Optional["IdentityConfiguration"] = None,
is_archived: Optional[bool] = False,
schedule: Optional["ScheduleBase"] = None,
services: Optional[Dict[str, "JobService"]] = None,
code_id: Optional[str] = None,
distribution: Optional["DistributionConfiguration"] = None,
environment_variables: Optional[Dict[str, str]] = None,
inputs: Optional[Dict[str, "JobInput"]] = None,
limits: Optional["CommandJobLimits"] = None,
outputs: Optional[Dict[str, "JobOutput"]] = None,
resources: Optional["ResourceConfiguration"] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword compute_id: ARM resource ID of the compute resource.
:paramtype compute_id: str
:keyword display_name: Display name of job.
:paramtype display_name: str
:keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:paramtype experiment_name: str
:keyword identity: Identity configuration. If set, this should be one of AmlToken,
ManagedIdentity, UserIdentity or null.
Defaults to AmlToken if null.
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword schedule: Schedule definition of the job.
If no schedule is provided, the job is run once, immediately after submission.
:paramtype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:keyword code_id: ARM resource ID of the code asset.
:paramtype code_id: str
:keyword command: Required. [Required] The command to execute on startup of the job, e.g.
"python train.py".
:paramtype command: str
:keyword distribution: Distribution configuration of the job. If set, this should be one of
Mpi, TensorFlow, PyTorch, or null.
:paramtype distribution: ~azure.mgmt.machinelearningservices.models.DistributionConfiguration
:keyword environment_id: Required. [Required] The ARM resource ID of the Environment
specification for the job.
:paramtype environment_id: str
:keyword environment_variables: Environment variables included in the job.
:paramtype environment_variables: dict[str, str]
:keyword inputs: Mapping of input data bindings used in the job.
:paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
:keyword limits: Command Job limit.
:paramtype limits: ~azure.mgmt.machinelearningservices.models.CommandJobLimits
:keyword outputs: Mapping of output data bindings used in the job.
:paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
:keyword resources: Compute Resource configuration for the job.
:paramtype resources: ~azure.mgmt.machinelearningservices.models.ResourceConfiguration
"""
super(CommandJob, self).__init__(description=description, properties=properties, tags=tags, compute_id=compute_id, display_name=display_name, experiment_name=experiment_name, identity=identity, is_archived=is_archived, schedule=schedule, services=services, **kwargs)
self.job_type = 'Command' # type: str
self.code_id = code_id
self.command = command
self.distribution = distribution
self.environment_id = environment_id
self.environment_variables = environment_variables
self.inputs = inputs
self.limits = limits
self.outputs = outputs
self.parameters = None
self.resources = resources
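# Illustrative sketch (not part of the generated model surface): constructing a minimal
# CommandJob payload. The ARM ID below is a placeholder; real values come from your
# workspace. Relies on this module's existing imports (Dict/Optional/datetime).
def _example_build_command_job():
    """Return a CommandJob that runs "python train.py" in a placeholder environment."""
    environment_id = (
        "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
        "Microsoft.MachineLearningServices/workspaces/<ws>/environments/<env>/versions/1"
    )
    return CommandJob(
        command="python train.py",
        environment_id=environment_id,
        display_name="train-model",
        experiment_name="demo-experiments",
        environment_variables={"LOG_LEVEL": "INFO"},
        tags={"purpose": "illustration"},
    )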
class JobLimits(msrest.serialization.Model):
"""JobLimits.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CommandJobLimits, SweepJobLimits.
All required parameters must be populated in order to send to Azure.
:ivar job_limits_type: Required. [Required] JobLimit type.Constant filled by server. Possible
values include: "Command", "Sweep".
:vartype job_limits_type: str or ~azure.mgmt.machinelearningservices.models.JobLimitsType
:ivar timeout: The max run duration in ISO 8601 format, after which the job will be cancelled.
Only supports duration with precision as low as Seconds.
:vartype timeout: ~datetime.timedelta
"""
_validation = {
'job_limits_type': {'required': True},
}
_attribute_map = {
'job_limits_type': {'key': 'jobLimitsType', 'type': 'str'},
'timeout': {'key': 'timeout', 'type': 'duration'},
}
_subtype_map = {
'job_limits_type': {'Command': 'CommandJobLimits', 'Sweep': 'SweepJobLimits'}
}
def __init__(
self,
*,
timeout: Optional[datetime.timedelta] = None,
**kwargs
):
"""
:keyword timeout: The max run duration in ISO 8601 format, after which the job will be
cancelled. Only supports duration with precision as low as Seconds.
:paramtype timeout: ~datetime.timedelta
"""
super(JobLimits, self).__init__(**kwargs)
self.job_limits_type = None # type: Optional[str]
self.timeout = timeout
class CommandJobLimits(JobLimits):
"""Command Job limit class.
All required parameters must be populated in order to send to Azure.
:ivar job_limits_type: Required. [Required] JobLimit type.Constant filled by server. Possible
values include: "Command", "Sweep".
:vartype job_limits_type: str or ~azure.mgmt.machinelearningservices.models.JobLimitsType
:ivar timeout: The max run duration in ISO 8601 format, after which the job will be cancelled.
Only supports duration with precision as low as Seconds.
:vartype timeout: ~datetime.timedelta
"""
_validation = {
'job_limits_type': {'required': True},
}
_attribute_map = {
'job_limits_type': {'key': 'jobLimitsType', 'type': 'str'},
'timeout': {'key': 'timeout', 'type': 'duration'},
}
def __init__(
self,
*,
timeout: Optional[datetime.timedelta] = None,
**kwargs
):
"""
:keyword timeout: The max run duration in ISO 8601 format, after which the job will be
cancelled. Only supports duration with precision as low as Seconds.
:paramtype timeout: ~datetime.timedelta
"""
super(CommandJobLimits, self).__init__(timeout=timeout, **kwargs)
self.job_limits_type = 'Command' # type: str
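# Illustrative sketch: the timeout on CommandJobLimits is a datetime.timedelta and is
# serialized as an ISO 8601 duration (e.g. PT30M); per the docstring, precision below
# one second is not honored.
def _example_command_job_timeout():
    """Return a CommandJobLimits that cancels the job after 30 minutes."""
    return CommandJobLimits(timeout=datetime.timedelta(minutes=30))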
class ComponentContainerData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.ComponentContainerDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'ComponentContainerDetails'},
}
def __init__(
self,
*,
properties: "ComponentContainerDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.ComponentContainerDetails
"""
super(ComponentContainerData, self).__init__(**kwargs)
self.properties = properties
class ComponentContainerDetails(AssetContainer):
"""Component container definition.
.. raw:: html
<see href="https://docs.microsoft.com/en-us/azure/machine-learning/reference-yaml-component-command" />.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar latest_version: The latest version inside this container.
:vartype latest_version: str
:ivar next_version: The next auto incremental version.
:vartype next_version: str
"""
_validation = {
'latest_version': {'readonly': True},
'next_version': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'latest_version': {'key': 'latestVersion', 'type': 'str'},
'next_version': {'key': 'nextVersion', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_archived: Optional[bool] = False,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
"""
super(ComponentContainerDetails, self).__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
class ComponentContainerResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of ComponentContainer entities.
:ivar next_link: The link to the next page of ComponentContainer objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type ComponentContainer.
:vartype value: list[~azure.mgmt.machinelearningservices.models.ComponentContainerData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[ComponentContainerData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["ComponentContainerData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of ComponentContainer objects. If null, there are
no additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type ComponentContainer.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.ComponentContainerData]
"""
super(ComponentContainerResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class ComponentVersionData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.ComponentVersionDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'ComponentVersionDetails'},
}
def __init__(
self,
*,
properties: "ComponentVersionDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.ComponentVersionDetails
"""
super(ComponentVersionData, self).__init__(**kwargs)
self.properties = properties
class ComponentVersionDetails(AssetBase):
"""Definition of a component version: defines resources that span component types.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_anonymous: If the name and version are system generated (anonymous registration).
:vartype is_anonymous: bool
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar component_spec: Defines Component definition details.
.. raw:: html
<see
href="https://docs.microsoft.com/en-us/azure/machine-learning/reference-yaml-component-command"
/>.
:vartype component_spec: any
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_anonymous': {'key': 'isAnonymous', 'type': 'bool'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'component_spec': {'key': 'componentSpec', 'type': 'object'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_anonymous: Optional[bool] = False,
is_archived: Optional[bool] = False,
component_spec: Optional[Any] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_anonymous: If the name and version are system generated (anonymous registration).
:paramtype is_anonymous: bool
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword component_spec: Defines Component definition details.
.. raw:: html
<see
href="https://docs.microsoft.com/en-us/azure/machine-learning/reference-yaml-component-command"
/>.
:paramtype component_spec: any
"""
super(ComponentVersionDetails, self).__init__(description=description, properties=properties, tags=tags, is_anonymous=is_anonymous, is_archived=is_archived, **kwargs)
self.component_spec = component_spec
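# Illustrative sketch: component_spec is typed as an opaque object, so a plain dict that
# mirrors the command-component YAML schema can be passed through unchanged. The keys
# below are placeholders for illustration and are not validated by this model.
def _example_component_version():
    """Return a ComponentVersionDetails wrapping a minimal command component spec."""
    spec = {
        "name": "prep_data",
        "type": "command",
        "command": "python prep.py --raw ${{inputs.raw_data}}",
    }
    return ComponentVersionDetails(
        description="Data preparation component",
        tags={"team": "demo"},
        component_spec=spec,
    )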
class ComponentVersionResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of ComponentVersion entities.
:ivar next_link: The link to the next page of ComponentVersion objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type ComponentVersion.
:vartype value: list[~azure.mgmt.machinelearningservices.models.ComponentVersionData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[ComponentVersionData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["ComponentVersionData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of ComponentVersion objects. If null, there are
no additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type ComponentVersion.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.ComponentVersionData]
"""
super(ComponentVersionResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class ContainerResourceRequirements(msrest.serialization.Model):
"""Resource requirements for each container instance within an online deployment.
:ivar container_resource_limits: Container resource limit info:.
:vartype container_resource_limits:
~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
:ivar container_resource_requests: Container resource request info:.
:vartype container_resource_requests:
~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
"""
_attribute_map = {
'container_resource_limits': {'key': 'containerResourceLimits', 'type': 'ContainerResourceSettings'},
'container_resource_requests': {'key': 'containerResourceRequests', 'type': 'ContainerResourceSettings'},
}
def __init__(
self,
*,
container_resource_limits: Optional["ContainerResourceSettings"] = None,
container_resource_requests: Optional["ContainerResourceSettings"] = None,
**kwargs
):
"""
:keyword container_resource_limits: Container resource limit info:.
:paramtype container_resource_limits:
~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
:keyword container_resource_requests: Container resource request info:.
:paramtype container_resource_requests:
~azure.mgmt.machinelearningservices.models.ContainerResourceSettings
"""
super(ContainerResourceRequirements, self).__init__(**kwargs)
self.container_resource_limits = container_resource_limits
self.container_resource_requests = container_resource_requests
class ContainerResourceSettings(msrest.serialization.Model):
"""ContainerResourceSettings.
:ivar cpu: Number of vCPUs request/limit for container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
:vartype cpu: str
:ivar gpu: Number of Nvidia GPU cards request/limit for container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
:vartype gpu: str
:ivar memory: Memory size request/limit for container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
:vartype memory: str
"""
_attribute_map = {
'cpu': {'key': 'cpu', 'type': 'str'},
'gpu': {'key': 'gpu', 'type': 'str'},
'memory': {'key': 'memory', 'type': 'str'},
}
def __init__(
self,
*,
cpu: Optional[str] = None,
gpu: Optional[str] = None,
memory: Optional[str] = None,
**kwargs
):
"""
:keyword cpu: Number of vCPUs request/limit for container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
:paramtype cpu: str
:keyword gpu: Number of Nvidia GPU cards request/limit for container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
:paramtype gpu: str
:keyword memory: Memory size request/limit for container. More info:
https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/.
:paramtype memory: str
"""
super(ContainerResourceSettings, self).__init__(**kwargs)
self.cpu = cpu
self.gpu = gpu
self.memory = memory
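# Illustrative sketch: requests and limits are two separate ContainerResourceSettings, and
# cpu/gpu/memory are Kubernetes-style quantity strings as described in the docstrings above.
def _example_container_resources():
    """Return requirements requesting 1 vCPU / 2Gi and capping the container at 2 vCPUs / 4Gi."""
    return ContainerResourceRequirements(
        container_resource_requests=ContainerResourceSettings(cpu="1", memory="2Gi"),
        container_resource_limits=ContainerResourceSettings(cpu="2", memory="4Gi"),
    )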
class ScheduleBase(msrest.serialization.Model):
"""Base definition of a schedule.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CronSchedule, RecurrenceSchedule.
All required parameters must be populated in order to send to Azure.
:ivar end_time: Specifies end time of schedule in ISO 8601 format.
If not present, the schedule will run indefinitely.
:vartype end_time: ~datetime.datetime
:ivar schedule_status: Specifies the schedule's status. Possible values include: "Enabled",
"Disabled".
:vartype schedule_status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
:ivar schedule_type: Required. [Required] Specifies the schedule type.Constant filled by
server. Possible values include: "Cron", "Recurrence".
:vartype schedule_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleType
:ivar start_time: Specifies start time of schedule in ISO 8601 format.
:vartype start_time: ~datetime.datetime
:ivar time_zone: Specifies time zone in which the schedule runs.
TimeZone should follow Windows time zone format.
:vartype time_zone: str
"""
_validation = {
'schedule_type': {'required': True},
}
_attribute_map = {
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'schedule_status': {'key': 'scheduleStatus', 'type': 'str'},
'schedule_type': {'key': 'scheduleType', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
}
_subtype_map = {
'schedule_type': {'Cron': 'CronSchedule', 'Recurrence': 'RecurrenceSchedule'}
}
def __init__(
self,
*,
end_time: Optional[datetime.datetime] = None,
schedule_status: Optional[Union[str, "ScheduleStatus"]] = None,
start_time: Optional[datetime.datetime] = None,
time_zone: Optional[str] = "UTC",
**kwargs
):
"""
:keyword end_time: Specifies end time of schedule in ISO 8601 format.
If not present, the schedule will run indefinitely.
:paramtype end_time: ~datetime.datetime
:keyword schedule_status: Specifies the schedule's status. Possible values include: "Enabled",
"Disabled".
:paramtype schedule_status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
:keyword start_time: Specifies start time of schedule in ISO 8601 format.
:paramtype start_time: ~datetime.datetime
:keyword time_zone: Specifies time zone in which the schedule runs.
TimeZone should follow Windows time zone format.
:paramtype time_zone: str
"""
super(ScheduleBase, self).__init__(**kwargs)
self.end_time = end_time
self.schedule_status = schedule_status
self.schedule_type = None # type: Optional[str]
self.start_time = start_time
self.time_zone = time_zone
class CronSchedule(ScheduleBase):
"""Cron schedule definition.
All required parameters must be populated in order to send to Azure.
:ivar end_time: Specifies end time of schedule in ISO 8601 format.
If not present, the schedule will run indefinitely.
:vartype end_time: ~datetime.datetime
:ivar schedule_status: Specifies the schedule's status. Possible values include: "Enabled",
"Disabled".
:vartype schedule_status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
:ivar schedule_type: Required. [Required] Specifies the schedule type.Constant filled by
server. Possible values include: "Cron", "Recurrence".
:vartype schedule_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleType
:ivar start_time: Specifies start time of schedule in ISO 8601 format.
:vartype start_time: ~datetime.datetime
:ivar time_zone: Specifies time zone in which the schedule runs.
TimeZone should follow Windows time zone format.
:vartype time_zone: str
:ivar expression: Required. [Required] Specifies cron expression of schedule.
The expression should follow NCronTab format.
:vartype expression: str
"""
_validation = {
'schedule_type': {'required': True},
'expression': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'schedule_status': {'key': 'scheduleStatus', 'type': 'str'},
'schedule_type': {'key': 'scheduleType', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
'expression': {'key': 'expression', 'type': 'str'},
}
def __init__(
self,
*,
expression: str,
end_time: Optional[datetime.datetime] = None,
schedule_status: Optional[Union[str, "ScheduleStatus"]] = None,
start_time: Optional[datetime.datetime] = None,
time_zone: Optional[str] = "UTC",
**kwargs
):
"""
:keyword end_time: Specifies end time of schedule in ISO 8601 format.
If not present, the schedule will run indefinitely.
:paramtype end_time: ~datetime.datetime
:keyword schedule_status: Specifies the schedule's status. Possible values include: "Enabled",
"Disabled".
:paramtype schedule_status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
:keyword start_time: Specifies start time of schedule in ISO 8601 format.
:paramtype start_time: ~datetime.datetime
:keyword time_zone: Specifies time zone in which the schedule runs.
TimeZone should follow Windows time zone format.
:paramtype time_zone: str
:keyword expression: Required. [Required] Specifies cron expression of schedule.
The expression should follow NCronTab format.
:paramtype expression: str
"""
super(CronSchedule, self).__init__(end_time=end_time, schedule_status=schedule_status, start_time=start_time, time_zone=time_zone, **kwargs)
self.schedule_type = 'Cron' # type: str
self.expression = expression
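# Illustrative sketch: the expression follows NCronTab syntax and the time zone uses the
# Windows time-zone naming convention, as the docstrings above state.
def _example_nightly_cron_schedule():
    """Return a CronSchedule that fires every day at 02:00 Pacific time."""
    return CronSchedule(
        expression="0 2 * * *",
        schedule_status="Enabled",
        start_time=datetime.datetime(2022, 1, 1),
        time_zone="Pacific Standard Time",
    )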
class CustomForecastHorizon(ForecastHorizon):
"""The desired maximum forecast horizon in units of time-series frequency.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Set forecast horizon value selection mode.Constant filled by
server. Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.ForecastHorizonMode
:ivar value: Required. [Required] Forecast horizon value.
:vartype value: int
"""
_validation = {
'mode': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'value': {'key': 'value', 'type': 'int'},
}
def __init__(
self,
*,
value: int,
**kwargs
):
"""
:keyword value: Required. [Required] Forecast horizon value.
:paramtype value: int
"""
super(CustomForecastHorizon, self).__init__(**kwargs)
self.mode = 'Custom' # type: str
self.value = value
class JobInput(msrest.serialization.Model):
"""Command job definition.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CustomModelJobInput, LiteralJobInput, MLFlowModelJobInput, MLTableJobInput, TritonModelJobInput, UriFileJobInput, UriFolderJobInput.
All required parameters must be populated in order to send to Azure.
:ivar description: Description for the input.
:vartype description: str
:ivar job_input_type: Required. [Required] Specifies the type of job.Constant filled by server.
Possible values include: "Literal", "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
"""
_validation = {
'job_input_type': {'required': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'job_input_type': {'key': 'jobInputType', 'type': 'str'},
}
_subtype_map = {
'job_input_type': {'CustomModel': 'CustomModelJobInput', 'Literal': 'LiteralJobInput', 'MLFlowModel': 'MLFlowModelJobInput', 'MLTable': 'MLTableJobInput', 'TritonModel': 'TritonModelJobInput', 'UriFile': 'UriFileJobInput', 'UriFolder': 'UriFolderJobInput'}
}
def __init__(
self,
*,
description: Optional[str] = None,
**kwargs
):
"""
:keyword description: Description for the input.
:paramtype description: str
"""
super(JobInput, self).__init__(**kwargs)
self.description = description
self.job_input_type = None # type: Optional[str]
class CustomModelJobInput(JobInput, AssetJobInput):
"""CustomModelJobInput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:ivar uri: Required. [Required] Input Asset URI.
:vartype uri: str
:ivar description: Description for the input.
:vartype description: str
:ivar job_input_type: Required. [Required] Specifies the type of job.Constant filled by server.
Possible values include: "Literal", "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
"""
_validation = {
'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'job_input_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_input_type': {'key': 'jobInputType', 'type': 'str'},
}
def __init__(
self,
*,
uri: str,
mode: Optional[Union[str, "InputDeliveryMode"]] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:keyword uri: Required. [Required] Input Asset URI.
:paramtype uri: str
:keyword description: Description for the input.
:paramtype description: str
"""
super(CustomModelJobInput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.description = description
self.job_input_type = 'CustomModel'  # type: str
class JobOutput(msrest.serialization.Model):
"""Job output definition container information on where to find job output/logs.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: CustomModelJobOutput, MLFlowModelJobOutput, MLTableJobOutput, TritonModelJobOutput, UriFileJobOutput, UriFolderJobOutput.
All required parameters must be populated in order to send to Azure.
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: Required. [Required] Specifies the type of job.Constant filled by
server. Possible values include: "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
"""
_validation = {
'job_output_type': {'required': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
}
_subtype_map = {
'job_output_type': {'CustomModel': 'CustomModelJobOutput', 'MLFlowModel': 'MLFlowModelJobOutput', 'MLTable': 'MLTableJobOutput', 'TritonModel': 'TritonModelJobOutput', 'UriFile': 'UriFileJobOutput', 'UriFolder': 'UriFolderJobOutput'}
}
def __init__(
self,
*,
description: Optional[str] = None,
**kwargs
):
"""
:keyword description: Description for the output.
:paramtype description: str
"""
super(JobOutput, self).__init__(**kwargs)
self.description = description
self.job_output_type = None # type: Optional[str]
class CustomModelJobOutput(JobOutput, AssetJobOutput):
"""CustomModelJobOutput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: Required. [Required] Specifies the type of job.Constant filled by
server. Possible values include: "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
"""
_validation = {
'job_output_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
}
def __init__(
self,
*,
mode: Optional[Union[str, "OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
:keyword description: Description for the output.
:paramtype description: str
"""
super(CustomModelJobOutput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.description = description
self.job_output_type = 'CustomModel'  # type: str
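# Illustrative sketch: job inputs/outputs are plain dictionaries keyed by binding name; the
# azureml URIs below are placeholders, and "Download"/"Upload" come from the delivery-mode
# enums documented above.
def _example_custom_model_bindings():
    """Return (inputs, outputs) dictionaries that pass a custom model through a job."""
    inputs = {
        "base_model": CustomModelJobInput(
            uri="azureml://registries/demo/models/base-model/versions/1",
            mode="Download",
        ),
    }
    outputs = {
        "tuned_model": CustomModelJobOutput(
            uri="azureml://datastores/workspaceblobstore/paths/models/tuned/",
            mode="Upload",
        ),
    }
    return inputs, outputs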
class CustomNCrossValidations(NCrossValidations):
"""N-Cross validations are specified by user.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Mode for determining N-Cross validations.Constant filled by
server. Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.NCrossValidationsMode
:ivar value: Required. [Required] N-Cross validations value.
:vartype value: int
"""
_validation = {
'mode': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'value': {'key': 'value', 'type': 'int'},
}
def __init__(
self,
*,
value: int,
**kwargs
):
"""
:keyword value: Required. [Required] N-Cross validations value.
:paramtype value: int
"""
super(CustomNCrossValidations, self).__init__(**kwargs)
self.mode = 'Custom' # type: str
self.value = value
class CustomSeasonality(Seasonality):
"""CustomSeasonality.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Seasonality mode.Constant filled by server. Possible values
include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.SeasonalityMode
:ivar value: Required. [Required] Seasonality value.
:vartype value: int
"""
_validation = {
'mode': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'value': {'key': 'value', 'type': 'int'},
}
def __init__(
self,
*,
value: int,
**kwargs
):
"""
:keyword value: Required. [Required] Seasonality value.
:paramtype value: int
"""
super(CustomSeasonality, self).__init__(**kwargs)
self.mode = 'Custom' # type: str
self.value = value
class CustomTargetLags(TargetLags):
"""CustomTargetLags.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] Set target lags mode - Auto/Custom.Constant filled by server.
Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.TargetLagsMode
:ivar values: Required. [Required] Set target lags values.
:vartype values: list[int]
"""
_validation = {
'mode': {'required': True},
'values': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'values': {'key': 'values', 'type': '[int]'},
}
def __init__(
self,
*,
values: List[int],
**kwargs
):
"""
:keyword values: Required. [Required] Set target lags values.
:paramtype values: list[int]
"""
super(CustomTargetLags, self).__init__(**kwargs)
self.mode = 'Custom' # type: str
self.values = values
class CustomTargetRollingWindowSize(TargetRollingWindowSize):
"""CustomTargetRollingWindowSize.
All required parameters must be populated in order to send to Azure.
:ivar mode: Required. [Required] TargetRollingWindowSize detection mode.Constant filled by
server. Possible values include: "Auto", "Custom".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.TargetRollingWindowSizeMode
:ivar value: Required. [Required] TargetRollingWindowSize value.
:vartype value: int
"""
_validation = {
'mode': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'value': {'key': 'value', 'type': 'int'},
}
def __init__(
self,
*,
value: int,
**kwargs
):
"""
:keyword value: Required. [Required] TargetRollingWindowSize value.
:paramtype value: int
"""
super(CustomTargetRollingWindowSize, self).__init__(**kwargs)
self.mode = 'Custom' # type: str
self.value = value
class DataContainerData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.DataContainerDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'DataContainerDetails'},
}
def __init__(
self,
*,
properties: "DataContainerDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.DataContainerDetails
"""
super(DataContainerData, self).__init__(**kwargs)
self.properties = properties
class DataContainerDetails(AssetContainer):
"""Container for data asset versions.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar latest_version: The latest version inside this container.
:vartype latest_version: str
:ivar next_version: The next auto incremental version.
:vartype next_version: str
:ivar data_type: Required. [Required] Specifies the type of data. Possible values include:
"UriFile", "UriFolder", "MLTable".
:vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
"""
_validation = {
'latest_version': {'readonly': True},
'next_version': {'readonly': True},
'data_type': {'required': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'latest_version': {'key': 'latestVersion', 'type': 'str'},
'next_version': {'key': 'nextVersion', 'type': 'str'},
'data_type': {'key': 'dataType', 'type': 'str'},
}
def __init__(
self,
*,
data_type: Union[str, "DataType"],
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_archived: Optional[bool] = False,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword data_type: Required. [Required] Specifies the type of data. Possible values include:
"UriFile", "UriFolder", "MLTable".
:paramtype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
"""
super(DataContainerDetails, self).__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
self.data_type = data_type
class DataContainerResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of DataContainer entities.
:ivar next_link: The link to the next page of DataContainer objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type DataContainer.
:vartype value: list[~azure.mgmt.machinelearningservices.models.DataContainerData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[DataContainerData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["DataContainerData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of DataContainer objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type DataContainer.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.DataContainerData]
"""
super(DataContainerResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class DataPathAssetReference(AssetReferenceBase):
"""Reference to an asset via its path in a datastore.
All required parameters must be populated in order to send to Azure.
:ivar reference_type: Required. [Required] Specifies the type of asset reference.Constant
filled by server. Possible values include: "Id", "DataPath", "OutputPath".
:vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
:ivar datastore_id: ARM resource ID of the datastore where the asset is located.
:vartype datastore_id: str
:ivar path: The path of the file/directory in the datastore.
:vartype path: str
"""
_validation = {
'reference_type': {'required': True},
}
_attribute_map = {
'reference_type': {'key': 'referenceType', 'type': 'str'},
'datastore_id': {'key': 'datastoreId', 'type': 'str'},
'path': {'key': 'path', 'type': 'str'},
}
def __init__(
self,
*,
datastore_id: Optional[str] = None,
path: Optional[str] = None,
**kwargs
):
"""
:keyword datastore_id: ARM resource ID of the datastore where the asset is located.
:paramtype datastore_id: str
:keyword path: The path of the file/directory in the datastore.
:paramtype path: str
"""
super(DataPathAssetReference, self).__init__(**kwargs)
self.reference_type = 'DataPath' # type: str
self.datastore_id = datastore_id
self.path = path
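# Illustrative sketch: a DataPathAssetReference addresses a file or folder inside a
# registered datastore; the datastore ARM ID and relative path below are placeholders.
def _example_datastore_reference():
    """Return a DataPathAssetReference pointing at a CSV file in the default blob datastore."""
    datastore_id = (
        "/subscriptions/<sub>/resourceGroups/<rg>/providers/"
        "Microsoft.MachineLearningServices/workspaces/<ws>/datastores/workspaceblobstore"
    )
    return DataPathAssetReference(datastore_id=datastore_id, path="raw/iris.csv")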
class DataSettings(msrest.serialization.Model):
"""Collection of registered Tabular Dataset Ids and other data settings required for training and validating models.
All required parameters must be populated in order to send to Azure.
:ivar target_column_name: Required. [Required] Target column name: the column that contains
the prediction values.
Also known as the label column name in the context of classification tasks.
:vartype target_column_name: str
:ivar test_data: Test data input.
:vartype test_data: ~azure.mgmt.machinelearningservices.models.TestDataSettings
:ivar training_data: Required. [Required] Training data input.
:vartype training_data: ~azure.mgmt.machinelearningservices.models.TrainingDataSettings
"""
_validation = {
'target_column_name': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'training_data': {'required': True},
}
_attribute_map = {
'target_column_name': {'key': 'targetColumnName', 'type': 'str'},
'test_data': {'key': 'testData', 'type': 'TestDataSettings'},
'training_data': {'key': 'trainingData', 'type': 'TrainingDataSettings'},
}
def __init__(
self,
*,
target_column_name: str,
training_data: "TrainingDataSettings",
test_data: Optional["TestDataSettings"] = None,
**kwargs
):
"""
:keyword target_column_name: Required. [Required] Target column name: the column that
contains the prediction values.
Also known as the label column name in the context of classification tasks.
:paramtype target_column_name: str
:keyword test_data: Test data input.
:paramtype test_data: ~azure.mgmt.machinelearningservices.models.TestDataSettings
:keyword training_data: Required. [Required] Training data input.
:paramtype training_data: ~azure.mgmt.machinelearningservices.models.TrainingDataSettings
"""
super(DataSettings, self).__init__(**kwargs)
self.target_column_name = target_column_name
self.test_data = test_data
self.training_data = training_data
class DatastoreData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.DatastoreDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'DatastoreDetails'},
}
def __init__(
self,
*,
properties: "DatastoreDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.DatastoreDetails
"""
super(DatastoreData, self).__init__(**kwargs)
self.properties = properties
class DatastoreResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of Datastore entities.
:ivar next_link: The link to the next page of Datastore objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type Datastore.
:vartype value: list[~azure.mgmt.machinelearningservices.models.DatastoreData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[DatastoreData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["DatastoreData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of Datastore objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type Datastore.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.DatastoreData]
"""
super(DatastoreResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class DataVersionBaseData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.DataVersionBaseDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'DataVersionBaseDetails'},
}
def __init__(
self,
*,
properties: "DataVersionBaseDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.DataVersionBaseDetails
"""
super(DataVersionBaseData, self).__init__(**kwargs)
self.properties = properties
class DataVersionBaseDetails(AssetBase):
"""Data version base definition.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: MLTableData, UriFileDataVersion, UriFolderDataVersion.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_anonymous: If the name and version are system generated (anonymous registration).
:vartype is_anonymous: bool
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar data_type: Required. [Required] Specifies the type of data.Constant filled by server.
Possible values include: "UriFile", "UriFolder", "MLTable".
:vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
:ivar data_uri: Required. [Required] Uri of the data. Usage/meaning depends on
Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20220201Preview.Assets.DataVersionBase.DataType.
:vartype data_uri: str
"""
_validation = {
'data_type': {'required': True},
'data_uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_anonymous': {'key': 'isAnonymous', 'type': 'bool'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'data_type': {'key': 'dataType', 'type': 'str'},
'data_uri': {'key': 'dataUri', 'type': 'str'},
}
_subtype_map = {
'data_type': {'MLTable': 'MLTableData', 'UriFile': 'UriFileDataVersion', 'UriFolder': 'UriFolderDataVersion'}
}
def __init__(
self,
*,
data_uri: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_anonymous: Optional[bool] = False,
is_archived: Optional[bool] = False,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_anonymous: If the name and version are system generated (anonymous registration).
:paramtype is_anonymous: bool
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword data_uri: Required. [Required] Uri of the data. Usage/meaning depends on
Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20220201Preview.Assets.DataVersionBase.DataType.
:paramtype data_uri: str
"""
super(DataVersionBaseDetails, self).__init__(description=description, properties=properties, tags=tags, is_anonymous=is_anonymous, is_archived=is_archived, **kwargs)
self.data_type = 'DataVersionBaseDetails' # type: str
self.data_uri = data_uri
class DataVersionBaseResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of DataVersionBase entities.
:ivar next_link: The link to the next page of DataVersionBase objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type DataVersionBase.
:vartype value: list[~azure.mgmt.machinelearningservices.models.DataVersionBaseData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[DataVersionBaseData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["DataVersionBaseData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of DataVersionBase objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type DataVersionBase.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.DataVersionBaseData]
"""
super(DataVersionBaseResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class OnlineScaleSettings(msrest.serialization.Model):
"""Online deployment scaling configuration.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: DefaultScaleSettings, TargetUtilizationScaleSettings.
All required parameters must be populated in order to send to Azure.
:ivar scale_type: Required. [Required] Type of deployment scaling algorithm.Constant filled by
server. Possible values include: "Default", "TargetUtilization".
:vartype scale_type: str or ~azure.mgmt.machinelearningservices.models.ScaleType
"""
_validation = {
'scale_type': {'required': True},
}
_attribute_map = {
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
_subtype_map = {
'scale_type': {'Default': 'DefaultScaleSettings', 'TargetUtilization': 'TargetUtilizationScaleSettings'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(OnlineScaleSettings, self).__init__(**kwargs)
self.scale_type = None # type: Optional[str]
class DefaultScaleSettings(OnlineScaleSettings):
"""DefaultScaleSettings.
All required parameters must be populated in order to send to Azure.
:ivar scale_type: Required. [Required] Type of deployment scaling algorithm.Constant filled by
server. Possible values include: "Default", "TargetUtilization".
:vartype scale_type: str or ~azure.mgmt.machinelearningservices.models.ScaleType
"""
_validation = {
'scale_type': {'required': True},
}
_attribute_map = {
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(DefaultScaleSettings, self).__init__(**kwargs)
self.scale_type = 'Default' # type: str
class DeploymentLogs(msrest.serialization.Model):
"""DeploymentLogs.
:ivar content: The retrieved online deployment logs.
:vartype content: str
"""
_attribute_map = {
'content': {'key': 'content', 'type': 'str'},
}
def __init__(
self,
*,
content: Optional[str] = None,
**kwargs
):
"""
:keyword content: The retrieved online deployment logs.
:paramtype content: str
"""
super(DeploymentLogs, self).__init__(**kwargs)
self.content = content
class DeploymentLogsRequest(msrest.serialization.Model):
"""DeploymentLogsRequest.
:ivar container_type: The type of container to retrieve logs from. Possible values include:
"StorageInitializer", "InferenceServer".
:vartype container_type: str or ~azure.mgmt.machinelearningservices.models.ContainerType
:ivar tail: The maximum number of lines to tail.
:vartype tail: int
"""
_attribute_map = {
'container_type': {'key': 'containerType', 'type': 'str'},
'tail': {'key': 'tail', 'type': 'int'},
}
def __init__(
self,
*,
container_type: Optional[Union[str, "ContainerType"]] = None,
tail: Optional[int] = None,
**kwargs
):
"""
:keyword container_type: The type of container to retrieve logs from. Possible values include:
"StorageInitializer", "InferenceServer".
:paramtype container_type: str or ~azure.mgmt.machinelearningservices.models.ContainerType
:keyword tail: The maximum number of lines to tail.
:paramtype tail: int
"""
super(DeploymentLogsRequest, self).__init__(**kwargs)
self.container_type = container_type
self.tail = tail
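# Illustrative sketch: a DeploymentLogsRequest narrows the log fetch to one container type
# and the last N lines; "InferenceServer" is one of the documented ContainerType values.
def _example_inference_logs_request():
    """Return a request for the last 100 lines of the inference-server container logs."""
    return DeploymentLogsRequest(container_type="InferenceServer", tail=100)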
class DistributionConfiguration(msrest.serialization.Model):
"""Base definition for job distribution configuration.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: Mpi, PyTorch, TensorFlow.
All required parameters must be populated in order to send to Azure.
:ivar distribution_type: Required. [Required] Specifies the type of distribution
framework.Constant filled by server. Possible values include: "PyTorch", "TensorFlow", "Mpi".
:vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
"""
_validation = {
'distribution_type': {'required': True},
}
_attribute_map = {
'distribution_type': {'key': 'distributionType', 'type': 'str'},
}
_subtype_map = {
'distribution_type': {'Mpi': 'Mpi', 'PyTorch': 'PyTorch', 'TensorFlow': 'TensorFlow'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(DistributionConfiguration, self).__init__(**kwargs)
self.distribution_type = None # type: Optional[str]
class EndpointAuthKeys(msrest.serialization.Model):
"""Keys for endpoint authentication.
:ivar primary_key: The primary key.
:vartype primary_key: str
:ivar secondary_key: The secondary key.
:vartype secondary_key: str
"""
_attribute_map = {
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
}
def __init__(
self,
*,
primary_key: Optional[str] = None,
secondary_key: Optional[str] = None,
**kwargs
):
"""
:keyword primary_key: The primary key.
:paramtype primary_key: str
:keyword secondary_key: The secondary key.
:paramtype secondary_key: str
"""
super(EndpointAuthKeys, self).__init__(**kwargs)
self.primary_key = primary_key
self.secondary_key = secondary_key
class EndpointAuthToken(msrest.serialization.Model):
"""Service Token.
:ivar access_token: Access token for endpoint authentication.
:vartype access_token: str
:ivar expiry_time_utc: Access token expiry time (UTC).
:vartype expiry_time_utc: long
:ivar refresh_after_time_utc: Refresh access token after time (UTC).
:vartype refresh_after_time_utc: long
:ivar token_type: Access token type.
:vartype token_type: str
"""
_attribute_map = {
'access_token': {'key': 'accessToken', 'type': 'str'},
'expiry_time_utc': {'key': 'expiryTimeUtc', 'type': 'long'},
'refresh_after_time_utc': {'key': 'refreshAfterTimeUtc', 'type': 'long'},
'token_type': {'key': 'tokenType', 'type': 'str'},
}
def __init__(
self,
*,
access_token: Optional[str] = None,
expiry_time_utc: Optional[int] = 0,
refresh_after_time_utc: Optional[int] = 0,
token_type: Optional[str] = None,
**kwargs
):
"""
:keyword access_token: Access token for endpoint authentication.
:paramtype access_token: str
:keyword expiry_time_utc: Access token expiry time (UTC).
:paramtype expiry_time_utc: long
:keyword refresh_after_time_utc: Refresh access token after time (UTC).
:paramtype refresh_after_time_utc: long
:keyword token_type: Access token type.
:paramtype token_type: str
"""
super(EndpointAuthToken, self).__init__(**kwargs)
self.access_token = access_token
self.expiry_time_utc = expiry_time_utc
self.refresh_after_time_utc = refresh_after_time_utc
self.token_type = token_type
class EnvironmentContainerData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.EnvironmentContainerDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'EnvironmentContainerDetails'},
}
def __init__(
self,
*,
properties: "EnvironmentContainerDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.EnvironmentContainerDetails
"""
super(EnvironmentContainerData, self).__init__(**kwargs)
self.properties = properties
class EnvironmentContainerDetails(AssetContainer):
"""Container for environment specification versions.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar latest_version: The latest version inside this container.
:vartype latest_version: str
:ivar next_version: The next auto incremental version.
:vartype next_version: str
"""
_validation = {
'latest_version': {'readonly': True},
'next_version': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'latest_version': {'key': 'latestVersion', 'type': 'str'},
'next_version': {'key': 'nextVersion', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_archived: Optional[bool] = False,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
"""
super(EnvironmentContainerDetails, self).__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
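# Illustrative sketch: an EnvironmentContainerDetails payload wrapped in the
# EnvironmentContainerData ARM envelope defined above. All field values are
# example placeholders, not values required by the service.
def _example_environment_container() -> "EnvironmentContainerData":
    """Wrap example container details in the ARM resource envelope."""
    details = EnvironmentContainerDetails(
        description="Training environments for the demo project",
        tags={"team": "demo"},
    )
    return EnvironmentContainerData(properties=details)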
class EnvironmentContainerResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of EnvironmentContainer entities.
:ivar next_link: The link to the next page of EnvironmentContainer objects. If null, there are
no additional pages.
:vartype next_link: str
:ivar value: An array of objects of type EnvironmentContainer.
:vartype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentContainerData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[EnvironmentContainerData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["EnvironmentContainerData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of EnvironmentContainer objects. If null, there
are no additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type EnvironmentContainer.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentContainerData]
"""
super(EnvironmentContainerResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class EnvironmentVersionData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.EnvironmentVersionDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'EnvironmentVersionDetails'},
}
def __init__(
self,
*,
properties: "EnvironmentVersionDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.EnvironmentVersionDetails
"""
super(EnvironmentVersionData, self).__init__(**kwargs)
self.properties = properties
class EnvironmentVersionDetails(AssetBase):
"""Environment version details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_anonymous: If the name and version are system generated (anonymous registration).
:vartype is_anonymous: bool
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar build: Configuration settings for Docker build context.
:vartype build: ~azure.mgmt.machinelearningservices.models.BuildContext
:ivar conda_file: Standard configuration file used by Conda that lets you install any kind of
package, including Python, R, and C/C++ packages.
.. raw:: html
<see
href="https://repo2docker.readthedocs.io/en/latest/config_files.html#environment-yml-install-a-conda-environment"
/>.
:vartype conda_file: str
:ivar environment_type: Environment type is either user managed or curated by the Azure ML
service
.. raw:: html
<see
href="https://docs.microsoft.com/en-us/azure/machine-learning/resource-curated-environments"
/>. Possible values include: "Curated", "UserCreated".
:vartype environment_type: str or ~azure.mgmt.machinelearningservices.models.EnvironmentType
:ivar image: Name of the image that will be used for the environment.
.. raw:: html
<seealso
href="https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-custom-docker-image#use-a-custom-base-image"
/>.
:vartype image: str
:ivar inference_config: Defines configuration specific to inference.
:vartype inference_config:
~azure.mgmt.machinelearningservices.models.InferenceContainerProperties
:ivar os_type: The OS type of the environment. Possible values include: "Linux", "Windows".
:vartype os_type: str or ~azure.mgmt.machinelearningservices.models.OperatingSystemType
"""
_validation = {
'environment_type': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_anonymous': {'key': 'isAnonymous', 'type': 'bool'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'build': {'key': 'build', 'type': 'BuildContext'},
'conda_file': {'key': 'condaFile', 'type': 'str'},
'environment_type': {'key': 'environmentType', 'type': 'str'},
'image': {'key': 'image', 'type': 'str'},
'inference_config': {'key': 'inferenceConfig', 'type': 'InferenceContainerProperties'},
'os_type': {'key': 'osType', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_anonymous: Optional[bool] = False,
is_archived: Optional[bool] = False,
build: Optional["BuildContext"] = None,
conda_file: Optional[str] = None,
image: Optional[str] = None,
inference_config: Optional["InferenceContainerProperties"] = None,
os_type: Optional[Union[str, "OperatingSystemType"]] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_anonymous: If the name and version are system generated (anonymous registration).
:paramtype is_anonymous: bool
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword build: Configuration settings for Docker build context.
:paramtype build: ~azure.mgmt.machinelearningservices.models.BuildContext
:keyword conda_file: Standard configuration file used by Conda that lets you install any kind
of package, including Python, R, and C/C++ packages.
.. raw:: html
<see
href="https://repo2docker.readthedocs.io/en/latest/config_files.html#environment-yml-install-a-conda-environment"
/>.
:paramtype conda_file: str
:keyword image: Name of the image that will be used for the environment.
.. raw:: html
<seealso
href="https://docs.microsoft.com/en-us/azure/machine-learning/how-to-deploy-custom-docker-image#use-a-custom-base-image"
/>.
:paramtype image: str
:keyword inference_config: Defines configuration specific to inference.
:paramtype inference_config:
~azure.mgmt.machinelearningservices.models.InferenceContainerProperties
:keyword os_type: The OS type of the environment. Possible values include: "Linux", "Windows".
:paramtype os_type: str or ~azure.mgmt.machinelearningservices.models.OperatingSystemType
"""
super(EnvironmentVersionDetails, self).__init__(description=description, properties=properties, tags=tags, is_anonymous=is_anonymous, is_archived=is_archived, **kwargs)
self.build = build
self.conda_file = conda_file
self.environment_type = None
self.image = image
self.inference_config = inference_config
self.os_type = os_type
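# Illustrative sketch: a minimal EnvironmentVersionDetails built from a base Docker
# image and an inline conda file, wrapped in the EnvironmentVersionData envelope.
# The image name and conda content are placeholders chosen for the example.
def _example_environment_version() -> "EnvironmentVersionData":
    """Describe an environment version from a base image plus a conda file."""
    details = EnvironmentVersionDetails(
        description="CPU scoring environment",
        image="mcr.microsoft.com/azureml/openmpi4.1.0-ubuntu20.04:latest",
        conda_file="channels:\n  - conda-forge\ndependencies:\n  - python=3.8\n  - pip",
        os_type="Linux",
    )
    return EnvironmentVersionData(properties=details)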
class EnvironmentVersionResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of EnvironmentVersion entities.
:ivar next_link: The link to the next page of EnvironmentVersion objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type EnvironmentVersion.
:vartype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentVersionData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[EnvironmentVersionData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["EnvironmentVersionData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of EnvironmentVersion objects. If null, there are
no additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type EnvironmentVersion.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.EnvironmentVersionData]
"""
super(EnvironmentVersionResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.machinelearningservices.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~azure.mgmt.machinelearningservices.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:ivar error: The error object.
:vartype error: ~azure.mgmt.machinelearningservices.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
*,
error: Optional["ErrorDetail"] = None,
**kwargs
):
"""
:keyword error: The error object.
:paramtype error: ~azure.mgmt.machinelearningservices.models.ErrorDetail
"""
super(ErrorResponse, self).__init__(**kwargs)
self.error = error
class FeaturizationSettings(msrest.serialization.Model):
"""Featurization Configuration.
:ivar dataset_language: Dataset language, useful for the text data.
:vartype dataset_language: str
"""
_attribute_map = {
'dataset_language': {'key': 'datasetLanguage', 'type': 'str'},
}
def __init__(
self,
*,
dataset_language: Optional[str] = None,
**kwargs
):
"""
:keyword dataset_language: Dataset language, useful for the text data.
:paramtype dataset_language: str
"""
super(FeaturizationSettings, self).__init__(**kwargs)
self.dataset_language = dataset_language
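# Illustrative sketch: featurization settings that declare the dataset language for
# text data. The language code shown ("eng") is an example value, not a validated one.
def _example_featurization_settings() -> "FeaturizationSettings":
    """Declare the dataset language used during featurization."""
    return FeaturizationSettings(dataset_language="eng")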
class FlavorData(msrest.serialization.Model):
"""FlavorData.
:ivar data: Model flavor-specific data.
:vartype data: dict[str, str]
"""
_attribute_map = {
'data': {'key': 'data', 'type': '{str}'},
}
def __init__(
self,
*,
data: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword data: Model flavor-specific data.
:paramtype data: dict[str, str]
"""
super(FlavorData, self).__init__(**kwargs)
self.data = data
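# Illustrative sketch: flavor-specific key/value data for a model flavor. The keys
# used here are arbitrary examples; the service treats the dictionary as opaque strings.
def _example_flavor_data() -> "FlavorData":
    """Attach free-form flavor metadata to a model."""
    return FlavorData(data={"framework": "sklearn", "framework_version": "1.0.2"})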
class Forecasting(AutoMLVertical, TableVertical):
"""Forecasting task in AutoML Table vertical.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Data inputs for AutoMLJob.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalDataSettings
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
:ivar training_settings: Inputs for training phase for an AutoML Job.
:vartype training_settings: ~azure.mgmt.machinelearningservices.models.TrainingSettings
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:ivar task_type: Required. [Required] Task type for AutoMLJob.Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar allowed_models: Allowed models for forecasting task.
:vartype allowed_models: list[str or
~azure.mgmt.machinelearningservices.models.ForecastingModels]
:ivar blocked_models: Blocked models for forecasting task.
:vartype blocked_models: list[str or
~azure.mgmt.machinelearningservices.models.ForecastingModels]
:ivar forecasting_settings: Forecasting task specific inputs.
:vartype forecasting_settings: ~azure.mgmt.machinelearningservices.models.ForecastingSettings
:ivar primary_metric: Primary metric for forecasting task. Possible values include:
"SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score",
"NormalizedMeanAbsoluteError".
:vartype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ForecastingPrimaryMetrics
"""
_validation = {
'task_type': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'TableVerticalDataSettings'},
'featurization_settings': {'key': 'featurizationSettings', 'type': 'TableVerticalFeaturizationSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'TableVerticalLimitSettings'},
'training_settings': {'key': 'trainingSettings', 'type': 'TrainingSettings'},
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
'allowed_models': {'key': 'allowedModels', 'type': '[str]'},
'blocked_models': {'key': 'blockedModels', 'type': '[str]'},
'forecasting_settings': {'key': 'forecastingSettings', 'type': 'ForecastingSettings'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
data_settings: Optional["TableVerticalDataSettings"] = None,
featurization_settings: Optional["TableVerticalFeaturizationSettings"] = None,
limit_settings: Optional["TableVerticalLimitSettings"] = None,
training_settings: Optional["TrainingSettings"] = None,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
allowed_models: Optional[List[Union[str, "ForecastingModels"]]] = None,
blocked_models: Optional[List[Union[str, "ForecastingModels"]]] = None,
forecasting_settings: Optional["ForecastingSettings"] = None,
primary_metric: Optional[Union[str, "ForecastingPrimaryMetrics"]] = None,
**kwargs
):
"""
:keyword data_settings: Data inputs for AutoMLJob.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalDataSettings
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
:keyword training_settings: Inputs for training phase for an AutoML Job.
:paramtype training_settings: ~azure.mgmt.machinelearningservices.models.TrainingSettings
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:keyword allowed_models: Allowed models for forecasting task.
:paramtype allowed_models: list[str or
~azure.mgmt.machinelearningservices.models.ForecastingModels]
:keyword blocked_models: Blocked models for forecasting task.
:paramtype blocked_models: list[str or
~azure.mgmt.machinelearningservices.models.ForecastingModels]
:keyword forecasting_settings: Forecasting task specific inputs.
:paramtype forecasting_settings: ~azure.mgmt.machinelearningservices.models.ForecastingSettings
:keyword primary_metric: Primary metric for forecasting task. Possible values include:
"SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score",
"NormalizedMeanAbsoluteError".
:paramtype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ForecastingPrimaryMetrics
"""
super(Forecasting, self).__init__(log_verbosity=log_verbosity, data_settings=data_settings, featurization_settings=featurization_settings, limit_settings=limit_settings, training_settings=training_settings, **kwargs)
self.data_settings = data_settings
self.featurization_settings = featurization_settings
self.limit_settings = limit_settings
self.training_settings = training_settings
self.task_type = 'Forecasting' # type: str
self.allowed_models = allowed_models
self.blocked_models = blocked_models
self.forecasting_settings = forecasting_settings
self.primary_metric = primary_metric
self.log_verbosity = log_verbosity
self.task_type = 'Forecasting' # type: str
self.allowed_models = allowed_models
self.blocked_models = blocked_models
self.forecasting_settings = forecasting_settings
self.primary_metric = primary_metric
class ForecastingSettings(msrest.serialization.Model):
"""Forecasting specific parameters.
:ivar country_or_region_for_holidays: Country or region for holidays for forecasting tasks.
These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.
:vartype country_or_region_for_holidays: str
:ivar cv_step_size: Number of periods between the origin time of one CV fold and the next fold.
For example, if ``CVStepSize`` = 3 for daily data, the origin time for each fold will be
three days apart.
:vartype cv_step_size: int
:ivar feature_lags: Flag for generating lags for the numeric features with 'auto' or null.
Possible values include: "None", "Auto".
:vartype feature_lags: str or ~azure.mgmt.machinelearningservices.models.FeatureLags
:ivar forecast_horizon: The desired maximum forecast horizon in units of time-series frequency.
:vartype forecast_horizon: ~azure.mgmt.machinelearningservices.models.ForecastHorizon
:ivar frequency: When forecasting, this parameter represents the period with which the forecast
is desired, for example daily, weekly, yearly, etc. The forecast frequency is dataset frequency
by default.
:vartype frequency: str
:ivar seasonality: Set time series seasonality as an integer multiple of the series frequency.
If seasonality is set to 'auto', it will be inferred.
:vartype seasonality: ~azure.mgmt.machinelearningservices.models.Seasonality
:ivar short_series_handling_config: The parameter defining how AutoML should handle short
time series. Possible values include: "None", "Auto", "Pad", "Drop".
:vartype short_series_handling_config: str or
~azure.mgmt.machinelearningservices.models.ShortSeriesHandlingConfiguration
:ivar target_aggregate_function: The function to be used to aggregate the time series target
column to conform to a user specified frequency.
If TargetAggregateFunction is set (i.e. not 'None') but the freq parameter is not set, an
error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean".
Possible values include: "None", "Sum", "Max", "Min", "Mean".
:vartype target_aggregate_function: str or
~azure.mgmt.machinelearningservices.models.TargetAggregationFunction
:ivar target_lags: The number of past periods to lag from the target column.
:vartype target_lags: ~azure.mgmt.machinelearningservices.models.TargetLags
:ivar target_rolling_window_size: The number of past periods used to create a rolling window
average of the target column.
:vartype target_rolling_window_size:
~azure.mgmt.machinelearningservices.models.TargetRollingWindowSize
:ivar time_column_name: The name of the time column. This parameter is required when
forecasting to specify the datetime column in the input data used for building the time series
and inferring its frequency.
:vartype time_column_name: str
:ivar time_series_id_column_names: The names of columns used to group a timeseries. It can be
used to create multiple series.
If grain is not defined, the data set is assumed to be one time-series. This parameter is used
with task type forecasting.
:vartype time_series_id_column_names: list[str]
:ivar use_stl: Configure STL Decomposition of the time-series target column. Possible values
include: "None", "Season", "SeasonTrend".
:vartype use_stl: str or ~azure.mgmt.machinelearningservices.models.UseStl
"""
_attribute_map = {
'country_or_region_for_holidays': {'key': 'countryOrRegionForHolidays', 'type': 'str'},
'cv_step_size': {'key': 'cvStepSize', 'type': 'int'},
'feature_lags': {'key': 'featureLags', 'type': 'str'},
'forecast_horizon': {'key': 'forecastHorizon', 'type': 'ForecastHorizon'},
'frequency': {'key': 'frequency', 'type': 'str'},
'seasonality': {'key': 'seasonality', 'type': 'Seasonality'},
'short_series_handling_config': {'key': 'shortSeriesHandlingConfig', 'type': 'str'},
'target_aggregate_function': {'key': 'targetAggregateFunction', 'type': 'str'},
'target_lags': {'key': 'targetLags', 'type': 'TargetLags'},
'target_rolling_window_size': {'key': 'targetRollingWindowSize', 'type': 'TargetRollingWindowSize'},
'time_column_name': {'key': 'timeColumnName', 'type': 'str'},
'time_series_id_column_names': {'key': 'timeSeriesIdColumnNames', 'type': '[str]'},
'use_stl': {'key': 'useStl', 'type': 'str'},
}
def __init__(
self,
*,
country_or_region_for_holidays: Optional[str] = None,
cv_step_size: Optional[int] = None,
feature_lags: Optional[Union[str, "FeatureLags"]] = None,
forecast_horizon: Optional["ForecastHorizon"] = None,
frequency: Optional[str] = None,
seasonality: Optional["Seasonality"] = None,
short_series_handling_config: Optional[Union[str, "ShortSeriesHandlingConfiguration"]] = None,
target_aggregate_function: Optional[Union[str, "TargetAggregationFunction"]] = None,
target_lags: Optional["TargetLags"] = None,
target_rolling_window_size: Optional["TargetRollingWindowSize"] = None,
time_column_name: Optional[str] = None,
time_series_id_column_names: Optional[List[str]] = None,
use_stl: Optional[Union[str, "UseStl"]] = None,
**kwargs
):
"""
:keyword country_or_region_for_holidays: Country or region for holidays for forecasting tasks.
These should be ISO 3166 two-letter country/region codes, for example 'US' or 'GB'.
:paramtype country_or_region_for_holidays: str
:keyword cv_step_size: Number of periods between the origin time of one CV fold and the next
fold. For example, if ``CVStepSize`` = 3 for daily data, the origin time for each fold will be
three days apart.
:paramtype cv_step_size: int
:keyword feature_lags: Flag for generating lags for the numeric features with 'auto' or null.
Possible values include: "None", "Auto".
:paramtype feature_lags: str or ~azure.mgmt.machinelearningservices.models.FeatureLags
:keyword forecast_horizon: The desired maximum forecast horizon in units of time-series
frequency.
:paramtype forecast_horizon: ~azure.mgmt.machinelearningservices.models.ForecastHorizon
:keyword frequency: When forecasting, this parameter represents the period with which the
forecast is desired, for example daily, weekly, yearly, etc. The forecast frequency is dataset
frequency by default.
:paramtype frequency: str
:keyword seasonality: Set time series seasonality as an integer multiple of the series
frequency.
If seasonality is set to 'auto', it will be inferred.
:paramtype seasonality: ~azure.mgmt.machinelearningservices.models.Seasonality
:keyword short_series_handling_config: The parameter defining how AutoML should handle short
time series. Possible values include: "None", "Auto", "Pad", "Drop".
:paramtype short_series_handling_config: str or
~azure.mgmt.machinelearningservices.models.ShortSeriesHandlingConfiguration
:keyword target_aggregate_function: The function to be used to aggregate the time series target
column to conform to a user specified frequency.
If TargetAggregateFunction is set (i.e. not 'None') but the freq parameter is not set, an
error is raised. The possible target aggregation functions are: "sum", "max", "min" and "mean".
Possible values include: "None", "Sum", "Max", "Min", "Mean".
:paramtype target_aggregate_function: str or
~azure.mgmt.machinelearningservices.models.TargetAggregationFunction
:keyword target_lags: The number of past periods to lag from the target column.
:paramtype target_lags: ~azure.mgmt.machinelearningservices.models.TargetLags
:keyword target_rolling_window_size: The number of past periods used to create a rolling window
average of the target column.
:paramtype target_rolling_window_size:
~azure.mgmt.machinelearningservices.models.TargetRollingWindowSize
:keyword time_column_name: The name of the time column. This parameter is required when
forecasting to specify the datetime column in the input data used for building the time series
and inferring its frequency.
:paramtype time_column_name: str
:keyword time_series_id_column_names: The names of columns used to group a timeseries. It can
be used to create multiple series.
If grain is not defined, the data set is assumed to be one time-series. This parameter is used
with task type forecasting.
:paramtype time_series_id_column_names: list[str]
:keyword use_stl: Configure STL Decomposition of the time-series target column. Possible values
include: "None", "Season", "SeasonTrend".
:paramtype use_stl: str or ~azure.mgmt.machinelearningservices.models.UseStl
"""
super(ForecastingSettings, self).__init__(**kwargs)
self.country_or_region_for_holidays = country_or_region_for_holidays
self.cv_step_size = cv_step_size
self.feature_lags = feature_lags
self.forecast_horizon = forecast_horizon
self.frequency = frequency
self.seasonality = seasonality
self.short_series_handling_config = short_series_handling_config
self.target_aggregate_function = target_aggregate_function
self.target_lags = target_lags
self.target_rolling_window_size = target_rolling_window_size
self.time_column_name = time_column_name
self.time_series_id_column_names = time_series_id_column_names
self.use_stl = use_stl
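# Illustrative sketch: forecasting-specific settings for a daily time series, attached
# to a Forecasting task vertical (defined above). The column name, frequency, holiday
# region, and chosen primary metric are example values; data and limit settings are
# omitted for brevity.
def _example_forecasting_task() -> "Forecasting":
    """Configure a Forecasting vertical with basic time-series settings."""
    settings = ForecastingSettings(
        time_column_name="date",
        frequency="D",
        country_or_region_for_holidays="US",
    )
    return Forecasting(
        forecasting_settings=settings,
        primary_metric="NormalizedRootMeanSquaredError",
        log_verbosity="Info",
    )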
class GridSamplingAlgorithm(SamplingAlgorithm):
"""Defines a Sampling Algorithm that exhaustively generates every value combination in the space.
All required parameters must be populated in order to send to Azure.
:ivar sampling_algorithm_type: Required. [Required] The algorithm used for generating
hyperparameter values, along with configuration properties.Constant filled by server. Possible
values include: "Grid", "Random", "Bayesian".
:vartype sampling_algorithm_type: str or
~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
"""
_validation = {
'sampling_algorithm_type': {'required': True},
}
_attribute_map = {
'sampling_algorithm_type': {'key': 'samplingAlgorithmType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(GridSamplingAlgorithm, self).__init__(**kwargs)
self.sampling_algorithm_type = 'Grid' # type: str
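# Illustrative sketch: the discriminator pattern used by these polymorphic models.
# Instantiating GridSamplingAlgorithm() fixes sampling_algorithm_type to 'Grid', which
# is the value the _subtype_map machinery keys on during (de)serialization.
def _example_grid_sampling() -> "GridSamplingAlgorithm":
    """Return a sampling algorithm whose discriminator is preset to 'Grid'."""
    return GridSamplingAlgorithm()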
class HdfsDatastore(DatastoreDetails):
"""HdfsDatastore.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar credentials: Required. [Required] Account credentials.
:vartype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:ivar datastore_type: Required. [Required] Storage type backing the datastore.Constant filled
by server. Possible values include: "AzureBlob", "AzureDataLakeGen1", "AzureDataLakeGen2",
"AzureFile", "Hdfs".
:vartype datastore_type: str or ~azure.mgmt.machinelearningservices.models.DatastoreType
:ivar is_default: Readonly property to indicate if datastore is the workspace default
datastore.
:vartype is_default: bool
:ivar hdfs_server_certificate: The TLS cert of the HDFS server. Needs to be a base64 encoded
string. Required if "Https" protocol is selected.
:vartype hdfs_server_certificate: str
:ivar name_node_address: Required. [Required] IP Address or DNS HostName.
:vartype name_node_address: str
:ivar protocol: Protocol used to communicate with the storage account (Https/Http).
:vartype protocol: str
"""
_validation = {
'credentials': {'required': True},
'datastore_type': {'required': True},
'is_default': {'readonly': True},
'name_node_address': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'credentials': {'key': 'credentials', 'type': 'DatastoreCredentials'},
'datastore_type': {'key': 'datastoreType', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'hdfs_server_certificate': {'key': 'hdfsServerCertificate', 'type': 'str'},
'name_node_address': {'key': 'nameNodeAddress', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
}
def __init__(
self,
*,
credentials: "DatastoreCredentials",
name_node_address: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
hdfs_server_certificate: Optional[str] = None,
protocol: Optional[str] = "http",
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword credentials: Required. [Required] Account credentials.
:paramtype credentials: ~azure.mgmt.machinelearningservices.models.DatastoreCredentials
:keyword hdfs_server_certificate: The TLS cert of the HDFS server. Needs to be a base64 encoded
string. Required if "Https" protocol is selected.
:paramtype hdfs_server_certificate: str
:keyword name_node_address: Required. [Required] IP Address or DNS HostName.
:paramtype name_node_address: str
:keyword protocol: Protocol used to communicate with the storage account (Https/Http).
:paramtype protocol: str
"""
super(HdfsDatastore, self).__init__(description=description, properties=properties, tags=tags, credentials=credentials, **kwargs)
self.datastore_type = 'Hdfs' # type: str
self.hdfs_server_certificate = hdfs_server_certificate
self.name_node_address = name_node_address
self.protocol = protocol
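# Illustrative sketch: an HDFS datastore pointed at an example name node over HTTPS.
# Credentials are passed in rather than constructed here because DatastoreCredentials
# is an abstract base; the host name and certificate value are placeholders.
def _example_hdfs_datastore(credentials: "DatastoreCredentials") -> "HdfsDatastore":
    """Describe an HDFS datastore reachable over HTTPS."""
    return HdfsDatastore(
        credentials=credentials,
        name_node_address="namenode.contoso.internal",
        protocol="https",
        hdfs_server_certificate="<base64-encoded TLS certificate>",
    )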
class IdAssetReference(AssetReferenceBase):
"""Reference to an asset via its ARM resource ID.
All required parameters must be populated in order to send to Azure.
:ivar reference_type: Required. [Required] Specifies the type of asset reference.Constant
filled by server. Possible values include: "Id", "DataPath", "OutputPath".
:vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
:ivar asset_id: Required. [Required] ARM resource ID of the asset.
:vartype asset_id: str
"""
_validation = {
'reference_type': {'required': True},
'asset_id': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'reference_type': {'key': 'referenceType', 'type': 'str'},
'asset_id': {'key': 'assetId', 'type': 'str'},
}
def __init__(
self,
*,
asset_id: str,
**kwargs
):
"""
:keyword asset_id: Required. [Required] ARM resource ID of the asset.
:paramtype asset_id: str
"""
super(IdAssetReference, self).__init__(**kwargs)
self.reference_type = 'Id' # type: str
self.asset_id = asset_id
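# Illustrative sketch: referencing a registered model version by its ARM resource ID.
# The subscription, resource group, workspace, and model names in the ID are placeholders.
def _example_id_asset_reference() -> "IdAssetReference":
    """Point at an asset via its full ARM resource ID."""
    return IdAssetReference(
        asset_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/resourceGroups/example-rg/providers/Microsoft.MachineLearningServices"
            "/workspaces/example-ws/models/example-model/versions/1"
        )
    )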
class ImageVertical(msrest.serialization.Model):
"""Abstract class for AutoML tasks that train image (computer vision) models -
such as Image Classification / Image Classification Multilabel / Image Object Detection / Image Instance Segmentation.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:ivar limit_settings: Required. [Required] Limit settings for the AutoML job.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
"""
_validation = {
'data_settings': {'required': True},
'limit_settings': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'ImageVerticalDataSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'ImageLimitSettings'},
'sweep_settings': {'key': 'sweepSettings', 'type': 'ImageSweepSettings'},
}
def __init__(
self,
*,
data_settings: "ImageVerticalDataSettings",
limit_settings: "ImageLimitSettings",
sweep_settings: Optional["ImageSweepSettings"] = None,
**kwargs
):
"""
:keyword data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:keyword limit_settings: Required. [Required] Limit settings for the AutoML job.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
"""
super(ImageVertical, self).__init__(**kwargs)
self.data_settings = data_settings
self.limit_settings = limit_settings
self.sweep_settings = sweep_settings
class ImageClassificationBase(ImageVertical):
"""ImageClassificationBase.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:ivar limit_settings: Required. [Required] Limit settings for the AutoML job.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:ivar model_settings: Settings used for training the model.
:vartype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
:ivar search_space: Search space for sampling different combinations of models and their
hyperparameters.
:vartype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
"""
_validation = {
'data_settings': {'required': True},
'limit_settings': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'ImageVerticalDataSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'ImageLimitSettings'},
'sweep_settings': {'key': 'sweepSettings', 'type': 'ImageSweepSettings'},
'model_settings': {'key': 'modelSettings', 'type': 'ImageModelSettingsClassification'},
'search_space': {'key': 'searchSpace', 'type': '[ImageModelDistributionSettingsClassification]'},
}
def __init__(
self,
*,
data_settings: "ImageVerticalDataSettings",
limit_settings: "ImageLimitSettings",
sweep_settings: Optional["ImageSweepSettings"] = None,
model_settings: Optional["ImageModelSettingsClassification"] = None,
search_space: Optional[List["ImageModelDistributionSettingsClassification"]] = None,
**kwargs
):
"""
:keyword data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:keyword limit_settings: Required. [Required] Limit settings for the AutoML job.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:keyword model_settings: Settings used for training the model.
:paramtype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
:keyword search_space: Search space for sampling different combinations of models and their
hyperparameters.
:paramtype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
"""
super(ImageClassificationBase, self).__init__(data_settings=data_settings, limit_settings=limit_settings, sweep_settings=sweep_settings, **kwargs)
self.model_settings = model_settings
self.search_space = search_space
class ImageClassification(AutoMLVertical, ImageClassificationBase):
"""Image Classification. Multi-class image classification is used when an image is classified with only a single label
from a set of classes - e.g. each image is classified as either an image of a 'cat' or a 'dog' or a 'duck'.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:ivar limit_settings: Required. [Required] Limit settings for the AutoML job.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:ivar model_settings: Settings used for training the model.
:vartype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
:ivar search_space: Search space for sampling different combinations of models and their
hyperparameters.
:vartype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:ivar task_type: Required. [Required] Task type for AutoMLJob.Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar primary_metric: Primary metric to optimize for this task. Possible values include:
"AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
"PrecisionScoreWeighted".
:vartype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
"""
_validation = {
'data_settings': {'required': True},
'limit_settings': {'required': True},
'task_type': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'ImageVerticalDataSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'ImageLimitSettings'},
'sweep_settings': {'key': 'sweepSettings', 'type': 'ImageSweepSettings'},
'model_settings': {'key': 'modelSettings', 'type': 'ImageModelSettingsClassification'},
'search_space': {'key': 'searchSpace', 'type': '[ImageModelDistributionSettingsClassification]'},
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
data_settings: "ImageVerticalDataSettings",
limit_settings: "ImageLimitSettings",
sweep_settings: Optional["ImageSweepSettings"] = None,
model_settings: Optional["ImageModelSettingsClassification"] = None,
search_space: Optional[List["ImageModelDistributionSettingsClassification"]] = None,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
primary_metric: Optional[Union[str, "ClassificationPrimaryMetrics"]] = None,
**kwargs
):
"""
:keyword data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:keyword limit_settings: Required. [Required] Limit settings for the AutoML job.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:keyword model_settings: Settings used for training the model.
:paramtype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
:keyword search_space: Search space for sampling different combinations of models and their
hyperparameters.
:paramtype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:keyword primary_metric: Primary metric to optimize for this task. Possible values include:
"AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
"PrecisionScoreWeighted".
:paramtype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
"""
super(ImageClassification, self).__init__(log_verbosity=log_verbosity, data_settings=data_settings, limit_settings=limit_settings, sweep_settings=sweep_settings, model_settings=model_settings, search_space=search_space, **kwargs)
self.data_settings = data_settings
self.limit_settings = limit_settings
self.sweep_settings = sweep_settings
self.model_settings = model_settings
self.search_space = search_space
self.task_type = 'ImageClassification' # type: str
self.primary_metric = primary_metric
self.log_verbosity = log_verbosity
self.task_type = 'ImageClassification' # type: str
self.primary_metric = primary_metric
class ImageClassificationMultilabel(AutoMLVertical, ImageClassificationBase):
"""Image Classification Multilabel. Multi-label image classification is used when an image could have one or more labels
from a set of labels - e.g. an image could be labeled with both 'cat' and 'dog'.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:ivar limit_settings: Required. [Required] Limit settings for the AutoML job.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:ivar model_settings: Settings used for training the model.
:vartype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
:ivar search_space: Search space for sampling different combinations of models and their
hyperparameters.
:vartype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:ivar task_type: Required. [Required] Task type for AutoMLJob.Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar primary_metric: Primary metric to optimize for this task. Possible values include:
"AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
"PrecisionScoreWeighted", "IOU".
:vartype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
"""
_validation = {
'data_settings': {'required': True},
'limit_settings': {'required': True},
'task_type': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'ImageVerticalDataSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'ImageLimitSettings'},
'sweep_settings': {'key': 'sweepSettings', 'type': 'ImageSweepSettings'},
'model_settings': {'key': 'modelSettings', 'type': 'ImageModelSettingsClassification'},
'search_space': {'key': 'searchSpace', 'type': '[ImageModelDistributionSettingsClassification]'},
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
data_settings: "ImageVerticalDataSettings",
limit_settings: "ImageLimitSettings",
sweep_settings: Optional["ImageSweepSettings"] = None,
model_settings: Optional["ImageModelSettingsClassification"] = None,
search_space: Optional[List["ImageModelDistributionSettingsClassification"]] = None,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
primary_metric: Optional[Union[str, "ClassificationMultilabelPrimaryMetrics"]] = None,
**kwargs
):
"""
:keyword data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:keyword limit_settings: Required. [Required] Limit settings for the AutoML job.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:keyword model_settings: Settings used for training the model.
:paramtype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsClassification
:keyword search_space: Search space for sampling different combinations of models and their
hyperparameters.
:paramtype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsClassification]
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:keyword primary_metric: Primary metric to optimize for this task. Possible values include:
"AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
"PrecisionScoreWeighted", "IOU".
:paramtype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
"""
super(ImageClassificationMultilabel, self).__init__(log_verbosity=log_verbosity, data_settings=data_settings, limit_settings=limit_settings, sweep_settings=sweep_settings, model_settings=model_settings, search_space=search_space, **kwargs)
self.data_settings = data_settings
self.limit_settings = limit_settings
self.sweep_settings = sweep_settings
self.model_settings = model_settings
self.search_space = search_space
self.task_type = 'ImageClassificationMultilabel' # type: str
self.primary_metric = primary_metric
self.log_verbosity = log_verbosity
self.task_type = 'ImageClassificationMultilabel' # type: str
self.primary_metric = primary_metric
class ImageObjectDetectionBase(ImageVertical):
"""ImageObjectDetectionBase.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:ivar limit_settings: Required. [Required] Limit settings for the AutoML job.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:ivar model_settings: Settings used for training the model.
:vartype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
:ivar search_space: Search space for sampling different combinations of models and their
hyperparameters.
:vartype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
"""
_validation = {
'data_settings': {'required': True},
'limit_settings': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'ImageVerticalDataSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'ImageLimitSettings'},
'sweep_settings': {'key': 'sweepSettings', 'type': 'ImageSweepSettings'},
'model_settings': {'key': 'modelSettings', 'type': 'ImageModelSettingsObjectDetection'},
'search_space': {'key': 'searchSpace', 'type': '[ImageModelDistributionSettingsObjectDetection]'},
}
def __init__(
self,
*,
data_settings: "ImageVerticalDataSettings",
limit_settings: "ImageLimitSettings",
sweep_settings: Optional["ImageSweepSettings"] = None,
model_settings: Optional["ImageModelSettingsObjectDetection"] = None,
search_space: Optional[List["ImageModelDistributionSettingsObjectDetection"]] = None,
**kwargs
):
"""
:keyword data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:keyword limit_settings: Required. [Required] Limit settings for the AutoML job.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:keyword model_settings: Settings used for training the model.
:paramtype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
:keyword search_space: Search space for sampling different combinations of models and their
hyperparameters.
:paramtype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
"""
super(ImageObjectDetectionBase, self).__init__(data_settings=data_settings, limit_settings=limit_settings, sweep_settings=sweep_settings, **kwargs)
self.model_settings = model_settings
self.search_space = search_space
class ImageInstanceSegmentation(AutoMLVertical, ImageObjectDetectionBase):
"""Image Instance Segmentation. Instance segmentation is used to identify objects in an image at the pixel level,
drawing a polygon around each object in the image.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:ivar limit_settings: Required. [Required] Limit settings for the AutoML job.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:ivar model_settings: Settings used for training the model.
:vartype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
:ivar search_space: Search space for sampling different combinations of models and their
hyperparameters.
:vartype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:ivar task_type: Required. [Required] Task type for AutoMLJob. Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar primary_metric: Primary metric to optimize for this task. Possible values include:
"MeanAveragePrecision".
:vartype primary_metric: str or
~azure.mgmt.machinelearningservices.models.InstanceSegmentationPrimaryMetrics
"""
_validation = {
'data_settings': {'required': True},
'limit_settings': {'required': True},
'task_type': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'ImageVerticalDataSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'ImageLimitSettings'},
'sweep_settings': {'key': 'sweepSettings', 'type': 'ImageSweepSettings'},
'model_settings': {'key': 'modelSettings', 'type': 'ImageModelSettingsObjectDetection'},
'search_space': {'key': 'searchSpace', 'type': '[ImageModelDistributionSettingsObjectDetection]'},
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
data_settings: "ImageVerticalDataSettings",
limit_settings: "ImageLimitSettings",
sweep_settings: Optional["ImageSweepSettings"] = None,
model_settings: Optional["ImageModelSettingsObjectDetection"] = None,
search_space: Optional[List["ImageModelDistributionSettingsObjectDetection"]] = None,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
primary_metric: Optional[Union[str, "InstanceSegmentationPrimaryMetrics"]] = None,
**kwargs
):
"""
:keyword data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:keyword limit_settings: Required. [Required] Limit settings for the AutoML job.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:keyword model_settings: Settings used for training the model.
:paramtype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
:keyword search_space: Search space for sampling different combinations of models and their
hyperparameters.
:paramtype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:keyword primary_metric: Primary metric to optimize for this task. Possible values include:
"MeanAveragePrecision".
:paramtype primary_metric: str or
~azure.mgmt.machinelearningservices.models.InstanceSegmentationPrimaryMetrics
"""
super(ImageInstanceSegmentation, self).__init__(log_verbosity=log_verbosity, data_settings=data_settings, limit_settings=limit_settings, sweep_settings=sweep_settings, model_settings=model_settings, search_space=search_space, **kwargs)
self.data_settings = data_settings
self.limit_settings = limit_settings
self.sweep_settings = sweep_settings
self.model_settings = model_settings
self.search_space = search_space
self.task_type = 'ImageInstanceSegmentation' # type: str
self.primary_metric = primary_metric
self.log_verbosity = log_verbosity
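# Illustrative usage sketch (not part of the generated code): configuring an instance
# segmentation task. "MeanAveragePrecision" is the only primary metric documented for
# this task type; data and limit settings are assumed to be built elsewhere.
def _example_image_instance_segmentation(
    data_settings: "ImageVerticalDataSettings",
    limit_settings: "ImageLimitSettings",
) -> "ImageInstanceSegmentation":
    return ImageInstanceSegmentation(
        data_settings=data_settings,
        limit_settings=limit_settings,
        primary_metric="MeanAveragePrecision",
        log_verbosity="Warning",
    )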
class ImageLimitSettings(msrest.serialization.Model):
"""Limit settings for the AutoML job.
:ivar max_concurrent_trials: Maximum number of concurrent AutoML iterations.
:vartype max_concurrent_trials: int
:ivar max_trials: Maximum number of AutoML iterations.
:vartype max_trials: int
:ivar timeout: AutoML job timeout.
:vartype timeout: ~datetime.timedelta
"""
_attribute_map = {
'max_concurrent_trials': {'key': 'maxConcurrentTrials', 'type': 'int'},
'max_trials': {'key': 'maxTrials', 'type': 'int'},
'timeout': {'key': 'timeout', 'type': 'duration'},
}
def __init__(
self,
*,
max_concurrent_trials: Optional[int] = 1,
max_trials: Optional[int] = 1,
timeout: Optional[datetime.timedelta] = None,
**kwargs
):
"""
:keyword max_concurrent_trials: Maximum number of concurrent AutoML iterations.
:paramtype max_concurrent_trials: int
:keyword max_trials: Maximum number of AutoML iterations.
:paramtype max_trials: int
:keyword timeout: AutoML job timeout.
:paramtype timeout: ~datetime.timedelta
"""
super(ImageLimitSettings, self).__init__(**kwargs)
self.max_concurrent_trials = max_concurrent_trials
self.max_trials = max_trials
self.timeout = timeout
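# Illustrative usage sketch (not part of the generated code): limiting an AutoML image
# sweep to 10 trials, 2 running concurrently, with a 6 hour overall timeout. The
# module-level `datetime` import used by the `timeout` field is assumed here.
def _example_image_limit_settings() -> "ImageLimitSettings":
    return ImageLimitSettings(
        max_concurrent_trials=2,
        max_trials=10,
        timeout=datetime.timedelta(hours=6),
    )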
class ImageModelDistributionSettings(msrest.serialization.Model):
"""Distribution expressions to sweep over values of model settings.
:code:`<example>
Some examples are:
<code>
ModelName = "choice('seresnext', 'resnest50')";
LearningRate = "uniform(0.001, 0.01)";
LayersToFreeze = "choice(0, 2)";
</code></example>`
All distributions can be specified as distribution_name(min, max) or choice(val1, val2, ..., valn)
where the distribution name can be: uniform, quniform, loguniform, etc.
For more details on how to compose distribution expressions please check the documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
For more information on the available settings please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:vartype ams_gradient: str
:ivar augmentations: Settings for using Augmentations.
:vartype augmentations: str
:ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta1: str
:ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta2: str
:ivar distributed: Whether to use distributed training.
:vartype distributed: str
:ivar early_stopping: Enable early stopping logic during training.
:vartype early_stopping: str
:ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
primary metric improvement
is tracked for early stopping. Must be a positive integer.
:vartype early_stopping_delay: str
:ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:vartype early_stopping_patience: str
:ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
:vartype enable_onnx_normalization: str
:ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
be a positive integer.
:vartype evaluation_frequency: str
:ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:vartype gradient_accumulation_step: str
:ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
For instance, passing 2 as value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype layers_to_freeze: str
:ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:vartype learning_rate: str
:ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'.
:vartype learning_rate_scheduler: str
:ivar model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype model_name: str
:ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
:vartype momentum: str
:ivar nesterov: Enable nesterov when optimizer is 'sgd'.
:vartype nesterov: str
:ivar number_of_epochs: Number of training epochs. Must be a positive integer.
:vartype number_of_epochs: str
:ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
:vartype number_of_workers: str
:ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
:vartype optimizer: str
:ivar random_seed: Random seed to be used when using deterministic training.
:vartype random_seed: str
:ivar split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:vartype split_ratio: str
:ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
the range [0, 1].
:vartype step_lr_gamma: str
:ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
positive integer.
:vartype step_lr_step_size: str
:ivar training_batch_size: Training batch size. Must be a positive integer.
:vartype training_batch_size: str
:ivar validation_batch_size: Validation batch size. Must be a positive integer.
:vartype validation_batch_size: str
:ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:vartype warmup_cosine_lr_cycles: str
:ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:vartype warmup_cosine_lr_warmup_epochs: str
:ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
a float in the range [0, 1].
:vartype weight_decay: str
"""
_attribute_map = {
'ams_gradient': {'key': 'amsGradient', 'type': 'str'},
'augmentations': {'key': 'augmentations', 'type': 'str'},
'beta1': {'key': 'beta1', 'type': 'str'},
'beta2': {'key': 'beta2', 'type': 'str'},
'distributed': {'key': 'distributed', 'type': 'str'},
'early_stopping': {'key': 'earlyStopping', 'type': 'str'},
'early_stopping_delay': {'key': 'earlyStoppingDelay', 'type': 'str'},
'early_stopping_patience': {'key': 'earlyStoppingPatience', 'type': 'str'},
'enable_onnx_normalization': {'key': 'enableOnnxNormalization', 'type': 'str'},
'evaluation_frequency': {'key': 'evaluationFrequency', 'type': 'str'},
'gradient_accumulation_step': {'key': 'gradientAccumulationStep', 'type': 'str'},
'layers_to_freeze': {'key': 'layersToFreeze', 'type': 'str'},
'learning_rate': {'key': 'learningRate', 'type': 'str'},
'learning_rate_scheduler': {'key': 'learningRateScheduler', 'type': 'str'},
'model_name': {'key': 'modelName', 'type': 'str'},
'momentum': {'key': 'momentum', 'type': 'str'},
'nesterov': {'key': 'nesterov', 'type': 'str'},
'number_of_epochs': {'key': 'numberOfEpochs', 'type': 'str'},
'number_of_workers': {'key': 'numberOfWorkers', 'type': 'str'},
'optimizer': {'key': 'optimizer', 'type': 'str'},
'random_seed': {'key': 'randomSeed', 'type': 'str'},
'split_ratio': {'key': 'splitRatio', 'type': 'str'},
'step_lr_gamma': {'key': 'stepLRGamma', 'type': 'str'},
'step_lr_step_size': {'key': 'stepLRStepSize', 'type': 'str'},
'training_batch_size': {'key': 'trainingBatchSize', 'type': 'str'},
'validation_batch_size': {'key': 'validationBatchSize', 'type': 'str'},
'warmup_cosine_lr_cycles': {'key': 'warmupCosineLRCycles', 'type': 'str'},
'warmup_cosine_lr_warmup_epochs': {'key': 'warmupCosineLRWarmupEpochs', 'type': 'str'},
'weight_decay': {'key': 'weightDecay', 'type': 'str'},
}
def __init__(
self,
*,
ams_gradient: Optional[str] = None,
augmentations: Optional[str] = None,
beta1: Optional[str] = None,
beta2: Optional[str] = None,
distributed: Optional[str] = None,
early_stopping: Optional[str] = None,
early_stopping_delay: Optional[str] = None,
early_stopping_patience: Optional[str] = None,
enable_onnx_normalization: Optional[str] = None,
evaluation_frequency: Optional[str] = None,
gradient_accumulation_step: Optional[str] = None,
layers_to_freeze: Optional[str] = None,
learning_rate: Optional[str] = None,
learning_rate_scheduler: Optional[str] = None,
model_name: Optional[str] = None,
momentum: Optional[str] = None,
nesterov: Optional[str] = None,
number_of_epochs: Optional[str] = None,
number_of_workers: Optional[str] = None,
optimizer: Optional[str] = None,
random_seed: Optional[str] = None,
split_ratio: Optional[str] = None,
step_lr_gamma: Optional[str] = None,
step_lr_step_size: Optional[str] = None,
training_batch_size: Optional[str] = None,
validation_batch_size: Optional[str] = None,
warmup_cosine_lr_cycles: Optional[str] = None,
warmup_cosine_lr_warmup_epochs: Optional[str] = None,
weight_decay: Optional[str] = None,
**kwargs
):
"""
:keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:paramtype ams_gradient: str
:keyword augmentations: Settings for using Augmentations.
:paramtype augmentations: str
:keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta1: str
:keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta2: str
:keyword distributed: Whether to use distributed training.
:paramtype distributed: str
:keyword early_stopping: Enable early stopping logic during training.
:paramtype early_stopping: str
:keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
before primary metric improvement
is tracked for early stopping. Must be a positive integer.
:paramtype early_stopping_delay: str
:keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:paramtype early_stopping_patience: str
:keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
:paramtype enable_onnx_normalization: str
:keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
Must be a positive integer.
:paramtype evaluation_frequency: str
:keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:paramtype gradient_accumulation_step: str
:keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
integer.
For instance, passing 2 as value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype layers_to_freeze: str
:keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:paramtype learning_rate: str
:keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'.
:paramtype learning_rate_scheduler: str
:keyword model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype model_name: str
:keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
1].
:paramtype momentum: str
:keyword nesterov: Enable nesterov when optimizer is 'sgd'.
:paramtype nesterov: str
:keyword number_of_epochs: Number of training epochs. Must be a positive integer.
:paramtype number_of_epochs: str
:keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
:paramtype number_of_workers: str
:keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
:paramtype optimizer: str
:keyword random_seed: Random seed to be used when using deterministic training.
:paramtype random_seed: str
:keyword split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:paramtype split_ratio: str
:keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
in the range [0, 1].
:paramtype step_lr_gamma: str
:keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
a positive integer.
:paramtype step_lr_step_size: str
:keyword training_batch_size: Training batch size. Must be a positive integer.
:paramtype training_batch_size: str
:keyword validation_batch_size: Validation batch size. Must be a positive integer.
:paramtype validation_batch_size: str
:keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:paramtype warmup_cosine_lr_cycles: str
:keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:paramtype warmup_cosine_lr_warmup_epochs: str
:keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
be a float in the range [0, 1].
:paramtype weight_decay: str
"""
super(ImageModelDistributionSettings, self).__init__(**kwargs)
self.ams_gradient = ams_gradient
self.augmentations = augmentations
self.beta1 = beta1
self.beta2 = beta2
self.distributed = distributed
self.early_stopping = early_stopping
self.early_stopping_delay = early_stopping_delay
self.early_stopping_patience = early_stopping_patience
self.enable_onnx_normalization = enable_onnx_normalization
self.evaluation_frequency = evaluation_frequency
self.gradient_accumulation_step = gradient_accumulation_step
self.layers_to_freeze = layers_to_freeze
self.learning_rate = learning_rate
self.learning_rate_scheduler = learning_rate_scheduler
self.model_name = model_name
self.momentum = momentum
self.nesterov = nesterov
self.number_of_epochs = number_of_epochs
self.number_of_workers = number_of_workers
self.optimizer = optimizer
self.random_seed = random_seed
self.split_ratio = split_ratio
self.step_lr_gamma = step_lr_gamma
self.step_lr_step_size = step_lr_step_size
self.training_batch_size = training_batch_size
self.validation_batch_size = validation_batch_size
self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
self.weight_decay = weight_decay
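# Illustrative usage sketch (not part of the generated code): every field of
# ImageModelDistributionSettings is a distribution expression encoded as a string
# ("choice(...)", "uniform(min, max)", ...), as described in the class docstring above.
# The specific values below are example choices, not recommendations.
def _example_model_distribution_settings() -> "ImageModelDistributionSettings":
    return ImageModelDistributionSettings(
        model_name="choice('seresnext', 'resnest50')",
        learning_rate="uniform(0.001, 0.01)",
        layers_to_freeze="choice(0, 2)",
        optimizer="choice('sgd', 'adam')",
    )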
class ImageModelDistributionSettingsClassification(ImageModelDistributionSettings):
"""Distribution expressions to sweep over values of model settings.
:code:`<example>
Some examples are:
<code>
ModelName = "choice('seresnext', 'resnest50')";
LearningRate = "uniform(0.001, 0.01)";
LayersToFreeze = "choice(0, 2)";
</code></example>`
For more details on how to compose distribution expressions please check the documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
For more information on the available settings please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:vartype ams_gradient: str
:ivar augmentations: Settings for using Augmentations.
:vartype augmentations: str
:ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta1: str
:ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta2: str
:ivar distributed: Whether to use distributed training.
:vartype distributed: str
:ivar early_stopping: Enable early stopping logic during training.
:vartype early_stopping: str
:ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
primary metric improvement
is tracked for early stopping. Must be a positive integer.
:vartype early_stopping_delay: str
:ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:vartype early_stopping_patience: str
:ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
:vartype enable_onnx_normalization: str
:ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
be a positive integer.
:vartype evaluation_frequency: str
:ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:vartype gradient_accumulation_step: str
:ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
For instance, passing 2 as value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype layers_to_freeze: str
:ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:vartype learning_rate: str
:ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'.
:vartype learning_rate_scheduler: str
:ivar model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype model_name: str
:ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
:vartype momentum: str
:ivar nesterov: Enable nesterov when optimizer is 'sgd'.
:vartype nesterov: str
:ivar number_of_epochs: Number of training epochs. Must be a positive integer.
:vartype number_of_epochs: str
:ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
:vartype number_of_workers: str
:ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
:vartype optimizer: str
:ivar random_seed: Random seed to be used when using deterministic training.
:vartype random_seed: str
:ivar split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:vartype split_ratio: str
:ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
the range [0, 1].
:vartype step_lr_gamma: str
:ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
positive integer.
:vartype step_lr_step_size: str
:ivar training_batch_size: Training batch size. Must be a positive integer.
:vartype training_batch_size: str
:ivar validation_batch_size: Validation batch size. Must be a positive integer.
:vartype validation_batch_size: str
:ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:vartype warmup_cosine_lr_cycles: str
:ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:vartype warmup_cosine_lr_warmup_epochs: str
:ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
a float in the range [0, 1].
:vartype weight_decay: str
:ivar training_crop_size: Image crop size that is input to the neural network for the training
dataset. Must be a positive integer.
:vartype training_crop_size: str
:ivar validation_crop_size: Image crop size that is input to the neural network for the
validation dataset. Must be a positive integer.
:vartype validation_crop_size: str
:ivar validation_resize_size: Image size to which to resize before cropping for validation
dataset. Must be a positive integer.
:vartype validation_resize_size: str
:ivar weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss.
1 for weighted loss with sqrt(class_weights). 2 for weighted loss with class_weights. Must be
0 or 1 or 2.
:vartype weighted_loss: str
"""
_attribute_map = {
'ams_gradient': {'key': 'amsGradient', 'type': 'str'},
'augmentations': {'key': 'augmentations', 'type': 'str'},
'beta1': {'key': 'beta1', 'type': 'str'},
'beta2': {'key': 'beta2', 'type': 'str'},
'distributed': {'key': 'distributed', 'type': 'str'},
'early_stopping': {'key': 'earlyStopping', 'type': 'str'},
'early_stopping_delay': {'key': 'earlyStoppingDelay', 'type': 'str'},
'early_stopping_patience': {'key': 'earlyStoppingPatience', 'type': 'str'},
'enable_onnx_normalization': {'key': 'enableOnnxNormalization', 'type': 'str'},
'evaluation_frequency': {'key': 'evaluationFrequency', 'type': 'str'},
'gradient_accumulation_step': {'key': 'gradientAccumulationStep', 'type': 'str'},
'layers_to_freeze': {'key': 'layersToFreeze', 'type': 'str'},
'learning_rate': {'key': 'learningRate', 'type': 'str'},
'learning_rate_scheduler': {'key': 'learningRateScheduler', 'type': 'str'},
'model_name': {'key': 'modelName', 'type': 'str'},
'momentum': {'key': 'momentum', 'type': 'str'},
'nesterov': {'key': 'nesterov', 'type': 'str'},
'number_of_epochs': {'key': 'numberOfEpochs', 'type': 'str'},
'number_of_workers': {'key': 'numberOfWorkers', 'type': 'str'},
'optimizer': {'key': 'optimizer', 'type': 'str'},
'random_seed': {'key': 'randomSeed', 'type': 'str'},
'split_ratio': {'key': 'splitRatio', 'type': 'str'},
'step_lr_gamma': {'key': 'stepLRGamma', 'type': 'str'},
'step_lr_step_size': {'key': 'stepLRStepSize', 'type': 'str'},
'training_batch_size': {'key': 'trainingBatchSize', 'type': 'str'},
'validation_batch_size': {'key': 'validationBatchSize', 'type': 'str'},
'warmup_cosine_lr_cycles': {'key': 'warmupCosineLRCycles', 'type': 'str'},
'warmup_cosine_lr_warmup_epochs': {'key': 'warmupCosineLRWarmupEpochs', 'type': 'str'},
'weight_decay': {'key': 'weightDecay', 'type': 'str'},
'training_crop_size': {'key': 'trainingCropSize', 'type': 'str'},
'validation_crop_size': {'key': 'validationCropSize', 'type': 'str'},
'validation_resize_size': {'key': 'validationResizeSize', 'type': 'str'},
'weighted_loss': {'key': 'weightedLoss', 'type': 'str'},
}
def __init__(
self,
*,
ams_gradient: Optional[str] = None,
augmentations: Optional[str] = None,
beta1: Optional[str] = None,
beta2: Optional[str] = None,
distributed: Optional[str] = None,
early_stopping: Optional[str] = None,
early_stopping_delay: Optional[str] = None,
early_stopping_patience: Optional[str] = None,
enable_onnx_normalization: Optional[str] = None,
evaluation_frequency: Optional[str] = None,
gradient_accumulation_step: Optional[str] = None,
layers_to_freeze: Optional[str] = None,
learning_rate: Optional[str] = None,
learning_rate_scheduler: Optional[str] = None,
model_name: Optional[str] = None,
momentum: Optional[str] = None,
nesterov: Optional[str] = None,
number_of_epochs: Optional[str] = None,
number_of_workers: Optional[str] = None,
optimizer: Optional[str] = None,
random_seed: Optional[str] = None,
split_ratio: Optional[str] = None,
step_lr_gamma: Optional[str] = None,
step_lr_step_size: Optional[str] = None,
training_batch_size: Optional[str] = None,
validation_batch_size: Optional[str] = None,
warmup_cosine_lr_cycles: Optional[str] = None,
warmup_cosine_lr_warmup_epochs: Optional[str] = None,
weight_decay: Optional[str] = None,
training_crop_size: Optional[str] = None,
validation_crop_size: Optional[str] = None,
validation_resize_size: Optional[str] = None,
weighted_loss: Optional[str] = None,
**kwargs
):
"""
:keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:paramtype ams_gradient: str
:keyword augmentations: Settings for using Augmentations.
:paramtype augmentations: str
:keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta1: str
:keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta2: str
:keyword distributed: Whether to use distributed training.
:paramtype distributed: str
:keyword early_stopping: Enable early stopping logic during training.
:paramtype early_stopping: str
:keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
before primary metric improvement
is tracked for early stopping. Must be a positive integer.
:paramtype early_stopping_delay: str
:keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:paramtype early_stopping_patience: str
:keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
:paramtype enable_onnx_normalization: str
:keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
Must be a positive integer.
:paramtype evaluation_frequency: str
:keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:paramtype gradient_accumulation_step: str
:keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
integer.
For instance, passing 2 as value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype layers_to_freeze: str
:keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:paramtype learning_rate: str
:keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'.
:paramtype learning_rate_scheduler: str
:keyword model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype model_name: str
:keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
1].
:paramtype momentum: str
:keyword nesterov: Enable nesterov when optimizer is 'sgd'.
:paramtype nesterov: str
:keyword number_of_epochs: Number of training epochs. Must be a positive integer.
:paramtype number_of_epochs: str
:keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
:paramtype number_of_workers: str
:keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
:paramtype optimizer: str
:keyword random_seed: Random seed to be used when using deterministic training.
:paramtype random_seed: str
:keyword split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:paramtype split_ratio: str
:keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
in the range [0, 1].
:paramtype step_lr_gamma: str
:keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
a positive integer.
:paramtype step_lr_step_size: str
:keyword training_batch_size: Training batch size. Must be a positive integer.
:paramtype training_batch_size: str
:keyword validation_batch_size: Validation batch size. Must be a positive integer.
:paramtype validation_batch_size: str
:keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:paramtype warmup_cosine_lr_cycles: str
:keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:paramtype warmup_cosine_lr_warmup_epochs: str
:keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
be a float in the range [0, 1].
:paramtype weight_decay: str
:keyword training_crop_size: Image crop size that is input to the neural network for the
training dataset. Must be a positive integer.
:paramtype training_crop_size: str
:keyword validation_crop_size: Image crop size that is input to the neural network for the
validation dataset. Must be a positive integer.
:paramtype validation_crop_size: str
:keyword validation_resize_size: Image size to which to resize before cropping for validation
dataset. Must be a positive integer.
:paramtype validation_resize_size: str
:keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss.
1 for weighted loss with sqrt(class_weights). 2 for weighted loss with class_weights. Must be
0 or 1 or 2.
:paramtype weighted_loss: str
"""
super(ImageModelDistributionSettingsClassification, self).__init__(ams_gradient=ams_gradient, augmentations=augmentations, beta1=beta1, beta2=beta2, distributed=distributed, early_stopping=early_stopping, early_stopping_delay=early_stopping_delay, early_stopping_patience=early_stopping_patience, enable_onnx_normalization=enable_onnx_normalization, evaluation_frequency=evaluation_frequency, gradient_accumulation_step=gradient_accumulation_step, layers_to_freeze=layers_to_freeze, learning_rate=learning_rate, learning_rate_scheduler=learning_rate_scheduler, model_name=model_name, momentum=momentum, nesterov=nesterov, number_of_epochs=number_of_epochs, number_of_workers=number_of_workers, optimizer=optimizer, random_seed=random_seed, split_ratio=split_ratio, step_lr_gamma=step_lr_gamma, step_lr_step_size=step_lr_step_size, training_batch_size=training_batch_size, validation_batch_size=validation_batch_size, warmup_cosine_lr_cycles=warmup_cosine_lr_cycles, warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs, weight_decay=weight_decay, **kwargs)
self.training_crop_size = training_crop_size
self.validation_crop_size = validation_crop_size
self.validation_resize_size = validation_resize_size
self.weighted_loss = weighted_loss
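# Illustrative usage sketch (not part of the generated code): a classification-specific
# sweep space that also varies the training crop size and the weighted-loss mode on top
# of the common hyperparameters inherited from ImageModelDistributionSettings. The crop
# sizes are example values only.
def _example_classification_distribution_settings() -> "ImageModelDistributionSettingsClassification":
    return ImageModelDistributionSettingsClassification(
        model_name="choice('seresnext', 'resnest50')",
        learning_rate="uniform(0.001, 0.01)",
        training_crop_size="choice(224, 256)",
        weighted_loss="choice(0, 1, 2)",
    )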
class ImageModelDistributionSettingsObjectDetection(ImageModelDistributionSettings):
"""Distribution expressions to sweep over values of model settings.
:code:`<example>
Some examples are:
<code>
ModelName = "choice('seresnext', 'resnest50')";
LearningRate = "uniform(0.001, 0.01)";
LayersToFreeze = "choice(0, 2)";
</code></example>`
For more details on how to compose distribution expressions please check the documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-tune-hyperparameters
For more information on the available settings please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:vartype ams_gradient: str
:ivar augmentations: Settings for using Augmentations.
:vartype augmentations: str
:ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta1: str
:ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta2: str
:ivar distributed: Whether to use distributed training.
:vartype distributed: str
:ivar early_stopping: Enable early stopping logic during training.
:vartype early_stopping: str
:ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
primary metric improvement
is tracked for early stopping. Must be a positive integer.
:vartype early_stopping_delay: str
:ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:vartype early_stopping_patience: str
:ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
:vartype enable_onnx_normalization: str
:ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
be a positive integer.
:vartype evaluation_frequency: str
:ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:vartype gradient_accumulation_step: str
:ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
For instance, passing 2 as value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype layers_to_freeze: str
:ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:vartype learning_rate: str
:ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'.
:vartype learning_rate_scheduler: str
:ivar model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype model_name: str
:ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
:vartype momentum: str
:ivar nesterov: Enable nesterov when optimizer is 'sgd'.
:vartype nesterov: str
:ivar number_of_epochs: Number of training epochs. Must be a positive integer.
:vartype number_of_epochs: str
:ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
:vartype number_of_workers: str
:ivar optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
:vartype optimizer: str
:ivar random_seed: Random seed to be used when using deterministic training.
:vartype random_seed: str
:ivar split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:vartype split_ratio: str
:ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
the range [0, 1].
:vartype step_lr_gamma: str
:ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
positive integer.
:vartype step_lr_step_size: str
:ivar training_batch_size: Training batch size. Must be a positive integer.
:vartype training_batch_size: str
:ivar validation_batch_size: Validation batch size. Must be a positive integer.
:vartype validation_batch_size: str
:ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:vartype warmup_cosine_lr_cycles: str
:ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:vartype warmup_cosine_lr_warmup_epochs: str
:ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
a float in the range [0, 1].
:vartype weight_decay: str
:ivar box_detections_per_image: Maximum number of detections per image, for all classes. Must
be a positive integer.
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype box_detections_per_image: str
:ivar box_score_threshold: During inference, only return proposals with a classification score
greater than
BoxScoreThreshold. Must be a float in the range [0, 1].
:vartype box_score_threshold: str
:ivar image_size: Image size for train and validation. Must be a positive integer.
Note: The training run may get into CUDA OOM if the size is too big.
Note: This setting is only supported for the 'yolov5' algorithm.
:vartype image_size: str
:ivar max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype max_size: str
:ivar min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype min_size: str
:ivar model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
Note: training run may get into CUDA OOM if the model size is too big.
Note: This setting is only supported for the 'yolov5' algorithm.
:vartype model_size: str
:ivar multi_scale: Enable multi-scale image by varying image size by +/- 50%.
Note: the training run may get into CUDA OOM if there is insufficient GPU memory.
Note: This setting is only supported for the 'yolov5' algorithm.
:vartype multi_scale: str
:ivar nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
float in the range [0, 1].
:vartype nms_iou_threshold: str
:ivar tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
be
None to enable small object detection logic. A string containing two integers in mxn format.
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype tile_grid_size: str
:ivar tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be float
in the range [0, 1).
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype tile_overlap_ratio: str
:ivar tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
predictions from tiles and image.
Used in validation/inference. Must be a float in the range [0, 1].
Note: This setting is not supported for the 'yolov5' algorithm.
NMS: Non-maximum suppression.
:vartype tile_predictions_nms_threshold: str
:ivar validation_iou_threshold: IOU threshold to use when computing validation metric. Must be
float in the range [0, 1].
:vartype validation_iou_threshold: str
:ivar validation_metric_type: Metric computation method to use for validation metrics. Must be
'none', 'coco', 'voc', or 'coco_voc'.
:vartype validation_metric_type: str
"""
_attribute_map = {
'ams_gradient': {'key': 'amsGradient', 'type': 'str'},
'augmentations': {'key': 'augmentations', 'type': 'str'},
'beta1': {'key': 'beta1', 'type': 'str'},
'beta2': {'key': 'beta2', 'type': 'str'},
'distributed': {'key': 'distributed', 'type': 'str'},
'early_stopping': {'key': 'earlyStopping', 'type': 'str'},
'early_stopping_delay': {'key': 'earlyStoppingDelay', 'type': 'str'},
'early_stopping_patience': {'key': 'earlyStoppingPatience', 'type': 'str'},
'enable_onnx_normalization': {'key': 'enableOnnxNormalization', 'type': 'str'},
'evaluation_frequency': {'key': 'evaluationFrequency', 'type': 'str'},
'gradient_accumulation_step': {'key': 'gradientAccumulationStep', 'type': 'str'},
'layers_to_freeze': {'key': 'layersToFreeze', 'type': 'str'},
'learning_rate': {'key': 'learningRate', 'type': 'str'},
'learning_rate_scheduler': {'key': 'learningRateScheduler', 'type': 'str'},
'model_name': {'key': 'modelName', 'type': 'str'},
'momentum': {'key': 'momentum', 'type': 'str'},
'nesterov': {'key': 'nesterov', 'type': 'str'},
'number_of_epochs': {'key': 'numberOfEpochs', 'type': 'str'},
'number_of_workers': {'key': 'numberOfWorkers', 'type': 'str'},
'optimizer': {'key': 'optimizer', 'type': 'str'},
'random_seed': {'key': 'randomSeed', 'type': 'str'},
'split_ratio': {'key': 'splitRatio', 'type': 'str'},
'step_lr_gamma': {'key': 'stepLRGamma', 'type': 'str'},
'step_lr_step_size': {'key': 'stepLRStepSize', 'type': 'str'},
'training_batch_size': {'key': 'trainingBatchSize', 'type': 'str'},
'validation_batch_size': {'key': 'validationBatchSize', 'type': 'str'},
'warmup_cosine_lr_cycles': {'key': 'warmupCosineLRCycles', 'type': 'str'},
'warmup_cosine_lr_warmup_epochs': {'key': 'warmupCosineLRWarmupEpochs', 'type': 'str'},
'weight_decay': {'key': 'weightDecay', 'type': 'str'},
'box_detections_per_image': {'key': 'boxDetectionsPerImage', 'type': 'str'},
'box_score_threshold': {'key': 'boxScoreThreshold', 'type': 'str'},
'image_size': {'key': 'imageSize', 'type': 'str'},
'max_size': {'key': 'maxSize', 'type': 'str'},
'min_size': {'key': 'minSize', 'type': 'str'},
'model_size': {'key': 'modelSize', 'type': 'str'},
'multi_scale': {'key': 'multiScale', 'type': 'str'},
'nms_iou_threshold': {'key': 'nmsIouThreshold', 'type': 'str'},
'tile_grid_size': {'key': 'tileGridSize', 'type': 'str'},
'tile_overlap_ratio': {'key': 'tileOverlapRatio', 'type': 'str'},
'tile_predictions_nms_threshold': {'key': 'tilePredictionsNmsThreshold', 'type': 'str'},
'validation_iou_threshold': {'key': 'validationIouThreshold', 'type': 'str'},
'validation_metric_type': {'key': 'validationMetricType', 'type': 'str'},
}
def __init__(
self,
*,
ams_gradient: Optional[str] = None,
augmentations: Optional[str] = None,
beta1: Optional[str] = None,
beta2: Optional[str] = None,
distributed: Optional[str] = None,
early_stopping: Optional[str] = None,
early_stopping_delay: Optional[str] = None,
early_stopping_patience: Optional[str] = None,
enable_onnx_normalization: Optional[str] = None,
evaluation_frequency: Optional[str] = None,
gradient_accumulation_step: Optional[str] = None,
layers_to_freeze: Optional[str] = None,
learning_rate: Optional[str] = None,
learning_rate_scheduler: Optional[str] = None,
model_name: Optional[str] = None,
momentum: Optional[str] = None,
nesterov: Optional[str] = None,
number_of_epochs: Optional[str] = None,
number_of_workers: Optional[str] = None,
optimizer: Optional[str] = None,
random_seed: Optional[str] = None,
split_ratio: Optional[str] = None,
step_lr_gamma: Optional[str] = None,
step_lr_step_size: Optional[str] = None,
training_batch_size: Optional[str] = None,
validation_batch_size: Optional[str] = None,
warmup_cosine_lr_cycles: Optional[str] = None,
warmup_cosine_lr_warmup_epochs: Optional[str] = None,
weight_decay: Optional[str] = None,
box_detections_per_image: Optional[str] = None,
box_score_threshold: Optional[str] = None,
image_size: Optional[str] = None,
max_size: Optional[str] = None,
min_size: Optional[str] = None,
model_size: Optional[str] = None,
multi_scale: Optional[str] = None,
nms_iou_threshold: Optional[str] = None,
tile_grid_size: Optional[str] = None,
tile_overlap_ratio: Optional[str] = None,
tile_predictions_nms_threshold: Optional[str] = None,
validation_iou_threshold: Optional[str] = None,
validation_metric_type: Optional[str] = None,
**kwargs
):
"""
:keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:paramtype ams_gradient: str
:keyword augmentations: Settings for using Augmentations.
:paramtype augmentations: str
:keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta1: str
:keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta2: str
:keyword distributed: Whether to use distributed training.
:paramtype distributed: str
:keyword early_stopping: Enable early stopping logic during training.
:paramtype early_stopping: str
:keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
before primary metric improvement
is tracked for early stopping. Must be a positive integer.
:paramtype early_stopping_delay: str
:keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:paramtype early_stopping_patience: str
:keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
:paramtype enable_onnx_normalization: str
:keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
Must be a positive integer.
:paramtype evaluation_frequency: str
:keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:paramtype gradient_accumulation_step: str
:keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
integer.
For instance, passing 2 as value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype layers_to_freeze: str
:keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:paramtype learning_rate: str
:keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'.
:paramtype learning_rate_scheduler: str
:keyword model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype model_name: str
:keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
1].
:paramtype momentum: str
:keyword nesterov: Enable nesterov when optimizer is 'sgd'.
:paramtype nesterov: str
:keyword number_of_epochs: Number of training epochs. Must be a positive integer.
:paramtype number_of_epochs: str
:keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
:paramtype number_of_workers: str
:keyword optimizer: Type of optimizer. Must be either 'sgd', 'adam', or 'adamw'.
:paramtype optimizer: str
:keyword random_seed: Random seed to be used when using deterministic training.
:paramtype random_seed: str
:keyword split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:paramtype split_ratio: str
:keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
in the range [0, 1].
:paramtype step_lr_gamma: str
:keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
a positive integer.
:paramtype step_lr_step_size: str
:keyword training_batch_size: Training batch size. Must be a positive integer.
:paramtype training_batch_size: str
:keyword validation_batch_size: Validation batch size. Must be a positive integer.
:paramtype validation_batch_size: str
:keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:paramtype warmup_cosine_lr_cycles: str
:keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:paramtype warmup_cosine_lr_warmup_epochs: str
:keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
be a float in the range [0, 1].
:paramtype weight_decay: str
:keyword box_detections_per_image: Maximum number of detections per image, for all classes.
Must be a positive integer.
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype box_detections_per_image: str
:keyword box_score_threshold: During inference, only return proposals with a classification
score greater than
BoxScoreThreshold. Must be a float in the range [0, 1].
:paramtype box_score_threshold: str
:keyword image_size: Image size for train and validation. Must be a positive integer.
Note: The training run may get into CUDA OOM if the size is too big.
Note: This setting is only supported for the 'yolov5' algorithm.
:paramtype image_size: str
:keyword max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype max_size: str
:keyword min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype min_size: str
:keyword model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
Note: training run may get into CUDA OOM if the model size is too big.
Note: This setting is only supported for the 'yolov5' algorithm.
:paramtype model_size: str
:keyword multi_scale: Enable multi-scale image by varying image size by +/- 50%.
Note: the training run may get into CUDA OOM if there is insufficient GPU memory.
Note: This setting is only supported for the 'yolov5' algorithm.
:paramtype multi_scale: str
:keyword nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
float in the range [0, 1].
:paramtype nms_iou_threshold: str
:keyword tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must
not be
None to enable small object detection logic. A string containing two integers in mxn format.
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype tile_grid_size: str
:keyword tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be
float in the range [0, 1).
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype tile_overlap_ratio: str
:keyword tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
predictions from tiles and image.
Used in validation/inference. Must be a float in the range [0, 1].
Note: This setting is not supported for the 'yolov5' algorithm.
NMS: Non-maximum suppression.
:paramtype tile_predictions_nms_threshold: str
:keyword validation_iou_threshold: IOU threshold to use when computing validation metric. Must
be a float in the range [0, 1].
:paramtype validation_iou_threshold: str
:keyword validation_metric_type: Metric computation method to use for validation metrics. Must
be 'none', 'coco', 'voc', or 'coco_voc'.
:paramtype validation_metric_type: str
"""
super(ImageModelDistributionSettingsObjectDetection, self).__init__(ams_gradient=ams_gradient, augmentations=augmentations, beta1=beta1, beta2=beta2, distributed=distributed, early_stopping=early_stopping, early_stopping_delay=early_stopping_delay, early_stopping_patience=early_stopping_patience, enable_onnx_normalization=enable_onnx_normalization, evaluation_frequency=evaluation_frequency, gradient_accumulation_step=gradient_accumulation_step, layers_to_freeze=layers_to_freeze, learning_rate=learning_rate, learning_rate_scheduler=learning_rate_scheduler, model_name=model_name, momentum=momentum, nesterov=nesterov, number_of_epochs=number_of_epochs, number_of_workers=number_of_workers, optimizer=optimizer, random_seed=random_seed, split_ratio=split_ratio, step_lr_gamma=step_lr_gamma, step_lr_step_size=step_lr_step_size, training_batch_size=training_batch_size, validation_batch_size=validation_batch_size, warmup_cosine_lr_cycles=warmup_cosine_lr_cycles, warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs, weight_decay=weight_decay, **kwargs)
self.box_detections_per_image = box_detections_per_image
self.box_score_threshold = box_score_threshold
self.image_size = image_size
self.max_size = max_size
self.min_size = min_size
self.model_size = model_size
self.multi_scale = multi_scale
self.nms_iou_threshold = nms_iou_threshold
self.tile_grid_size = tile_grid_size
self.tile_overlap_ratio = tile_overlap_ratio
self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
self.validation_iou_threshold = validation_iou_threshold
self.validation_metric_type = validation_metric_type
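
# Illustrative sketch (not part of the generated models): building an object-detection
# hyperparameter search space. Every field on the *DistributionSettings* classes is a
# string, so ranges and choices are expressed as text rather than typed values. The
# distribution expressions below ("choice(...)", "uniform(...)") are assumptions drawn
# from common AutoML-for-images examples; check the linked
# how-to-auto-train-image-models documentation for the syntax your service version accepts.
def _example_object_detection_search_space():
    """Documentation-only example returning a sample search-space object."""
    return ImageModelDistributionSettingsObjectDetection(
        model_name="choice('yolov5', 'fasterrcnn_resnet50_fpn')",
        learning_rate="uniform(0.0001, 0.01)",
        optimizer="choice('sgd', 'adam')",
        image_size="choice(640, 704)",          # honoured only by 'yolov5'
        min_size="choice(600, 800)",            # ignored by 'yolov5'
        nms_iou_threshold="uniform(0.5, 0.7)",
    )
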
class ImageModelSettings(msrest.serialization.Model):
"""Settings used for training the model.
For more information on the available settings please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:ivar advanced_settings: Settings for advanced scenarios.
:vartype advanced_settings: str
:ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:vartype ams_gradient: bool
:ivar augmentations: Settings for using Augmentations.
:vartype augmentations: str
:ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta1: float
:ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta2: float
:ivar checkpoint_dataset_id: FileDataset id for pretrained checkpoint(s) for incremental
training.
Make sure to pass CheckpointFilename along with CheckpointDatasetId.
:vartype checkpoint_dataset_id: str
:ivar checkpoint_filename: The pretrained checkpoint filename in FileDataset for incremental
training.
Make sure to pass CheckpointDatasetId along with CheckpointFilename.
:vartype checkpoint_filename: str
:ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
:vartype checkpoint_frequency: int
:ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
incremental training.
:vartype checkpoint_run_id: str
:ivar distributed: Whether to use distributed training.
:vartype distributed: bool
:ivar early_stopping: Enable early stopping logic during training.
:vartype early_stopping: bool
:ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
primary metric improvement
is tracked for early stopping. Must be a positive integer.
:vartype early_stopping_delay: int
:ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:vartype early_stopping_patience: int
:ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
:vartype enable_onnx_normalization: bool
:ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
be a positive integer.
:vartype evaluation_frequency: int
:ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:vartype gradient_accumulation_step: int
:ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
For instance, passing 2 as the value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype layers_to_freeze: int
:ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:vartype learning_rate: float
:ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'. Possible values include: "None", "WarmupCosine", "Step".
:vartype learning_rate_scheduler: str or
~azure.mgmt.machinelearningservices.models.LearningRateScheduler
:ivar model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype model_name: str
:ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
:vartype momentum: float
:ivar nesterov: Enable nesterov when optimizer is 'sgd'.
:vartype nesterov: bool
:ivar number_of_epochs: Number of training epochs. Must be a positive integer.
:vartype number_of_epochs: int
:ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
:vartype number_of_workers: int
:ivar optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
:vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
:ivar random_seed: Random seed to be used when using deterministic training.
:vartype random_seed: int
:ivar split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:vartype split_ratio: float
:ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
the range [0, 1].
:vartype step_lr_gamma: float
:ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
positive integer.
:vartype step_lr_step_size: int
:ivar training_batch_size: Training batch size. Must be a positive integer.
:vartype training_batch_size: int
:ivar validation_batch_size: Validation batch size. Must be a positive integer.
:vartype validation_batch_size: int
:ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:vartype warmup_cosine_lr_cycles: float
:ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:vartype warmup_cosine_lr_warmup_epochs: int
:ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
a float in the range [0, 1].
:vartype weight_decay: float
"""
_attribute_map = {
'advanced_settings': {'key': 'advancedSettings', 'type': 'str'},
'ams_gradient': {'key': 'amsGradient', 'type': 'bool'},
'augmentations': {'key': 'augmentations', 'type': 'str'},
'beta1': {'key': 'beta1', 'type': 'float'},
'beta2': {'key': 'beta2', 'type': 'float'},
'checkpoint_dataset_id': {'key': 'checkpointDatasetId', 'type': 'str'},
'checkpoint_filename': {'key': 'checkpointFilename', 'type': 'str'},
'checkpoint_frequency': {'key': 'checkpointFrequency', 'type': 'int'},
'checkpoint_run_id': {'key': 'checkpointRunId', 'type': 'str'},
'distributed': {'key': 'distributed', 'type': 'bool'},
'early_stopping': {'key': 'earlyStopping', 'type': 'bool'},
'early_stopping_delay': {'key': 'earlyStoppingDelay', 'type': 'int'},
'early_stopping_patience': {'key': 'earlyStoppingPatience', 'type': 'int'},
'enable_onnx_normalization': {'key': 'enableOnnxNormalization', 'type': 'bool'},
'evaluation_frequency': {'key': 'evaluationFrequency', 'type': 'int'},
'gradient_accumulation_step': {'key': 'gradientAccumulationStep', 'type': 'int'},
'layers_to_freeze': {'key': 'layersToFreeze', 'type': 'int'},
'learning_rate': {'key': 'learningRate', 'type': 'float'},
'learning_rate_scheduler': {'key': 'learningRateScheduler', 'type': 'str'},
'model_name': {'key': 'modelName', 'type': 'str'},
'momentum': {'key': 'momentum', 'type': 'float'},
'nesterov': {'key': 'nesterov', 'type': 'bool'},
'number_of_epochs': {'key': 'numberOfEpochs', 'type': 'int'},
'number_of_workers': {'key': 'numberOfWorkers', 'type': 'int'},
'optimizer': {'key': 'optimizer', 'type': 'str'},
'random_seed': {'key': 'randomSeed', 'type': 'int'},
'split_ratio': {'key': 'splitRatio', 'type': 'float'},
'step_lr_gamma': {'key': 'stepLRGamma', 'type': 'float'},
'step_lr_step_size': {'key': 'stepLRStepSize', 'type': 'int'},
'training_batch_size': {'key': 'trainingBatchSize', 'type': 'int'},
'validation_batch_size': {'key': 'validationBatchSize', 'type': 'int'},
'warmup_cosine_lr_cycles': {'key': 'warmupCosineLRCycles', 'type': 'float'},
'warmup_cosine_lr_warmup_epochs': {'key': 'warmupCosineLRWarmupEpochs', 'type': 'int'},
'weight_decay': {'key': 'weightDecay', 'type': 'float'},
}
def __init__(
self,
*,
advanced_settings: Optional[str] = None,
ams_gradient: Optional[bool] = None,
augmentations: Optional[str] = None,
beta1: Optional[float] = None,
beta2: Optional[float] = None,
checkpoint_dataset_id: Optional[str] = None,
checkpoint_filename: Optional[str] = None,
checkpoint_frequency: Optional[int] = None,
checkpoint_run_id: Optional[str] = None,
distributed: Optional[bool] = None,
early_stopping: Optional[bool] = None,
early_stopping_delay: Optional[int] = None,
early_stopping_patience: Optional[int] = None,
enable_onnx_normalization: Optional[bool] = None,
evaluation_frequency: Optional[int] = None,
gradient_accumulation_step: Optional[int] = None,
layers_to_freeze: Optional[int] = None,
learning_rate: Optional[float] = None,
learning_rate_scheduler: Optional[Union[str, "LearningRateScheduler"]] = None,
model_name: Optional[str] = None,
momentum: Optional[float] = None,
nesterov: Optional[bool] = None,
number_of_epochs: Optional[int] = None,
number_of_workers: Optional[int] = None,
optimizer: Optional[Union[str, "StochasticOptimizer"]] = None,
random_seed: Optional[int] = None,
split_ratio: Optional[float] = None,
step_lr_gamma: Optional[float] = None,
step_lr_step_size: Optional[int] = None,
training_batch_size: Optional[int] = None,
validation_batch_size: Optional[int] = None,
warmup_cosine_lr_cycles: Optional[float] = None,
warmup_cosine_lr_warmup_epochs: Optional[int] = None,
weight_decay: Optional[float] = None,
**kwargs
):
"""
:keyword advanced_settings: Settings for advanced scenarios.
:paramtype advanced_settings: str
:keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:paramtype ams_gradient: bool
:keyword augmentations: Settings for using Augmentations.
:paramtype augmentations: str
:keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta1: float
:keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta2: float
:keyword checkpoint_dataset_id: FileDataset id for pretrained checkpoint(s) for incremental
training.
Make sure to pass CheckpointFilename along with CheckpointDatasetId.
:paramtype checkpoint_dataset_id: str
:keyword checkpoint_filename: The pretrained checkpoint filename in FileDataset for incremental
training.
Make sure to pass CheckpointDatasetId along with CheckpointFilename.
:paramtype checkpoint_filename: str
:keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
integer.
:paramtype checkpoint_frequency: int
:keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
incremental training.
:paramtype checkpoint_run_id: str
:keyword distributed: Whether to use distributed training.
:paramtype distributed: bool
:keyword early_stopping: Enable early stopping logic during training.
:paramtype early_stopping: bool
:keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
before primary metric improvement
is tracked for early stopping. Must be a positive integer.
:paramtype early_stopping_delay: int
:keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:paramtype early_stopping_patience: int
:keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
:paramtype enable_onnx_normalization: bool
:keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
Must be a positive integer.
:paramtype evaluation_frequency: int
:keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:paramtype gradient_accumulation_step: int
:keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
integer.
For instance, passing 2 as the value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype layers_to_freeze: int
:keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:paramtype learning_rate: float
:keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'. Possible values include: "None", "WarmupCosine", "Step".
:paramtype learning_rate_scheduler: str or
~azure.mgmt.machinelearningservices.models.LearningRateScheduler
:keyword model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype model_name: str
:keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
1].
:paramtype momentum: float
:keyword nesterov: Enable nesterov when optimizer is 'sgd'.
:paramtype nesterov: bool
:keyword number_of_epochs: Number of training epochs. Must be a positive integer.
:paramtype number_of_epochs: int
:keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
:paramtype number_of_workers: int
:keyword optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
:paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
:keyword random_seed: Random seed to be used when using deterministic training.
:paramtype random_seed: int
:keyword split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:paramtype split_ratio: float
:keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
in the range [0, 1].
:paramtype step_lr_gamma: float
:keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
a positive integer.
:paramtype step_lr_step_size: int
:keyword training_batch_size: Training batch size. Must be a positive integer.
:paramtype training_batch_size: int
:keyword validation_batch_size: Validation batch size. Must be a positive integer.
:paramtype validation_batch_size: int
:keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:paramtype warmup_cosine_lr_cycles: float
:keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:paramtype warmup_cosine_lr_warmup_epochs: int
:keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
be a float in the range [0, 1].
:paramtype weight_decay: float
"""
super(ImageModelSettings, self).__init__(**kwargs)
self.advanced_settings = advanced_settings
self.ams_gradient = ams_gradient
self.augmentations = augmentations
self.beta1 = beta1
self.beta2 = beta2
self.checkpoint_dataset_id = checkpoint_dataset_id
self.checkpoint_filename = checkpoint_filename
self.checkpoint_frequency = checkpoint_frequency
self.checkpoint_run_id = checkpoint_run_id
self.distributed = distributed
self.early_stopping = early_stopping
self.early_stopping_delay = early_stopping_delay
self.early_stopping_patience = early_stopping_patience
self.enable_onnx_normalization = enable_onnx_normalization
self.evaluation_frequency = evaluation_frequency
self.gradient_accumulation_step = gradient_accumulation_step
self.layers_to_freeze = layers_to_freeze
self.learning_rate = learning_rate
self.learning_rate_scheduler = learning_rate_scheduler
self.model_name = model_name
self.momentum = momentum
self.nesterov = nesterov
self.number_of_epochs = number_of_epochs
self.number_of_workers = number_of_workers
self.optimizer = optimizer
self.random_seed = random_seed
self.split_ratio = split_ratio
self.step_lr_gamma = step_lr_gamma
self.step_lr_step_size = step_lr_step_size
self.training_batch_size = training_batch_size
self.validation_batch_size = validation_batch_size
self.warmup_cosine_lr_cycles = warmup_cosine_lr_cycles
self.warmup_cosine_lr_warmup_epochs = warmup_cosine_lr_warmup_epochs
self.weight_decay = weight_decay
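
# Illustrative sketch (not part of the generated models): fixed (non-search) training
# settings. Unlike the *DistributionSettings* classes, ImageModelSettings takes typed
# values, and msrest serializes them to the camelCase keys declared in _attribute_map
# (e.g. learning_rate -> "learningRate"). The hyperparameter values below are
# assumptions chosen only to illustrate the documented types and ranges.
def _example_image_model_settings():
    """Documentation-only example returning the REST payload for sample settings."""
    settings = ImageModelSettings(
        learning_rate=0.005,                    # float in [0, 1]
        learning_rate_scheduler="WarmupCosine",
        warmup_cosine_lr_warmup_epochs=2,
        number_of_epochs=15,
        training_batch_size=32,
        optimizer="Sgd",
        momentum=0.9,                           # only used by the 'sgd' optimizer
        early_stopping=True,
    )
    return settings.serialize()                 # dict with camelCase keys for the REST API
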
class ImageModelSettingsClassification(ImageModelSettings):
"""Settings used for training the model.
For more information on the available settings please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:ivar advanced_settings: Settings for advanced scenarios.
:vartype advanced_settings: str
:ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:vartype ams_gradient: bool
:ivar augmentations: Settings for using Augmentations.
:vartype augmentations: str
:ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta1: float
:ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta2: float
:ivar checkpoint_dataset_id: FileDataset id for pretrained checkpoint(s) for incremental
training.
Make sure to pass CheckpointFilename along with CheckpointDatasetId.
:vartype checkpoint_dataset_id: str
:ivar checkpoint_filename: The pretrained checkpoint filename in FileDataset for incremental
training.
Make sure to pass CheckpointDatasetId along with CheckpointFilename.
:vartype checkpoint_filename: str
:ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
:vartype checkpoint_frequency: int
:ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
incremental training.
:vartype checkpoint_run_id: str
:ivar distributed: Whether to use distributed training.
:vartype distributed: bool
:ivar early_stopping: Enable early stopping logic during training.
:vartype early_stopping: bool
:ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
primary metric improvement
is tracked for early stopping. Must be a positive integer.
:vartype early_stopping_delay: int
:ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:vartype early_stopping_patience: int
:ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
:vartype enable_onnx_normalization: bool
:ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
be a positive integer.
:vartype evaluation_frequency: int
:ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:vartype gradient_accumulation_step: int
:ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
For instance, passing 2 as the value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype layers_to_freeze: int
:ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:vartype learning_rate: float
:ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'. Possible values include: "None", "WarmupCosine", "Step".
:vartype learning_rate_scheduler: str or
~azure.mgmt.machinelearningservices.models.LearningRateScheduler
:ivar model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype model_name: str
:ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
:vartype momentum: float
:ivar nesterov: Enable nesterov when optimizer is 'sgd'.
:vartype nesterov: bool
:ivar number_of_epochs: Number of training epochs. Must be a positive integer.
:vartype number_of_epochs: int
:ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
:vartype number_of_workers: int
:ivar optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
:vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
:ivar random_seed: Random seed to be used when using deterministic training.
:vartype random_seed: int
:ivar split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:vartype split_ratio: float
:ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
the range [0, 1].
:vartype step_lr_gamma: float
:ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
positive integer.
:vartype step_lr_step_size: int
:ivar training_batch_size: Training batch size. Must be a positive integer.
:vartype training_batch_size: int
:ivar validation_batch_size: Validation batch size. Must be a positive integer.
:vartype validation_batch_size: int
:ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:vartype warmup_cosine_lr_cycles: float
:ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:vartype warmup_cosine_lr_warmup_epochs: int
:ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
a float in the range [0, 1].
:vartype weight_decay: float
:ivar training_crop_size: Image crop size that is input to the neural network for the training
dataset. Must be a positive integer.
:vartype training_crop_size: int
:ivar validation_crop_size: Image crop size that is input to the neural network for the
validation dataset. Must be a positive integer.
:vartype validation_crop_size: int
:ivar validation_resize_size: Image size to which to resize before cropping for validation
dataset. Must be a positive integer.
:vartype validation_resize_size: int
:ivar weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be
0, 1, or 2.
:vartype weighted_loss: int
"""
_attribute_map = {
'advanced_settings': {'key': 'advancedSettings', 'type': 'str'},
'ams_gradient': {'key': 'amsGradient', 'type': 'bool'},
'augmentations': {'key': 'augmentations', 'type': 'str'},
'beta1': {'key': 'beta1', 'type': 'float'},
'beta2': {'key': 'beta2', 'type': 'float'},
'checkpoint_dataset_id': {'key': 'checkpointDatasetId', 'type': 'str'},
'checkpoint_filename': {'key': 'checkpointFilename', 'type': 'str'},
'checkpoint_frequency': {'key': 'checkpointFrequency', 'type': 'int'},
'checkpoint_run_id': {'key': 'checkpointRunId', 'type': 'str'},
'distributed': {'key': 'distributed', 'type': 'bool'},
'early_stopping': {'key': 'earlyStopping', 'type': 'bool'},
'early_stopping_delay': {'key': 'earlyStoppingDelay', 'type': 'int'},
'early_stopping_patience': {'key': 'earlyStoppingPatience', 'type': 'int'},
'enable_onnx_normalization': {'key': 'enableOnnxNormalization', 'type': 'bool'},
'evaluation_frequency': {'key': 'evaluationFrequency', 'type': 'int'},
'gradient_accumulation_step': {'key': 'gradientAccumulationStep', 'type': 'int'},
'layers_to_freeze': {'key': 'layersToFreeze', 'type': 'int'},
'learning_rate': {'key': 'learningRate', 'type': 'float'},
'learning_rate_scheduler': {'key': 'learningRateScheduler', 'type': 'str'},
'model_name': {'key': 'modelName', 'type': 'str'},
'momentum': {'key': 'momentum', 'type': 'float'},
'nesterov': {'key': 'nesterov', 'type': 'bool'},
'number_of_epochs': {'key': 'numberOfEpochs', 'type': 'int'},
'number_of_workers': {'key': 'numberOfWorkers', 'type': 'int'},
'optimizer': {'key': 'optimizer', 'type': 'str'},
'random_seed': {'key': 'randomSeed', 'type': 'int'},
'split_ratio': {'key': 'splitRatio', 'type': 'float'},
'step_lr_gamma': {'key': 'stepLRGamma', 'type': 'float'},
'step_lr_step_size': {'key': 'stepLRStepSize', 'type': 'int'},
'training_batch_size': {'key': 'trainingBatchSize', 'type': 'int'},
'validation_batch_size': {'key': 'validationBatchSize', 'type': 'int'},
'warmup_cosine_lr_cycles': {'key': 'warmupCosineLRCycles', 'type': 'float'},
'warmup_cosine_lr_warmup_epochs': {'key': 'warmupCosineLRWarmupEpochs', 'type': 'int'},
'weight_decay': {'key': 'weightDecay', 'type': 'float'},
'training_crop_size': {'key': 'trainingCropSize', 'type': 'int'},
'validation_crop_size': {'key': 'validationCropSize', 'type': 'int'},
'validation_resize_size': {'key': 'validationResizeSize', 'type': 'int'},
'weighted_loss': {'key': 'weightedLoss', 'type': 'int'},
}
def __init__(
self,
*,
advanced_settings: Optional[str] = None,
ams_gradient: Optional[bool] = None,
augmentations: Optional[str] = None,
beta1: Optional[float] = None,
beta2: Optional[float] = None,
checkpoint_dataset_id: Optional[str] = None,
checkpoint_filename: Optional[str] = None,
checkpoint_frequency: Optional[int] = None,
checkpoint_run_id: Optional[str] = None,
distributed: Optional[bool] = None,
early_stopping: Optional[bool] = None,
early_stopping_delay: Optional[int] = None,
early_stopping_patience: Optional[int] = None,
enable_onnx_normalization: Optional[bool] = None,
evaluation_frequency: Optional[int] = None,
gradient_accumulation_step: Optional[int] = None,
layers_to_freeze: Optional[int] = None,
learning_rate: Optional[float] = None,
learning_rate_scheduler: Optional[Union[str, "LearningRateScheduler"]] = None,
model_name: Optional[str] = None,
momentum: Optional[float] = None,
nesterov: Optional[bool] = None,
number_of_epochs: Optional[int] = None,
number_of_workers: Optional[int] = None,
optimizer: Optional[Union[str, "StochasticOptimizer"]] = None,
random_seed: Optional[int] = None,
split_ratio: Optional[float] = None,
step_lr_gamma: Optional[float] = None,
step_lr_step_size: Optional[int] = None,
training_batch_size: Optional[int] = None,
validation_batch_size: Optional[int] = None,
warmup_cosine_lr_cycles: Optional[float] = None,
warmup_cosine_lr_warmup_epochs: Optional[int] = None,
weight_decay: Optional[float] = None,
training_crop_size: Optional[int] = None,
validation_crop_size: Optional[int] = None,
validation_resize_size: Optional[int] = None,
weighted_loss: Optional[int] = None,
**kwargs
):
"""
:keyword advanced_settings: Settings for advanced scenarios.
:paramtype advanced_settings: str
:keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:paramtype ams_gradient: bool
:keyword augmentations: Settings for using Augmentations.
:paramtype augmentations: str
:keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta1: float
:keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta2: float
:keyword checkpoint_dataset_id: FileDataset id for pretrained checkpoint(s) for incremental
training.
Make sure to pass CheckpointFilename along with CheckpointDatasetId.
:paramtype checkpoint_dataset_id: str
:keyword checkpoint_filename: The pretrained checkpoint filename in FileDataset for incremental
training.
Make sure to pass CheckpointDatasetId along with CheckpointFilename.
:paramtype checkpoint_filename: str
:keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
integer.
:paramtype checkpoint_frequency: int
:keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
incremental training.
:paramtype checkpoint_run_id: str
:keyword distributed: Whether to use distributed training.
:paramtype distributed: bool
:keyword early_stopping: Enable early stopping logic during training.
:paramtype early_stopping: bool
:keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
before primary metric improvement
is tracked for early stopping. Must be a positive integer.
:paramtype early_stopping_delay: int
:keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:paramtype early_stopping_patience: int
:keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
:paramtype enable_onnx_normalization: bool
:keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
Must be a positive integer.
:paramtype evaluation_frequency: int
:keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:paramtype gradient_accumulation_step: int
:keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
integer.
For instance, passing 2 as the value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype layers_to_freeze: int
:keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:paramtype learning_rate: float
:keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'. Possible values include: "None", "WarmupCosine", "Step".
:paramtype learning_rate_scheduler: str or
~azure.mgmt.machinelearningservices.models.LearningRateScheduler
:keyword model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype model_name: str
:keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
1].
:paramtype momentum: float
:keyword nesterov: Enable nesterov when optimizer is 'sgd'.
:paramtype nesterov: bool
:keyword number_of_epochs: Number of training epochs. Must be a positive integer.
:paramtype number_of_epochs: int
:keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
:paramtype number_of_workers: int
:keyword optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
:paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
:keyword random_seed: Random seed to be used when using deterministic training.
:paramtype random_seed: int
:keyword split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:paramtype split_ratio: float
:keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
in the range [0, 1].
:paramtype step_lr_gamma: float
:keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
a positive integer.
:paramtype step_lr_step_size: int
:keyword training_batch_size: Training batch size. Must be a positive integer.
:paramtype training_batch_size: int
:keyword validation_batch_size: Validation batch size. Must be a positive integer.
:paramtype validation_batch_size: int
:keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:paramtype warmup_cosine_lr_cycles: float
:keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:paramtype warmup_cosine_lr_warmup_epochs: int
:keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
be a float in the range [0, 1].
:paramtype weight_decay: float
:keyword training_crop_size: Image crop size that is input to the neural network for the
training dataset. Must be a positive integer.
:paramtype training_crop_size: int
:keyword validation_crop_size: Image crop size that is input to the neural network for the
validation dataset. Must be a positive integer.
:paramtype validation_crop_size: int
:keyword validation_resize_size: Image size to which to resize before cropping for validation
dataset. Must be a positive integer.
:paramtype validation_resize_size: int
:keyword weighted_loss: Weighted loss. The accepted values are 0 for no weighted loss,
1 for weighted loss with sqrt(class_weights), and 2 for weighted loss with class_weights. Must be
0, 1, or 2.
:paramtype weighted_loss: int
"""
super(ImageModelSettingsClassification, self).__init__(advanced_settings=advanced_settings, ams_gradient=ams_gradient, augmentations=augmentations, beta1=beta1, beta2=beta2, checkpoint_dataset_id=checkpoint_dataset_id, checkpoint_filename=checkpoint_filename, checkpoint_frequency=checkpoint_frequency, checkpoint_run_id=checkpoint_run_id, distributed=distributed, early_stopping=early_stopping, early_stopping_delay=early_stopping_delay, early_stopping_patience=early_stopping_patience, enable_onnx_normalization=enable_onnx_normalization, evaluation_frequency=evaluation_frequency, gradient_accumulation_step=gradient_accumulation_step, layers_to_freeze=layers_to_freeze, learning_rate=learning_rate, learning_rate_scheduler=learning_rate_scheduler, model_name=model_name, momentum=momentum, nesterov=nesterov, number_of_epochs=number_of_epochs, number_of_workers=number_of_workers, optimizer=optimizer, random_seed=random_seed, split_ratio=split_ratio, step_lr_gamma=step_lr_gamma, step_lr_step_size=step_lr_step_size, training_batch_size=training_batch_size, validation_batch_size=validation_batch_size, warmup_cosine_lr_cycles=warmup_cosine_lr_cycles, warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs, weight_decay=weight_decay, **kwargs)
self.training_crop_size = training_crop_size
self.validation_crop_size = validation_crop_size
self.validation_resize_size = validation_resize_size
self.weighted_loss = weighted_loss
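
# Illustrative sketch (not part of the generated models): classification-specific
# settings extend ImageModelSettings with crop/resize sizes and a weighted-loss mode.
# The 224/256 values are common ImageNet-style defaults used here purely as assumptions.
def _example_classification_settings():
    """Documentation-only example returning sample classification settings."""
    return ImageModelSettingsClassification(
        model_name="seresnext",
        number_of_epochs=10,
        training_crop_size=224,        # crop fed to the network during training
        validation_crop_size=224,      # crop fed to the network during validation
        validation_resize_size=256,    # resize applied before the validation crop
        weighted_loss=1,               # 0 = off, 1 = sqrt(class_weights), 2 = class_weights
    )
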
class ImageModelSettingsObjectDetection(ImageModelSettings):
"""Settings used for training the model.
For more information on the available settings please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:ivar advanced_settings: Settings for advanced scenarios.
:vartype advanced_settings: str
:ivar ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:vartype ams_gradient: bool
:ivar augmentations: Settings for using Augmentations.
:vartype augmentations: str
:ivar beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta1: float
:ivar beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the range
[0, 1].
:vartype beta2: float
:ivar checkpoint_dataset_id: FileDataset id for pretrained checkpoint(s) for incremental
training.
Make sure to pass CheckpointFilename along with CheckpointDatasetId.
:vartype checkpoint_dataset_id: str
:ivar checkpoint_filename: The pretrained checkpoint filename in FileDataset for incremental
training.
Make sure to pass CheckpointDatasetId along with CheckpointFilename.
:vartype checkpoint_filename: str
:ivar checkpoint_frequency: Frequency to store model checkpoints. Must be a positive integer.
:vartype checkpoint_frequency: int
:ivar checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
incremental training.
:vartype checkpoint_run_id: str
:ivar distributed: Whether to use distributed training.
:vartype distributed: bool
:ivar early_stopping: Enable early stopping logic during training.
:vartype early_stopping: bool
:ivar early_stopping_delay: Minimum number of epochs or validation evaluations to wait before
primary metric improvement
is tracked for early stopping. Must be a positive integer.
:vartype early_stopping_delay: int
:ivar early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:vartype early_stopping_patience: int
:ivar enable_onnx_normalization: Enable normalization when exporting ONNX model.
:vartype enable_onnx_normalization: bool
:ivar evaluation_frequency: Frequency to evaluate validation dataset to get metric scores. Must
be a positive integer.
:vartype evaluation_frequency: int
:ivar gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:vartype gradient_accumulation_step: int
:ivar layers_to_freeze: Number of layers to freeze for the model. Must be a positive integer.
For instance, passing 2 as the value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype layers_to_freeze: int
:ivar learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:vartype learning_rate: float
:ivar learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'. Possible values include: "None", "WarmupCosine", "Step".
:vartype learning_rate_scheduler: str or
~azure.mgmt.machinelearningservices.models.LearningRateScheduler
:ivar model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:vartype model_name: str
:ivar momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0, 1].
:vartype momentum: float
:ivar nesterov: Enable nesterov when optimizer is 'sgd'.
:vartype nesterov: bool
:ivar number_of_epochs: Number of training epochs. Must be a positive integer.
:vartype number_of_epochs: int
:ivar number_of_workers: Number of data loader workers. Must be a non-negative integer.
:vartype number_of_workers: int
:ivar optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
:vartype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
:ivar random_seed: Random seed to be used when using deterministic training.
:vartype random_seed: int
:ivar split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:vartype split_ratio: float
:ivar step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float in
the range [0, 1].
:vartype step_lr_gamma: float
:ivar step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be a
positive integer.
:vartype step_lr_step_size: int
:ivar training_batch_size: Training batch size. Must be a positive integer.
:vartype training_batch_size: int
:ivar validation_batch_size: Validation batch size. Must be a positive integer.
:vartype validation_batch_size: int
:ivar warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:vartype warmup_cosine_lr_cycles: float
:ivar warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:vartype warmup_cosine_lr_warmup_epochs: int
:ivar weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must be
a float in the range [0, 1].
:vartype weight_decay: float
:ivar box_detections_per_image: Maximum number of detections per image, for all classes. Must
be a positive integer.
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype box_detections_per_image: int
:ivar box_score_threshold: During inference, only return proposals with a classification score
greater than
BoxScoreThreshold. Must be a float in the range [0, 1].
:vartype box_score_threshold: float
:ivar image_size: Image size for train and validation. Must be a positive integer.
Note: The training run may get into CUDA OOM if the size is too big.
Note: This setting is only supported for the 'yolov5' algorithm.
:vartype image_size: int
:ivar max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype max_size: int
:ivar min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype min_size: int
:ivar model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
Note: training run may get into CUDA OOM if the model size is too big.
Note: This setting is only supported for the 'yolov5' algorithm. Possible values include:
"None", "Small", "Medium", "Large", "ExtraLarge".
:vartype model_size: str or ~azure.mgmt.machinelearningservices.models.ModelSize
:ivar multi_scale: Enable multi-scale image by varying image size by +/- 50%.
Note: training run may get into CUDA OOM if there is insufficient GPU memory.
Note: This setting is only supported for the 'yolov5' algorithm.
:vartype multi_scale: bool
:ivar nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be a
float in the range [0, 1].
:vartype nms_iou_threshold: float
:ivar tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must not
be
None to enable small object detection logic. A string containing two integers in mxn format.
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype tile_grid_size: str
:ivar tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be a float
in the range [0, 1).
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype tile_overlap_ratio: float
:ivar tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
predictions from tiles and image.
Used in validation/inference. Must be a float in the range [0, 1].
Note: This setting is not supported for the 'yolov5' algorithm.
:vartype tile_predictions_nms_threshold: float
:ivar validation_iou_threshold: IOU threshold to use when computing validation metric. Must be
a float in the range [0, 1].
:vartype validation_iou_threshold: float
:ivar validation_metric_type: Metric computation method to use for validation metrics. Possible
values include: "None", "Coco", "Voc", "CocoVoc".
:vartype validation_metric_type: str or
~azure.mgmt.machinelearningservices.models.ValidationMetricType
"""
_attribute_map = {
'advanced_settings': {'key': 'advancedSettings', 'type': 'str'},
'ams_gradient': {'key': 'amsGradient', 'type': 'bool'},
'augmentations': {'key': 'augmentations', 'type': 'str'},
'beta1': {'key': 'beta1', 'type': 'float'},
'beta2': {'key': 'beta2', 'type': 'float'},
'checkpoint_dataset_id': {'key': 'checkpointDatasetId', 'type': 'str'},
'checkpoint_filename': {'key': 'checkpointFilename', 'type': 'str'},
'checkpoint_frequency': {'key': 'checkpointFrequency', 'type': 'int'},
'checkpoint_run_id': {'key': 'checkpointRunId', 'type': 'str'},
'distributed': {'key': 'distributed', 'type': 'bool'},
'early_stopping': {'key': 'earlyStopping', 'type': 'bool'},
'early_stopping_delay': {'key': 'earlyStoppingDelay', 'type': 'int'},
'early_stopping_patience': {'key': 'earlyStoppingPatience', 'type': 'int'},
'enable_onnx_normalization': {'key': 'enableOnnxNormalization', 'type': 'bool'},
'evaluation_frequency': {'key': 'evaluationFrequency', 'type': 'int'},
'gradient_accumulation_step': {'key': 'gradientAccumulationStep', 'type': 'int'},
'layers_to_freeze': {'key': 'layersToFreeze', 'type': 'int'},
'learning_rate': {'key': 'learningRate', 'type': 'float'},
'learning_rate_scheduler': {'key': 'learningRateScheduler', 'type': 'str'},
'model_name': {'key': 'modelName', 'type': 'str'},
'momentum': {'key': 'momentum', 'type': 'float'},
'nesterov': {'key': 'nesterov', 'type': 'bool'},
'number_of_epochs': {'key': 'numberOfEpochs', 'type': 'int'},
'number_of_workers': {'key': 'numberOfWorkers', 'type': 'int'},
'optimizer': {'key': 'optimizer', 'type': 'str'},
'random_seed': {'key': 'randomSeed', 'type': 'int'},
'split_ratio': {'key': 'splitRatio', 'type': 'float'},
'step_lr_gamma': {'key': 'stepLRGamma', 'type': 'float'},
'step_lr_step_size': {'key': 'stepLRStepSize', 'type': 'int'},
'training_batch_size': {'key': 'trainingBatchSize', 'type': 'int'},
'validation_batch_size': {'key': 'validationBatchSize', 'type': 'int'},
'warmup_cosine_lr_cycles': {'key': 'warmupCosineLRCycles', 'type': 'float'},
'warmup_cosine_lr_warmup_epochs': {'key': 'warmupCosineLRWarmupEpochs', 'type': 'int'},
'weight_decay': {'key': 'weightDecay', 'type': 'float'},
'box_detections_per_image': {'key': 'boxDetectionsPerImage', 'type': 'int'},
'box_score_threshold': {'key': 'boxScoreThreshold', 'type': 'float'},
'image_size': {'key': 'imageSize', 'type': 'int'},
'max_size': {'key': 'maxSize', 'type': 'int'},
'min_size': {'key': 'minSize', 'type': 'int'},
'model_size': {'key': 'modelSize', 'type': 'str'},
'multi_scale': {'key': 'multiScale', 'type': 'bool'},
'nms_iou_threshold': {'key': 'nmsIouThreshold', 'type': 'float'},
'tile_grid_size': {'key': 'tileGridSize', 'type': 'str'},
'tile_overlap_ratio': {'key': 'tileOverlapRatio', 'type': 'float'},
'tile_predictions_nms_threshold': {'key': 'tilePredictionsNmsThreshold', 'type': 'float'},
'validation_iou_threshold': {'key': 'validationIouThreshold', 'type': 'float'},
'validation_metric_type': {'key': 'validationMetricType', 'type': 'str'},
}
def __init__(
self,
*,
advanced_settings: Optional[str] = None,
ams_gradient: Optional[bool] = None,
augmentations: Optional[str] = None,
beta1: Optional[float] = None,
beta2: Optional[float] = None,
checkpoint_dataset_id: Optional[str] = None,
checkpoint_filename: Optional[str] = None,
checkpoint_frequency: Optional[int] = None,
checkpoint_run_id: Optional[str] = None,
distributed: Optional[bool] = None,
early_stopping: Optional[bool] = None,
early_stopping_delay: Optional[int] = None,
early_stopping_patience: Optional[int] = None,
enable_onnx_normalization: Optional[bool] = None,
evaluation_frequency: Optional[int] = None,
gradient_accumulation_step: Optional[int] = None,
layers_to_freeze: Optional[int] = None,
learning_rate: Optional[float] = None,
learning_rate_scheduler: Optional[Union[str, "LearningRateScheduler"]] = None,
model_name: Optional[str] = None,
momentum: Optional[float] = None,
nesterov: Optional[bool] = None,
number_of_epochs: Optional[int] = None,
number_of_workers: Optional[int] = None,
optimizer: Optional[Union[str, "StochasticOptimizer"]] = None,
random_seed: Optional[int] = None,
split_ratio: Optional[float] = None,
step_lr_gamma: Optional[float] = None,
step_lr_step_size: Optional[int] = None,
training_batch_size: Optional[int] = None,
validation_batch_size: Optional[int] = None,
warmup_cosine_lr_cycles: Optional[float] = None,
warmup_cosine_lr_warmup_epochs: Optional[int] = None,
weight_decay: Optional[float] = None,
box_detections_per_image: Optional[int] = None,
box_score_threshold: Optional[float] = None,
image_size: Optional[int] = None,
max_size: Optional[int] = None,
min_size: Optional[int] = None,
model_size: Optional[Union[str, "ModelSize"]] = None,
multi_scale: Optional[bool] = None,
nms_iou_threshold: Optional[float] = None,
tile_grid_size: Optional[str] = None,
tile_overlap_ratio: Optional[float] = None,
tile_predictions_nms_threshold: Optional[float] = None,
validation_iou_threshold: Optional[float] = None,
validation_metric_type: Optional[Union[str, "ValidationMetricType"]] = None,
**kwargs
):
"""
:keyword advanced_settings: Settings for advanced scenarios.
:paramtype advanced_settings: str
:keyword ams_gradient: Enable AMSGrad when optimizer is 'adam' or 'adamw'.
:paramtype ams_gradient: bool
:keyword augmentations: Settings for using Augmentations.
:paramtype augmentations: str
:keyword beta1: Value of 'beta1' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta1: float
:keyword beta2: Value of 'beta2' when optimizer is 'adam' or 'adamw'. Must be a float in the
range [0, 1].
:paramtype beta2: float
:keyword checkpoint_dataset_id: FileDataset id for pretrained checkpoint(s) for incremental
training.
Make sure to pass CheckpointFilename along with CheckpointDatasetId.
:paramtype checkpoint_dataset_id: str
:keyword checkpoint_filename: The pretrained checkpoint filename in FileDataset for incremental
training.
Make sure to pass CheckpointDatasetId along with CheckpointFilename.
:paramtype checkpoint_filename: str
:keyword checkpoint_frequency: Frequency to store model checkpoints. Must be a positive
integer.
:paramtype checkpoint_frequency: int
:keyword checkpoint_run_id: The id of a previous run that has a pretrained checkpoint for
incremental training.
:paramtype checkpoint_run_id: str
:keyword distributed: Whether to use distributed training.
:paramtype distributed: bool
:keyword early_stopping: Enable early stopping logic during training.
:paramtype early_stopping: bool
:keyword early_stopping_delay: Minimum number of epochs or validation evaluations to wait
before primary metric improvement
is tracked for early stopping. Must be a positive integer.
:paramtype early_stopping_delay: int
:keyword early_stopping_patience: Minimum number of epochs or validation evaluations with no
primary metric improvement before
the run is stopped. Must be a positive integer.
:paramtype early_stopping_patience: int
:keyword enable_onnx_normalization: Enable normalization when exporting ONNX model.
:paramtype enable_onnx_normalization: bool
:keyword evaluation_frequency: Frequency to evaluate validation dataset to get metric scores.
Must be a positive integer.
:paramtype evaluation_frequency: int
:keyword gradient_accumulation_step: Gradient accumulation means running a configured number of
"GradAccumulationStep" steps without
updating the model weights while accumulating the gradients of those steps, and then using
the accumulated gradients to compute the weight updates. Must be a positive integer.
:paramtype gradient_accumulation_step: int
:keyword layers_to_freeze: Number of layers to freeze for the model. Must be a positive
integer.
For instance, passing 2 as the value for 'seresnext' means
freezing layer0 and layer1. For a full list of models supported and details on layer freeze,
please
see: https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype layers_to_freeze: int
:keyword learning_rate: Initial learning rate. Must be a float in the range [0, 1].
:paramtype learning_rate: float
:keyword learning_rate_scheduler: Type of learning rate scheduler. Must be 'warmup_cosine' or
'step'. Possible values include: "None", "WarmupCosine", "Step".
:paramtype learning_rate_scheduler: str or
~azure.mgmt.machinelearningservices.models.LearningRateScheduler
:keyword model_name: Name of the model to use for training.
For more information on the available models please visit the official documentation:
https://docs.microsoft.com/en-us/azure/machine-learning/how-to-auto-train-image-models.
:paramtype model_name: str
:keyword momentum: Value of momentum when optimizer is 'sgd'. Must be a float in the range [0,
1].
:paramtype momentum: float
:keyword nesterov: Enable nesterov when optimizer is 'sgd'.
:paramtype nesterov: bool
:keyword number_of_epochs: Number of training epochs. Must be a positive integer.
:paramtype number_of_epochs: int
:keyword number_of_workers: Number of data loader workers. Must be a non-negative integer.
:paramtype number_of_workers: int
:keyword optimizer: Type of optimizer. Possible values include: "None", "Sgd", "Adam", "Adamw".
:paramtype optimizer: str or ~azure.mgmt.machinelearningservices.models.StochasticOptimizer
:keyword random_seed: Random seed to be used when using deterministic training.
:paramtype random_seed: int
:keyword split_ratio: If validation data is not defined, this specifies the split ratio for
splitting
train data into random train and validation subsets. Must be a float in the range [0, 1].
:paramtype split_ratio: float
:keyword step_lr_gamma: Value of gamma when learning rate scheduler is 'step'. Must be a float
in the range [0, 1].
:paramtype step_lr_gamma: float
:keyword step_lr_step_size: Value of step size when learning rate scheduler is 'step'. Must be
a positive integer.
:paramtype step_lr_step_size: int
:keyword training_batch_size: Training batch size. Must be a positive integer.
:paramtype training_batch_size: int
:keyword validation_batch_size: Validation batch size. Must be a positive integer.
:paramtype validation_batch_size: int
:keyword warmup_cosine_lr_cycles: Value of cosine cycle when learning rate scheduler is
'warmup_cosine'. Must be a float in the range [0, 1].
:paramtype warmup_cosine_lr_cycles: float
:keyword warmup_cosine_lr_warmup_epochs: Value of warmup epochs when learning rate scheduler is
'warmup_cosine'. Must be a positive integer.
:paramtype warmup_cosine_lr_warmup_epochs: int
:keyword weight_decay: Value of weight decay when optimizer is 'sgd', 'adam', or 'adamw'. Must
be a float in the range [0, 1].
:paramtype weight_decay: float
:keyword box_detections_per_image: Maximum number of detections per image, for all classes.
Must be a positive integer.
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype box_detections_per_image: int
:keyword box_score_threshold: During inference, only return proposals with a classification
score greater than
BoxScoreThreshold. Must be a float in the range [0, 1].
:paramtype box_score_threshold: float
:keyword image_size: Image size for train and validation. Must be a positive integer.
Note: The training run may get into CUDA OOM if the size is too big.
Note: This setting is only supported for the 'yolov5' algorithm.
:paramtype image_size: int
:keyword max_size: Maximum size of the image to be rescaled before feeding it to the backbone.
Must be a positive integer. Note: training run may get into CUDA OOM if the size is too big.
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype max_size: int
:keyword min_size: Minimum size of the image to be rescaled before feeding it to the backbone.
Must be a positive integer. Note: the training run may get into CUDA OOM if the size is too big.
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype min_size: int
:keyword model_size: Model size. Must be 'small', 'medium', 'large', or 'xlarge'.
Note: the training run may get into CUDA OOM if the model size is too big.
Note: This setting is only supported for the 'yolov5' algorithm. Possible values include:
"None", "Small", "Medium", "Large", "ExtraLarge".
:paramtype model_size: str or ~azure.mgmt.machinelearningservices.models.ModelSize
:keyword multi_scale: Enable multi-scale image by varying image size by +/- 50%.
Note: the training run may get into CUDA OOM if there is not sufficient GPU memory.
Note: This setting is only supported for the 'yolov5' algorithm.
:paramtype multi_scale: bool
:keyword nms_iou_threshold: IOU threshold used during inference in NMS post processing. Must be
a float in the range [0, 1].
:paramtype nms_iou_threshold: float
:keyword tile_grid_size: The grid size to use for tiling each image. Note: TileGridSize must
not be
None to enable small object detection logic. A string containing two integers in mxn format.
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype tile_grid_size: str
:keyword tile_overlap_ratio: Overlap ratio between adjacent tiles in each dimension. Must be
a float in the range [0, 1).
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype tile_overlap_ratio: float
:keyword tile_predictions_nms_threshold: The IOU threshold to use to perform NMS while merging
predictions from tiles and image.
Used in validation/inference. Must be a float in the range [0, 1].
Note: This setting is not supported for the 'yolov5' algorithm.
:paramtype tile_predictions_nms_threshold: float
:keyword validation_iou_threshold: IOU threshold to use when computing validation metric. Must
be a float in the range [0, 1].
:paramtype validation_iou_threshold: float
:keyword validation_metric_type: Metric computation method to use for validation metrics.
Possible values include: "None", "Coco", "Voc", "CocoVoc".
:paramtype validation_metric_type: str or
~azure.mgmt.machinelearningservices.models.ValidationMetricType
"""
super(ImageModelSettingsObjectDetection, self).__init__(advanced_settings=advanced_settings, ams_gradient=ams_gradient, augmentations=augmentations, beta1=beta1, beta2=beta2, checkpoint_dataset_id=checkpoint_dataset_id, checkpoint_filename=checkpoint_filename, checkpoint_frequency=checkpoint_frequency, checkpoint_run_id=checkpoint_run_id, distributed=distributed, early_stopping=early_stopping, early_stopping_delay=early_stopping_delay, early_stopping_patience=early_stopping_patience, enable_onnx_normalization=enable_onnx_normalization, evaluation_frequency=evaluation_frequency, gradient_accumulation_step=gradient_accumulation_step, layers_to_freeze=layers_to_freeze, learning_rate=learning_rate, learning_rate_scheduler=learning_rate_scheduler, model_name=model_name, momentum=momentum, nesterov=nesterov, number_of_epochs=number_of_epochs, number_of_workers=number_of_workers, optimizer=optimizer, random_seed=random_seed, split_ratio=split_ratio, step_lr_gamma=step_lr_gamma, step_lr_step_size=step_lr_step_size, training_batch_size=training_batch_size, validation_batch_size=validation_batch_size, warmup_cosine_lr_cycles=warmup_cosine_lr_cycles, warmup_cosine_lr_warmup_epochs=warmup_cosine_lr_warmup_epochs, weight_decay=weight_decay, **kwargs)
self.box_detections_per_image = box_detections_per_image
self.box_score_threshold = box_score_threshold
self.image_size = image_size
self.max_size = max_size
self.min_size = min_size
self.model_size = model_size
self.multi_scale = multi_scale
self.nms_iou_threshold = nms_iou_threshold
self.tile_grid_size = tile_grid_size
self.tile_overlap_ratio = tile_overlap_ratio
self.tile_predictions_nms_threshold = tile_predictions_nms_threshold
self.validation_iou_threshold = validation_iou_threshold
self.validation_metric_type = validation_metric_type
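# --- Illustrative example (not part of the generated models) ----------------
# A minimal sketch of instantiating the object-detection hyperparameter
# settings defined above. The concrete values ('yolov5', 0.01, 640, ...) are
# illustrative assumptions, not recommendations from this SDK.
def _example_object_detection_model_settings():
    """Build an ImageModelSettingsObjectDetection with a few common knobs."""
    return ImageModelSettingsObjectDetection(
        model_name="yolov5",        # assumed model name, for illustration only
        learning_rate=0.01,         # float in [0, 1]
        number_of_epochs=30,        # positive integer
        image_size=640,             # only honoured by the 'yolov5' algorithm
        box_score_threshold=0.3,    # float in [0, 1]
        nms_iou_threshold=0.5,      # float in [0, 1]
    )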
class ImageObjectDetection(AutoMLVertical, ImageObjectDetectionBase):
"""Image Object Detection. Object detection is used to identify objects in an image and locate each object with a
bounding box, e.g. locate all dogs and cats in an image and draw a bounding box around each.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:ivar limit_settings: Required. [Required] Limit settings for the AutoML job.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:ivar sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:vartype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:ivar model_settings: Settings used for training the model.
:vartype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
:ivar search_space: Search space for sampling different combinations of models and their
hyperparameters.
:vartype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:ivar task_type: Required. [Required] Task type for AutoMLJob. Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar primary_metric: Primary metric to optimize for this task. Possible values include:
"MeanAveragePrecision".
:vartype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ObjectDetectionPrimaryMetrics
"""
_validation = {
'data_settings': {'required': True},
'limit_settings': {'required': True},
'task_type': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'ImageVerticalDataSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'ImageLimitSettings'},
'sweep_settings': {'key': 'sweepSettings', 'type': 'ImageSweepSettings'},
'model_settings': {'key': 'modelSettings', 'type': 'ImageModelSettingsObjectDetection'},
'search_space': {'key': 'searchSpace', 'type': '[ImageModelDistributionSettingsObjectDetection]'},
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
data_settings: "ImageVerticalDataSettings",
limit_settings: "ImageLimitSettings",
sweep_settings: Optional["ImageSweepSettings"] = None,
model_settings: Optional["ImageModelSettingsObjectDetection"] = None,
search_space: Optional[List["ImageModelDistributionSettingsObjectDetection"]] = None,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
primary_metric: Optional[Union[str, "ObjectDetectionPrimaryMetrics"]] = None,
**kwargs
):
"""
:keyword data_settings: Required. [Required] Collection of registered Tabular Dataset Ids and
other data settings required for training and validating models.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.ImageVerticalDataSettings
:keyword limit_settings: Required. [Required] Limit settings for the AutoML job.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.ImageLimitSettings
:keyword sweep_settings: Model sweeping and hyperparameter sweeping related settings.
:paramtype sweep_settings: ~azure.mgmt.machinelearningservices.models.ImageSweepSettings
:keyword model_settings: Settings used for training the model.
:paramtype model_settings:
~azure.mgmt.machinelearningservices.models.ImageModelSettingsObjectDetection
:keyword search_space: Search space for sampling different combinations of models and their
hyperparameters.
:paramtype search_space:
list[~azure.mgmt.machinelearningservices.models.ImageModelDistributionSettingsObjectDetection]
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:keyword primary_metric: Primary metric to optimize for this task. Possible values include:
"MeanAveragePrecision".
:paramtype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ObjectDetectionPrimaryMetrics
"""
super(ImageObjectDetection, self).__init__(log_verbosity=log_verbosity, data_settings=data_settings, limit_settings=limit_settings, sweep_settings=sweep_settings, model_settings=model_settings, search_space=search_space, **kwargs)
self.data_settings = data_settings
self.limit_settings = limit_settings
self.sweep_settings = sweep_settings
self.model_settings = model_settings
self.search_space = search_space
self.task_type = 'ImageObjectDetection' # type: str
self.primary_metric = primary_metric
self.log_verbosity = log_verbosity
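# --- Illustrative example (not part of the generated models) ----------------
# Sketch of composing an ImageObjectDetection vertical from pre-built data and
# limit settings; those objects are passed in by the caller, so nothing is
# assumed here about their constructors.
def _example_image_object_detection(data_settings, limit_settings):
    """Assemble the AutoML image object detection task definition."""
    return ImageObjectDetection(
        data_settings=data_settings,      # ImageVerticalDataSettings
        limit_settings=limit_settings,    # ImageLimitSettings
        primary_metric="MeanAveragePrecision",
        log_verbosity="Info",
    )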
class ImageSweepLimitSettings(msrest.serialization.Model):
"""Limit settings for model sweeping and hyperparameter sweeping.
:ivar max_concurrent_trials: Maximum number of concurrent iterations for the underlying Sweep
job.
:vartype max_concurrent_trials: int
:ivar max_trials: Maximum number of iterations for the underlying Sweep job.
:vartype max_trials: int
"""
_attribute_map = {
'max_concurrent_trials': {'key': 'maxConcurrentTrials', 'type': 'int'},
'max_trials': {'key': 'maxTrials', 'type': 'int'},
}
def __init__(
self,
*,
max_concurrent_trials: Optional[int] = None,
max_trials: Optional[int] = None,
**kwargs
):
"""
:keyword max_concurrent_trials: Maximum number of concurrent iterations for the underlying
Sweep job.
:paramtype max_concurrent_trials: int
:keyword max_trials: Maximum number of iterations for the underlying Sweep job.
:paramtype max_trials: int
"""
super(ImageSweepLimitSettings, self).__init__(**kwargs)
self.max_concurrent_trials = max_concurrent_trials
self.max_trials = max_trials
class ImageSweepSettings(msrest.serialization.Model):
"""Model sweeping and hyperparameter sweeping related settings.
All required parameters must be populated in order to send to Azure.
:ivar early_termination: Type of early termination policy.
:vartype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
:ivar limits: Required. [Required] Limit settings for model sweeping and hyperparameter
sweeping.
:vartype limits: ~azure.mgmt.machinelearningservices.models.ImageSweepLimitSettings
:ivar sampling_algorithm: Required. [Required] Type of the hyperparameter sampling algorithms.
Possible values include: "Grid", "Random", "Bayesian".
:vartype sampling_algorithm: str or
~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
"""
_validation = {
'limits': {'required': True},
'sampling_algorithm': {'required': True},
}
_attribute_map = {
'early_termination': {'key': 'earlyTermination', 'type': 'EarlyTerminationPolicy'},
'limits': {'key': 'limits', 'type': 'ImageSweepLimitSettings'},
'sampling_algorithm': {'key': 'samplingAlgorithm', 'type': 'str'},
}
def __init__(
self,
*,
limits: "ImageSweepLimitSettings",
sampling_algorithm: Union[str, "SamplingAlgorithmType"],
early_termination: Optional["EarlyTerminationPolicy"] = None,
**kwargs
):
"""
:keyword early_termination: Type of early termination policy.
:paramtype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
:keyword limits: Required. [Required] Limit settings for model sweeping and hyperparameter
sweeping.
:paramtype limits: ~azure.mgmt.machinelearningservices.models.ImageSweepLimitSettings
:keyword sampling_algorithm: Required. [Required] Type of the hyperparameter sampling
algorithms. Possible values include: "Grid", "Random", "Bayesian".
:paramtype sampling_algorithm: str or
~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
"""
super(ImageSweepSettings, self).__init__(**kwargs)
self.early_termination = early_termination
self.limits = limits
self.sampling_algorithm = sampling_algorithm
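# --- Illustrative example (not part of the generated models) ----------------
# Minimal sketch pairing ImageSweepLimitSettings with ImageSweepSettings;
# "Random" is one of the documented sampling algorithm values, and the trial
# counts are arbitrary illustrations.
def _example_sweep_settings():
    """Limit a hyperparameter sweep to 20 trials, 2 running at a time."""
    limits = ImageSweepLimitSettings(max_concurrent_trials=2, max_trials=20)
    return ImageSweepSettings(limits=limits, sampling_algorithm="Random")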
class ImageVerticalDataSettings(DataSettings):
"""Collection of registered Tabular Dataset Ids and other data settings required for training and validating models.
All required parameters must be populated in order to send to Azure.
:ivar target_column_name: Required. [Required] Target column name: This is the prediction values
column.
Also known as the label column name in the context of classification tasks.
:vartype target_column_name: str
:ivar test_data: Test data input.
:vartype test_data: ~azure.mgmt.machinelearningservices.models.TestDataSettings
:ivar training_data: Required. [Required] Training data input.
:vartype training_data: ~azure.mgmt.machinelearningservices.models.TrainingDataSettings
:ivar validation_data: Settings for the validation dataset.
:vartype validation_data:
~azure.mgmt.machinelearningservices.models.ImageVerticalValidationDataSettings
"""
_validation = {
'target_column_name': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'training_data': {'required': True},
}
_attribute_map = {
'target_column_name': {'key': 'targetColumnName', 'type': 'str'},
'test_data': {'key': 'testData', 'type': 'TestDataSettings'},
'training_data': {'key': 'trainingData', 'type': 'TrainingDataSettings'},
'validation_data': {'key': 'validationData', 'type': 'ImageVerticalValidationDataSettings'},
}
def __init__(
self,
*,
target_column_name: str,
training_data: "TrainingDataSettings",
test_data: Optional["TestDataSettings"] = None,
validation_data: Optional["ImageVerticalValidationDataSettings"] = None,
**kwargs
):
"""
:keyword target_column_name: Required. [Required] Target column name: This is the prediction
values column.
Also known as the label column name in the context of classification tasks.
:paramtype target_column_name: str
:keyword test_data: Test data input.
:paramtype test_data: ~azure.mgmt.machinelearningservices.models.TestDataSettings
:keyword training_data: Required. [Required] Training data input.
:paramtype training_data: ~azure.mgmt.machinelearningservices.models.TrainingDataSettings
:keyword validation_data: Settings for the validation dataset.
:paramtype validation_data:
~azure.mgmt.machinelearningservices.models.ImageVerticalValidationDataSettings
"""
super(ImageVerticalDataSettings, self).__init__(target_column_name=target_column_name, test_data=test_data, training_data=training_data, **kwargs)
self.validation_data = validation_data
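# --- Illustrative example (not part of the generated models) ----------------
# Sketch of the image data settings; training_data is supplied by the caller
# because TrainingDataSettings is defined elsewhere in this module and no
# assumptions are made about it here. The column name is a placeholder.
def _example_image_data_settings(training_data):
    """Wire up the target column, training data and a 20% validation holdout."""
    return ImageVerticalDataSettings(
        target_column_name="label",   # placeholder column name
        training_data=training_data,
        validation_data=ImageVerticalValidationDataSettings(validation_data_size=0.2),
    )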
class ValidationDataSettings(msrest.serialization.Model):
"""Validation settings.
:ivar data: Validation data MLTable.
:vartype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:ivar validation_data_size: The fraction of the training dataset that needs to be set aside for
validation purposes.
Values between (0.0, 1.0).
Applied when a validation dataset is not provided.
:vartype validation_data_size: float
"""
_attribute_map = {
'data': {'key': 'data', 'type': 'MLTableJobInput'},
'validation_data_size': {'key': 'validationDataSize', 'type': 'float'},
}
def __init__(
self,
*,
data: Optional["MLTableJobInput"] = None,
validation_data_size: Optional[float] = None,
**kwargs
):
"""
:keyword data: Validation data MLTable.
:paramtype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:keyword validation_data_size: The fraction of the training dataset that needs to be set aside
for validation purposes.
Values between (0.0, 1.0).
Applied when a validation dataset is not provided.
:paramtype validation_data_size: float
"""
super(ValidationDataSettings, self).__init__(**kwargs)
self.data = data
self.validation_data_size = validation_data_size
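# --- Illustrative example (not part of the generated models) ----------------
# When no explicit validation MLTable is supplied, validation_data_size tells
# the service what fraction of the training data to hold out; 0.2 below is an
# arbitrary illustrative value.
def _example_validation_holdout():
    """Hold out 20% of the training data for validation."""
    return ValidationDataSettings(validation_data_size=0.2)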
class ImageVerticalValidationDataSettings(ValidationDataSettings):
"""ImageVerticalValidationDataSettings.
:ivar data: Validation data MLTable.
:vartype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:ivar validation_data_size: The fraction of the training dataset that needs to be set aside for
validation purposes.
Values between (0.0, 1.0).
Applied when a validation dataset is not provided.
:vartype validation_data_size: float
"""
_attribute_map = {
'data': {'key': 'data', 'type': 'MLTableJobInput'},
'validation_data_size': {'key': 'validationDataSize', 'type': 'float'},
}
def __init__(
self,
*,
data: Optional["MLTableJobInput"] = None,
validation_data_size: Optional[float] = None,
**kwargs
):
"""
:keyword data: Validation data MLTable.
:paramtype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:keyword validation_data_size: The fraction of the training dataset that needs to be set aside
for validation purposes.
Values between (0.0, 1.0).
Applied when a validation dataset is not provided.
:paramtype validation_data_size: float
"""
super(ImageVerticalValidationDataSettings, self).__init__(data=data, validation_data_size=validation_data_size, **kwargs)
class InferenceContainerProperties(msrest.serialization.Model):
"""InferenceContainerProperties.
:ivar liveness_route: The route to check the liveness of the inference server container.
:vartype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
:ivar readiness_route: The route to check the readiness of the inference server container.
:vartype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
:ivar scoring_route: The port to send the scoring requests to, within the inference server
container.
:vartype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
"""
_attribute_map = {
'liveness_route': {'key': 'livenessRoute', 'type': 'Route'},
'readiness_route': {'key': 'readinessRoute', 'type': 'Route'},
'scoring_route': {'key': 'scoringRoute', 'type': 'Route'},
}
def __init__(
self,
*,
liveness_route: Optional["Route"] = None,
readiness_route: Optional["Route"] = None,
scoring_route: Optional["Route"] = None,
**kwargs
):
"""
:keyword liveness_route: The route to check the liveness of the inference server container.
:paramtype liveness_route: ~azure.mgmt.machinelearningservices.models.Route
:keyword readiness_route: The route to check the readiness of the inference server container.
:paramtype readiness_route: ~azure.mgmt.machinelearningservices.models.Route
:keyword scoring_route: The port to send the scoring requests to, within the inference server
container.
:paramtype scoring_route: ~azure.mgmt.machinelearningservices.models.Route
"""
super(InferenceContainerProperties, self).__init__(**kwargs)
self.liveness_route = liveness_route
self.readiness_route = readiness_route
self.scoring_route = scoring_route
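# --- Illustrative example (not part of the generated models) ----------------
# Sketch showing how the liveness, readiness and scoring routes come together;
# the Route instances are passed in by the caller, so nothing is assumed here
# about Route's constructor.
def _example_inference_container(liveness, readiness, scoring):
    """Describe the HTTP surface of a custom inference container."""
    return InferenceContainerProperties(
        liveness_route=liveness,
        readiness_route=readiness,
        scoring_route=scoring,
    )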
class JobBaseData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.JobBaseDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'JobBaseDetails'},
}
def __init__(
self,
*,
properties: "JobBaseDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.JobBaseDetails
"""
super(JobBaseData, self).__init__(**kwargs)
self.properties = properties
class JobBaseResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of JobBase entities.
:ivar next_link: The link to the next page of JobBase objects. If null, there are no additional
pages.
:vartype next_link: str
:ivar value: An array of objects of type JobBase.
:vartype value: list[~azure.mgmt.machinelearningservices.models.JobBaseData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[JobBaseData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["JobBaseData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of JobBase objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type JobBase.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.JobBaseData]
"""
super(JobBaseResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class JobService(msrest.serialization.Model):
"""Job endpoint definition.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar endpoint: Url for endpoint.
:vartype endpoint: str
:ivar error_message: Any error in the service.
:vartype error_message: str
:ivar job_service_type: Endpoint type.
:vartype job_service_type: str
:ivar port: Port for endpoint.
:vartype port: int
:ivar properties: Additional properties to set on the endpoint.
:vartype properties: dict[str, str]
:ivar status: Status of endpoint.
:vartype status: str
"""
_validation = {
'error_message': {'readonly': True},
'status': {'readonly': True},
}
_attribute_map = {
'endpoint': {'key': 'endpoint', 'type': 'str'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
'job_service_type': {'key': 'jobServiceType', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'properties': {'key': 'properties', 'type': '{str}'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
*,
endpoint: Optional[str] = None,
job_service_type: Optional[str] = None,
port: Optional[int] = None,
properties: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword endpoint: Url for endpoint.
:paramtype endpoint: str
:keyword job_service_type: Endpoint type.
:paramtype job_service_type: str
:keyword port: Port for endpoint.
:paramtype port: int
:keyword properties: Additional properties to set on the endpoint.
:paramtype properties: dict[str, str]
"""
super(JobService, self).__init__(**kwargs)
self.endpoint = endpoint
self.error_message = None
self.job_service_type = job_service_type
self.port = port
self.properties = properties
self.status = None
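# --- Illustrative example (not part of the generated models) ----------------
# Sketch of declaring an endpoint on a job; the service type, port and
# properties below are illustrative assumptions, and the read-only
# error_message/status fields are left to the server.
def _example_job_service():
    """Declare an assumed interactive endpoint definition for a job."""
    return JobService(
        job_service_type="JupyterLab",   # assumed endpoint type
        port=8888,                       # assumed port
        properties={"purpose": "debugging"},
    )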
class KerberosCredentials(msrest.serialization.Model):
"""KerberosCredentials.
All required parameters must be populated in order to send to Azure.
:ivar kerberos_kdc_address: Required. [Required] IP Address or DNS HostName.
:vartype kerberos_kdc_address: str
:ivar kerberos_principal: Required. [Required] Kerberos Username.
:vartype kerberos_principal: str
:ivar kerberos_realm: Required. [Required] Domain over which a Kerberos authentication server
has the authority to authenticate a user, host or service.
:vartype kerberos_realm: str
"""
_validation = {
'kerberos_kdc_address': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'kerberos_principal': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'kerberos_realm': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'kerberos_kdc_address': {'key': 'kerberosKdcAddress', 'type': 'str'},
'kerberos_principal': {'key': 'kerberosPrincipal', 'type': 'str'},
'kerberos_realm': {'key': 'kerberosRealm', 'type': 'str'},
}
def __init__(
self,
*,
kerberos_kdc_address: str,
kerberos_principal: str,
kerberos_realm: str,
**kwargs
):
"""
:keyword kerberos_kdc_address: Required. [Required] IP Address or DNS HostName.
:paramtype kerberos_kdc_address: str
:keyword kerberos_principal: Required. [Required] Kerberos Username.
:paramtype kerberos_principal: str
:keyword kerberos_realm: Required. [Required] Domain over which a Kerberos authentication
server has the authority to authenticate a user, host or service.
:paramtype kerberos_realm: str
"""
super(KerberosCredentials, self).__init__(**kwargs)
self.kerberos_kdc_address = kerberos_kdc_address
self.kerberos_principal = kerberos_principal
self.kerberos_realm = kerberos_realm
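# --- Illustrative example (not part of the generated models) ----------------
# Sketch of the common Kerberos fields shared by the keytab and password
# credential types below; the host, principal and realm are placeholder values.
def _example_kerberos_credentials():
    """Fill in the KDC address, principal and realm for a Kerberos realm."""
    return KerberosCredentials(
        kerberos_kdc_address="kdc.contoso.com",   # placeholder host name
        kerberos_principal="mlworkspace",         # placeholder principal
        kerberos_realm="CONTOSO.COM",             # placeholder realm
    )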
class KerberosKeytabCredentials(DatastoreCredentials, KerberosCredentials):
"""KerberosKeytabCredentials.
All required parameters must be populated in order to send to Azure.
:ivar kerberos_kdc_address: Required. [Required] IP Address or DNS HostName.
:vartype kerberos_kdc_address: str
:ivar kerberos_principal: Required. [Required] Kerberos Username.
:vartype kerberos_principal: str
:ivar kerberos_realm: Required. [Required] Domain over which a Kerberos authentication server
has the authority to authenticate a user, host or service.
:vartype kerberos_realm: str
:ivar credentials_type: Required. [Required] Credential type used to authenticate with
storage. Constant filled by server. Possible values include: "AccountKey", "Certificate",
"None", "Sas", "ServicePrincipal", "KerberosKeytab", "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
:ivar secrets: Required. [Required] Keytab secrets.
:vartype secrets: ~azure.mgmt.machinelearningservices.models.KerberosKeytabSecrets
"""
_validation = {
'kerberos_kdc_address': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'kerberos_principal': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'kerberos_realm': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'credentials_type': {'required': True},
'secrets': {'required': True},
}
_attribute_map = {
'kerberos_kdc_address': {'key': 'kerberosKdcAddress', 'type': 'str'},
'kerberos_principal': {'key': 'kerberosPrincipal', 'type': 'str'},
'kerberos_realm': {'key': 'kerberosRealm', 'type': 'str'},
'credentials_type': {'key': 'credentialsType', 'type': 'str'},
'secrets': {'key': 'secrets', 'type': 'KerberosKeytabSecrets'},
}
def __init__(
self,
*,
kerberos_kdc_address: str,
kerberos_principal: str,
kerberos_realm: str,
secrets: "KerberosKeytabSecrets",
**kwargs
):
"""
:keyword kerberos_kdc_address: Required. [Required] IP Address or DNS HostName.
:paramtype kerberos_kdc_address: str
:keyword kerberos_principal: Required. [Required] Kerberos Username.
:paramtype kerberos_principal: str
:keyword kerberos_realm: Required. [Required] Domain over which a Kerberos authentication
server has the authority to authenticate a user, host or service.
:paramtype kerberos_realm: str
:keyword secrets: Required. [Required] Keytab secrets.
:paramtype secrets: ~azure.mgmt.machinelearningservices.models.KerberosKeytabSecrets
"""
super(KerberosKeytabCredentials, self).__init__(kerberos_kdc_address=kerberos_kdc_address, kerberos_principal=kerberos_principal, kerberos_realm=kerberos_realm, **kwargs)
self.kerberos_kdc_address = kerberos_kdc_address
self.kerberos_principal = kerberos_principal
self.kerberos_realm = kerberos_realm
self.credentials_type = 'KerberosKeytab' # type: str
self.secrets = secrets
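# --- Illustrative example (not part of the generated models) ----------------
# Sketch of a keytab-backed datastore credential; the keytab content is passed
# in by the caller, and the realm details reuse the placeholder values above.
def _example_kerberos_keytab_credentials(keytab_secret):
    """Combine Kerberos realm information with a keytab secret."""
    return KerberosKeytabCredentials(
        kerberos_kdc_address="kdc.contoso.com",   # placeholder host name
        kerberos_principal="mlworkspace",         # placeholder principal
        kerberos_realm="CONTOSO.COM",             # placeholder realm
        secrets=KerberosKeytabSecrets(kerberos_keytab=keytab_secret),
    )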
class KerberosKeytabSecrets(DatastoreSecrets):
"""KerberosKeytabSecrets.
All required parameters must be populated in order to send to Azure.
:ivar secrets_type: Required. [Required] Credential type used to authenticate with
storage. Constant filled by server. Possible values include: "AccountKey", "Certificate", "Sas",
"ServicePrincipal", "KerberosPassword", "KerberosKeytab".
:vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
:ivar kerberos_keytab: Kerberos keytab secret.
:vartype kerberos_keytab: str
"""
_validation = {
'secrets_type': {'required': True},
}
_attribute_map = {
'secrets_type': {'key': 'secretsType', 'type': 'str'},
'kerberos_keytab': {'key': 'kerberosKeytab', 'type': 'str'},
}
def __init__(
self,
*,
kerberos_keytab: Optional[str] = None,
**kwargs
):
"""
:keyword kerberos_keytab: Kerberos keytab secret.
:paramtype kerberos_keytab: str
"""
super(KerberosKeytabSecrets, self).__init__(**kwargs)
self.secrets_type = 'KerberosKeytab' # type: str
self.kerberos_keytab = kerberos_keytab
class KerberosPasswordCredentials(DatastoreCredentials, KerberosCredentials):
"""KerberosPasswordCredentials.
All required parameters must be populated in order to send to Azure.
:ivar kerberos_kdc_address: Required. [Required] IP Address or DNS HostName.
:vartype kerberos_kdc_address: str
:ivar kerberos_principal: Required. [Required] Kerberos Username.
:vartype kerberos_principal: str
:ivar kerberos_realm: Required. [Required] Domain over which a Kerberos authentication server
has the authority to authenticate a user, host or service.
:vartype kerberos_realm: str
:ivar credentials_type: Required. [Required] Credential type used to authenticate with
storage. Constant filled by server. Possible values include: "AccountKey", "Certificate",
"None", "Sas", "ServicePrincipal", "KerberosKeytab", "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
:ivar secrets: Required. [Required] Kerberos password secrets.
:vartype secrets: ~azure.mgmt.machinelearningservices.models.KerberosPasswordSecrets
"""
_validation = {
'kerberos_kdc_address': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'kerberos_principal': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'kerberos_realm': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'credentials_type': {'required': True},
'secrets': {'required': True},
}
_attribute_map = {
'kerberos_kdc_address': {'key': 'kerberosKdcAddress', 'type': 'str'},
'kerberos_principal': {'key': 'kerberosPrincipal', 'type': 'str'},
'kerberos_realm': {'key': 'kerberosRealm', 'type': 'str'},
'credentials_type': {'key': 'credentialsType', 'type': 'str'},
'secrets': {'key': 'secrets', 'type': 'KerberosPasswordSecrets'},
}
def __init__(
self,
*,
kerberos_kdc_address: str,
kerberos_principal: str,
kerberos_realm: str,
secrets: "KerberosPasswordSecrets",
**kwargs
):
"""
:keyword kerberos_kdc_address: Required. [Required] IP Address or DNS HostName.
:paramtype kerberos_kdc_address: str
:keyword kerberos_principal: Required. [Required] Kerberos Username.
:paramtype kerberos_principal: str
:keyword kerberos_realm: Required. [Required] Domain over which a Kerberos authentication
server has the authority to authenticate a user, host or service.
:paramtype kerberos_realm: str
:keyword secrets: Required. [Required] Kerberos password secrets.
:paramtype secrets: ~azure.mgmt.machinelearningservices.models.KerberosPasswordSecrets
"""
super(KerberosPasswordCredentials, self).__init__(kerberos_kdc_address=kerberos_kdc_address, kerberos_principal=kerberos_principal, kerberos_realm=kerberos_realm, **kwargs)
self.kerberos_kdc_address = kerberos_kdc_address
self.kerberos_principal = kerberos_principal
self.kerberos_realm = kerberos_realm
self.credentials_type = 'KerberosPassword' # type: str
self.secrets = secrets
class KerberosPasswordSecrets(DatastoreSecrets):
"""KerberosPasswordSecrets.
All required parameters must be populated in order to send to Azure.
:ivar secrets_type: Required. [Required] Credential type used to authenticate with
storage. Constant filled by server. Possible values include: "AccountKey", "Certificate", "Sas",
"ServicePrincipal", "KerberosPassword", "KerberosKeytab".
:vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
:ivar kerberos_password: Kerberos password secret.
:vartype kerberos_password: str
"""
_validation = {
'secrets_type': {'required': True},
}
_attribute_map = {
'secrets_type': {'key': 'secretsType', 'type': 'str'},
'kerberos_password': {'key': 'kerberosPassword', 'type': 'str'},
}
def __init__(
self,
*,
kerberos_password: Optional[str] = None,
**kwargs
):
"""
:keyword kerberos_password: Kerberos password secret.
:paramtype kerberos_password: str
"""
super(KerberosPasswordSecrets, self).__init__(**kwargs)
self.secrets_type = 'KerberosPassword' # type: str
self.kerberos_password = kerberos_password
class OnlineDeploymentDetails(EndpointDeploymentPropertiesBase):
"""OnlineDeploymentDetails.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: KubernetesOnlineDeployment, ManagedOnlineDeployment.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar code_configuration: Code configuration for the endpoint deployment.
:vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:ivar description: Description of the endpoint deployment.
:vartype description: str
:ivar environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:vartype environment_id: str
:ivar environment_variables: Environment variables configuration for the deployment.
:vartype environment_variables: dict[str, str]
:ivar properties: Property dictionary. Properties can be added, but not removed or altered.
:vartype properties: dict[str, str]
:ivar app_insights_enabled: If true, enables Application Insights logging.
:vartype app_insights_enabled: bool
:ivar egress_public_network_access: If Enabled, allow egress public network access. If
Disabled, this will create secure egress. Default: Enabled. Possible values include: "Enabled",
"Disabled".
:vartype egress_public_network_access: str or
~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
:ivar endpoint_compute_type: Required. [Required] The compute type of the endpoint. Constant
filled by server. Possible values include: "Managed", "Kubernetes", "AzureMLCompute".
:vartype endpoint_compute_type: str or
~azure.mgmt.machinelearningservices.models.EndpointComputeType
:ivar instance_type: Compute instance type.
:vartype instance_type: str
:ivar liveness_probe: Liveness probe monitors the health of the container regularly.
:vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:ivar model: The URI path to the model.
:vartype model: str
:ivar model_mount_path: The path to mount the model in custom container.
:vartype model_mount_path: str
:ivar private_network_connection: If true, enable private network connection.
DEPRECATED for future API versions. Use EgressPublicNetworkAccess.
:vartype private_network_connection: bool
:ivar provisioning_state: Provisioning state for the endpoint deployment. Possible values
include: "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
:ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
The properties and defaults are the same as liveness probe.
:vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:ivar request_settings: Request settings for the deployment.
:vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
:ivar scale_settings: Scale settings for the deployment.
If it is null or not provided,
it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
and to DefaultScaleSettings for ManagedOnlineDeployment.
:vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
"""
_validation = {
'endpoint_compute_type': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'code_configuration': {'key': 'codeConfiguration', 'type': 'CodeConfiguration'},
'description': {'key': 'description', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'properties': {'key': 'properties', 'type': '{str}'},
'app_insights_enabled': {'key': 'appInsightsEnabled', 'type': 'bool'},
'egress_public_network_access': {'key': 'egressPublicNetworkAccess', 'type': 'str'},
'endpoint_compute_type': {'key': 'endpointComputeType', 'type': 'str'},
'instance_type': {'key': 'instanceType', 'type': 'str'},
'liveness_probe': {'key': 'livenessProbe', 'type': 'ProbeSettings'},
'model': {'key': 'model', 'type': 'str'},
'model_mount_path': {'key': 'modelMountPath', 'type': 'str'},
'private_network_connection': {'key': 'privateNetworkConnection', 'type': 'bool'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'readiness_probe': {'key': 'readinessProbe', 'type': 'ProbeSettings'},
'request_settings': {'key': 'requestSettings', 'type': 'OnlineRequestSettings'},
'scale_settings': {'key': 'scaleSettings', 'type': 'OnlineScaleSettings'},
}
_subtype_map = {
'endpoint_compute_type': {'Kubernetes': 'KubernetesOnlineDeployment', 'Managed': 'ManagedOnlineDeployment'}
}
def __init__(
self,
*,
code_configuration: Optional["CodeConfiguration"] = None,
description: Optional[str] = None,
environment_id: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
properties: Optional[Dict[str, str]] = None,
app_insights_enabled: Optional[bool] = False,
egress_public_network_access: Optional[Union[str, "EgressPublicNetworkAccessType"]] = None,
instance_type: Optional[str] = None,
liveness_probe: Optional["ProbeSettings"] = None,
model: Optional[str] = None,
model_mount_path: Optional[str] = None,
private_network_connection: Optional[bool] = False,
readiness_probe: Optional["ProbeSettings"] = None,
request_settings: Optional["OnlineRequestSettings"] = None,
scale_settings: Optional["OnlineScaleSettings"] = None,
**kwargs
):
"""
:keyword code_configuration: Code configuration for the endpoint deployment.
:paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:keyword description: Description of the endpoint deployment.
:paramtype description: str
:keyword environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:paramtype environment_id: str
:keyword environment_variables: Environment variables configuration for the deployment.
:paramtype environment_variables: dict[str, str]
:keyword properties: Property dictionary. Properties can be added, but not removed or altered.
:paramtype properties: dict[str, str]
:keyword app_insights_enabled: If true, enables Application Insights logging.
:paramtype app_insights_enabled: bool
:keyword egress_public_network_access: If Enabled, allow egress public network access. If
Disabled, this will create secure egress. Default: Enabled. Possible values include: "Enabled",
"Disabled".
:paramtype egress_public_network_access: str or
~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
:keyword instance_type: Compute instance type.
:paramtype instance_type: str
:keyword liveness_probe: Liveness probe monitors the health of the container regularly.
:paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:keyword model: The URI path to the model.
:paramtype model: str
:keyword model_mount_path: The path to mount the model in custom container.
:paramtype model_mount_path: str
:keyword private_network_connection: If true, enable private network connection.
DEPRECATED for future API versions. Use EgressPublicNetworkAccess.
:paramtype private_network_connection: bool
:keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
The properties and defaults are the same as liveness probe.
:paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:keyword request_settings: Request settings for the deployment.
:paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
:keyword scale_settings: Scale settings for the deployment.
If it is null or not provided,
it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
and to DefaultScaleSettings for ManagedOnlineDeployment.
:paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
"""
super(OnlineDeploymentDetails, self).__init__(code_configuration=code_configuration, description=description, environment_id=environment_id, environment_variables=environment_variables, properties=properties, **kwargs)
self.app_insights_enabled = app_insights_enabled
self.egress_public_network_access = egress_public_network_access
self.endpoint_compute_type = 'OnlineDeploymentDetails' # type: str
self.instance_type = instance_type
self.liveness_probe = liveness_probe
self.model = model
self.model_mount_path = model_mount_path
self.private_network_connection = private_network_connection
self.provisioning_state = None
self.readiness_probe = readiness_probe
self.request_settings = request_settings
self.scale_settings = scale_settings
class KubernetesOnlineDeployment(OnlineDeploymentDetails):
"""Properties specific to a KubernetesOnlineDeployment.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar code_configuration: Code configuration for the endpoint deployment.
:vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:ivar description: Description of the endpoint deployment.
:vartype description: str
:ivar environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:vartype environment_id: str
:ivar environment_variables: Environment variables configuration for the deployment.
:vartype environment_variables: dict[str, str]
:ivar properties: Property dictionary. Properties can be added, but not removed or altered.
:vartype properties: dict[str, str]
:ivar app_insights_enabled: If true, enables Application Insights logging.
:vartype app_insights_enabled: bool
:ivar egress_public_network_access: If Enabled, allow egress public network access. If
Disabled, this will create secure egress. Default: Enabled. Possible values include: "Enabled",
"Disabled".
:vartype egress_public_network_access: str or
~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
:ivar endpoint_compute_type: Required. [Required] The compute type of the endpoint. Constant
filled by server. Possible values include: "Managed", "Kubernetes", "AzureMLCompute".
:vartype endpoint_compute_type: str or
~azure.mgmt.machinelearningservices.models.EndpointComputeType
:ivar instance_type: Compute instance type.
:vartype instance_type: str
:ivar liveness_probe: Liveness probe monitors the health of the container regularly.
:vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:ivar model: The URI path to the model.
:vartype model: str
:ivar model_mount_path: The path to mount the model in custom container.
:vartype model_mount_path: str
:ivar private_network_connection: If true, enable private network connection.
DEPRECATED for future API versions. Use EgressPublicNetworkAccess.
:vartype private_network_connection: bool
:ivar provisioning_state: Provisioning state for the endpoint deployment. Possible values
include: "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
:ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
The properties and defaults are the same as liveness probe.
:vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:ivar request_settings: Request settings for the deployment.
:vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
:ivar scale_settings: Scale settings for the deployment.
If it is null or not provided,
it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
and to DefaultScaleSettings for ManagedOnlineDeployment.
:vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
:ivar container_resource_requirements: The resource requirements for the container (cpu and
memory).
:vartype container_resource_requirements:
~azure.mgmt.machinelearningservices.models.ContainerResourceRequirements
"""
_validation = {
'endpoint_compute_type': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'code_configuration': {'key': 'codeConfiguration', 'type': 'CodeConfiguration'},
'description': {'key': 'description', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'properties': {'key': 'properties', 'type': '{str}'},
'app_insights_enabled': {'key': 'appInsightsEnabled', 'type': 'bool'},
'egress_public_network_access': {'key': 'egressPublicNetworkAccess', 'type': 'str'},
'endpoint_compute_type': {'key': 'endpointComputeType', 'type': 'str'},
'instance_type': {'key': 'instanceType', 'type': 'str'},
'liveness_probe': {'key': 'livenessProbe', 'type': 'ProbeSettings'},
'model': {'key': 'model', 'type': 'str'},
'model_mount_path': {'key': 'modelMountPath', 'type': 'str'},
'private_network_connection': {'key': 'privateNetworkConnection', 'type': 'bool'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'readiness_probe': {'key': 'readinessProbe', 'type': 'ProbeSettings'},
'request_settings': {'key': 'requestSettings', 'type': 'OnlineRequestSettings'},
'scale_settings': {'key': 'scaleSettings', 'type': 'OnlineScaleSettings'},
'container_resource_requirements': {'key': 'containerResourceRequirements', 'type': 'ContainerResourceRequirements'},
}
def __init__(
self,
*,
code_configuration: Optional["CodeConfiguration"] = None,
description: Optional[str] = None,
environment_id: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
properties: Optional[Dict[str, str]] = None,
app_insights_enabled: Optional[bool] = False,
egress_public_network_access: Optional[Union[str, "EgressPublicNetworkAccessType"]] = None,
instance_type: Optional[str] = None,
liveness_probe: Optional["ProbeSettings"] = None,
model: Optional[str] = None,
model_mount_path: Optional[str] = None,
private_network_connection: Optional[bool] = False,
readiness_probe: Optional["ProbeSettings"] = None,
request_settings: Optional["OnlineRequestSettings"] = None,
scale_settings: Optional["OnlineScaleSettings"] = None,
container_resource_requirements: Optional["ContainerResourceRequirements"] = None,
**kwargs
):
"""
:keyword code_configuration: Code configuration for the endpoint deployment.
:paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:keyword description: Description of the endpoint deployment.
:paramtype description: str
:keyword environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:paramtype environment_id: str
:keyword environment_variables: Environment variables configuration for the deployment.
:paramtype environment_variables: dict[str, str]
:keyword properties: Property dictionary. Properties can be added, but not removed or altered.
:paramtype properties: dict[str, str]
:keyword app_insights_enabled: If true, enables Application Insights logging.
:paramtype app_insights_enabled: bool
:keyword egress_public_network_access: If Enabled, allow egress public network access. If
Disabled, this will create secure egress. Default: Enabled. Possible values include: "Enabled",
"Disabled".
:paramtype egress_public_network_access: str or
~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
:keyword instance_type: Compute instance type.
:paramtype instance_type: str
:keyword liveness_probe: Liveness probe monitors the health of the container regularly.
:paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:keyword model: The URI path to the model.
:paramtype model: str
:keyword model_mount_path: The path to mount the model in custom container.
:paramtype model_mount_path: str
:keyword private_network_connection: If true, enable private network connection.
DEPRECATED for future API versions. Use EgressPublicNetworkAccess.
:paramtype private_network_connection: bool
:keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
The properties and defaults are the same as liveness probe.
:paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:keyword request_settings: Request settings for the deployment.
:paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
:keyword scale_settings: Scale settings for the deployment.
If it is null or not provided,
it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
and to DefaultScaleSettings for ManagedOnlineDeployment.
:paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
:keyword container_resource_requirements: The resource requirements for the container (cpu and
memory).
:paramtype container_resource_requirements:
~azure.mgmt.machinelearningservices.models.ContainerResourceRequirements
"""
super(KubernetesOnlineDeployment, self).__init__(code_configuration=code_configuration, description=description, environment_id=environment_id, environment_variables=environment_variables, properties=properties, app_insights_enabled=app_insights_enabled, egress_public_network_access=egress_public_network_access, instance_type=instance_type, liveness_probe=liveness_probe, model=model, model_mount_path=model_mount_path, private_network_connection=private_network_connection, readiness_probe=readiness_probe, request_settings=request_settings, scale_settings=scale_settings, **kwargs)
self.endpoint_compute_type = 'Kubernetes' # type: str
self.container_resource_requirements = container_resource_requirements
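# --- Illustrative example (not part of the generated models) ----------------
# Sketch of a minimal Kubernetes-hosted online deployment body; the instance
# type and model URI are placeholder values, and the optional probes, request
# and scale settings are omitted so their defaults apply.
def _example_kubernetes_online_deployment():
    """Describe a minimal Kubernetes online deployment."""
    return KubernetesOnlineDeployment(
        instance_type="defaultinstancetype",   # placeholder instance type
        model="azureml://example/models/my-model/versions/1",  # placeholder model URI
        app_insights_enabled=True,
        environment_variables={"LOG_LEVEL": "info"},
    )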
class LiteralJobInput(JobInput):
"""Literal input type.
All required parameters must be populated in order to send to Azure.
:ivar description: Description for the input.
:vartype description: str
:ivar job_input_type: Required. [Required] Specifies the type of job. Constant filled by server.
Possible values include: "Literal", "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
:ivar value: Required. [Required] Literal value for the input.
:vartype value: str
"""
_validation = {
'job_input_type': {'required': True},
'value': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'job_input_type': {'key': 'jobInputType', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
value: str,
description: Optional[str] = None,
**kwargs
):
"""
:keyword description: Description for the input.
:paramtype description: str
:keyword value: Required. [Required] Literal value for the input.
:paramtype value: str
"""
super(LiteralJobInput, self).__init__(description=description, **kwargs)
self.job_input_type = 'Literal' # type: str
self.value = value
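# --- Illustrative example (not part of the generated models) ----------------
# Literal inputs carry plain string values into a job; the value and
# description below are arbitrary illustrations.
def _example_literal_input():
    """Pass a literal string value as a job input."""
    return LiteralJobInput(value="42", description="number of trials")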
class ManagedIdentity(IdentityConfiguration):
"""Managed identity configuration.
All required parameters must be populated in order to send to Azure.
:ivar identity_type: Required. [Required] Specifies the type of identity framework. Constant
filled by server. Possible values include: "Managed", "AMLToken", "UserIdentity".
:vartype identity_type: str or
~azure.mgmt.machinelearningservices.models.IdentityConfigurationType
:ivar client_id: Specifies a user-assigned identity by client ID. For system-assigned, do not
set this field.
:vartype client_id: str
:ivar object_id: Specifies a user-assigned identity by object ID. For system-assigned, do not
set this field.
:vartype object_id: str
:ivar resource_id: Specifies a user-assigned identity by ARM resource ID. For system-assigned,
do not set this field.
:vartype resource_id: str
"""
_validation = {
'identity_type': {'required': True},
}
_attribute_map = {
'identity_type': {'key': 'identityType', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'object_id': {'key': 'objectId', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
}
def __init__(
self,
*,
client_id: Optional[str] = None,
object_id: Optional[str] = None,
resource_id: Optional[str] = None,
**kwargs
):
"""
:keyword client_id: Specifies a user-assigned identity by client ID. For system-assigned, do
not set this field.
:paramtype client_id: str
:keyword object_id: Specifies a user-assigned identity by object ID. For system-assigned, do
not set this field.
:paramtype object_id: str
:keyword resource_id: Specifies a user-assigned identity by ARM resource ID. For
system-assigned, do not set this field.
:paramtype resource_id: str
"""
super(ManagedIdentity, self).__init__(**kwargs)
self.identity_type = 'Managed' # type: str
self.client_id = client_id
self.object_id = object_id
self.resource_id = resource_id
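# --- Illustrative example (not part of the generated models) ----------------
# Per the docstring above, a system-assigned identity sets none of the id
# fields, while a user-assigned identity can be referenced by client id,
# object id or ARM resource id. The client id below is a placeholder GUID.
def _example_identities():
    """Return (system-assigned, user-assigned) identity configurations."""
    system_assigned = ManagedIdentity()
    user_assigned = ManagedIdentity(client_id="00000000-0000-0000-0000-000000000000")
    return system_assigned, user_assigned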
class ManagedOnlineDeployment(OnlineDeploymentDetails):
"""Properties specific to a ManagedOnlineDeployment.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar code_configuration: Code configuration for the endpoint deployment.
:vartype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:ivar description: Description of the endpoint deployment.
:vartype description: str
:ivar environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:vartype environment_id: str
:ivar environment_variables: Environment variables configuration for the deployment.
:vartype environment_variables: dict[str, str]
:ivar properties: Property dictionary. Properties can be added, but not removed or altered.
:vartype properties: dict[str, str]
:ivar app_insights_enabled: If true, enables Application Insights logging.
:vartype app_insights_enabled: bool
:ivar egress_public_network_access: If Enabled, allow egress public network access. If
Disabled, this will create secure egress. Default: Enabled. Possible values include: "Enabled",
"Disabled".
:vartype egress_public_network_access: str or
~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
:ivar endpoint_compute_type: Required. [Required] The compute type of the endpoint. Constant
filled by server. Possible values include: "Managed", "Kubernetes", "AzureMLCompute".
:vartype endpoint_compute_type: str or
~azure.mgmt.machinelearningservices.models.EndpointComputeType
:ivar instance_type: Compute instance type.
:vartype instance_type: str
:ivar liveness_probe: Liveness probe monitors the health of the container regularly.
:vartype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:ivar model: The URI path to the model.
:vartype model: str
:ivar model_mount_path: The path to mount the model in custom container.
:vartype model_mount_path: str
:ivar private_network_connection: If true, enable private network connection.
DEPRECATED for future API versions. Use EgressPublicNetworkAccess.
:vartype private_network_connection: bool
:ivar provisioning_state: Provisioning state for the endpoint deployment. Possible values
include: "Creating", "Deleting", "Scaling", "Updating", "Succeeded", "Failed", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.DeploymentProvisioningState
:ivar readiness_probe: Readiness probe validates if the container is ready to serve traffic.
The properties and defaults are the same as liveness probe.
:vartype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:ivar request_settings: Request settings for the deployment.
:vartype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
:ivar scale_settings: Scale settings for the deployment.
If it is null or not provided,
it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
and to DefaultScaleSettings for ManagedOnlineDeployment.
:vartype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
"""
_validation = {
'endpoint_compute_type': {'required': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'code_configuration': {'key': 'codeConfiguration', 'type': 'CodeConfiguration'},
'description': {'key': 'description', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'properties': {'key': 'properties', 'type': '{str}'},
'app_insights_enabled': {'key': 'appInsightsEnabled', 'type': 'bool'},
'egress_public_network_access': {'key': 'egressPublicNetworkAccess', 'type': 'str'},
'endpoint_compute_type': {'key': 'endpointComputeType', 'type': 'str'},
'instance_type': {'key': 'instanceType', 'type': 'str'},
'liveness_probe': {'key': 'livenessProbe', 'type': 'ProbeSettings'},
'model': {'key': 'model', 'type': 'str'},
'model_mount_path': {'key': 'modelMountPath', 'type': 'str'},
'private_network_connection': {'key': 'privateNetworkConnection', 'type': 'bool'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'readiness_probe': {'key': 'readinessProbe', 'type': 'ProbeSettings'},
'request_settings': {'key': 'requestSettings', 'type': 'OnlineRequestSettings'},
'scale_settings': {'key': 'scaleSettings', 'type': 'OnlineScaleSettings'},
}
def __init__(
self,
*,
code_configuration: Optional["CodeConfiguration"] = None,
description: Optional[str] = None,
environment_id: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
properties: Optional[Dict[str, str]] = None,
app_insights_enabled: Optional[bool] = False,
egress_public_network_access: Optional[Union[str, "EgressPublicNetworkAccessType"]] = None,
instance_type: Optional[str] = None,
liveness_probe: Optional["ProbeSettings"] = None,
model: Optional[str] = None,
model_mount_path: Optional[str] = None,
private_network_connection: Optional[bool] = False,
readiness_probe: Optional["ProbeSettings"] = None,
request_settings: Optional["OnlineRequestSettings"] = None,
scale_settings: Optional["OnlineScaleSettings"] = None,
**kwargs
):
"""
:keyword code_configuration: Code configuration for the endpoint deployment.
:paramtype code_configuration: ~azure.mgmt.machinelearningservices.models.CodeConfiguration
:keyword description: Description of the endpoint deployment.
:paramtype description: str
:keyword environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:paramtype environment_id: str
:keyword environment_variables: Environment variables configuration for the deployment.
:paramtype environment_variables: dict[str, str]
:keyword properties: Property dictionary. Properties can be added, but not removed or altered.
:paramtype properties: dict[str, str]
:keyword app_insights_enabled: If true, enables Application Insights logging.
:paramtype app_insights_enabled: bool
:keyword egress_public_network_access: If Enabled, allow egress public network access. If
Disabled, this will create secure egress. Default: Enabled. Possible values include: "Enabled",
"Disabled".
:paramtype egress_public_network_access: str or
~azure.mgmt.machinelearningservices.models.EgressPublicNetworkAccessType
:keyword instance_type: Compute instance type.
:paramtype instance_type: str
:keyword liveness_probe: Liveness probe monitors the health of the container regularly.
:paramtype liveness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:keyword model: The URI path to the model.
:paramtype model: str
:keyword model_mount_path: The path to mount the model in the custom container.
:paramtype model_mount_path: str
:keyword private_network_connection: If true, enable private network connection.
DEPRECATED for future API versions. Use EgressPublicNetworkAccess.
:paramtype private_network_connection: bool
:keyword readiness_probe: Readiness probe validates if the container is ready to serve traffic.
The properties and defaults are the same as liveness probe.
:paramtype readiness_probe: ~azure.mgmt.machinelearningservices.models.ProbeSettings
:keyword request_settings: Request settings for the deployment.
:paramtype request_settings: ~azure.mgmt.machinelearningservices.models.OnlineRequestSettings
:keyword scale_settings: Scale settings for the deployment.
If it is null or not provided,
it defaults to TargetUtilizationScaleSettings for KubernetesOnlineDeployment
and to DefaultScaleSettings for ManagedOnlineDeployment.
:paramtype scale_settings: ~azure.mgmt.machinelearningservices.models.OnlineScaleSettings
"""
super(ManagedOnlineDeployment, self).__init__(code_configuration=code_configuration, description=description, environment_id=environment_id, environment_variables=environment_variables, properties=properties, app_insights_enabled=app_insights_enabled, egress_public_network_access=egress_public_network_access, instance_type=instance_type, liveness_probe=liveness_probe, model=model, model_mount_path=model_mount_path, private_network_connection=private_network_connection, readiness_probe=readiness_probe, request_settings=request_settings, scale_settings=scale_settings, **kwargs)
self.endpoint_compute_type = 'Managed' # type: str
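# Editor's note: an illustrative sketch (not generated code) of a minimal ManagedOnlineDeployment.
# The instance type and model URI are hypothetical placeholders; scale_settings is omitted so the
# service applies DefaultScaleSettings, as documented above.
def _example_managed_online_deployment():
    return ManagedOnlineDeployment(
        instance_type="Standard_DS3_v2",  # placeholder VM SKU
        model="azureml://registries/example/models/example-model/versions/1",  # placeholder URI
        request_settings=OnlineRequestSettings(max_concurrent_requests_per_instance=1),
    )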
class ManagedServiceIdentity(msrest.serialization.Model):
"""Managed service identity (system assigned and/or user assigned identities).
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar principal_id: The service principal ID of the system assigned identity. This property
will only be provided for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant ID of the system assigned identity. This property will only be
provided for a system assigned identity.
:vartype tenant_id: str
:ivar type: Required. Type of managed service identity (where both SystemAssigned and
UserAssigned types are allowed). Possible values include: "None", "SystemAssigned",
"UserAssigned", "SystemAssigned,UserAssigned".
:vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
:ivar user_assigned_identities: The set of user assigned identities associated with the
resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
The dictionary values can be empty objects ({}) in requests.
:vartype user_assigned_identities: dict[str,
~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
'type': {'required': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedIdentity}'},
}
def __init__(
self,
*,
type: Union[str, "ManagedServiceIdentityType"],
user_assigned_identities: Optional[Dict[str, "UserAssignedIdentity"]] = None,
**kwargs
):
"""
:keyword type: Required. Type of managed service identity (where both SystemAssigned and
UserAssigned types are allowed). Possible values include: "None", "SystemAssigned",
"UserAssigned", "SystemAssigned,UserAssigned".
:paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
:keyword user_assigned_identities: The set of user assigned identities associated with the
resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}.
The dictionary values can be empty objects ({}) in requests.
:paramtype user_assigned_identities: dict[str,
~azure.mgmt.machinelearningservices.models.UserAssignedIdentity]
"""
super(ManagedServiceIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
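# Editor's note: an illustrative sketch (not generated code). A system-assigned identity only
# needs the required ``type``; principal_id and tenant_id are read-only and populated by the
# server. For user-assigned identities, pass the ARM resource IDs as dictionary keys in
# ``user_assigned_identities``, as described in the docstring above.
def _example_managed_service_identity():
    return ManagedServiceIdentity(type="SystemAssigned")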
class MedianStoppingPolicy(EarlyTerminationPolicy):
"""Defines an early termination policy based on running averages of the primary metric of all runs.
All required parameters must be populated in order to send to Azure.
:ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
:vartype delay_evaluation: int
:ivar evaluation_interval: Interval (number of runs) between policy evaluations.
:vartype evaluation_interval: int
:ivar policy_type: Required. [Required] Name of policy configuration. Constant filled by server.
Possible values include: "Bandit", "MedianStopping", "TruncationSelection".
:vartype policy_type: str or
~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicyType
"""
_validation = {
'policy_type': {'required': True},
}
_attribute_map = {
'delay_evaluation': {'key': 'delayEvaluation', 'type': 'int'},
'evaluation_interval': {'key': 'evaluationInterval', 'type': 'int'},
'policy_type': {'key': 'policyType', 'type': 'str'},
}
def __init__(
self,
*,
delay_evaluation: Optional[int] = 0,
evaluation_interval: Optional[int] = 0,
**kwargs
):
"""
:keyword delay_evaluation: Number of intervals by which to delay the first evaluation.
:paramtype delay_evaluation: int
:keyword evaluation_interval: Interval (number of runs) between policy evaluations.
:paramtype evaluation_interval: int
"""
super(MedianStoppingPolicy, self).__init__(delay_evaluation=delay_evaluation, evaluation_interval=evaluation_interval, **kwargs)
self.policy_type = 'MedianStopping' # type: str
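# Editor's note: an illustrative sketch (not generated code). A median stopping policy that waits
# five intervals before the first evaluation and then evaluates at every interval.
def _example_median_stopping_policy():
    return MedianStoppingPolicy(delay_evaluation=5, evaluation_interval=1)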
class MLFlowModelJobInput(JobInput, AssetJobInput):
"""MLFlowModelJobInput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:ivar uri: Required. [Required] Input Asset URI.
:vartype uri: str
:ivar description: Description for the input.
:vartype description: str
:ivar job_input_type: Required. [Required] Specifies the type of job input. Constant filled by server.
Possible values include: "Literal", "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
"""
_validation = {
'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'job_input_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_input_type': {'key': 'jobInputType', 'type': 'str'},
}
def __init__(
self,
*,
uri: str,
mode: Optional[Union[str, "InputDeliveryMode"]] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:keyword uri: Required. [Required] Input Asset URI.
:paramtype uri: str
:keyword description: Description for the input.
:paramtype description: str
"""
super(MLFlowModelJobInput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.description = description
self.job_input_type = 'MLFlowModel' # type: str
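# Editor's note: an illustrative sketch (not generated code) of an MLflow model job input. The
# URI is a hypothetical placeholder; ``mode`` accepts the InputDeliveryMode values documented
# above (here the string form "ReadOnlyMount" is used).
def _example_mlflow_model_job_input():
    return MLFlowModelJobInput(
        uri="azureml://datastores/workspaceblobstore/paths/models/example",  # placeholder
        mode="ReadOnlyMount",
    )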
class MLFlowModelJobOutput(JobOutput, AssetJobOutput):
"""MLFlowModelJobOutput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: Required. [Required] Specifies the type of job output. Constant filled by
server. Possible values include: "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
"""
_validation = {
'job_output_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
}
def __init__(
self,
*,
mode: Optional[Union[str, "OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
:keyword description: Description for the output.
:paramtype description: str
"""
super(MLFlowModelJobOutput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.description = description
self.job_output_type = 'MLFlowModel' # type: str
class MLTableData(DataVersionBaseDetails):
"""MLTable data definition.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_anonymous: If the name and version are system generated (anonymous registration).
:vartype is_anonymous: bool
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar data_type: Required. [Required] Specifies the type of data. Constant filled by server.
Possible values include: "UriFile", "UriFolder", "MLTable".
:vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
:ivar data_uri: Required. [Required] Uri of the data. Usage/meaning depends on
Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20220201Preview.Assets.DataVersionBase.DataType.
:vartype data_uri: str
:ivar referenced_uris: Uris referenced in the MLTable definition (required for lineage).
:vartype referenced_uris: list[str]
"""
_validation = {
'data_type': {'required': True},
'data_uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_anonymous': {'key': 'isAnonymous', 'type': 'bool'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'data_type': {'key': 'dataType', 'type': 'str'},
'data_uri': {'key': 'dataUri', 'type': 'str'},
'referenced_uris': {'key': 'referencedUris', 'type': '[str]'},
}
def __init__(
self,
*,
data_uri: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_anonymous: Optional[bool] = False,
is_archived: Optional[bool] = False,
referenced_uris: Optional[List[str]] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_anonymous: If the name and version are system generated (anonymous registration).
:paramtype is_anonymous: bool
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword data_uri: Required. [Required] Uri of the data. Usage/meaning depends on
Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20220201Preview.Assets.DataVersionBase.DataType.
:paramtype data_uri: str
:keyword referenced_uris: Uris referenced in the MLTable definition (required for lineage).
:paramtype referenced_uris: list[str]
"""
super(MLTableData, self).__init__(description=description, properties=properties, tags=tags, is_anonymous=is_anonymous, is_archived=is_archived, data_uri=data_uri, **kwargs)
self.data_type = 'MLTable' # type: str
self.referenced_uris = referenced_uris
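# Editor's note: an illustrative sketch (not generated code) of an MLTable data version. Both
# URIs are hypothetical placeholders; ``referenced_uris`` records, for lineage, the files the
# MLTable definition points at.
def _example_mltable_data():
    return MLTableData(
        data_uri="azureml://datastores/workspaceblobstore/paths/data/example",  # placeholder
        referenced_uris=[
            "azureml://datastores/workspaceblobstore/paths/data/example/part-000.csv",  # placeholder
        ],
    )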
class MLTableJobInput(JobInput, AssetJobInput):
"""MLTableJobInput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:ivar uri: Required. [Required] Input Asset URI.
:vartype uri: str
:ivar description: Description for the input.
:vartype description: str
:ivar job_input_type: Required. [Required] Specifies the type of job input. Constant filled by server.
Possible values include: "Literal", "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
"""
_validation = {
'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'job_input_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_input_type': {'key': 'jobInputType', 'type': 'str'},
}
def __init__(
self,
*,
uri: str,
mode: Optional[Union[str, "InputDeliveryMode"]] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:keyword uri: Required. [Required] Input Asset URI.
:paramtype uri: str
:keyword description: Description for the input.
:paramtype description: str
"""
super(MLTableJobInput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.description = description
self.job_input_type = 'MLTable' # type: str
class MLTableJobOutput(JobOutput, AssetJobOutput):
"""MLTableJobOutput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: Required. [Required] Specifies the type of job output. Constant filled by
server. Possible values include: "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
"""
_validation = {
'job_output_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
}
def __init__(
self,
*,
mode: Optional[Union[str, "OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
:keyword description: Description for the output.
:paramtype description: str
"""
super(MLTableJobOutput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.description = description
self.job_output_type = 'MLTable' # type: str
class ModelContainerData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.ModelContainerDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'ModelContainerDetails'},
}
def __init__(
self,
*,
properties: "ModelContainerDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.ModelContainerDetails
"""
super(ModelContainerData, self).__init__(**kwargs)
self.properties = properties
class ModelContainerDetails(AssetContainer):
"""ModelContainerDetails.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar latest_version: The latest version inside this container.
:vartype latest_version: str
:ivar next_version: The next auto incremental version.
:vartype next_version: str
"""
_validation = {
'latest_version': {'readonly': True},
'next_version': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'latest_version': {'key': 'latestVersion', 'type': 'str'},
'next_version': {'key': 'nextVersion', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_archived: Optional[bool] = False,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
"""
super(ModelContainerDetails, self).__init__(description=description, properties=properties, tags=tags, is_archived=is_archived, **kwargs)
class ModelContainerResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of ModelContainer entities.
:ivar next_link: The link to the next page of ModelContainer objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type ModelContainer.
:vartype value: list[~azure.mgmt.machinelearningservices.models.ModelContainerData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[ModelContainerData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["ModelContainerData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of ModelContainer objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type ModelContainer.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.ModelContainerData]
"""
super(ModelContainerResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class ModelVersionData(Resource):
"""Azure Resource Manager resource envelope.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.ModelVersionDetails
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'properties': {'key': 'properties', 'type': 'ModelVersionDetails'},
}
def __init__(
self,
*,
properties: "ModelVersionDetails",
**kwargs
):
"""
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.ModelVersionDetails
"""
super(ModelVersionData, self).__init__(**kwargs)
self.properties = properties
class ModelVersionDetails(AssetBase):
"""Model asset version details.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_anonymous: If the name and version are system generated (anonymous registration).
:vartype is_anonymous: bool
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar flavors: Mapping of model flavors to their properties.
:vartype flavors: dict[str, ~azure.mgmt.machinelearningservices.models.FlavorData]
:ivar job_name: Name of the training job which produced this model.
:vartype job_name: str
:ivar model_type: The storage format for this entity. Used for NCD. Possible values include:
"CustomModel", "MLFlowModel", "TritonModel".
:vartype model_type: str or ~azure.mgmt.machinelearningservices.models.ModelType
:ivar model_uri: The URI path to the model contents.
:vartype model_uri: str
"""
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_anonymous': {'key': 'isAnonymous', 'type': 'bool'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'flavors': {'key': 'flavors', 'type': '{FlavorData}'},
'job_name': {'key': 'jobName', 'type': 'str'},
'model_type': {'key': 'modelType', 'type': 'str'},
'model_uri': {'key': 'modelUri', 'type': 'str'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_anonymous: Optional[bool] = False,
is_archived: Optional[bool] = False,
flavors: Optional[Dict[str, "FlavorData"]] = None,
job_name: Optional[str] = None,
model_type: Optional[Union[str, "ModelType"]] = None,
model_uri: Optional[str] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_anonymous: If the name and version are system generated (anonymous registration).
:paramtype is_anonymous: bool
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword flavors: Mapping of model flavors to their properties.
:paramtype flavors: dict[str, ~azure.mgmt.machinelearningservices.models.FlavorData]
:keyword job_name: Name of the training job which produced this model.
:paramtype job_name: str
:keyword model_type: The storage format for this entity. Used for NCD. Possible values include:
"CustomModel", "MLFlowModel", "TritonModel".
:paramtype model_type: str or ~azure.mgmt.machinelearningservices.models.ModelType
:keyword model_uri: The URI path to the model contents.
:paramtype model_uri: str
"""
super(ModelVersionDetails, self).__init__(description=description, properties=properties, tags=tags, is_anonymous=is_anonymous, is_archived=is_archived, **kwargs)
self.flavors = flavors
self.job_name = job_name
self.model_type = model_type
self.model_uri = model_uri
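# Editor's note: an illustrative sketch (not generated code) of a model version registered from a
# training job. The URI and job name are hypothetical placeholders; ``model_type`` uses the string
# form of the ModelType enum documented above.
def _example_model_version_details():
    return ModelVersionDetails(
        model_type="MLFlowModel",
        model_uri="azureml://jobs/example-job/outputs/artifacts/model",  # placeholder
        job_name="example-job",  # placeholder
    )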
class ModelVersionResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of ModelVersion entities.
:ivar next_link: The link to the next page of ModelVersion objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type ModelVersion.
:vartype value: list[~azure.mgmt.machinelearningservices.models.ModelVersionData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[ModelVersionData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["ModelVersionData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of ModelVersion objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type ModelVersion.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.ModelVersionData]
"""
super(ModelVersionResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class Mpi(DistributionConfiguration):
"""MPI distribution configuration.
All required parameters must be populated in order to send to Azure.
:ivar distribution_type: Required. [Required] Specifies the type of distribution
framework. Constant filled by server. Possible values include: "PyTorch", "TensorFlow", "Mpi".
:vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
:ivar process_count_per_instance: Number of processes per MPI node.
:vartype process_count_per_instance: int
"""
_validation = {
'distribution_type': {'required': True},
}
_attribute_map = {
'distribution_type': {'key': 'distributionType', 'type': 'str'},
'process_count_per_instance': {'key': 'processCountPerInstance', 'type': 'int'},
}
def __init__(
self,
*,
process_count_per_instance: Optional[int] = None,
**kwargs
):
"""
:keyword process_count_per_instance: Number of processes per MPI node.
:paramtype process_count_per_instance: int
"""
super(Mpi, self).__init__(**kwargs)
self.distribution_type = 'Mpi' # type: str
self.process_count_per_instance = process_count_per_instance
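# Editor's note: an illustrative sketch (not generated code). An MPI distribution configuration
# that runs four processes on each node of the job.
def _example_mpi_distribution():
    return Mpi(process_count_per_instance=4)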
class NlpVertical(msrest.serialization.Model):
"""Abstract class for NLP related AutoML tasks.
NLP - Natural Language Processing.
:ivar data_settings: Data inputs for AutoMLJob.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalDataSettings
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
"""
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'NlpVerticalDataSettings'},
'featurization_settings': {'key': 'featurizationSettings', 'type': 'NlpVerticalFeaturizationSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'NlpVerticalLimitSettings'},
}
def __init__(
self,
*,
data_settings: Optional["NlpVerticalDataSettings"] = None,
featurization_settings: Optional["NlpVerticalFeaturizationSettings"] = None,
limit_settings: Optional["NlpVerticalLimitSettings"] = None,
**kwargs
):
"""
:keyword data_settings: Data inputs for AutoMLJob.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalDataSettings
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
"""
super(NlpVertical, self).__init__(**kwargs)
self.data_settings = data_settings
self.featurization_settings = featurization_settings
self.limit_settings = limit_settings
class NlpVerticalDataSettings(DataSettings):
"""Class for data inputs.
NLP - Natural Language Processing.
All required parameters must be populated in order to send to Azure.
:ivar target_column_name: Required. [Required] Target column name: this is the prediction values
column, also known as the label column name in the context of classification tasks.
:vartype target_column_name: str
:ivar test_data: Test data input.
:vartype test_data: ~azure.mgmt.machinelearningservices.models.TestDataSettings
:ivar training_data: Required. [Required] Training data input.
:vartype training_data: ~azure.mgmt.machinelearningservices.models.TrainingDataSettings
:ivar validation_data: Validation data inputs.
:vartype validation_data:
~azure.mgmt.machinelearningservices.models.NlpVerticalValidationDataSettings
"""
_validation = {
'target_column_name': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'training_data': {'required': True},
}
_attribute_map = {
'target_column_name': {'key': 'targetColumnName', 'type': 'str'},
'test_data': {'key': 'testData', 'type': 'TestDataSettings'},
'training_data': {'key': 'trainingData', 'type': 'TrainingDataSettings'},
'validation_data': {'key': 'validationData', 'type': 'NlpVerticalValidationDataSettings'},
}
def __init__(
self,
*,
target_column_name: str,
training_data: "TrainingDataSettings",
test_data: Optional["TestDataSettings"] = None,
validation_data: Optional["NlpVerticalValidationDataSettings"] = None,
**kwargs
):
"""
:keyword target_column_name: Required. [Required] Target column name: this is the prediction
values column, also known as the label column name in the context of classification tasks.
:paramtype target_column_name: str
:keyword test_data: Test data input.
:paramtype test_data: ~azure.mgmt.machinelearningservices.models.TestDataSettings
:keyword training_data: Required. [Required] Training data input.
:paramtype training_data: ~azure.mgmt.machinelearningservices.models.TrainingDataSettings
:keyword validation_data: Validation data inputs.
:paramtype validation_data:
~azure.mgmt.machinelearningservices.models.NlpVerticalValidationDataSettings
"""
super(NlpVerticalDataSettings, self).__init__(target_column_name=target_column_name, test_data=test_data, training_data=training_data, **kwargs)
self.validation_data = validation_data
class NlpVerticalFeaturizationSettings(FeaturizationSettings):
"""NlpVerticalFeaturizationSettings.
:ivar dataset_language: Dataset language, useful for the text data.
:vartype dataset_language: str
"""
_attribute_map = {
'dataset_language': {'key': 'datasetLanguage', 'type': 'str'},
}
def __init__(
self,
*,
dataset_language: Optional[str] = None,
**kwargs
):
"""
:keyword dataset_language: Dataset language, useful for the text data.
:paramtype dataset_language: str
"""
super(NlpVerticalFeaturizationSettings, self).__init__(dataset_language=dataset_language, **kwargs)
class NlpVerticalLimitSettings(msrest.serialization.Model):
"""Job execution constraints.
:ivar max_concurrent_trials: Maximum Concurrent AutoML iterations.
:vartype max_concurrent_trials: int
:ivar max_trials: Number of AutoML iterations.
:vartype max_trials: int
:ivar timeout: AutoML job timeout.
:vartype timeout: ~datetime.timedelta
"""
_attribute_map = {
'max_concurrent_trials': {'key': 'maxConcurrentTrials', 'type': 'int'},
'max_trials': {'key': 'maxTrials', 'type': 'int'},
'timeout': {'key': 'timeout', 'type': 'duration'},
}
def __init__(
self,
*,
max_concurrent_trials: Optional[int] = 1,
max_trials: Optional[int] = 1,
timeout: Optional[datetime.timedelta] = None,
**kwargs
):
"""
:keyword max_concurrent_trials: Maximum Concurrent AutoML iterations.
:paramtype max_concurrent_trials: int
:keyword max_trials: Number of AutoML iterations.
:paramtype max_trials: int
:keyword timeout: AutoML job timeout.
:paramtype timeout: ~datetime.timedelta
"""
super(NlpVerticalLimitSettings, self).__init__(**kwargs)
self.max_concurrent_trials = max_concurrent_trials
self.max_trials = max_trials
self.timeout = timeout
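# Editor's note: an illustrative sketch (not generated code) of NLP AutoML limit settings: at most
# eight trials, two running concurrently, with a one-hour overall timeout expressed as a timedelta.
def _example_nlp_vertical_limit_settings():
    return NlpVerticalLimitSettings(
        max_concurrent_trials=2,
        max_trials=8,
        timeout=datetime.timedelta(hours=1),
    )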
class NlpVerticalValidationDataSettings(ValidationDataSettings):
"""NlpVerticalValidationDataSettings.
:ivar data: Validation data MLTable.
:vartype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:ivar validation_data_size: The fraction of the training dataset that needs to be set aside for
validation purposes. Values between (0.0, 1.0).
Applied when a validation dataset is not provided.
:vartype validation_data_size: float
"""
_attribute_map = {
'data': {'key': 'data', 'type': 'MLTableJobInput'},
'validation_data_size': {'key': 'validationDataSize', 'type': 'float'},
}
def __init__(
self,
*,
data: Optional["MLTableJobInput"] = None,
validation_data_size: Optional[float] = None,
**kwargs
):
"""
:keyword data: Validation data MLTable.
:paramtype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:keyword validation_data_size: The fraction of the training dataset that needs to be set aside
for validation purposes. Values between (0.0, 1.0).
Applied when a validation dataset is not provided.
:paramtype validation_data_size: float
"""
super(NlpVerticalValidationDataSettings, self).__init__(data=data, validation_data_size=validation_data_size, **kwargs)
class NoneDatastoreCredentials(DatastoreCredentials):
"""Empty/none datastore credentials.
All required parameters must be populated in order to send to Azure.
:ivar credentials_type: Required. [Required] Credential type used for authentication with
storage. Constant filled by server. Possible values include: "AccountKey", "Certificate",
"None", "Sas", "ServicePrincipal", "KerberosKeytab", "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
"""
_validation = {
'credentials_type': {'required': True},
}
_attribute_map = {
'credentials_type': {'key': 'credentialsType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(NoneDatastoreCredentials, self).__init__(**kwargs)
self.credentials_type = 'None' # type: str
class Objective(msrest.serialization.Model):
"""Optimization objective.
All required parameters must be populated in order to send to Azure.
:ivar goal: Required. [Required] Defines supported metric goals for hyperparameter tuning.
Possible values include: "Minimize", "Maximize".
:vartype goal: str or ~azure.mgmt.machinelearningservices.models.Goal
:ivar primary_metric: Required. [Required] Name of the metric to optimize.
:vartype primary_metric: str
"""
_validation = {
'goal': {'required': True},
'primary_metric': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'goal': {'key': 'goal', 'type': 'str'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
goal: Union[str, "Goal"],
primary_metric: str,
**kwargs
):
"""
:keyword goal: Required. [Required] Defines supported metric goals for hyperparameter tuning.
Possible values include: "Minimize", "Maximize".
:paramtype goal: str or ~azure.mgmt.machinelearningservices.models.Goal
:keyword primary_metric: Required. [Required] Name of the metric to optimize.
:paramtype primary_metric: str
"""
super(Objective, self).__init__(**kwargs)
self.goal = goal
self.primary_metric = primary_metric
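# Editor's note: an illustrative sketch (not generated code). The metric name below is a
# hypothetical placeholder; ``goal`` uses the string form of the Goal enum documented above.
def _example_objective():
    return Objective(goal="Maximize", primary_metric="AUC_weighted")  # placeholder metric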
class OnlineDeploymentData(TrackedResource):
"""OnlineDeploymentData.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar location: Required. The geo-location where the resource lives.
:vartype location: str
:ivar identity: Managed service identity (system assigned and/or user assigned identities).
:vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
:ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:vartype kind: str
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.OnlineDeploymentDetails
:ivar sku: Sku details required for ARM contract for Autoscaling.
:vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'OnlineDeploymentDetails'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
location: str,
properties: "OnlineDeploymentDetails",
tags: Optional[Dict[str, str]] = None,
identity: Optional["ManagedServiceIdentity"] = None,
kind: Optional[str] = None,
sku: Optional["Sku"] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
:keyword location: Required. The geo-location where the resource lives.
:paramtype location: str
:keyword identity: Managed service identity (system assigned and/or user assigned identities).
:paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
:keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:paramtype kind: str
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.OnlineDeploymentDetails
:keyword sku: Sku details required for ARM contract for Autoscaling.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
super(OnlineDeploymentData, self).__init__(tags=tags, location=location, **kwargs)
self.identity = identity
self.kind = kind
self.properties = properties
self.sku = sku
class OnlineDeploymentTrackedResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of OnlineDeployment entities.
:ivar next_link: The link to the next page of OnlineDeployment objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type OnlineDeployment.
:vartype value: list[~azure.mgmt.machinelearningservices.models.OnlineDeploymentData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[OnlineDeploymentData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["OnlineDeploymentData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of OnlineDeployment objects. If null, there are
no additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type OnlineDeployment.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.OnlineDeploymentData]
"""
super(OnlineDeploymentTrackedResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class OnlineEndpointData(TrackedResource):
"""OnlineEndpointData.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~azure.mgmt.machinelearningservices.models.SystemData
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
:ivar location: Required. The geo-location where the resource lives.
:vartype location: str
:ivar identity: Managed service identity (system assigned and/or user assigned identities).
:vartype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
:ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:vartype kind: str
:ivar properties: Required. [Required] Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.OnlineEndpointDetails
:ivar sku: Sku details required for ARM contract for Autoscaling.
:vartype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'properties': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'OnlineEndpointDetails'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
location: str,
properties: "OnlineEndpointDetails",
tags: Optional[Dict[str, str]] = None,
identity: Optional["ManagedServiceIdentity"] = None,
kind: Optional[str] = None,
sku: Optional["Sku"] = None,
**kwargs
):
"""
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
:keyword location: Required. The geo-location where the resource lives.
:paramtype location: str
:keyword identity: Managed service identity (system assigned and/or user assigned identities).
:paramtype identity: ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentity
:keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:paramtype kind: str
:keyword properties: Required. [Required] Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.OnlineEndpointDetails
:keyword sku: Sku details required for ARM contract for Autoscaling.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.Sku
"""
super(OnlineEndpointData, self).__init__(tags=tags, location=location, **kwargs)
self.identity = identity
self.kind = kind
self.properties = properties
self.sku = sku
class OnlineEndpointDetails(EndpointPropertiesBase):
"""Online endpoint configuration.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar auth_mode: Required. [Required] Use 'Key' for key based authentication and 'AMLToken' for
Azure Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken' does.
Possible values include: "AMLToken", "Key", "AADToken".
:vartype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
:ivar description: Description of the inference endpoint.
:vartype description: str
:ivar keys: EndpointAuthKeys to set initially on an Endpoint.
This property will always be returned as null. AuthKey values must be retrieved using the
ListKeys API.
:vartype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
:ivar properties: Property dictionary. Properties can be added, but not removed or altered.
:vartype properties: dict[str, str]
:ivar scoring_uri: Endpoint URI.
:vartype scoring_uri: str
:ivar swagger_uri: Endpoint Swagger URI.
:vartype swagger_uri: str
:ivar compute: ARM resource ID of the compute if it exists. Optional.
:vartype compute: str
:ivar mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
returned scoring. Traffic values need to sum to at most 50.
:vartype mirror_traffic: dict[str, int]
:ivar provisioning_state: Provisioning state for the endpoint. Possible values include:
"Creating", "Deleting", "Succeeded", "Failed", "Updating", "Canceled".
:vartype provisioning_state: str or
~azure.mgmt.machinelearningservices.models.EndpointProvisioningState
:ivar public_network_access: Set to "Enabled" for endpoints that should allow public access
when Private Link is enabled. Possible values include: "Enabled", "Disabled".
:vartype public_network_access: str or
~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
:ivar traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic values
need to sum to 100.
:vartype traffic: dict[str, int]
"""
_validation = {
'auth_mode': {'required': True},
'scoring_uri': {'readonly': True},
'swagger_uri': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'auth_mode': {'key': 'authMode', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'keys': {'key': 'keys', 'type': 'EndpointAuthKeys'},
'properties': {'key': 'properties', 'type': '{str}'},
'scoring_uri': {'key': 'scoringUri', 'type': 'str'},
'swagger_uri': {'key': 'swaggerUri', 'type': 'str'},
'compute': {'key': 'compute', 'type': 'str'},
'mirror_traffic': {'key': 'mirrorTraffic', 'type': '{int}'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
'traffic': {'key': 'traffic', 'type': '{int}'},
}
def __init__(
self,
*,
auth_mode: Union[str, "EndpointAuthMode"],
description: Optional[str] = None,
keys: Optional["EndpointAuthKeys"] = None,
properties: Optional[Dict[str, str]] = None,
compute: Optional[str] = None,
mirror_traffic: Optional[Dict[str, int]] = None,
public_network_access: Optional[Union[str, "PublicNetworkAccessType"]] = None,
traffic: Optional[Dict[str, int]] = None,
**kwargs
):
"""
:keyword auth_mode: Required. [Required] Use 'Key' for key based authentication and 'AMLToken'
for Azure Machine Learning token-based authentication. 'Key' doesn't expire but 'AMLToken'
does. Possible values include: "AMLToken", "Key", "AADToken".
:paramtype auth_mode: str or ~azure.mgmt.machinelearningservices.models.EndpointAuthMode
:keyword description: Description of the inference endpoint.
:paramtype description: str
:keyword keys: EndpointAuthKeys to set initially on an Endpoint.
This property will always be returned as null. AuthKey values must be retrieved using the
ListKeys API.
:paramtype keys: ~azure.mgmt.machinelearningservices.models.EndpointAuthKeys
:keyword properties: Property dictionary. Properties can be added, but not removed or altered.
:paramtype properties: dict[str, str]
:keyword compute: ARM resource ID of the compute if it exists. Optional.
:paramtype compute: str
:keyword mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
returned scoring. Traffic values need to sum to at most 50.
:paramtype mirror_traffic: dict[str, int]
:keyword public_network_access: Set to "Enabled" for endpoints that should allow public access
when Private Link is enabled. Possible values include: "Enabled", "Disabled".
:paramtype public_network_access: str or
~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
:keyword traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic
values need to sum to 100.
:paramtype traffic: dict[str, int]
"""
super(OnlineEndpointDetails, self).__init__(auth_mode=auth_mode, description=description, keys=keys, properties=properties, **kwargs)
self.compute = compute
self.mirror_traffic = mirror_traffic
self.provisioning_state = None
self.public_network_access = public_network_access
self.traffic = traffic
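# Editor's note: an illustrative sketch (not generated code) of an online endpoint using key-based
# authentication, with all live traffic routed to a single deployment named "blue" (the deployment
# name is hypothetical; traffic values must sum to 100, as documented above).
def _example_online_endpoint_details():
    return OnlineEndpointDetails(
        auth_mode="Key",
        traffic={"blue": 100},
    )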
class OnlineEndpointTrackedResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of OnlineEndpoint entities.
:ivar next_link: The link to the next page of OnlineEndpoint objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type OnlineEndpoint.
:vartype value: list[~azure.mgmt.machinelearningservices.models.OnlineEndpointData]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[OnlineEndpointData]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["OnlineEndpointData"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of OnlineEndpoint objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type OnlineEndpoint.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.OnlineEndpointData]
"""
super(OnlineEndpointTrackedResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class OnlineRequestSettings(msrest.serialization.Model):
"""Online deployment scoring requests configuration.
:ivar max_concurrent_requests_per_instance: The maximum number of concurrent requests per node
allowed per deployment. Defaults to 1.
:vartype max_concurrent_requests_per_instance: int
:ivar max_queue_wait: The maximum amount of time a request will stay in the queue in ISO 8601
format.
Defaults to 500ms.
:vartype max_queue_wait: ~datetime.timedelta
:ivar request_timeout: The scoring timeout in ISO 8601 format.
Defaults to 5000ms.
:vartype request_timeout: ~datetime.timedelta
"""
_attribute_map = {
'max_concurrent_requests_per_instance': {'key': 'maxConcurrentRequestsPerInstance', 'type': 'int'},
'max_queue_wait': {'key': 'maxQueueWait', 'type': 'duration'},
'request_timeout': {'key': 'requestTimeout', 'type': 'duration'},
}
def __init__(
self,
*,
max_concurrent_requests_per_instance: Optional[int] = 1,
max_queue_wait: Optional[datetime.timedelta] = "PT0.5S",
request_timeout: Optional[datetime.timedelta] = "PT5S",
**kwargs
):
"""
:keyword max_concurrent_requests_per_instance: The maximum number of concurrent requests per
node allowed per deployment. Defaults to 1.
:paramtype max_concurrent_requests_per_instance: int
:keyword max_queue_wait: The maximum amount of time a request will stay in the queue in ISO
8601 format.
Defaults to 500ms.
:paramtype max_queue_wait: ~datetime.timedelta
:keyword request_timeout: The scoring timeout in ISO 8601 format.
Defaults to 5000ms.
:paramtype request_timeout: ~datetime.timedelta
"""
super(OnlineRequestSettings, self).__init__(**kwargs)
self.max_concurrent_requests_per_instance = max_concurrent_requests_per_instance
self.max_queue_wait = max_queue_wait
self.request_timeout = request_timeout
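# Editor's note: an illustrative sketch (not generated code). Request settings allowing two
# concurrent requests per node, a one-second queue wait, and a ten-second scoring timeout,
# expressed as timedeltas (serialized to ISO 8601 durations on the wire).
def _example_online_request_settings():
    return OnlineRequestSettings(
        max_concurrent_requests_per_instance=2,
        max_queue_wait=datetime.timedelta(seconds=1),
        request_timeout=datetime.timedelta(seconds=10),
    )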
class OutputPathAssetReference(AssetReferenceBase):
"""Reference to an asset via its path in a job output.
All required parameters must be populated in order to send to Azure.
:ivar reference_type: Required. [Required] Specifies the type of asset reference.Constant
filled by server. Possible values include: "Id", "DataPath", "OutputPath".
:vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
:ivar job_id: ARM resource ID of the job.
:vartype job_id: str
:ivar path: The path of the file/directory in the job output.
:vartype path: str
"""
_validation = {
'reference_type': {'required': True},
}
_attribute_map = {
'reference_type': {'key': 'referenceType', 'type': 'str'},
'job_id': {'key': 'jobId', 'type': 'str'},
'path': {'key': 'path', 'type': 'str'},
}
def __init__(
self,
*,
job_id: Optional[str] = None,
path: Optional[str] = None,
**kwargs
):
"""
:keyword job_id: ARM resource ID of the job.
:paramtype job_id: str
:keyword path: The path of the file/directory in the job output.
:paramtype path: str
"""
super(OutputPathAssetReference, self).__init__(**kwargs)
self.reference_type = 'OutputPath' # type: str
self.job_id = job_id
self.path = path
class PartialAssetReferenceBase(msrest.serialization.Model):
"""Base definition for asset references.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: PartialDataPathAssetReference, PartialIdAssetReference, PartialOutputPathAssetReference.
All required parameters must be populated in order to send to Azure.
:ivar reference_type: Required. [Required] Specifies the type of asset reference.Constant
filled by server. Possible values include: "Id", "DataPath", "OutputPath".
:vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
"""
_validation = {
'reference_type': {'required': True},
}
_attribute_map = {
'reference_type': {'key': 'referenceType', 'type': 'str'},
}
_subtype_map = {
'reference_type': {'DataPath': 'PartialDataPathAssetReference', 'Id': 'PartialIdAssetReference', 'OutputPath': 'PartialOutputPathAssetReference'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(PartialAssetReferenceBase, self).__init__(**kwargs)
self.reference_type = None # type: Optional[str]
class PartialBatchDeployment(msrest.serialization.Model):
"""Mutable batch inference settings per deployment.
:ivar code_configuration: Code configuration for the endpoint deployment.
:vartype code_configuration:
~azure.mgmt.machinelearningservices.models.PartialCodeConfiguration
:ivar compute: Compute binding definition.
:vartype compute: str
:ivar description: Description of the endpoint deployment.
:vartype description: str
:ivar environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:vartype environment_id: str
:ivar environment_variables: Environment variables configuration for the deployment.
:vartype environment_variables: dict[str, str]
:ivar error_threshold: Error threshold: if the error count for the entire input goes above this
value, the batch inference will be aborted. Range is [-1, int.MaxValue].
For FileDataset, this value is the count of file failures.
For TabularDataset, this value is the count of record failures.
If set to -1 (the lower bound), all failures during batch inference will be ignored.
:vartype error_threshold: int
:ivar logging_level: Logging level for batch inference operation. Possible values include:
"Info", "Warning", "Debug".
:vartype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
:ivar max_concurrency_per_instance: Indicates number of processes per instance.
:vartype max_concurrency_per_instance: int
:ivar mini_batch_size: Size of the mini-batch passed to each batch invocation.
For FileDataset, this is the number of files per mini-batch.
For TabularDataset, this is the size of the records in bytes, per mini-batch.
:vartype mini_batch_size: long
:ivar model: Reference to the model asset for the endpoint deployment.
:vartype model: ~azure.mgmt.machinelearningservices.models.PartialAssetReferenceBase
:ivar output_action: Indicates how the output will be organized. Possible values include:
"SummaryOnly", "AppendRow".
:vartype output_action: str or ~azure.mgmt.machinelearningservices.models.BatchOutputAction
:ivar output_file_name: Customized output file name for append_row output action.
:vartype output_file_name: str
:ivar properties: Property dictionary. Properties can be added, but not removed or altered.
:vartype properties: dict[str, str]
:ivar retry_settings: Retry Settings for the batch inference operation.
:vartype retry_settings: ~azure.mgmt.machinelearningservices.models.PartialBatchRetrySettings
"""
_attribute_map = {
'code_configuration': {'key': 'codeConfiguration', 'type': 'PartialCodeConfiguration'},
'compute': {'key': 'compute', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'error_threshold': {'key': 'errorThreshold', 'type': 'int'},
'logging_level': {'key': 'loggingLevel', 'type': 'str'},
'max_concurrency_per_instance': {'key': 'maxConcurrencyPerInstance', 'type': 'int'},
'mini_batch_size': {'key': 'miniBatchSize', 'type': 'long'},
'model': {'key': 'model', 'type': 'PartialAssetReferenceBase'},
'output_action': {'key': 'outputAction', 'type': 'str'},
'output_file_name': {'key': 'outputFileName', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'retry_settings': {'key': 'retrySettings', 'type': 'PartialBatchRetrySettings'},
}
def __init__(
self,
*,
code_configuration: Optional["PartialCodeConfiguration"] = None,
compute: Optional[str] = None,
description: Optional[str] = None,
environment_id: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
error_threshold: Optional[int] = None,
logging_level: Optional[Union[str, "BatchLoggingLevel"]] = None,
max_concurrency_per_instance: Optional[int] = None,
mini_batch_size: Optional[int] = None,
model: Optional["PartialAssetReferenceBase"] = None,
output_action: Optional[Union[str, "BatchOutputAction"]] = None,
output_file_name: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
retry_settings: Optional["PartialBatchRetrySettings"] = None,
**kwargs
):
"""
:keyword code_configuration: Code configuration for the endpoint deployment.
:paramtype code_configuration:
~azure.mgmt.machinelearningservices.models.PartialCodeConfiguration
:keyword compute: Compute binding definition.
:paramtype compute: str
:keyword description: Description of the endpoint deployment.
:paramtype description: str
:keyword environment_id: ARM resource ID of the environment specification for the endpoint
deployment.
:paramtype environment_id: str
:keyword environment_variables: Environment variables configuration for the deployment.
:paramtype environment_variables: dict[str, str]
:keyword error_threshold: Error threshold: if the error count for the entire input goes above
this value, the batch inference will be aborted. Range is [-1, int.MaxValue].
For FileDataset, this value is the count of file failures.
For TabularDataset, this value is the count of record failures.
If set to -1 (the lower bound), all failures during batch inference will be ignored.
:paramtype error_threshold: int
:keyword logging_level: Logging level for batch inference operation. Possible values include:
"Info", "Warning", "Debug".
:paramtype logging_level: str or ~azure.mgmt.machinelearningservices.models.BatchLoggingLevel
:keyword max_concurrency_per_instance: Indicates number of processes per instance.
:paramtype max_concurrency_per_instance: int
:keyword mini_batch_size: Size of the mini-batch passed to each batch invocation.
For FileDataset, this is the number of files per mini-batch.
For TabularDataset, this is the size of the records in bytes, per mini-batch.
:paramtype mini_batch_size: long
:keyword model: Reference to the model asset for the endpoint deployment.
:paramtype model: ~azure.mgmt.machinelearningservices.models.PartialAssetReferenceBase
:keyword output_action: Indicates how the output will be organized. Possible values include:
"SummaryOnly", "AppendRow".
:paramtype output_action: str or ~azure.mgmt.machinelearningservices.models.BatchOutputAction
:keyword output_file_name: Customized output file name for append_row output action.
:paramtype output_file_name: str
:keyword properties: Property dictionary. Properties can be added, but not removed or altered.
:paramtype properties: dict[str, str]
:keyword retry_settings: Retry Settings for the batch inference operation.
:paramtype retry_settings: ~azure.mgmt.machinelearningservices.models.PartialBatchRetrySettings
"""
super(PartialBatchDeployment, self).__init__(**kwargs)
self.code_configuration = code_configuration
self.compute = compute
self.description = description
self.environment_id = environment_id
self.environment_variables = environment_variables
self.error_threshold = error_threshold
self.logging_level = logging_level
self.max_concurrency_per_instance = max_concurrency_per_instance
self.mini_batch_size = mini_batch_size
self.model = model
self.output_action = output_action
self.output_file_name = output_file_name
self.properties = properties
self.retry_settings = retry_settings
class PartialBatchDeploymentPartialTrackedResource(msrest.serialization.Model):
"""Strictly used in update requests.
:ivar identity: Managed service identity (system assigned and/or user assigned identities).
:vartype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
:ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:vartype kind: str
:ivar location: The geo-location where the resource lives.
:vartype location: str
:ivar properties: Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchDeployment
:ivar sku: Sku details required for ARM contract for Autoscaling.
:vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_attribute_map = {
'identity': {'key': 'identity', 'type': 'PartialManagedServiceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PartialBatchDeployment'},
'sku': {'key': 'sku', 'type': 'PartialSku'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
identity: Optional["PartialManagedServiceIdentity"] = None,
kind: Optional[str] = None,
location: Optional[str] = None,
properties: Optional["PartialBatchDeployment"] = None,
sku: Optional["PartialSku"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword identity: Managed service identity (system assigned and/or user assigned identities).
:paramtype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
:keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:paramtype kind: str
:keyword location: The geo-location where the resource lives.
:paramtype location: str
:keyword properties: Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchDeployment
:keyword sku: Sku details required for ARM contract for Autoscaling.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
"""
super(PartialBatchDeploymentPartialTrackedResource, self).__init__(**kwargs)
self.identity = identity
self.kind = kind
self.location = location
self.properties = properties
self.sku = sku
self.tags = tags
class PartialBatchEndpoint(msrest.serialization.Model):
"""Mutable Batch endpoint configuration.
:ivar defaults: Default values for Batch Endpoint.
:vartype defaults: ~azure.mgmt.machinelearningservices.models.BatchEndpointDefaults
"""
_attribute_map = {
'defaults': {'key': 'defaults', 'type': 'BatchEndpointDefaults'},
}
def __init__(
self,
*,
defaults: Optional["BatchEndpointDefaults"] = None,
**kwargs
):
"""
:keyword defaults: Default values for Batch Endpoint.
:paramtype defaults: ~azure.mgmt.machinelearningservices.models.BatchEndpointDefaults
"""
super(PartialBatchEndpoint, self).__init__(**kwargs)
self.defaults = defaults
class PartialBatchEndpointPartialTrackedResource(msrest.serialization.Model):
"""Strictly used in update requests.
:ivar identity: Managed service identity (system assigned and/or user assigned identities).
:vartype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
:ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:vartype kind: str
:ivar location: The geo-location where the resource lives.
:vartype location: str
:ivar properties: Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchEndpoint
:ivar sku: Sku details required for ARM contract for Autoscaling.
:vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_attribute_map = {
'identity': {'key': 'identity', 'type': 'PartialManagedServiceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PartialBatchEndpoint'},
'sku': {'key': 'sku', 'type': 'PartialSku'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
identity: Optional["PartialManagedServiceIdentity"] = None,
kind: Optional[str] = None,
location: Optional[str] = None,
properties: Optional["PartialBatchEndpoint"] = None,
sku: Optional["PartialSku"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword identity: Managed service identity (system assigned and/or user assigned identities).
:paramtype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
:keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:paramtype kind: str
:keyword location: The geo-location where the resource lives.
:paramtype location: str
:keyword properties: Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.PartialBatchEndpoint
:keyword sku: Sku details required for ARM contract for Autoscaling.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
"""
super(PartialBatchEndpointPartialTrackedResource, self).__init__(**kwargs)
self.identity = identity
self.kind = kind
self.location = location
self.properties = properties
self.sku = sku
self.tags = tags
class PartialBatchRetrySettings(msrest.serialization.Model):
"""Retry settings for a batch inference operation.
:ivar max_retries: Maximum retry count for a mini-batch.
:vartype max_retries: int
:ivar timeout: Invocation timeout for a mini-batch, in ISO 8601 format.
:vartype timeout: ~datetime.timedelta
"""
_attribute_map = {
'max_retries': {'key': 'maxRetries', 'type': 'int'},
'timeout': {'key': 'timeout', 'type': 'duration'},
}
def __init__(
self,
*,
max_retries: Optional[int] = None,
timeout: Optional[datetime.timedelta] = None,
**kwargs
):
"""
:keyword max_retries: Maximum retry count for a mini-batch.
:paramtype max_retries: int
:keyword timeout: Invocation timeout for a mini-batch, in ISO 8601 format.
:paramtype timeout: ~datetime.timedelta
"""
super(PartialBatchRetrySettings, self).__init__(**kwargs)
self.max_retries = max_retries
self.timeout = timeout
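# Illustrative sketch, not part of the generated model code: retry settings for a
# batch deployment update. The retry count and one-minute timeout are assumed
# example values.
def _example_partial_batch_retry_settings():
    return PartialBatchRetrySettings(
        max_retries=3,
        timeout=datetime.timedelta(minutes=1),
    )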
class PartialCodeConfiguration(msrest.serialization.Model):
"""Configuration for a scoring code asset.
:ivar code_id: ARM resource ID of the code asset.
:vartype code_id: str
:ivar scoring_script: The script to execute on startup, e.g. "score.py".
:vartype scoring_script: str
"""
_validation = {
'scoring_script': {'min_length': 1},
}
_attribute_map = {
'code_id': {'key': 'codeId', 'type': 'str'},
'scoring_script': {'key': 'scoringScript', 'type': 'str'},
}
def __init__(
self,
*,
code_id: Optional[str] = None,
scoring_script: Optional[str] = None,
**kwargs
):
"""
:keyword code_id: ARM resource ID of the code asset.
:paramtype code_id: str
:keyword scoring_script: The script to execute on startup, e.g. "score.py".
:paramtype scoring_script: str
"""
super(PartialCodeConfiguration, self).__init__(**kwargs)
self.code_id = code_id
self.scoring_script = scoring_script
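# Illustrative sketch, not part of the generated model code: a partial code
# configuration for an update request. The code asset ID below is a placeholder,
# not a real resource.
def _example_partial_code_configuration():
    code_id = (
        "/subscriptions/00000000-0000-0000-0000-000000000000"
        "/resourceGroups/example-rg/providers/Microsoft.MachineLearningServices"
        "/workspaces/example-ws/codes/example-code/versions/1"
    )
    return PartialCodeConfiguration(code_id=code_id, scoring_script="score.py")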
class PartialDataPathAssetReference(PartialAssetReferenceBase):
"""Reference to an asset via its path in a datastore.
All required parameters must be populated in order to send to Azure.
:ivar reference_type: Required. [Required] Specifies the type of asset reference.Constant
filled by server. Possible values include: "Id", "DataPath", "OutputPath".
:vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
:ivar datastore_id: ARM resource ID of the datastore where the asset is located.
:vartype datastore_id: str
:ivar path: The path of the file/directory in the datastore.
:vartype path: str
"""
_validation = {
'reference_type': {'required': True},
}
_attribute_map = {
'reference_type': {'key': 'referenceType', 'type': 'str'},
'datastore_id': {'key': 'datastoreId', 'type': 'str'},
'path': {'key': 'path', 'type': 'str'},
}
def __init__(
self,
*,
datastore_id: Optional[str] = None,
path: Optional[str] = None,
**kwargs
):
"""
:keyword datastore_id: ARM resource ID of the datastore where the asset is located.
:paramtype datastore_id: str
:keyword path: The path of the file/directory in the datastore.
:paramtype path: str
"""
super(PartialDataPathAssetReference, self).__init__(**kwargs)
self.reference_type = 'DataPath' # type: str
self.datastore_id = datastore_id
self.path = path
class PartialIdAssetReference(PartialAssetReferenceBase):
"""Reference to an asset via its ARM resource ID.
All required parameters must be populated in order to send to Azure.
:ivar reference_type: Required. [Required] Specifies the type of asset reference.Constant
filled by server. Possible values include: "Id", "DataPath", "OutputPath".
:vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
:ivar asset_id: ARM resource ID of the asset.
:vartype asset_id: str
"""
_validation = {
'reference_type': {'required': True},
}
_attribute_map = {
'reference_type': {'key': 'referenceType', 'type': 'str'},
'asset_id': {'key': 'assetId', 'type': 'str'},
}
def __init__(
self,
*,
asset_id: Optional[str] = None,
**kwargs
):
"""
:keyword asset_id: ARM resource ID of the asset.
:paramtype asset_id: str
"""
super(PartialIdAssetReference, self).__init__(**kwargs)
self.reference_type = 'Id' # type: str
self.asset_id = asset_id
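# Illustrative sketch, not part of the generated model code: referencing a model
# asset by ARM resource ID in an update request. The ID is a placeholder.
def _example_partial_id_asset_reference():
    asset_id = (
        "/subscriptions/00000000-0000-0000-0000-000000000000"
        "/resourceGroups/example-rg/providers/Microsoft.MachineLearningServices"
        "/workspaces/example-ws/models/example-model/versions/1"
    )
    return PartialIdAssetReference(asset_id=asset_id)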
class PartialOnlineDeployment(msrest.serialization.Model):
"""Mutable online deployment configuration.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: PartialKubernetesOnlineDeployment, PartialManagedOnlineDeployment.
All required parameters must be populated in order to send to Azure.
:ivar endpoint_compute_type: Required. [Required] The compute type of the endpoint.Constant
filled by server. Possible values include: "Managed", "Kubernetes", "AzureMLCompute".
:vartype endpoint_compute_type: str or
~azure.mgmt.machinelearningservices.models.EndpointComputeType
"""
_validation = {
'endpoint_compute_type': {'required': True},
}
_attribute_map = {
'endpoint_compute_type': {'key': 'endpointComputeType', 'type': 'str'},
}
_subtype_map = {
'endpoint_compute_type': {'Kubernetes': 'PartialKubernetesOnlineDeployment', 'Managed': 'PartialManagedOnlineDeployment'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(PartialOnlineDeployment, self).__init__(**kwargs)
self.endpoint_compute_type = None # type: Optional[str]
class PartialKubernetesOnlineDeployment(PartialOnlineDeployment):
"""Properties specific to a KubernetesOnlineDeployment.
All required parameters must be populated in order to send to Azure.
:ivar endpoint_compute_type: Required. [Required] The compute type of the endpoint.Constant
filled by server. Possible values include: "Managed", "Kubernetes", "AzureMLCompute".
:vartype endpoint_compute_type: str or
~azure.mgmt.machinelearningservices.models.EndpointComputeType
"""
_validation = {
'endpoint_compute_type': {'required': True},
}
_attribute_map = {
'endpoint_compute_type': {'key': 'endpointComputeType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(PartialKubernetesOnlineDeployment, self).__init__(**kwargs)
self.endpoint_compute_type = 'Kubernetes' # type: str
class PartialManagedOnlineDeployment(PartialOnlineDeployment):
"""Properties specific to a ManagedOnlineDeployment.
All required parameters must be populated in order to send to Azure.
:ivar endpoint_compute_type: Required. [Required] The compute type of the endpoint.Constant
filled by server. Possible values include: "Managed", "Kubernetes", "AzureMLCompute".
:vartype endpoint_compute_type: str or
~azure.mgmt.machinelearningservices.models.EndpointComputeType
"""
_validation = {
'endpoint_compute_type': {'required': True},
}
_attribute_map = {
'endpoint_compute_type': {'key': 'endpointComputeType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(PartialManagedOnlineDeployment, self).__init__(**kwargs)
self.endpoint_compute_type = 'Managed' # type: str
class PartialManagedServiceIdentity(msrest.serialization.Model):
"""Managed service identity (system assigned and/or user assigned identities).
:ivar type: Managed service identity (system assigned and/or user assigned identities).
Possible values include: "None", "SystemAssigned", "UserAssigned",
"SystemAssigned,UserAssigned".
:vartype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
:ivar user_assigned_identities: The set of user assigned identities associated with the
resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
The dictionary values can be empty objects ({}) in requests.
:vartype user_assigned_identities: dict[str, any]
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{object}'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ManagedServiceIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, Any]] = None,
**kwargs
):
"""
:keyword type: Managed service identity (system assigned and/or user assigned identities).
Possible values include: "None", "SystemAssigned", "UserAssigned",
"SystemAssigned,UserAssigned".
:paramtype type: str or ~azure.mgmt.machinelearningservices.models.ManagedServiceIdentityType
:keyword user_assigned_identities: The set of user assigned identities associated with the
resource. The userAssignedIdentities dictionary keys will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
The dictionary values can be empty objects ({}) in requests.
:paramtype user_assigned_identities: dict[str, any]
"""
super(PartialManagedServiceIdentity, self).__init__(**kwargs)
self.type = type
self.user_assigned_identities = user_assigned_identities
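# Illustrative sketch, not part of the generated model code: a user-assigned
# identity payload for an update request. The identity resource ID is a
# placeholder; dictionary values may be empty objects ({}) in requests.
def _example_partial_managed_service_identity():
    identity_id = (
        "/subscriptions/00000000-0000-0000-0000-000000000000"
        "/resourceGroups/example-rg/providers/Microsoft.ManagedIdentity"
        "/userAssignedIdentities/example-identity"
    )
    return PartialManagedServiceIdentity(
        type="UserAssigned",
        user_assigned_identities={identity_id: {}},
    )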
class PartialOnlineDeploymentPartialTrackedResource(msrest.serialization.Model):
"""Strictly used in update requests.
:ivar identity: Managed service identity (system assigned and/or user assigned identities).
:vartype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
:ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:vartype kind: str
:ivar location: The geo-location where the resource lives.
:vartype location: str
:ivar properties: Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.PartialOnlineDeployment
:ivar sku: Sku details required for ARM contract for Autoscaling.
:vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_attribute_map = {
'identity': {'key': 'identity', 'type': 'PartialManagedServiceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PartialOnlineDeployment'},
'sku': {'key': 'sku', 'type': 'PartialSku'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
identity: Optional["PartialManagedServiceIdentity"] = None,
kind: Optional[str] = None,
location: Optional[str] = None,
properties: Optional["PartialOnlineDeployment"] = None,
sku: Optional["PartialSku"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword identity: Managed service identity (system assigned and/or user assigned identities).
:paramtype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
:keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:paramtype kind: str
:keyword location: The geo-location where the resource lives.
:paramtype location: str
:keyword properties: Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.PartialOnlineDeployment
:keyword sku: Sku details required for ARM contract for Autoscaling.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
"""
super(PartialOnlineDeploymentPartialTrackedResource, self).__init__(**kwargs)
self.identity = identity
self.kind = kind
self.location = location
self.properties = properties
self.sku = sku
self.tags = tags
class PartialOnlineEndpoint(msrest.serialization.Model):
"""Mutable online endpoint configuration.
:ivar mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
returned scoring. Traffic values need to sum to at most 50.
:vartype mirror_traffic: dict[str, int]
:ivar public_network_access: Set to "Enabled" for endpoints that should allow public access
when Private Link is enabled. Possible values include: "Enabled", "Disabled".
:vartype public_network_access: str or
~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
:ivar traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic values
need to sum to 100.
:vartype traffic: dict[str, int]
"""
_attribute_map = {
'mirror_traffic': {'key': 'mirrorTraffic', 'type': '{int}'},
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
'traffic': {'key': 'traffic', 'type': '{int}'},
}
def __init__(
self,
*,
mirror_traffic: Optional[Dict[str, int]] = None,
public_network_access: Optional[Union[str, "PublicNetworkAccessType"]] = None,
traffic: Optional[Dict[str, int]] = None,
**kwargs
):
"""
:keyword mirror_traffic: Percentage of traffic to be mirrored to each deployment without using
returned scoring. Traffic values need to sum to at most 50.
:paramtype mirror_traffic: dict[str, int]
:keyword public_network_access: Set to "Enabled" for endpoints that should allow public access
when Private Link is enabled. Possible values include: "Enabled", "Disabled".
:paramtype public_network_access: str or
~azure.mgmt.machinelearningservices.models.PublicNetworkAccessType
:keyword traffic: Percentage of traffic from endpoint to divert to each deployment. Traffic
values need to sum to 100.
:paramtype traffic: dict[str, int]
"""
super(PartialOnlineEndpoint, self).__init__(**kwargs)
self.mirror_traffic = mirror_traffic
self.public_network_access = public_network_access
self.traffic = traffic
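# Illustrative sketch, not part of the generated model code: a partial online
# endpoint update that splits live traffic between two deployments (values must
# sum to 100) and mirrors part of it (values must sum to at most 50). The
# deployment names "blue" and "green" are placeholders.
def _example_partial_online_endpoint():
    return PartialOnlineEndpoint(
        traffic={"blue": 90, "green": 10},
        mirror_traffic={"green": 10},
        public_network_access="Enabled",
    )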
class PartialOnlineEndpointPartialTrackedResource(msrest.serialization.Model):
"""Strictly used in update requests.
:ivar identity: Managed service identity (system assigned and/or user assigned identities).
:vartype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
:ivar kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:vartype kind: str
:ivar location: The geo-location where the resource lives.
:vartype location: str
:ivar properties: Additional attributes of the entity.
:vartype properties: ~azure.mgmt.machinelearningservices.models.PartialOnlineEndpoint
:ivar sku: Sku details required for ARM contract for Autoscaling.
:vartype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
:ivar tags: A set of tags. Resource tags.
:vartype tags: dict[str, str]
"""
_attribute_map = {
'identity': {'key': 'identity', 'type': 'PartialManagedServiceIdentity'},
'kind': {'key': 'kind', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'PartialOnlineEndpoint'},
'sku': {'key': 'sku', 'type': 'PartialSku'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
identity: Optional["PartialManagedServiceIdentity"] = None,
kind: Optional[str] = None,
location: Optional[str] = None,
properties: Optional["PartialOnlineEndpoint"] = None,
sku: Optional["PartialSku"] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword identity: Managed service identity (system assigned and/or user assigned identities).
:paramtype identity: ~azure.mgmt.machinelearningservices.models.PartialManagedServiceIdentity
:keyword kind: Metadata used by portal/tooling/etc to render different UX experiences for
resources of the same type.
:paramtype kind: str
:keyword location: The geo-location where the resource lives.
:paramtype location: str
:keyword properties: Additional attributes of the entity.
:paramtype properties: ~azure.mgmt.machinelearningservices.models.PartialOnlineEndpoint
:keyword sku: Sku details required for ARM contract for Autoscaling.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.PartialSku
:keyword tags: A set of tags. Resource tags.
:paramtype tags: dict[str, str]
"""
super(PartialOnlineEndpointPartialTrackedResource, self).__init__(**kwargs)
self.identity = identity
self.kind = kind
self.location = location
self.properties = properties
self.sku = sku
self.tags = tags
class PartialOutputPathAssetReference(PartialAssetReferenceBase):
"""Reference to an asset via its path in a job output.
All required parameters must be populated in order to send to Azure.
:ivar reference_type: Required. [Required] Specifies the type of asset reference.Constant
filled by server. Possible values include: "Id", "DataPath", "OutputPath".
:vartype reference_type: str or ~azure.mgmt.machinelearningservices.models.ReferenceType
:ivar job_id: ARM resource ID of the job.
:vartype job_id: str
:ivar path: The path of the file/directory in the job output.
:vartype path: str
"""
_validation = {
'reference_type': {'required': True},
}
_attribute_map = {
'reference_type': {'key': 'referenceType', 'type': 'str'},
'job_id': {'key': 'jobId', 'type': 'str'},
'path': {'key': 'path', 'type': 'str'},
}
def __init__(
self,
*,
job_id: Optional[str] = None,
path: Optional[str] = None,
**kwargs
):
"""
:keyword job_id: ARM resource ID of the job.
:paramtype job_id: str
:keyword path: The path of the file/directory in the job output.
:paramtype path: str
"""
super(PartialOutputPathAssetReference, self).__init__(**kwargs)
self.reference_type = 'OutputPath' # type: str
self.job_id = job_id
self.path = path
class PartialSku(msrest.serialization.Model):
"""Common SKU definition.
:ivar capacity: If the SKU supports scale out/in then the capacity integer should be included.
If scale out/in is not possible for the resource this may be omitted.
:vartype capacity: int
:ivar family: If the service has different generations of hardware, for the same SKU, then that
can be captured here.
:vartype family: str
:ivar name: The name of the SKU. Ex - P3. It is typically a letter+number code.
:vartype name: str
:ivar size: The SKU size. When the name field is the combination of tier and some other value,
this would be the standalone code.
:vartype size: str
:ivar tier: This field is required to be implemented by the Resource Provider if the service
has more than one tier, but is not required on a PUT. Possible values include: "Free", "Basic",
"Standard", "Premium".
:vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
"""
_attribute_map = {
'capacity': {'key': 'capacity', 'type': 'int'},
'family': {'key': 'family', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
capacity: Optional[int] = None,
family: Optional[str] = None,
name: Optional[str] = None,
size: Optional[str] = None,
tier: Optional[Union[str, "SkuTier"]] = None,
**kwargs
):
"""
:keyword capacity: If the SKU supports scale out/in then the capacity integer should be
included. If scale out/in is not possible for the resource this may be omitted.
:paramtype capacity: int
:keyword family: If the service has different generations of hardware, for the same SKU, then
that can be captured here.
:paramtype family: str
:keyword name: The name of the SKU. Ex - P3. It is typically a letter+number code.
:paramtype name: str
:keyword size: The SKU size. When the name field is the combination of tier and some other
value, this would be the standalone code.
:paramtype size: str
:keyword tier: This field is required to be implemented by the Resource Provider if the service
has more than one tier, but is not required on a PUT. Possible values include: "Free", "Basic",
"Standard", "Premium".
:paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
"""
super(PartialSku, self).__init__(**kwargs)
self.capacity = capacity
self.family = family
self.name = name
self.size = size
self.tier = tier
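# Illustrative sketch, not part of the generated model code: a partial SKU for an
# update request. The SKU name, tier, and capacity are assumed example values;
# capacity is included only because this SKU is assumed to support scale out/in.
def _example_partial_sku():
    return PartialSku(name="Default", tier="Standard", capacity=2)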
class PipelineJob(JobBaseDetails):
"""Pipeline Job definition: defines generic to MFE attributes.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar compute_id: ARM resource ID of the compute resource.
:vartype compute_id: str
:ivar display_name: Display name of job.
:vartype display_name: str
:ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:vartype experiment_name: str
:ivar identity: Identity configuration. If set, this should be one of AmlToken,
ManagedIdentity, UserIdentity or null.
Defaults to AmlToken if null.
:vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar job_type: Required. [Required] Specifies the type of job.Constant filled by server.
Possible values include: "AutoML", "Command", "Sweep", "Pipeline".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
:ivar schedule: Schedule definition of job.
If no schedule is provided, the job runs once, immediately after submission.
:vartype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:ivar status: Status of the job. Possible values include: "NotStarted", "Starting",
"Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
"Failed", "Canceled", "NotResponding", "Paused", "Unknown", "Scheduled".
:vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
:ivar inputs: Inputs for the pipeline job.
:vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
:ivar jobs: Jobs construct the Pipeline Job.
:vartype jobs: dict[str, any]
:ivar outputs: Outputs for the pipeline job.
:vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
:ivar settings: Pipeline settings, for things like ContinueRunOnStepFailure etc.
:vartype settings: any
"""
_validation = {
'job_type': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'compute_id': {'key': 'computeId', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'experiment_name': {'key': 'experimentName', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'IdentityConfiguration'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'job_type': {'key': 'jobType', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': 'ScheduleBase'},
'services': {'key': 'services', 'type': '{JobService}'},
'status': {'key': 'status', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '{JobInput}'},
'jobs': {'key': 'jobs', 'type': '{object}'},
'outputs': {'key': 'outputs', 'type': '{JobOutput}'},
'settings': {'key': 'settings', 'type': 'object'},
}
def __init__(
self,
*,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
compute_id: Optional[str] = None,
display_name: Optional[str] = None,
experiment_name: Optional[str] = "Default",
identity: Optional["IdentityConfiguration"] = None,
is_archived: Optional[bool] = False,
schedule: Optional["ScheduleBase"] = None,
services: Optional[Dict[str, "JobService"]] = None,
inputs: Optional[Dict[str, "JobInput"]] = None,
jobs: Optional[Dict[str, Any]] = None,
outputs: Optional[Dict[str, "JobOutput"]] = None,
settings: Optional[Any] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword compute_id: ARM resource ID of the compute resource.
:paramtype compute_id: str
:keyword display_name: Display name of job.
:paramtype display_name: str
:keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:paramtype experiment_name: str
:keyword identity: Identity configuration. If set, this should be one of AmlToken,
ManagedIdentity, UserIdentity or null.
Defaults to AmlToken if null.
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword schedule: Schedule definition of job.
If no schedule is provided, the job runs once, immediately after submission.
:paramtype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:keyword inputs: Inputs for the pipeline job.
:paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
:keyword jobs: Jobs construct the Pipeline Job.
:paramtype jobs: dict[str, any]
:keyword outputs: Outputs for the pipeline job.
:paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
:keyword settings: Pipeline settings, for things like ContinueRunOnStepFailure etc.
:paramtype settings: any
"""
super(PipelineJob, self).__init__(description=description, properties=properties, tags=tags, compute_id=compute_id, display_name=display_name, experiment_name=experiment_name, identity=identity, is_archived=is_archived, schedule=schedule, services=services, **kwargs)
self.job_type = 'Pipeline' # type: str
self.inputs = inputs
self.jobs = jobs
self.outputs = outputs
self.settings = settings
class ProbeSettings(msrest.serialization.Model):
"""Deployment container liveness/readiness probe configuration.
:ivar failure_threshold: The number of failures to allow before returning an unhealthy status.
:vartype failure_threshold: int
:ivar initial_delay: The delay before the first probe in ISO 8601 format.
:vartype initial_delay: ~datetime.timedelta
:ivar period: The length of time between probes in ISO 8601 format.
:vartype period: ~datetime.timedelta
:ivar success_threshold: The number of successful probes before returning a healthy status.
:vartype success_threshold: int
:ivar timeout: The probe timeout in ISO 8601 format.
:vartype timeout: ~datetime.timedelta
"""
_attribute_map = {
'failure_threshold': {'key': 'failureThreshold', 'type': 'int'},
'initial_delay': {'key': 'initialDelay', 'type': 'duration'},
'period': {'key': 'period', 'type': 'duration'},
'success_threshold': {'key': 'successThreshold', 'type': 'int'},
'timeout': {'key': 'timeout', 'type': 'duration'},
}
def __init__(
self,
*,
failure_threshold: Optional[int] = 30,
initial_delay: Optional[datetime.timedelta] = None,
period: Optional[datetime.timedelta] = "PT10S",
success_threshold: Optional[int] = 1,
timeout: Optional[datetime.timedelta] = "PT2S",
**kwargs
):
"""
:keyword failure_threshold: The number of failures to allow before returning an unhealthy
status.
:paramtype failure_threshold: int
:keyword initial_delay: The delay before the first probe in ISO 8601 format.
:paramtype initial_delay: ~datetime.timedelta
:keyword period: The length of time between probes in ISO 8601 format.
:paramtype period: ~datetime.timedelta
:keyword success_threshold: The number of successful probes before returning a healthy status.
:paramtype success_threshold: int
:keyword timeout: The probe timeout in ISO 8601 format.
:paramtype timeout: ~datetime.timedelta
"""
super(ProbeSettings, self).__init__(**kwargs)
self.failure_threshold = failure_threshold
self.initial_delay = initial_delay
self.period = period
self.success_threshold = success_threshold
self.timeout = timeout
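# Illustrative sketch, not part of the generated model code: liveness/readiness
# probe settings using timedelta values; all numbers are assumed examples close
# to the documented defaults.
def _example_probe_settings():
    return ProbeSettings(
        failure_threshold=30,
        initial_delay=datetime.timedelta(seconds=10),
        period=datetime.timedelta(seconds=10),
        success_threshold=1,
        timeout=datetime.timedelta(seconds=2),
    )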
class PyTorch(DistributionConfiguration):
"""PyTorch distribution configuration.
All required parameters must be populated in order to send to Azure.
:ivar distribution_type: Required. [Required] Specifies the type of distribution
framework.Constant filled by server. Possible values include: "PyTorch", "TensorFlow", "Mpi".
:vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
:ivar process_count_per_instance: Number of processes per node.
:vartype process_count_per_instance: int
"""
_validation = {
'distribution_type': {'required': True},
}
_attribute_map = {
'distribution_type': {'key': 'distributionType', 'type': 'str'},
'process_count_per_instance': {'key': 'processCountPerInstance', 'type': 'int'},
}
def __init__(
self,
*,
process_count_per_instance: Optional[int] = None,
**kwargs
):
"""
:keyword process_count_per_instance: Number of processes per node.
:paramtype process_count_per_instance: int
"""
super(PyTorch, self).__init__(**kwargs)
self.distribution_type = 'PyTorch' # type: str
self.process_count_per_instance = process_count_per_instance
class RandomSamplingAlgorithm(SamplingAlgorithm):
"""Defines a Sampling Algorithm that generates values randomly.
All required parameters must be populated in order to send to Azure.
:ivar sampling_algorithm_type: Required. [Required] The algorithm used for generating
hyperparameter values, along with configuration properties.Constant filled by server. Possible
values include: "Grid", "Random", "Bayesian".
:vartype sampling_algorithm_type: str or
~azure.mgmt.machinelearningservices.models.SamplingAlgorithmType
:ivar rule: The specific type of random algorithm. Possible values include: "Random", "Sobol".
:vartype rule: str or ~azure.mgmt.machinelearningservices.models.RandomSamplingAlgorithmRule
:ivar seed: An optional integer to use as the seed for random number generation.
:vartype seed: int
"""
_validation = {
'sampling_algorithm_type': {'required': True},
}
_attribute_map = {
'sampling_algorithm_type': {'key': 'samplingAlgorithmType', 'type': 'str'},
'rule': {'key': 'rule', 'type': 'str'},
'seed': {'key': 'seed', 'type': 'int'},
}
def __init__(
self,
*,
rule: Optional[Union[str, "RandomSamplingAlgorithmRule"]] = None,
seed: Optional[int] = None,
**kwargs
):
"""
:keyword rule: The specific type of random algorithm. Possible values include: "Random",
"Sobol".
:paramtype rule: str or ~azure.mgmt.machinelearningservices.models.RandomSamplingAlgorithmRule
:keyword seed: An optional integer to use as the seed for random number generation.
:paramtype seed: int
"""
super(RandomSamplingAlgorithm, self).__init__(**kwargs)
self.sampling_algorithm_type = 'Random' # type: str
self.rule = rule
self.seed = seed
class RecurrencePattern(msrest.serialization.Model):
"""Recurrence schedule pattern definition.
All required parameters must be populated in order to send to Azure.
:ivar hours: Required. [Required] List of hours for recurrence schedule pattern.
:vartype hours: list[int]
:ivar minutes: Required. [Required] List of minutes for recurrence schedule pattern.
:vartype minutes: list[int]
:ivar weekdays: List of weekdays for recurrence schedule pattern.
:vartype weekdays: list[str or ~azure.mgmt.machinelearningservices.models.Weekday]
"""
_validation = {
'hours': {'required': True},
'minutes': {'required': True},
}
_attribute_map = {
'hours': {'key': 'hours', 'type': '[int]'},
'minutes': {'key': 'minutes', 'type': '[int]'},
'weekdays': {'key': 'weekdays', 'type': '[str]'},
}
def __init__(
self,
*,
hours: List[int],
minutes: List[int],
weekdays: Optional[List[Union[str, "Weekday"]]] = None,
**kwargs
):
"""
:keyword hours: Required. [Required] List of hours for recurrence schedule pattern.
:paramtype hours: list[int]
:keyword minutes: Required. [Required] List of minutes for recurrence schedule pattern.
:paramtype minutes: list[int]
:keyword weekdays: List of weekdays for recurrence schedule pattern.
:paramtype weekdays: list[str or ~azure.mgmt.machinelearningservices.models.Weekday]
"""
super(RecurrencePattern, self).__init__(**kwargs)
self.hours = hours
self.minutes = minutes
self.weekdays = weekdays
class RecurrenceSchedule(ScheduleBase):
"""Recurrence schedule definition.
All required parameters must be populated in order to send to Azure.
:ivar end_time: Specifies end time of schedule in ISO 8601 format.
If not present, the schedule will run indefinitely.
:vartype end_time: ~datetime.datetime
:ivar schedule_status: Specifies the schedule's status. Possible values include: "Enabled",
"Disabled".
:vartype schedule_status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
:ivar schedule_type: Required. [Required] Specifies the schedule type.Constant filled by
server. Possible values include: "Cron", "Recurrence".
:vartype schedule_type: str or ~azure.mgmt.machinelearningservices.models.ScheduleType
:ivar start_time: Specifies start time of schedule in ISO 8601 format.
:vartype start_time: ~datetime.datetime
:ivar time_zone: Specifies time zone in which the schedule runs.
TimeZone should follow Windows time zone format.
:vartype time_zone: str
:ivar frequency: Required. [Required] Specifies frequency with which to trigger schedule.
Possible values include: "Minute", "Hour", "Day", "Week", "Month".
:vartype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
:ivar interval: Required. [Required] Specifies schedule interval in conjunction with frequency.
:vartype interval: int
:ivar pattern: Specifies the recurrence schedule pattern.
:vartype pattern: ~azure.mgmt.machinelearningservices.models.RecurrencePattern
"""
_validation = {
'schedule_type': {'required': True},
'frequency': {'required': True},
'interval': {'required': True},
}
_attribute_map = {
'end_time': {'key': 'endTime', 'type': 'iso-8601'},
'schedule_status': {'key': 'scheduleStatus', 'type': 'str'},
'schedule_type': {'key': 'scheduleType', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'time_zone': {'key': 'timeZone', 'type': 'str'},
'frequency': {'key': 'frequency', 'type': 'str'},
'interval': {'key': 'interval', 'type': 'int'},
'pattern': {'key': 'pattern', 'type': 'RecurrencePattern'},
}
def __init__(
self,
*,
frequency: Union[str, "RecurrenceFrequency"],
interval: int,
end_time: Optional[datetime.datetime] = None,
schedule_status: Optional[Union[str, "ScheduleStatus"]] = None,
start_time: Optional[datetime.datetime] = None,
time_zone: Optional[str] = "UTC",
pattern: Optional["RecurrencePattern"] = None,
**kwargs
):
"""
:keyword end_time: Specifies end time of schedule in ISO 8601 format.
If not present, the schedule will run indefinitely.
:paramtype end_time: ~datetime.datetime
:keyword schedule_status: Specifies the schedule's status. Possible values include: "Enabled",
"Disabled".
:paramtype schedule_status: str or ~azure.mgmt.machinelearningservices.models.ScheduleStatus
:keyword start_time: Specifies start time of schedule in ISO 8601 format.
:paramtype start_time: ~datetime.datetime
:keyword time_zone: Specifies time zone in which the schedule runs.
TimeZone should follow Windows time zone format.
:paramtype time_zone: str
:keyword frequency: Required. [Required] Specifies frequency with which to trigger
schedule. Possible values include: "Minute", "Hour", "Day", "Week", "Month".
:paramtype frequency: str or ~azure.mgmt.machinelearningservices.models.RecurrenceFrequency
:keyword interval: Required. [Required] Specifies schedule interval in conjunction with
frequency.
:paramtype interval: int
:keyword pattern: Specifies the recurrence schedule pattern.
:paramtype pattern: ~azure.mgmt.machinelearningservices.models.RecurrencePattern
"""
super(RecurrenceSchedule, self).__init__(end_time=end_time, schedule_status=schedule_status, start_time=start_time, time_zone=time_zone, **kwargs)
self.schedule_type = 'Recurrence' # type: str
self.frequency = frequency
self.interval = interval
self.pattern = pattern
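# Illustrative sketch, not part of the generated model code: a weekly recurrence
# that triggers every Monday at 09:30. The frequency, interval, pattern, and
# Windows-format time zone are assumed example values.
def _example_recurrence_schedule():
    pattern = RecurrencePattern(hours=[9], minutes=[30], weekdays=["Monday"])
    return RecurrenceSchedule(
        frequency="Week",
        interval=1,
        pattern=pattern,
        time_zone="Pacific Standard Time",
    )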
class RegenerateEndpointKeysRequest(msrest.serialization.Model):
"""RegenerateEndpointKeysRequest.
All required parameters must be populated in order to send to Azure.
:ivar key_type: Required. [Required] Specification for which type of key to generate. Primary
or Secondary. Possible values include: "Primary", "Secondary".
:vartype key_type: str or ~azure.mgmt.machinelearningservices.models.KeyType
:ivar key_value: The value the key is set to.
:vartype key_value: str
"""
_validation = {
'key_type': {'required': True},
}
_attribute_map = {
'key_type': {'key': 'keyType', 'type': 'str'},
'key_value': {'key': 'keyValue', 'type': 'str'},
}
def __init__(
self,
*,
key_type: Union[str, "KeyType"],
key_value: Optional[str] = None,
**kwargs
):
"""
:keyword key_type: Required. [Required] Specification for which type of key to generate.
Primary or Secondary. Possible values include: "Primary", "Secondary".
:paramtype key_type: str or ~azure.mgmt.machinelearningservices.models.KeyType
:keyword key_value: The value the key is set to.
:paramtype key_value: str
"""
super(RegenerateEndpointKeysRequest, self).__init__(**kwargs)
self.key_type = key_type
self.key_value = key_value
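# Illustrative sketch, not part of the generated model code: request body asking
# the service to regenerate the primary key; key_value is omitted so the service
# generates the new key itself.
def _example_regenerate_keys_request():
    return RegenerateEndpointKeysRequest(key_type="Primary")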
class Regression(AutoMLVertical, TableVertical):
"""Regression task in AutoML Table vertical.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Data inputs for AutoMLJob.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalDataSettings
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
:ivar training_settings: Inputs for training phase for an AutoML Job.
:vartype training_settings: ~azure.mgmt.machinelearningservices.models.TrainingSettings
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:ivar task_type: Required. [Required] Task type for AutoMLJob.Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar allowed_models: Allowed models for regression task.
:vartype allowed_models: list[str or
~azure.mgmt.machinelearningservices.models.RegressionModels]
:ivar blocked_models: Blocked models for regression task.
:vartype blocked_models: list[str or
~azure.mgmt.machinelearningservices.models.RegressionModels]
:ivar primary_metric: Primary metric for regression task. Possible values include:
"SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score",
"NormalizedMeanAbsoluteError".
:vartype primary_metric: str or
~azure.mgmt.machinelearningservices.models.RegressionPrimaryMetrics
"""
_validation = {
'task_type': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'TableVerticalDataSettings'},
'featurization_settings': {'key': 'featurizationSettings', 'type': 'TableVerticalFeaturizationSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'TableVerticalLimitSettings'},
'training_settings': {'key': 'trainingSettings', 'type': 'TrainingSettings'},
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
'allowed_models': {'key': 'allowedModels', 'type': '[str]'},
'blocked_models': {'key': 'blockedModels', 'type': '[str]'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
data_settings: Optional["TableVerticalDataSettings"] = None,
featurization_settings: Optional["TableVerticalFeaturizationSettings"] = None,
limit_settings: Optional["TableVerticalLimitSettings"] = None,
training_settings: Optional["TrainingSettings"] = None,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
allowed_models: Optional[List[Union[str, "RegressionModels"]]] = None,
blocked_models: Optional[List[Union[str, "RegressionModels"]]] = None,
primary_metric: Optional[Union[str, "RegressionPrimaryMetrics"]] = None,
**kwargs
):
"""
:keyword data_settings: Data inputs for AutoMLJob.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.TableVerticalDataSettings
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalFeaturizationSettings
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings:
~azure.mgmt.machinelearningservices.models.TableVerticalLimitSettings
:keyword training_settings: Inputs for training phase for an AutoML Job.
:paramtype training_settings: ~azure.mgmt.machinelearningservices.models.TrainingSettings
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:keyword allowed_models: Allowed models for regression task.
:paramtype allowed_models: list[str or
~azure.mgmt.machinelearningservices.models.RegressionModels]
:keyword blocked_models: Blocked models for regression task.
:paramtype blocked_models: list[str or
~azure.mgmt.machinelearningservices.models.RegressionModels]
:keyword primary_metric: Primary metric for regression task. Possible values include:
"SpearmanCorrelation", "NormalizedRootMeanSquaredError", "R2Score",
"NormalizedMeanAbsoluteError".
:paramtype primary_metric: str or
~azure.mgmt.machinelearningservices.models.RegressionPrimaryMetrics
"""
super(Regression, self).__init__(log_verbosity=log_verbosity, data_settings=data_settings, featurization_settings=featurization_settings, limit_settings=limit_settings, training_settings=training_settings, **kwargs)
self.data_settings = data_settings
self.featurization_settings = featurization_settings
self.limit_settings = limit_settings
self.training_settings = training_settings
        self.log_verbosity = log_verbosity
        self.task_type = 'Regression'  # type: str
        self.allowed_models = allowed_models
        self.blocked_models = blocked_models
        self.primary_metric = primary_metric
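# Illustrative usage sketch (not part of the generated models; model names and values below are
# assumptions): a minimal Regression task payload built from the table-vertical settings defined
# in this module.
#
#     regression_task = Regression(
#         primary_metric="NormalizedRootMeanSquaredError",
#         allowed_models=["LightGBM", "ElasticNet"],
#         limit_settings=TableVerticalLimitSettings(max_trials=20, max_concurrent_trials=2),
#     )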
class ResourceConfiguration(msrest.serialization.Model):
"""ResourceConfiguration.
:ivar instance_count: Optional number of instances or nodes used by the compute target.
:vartype instance_count: int
:ivar instance_type: Optional type of VM used as supported by the compute target.
:vartype instance_type: str
:ivar properties: Additional properties bag.
:vartype properties: dict[str, any]
"""
_attribute_map = {
'instance_count': {'key': 'instanceCount', 'type': 'int'},
'instance_type': {'key': 'instanceType', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{object}'},
}
def __init__(
self,
*,
instance_count: Optional[int] = 1,
instance_type: Optional[str] = None,
properties: Optional[Dict[str, Any]] = None,
**kwargs
):
"""
:keyword instance_count: Optional number of instances or nodes used by the compute target.
:paramtype instance_count: int
:keyword instance_type: Optional type of VM used as supported by the compute target.
:paramtype instance_type: str
:keyword properties: Additional properties bag.
:paramtype properties: dict[str, any]
"""
super(ResourceConfiguration, self).__init__(**kwargs)
self.instance_count = instance_count
self.instance_type = instance_type
self.properties = properties
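# Illustrative usage sketch (assumed VM size): a two-node resource request for a compute target.
#
#     resources = ResourceConfiguration(instance_count=2, instance_type="Standard_DS3_v2")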
class Route(msrest.serialization.Model):
"""Route.
All required parameters must be populated in order to send to Azure.
:ivar path: Required. [Required] The path for the route.
:vartype path: str
:ivar port: Required. [Required] The port for the route.
:vartype port: int
"""
_validation = {
'path': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'port': {'required': True},
}
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
}
def __init__(
self,
*,
path: str,
port: int,
**kwargs
):
"""
:keyword path: Required. [Required] The path for the route.
:paramtype path: str
:keyword port: Required. [Required] The port for the route.
:paramtype port: int
"""
super(Route, self).__init__(**kwargs)
self.path = path
self.port = port
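# Illustrative usage sketch (assumed path and port values): routes such as liveness/readiness
# probes for a deployment.
#
#     liveness_route = Route(path="/health", port=8080)
#     readiness_route = Route(path="/ready", port=8080)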
class SasDatastoreCredentials(DatastoreCredentials):
"""SAS datastore credentials configuration.
All required parameters must be populated in order to send to Azure.
    :ivar credentials_type: Required. [Required] Credential type used to authenticate with
     storage. Constant filled by server. Possible values include: "AccountKey", "Certificate",
     "None", "Sas", "ServicePrincipal", "KerberosKeytab", "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
:ivar secrets: Required. [Required] Storage container secrets.
:vartype secrets: ~azure.mgmt.machinelearningservices.models.SasDatastoreSecrets
"""
_validation = {
'credentials_type': {'required': True},
'secrets': {'required': True},
}
_attribute_map = {
'credentials_type': {'key': 'credentialsType', 'type': 'str'},
'secrets': {'key': 'secrets', 'type': 'SasDatastoreSecrets'},
}
def __init__(
self,
*,
secrets: "SasDatastoreSecrets",
**kwargs
):
"""
:keyword secrets: Required. [Required] Storage container secrets.
:paramtype secrets: ~azure.mgmt.machinelearningservices.models.SasDatastoreSecrets
"""
super(SasDatastoreCredentials, self).__init__(**kwargs)
self.credentials_type = 'Sas' # type: str
self.secrets = secrets
class SasDatastoreSecrets(DatastoreSecrets):
"""Datastore SAS secrets.
All required parameters must be populated in order to send to Azure.
    :ivar secrets_type: Required. [Required] Credential type used to authenticate with storage.
     Constant filled by server. Possible values include: "AccountKey", "Certificate", "Sas",
     "ServicePrincipal", "KerberosPassword", "KerberosKeytab".
:vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
:ivar sas_token: Storage container SAS token.
:vartype sas_token: str
"""
_validation = {
'secrets_type': {'required': True},
}
_attribute_map = {
'secrets_type': {'key': 'secretsType', 'type': 'str'},
'sas_token': {'key': 'sasToken', 'type': 'str'},
}
def __init__(
self,
*,
sas_token: Optional[str] = None,
**kwargs
):
"""
:keyword sas_token: Storage container SAS token.
:paramtype sas_token: str
"""
super(SasDatastoreSecrets, self).__init__(**kwargs)
self.secrets_type = 'Sas' # type: str
self.sas_token = sas_token
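# Illustrative usage sketch (placeholder token): pairing SasDatastoreCredentials with its secrets.
#
#     sas_credentials = SasDatastoreCredentials(
#         secrets=SasDatastoreSecrets(sas_token="<sas-token>"),
#     )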
class ServicePrincipalDatastoreCredentials(DatastoreCredentials):
"""Service Principal datastore credentials configuration.
All required parameters must be populated in order to send to Azure.
    :ivar credentials_type: Required. [Required] Credential type used to authenticate with
     storage. Constant filled by server. Possible values include: "AccountKey", "Certificate",
     "None", "Sas", "ServicePrincipal", "KerberosKeytab", "KerberosPassword".
:vartype credentials_type: str or ~azure.mgmt.machinelearningservices.models.CredentialsType
:ivar authority_url: Authority URL used for authentication.
:vartype authority_url: str
:ivar client_id: Required. [Required] Service principal client ID.
:vartype client_id: str
:ivar resource_url: Resource the service principal has access to.
:vartype resource_url: str
:ivar secrets: Required. [Required] Service principal secrets.
:vartype secrets: ~azure.mgmt.machinelearningservices.models.ServicePrincipalDatastoreSecrets
:ivar tenant_id: Required. [Required] ID of the tenant to which the service principal belongs.
:vartype tenant_id: str
"""
_validation = {
'credentials_type': {'required': True},
'client_id': {'required': True},
'secrets': {'required': True},
'tenant_id': {'required': True},
}
_attribute_map = {
'credentials_type': {'key': 'credentialsType', 'type': 'str'},
'authority_url': {'key': 'authorityUrl', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'resource_url': {'key': 'resourceUrl', 'type': 'str'},
'secrets': {'key': 'secrets', 'type': 'ServicePrincipalDatastoreSecrets'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
*,
client_id: str,
secrets: "ServicePrincipalDatastoreSecrets",
tenant_id: str,
authority_url: Optional[str] = None,
resource_url: Optional[str] = None,
**kwargs
):
"""
:keyword authority_url: Authority URL used for authentication.
:paramtype authority_url: str
:keyword client_id: Required. [Required] Service principal client ID.
:paramtype client_id: str
:keyword resource_url: Resource the service principal has access to.
:paramtype resource_url: str
:keyword secrets: Required. [Required] Service principal secrets.
:paramtype secrets: ~azure.mgmt.machinelearningservices.models.ServicePrincipalDatastoreSecrets
:keyword tenant_id: Required. [Required] ID of the tenant to which the service principal
belongs.
:paramtype tenant_id: str
"""
super(ServicePrincipalDatastoreCredentials, self).__init__(**kwargs)
self.credentials_type = 'ServicePrincipal' # type: str
self.authority_url = authority_url
self.client_id = client_id
self.resource_url = resource_url
self.secrets = secrets
self.tenant_id = tenant_id
class ServicePrincipalDatastoreSecrets(DatastoreSecrets):
"""Datastore Service Principal secrets.
All required parameters must be populated in order to send to Azure.
    :ivar secrets_type: Required. [Required] Credential type used to authenticate with storage.
     Constant filled by server. Possible values include: "AccountKey", "Certificate", "Sas",
     "ServicePrincipal", "KerberosPassword", "KerberosKeytab".
:vartype secrets_type: str or ~azure.mgmt.machinelearningservices.models.SecretsType
:ivar client_secret: Service principal secret.
:vartype client_secret: str
"""
_validation = {
'secrets_type': {'required': True},
}
_attribute_map = {
'secrets_type': {'key': 'secretsType', 'type': 'str'},
'client_secret': {'key': 'clientSecret', 'type': 'str'},
}
def __init__(
self,
*,
client_secret: Optional[str] = None,
**kwargs
):
"""
:keyword client_secret: Service principal secret.
:paramtype client_secret: str
"""
super(ServicePrincipalDatastoreSecrets, self).__init__(**kwargs)
self.secrets_type = 'ServicePrincipal' # type: str
self.client_secret = client_secret
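# Illustrative usage sketch (placeholder IDs and secret): service principal datastore credentials
# together with their secrets object.
#
#     sp_credentials = ServicePrincipalDatastoreCredentials(
#         client_id="<client-id>",
#         tenant_id="<tenant-id>",
#         secrets=ServicePrincipalDatastoreSecrets(client_secret="<client-secret>"),
#     )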
class Sku(msrest.serialization.Model):
"""The resource model definition representing SKU.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. The name of the SKU. Ex - P3. It is typically a letter+number code.
:vartype name: str
:ivar tier: This field is required to be implemented by the Resource Provider if the service
has more than one tier, but is not required on a PUT. Possible values include: "Free", "Basic",
"Standard", "Premium".
:vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
:ivar size: The SKU size. When the name field is the combination of tier and some other value,
this would be the standalone code.
:vartype size: str
:ivar family: If the service has different generations of hardware, for the same SKU, then that
can be captured here.
:vartype family: str
:ivar capacity: If the SKU supports scale out/in then the capacity integer should be included.
If scale out/in is not possible for the resource this may be omitted.
:vartype capacity: int
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'size': {'key': 'size', 'type': 'str'},
'family': {'key': 'family', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'int'},
}
def __init__(
self,
*,
name: str,
tier: Optional[Union[str, "SkuTier"]] = None,
size: Optional[str] = None,
family: Optional[str] = None,
capacity: Optional[int] = None,
**kwargs
):
"""
:keyword name: Required. The name of the SKU. Ex - P3. It is typically a letter+number code.
:paramtype name: str
:keyword tier: This field is required to be implemented by the Resource Provider if the service
has more than one tier, but is not required on a PUT. Possible values include: "Free", "Basic",
"Standard", "Premium".
:paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
:keyword size: The SKU size. When the name field is the combination of tier and some other
value, this would be the standalone code.
:paramtype size: str
:keyword family: If the service has different generations of hardware, for the same SKU, then
that can be captured here.
:paramtype family: str
:keyword capacity: If the SKU supports scale out/in then the capacity integer should be
included. If scale out/in is not possible for the resource this may be omitted.
:paramtype capacity: int
"""
super(Sku, self).__init__(**kwargs)
self.name = name
self.tier = tier
self.size = size
self.family = family
self.capacity = capacity
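# Illustrative usage sketch (assumed SKU name and tier): a Sku with explicit capacity.
#
#     sku = Sku(name="Standard_DS3_v2", tier="Standard", capacity=2)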
class SkuCapacity(msrest.serialization.Model):
"""SKU capacity information.
:ivar default: Gets or sets the default capacity.
:vartype default: int
:ivar maximum: Gets or sets the maximum.
:vartype maximum: int
:ivar minimum: Gets or sets the minimum.
:vartype minimum: int
:ivar scale_type: Gets or sets the type of the scale. Possible values include: "Automatic",
"Manual", "None".
:vartype scale_type: str or ~azure.mgmt.machinelearningservices.models.SkuScaleType
"""
_attribute_map = {
'default': {'key': 'default', 'type': 'int'},
'maximum': {'key': 'maximum', 'type': 'int'},
'minimum': {'key': 'minimum', 'type': 'int'},
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
*,
default: Optional[int] = 0,
maximum: Optional[int] = 0,
minimum: Optional[int] = 0,
scale_type: Optional[Union[str, "SkuScaleType"]] = None,
**kwargs
):
"""
:keyword default: Gets or sets the default capacity.
:paramtype default: int
:keyword maximum: Gets or sets the maximum.
:paramtype maximum: int
:keyword minimum: Gets or sets the minimum.
:paramtype minimum: int
:keyword scale_type: Gets or sets the type of the scale. Possible values include: "Automatic",
"Manual", "None".
:paramtype scale_type: str or ~azure.mgmt.machinelearningservices.models.SkuScaleType
"""
super(SkuCapacity, self).__init__(**kwargs)
self.default = default
self.maximum = maximum
self.minimum = minimum
self.scale_type = scale_type
class SkuResource(msrest.serialization.Model):
"""Fulfills ARM Contract requirement to list all available SKUS for a resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar capacity: Gets or sets the Sku Capacity.
:vartype capacity: ~azure.mgmt.machinelearningservices.models.SkuCapacity
:ivar resource_type: The resource type name.
:vartype resource_type: str
:ivar sku: Gets or sets the Sku.
:vartype sku: ~azure.mgmt.machinelearningservices.models.SkuSetting
"""
_validation = {
'resource_type': {'readonly': True},
}
_attribute_map = {
'capacity': {'key': 'capacity', 'type': 'SkuCapacity'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'SkuSetting'},
}
def __init__(
self,
*,
capacity: Optional["SkuCapacity"] = None,
sku: Optional["SkuSetting"] = None,
**kwargs
):
"""
:keyword capacity: Gets or sets the Sku Capacity.
:paramtype capacity: ~azure.mgmt.machinelearningservices.models.SkuCapacity
:keyword sku: Gets or sets the Sku.
:paramtype sku: ~azure.mgmt.machinelearningservices.models.SkuSetting
"""
super(SkuResource, self).__init__(**kwargs)
self.capacity = capacity
self.resource_type = None
self.sku = sku
class SkuResourceArmPaginatedResult(msrest.serialization.Model):
"""A paginated list of SkuResource entities.
:ivar next_link: The link to the next page of SkuResource objects. If null, there are no
additional pages.
:vartype next_link: str
:ivar value: An array of objects of type SkuResource.
:vartype value: list[~azure.mgmt.machinelearningservices.models.SkuResource]
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'value': {'key': 'value', 'type': '[SkuResource]'},
}
def __init__(
self,
*,
next_link: Optional[str] = None,
value: Optional[List["SkuResource"]] = None,
**kwargs
):
"""
:keyword next_link: The link to the next page of SkuResource objects. If null, there are no
additional pages.
:paramtype next_link: str
:keyword value: An array of objects of type SkuResource.
:paramtype value: list[~azure.mgmt.machinelearningservices.models.SkuResource]
"""
super(SkuResourceArmPaginatedResult, self).__init__(**kwargs)
self.next_link = next_link
self.value = value
class SkuSetting(msrest.serialization.Model):
"""SkuSetting fulfills the need for stripped down SKU info in ARM contract.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. [Required] The name of the SKU. Ex - P3. It is typically a letter+number
code.
:vartype name: str
:ivar tier: This field is required to be implemented by the Resource Provider if the service
has more than one tier, but is not required on a PUT. Possible values include: "Free", "Basic",
"Standard", "Premium".
:vartype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
"""
_validation = {
'name': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
tier: Optional[Union[str, "SkuTier"]] = None,
**kwargs
):
"""
:keyword name: Required. [Required] The name of the SKU. Ex - P3. It is typically a
letter+number code.
:paramtype name: str
:keyword tier: This field is required to be implemented by the Resource Provider if the service
has more than one tier, but is not required on a PUT. Possible values include: "Free", "Basic",
"Standard", "Premium".
:paramtype tier: str or ~azure.mgmt.machinelearningservices.models.SkuTier
"""
super(SkuSetting, self).__init__(**kwargs)
self.name = name
self.tier = tier
class StackEnsembleSettings(msrest.serialization.Model):
"""Advances setting to customize StackEnsemble run.
:ivar stack_meta_learner_k_wargs: Optional parameters to pass to the initializer of the
meta-learner.
:vartype stack_meta_learner_k_wargs: any
:ivar stack_meta_learner_train_percentage: Specifies the proportion of the training set (when
choosing train and validation type of training) to be reserved for training the meta-learner.
Default value is 0.2.
:vartype stack_meta_learner_train_percentage: float
:ivar stack_meta_learner_type: The meta-learner is a model trained on the output of the
individual heterogeneous models. Possible values include: "None", "LogisticRegression",
"LogisticRegressionCV", "LightGBMClassifier", "ElasticNet", "ElasticNetCV",
"LightGBMRegressor", "LinearRegression".
:vartype stack_meta_learner_type: str or
~azure.mgmt.machinelearningservices.models.StackMetaLearnerType
"""
_attribute_map = {
'stack_meta_learner_k_wargs': {'key': 'stackMetaLearnerKWargs', 'type': 'object'},
'stack_meta_learner_train_percentage': {'key': 'stackMetaLearnerTrainPercentage', 'type': 'float'},
'stack_meta_learner_type': {'key': 'stackMetaLearnerType', 'type': 'str'},
}
def __init__(
self,
*,
stack_meta_learner_k_wargs: Optional[Any] = None,
stack_meta_learner_train_percentage: Optional[float] = 0.2,
stack_meta_learner_type: Optional[Union[str, "StackMetaLearnerType"]] = None,
**kwargs
):
"""
:keyword stack_meta_learner_k_wargs: Optional parameters to pass to the initializer of the
meta-learner.
:paramtype stack_meta_learner_k_wargs: any
:keyword stack_meta_learner_train_percentage: Specifies the proportion of the training set
(when choosing train and validation type of training) to be reserved for training the
meta-learner. Default value is 0.2.
:paramtype stack_meta_learner_train_percentage: float
:keyword stack_meta_learner_type: The meta-learner is a model trained on the output of the
individual heterogeneous models. Possible values include: "None", "LogisticRegression",
"LogisticRegressionCV", "LightGBMClassifier", "ElasticNet", "ElasticNetCV",
"LightGBMRegressor", "LinearRegression".
:paramtype stack_meta_learner_type: str or
~azure.mgmt.machinelearningservices.models.StackMetaLearnerType
"""
super(StackEnsembleSettings, self).__init__(**kwargs)
self.stack_meta_learner_k_wargs = stack_meta_learner_k_wargs
self.stack_meta_learner_train_percentage = stack_meta_learner_train_percentage
self.stack_meta_learner_type = stack_meta_learner_type
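# Illustrative usage sketch (assumed meta-learner choice): customizing the StackEnsemble
# meta-learner and its training split.
#
#     stack_settings = StackEnsembleSettings(
#         stack_meta_learner_type="LogisticRegression",
#         stack_meta_learner_train_percentage=0.2,
#     )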
class SweepJob(JobBaseDetails):
"""Sweep job definition.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar compute_id: ARM resource ID of the compute resource.
:vartype compute_id: str
:ivar display_name: Display name of job.
:vartype display_name: str
:ivar experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:vartype experiment_name: str
:ivar identity: Identity configuration. If set, this should be one of AmlToken,
ManagedIdentity, UserIdentity or null.
Defaults to AmlToken if null.
:vartype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
    :ivar job_type: Required. [Required] Specifies the type of job. Constant filled by server.
Possible values include: "AutoML", "Command", "Sweep", "Pipeline".
:vartype job_type: str or ~azure.mgmt.machinelearningservices.models.JobType
:ivar schedule: Schedule definition of job.
If no schedule is provided, the job is run once and immediately after submission.
:vartype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
:ivar services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:vartype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:ivar status: Status of the job. Possible values include: "NotStarted", "Starting",
"Provisioning", "Preparing", "Queued", "Running", "Finalizing", "CancelRequested", "Completed",
"Failed", "Canceled", "NotResponding", "Paused", "Unknown", "Scheduled".
:vartype status: str or ~azure.mgmt.machinelearningservices.models.JobStatus
:ivar early_termination: Early termination policies enable canceling poor-performing runs
before they complete.
:vartype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
:ivar inputs: Mapping of input data bindings used in the job.
:vartype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
:ivar limits: Sweep Job limit.
:vartype limits: ~azure.mgmt.machinelearningservices.models.SweepJobLimits
:ivar objective: Required. [Required] Optimization objective.
:vartype objective: ~azure.mgmt.machinelearningservices.models.Objective
:ivar outputs: Mapping of output data bindings used in the job.
:vartype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
:ivar sampling_algorithm: Required. [Required] The hyperparameter sampling algorithm.
:vartype sampling_algorithm: ~azure.mgmt.machinelearningservices.models.SamplingAlgorithm
:ivar search_space: Required. [Required] A dictionary containing each parameter and its
distribution. The dictionary key is the name of the parameter.
:vartype search_space: any
:ivar trial: Required. [Required] Trial component definition.
:vartype trial: ~azure.mgmt.machinelearningservices.models.TrialComponent
"""
_validation = {
'job_type': {'required': True},
'status': {'readonly': True},
'objective': {'required': True},
'sampling_algorithm': {'required': True},
'search_space': {'required': True},
'trial': {'required': True},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'compute_id': {'key': 'computeId', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'experiment_name': {'key': 'experimentName', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'IdentityConfiguration'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'job_type': {'key': 'jobType', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': 'ScheduleBase'},
'services': {'key': 'services', 'type': '{JobService}'},
'status': {'key': 'status', 'type': 'str'},
'early_termination': {'key': 'earlyTermination', 'type': 'EarlyTerminationPolicy'},
'inputs': {'key': 'inputs', 'type': '{JobInput}'},
'limits': {'key': 'limits', 'type': 'SweepJobLimits'},
'objective': {'key': 'objective', 'type': 'Objective'},
'outputs': {'key': 'outputs', 'type': '{JobOutput}'},
'sampling_algorithm': {'key': 'samplingAlgorithm', 'type': 'SamplingAlgorithm'},
'search_space': {'key': 'searchSpace', 'type': 'object'},
'trial': {'key': 'trial', 'type': 'TrialComponent'},
}
def __init__(
self,
*,
objective: "Objective",
sampling_algorithm: "SamplingAlgorithm",
search_space: Any,
trial: "TrialComponent",
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
compute_id: Optional[str] = None,
display_name: Optional[str] = None,
experiment_name: Optional[str] = "Default",
identity: Optional["IdentityConfiguration"] = None,
is_archived: Optional[bool] = False,
schedule: Optional["ScheduleBase"] = None,
services: Optional[Dict[str, "JobService"]] = None,
early_termination: Optional["EarlyTerminationPolicy"] = None,
inputs: Optional[Dict[str, "JobInput"]] = None,
limits: Optional["SweepJobLimits"] = None,
outputs: Optional[Dict[str, "JobOutput"]] = None,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword compute_id: ARM resource ID of the compute resource.
:paramtype compute_id: str
:keyword display_name: Display name of job.
:paramtype display_name: str
:keyword experiment_name: The name of the experiment the job belongs to. If not set, the job is
placed in the "Default" experiment.
:paramtype experiment_name: str
:keyword identity: Identity configuration. If set, this should be one of AmlToken,
ManagedIdentity, UserIdentity or null.
Defaults to AmlToken if null.
:paramtype identity: ~azure.mgmt.machinelearningservices.models.IdentityConfiguration
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword schedule: Schedule definition of job.
If no schedule is provided, the job is run once and immediately after submission.
:paramtype schedule: ~azure.mgmt.machinelearningservices.models.ScheduleBase
:keyword services: List of JobEndpoints.
For local jobs, a job endpoint will have an endpoint value of FileStreamObject.
:paramtype services: dict[str, ~azure.mgmt.machinelearningservices.models.JobService]
:keyword early_termination: Early termination policies enable canceling poor-performing runs
before they complete.
:paramtype early_termination: ~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicy
:keyword inputs: Mapping of input data bindings used in the job.
:paramtype inputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobInput]
:keyword limits: Sweep Job limit.
:paramtype limits: ~azure.mgmt.machinelearningservices.models.SweepJobLimits
:keyword objective: Required. [Required] Optimization objective.
:paramtype objective: ~azure.mgmt.machinelearningservices.models.Objective
:keyword outputs: Mapping of output data bindings used in the job.
:paramtype outputs: dict[str, ~azure.mgmt.machinelearningservices.models.JobOutput]
:keyword sampling_algorithm: Required. [Required] The hyperparameter sampling algorithm.
:paramtype sampling_algorithm: ~azure.mgmt.machinelearningservices.models.SamplingAlgorithm
:keyword search_space: Required. [Required] A dictionary containing each parameter and its
distribution. The dictionary key is the name of the parameter.
:paramtype search_space: any
:keyword trial: Required. [Required] Trial component definition.
:paramtype trial: ~azure.mgmt.machinelearningservices.models.TrialComponent
"""
super(SweepJob, self).__init__(description=description, properties=properties, tags=tags, compute_id=compute_id, display_name=display_name, experiment_name=experiment_name, identity=identity, is_archived=is_archived, schedule=schedule, services=services, **kwargs)
self.job_type = 'Sweep' # type: str
self.early_termination = early_termination
self.inputs = inputs
self.limits = limits
self.objective = objective
self.outputs = outputs
self.sampling_algorithm = sampling_algorithm
self.search_space = search_space
self.trial = trial
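# Illustrative usage sketch (assumed sibling models and values): a minimal SweepJob. Objective,
# GridSamplingAlgorithm and TrialComponent are assumed to be defined elsewhere in this module,
# and the search-space payload shown is only an example shape.
#
#     sweep_job = SweepJob(
#         objective=Objective(goal="Minimize", primary_metric="validation_loss"),
#         sampling_algorithm=GridSamplingAlgorithm(),
#         search_space={"learning_rate": ["choice", [[0.01, 0.1]]]},
#         trial=TrialComponent(command="python train.py", environment_id="<environment-arm-id>"),
#         limits=SweepJobLimits(max_total_trials=20, max_concurrent_trials=4),
#     )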
class SweepJobLimits(JobLimits):
"""Sweep Job limit class.
All required parameters must be populated in order to send to Azure.
    :ivar job_limits_type: Required. [Required] JobLimit type. Constant filled by server. Possible
values include: "Command", "Sweep".
:vartype job_limits_type: str or ~azure.mgmt.machinelearningservices.models.JobLimitsType
:ivar timeout: The max run duration in ISO 8601 format, after which the job will be cancelled.
Only supports duration with precision as low as Seconds.
:vartype timeout: ~datetime.timedelta
:ivar max_concurrent_trials: Sweep Job max concurrent trials.
:vartype max_concurrent_trials: int
:ivar max_total_trials: Sweep Job max total trials.
:vartype max_total_trials: int
:ivar trial_timeout: Sweep Job Trial timeout value.
:vartype trial_timeout: ~datetime.timedelta
"""
_validation = {
'job_limits_type': {'required': True},
}
_attribute_map = {
'job_limits_type': {'key': 'jobLimitsType', 'type': 'str'},
'timeout': {'key': 'timeout', 'type': 'duration'},
'max_concurrent_trials': {'key': 'maxConcurrentTrials', 'type': 'int'},
'max_total_trials': {'key': 'maxTotalTrials', 'type': 'int'},
'trial_timeout': {'key': 'trialTimeout', 'type': 'duration'},
}
def __init__(
self,
*,
timeout: Optional[datetime.timedelta] = None,
max_concurrent_trials: Optional[int] = None,
max_total_trials: Optional[int] = None,
trial_timeout: Optional[datetime.timedelta] = None,
**kwargs
):
"""
:keyword timeout: The max run duration in ISO 8601 format, after which the job will be
cancelled. Only supports duration with precision as low as Seconds.
:paramtype timeout: ~datetime.timedelta
:keyword max_concurrent_trials: Sweep Job max concurrent trials.
:paramtype max_concurrent_trials: int
:keyword max_total_trials: Sweep Job max total trials.
:paramtype max_total_trials: int
:keyword trial_timeout: Sweep Job Trial timeout value.
:paramtype trial_timeout: ~datetime.timedelta
"""
super(SweepJobLimits, self).__init__(timeout=timeout, **kwargs)
self.job_limits_type = 'Sweep' # type: str
self.max_concurrent_trials = max_concurrent_trials
self.max_total_trials = max_total_trials
self.trial_timeout = trial_timeout
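# Illustrative usage sketch (assumed values): sweep limits expressed with datetime.timedelta,
# which the 'duration' serializer renders as an ISO 8601 duration.
#
#     sweep_limits = SweepJobLimits(
#         max_total_trials=50,
#         max_concurrent_trials=5,
#         trial_timeout=datetime.timedelta(minutes=30),
#     )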
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:ivar created_by: The identity that created the resource.
:vartype created_by: str
:ivar created_by_type: The type of identity that created the resource. Possible values include:
"User", "Application", "ManagedIdentity", "Key".
:vartype created_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
:ivar created_at: The timestamp of resource creation (UTC).
:vartype created_at: ~datetime.datetime
:ivar last_modified_by: The identity that last modified the resource.
:vartype last_modified_by: str
:ivar last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:vartype last_modified_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
:ivar last_modified_at: The timestamp of resource last modification (UTC).
:vartype last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
"""
:keyword created_by: The identity that created the resource.
:paramtype created_by: str
:keyword created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:paramtype created_by_type: str or ~azure.mgmt.machinelearningservices.models.CreatedByType
:keyword created_at: The timestamp of resource creation (UTC).
:paramtype created_at: ~datetime.datetime
:keyword last_modified_by: The identity that last modified the resource.
:paramtype last_modified_by: str
:keyword last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:paramtype last_modified_by_type: str or
~azure.mgmt.machinelearningservices.models.CreatedByType
:keyword last_modified_at: The timestamp of resource last modification (UTC).
:paramtype last_modified_at: ~datetime.datetime
"""
super(SystemData, self).__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
class TableVerticalDataSettings(DataSettings):
"""Class for data inputs.
All required parameters must be populated in order to send to Azure.
    :ivar target_column_name: Required. [Required] Target column name: this is the column that
     contains the prediction values, also known as the label column in the context of
     classification tasks.
:vartype target_column_name: str
:ivar test_data: Test data input.
:vartype test_data: ~azure.mgmt.machinelearningservices.models.TestDataSettings
:ivar training_data: Required. [Required] Training data input.
:vartype training_data: ~azure.mgmt.machinelearningservices.models.TrainingDataSettings
:ivar validation_data: Validation data inputs.
:vartype validation_data:
~azure.mgmt.machinelearningservices.models.TableVerticalValidationDataSettings
:ivar weight_column_name: The name of the sample weight column. Automated ML supports a
weighted column as an input, causing rows in the data to be weighted up or down.
:vartype weight_column_name: str
"""
_validation = {
'target_column_name': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'training_data': {'required': True},
}
_attribute_map = {
'target_column_name': {'key': 'targetColumnName', 'type': 'str'},
'test_data': {'key': 'testData', 'type': 'TestDataSettings'},
'training_data': {'key': 'trainingData', 'type': 'TrainingDataSettings'},
'validation_data': {'key': 'validationData', 'type': 'TableVerticalValidationDataSettings'},
'weight_column_name': {'key': 'weightColumnName', 'type': 'str'},
}
def __init__(
self,
*,
target_column_name: str,
training_data: "TrainingDataSettings",
test_data: Optional["TestDataSettings"] = None,
validation_data: Optional["TableVerticalValidationDataSettings"] = None,
weight_column_name: Optional[str] = None,
**kwargs
):
"""
    :keyword target_column_name: Required. [Required] Target column name: this is the column that
     contains the prediction values, also known as the label column in the context of
     classification tasks.
:paramtype target_column_name: str
:keyword test_data: Test data input.
:paramtype test_data: ~azure.mgmt.machinelearningservices.models.TestDataSettings
:keyword training_data: Required. [Required] Training data input.
:paramtype training_data: ~azure.mgmt.machinelearningservices.models.TrainingDataSettings
:keyword validation_data: Validation data inputs.
:paramtype validation_data:
~azure.mgmt.machinelearningservices.models.TableVerticalValidationDataSettings
:keyword weight_column_name: The name of the sample weight column. Automated ML supports a
weighted column as an input, causing rows in the data to be weighted up or down.
:paramtype weight_column_name: str
"""
super(TableVerticalDataSettings, self).__init__(target_column_name=target_column_name, test_data=test_data, training_data=training_data, **kwargs)
self.validation_data = validation_data
self.weight_column_name = weight_column_name
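# Illustrative usage sketch (assumed sibling models and placeholder URI): data settings for a
# table task. TrainingDataSettings and MLTableJobInput are assumed to be defined elsewhere in
# this module.
#
#     data_settings = TableVerticalDataSettings(
#         target_column_name="price",
#         training_data=TrainingDataSettings(data=MLTableJobInput(uri="<mltable-uri>")),
#     )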
class TableVerticalFeaturizationSettings(FeaturizationSettings):
"""Featurization Configuration.
:ivar dataset_language: Dataset language, useful for the text data.
:vartype dataset_language: str
:ivar blocked_transformers: These transformers shall not be used in featurization.
:vartype blocked_transformers: list[str]
:ivar column_name_and_types: Dictionary of column name and its type (int, float, string,
datetime etc).
:vartype column_name_and_types: dict[str, str]
:ivar drop_columns: Columns to be dropped from data during featurization.
:vartype drop_columns: list[str]
:ivar enable_dnn_featurization: Determines whether to use Dnn based featurizers for data
featurization.
:vartype enable_dnn_featurization: bool
:ivar mode: Featurization mode - User can keep the default 'Auto' mode and AutoML will take
care of necessary transformation of the data in featurization phase.
If 'Off' is selected then no featurization is done.
If 'Custom' is selected then user can specify additional inputs to customize how featurization
is done. Possible values include: "Auto", "Custom", "Off".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.FeaturizationMode
:ivar transformer_params: User can specify additional transformers to be used along with the
columns to which it would be applied and parameters for the transformer constructor.
:vartype transformer_params: dict[str,
list[~azure.mgmt.machinelearningservices.models.ColumnTransformer]]
"""
_attribute_map = {
'dataset_language': {'key': 'datasetLanguage', 'type': 'str'},
'blocked_transformers': {'key': 'blockedTransformers', 'type': '[str]'},
'column_name_and_types': {'key': 'columnNameAndTypes', 'type': '{str}'},
'drop_columns': {'key': 'dropColumns', 'type': '[str]'},
'enable_dnn_featurization': {'key': 'enableDnnFeaturization', 'type': 'bool'},
'mode': {'key': 'mode', 'type': 'str'},
'transformer_params': {'key': 'transformerParams', 'type': '{[ColumnTransformer]}'},
}
def __init__(
self,
*,
dataset_language: Optional[str] = None,
blocked_transformers: Optional[List[str]] = None,
column_name_and_types: Optional[Dict[str, str]] = None,
drop_columns: Optional[List[str]] = None,
enable_dnn_featurization: Optional[bool] = False,
mode: Optional[Union[str, "FeaturizationMode"]] = None,
transformer_params: Optional[Dict[str, List["ColumnTransformer"]]] = None,
**kwargs
):
"""
:keyword dataset_language: Dataset language, useful for the text data.
:paramtype dataset_language: str
:keyword blocked_transformers: These transformers shall not be used in featurization.
:paramtype blocked_transformers: list[str]
:keyword column_name_and_types: Dictionary of column name and its type (int, float, string,
datetime etc).
:paramtype column_name_and_types: dict[str, str]
:keyword drop_columns: Columns to be dropped from data during featurization.
:paramtype drop_columns: list[str]
:keyword enable_dnn_featurization: Determines whether to use Dnn based featurizers for data
featurization.
:paramtype enable_dnn_featurization: bool
:keyword mode: Featurization mode - User can keep the default 'Auto' mode and AutoML will take
care of necessary transformation of the data in featurization phase.
If 'Off' is selected then no featurization is done.
If 'Custom' is selected then user can specify additional inputs to customize how featurization
is done. Possible values include: "Auto", "Custom", "Off".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.FeaturizationMode
:keyword transformer_params: User can specify additional transformers to be used along with the
columns to which it would be applied and parameters for the transformer constructor.
:paramtype transformer_params: dict[str,
list[~azure.mgmt.machinelearningservices.models.ColumnTransformer]]
"""
super(TableVerticalFeaturizationSettings, self).__init__(dataset_language=dataset_language, **kwargs)
self.blocked_transformers = blocked_transformers
self.column_name_and_types = column_name_and_types
self.drop_columns = drop_columns
self.enable_dnn_featurization = enable_dnn_featurization
self.mode = mode
self.transformer_params = transformer_params
class TableVerticalLimitSettings(msrest.serialization.Model):
"""Job execution constraints.
    :ivar enable_early_termination: Enable early termination; determines whether the AutoMLJob
     will terminate early if there is no score improvement in the last 20 iterations.
:vartype enable_early_termination: bool
:ivar exit_score: Exit score for the AutoML job.
:vartype exit_score: float
:ivar max_concurrent_trials: Maximum Concurrent iterations.
:vartype max_concurrent_trials: int
:ivar max_cores_per_trial: Max cores per iteration.
:vartype max_cores_per_trial: int
:ivar max_trials: Number of iterations.
:vartype max_trials: int
:ivar timeout: AutoML job timeout.
:vartype timeout: ~datetime.timedelta
:ivar trial_timeout: Iteration timeout.
:vartype trial_timeout: ~datetime.timedelta
"""
_attribute_map = {
'enable_early_termination': {'key': 'enableEarlyTermination', 'type': 'bool'},
'exit_score': {'key': 'exitScore', 'type': 'float'},
'max_concurrent_trials': {'key': 'maxConcurrentTrials', 'type': 'int'},
'max_cores_per_trial': {'key': 'maxCoresPerTrial', 'type': 'int'},
'max_trials': {'key': 'maxTrials', 'type': 'int'},
'timeout': {'key': 'timeout', 'type': 'duration'},
'trial_timeout': {'key': 'trialTimeout', 'type': 'duration'},
}
def __init__(
self,
*,
enable_early_termination: Optional[bool] = True,
exit_score: Optional[float] = None,
max_concurrent_trials: Optional[int] = 1,
max_cores_per_trial: Optional[int] = -1,
max_trials: Optional[int] = 1000,
timeout: Optional[datetime.timedelta] = "PT6H",
trial_timeout: Optional[datetime.timedelta] = "PT30M",
**kwargs
):
"""
    :keyword enable_early_termination: Enable early termination; determines whether the AutoMLJob
     will terminate early if there is no score improvement in the last 20 iterations.
:paramtype enable_early_termination: bool
:keyword exit_score: Exit score for the AutoML job.
:paramtype exit_score: float
:keyword max_concurrent_trials: Maximum Concurrent iterations.
:paramtype max_concurrent_trials: int
:keyword max_cores_per_trial: Max cores per iteration.
:paramtype max_cores_per_trial: int
:keyword max_trials: Number of iterations.
:paramtype max_trials: int
:keyword timeout: AutoML job timeout.
:paramtype timeout: ~datetime.timedelta
:keyword trial_timeout: Iteration timeout.
:paramtype trial_timeout: ~datetime.timedelta
"""
super(TableVerticalLimitSettings, self).__init__(**kwargs)
self.enable_early_termination = enable_early_termination
self.exit_score = exit_score
self.max_concurrent_trials = max_concurrent_trials
self.max_cores_per_trial = max_cores_per_trial
self.max_trials = max_trials
self.timeout = timeout
self.trial_timeout = trial_timeout
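# Illustrative usage sketch (assumed values): tightening AutoML table-task execution constraints.
#
#     limit_settings = TableVerticalLimitSettings(
#         max_trials=40,
#         max_concurrent_trials=4,
#         enable_early_termination=True,
#     )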
class TableVerticalValidationDataSettings(ValidationDataSettings):
"""Validation settings for AutoML Table vertical tasks - Classification/Regression/Forecasting.
:ivar data: Validation data MLTable.
:vartype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:ivar validation_data_size: The fraction of training dataset that needs to be set aside for
validation purpose.
Values between (0.0 , 1.0)
Applied when validation dataset is not provided.
:vartype validation_data_size: float
:ivar cv_split_column_names: Columns to use for CVSplit data.
:vartype cv_split_column_names: list[str]
:ivar n_cross_validations: Number of cross validation folds to be applied on training dataset
when validation dataset is not provided.
:vartype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
"""
_attribute_map = {
'data': {'key': 'data', 'type': 'MLTableJobInput'},
'validation_data_size': {'key': 'validationDataSize', 'type': 'float'},
'cv_split_column_names': {'key': 'cvSplitColumnNames', 'type': '[str]'},
'n_cross_validations': {'key': 'nCrossValidations', 'type': 'NCrossValidations'},
}
def __init__(
self,
*,
data: Optional["MLTableJobInput"] = None,
validation_data_size: Optional[float] = None,
cv_split_column_names: Optional[List[str]] = None,
n_cross_validations: Optional["NCrossValidations"] = None,
**kwargs
):
"""
:keyword data: Validation data MLTable.
:paramtype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:keyword validation_data_size: The fraction of training dataset that needs to be set aside for
validation purpose.
Values between (0.0 , 1.0)
Applied when validation dataset is not provided.
:paramtype validation_data_size: float
:keyword cv_split_column_names: Columns to use for CVSplit data.
:paramtype cv_split_column_names: list[str]
:keyword n_cross_validations: Number of cross validation folds to be applied on training
dataset
when validation dataset is not provided.
:paramtype n_cross_validations: ~azure.mgmt.machinelearningservices.models.NCrossValidations
"""
super(TableVerticalValidationDataSettings, self).__init__(data=data, validation_data_size=validation_data_size, **kwargs)
self.cv_split_column_names = cv_split_column_names
self.n_cross_validations = n_cross_validations
class TargetUtilizationScaleSettings(OnlineScaleSettings):
"""TargetUtilizationScaleSettings.
All required parameters must be populated in order to send to Azure.
    :ivar scale_type: Required. [Required] Type of deployment scaling algorithm. Constant filled by
server. Possible values include: "Default", "TargetUtilization".
:vartype scale_type: str or ~azure.mgmt.machinelearningservices.models.ScaleType
:ivar max_instances: The maximum number of instances that the deployment can scale to. The
quota will be reserved for max_instances.
:vartype max_instances: int
:ivar min_instances: The minimum number of instances to always be present.
:vartype min_instances: int
    :ivar polling_interval: The polling interval in ISO 8601 format. Only supports duration with
precision as low as Seconds.
:vartype polling_interval: ~datetime.timedelta
:ivar target_utilization_percentage: Target CPU usage for the autoscaler.
:vartype target_utilization_percentage: int
"""
_validation = {
'scale_type': {'required': True},
}
_attribute_map = {
'scale_type': {'key': 'scaleType', 'type': 'str'},
'max_instances': {'key': 'maxInstances', 'type': 'int'},
'min_instances': {'key': 'minInstances', 'type': 'int'},
'polling_interval': {'key': 'pollingInterval', 'type': 'duration'},
'target_utilization_percentage': {'key': 'targetUtilizationPercentage', 'type': 'int'},
}
def __init__(
self,
*,
max_instances: Optional[int] = 1,
min_instances: Optional[int] = 1,
polling_interval: Optional[datetime.timedelta] = "PT1S",
target_utilization_percentage: Optional[int] = 70,
**kwargs
):
"""
:keyword max_instances: The maximum number of instances that the deployment can scale to. The
quota will be reserved for max_instances.
:paramtype max_instances: int
:keyword min_instances: The minimum number of instances to always be present.
:paramtype min_instances: int
    :keyword polling_interval: The polling interval in ISO 8601 format. Only supports duration with
precision as low as Seconds.
:paramtype polling_interval: ~datetime.timedelta
:keyword target_utilization_percentage: Target CPU usage for the autoscaler.
:paramtype target_utilization_percentage: int
"""
super(TargetUtilizationScaleSettings, self).__init__(**kwargs)
self.scale_type = 'TargetUtilization' # type: str
self.max_instances = max_instances
self.min_instances = min_instances
self.polling_interval = polling_interval
self.target_utilization_percentage = target_utilization_percentage
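# Illustrative usage sketch (assumed values): CPU-utilization based autoscaling for a deployment.
#
#     scale_settings = TargetUtilizationScaleSettings(
#         min_instances=1,
#         max_instances=5,
#         target_utilization_percentage=70,
#         polling_interval=datetime.timedelta(seconds=10),
#     )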
class TensorFlow(DistributionConfiguration):
"""TensorFlow distribution configuration.
All required parameters must be populated in order to send to Azure.
:ivar distribution_type: Required. [Required] Specifies the type of distribution
     framework. Constant filled by server. Possible values include: "PyTorch", "TensorFlow", "Mpi".
:vartype distribution_type: str or ~azure.mgmt.machinelearningservices.models.DistributionType
:ivar parameter_server_count: Number of parameter server tasks.
:vartype parameter_server_count: int
:ivar worker_count: Number of workers. If not specified, will default to the instance count.
:vartype worker_count: int
"""
_validation = {
'distribution_type': {'required': True},
}
_attribute_map = {
'distribution_type': {'key': 'distributionType', 'type': 'str'},
'parameter_server_count': {'key': 'parameterServerCount', 'type': 'int'},
'worker_count': {'key': 'workerCount', 'type': 'int'},
}
def __init__(
self,
*,
parameter_server_count: Optional[int] = 0,
worker_count: Optional[int] = None,
**kwargs
):
"""
:keyword parameter_server_count: Number of parameter server tasks.
:paramtype parameter_server_count: int
:keyword worker_count: Number of workers. If not specified, will default to the instance count.
:paramtype worker_count: int
"""
super(TensorFlow, self).__init__(**kwargs)
self.distribution_type = 'TensorFlow' # type: str
self.parameter_server_count = parameter_server_count
self.worker_count = worker_count
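# Illustrative usage sketch (assumed counts): a TensorFlow distribution with four workers and one
# parameter server.
#
#     distribution = TensorFlow(worker_count=4, parameter_server_count=1)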
class TestDataSettings(msrest.serialization.Model):
"""Test data inputs.
:ivar data: Test data MLTable.
:vartype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:ivar test_data_size: The fraction of test dataset that needs to be set aside for validation
purpose.
Values between (0.0 , 1.0)
Applied when validation dataset is not provided.
:vartype test_data_size: float
"""
_attribute_map = {
'data': {'key': 'data', 'type': 'MLTableJobInput'},
'test_data_size': {'key': 'testDataSize', 'type': 'float'},
}
def __init__(
self,
*,
data: Optional["MLTableJobInput"] = None,
test_data_size: Optional[float] = None,
**kwargs
):
"""
:keyword data: Test data MLTable.
:paramtype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
:keyword test_data_size: The fraction of test dataset that needs to be set aside for validation
purpose.
Values between (0.0 , 1.0)
Applied when validation dataset is not provided.
:paramtype test_data_size: float
"""
super(TestDataSettings, self).__init__(**kwargs)
self.data = data
self.test_data_size = test_data_size
class TextClassification(AutoMLVertical, NlpVertical):
"""Text Classification task in AutoML NLP vertical.
NLP - Natural Language Processing.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Data inputs for AutoMLJob.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalDataSettings
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
    :ivar task_type: Required. [Required] Task type for AutoMLJob. Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar primary_metric: Primary metric for Text-Classification task. Possible values include:
"AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
"PrecisionScoreWeighted".
:vartype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
"""
_validation = {
'task_type': {'required': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'NlpVerticalDataSettings'},
'featurization_settings': {'key': 'featurizationSettings', 'type': 'NlpVerticalFeaturizationSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'NlpVerticalLimitSettings'},
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
data_settings: Optional["NlpVerticalDataSettings"] = None,
featurization_settings: Optional["NlpVerticalFeaturizationSettings"] = None,
limit_settings: Optional["NlpVerticalLimitSettings"] = None,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
primary_metric: Optional[Union[str, "ClassificationPrimaryMetrics"]] = None,
**kwargs
):
"""
:keyword data_settings: Data inputs for AutoMLJob.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalDataSettings
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:keyword primary_metric: Primary metric for Text-Classification task. Possible values include:
"AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
"PrecisionScoreWeighted".
:paramtype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
"""
super(TextClassification, self).__init__(log_verbosity=log_verbosity, data_settings=data_settings, featurization_settings=featurization_settings, limit_settings=limit_settings, **kwargs)
self.data_settings = data_settings
self.featurization_settings = featurization_settings
self.limit_settings = limit_settings
        self.log_verbosity = log_verbosity
        self.task_type = 'TextClassification'  # type: str
        self.primary_metric = primary_metric
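# Illustrative usage sketch (assumed sibling model and metric value): a minimal TextClassification
# task. NlpVerticalLimitSettings is assumed to be defined elsewhere in this module.
#
#     text_task = TextClassification(
#         primary_metric="Accuracy",
#         limit_settings=NlpVerticalLimitSettings(max_concurrent_trials=1),
#     )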
class TextClassificationMultilabel(AutoMLVertical, NlpVertical):
"""Text Classification Multilabel task in AutoML NLP vertical.
NLP - Natural Language Processing.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Data inputs for AutoMLJob.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalDataSettings
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
    :ivar task_type: Required. [Required] Task type for AutoMLJob. Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar primary_metric: Primary metric for Text-Classification-Multilabel task.
     Currently only Accuracy is supported as the primary metric, hence the user need not set it explicitly.
Possible values include: "AUCWeighted", "Accuracy", "NormMacroRecall",
"AveragePrecisionScoreWeighted", "PrecisionScoreWeighted", "IOU".
:vartype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ClassificationMultilabelPrimaryMetrics
"""
_validation = {
'task_type': {'required': True},
'primary_metric': {'readonly': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'NlpVerticalDataSettings'},
'featurization_settings': {'key': 'featurizationSettings', 'type': 'NlpVerticalFeaturizationSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'NlpVerticalLimitSettings'},
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
data_settings: Optional["NlpVerticalDataSettings"] = None,
featurization_settings: Optional["NlpVerticalFeaturizationSettings"] = None,
limit_settings: Optional["NlpVerticalLimitSettings"] = None,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
**kwargs
):
"""
:keyword data_settings: Data inputs for AutoMLJob.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalDataSettings
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
"""
super(TextClassificationMultilabel, self).__init__(log_verbosity=log_verbosity, data_settings=data_settings, featurization_settings=featurization_settings, limit_settings=limit_settings, **kwargs)
self.data_settings = data_settings
self.featurization_settings = featurization_settings
self.limit_settings = limit_settings
        self.log_verbosity = log_verbosity
        self.task_type = 'TextClassificationMultilabel'  # type: str
        self.primary_metric = None
class TextNer(AutoMLVertical, NlpVertical):
"""Text-NER task in AutoML NLP vertical.
NER - Named Entity Recognition.
NLP - Natural Language Processing.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar data_settings: Data inputs for AutoMLJob.
:vartype data_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalDataSettings
:ivar featurization_settings: Featurization inputs needed for AutoML job.
:vartype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
:ivar limit_settings: Execution constraints for AutoMLJob.
:vartype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
:ivar log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:vartype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
:ivar task_type: Required. [Required] Task type for AutoMLJob.Constant filled by server.
Possible values include: "Classification", "Regression", "Forecasting", "ImageClassification",
"ImageClassificationMultilabel", "ImageObjectDetection", "ImageInstanceSegmentation",
"TextClassification", "TextClassificationMultilabel", "TextNER".
:vartype task_type: str or ~azure.mgmt.machinelearningservices.models.TaskType
:ivar primary_metric: Primary metric for Text-NER task.
Only 'Accuracy' is supported for Text-NER, so user need not set this explicitly. Possible
values include: "AUCWeighted", "Accuracy", "NormMacroRecall", "AveragePrecisionScoreWeighted",
"PrecisionScoreWeighted".
:vartype primary_metric: str or
~azure.mgmt.machinelearningservices.models.ClassificationPrimaryMetrics
"""
_validation = {
'task_type': {'required': True},
'primary_metric': {'readonly': True},
}
_attribute_map = {
'data_settings': {'key': 'dataSettings', 'type': 'NlpVerticalDataSettings'},
'featurization_settings': {'key': 'featurizationSettings', 'type': 'NlpVerticalFeaturizationSettings'},
'limit_settings': {'key': 'limitSettings', 'type': 'NlpVerticalLimitSettings'},
'log_verbosity': {'key': 'logVerbosity', 'type': 'str'},
'task_type': {'key': 'taskType', 'type': 'str'},
'primary_metric': {'key': 'primaryMetric', 'type': 'str'},
}
def __init__(
self,
*,
data_settings: Optional["NlpVerticalDataSettings"] = None,
featurization_settings: Optional["NlpVerticalFeaturizationSettings"] = None,
limit_settings: Optional["NlpVerticalLimitSettings"] = None,
log_verbosity: Optional[Union[str, "LogVerbosity"]] = None,
**kwargs
):
"""
:keyword data_settings: Data inputs for AutoMLJob.
:paramtype data_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalDataSettings
:keyword featurization_settings: Featurization inputs needed for AutoML job.
:paramtype featurization_settings:
~azure.mgmt.machinelearningservices.models.NlpVerticalFeaturizationSettings
:keyword limit_settings: Execution constraints for AutoMLJob.
:paramtype limit_settings: ~azure.mgmt.machinelearningservices.models.NlpVerticalLimitSettings
:keyword log_verbosity: Log verbosity for the job. Possible values include: "NotSet", "Debug",
"Info", "Warning", "Error", "Critical".
:paramtype log_verbosity: str or ~azure.mgmt.machinelearningservices.models.LogVerbosity
"""
super(TextNer, self).__init__(log_verbosity=log_verbosity, data_settings=data_settings, featurization_settings=featurization_settings, limit_settings=limit_settings, **kwargs)
self.data_settings = data_settings
self.featurization_settings = featurization_settings
self.limit_settings = limit_settings
self.task_type = 'TextNER' # type: str
self.primary_metric = None
self.log_verbosity = log_verbosity
self.task_type = 'TextNER' # type: str
self.primary_metric = None
class TrainingDataSettings(msrest.serialization.Model):
"""Training data input.
All required parameters must be populated in order to send to Azure.
:ivar data: Required. [Required] Training data MLTable.
:vartype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
"""
_validation = {
'data': {'required': True},
}
_attribute_map = {
'data': {'key': 'data', 'type': 'MLTableJobInput'},
}
def __init__(
self,
*,
data: "MLTableJobInput",
**kwargs
):
"""
:keyword data: Required. [Required] Training data MLTable.
:paramtype data: ~azure.mgmt.machinelearningservices.models.MLTableJobInput
"""
super(TrainingDataSettings, self).__init__(**kwargs)
self.data = data
class TrainingSettings(msrest.serialization.Model):
"""Training related configuration.
:ivar enable_dnn_training: Enable recommendation of DNN models.
:vartype enable_dnn_training: bool
:ivar enable_model_explainability: Flag to turn on explainability on best model.
:vartype enable_model_explainability: bool
:ivar enable_onnx_compatible_models: Flag for enabling onnx compatible models.
:vartype enable_onnx_compatible_models: bool
:ivar enable_stack_ensemble: Enable stack ensemble run.
:vartype enable_stack_ensemble: bool
:ivar enable_vote_ensemble: Enable voting ensemble run.
:vartype enable_vote_ensemble: bool
:ivar ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
generation, multiple fitted models from the previous child runs are downloaded.
Configure this parameter with a higher value than 300 secs, if more time is needed.
:vartype ensemble_model_download_timeout: ~datetime.timedelta
:ivar stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
:vartype stack_ensemble_settings:
~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
"""
_attribute_map = {
'enable_dnn_training': {'key': 'enableDnnTraining', 'type': 'bool'},
'enable_model_explainability': {'key': 'enableModelExplainability', 'type': 'bool'},
'enable_onnx_compatible_models': {'key': 'enableOnnxCompatibleModels', 'type': 'bool'},
'enable_stack_ensemble': {'key': 'enableStackEnsemble', 'type': 'bool'},
'enable_vote_ensemble': {'key': 'enableVoteEnsemble', 'type': 'bool'},
'ensemble_model_download_timeout': {'key': 'ensembleModelDownloadTimeout', 'type': 'duration'},
'stack_ensemble_settings': {'key': 'stackEnsembleSettings', 'type': 'StackEnsembleSettings'},
}
def __init__(
self,
*,
enable_dnn_training: Optional[bool] = False,
enable_model_explainability: Optional[bool] = False,
enable_onnx_compatible_models: Optional[bool] = False,
enable_stack_ensemble: Optional[bool] = True,
enable_vote_ensemble: Optional[bool] = True,
ensemble_model_download_timeout: Optional[datetime.timedelta] = "PT5M",
stack_ensemble_settings: Optional["StackEnsembleSettings"] = None,
**kwargs
):
"""
:keyword enable_dnn_training: Enable recommendation of DNN models.
:paramtype enable_dnn_training: bool
:keyword enable_model_explainability: Flag to turn on explainability on best model.
:paramtype enable_model_explainability: bool
:keyword enable_onnx_compatible_models: Flag for enabling onnx compatible models.
:paramtype enable_onnx_compatible_models: bool
:keyword enable_stack_ensemble: Enable stack ensemble run.
:paramtype enable_stack_ensemble: bool
:keyword enable_vote_ensemble: Enable voting ensemble run.
:paramtype enable_vote_ensemble: bool
:keyword ensemble_model_download_timeout: During VotingEnsemble and StackEnsemble model
generation, multiple fitted models from the previous child runs are downloaded.
Configure this parameter with a higher value than 300 secs, if more time is needed.
:paramtype ensemble_model_download_timeout: ~datetime.timedelta
:keyword stack_ensemble_settings: Stack ensemble settings for stack ensemble run.
:paramtype stack_ensemble_settings:
~azure.mgmt.machinelearningservices.models.StackEnsembleSettings
"""
super(TrainingSettings, self).__init__(**kwargs)
self.enable_dnn_training = enable_dnn_training
self.enable_model_explainability = enable_model_explainability
self.enable_onnx_compatible_models = enable_onnx_compatible_models
self.enable_stack_ensemble = enable_stack_ensemble
self.enable_vote_ensemble = enable_vote_ensemble
self.ensemble_model_download_timeout = ensemble_model_download_timeout
self.stack_ensemble_settings = stack_ensemble_settings
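# Illustrative usage sketch (editorial note, not part of the generated SDK): the defaults
# above mirror the service defaults (ensembles enabled, 5-minute download timeout given as
# the ISO-8601 duration "PT5M"). A caller could override them like this, assuming
# `datetime` is imported at module level as the annotations imply:
#
#   training = TrainingSettings(
#       enable_onnx_compatible_models=True,
#       ensemble_model_download_timeout=datetime.timedelta(minutes=10),
#   )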
class TrialComponent(msrest.serialization.Model):
"""Trial component definition.
All required parameters must be populated in order to send to Azure.
:ivar code_id: ARM resource ID of the code asset.
:vartype code_id: str
:ivar command: Required. [Required] The command to execute on startup of the job. eg. "python
train.py".
:vartype command: str
:ivar distribution: Distribution configuration of the job. If set, this should be one of Mpi,
Tensorflow, PyTorch, or null.
:vartype distribution: ~azure.mgmt.machinelearningservices.models.DistributionConfiguration
:ivar environment_id: Required. [Required] The ARM resource ID of the Environment specification
for the job.
:vartype environment_id: str
:ivar environment_variables: Environment variables included in the job.
:vartype environment_variables: dict[str, str]
:ivar resources: Compute Resource configuration for the job.
:vartype resources: ~azure.mgmt.machinelearningservices.models.ResourceConfiguration
"""
_validation = {
'command': {'required': True, 'min_length': 1, 'pattern': r'[a-zA-Z0-9_]'},
'environment_id': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'code_id': {'key': 'codeId', 'type': 'str'},
'command': {'key': 'command', 'type': 'str'},
'distribution': {'key': 'distribution', 'type': 'DistributionConfiguration'},
'environment_id': {'key': 'environmentId', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'resources': {'key': 'resources', 'type': 'ResourceConfiguration'},
}
def __init__(
self,
*,
command: str,
environment_id: str,
code_id: Optional[str] = None,
distribution: Optional["DistributionConfiguration"] = None,
environment_variables: Optional[Dict[str, str]] = None,
resources: Optional["ResourceConfiguration"] = None,
**kwargs
):
"""
:keyword code_id: ARM resource ID of the code asset.
:paramtype code_id: str
:keyword command: Required. [Required] The command to execute on startup of the job. eg.
"python train.py".
:paramtype command: str
:keyword distribution: Distribution configuration of the job. If set, this should be one of
Mpi, Tensorflow, PyTorch, or null.
:paramtype distribution: ~azure.mgmt.machinelearningservices.models.DistributionConfiguration
:keyword environment_id: Required. [Required] The ARM resource ID of the Environment
specification for the job.
:paramtype environment_id: str
:keyword environment_variables: Environment variables included in the job.
:paramtype environment_variables: dict[str, str]
:keyword resources: Compute Resource configuration for the job.
:paramtype resources: ~azure.mgmt.machinelearningservices.models.ResourceConfiguration
"""
super(TrialComponent, self).__init__(**kwargs)
self.code_id = code_id
self.command = command
self.distribution = distribution
self.environment_id = environment_id
self.environment_variables = environment_variables
self.resources = resources
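# Illustrative usage sketch (editorial note, not part of the generated SDK): a minimal
# command trial. The environment ID below is a hypothetical placeholder, not a real asset:
#
#   trial = TrialComponent(
#       command="python train.py",
#       environment_id="/subscriptions/.../environments/my-env/versions/1",
#       environment_variables={"MY_FLAG": "1"},
#   )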
class TritonModelJobInput(JobInput, AssetJobInput):
"""TritonModelJobInput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:ivar uri: Required. [Required] Input Asset URI.
:vartype uri: str
:ivar description: Description for the input.
:vartype description: str
:ivar job_input_type: Required. [Required] Specifies the type of job.Constant filled by server.
Possible values include: "Literal", "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
"""
_validation = {
'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'job_input_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_input_type': {'key': 'jobInputType', 'type': 'str'},
}
def __init__(
self,
*,
uri: str,
mode: Optional[Union[str, "InputDeliveryMode"]] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:keyword uri: Required. [Required] Input Asset URI.
:paramtype uri: str
:keyword description: Description for the input.
:paramtype description: str
"""
super(TritonModelJobInput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.job_input_type = 'TritonModel' # type: str
self.description = description
self.job_input_type = 'TritonModel' # type: str
class TritonModelJobOutput(JobOutput, AssetJobOutput):
"""TritonModelJobOutput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: Required. [Required] Specifies the type of job.Constant filled by
server. Possible values include: "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
"""
_validation = {
'job_output_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
}
def __init__(
self,
*,
mode: Optional[Union[str, "OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
:keyword description: Description for the output.
:paramtype description: str
"""
super(TritonModelJobOutput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.job_output_type = 'TritonModel' # type: str
self.description = description
self.job_output_type = 'TritonModel' # type: str
class TruncationSelectionPolicy(EarlyTerminationPolicy):
"""Defines an early termination policy that cancels a given percentage of runs at each evaluation interval.
All required parameters must be populated in order to send to Azure.
:ivar delay_evaluation: Number of intervals by which to delay the first evaluation.
:vartype delay_evaluation: int
:ivar evaluation_interval: Interval (number of runs) between policy evaluations.
:vartype evaluation_interval: int
:ivar policy_type: Required. [Required] Name of policy configuration.Constant filled by server.
Possible values include: "Bandit", "MedianStopping", "TruncationSelection".
:vartype policy_type: str or
~azure.mgmt.machinelearningservices.models.EarlyTerminationPolicyType
:ivar truncation_percentage: The percentage of runs to cancel at each evaluation interval.
:vartype truncation_percentage: int
"""
_validation = {
'policy_type': {'required': True},
}
_attribute_map = {
'delay_evaluation': {'key': 'delayEvaluation', 'type': 'int'},
'evaluation_interval': {'key': 'evaluationInterval', 'type': 'int'},
'policy_type': {'key': 'policyType', 'type': 'str'},
'truncation_percentage': {'key': 'truncationPercentage', 'type': 'int'},
}
def __init__(
self,
*,
delay_evaluation: Optional[int] = 0,
evaluation_interval: Optional[int] = 0,
truncation_percentage: Optional[int] = 0,
**kwargs
):
"""
:keyword delay_evaluation: Number of intervals by which to delay the first evaluation.
:paramtype delay_evaluation: int
:keyword evaluation_interval: Interval (number of runs) between policy evaluations.
:paramtype evaluation_interval: int
:keyword truncation_percentage: The percentage of runs to cancel at each evaluation interval.
:paramtype truncation_percentage: int
"""
super(TruncationSelectionPolicy, self).__init__(delay_evaluation=delay_evaluation, evaluation_interval=evaluation_interval, **kwargs)
self.policy_type = 'TruncationSelection' # type: str
self.truncation_percentage = truncation_percentage
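# Illustrative usage sketch (editorial note, not part of the generated SDK): a policy that,
# after skipping the first 5 evaluations, cancels the worst-performing 20% of runs at every
# subsequent evaluation interval:
#
#   policy = TruncationSelectionPolicy(
#       delay_evaluation=5, evaluation_interval=1, truncation_percentage=20)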
class UriFileDataVersion(DataVersionBaseDetails):
"""uri-file data version entity.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_anonymous: If the name version are system generated (anonymous registration).
:vartype is_anonymous: bool
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar data_type: Required. [Required] Specifies the type of data.Constant filled by server.
Possible values include: "UriFile", "UriFolder", "MLTable".
:vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
:ivar data_uri: Required. [Required] Uri of the data. Usage/meaning depends on
Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20220201Preview.Assets.DataVersionBase.DataType.
:vartype data_uri: str
"""
_validation = {
'data_type': {'required': True},
'data_uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_anonymous': {'key': 'isAnonymous', 'type': 'bool'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'data_type': {'key': 'dataType', 'type': 'str'},
'data_uri': {'key': 'dataUri', 'type': 'str'},
}
def __init__(
self,
*,
data_uri: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_anonymous: Optional[bool] = False,
is_archived: Optional[bool] = False,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_anonymous: If the name version are system generated (anonymous registration).
:paramtype is_anonymous: bool
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword data_uri: Required. [Required] Uri of the data. Usage/meaning depends on
Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20220201Preview.Assets.DataVersionBase.DataType.
:paramtype data_uri: str
"""
super(UriFileDataVersion, self).__init__(description=description, properties=properties, tags=tags, is_anonymous=is_anonymous, is_archived=is_archived, data_uri=data_uri, **kwargs)
self.data_type = 'UriFile' # type: str
class UriFileJobInput(JobInput, AssetJobInput):
"""UriFileJobInput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:ivar uri: Required. [Required] Input Asset URI.
:vartype uri: str
:ivar description: Description for the input.
:vartype description: str
:ivar job_input_type: Required. [Required] Specifies the type of job.Constant filled by server.
Possible values include: "Literal", "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
"""
_validation = {
'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'job_input_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_input_type': {'key': 'jobInputType', 'type': 'str'},
}
def __init__(
self,
*,
uri: str,
mode: Optional[Union[str, "InputDeliveryMode"]] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:keyword uri: Required. [Required] Input Asset URI.
:paramtype uri: str
:keyword description: Description for the input.
:paramtype description: str
"""
super(UriFileJobInput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.job_input_type = 'UriFile' # type: str
self.description = description
self.job_input_type = 'UriFile' # type: str
class UriFileJobOutput(JobOutput, AssetJobOutput):
"""UriFileJobOutput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: Required. [Required] Specifies the type of job.Constant filled by
server. Possible values include: "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
"""
_validation = {
'job_output_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
}
def __init__(
self,
*,
mode: Optional[Union[str, "OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
:keyword description: Description for the output.
:paramtype description: str
"""
super(UriFileJobOutput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.job_output_type = 'UriFile' # type: str
self.description = description
self.job_output_type = 'UriFile' # type: str
class UriFolderDataVersion(DataVersionBaseDetails):
"""uri-folder data version entity.
All required parameters must be populated in order to send to Azure.
:ivar description: The asset description text.
:vartype description: str
:ivar properties: The asset property dictionary.
:vartype properties: dict[str, str]
:ivar tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:vartype tags: dict[str, str]
:ivar is_anonymous: If the name version are system generated (anonymous registration).
:vartype is_anonymous: bool
:ivar is_archived: Is the asset archived?.
:vartype is_archived: bool
:ivar data_type: Required. [Required] Specifies the type of data.Constant filled by server.
Possible values include: "UriFile", "UriFolder", "MLTable".
:vartype data_type: str or ~azure.mgmt.machinelearningservices.models.DataType
:ivar data_uri: Required. [Required] Uri of the data. Usage/meaning depends on
Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20220201Preview.Assets.DataVersionBase.DataType.
:vartype data_uri: str
"""
_validation = {
'data_type': {'required': True},
'data_uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
}
_attribute_map = {
'description': {'key': 'description', 'type': 'str'},
'properties': {'key': 'properties', 'type': '{str}'},
'tags': {'key': 'tags', 'type': '{str}'},
'is_anonymous': {'key': 'isAnonymous', 'type': 'bool'},
'is_archived': {'key': 'isArchived', 'type': 'bool'},
'data_type': {'key': 'dataType', 'type': 'str'},
'data_uri': {'key': 'dataUri', 'type': 'str'},
}
def __init__(
self,
*,
data_uri: str,
description: Optional[str] = None,
properties: Optional[Dict[str, str]] = None,
tags: Optional[Dict[str, str]] = None,
is_anonymous: Optional[bool] = False,
is_archived: Optional[bool] = False,
**kwargs
):
"""
:keyword description: The asset description text.
:paramtype description: str
:keyword properties: The asset property dictionary.
:paramtype properties: dict[str, str]
:keyword tags: A set of tags. Tag dictionary. Tags can be added, removed, and updated.
:paramtype tags: dict[str, str]
:keyword is_anonymous: If the name version are system generated (anonymous registration).
:paramtype is_anonymous: bool
:keyword is_archived: Is the asset archived?.
:paramtype is_archived: bool
:keyword data_uri: Required. [Required] Uri of the data. Usage/meaning depends on
Microsoft.MachineLearning.ManagementFrontEnd.Contracts.V20220201Preview.Assets.DataVersionBase.DataType.
:paramtype data_uri: str
"""
super(UriFolderDataVersion, self).__init__(description=description, properties=properties, tags=tags, is_anonymous=is_anonymous, is_archived=is_archived, data_uri=data_uri, **kwargs)
self.data_type = 'UriFolder' # type: str
class UriFolderJobInput(JobInput, AssetJobInput):
"""UriFolderJobInput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:ivar uri: Required. [Required] Input Asset URI.
:vartype uri: str
:ivar description: Description for the input.
:vartype description: str
:ivar job_input_type: Required. [Required] Specifies the type of job.Constant filled by server.
Possible values include: "Literal", "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_input_type: str or ~azure.mgmt.machinelearningservices.models.JobInputType
"""
_validation = {
'uri': {'required': True, 'pattern': r'[a-zA-Z0-9_]'},
'job_input_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_input_type': {'key': 'jobInputType', 'type': 'str'},
}
def __init__(
self,
*,
uri: str,
mode: Optional[Union[str, "InputDeliveryMode"]] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Input Asset Delivery Mode. Possible values include: "ReadOnlyMount",
"ReadWriteMount", "Download", "Direct", "EvalMount", "EvalDownload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.InputDeliveryMode
:keyword uri: Required. [Required] Input Asset URI.
:paramtype uri: str
:keyword description: Description for the input.
:paramtype description: str
"""
super(UriFolderJobInput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.job_input_type = 'UriFolder' # type: str
self.description = description
self.job_input_type = 'UriFolder' # type: str
class UriFolderJobOutput(JobOutput, AssetJobOutput):
"""UriFolderJobOutput.
All required parameters must be populated in order to send to Azure.
:ivar mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:vartype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:ivar uri: Output Asset URI.
:vartype uri: str
:ivar description: Description for the output.
:vartype description: str
:ivar job_output_type: Required. [Required] Specifies the type of job.Constant filled by
server. Possible values include: "UriFile", "UriFolder", "MLTable", "CustomModel",
"MLFlowModel", "TritonModel".
:vartype job_output_type: str or ~azure.mgmt.machinelearningservices.models.JobOutputType
"""
_validation = {
'job_output_type': {'required': True},
}
_attribute_map = {
'mode': {'key': 'mode', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'job_output_type': {'key': 'jobOutputType', 'type': 'str'},
}
def __init__(
self,
*,
mode: Optional[Union[str, "OutputDeliveryMode"]] = None,
uri: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword mode: Output Asset Delivery Mode. Possible values include: "ReadWriteMount", "Upload".
:paramtype mode: str or ~azure.mgmt.machinelearningservices.models.OutputDeliveryMode
:keyword uri: Output Asset URI.
:paramtype uri: str
:keyword description: Description for the output.
:paramtype description: str
"""
super(UriFolderJobOutput, self).__init__(description=description, mode=mode, uri=uri, **kwargs)
self.mode = mode
self.uri = uri
self.job_output_type = 'UriFolder' # type: str
self.description = description
self.job_output_type = 'UriFolder' # type: str
class UserAssignedIdentity(msrest.serialization.Model):
"""User assigned identity properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal ID of the assigned identity.
:vartype principal_id: str
:ivar client_id: The client ID of the assigned identity.
:vartype client_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(UserAssignedIdentity, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
class UserIdentity(IdentityConfiguration):
"""User identity configuration.
All required parameters must be populated in order to send to Azure.
:ivar identity_type: Required. [Required] Specifies the type of identity framework.Constant
filled by server. Possible values include: "Managed", "AMLToken", "UserIdentity".
:vartype identity_type: str or
~azure.mgmt.machinelearningservices.models.IdentityConfigurationType
"""
_validation = {
'identity_type': {'required': True},
}
_attribute_map = {
'identity_type': {'key': 'identityType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(UserIdentity, self).__init__(**kwargs)
self.identity_type = 'UserIdentity' # type: str
| [
"[email protected]"
] | |
d1ad790afa900acbcfab2baee1841aa7e9952ce7 | 4926535b3e0d2fe8a80bf231f72347abff6dcee4 | /experiments/spatial.py | 31164bbbb7f0f2a8b3630e61e75e76ba06247078 | [] | no_license | yangarbiter/blindguess | 1f2a34b0eb30fc53ab9be5c01fa1c184b8eb2ff9 | 86e49749ba62132a263dfea3743513b23895b03e | refs/heads/master | 2021-01-16T10:17:51.582682 | 2020-02-25T18:43:31 | 2020-02-25T18:43:31 | 243,076,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,862 | py | import os
import logging
import torch
from bistiming import Stopwatch
from mkdir_p import mkdir_p
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from .utils import set_random_seed, load_model
from lolip.utils import estimate_local_lip_v2
from lolip.variables import get_file_name
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.WARNING, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)
def run_spatial(auto_var):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
_ = set_random_seed(auto_var)
#norm = auto_var.get_var("norm")
trnX, trny, tstX, tsty = auto_var.get_var("dataset")
lbl_enc = OneHotEncoder(categories=[np.sort(np.unique(trny))], sparse=False).fit(trny.reshape(-1, 1))
auto_var.set_intermidiate_variable("lbl_enc", lbl_enc)
n_classes = len(np.unique(trny))
n_channels = trnX.shape[-1]
result = {}
#multigpu = True if len(trnX) > 90000 and torch.cuda.device_count() > 1 else False
multigpu = False
try:
model_path, model = load_model(
auto_var, trnX, trny, tstX, tsty, n_channels, model_dir="./models/experiment01", device=device)
model.model.to(device)
result['model_path'] = model_path
    except Exception:
        # `model` is unbound if load_model itself raised, so guard the cleanup.
        if 'model' in locals():
            del model
        logger.info("Model not trained yet, retrain the model")
mkdir_p("./models/experiment01")
result['model_path'] = os.path.join(
'./models/experiment01', get_file_name(auto_var) + '-ep%04d.pt')
result['model_path'] = result['model_path'].replace(
auto_var.get_variable_name("attack"), "pgd")
model = auto_var.get_var("model", trnX=trnX, trny=trny, multigpu=multigpu,
n_channels=n_channels, device=device)
model.tst_ds = (tstX, tsty)
with Stopwatch("Fitting Model", logger=logger):
history = model.fit(trnX, trny)
model.save(result['model_path'])
result['model_path'] = result['model_path'] % model.epochs
result['history'] = history
result['trn_acc'] = (model.predict(trnX) == trny).mean()
result['tst_acc'] = (model.predict(tstX) == tsty).mean()
print(f"train acc: {result['trn_acc']}")
print(f"test acc: {result['tst_acc']}")
attack_model = auto_var.get_var("attack", model=model, n_classes=n_classes)
with Stopwatch("Attacking Train", logger=logger):
adv_trnX = attack_model.perturb(trnX, trny)
with Stopwatch("Attacking Test", logger=logger):
adv_tstX = attack_model.perturb(tstX, tsty)
result['adv_trn_acc'] = (model.predict(adv_trnX) == trny).mean()
result['adv_tst_acc'] = (model.predict(adv_tstX) == tsty).mean()
print(f"adv trn acc: {result['adv_trn_acc']}")
print(f"adv tst acc: {result['adv_tst_acc']}")
print(result)
return result
| [
"[email protected]"
] | |
eda928275e1b9cc5238c2f7b986b78ac46b584e3 | 313203cd01705e08bc8967246bfeacb7fa5cd6c9 | /Untitled Folder 2/newpython/strings.py | f45458de85baa2979b96b808132d94e5001b35d1 | [] | no_license | Nitesh101/Nitesh_old_backup | 17d9c8e8f9694c0ef4d995de7cbb694080523b22 | a0b0263e204f2f46c51e6f20990024396eb0ccb7 | refs/heads/master | 2020-03-19T20:51:51.587343 | 2018-09-21T09:42:11 | 2018-09-21T09:42:11 | 136,919,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | #!/usr/bin/python
"""
str = 'Votarytech Learning Center'
print str.replace("Center","1")
print str.find("L")
print str.update('4','8')
print str[0:8],str[10::]
print str[1:20]
"""
list = ["this","is","python"]
print "To find index of list : "
print list.index("is")
list = ["this","is","python"]
print "To remove element from list : "
list.remove("is")
print list
list = ["this","is","python"]
print "To add elements in a list: "
list = list+["for"]
print list
list = ["this","is","python"]
print "To remove the last element from the list: "
list.pop()
print list
list = ["this","is","python"]
print "To add an element at a specific index: "
list.insert(2,2009)
print list
list = ["this","is","python"]
print "delete a specific element in list: "
del list[1];
print list
list = ["this","is","python"]
print "To print element in multiple times : "
list = ["this"] * 4
print list
list = ["this","is","python"]
list.reverse()
print list
list = [5,6,6,7,8,1]
list.sort()
print list
list = [12,'nitesh','python',345]
list1 = [2009,'language']
list.extend(list1)
print "Extended list: ",list
dict = {}
dict['one'] = "this is one"
dict[3] = "this is two"
dict = {'sep':'sales','code':876,'name':'nitesh','dep':'it'}
print dict
a = (1,2,3,"nitesh")
# the name "list" was reassigned above, so delete it to restore the built-in list()
del list
print list(a)
| [
"[email protected]"
] | |
2b690281a2c912dd8a40b26f3e3d47ba473e10c7 | 28691ec55ebce9ec7045d12ea9675932ce12d671 | /py2rhino-project/branches/sandbox2/py2rhino/_make/data/parser_out/utility/sleep.py | 747d42de5841701c0bad6df9d9e9bcd690ec7cdc | [] | no_license | ianclarksmith/design-automation | 1e71315193effc0c18b4a8b41300bda6f41a3f09 | e27cc028fe582395f4a62f06697137867bb0fc33 | refs/heads/master | 2020-04-22T22:28:39.385395 | 2009-10-26T02:48:37 | 2009-10-26T02:48:37 | 37,266,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | sleep = {
"input_folder_name": "Utility_Methods",
"input_file_name": "Sleep",
"output_package_name": "utility",
"output_module_name": "sleep",
"doc_html": """
Suspends the execution of a running script for the specified interval.
""",
"syntax_html": {
0: ("lngMilliseconds"),
},
"params_html": {
0: {
"name": "lngMilliseconds",
"py_name": "milliseconds",
"opt_or_req": "Required",
"type": "Number",
"name_prefix": "lng",
"name_main": "Milliseconds",
"doc": """
The duration in milliseconds.
"""
},
},
"returns_html": {
0: {
"type": "null",
"doc": "If successful, or on error."
},
},
"id_com": 248,
"params_com": {
0: {
"name": "vaTime",
"opt_or_req": "Required",
"type": "tagVARIANT",
},
},
"returns_com": "tagVARIANT",
}
| [
"patrick.ht.janssen@d56020b2-6ac5-11de-89a9-0b20f3e2dceb"
] | patrick.ht.janssen@d56020b2-6ac5-11de-89a9-0b20f3e2dceb |
809efa968327ec2b765eba788b2c0355bfbae597 | 07c5656f004b6a444e22ff7b4c3b6802d027f759 | /week_3/class_0228/learn_import.py | fa67da2fb5f09d290d59e56d72ed750a06f0a45b | [] | no_license | EuniceHu/python15_api_test | de2a0f0bec8057edb27c8d1f82a438da3e9c105c | 1313e56ddfa67a2490e703a1a5ef4a6967565849 | refs/heads/master | 2020-05-20T13:30:41.686327 | 2019-05-14T11:00:52 | 2019-05-14T11:00:52 | 185,599,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | #-*- coding:utf-8 _*-
"""
@author:小胡
@file: learn_import.py
@time: 2019/03/02
"""
# 导入module_import模块
# import week_3.class_0228.module_import
# print(week_3.class_0228.module_import.name) | [
"[email protected]"
] | |
4b0b8b40b54edefa3452bf0836965eec78601e79 | 0c9e8b42a1e0a6f010a6c38489c7c96b3b783991 | /photo/tests.py | 57ff49f4b1663ad73d11612a2994277554b8212c | [
"MIT"
] | permissive | Derrick-Nyongesa/Photo-Gallery | 4690ff3bd427415236fd5147188e2f452b87d487 | bbd8774bf7e8d1f9f32aa9e02d12af20e8cb0e70 | refs/heads/main | 2023-05-02T20:54:58.462766 | 2021-05-17T07:05:06 | 2021-05-17T07:05:06 | 366,949,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,422 | py | from django.test import TestCase
from .models import Image,Category,Location
# Create your tests here.
class TestImage(TestCase):
def setUp(self):
self.location = Location(name='Nairobi')
self.location.save_location()
self.category = Category(name='food')
self.category.save_category()
self.image_test = Image(id=1, name='image', description='this is a test image', location=self.location,
category=self.category)
def test_instance(self):
self.assertTrue(isinstance(self.image_test, Image))
def test_save_image(self):
self.image_test.save_image()
after = Image.objects.all()
self.assertTrue(len(after) > 0)
def test_delete_image(self):
self.image_test.delete_image()
images = Image.objects.all()
self.assertTrue(len(images) == 0)
def test_update_image(self):
self.image_test.save_image()
self.image_test.update_image(self.image_test.id, 'photos/test.jpg')
changed_img = Image.objects.filter(image='photos/test.jpg')
self.assertTrue(len(changed_img) > 0)
def test_get_image_by_id(self):
found_image = self.image_test.get_image_by_id(self.image_test.id)
image = Image.objects.filter(id=self.image_test.id)
self.assertTrue(found_image, image)
def test_search_image_by_location(self):
self.image_test.save_image()
found_images = self.image_test.filter_by_location(location='Nairobi')
self.assertTrue(len(found_images) == 1)
    def test_search_image_by_category(self):
        self.image_test.save_image()
        category = 'food'
        found_img = self.image_test.search_by_category(category)
        self.assertTrue(len(found_img) > 0)
def tearDown(self):
Image.objects.all().delete()
Location.objects.all().delete()
Category.objects.all().delete()
class TestLocation(TestCase):
def setUp(self):
self.location = Location(name='Nairobi')
self.location.save_location()
def test_instance(self):
self.assertTrue(isinstance(self.location, Location))
def test_save_location(self):
self.location.save_location()
locations = Location.get_locations()
self.assertTrue(len(locations) > 0)
def test_get_locations(self):
self.location.save_location()
locations = Location.get_locations()
self.assertTrue(len(locations) > 1)
def test_update_location(self):
new_location = 'America'
self.location.update_location(self.location.id, new_location)
changed_location = Location.objects.filter(name='America')
self.assertTrue(len(changed_location) > 0)
def test_delete_location(self):
self.location.delete_location()
location = Location.objects.all()
self.assertTrue(len(location) == 0)
class CategoryTestClass(TestCase):
def setUp(self):
self.category = Category(name='home')
self.category.save_category()
def test_instance(self):
self.assertTrue(isinstance(self.category, Category))
def test_save_category(self):
self.category.save_category()
categories = Category.objects.all()
self.assertTrue(len(categories) > 0)
def test_delete_category(self):
self.category.delete_category()
category = Category.objects.all()
self.assertTrue(len(category) == 0)
| [
"[email protected]"
] | |
64a6de091d8defdd83efab73dcacb5a3d0e4d9a2 | e50244b666bc6af028beb3c1a83de70e7c61edd0 | /L1/L1.py | e70b76e021489eff37ea41e71b021b4c15c88804 | [] | no_license | justaleaf/ML-2020 | 21e9b3026b1dd25d0727e9868e026eca7a3301c8 | 234c2a5e16dcc2d1f71483f57ec3c806cd6fdff0 | refs/heads/main | 2023-01-24T12:32:25.070338 | 2020-11-05T11:57:05 | 2020-11-05T11:57:05 | 305,983,518 | 0 | 0 | null | 2020-10-21T10:08:33 | 2020-10-21T10:08:32 | null | UTF-8 | Python | false | false | 5,384 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from tabulate import tabulate
from IPython.core.debugger import set_trace
from IPython.display import display
from matplotlib import pyplot as plt
from sklearn import preprocessing
# In[2]:
df = pd.read_csv('../data/heart_failure_clinical_records_dataset.csv')
df = df.drop(columns=['anaemia','diabetes','high_blood_pressure','sex','smoking','time','DEATH_EVENT'])
display(df)
# In[3]:
fig, axes = plt.subplots(2, 3, figsize=(12, 6))
n_bins = 20
axes[0, 0].hist(df['age'].values, bins = n_bins)
axes[0, 0].set_title('age')
axes[0, 1].hist(df['creatinine_phosphokinase'].values, bins = n_bins)
axes[0, 1].set_title('creatinine_phosphokinase')
axes[0, 2].hist(df['ejection_fraction'].values, bins = n_bins)
axes[0, 2].set_title('ejection_fraction')
axes[1, 0].hist(df['platelets'].values, bins = n_bins)
axes[1, 0].set_title('platelets')
axes[1, 1].hist(df['serum_creatinine'].values, bins = n_bins)
axes[1, 1].set_title('serum_creatinine')
axes[1, 2].hist(df['serum_sodium'].values, bins = n_bins)
axes[1, 2].set_title('serum_sodium')
fig.tight_layout()
plt.savefig('./img/hist-1.png')
plt.show()
# In[4]:
data = df.to_numpy(dtype='float')
# In[5]:
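# Standardization statistics (mean/std) are estimated from the first 150 rows only,
# then applied to the whole array below, i.e. train-set scaling reused on all data.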
scaler = preprocessing.StandardScaler().fit(data[:150,:])
data_scaled = scaler.transform(data)
# In[6]:
TITLES = ['age', 'creatinine_phosphokinase', 'ejection_fraction', 'platelets', 'serum_creatinine', 'serum_sodium']
def plot_data(data_scaled):
fig, axes = plt.subplots(2, 3, figsize=(12, 6))
ax_order = [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]
for i, ax_ind in enumerate(ax_order):
axes[ax_ind].hist(data_scaled[:,i], bins = n_bins)
axes[ax_ind].set_title(TITLES[i])
fig.tight_layout()
return fig
plot_data(data_scaled)
plt.savefig('./img/hist-2.png')
plt.show()
# In[7]:
def calc_metrics(data):
mean = [np.mean(col) for col in data.T]
std = [np.std(col) for col in data.T]
return mean, std
calc_metrics(data)
# In[8]:
def shorten(s):
if len(s) < 10:
return s
return s[:10] + '...'
mean_src, std_src = calc_metrics(data)
mean_sc, std_sc = calc_metrics(data_scaled)
scaler2 = preprocessing.StandardScaler()
data_scaled2 = scaler2.fit_transform(data)
mean_sc2, std_sc2 = calc_metrics(data_scaled2)
plot_data(data_scaled2)
plt.savefig('./img/hist-3.png')
plt.show()
header = ['Feature', *[shorten(t) for t in TITLES]]
table = [
    ['Mean (original)', *mean_src],
    ['Mean (standardized, first 150)', *mean_sc],
    ['Mean (standardized, first 150, scaler)', *scaler.mean_],
    ['Mean (standardized, full)', *mean_sc2],
    ['Mean (standardized, full, scaler)', *scaler2.mean_],
    ['Std (original)', *std_src],
    ['Std (standardized, first 150)', *std_sc],
    ['Std (standardized, first 150, scaler)', *[np.sqrt(v) for v in scaler.var_]],
    ['Std (standardized, full)', *std_sc2],
    ['Std (standardized, full, scaler)', *[np.sqrt(v) for v in scaler2.var_]]
]
latex_t1 = tabulate(table, headers=header, tablefmt='latex_booktabs', floatfmt=".4f")
with open('./output/t1.tex', 'w') as f:
f.write(latex_t1)
# In[9]:
min_max_scaler = preprocessing.MinMaxScaler()
min_max_data = min_max_scaler.fit_transform(data)
plot_data(min_max_data)
plt.savefig('./img/hist-min-max.png')
plt.show()
# In[10]:
header = ['Feature', 'Minimum', 'Maximum']
table = [
(title, min_, max_)
for title, min_, max_ in zip(TITLES, min_max_scaler.data_min_, min_max_scaler.data_max_)
]
latex_t2 = tabulate(table, headers=header, tablefmt='latex_booktabs')
with open('./output/t2.tex', 'w') as f:
f.write(latex_t2)
# In[11]:
max_abs_data = preprocessing.MaxAbsScaler().fit_transform(data)
robust_data = preprocessing.RobustScaler().fit_transform(data)
plot_data(max_abs_data)
plt.savefig('./img/hist-max-abs.png')
plt.show()
plot_data(robust_data)
plt.savefig('./img/hist-robust.png')
plt.show()
# In[12]:
def fit_5_10(data):
data = data.copy()
for col in range(data.shape[1]):
min_, max_ = np.min(data[:, col]), np.max(data[:, col])
data[:, col] = [(x - min_) / (max_ - min_) * 15 - 5 for x in data[:, col]]
return data
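# Editorial note: the same [-5, 10] rescaling can be obtained directly with sklearn,
# e.g. preprocessing.MinMaxScaler(feature_range=(-5, 10)).fit_transform(data)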
data_5_10 = fit_5_10(data)
plot_data(data_5_10)
plt.savefig('./img/hist-5-10.png')
plt.show()
# In[13]:
quantile_transformer = preprocessing.QuantileTransformer(n_quantiles=100, random_state=0)
quantile_data = quantile_transformer.fit_transform(data)
plot_data(quantile_data)
plt.savefig('./img/hist-quantile.png')
plt.show()
# In[14]:
quantile_normal_transformer = preprocessing.QuantileTransformer(n_quantiles=100, random_state=0, output_distribution='normal')
quantile_normal_data = quantile_normal_transformer.fit_transform(data)
plot_data(quantile_normal_data)
plt.savefig('./img/hist-quantile-normal.png')
plt.show()
# In[15]:
power_transformer = preprocessing.PowerTransformer()
power_data = power_transformer.fit_transform(data)
plot_data(power_data)
plt.savefig('./img/hist-power.png')
plt.show()
# In[16]:
est = preprocessing.KBinsDiscretizer(n_bins=[3, 4, 3, 10, 2, 4], encode='ordinal')
disc_data = est.fit_transform(data)
plot_data(disc_data)
plt.savefig('./img/hist-disc.png')
plt.show()
# In[ ]:
| [
"[email protected]"
] | |
a27316b1c3a96c08649b135d42376f0518fe896a | 73e147e1d49656fafba5d4bf84df5ded2c4dca73 | /team_9/cocos/test/test_layer_rotate.py | 686cddc35f9dbf85bfe18864a85263644c578451 | [
"LGPL-2.1-only",
"CC-BY-NC-4.0",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"CC-BY-NC-SA-2.0",
"BSD-3-Clause"
] | permissive | Donnyvdm/dojo19 | 2278747366c57bfc80eb9ee28ca617ec0a79bae3 | 3cf043a84e3ad6d3c4d59cd9c50b160e1ff03400 | refs/heads/master | 2020-07-26T12:22:15.882800 | 2019-09-15T20:34:36 | 2019-09-15T20:34:36 | 208,642,183 | 1 | 0 | BSD-3-Clause | 2019-09-15T18:57:53 | 2019-09-15T18:57:52 | null | UTF-8 | Python | false | false | 704 | py | from __future__ import division, print_function, unicode_literals
# This code is so you can run the samples without installing the package
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#
testinfo = "t 0.1, s, t 0.5, s, t 1.5, s, t 2.1, s, q"
tags = "Layer, RotateBy"
import cocos
from cocos.director import director
from cocos.actions import RotateBy
from cocos.layer import *
def main():
director.init()
main_scene = cocos.scene.Scene()
test_layer = ColorLayer(64,64,64,255)
test_layer.scale = 0.75
main_scene.add( test_layer )
test_layer.do( RotateBy( 360, 2 ) )
director.run (main_scene)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2dac7ea615bd751bd0c4315c3a059a77790feec3 | 4bc2af514877135a222826b2c5ac48632045f2fa | /jenkins/update_mysql.py | c05b16d304495222a0c459566afbcffa9cb12ba7 | [] | no_license | 18734865664/python | 1853481ac1dcd515f691cfc11557f76fbbb083de | 25bc355ddb2abefc5a3736fb99e6345138ebbefc | refs/heads/master | 2020-03-17T09:37:57.469741 | 2018-06-28T08:41:37 | 2018-06-28T08:41:37 | 133,482,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,007 | py | #! /usr/local/python3/bin/python3
# encoding: utf-8
import pymysql
import sys
sys.path.insert(0, '/data/nfs/python/learn_python/jenkins/')
from get_file_list import getFileList
from get_args import getJenkinsArgs
import get_args
class update_mysql():
def __init__(self):
self.mysql_host = "10.100.137.179"
self.mysql_user = "root"
self.mysql_pass = "123123"
self.mysql_port = "3306"
self.mysql_dbname = "jenkins_info"
def create_job_args_table(self):
# 实例化mysql
db = pymysql.connect(host = self.mysql_host, user = self.mysql_user, passwd = self.mysql_pass, port = int(self.mysql_port))
# 创建游标对象
cursor = db.cursor()
# 如果不存在就建库jenkins_info
try:
sql = "create database if not exists {};".format((self.mysql_dbname))
cursor.execute(sql)
except:
print("库已存在")
# 如果表不存在,则创建表
sql1 = "create table if not exists jenkins_info.{}( \
`job_name` VARCHAR(100) NOT NULL, \
`job_name_row` VARCHAR(100) NOT NULL, \
`branch_parents` VARCHAR(1000) NOT NULL DEFAULT 'NULL', \
`ftp_path` VARCHAR(100) NOT NULL DEFAULT 'NULL', \
`mvn_args`VARCHAR(50) NOT NULL DEFAULT '\[\"prod\"\]', \
`subitems_name` VARCHAR(200) NOT NULL DEFAULT 'NULL' \
);".format(("job_args"))
cursor.execute(sql1)
# 获取job列表
job_name_file_obj = getFileList("/data/nfs/jenkins/jobs/").get_file_list()
# 获取参数列表
for job_name in job_name_file_obj:
job_workspace_file = '/data/nfs/jenkins/jobs/' + job_name
job_config_file = job_workspace_file + "/config.xml"
obj = getJenkinsArgs(job_config_file, job_name)
if __name__ == "__main__":
obj = update_mysql()
obj.create_job_args_table()
| [
"[email protected]"
] | |
bd82bc93bbe5236c83258907b964786f94993fba | 78144baee82268a550400bbdb8c68de524adc68f | /Production/python/Fall17/StealthSHH_2t4b_mStop-1150_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8_cff.py | 1ac912d68f2b4a277560f5b985a265b3464358f5 | [] | no_license | tklijnsma/TreeMaker | e6989c03189b849aff2007bad22e2bfc6922a244 | 248f2c04cc690ef2e2202b452d6f52837c4c08e5 | refs/heads/Run2_2017 | 2023-05-26T23:03:42.512963 | 2020-05-12T18:44:15 | 2020-05-12T18:44:15 | 263,960,056 | 1 | 2 | null | 2020-09-25T00:27:35 | 2020-05-14T15:57:20 | null | UTF-8 | Python | false | false | 1,581 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIIFall17MiniAODv2/StealthSHH_2t4b_mStop-1150_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/230000/162217BB-1A08-EA11-B113-A0369FF882FA.root',
'/store/mc/RunIIFall17MiniAODv2/StealthSHH_2t4b_mStop-1150_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/230000/6475B18F-1908-EA11-BEAC-AC1F6B23C94A.root',
'/store/mc/RunIIFall17MiniAODv2/StealthSHH_2t4b_mStop-1150_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/230000/6E1E2A7C-1A08-EA11-ABB6-0CC47AFEFDF8.root',
'/store/mc/RunIIFall17MiniAODv2/StealthSHH_2t4b_mStop-1150_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/230000/72F1036C-1A08-EA11-A05D-EC0D9A822666.root',
'/store/mc/RunIIFall17MiniAODv2/StealthSHH_2t4b_mStop-1150_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/230000/AEA36076-1A08-EA11-8670-A0369FD0B228.root',
'/store/mc/RunIIFall17MiniAODv2/StealthSHH_2t4b_mStop-1150_mSo-100_TuneCP2_13TeV-madgraphMLM-pythia8/MINIAODSIM/PU2017_12Apr2018_94X_mc2017_realistic_v14-v1/230000/FC487843-1A08-EA11-8903-0CC47A4C8E3C.root',
] )
| [
"[email protected]"
] | |
98d038b91f190b29f651a48eb083b8182feee660 | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-cognitiveservices-vision-customvision/azure/cognitiveservices/vision/customvision/training/models/tag.py | b1eef24ccd54891a5c4e373cc43537a5efa21229 | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 1,527 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class Tag(Model):
"""Represents a Tag.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Gets the Tag ID
:vartype id: str
:param name: Gets or sets the name of the tag
:type name: str
:param description: Gets or sets the description of the tag
:type description: str
:ivar image_count: Gets the number of images with this tag
:vartype image_count: int
"""
_validation = {
'id': {'readonly': True},
'image_count': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'Id', 'type': 'str'},
'name': {'key': 'Name', 'type': 'str'},
'description': {'key': 'Description', 'type': 'str'},
'image_count': {'key': 'ImageCount', 'type': 'int'},
}
def __init__(self, name=None, description=None):
super(Tag, self).__init__()
self.id = None
self.name = name
self.description = description
self.image_count = None
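# Illustrative usage sketch (editorial note, not part of the SDK): tags are created
# client-side with a name and optional description; the server-populated fields
# (id, image_count) remain None until returned by the service.
#
#   tag = Tag(name="cat", description="images containing cats")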
| [
"[email protected]"
] | |
53a44bbbbdc69fae4e85ce4a0a8ddc355ef5e24e | 06cabd66791a5ee15bb3ba4b04d8bc8dea5bfda0 | /2016-08-16-UCNPs-SigmaAldrich-Penta/AnalysisPenta28kv.py | 611b41ba2d5d1f7b6d9b03779b15b19731053779 | [] | no_license | claiello/python_data_analysis | f7405dfd15f0dccd2089b1878af40b9d075071d2 | 0b8d3cc5717243e72214dc24a7fc823220e13179 | refs/heads/master | 2020-04-17T20:36:51.720891 | 2017-04-23T10:00:08 | 2017-04-23T10:00:08 | 66,181,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,202 | py | import os
import sys
sys.path.append("/usr/bin") # necessary for the tex fonts
sys.path.append("../Python modules/") # necessary for the tex fonts
import scipy as sp
import scipy.misc
import matplotlib
#matplotlib.use("Agg")
import matplotlib.pyplot as plt
import h5py
import numpy as np
from BackgroundCorrection import *
import matplotlib.cm as cm
import scipy.ndimage as ndimage
#from matplotlib_scalebar.scalebar import ScaleBar #### Has issue with plotting using latex font. only import when needed, then unimport
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.backends.backend_pdf import PdfPages
from MakePdf import *
from matplotlib.pyplot import cm #to plot following colors of rainbow
from matplotlib import rc
from CreateDatasets import *
import warnings
warnings.simplefilter(action = "ignore", category = RuntimeWarning)
warnings.simplefilter(action = "ignore", category = DeprecationWarning)
warnings.simplefilter(action = "ignore", category = FutureWarning)
warnings.simplefilter(action = "ignore", category = PendingDeprecationWarning)
from Registration import *
from tifffile import *
from sklearn.mixture import GMM
import matplotlib.cm as cm
from FluoDecay import *
from PlottingFcts import *
import matplotlib.animation as animation
import gc
import tempfile
from tempfile import TemporaryFile
#PARAMS THAT NEED TO BE CHANGED
###############################################################################
###############################################################################
###############################################################################
calc_blue = False
#Excitation is impulsive: 120 ns per pulse, turned on at point no. 80, off at point no. 82
#No_pixels = 250
Time_bin = 40 #in ns; 1/clock of 25MHz
nominal_time_on = 0.12 #time during which e-beam nominally on, in mus
totalpoints = 200 #total number of time-resolved points
### data
name = ['DoubleZoom.hdf5']
name_str = ['DoubleZoom.hdf5', 'DoubleZoomTR.hdf5']
#na = ['2','2','20','20','40','40','40','40','80','80','00']
#nl = ['A','B','A','B','A','B','A','B','A','B','A']
if calc_blue is False:
pmt = ['PMT red','PMT red','PMT red','PMT red','PMT red','PMT red','PMT red','PMT red','PMT red','PMT red','PMT red']
channel = ['1','1','1','1','1','1','1','1','1','1','1']
else:
pmt = ['PMT blue','PMT blue','PMT blue','PMT blue','PMT blue','PMT blue','PMT blue','PMT blue','PMT blue','PMT blue','PMT blue']
channel = ['2','2','2','2','2','2','2','2','2','2','2']
name_str = [s + 'Blue' for s in name_str]
ap = ['30']
Pixel_size = [3.101389e-09]
Ps = [3.1] #pixel size in nm, the numbers above with round nm precision
#no experiments to consider
No_experiments = 50 #only "up" in luminescence curve
tau_single = np.zeros(len(name))
tau_single_error = np.zeros(len(name))
tau_bi = np.zeros([len(name),2])
tau_bi_error = np.zeros([len(name),2])
#original
#for index in np.arange(11,11): #11):
index = 0
if index == 1:
#for Er60 only: np.arange(9,12)
#for index in np.arange(12,12):
print(index)
file1 = h5py.File('2016-08-16-2148_ImageSequence_SANP_36.000kX_2.800kV_30mu_16.hdf5', 'r')
file2 = h5py.File('2016-08-16-2208_ImageSequence_SANP_36.000kX_2.800kV_30mu_17.hdf5', 'r')
#file3 = h5py.File('PentaZoom50expts.hdf5', 'r')
titulo = 'Upconverting NPs (3kV, 30$\mu$m aperture, 40ns time bins, 36kX or 3.1nm pixels)'
se1_dset = file1['/data/Analog channel 1 : SE2/data'] #50 frames x250 x 250 pixels
red1_dset = file2['/data/Counter channel ' + channel[index] + ' : '+ pmt[index]+'/' + pmt[index] + ' time-resolved/data']#50 frames x 200 tr pts x250 x 250 pixels
#red1_dset50 = file3['/data/Counter channel ' + channel[index] + ' : '+ pmt[index]+'/' + pmt[index] + ' time-resolved/data']#50 frames x 200 tr pts x250 x 250 pixels
#red1_dset = np.append(red1_dset , red1_dset50, axis = 0)
print(red1_dset.shape)
#cut part of frames with ebeam off, here 25 first points
cut_at_beginning = 79
red1_dset = red1_dset[:,cut_at_beginning::,:,:]
#hack to go faster
fastfactor = 1
#se1_dset = se1_dset[0::fastfactor,:,:]
#red1_dset = red1_dset[0::fastfactor,:,:,:]
#no experiments to consider
#se1_dset = se1_dset[0:No_experiments[index]+1,:,:]
red1_dset = red1_dset[0:No_experiments+1,:,:,:]
#convert to smaller data types
se1_dset2 = np.array(se1_dset, dtype=np.float16)
red1_dset = np.array(red1_dset)
#convert red1_dset to kHz!!!!!!!!!!!!!!!!!!!!!1
red1_dset = red1_dset/1.0e3
red1_dset = np.array(red1_dset, dtype=np.float32) #converting the CL dset to float16 from float64 is creating infinities! bc max value is > 2^16
print('data loaded')
#PLOT EXPT BY EXPT BEHAVIOUR ON ALL PIXELS
###############################################################################
###############################################################################
###############################################################################
###FIG 1
#if index >= 8: #8,9,10,11: the 4 from Er 60%
major_ticks = [2,4,6]
#else:
#major_ticks = [5,15,25]
#plot_expt_by_expt_behaviour(titulo + r', all pixels', red1_dset, Time_bin, nominal_time_on,fastfactor,'r',major_ticks,dark_dset=None,plot_dark=False,unit='kHz') #pass titulo as well
#fig_no = 'Fig#1'
#multipage(name_str[index] + fig_no + '.pdf',dpi=80)
###END FIG1
#REGISTRATION OF CL CHANNEL ONTO SE CHANNEL
###############################################################################
###############################################################################
###############################################################################
#independently
#se1_dset_reg = reg_images(se1_dset)
#The code below registers time resolved data to itself, across frames. Each one of the tr points is registered to the same time point in other frames
#red_dset_reg_list = reg_images_tr(red1_dset) #list is across number of time resolved points #Does not work too well for this dset BUT TRY WITH OTHERS
#Future: make tr, say, of red register to time resolved, say, of blue
#!!!!!REAL Registration - UNCOMMENT! !!!!!achtung: is giving some errors with nan and inf!!!!! WHY???????
# se1_dset_reg, red1_dset_reg, red1_dset_reg_all = reg_time_resolved_images_to_se(se1_dset, red1_dset)
##red1_dset_reg, red1_dset_reg_all = reg_images(red1_dset)
##se1_dset_reg = np.array(se1_dset, dtype=np.float16)
#se1_dset_reg = np.array(se1_dset_reg, dtype=np.float16)
#red1_dset_reg = np.array(red1_dset_reg, dtype=np.float32)
#red1_dset_reg_all = np.array(red1_dset_reg_all, dtype=np.float32)
#second step of registerist CL using bright CL: does NOT cause errors!!!
#right now, remaining totalpoints - cut_at_beginning time resolved points, over all experiments
#25 dark (in reality, dark until 28)/ 50 bright / 125 transient
#cut arrays are 3 / 50 / 125
center_cl_index = 3 # (50 + 3)/2 # this index is going to be used as reference
#end_left_index = 0#not used for the time being
#end_right_index = 0#not used for the time being
#new = reg_images_middle_cl(red1_dset_reg,center_cl_index,0,0)
#red1_dset_reg = np.array(new, dtype=np.float32)
#!!!!!END OF REAL Registration - UNCOMMENT!
#print(np.sum(np.isnan(red1_dset_reg)))
#print(np.sum(np.isinf(red1_dset_reg)))
#klklklk
# ###MOCK REGISTRATION, QUICK FOR HACK ####################################!!!!!!!!!!!!!!!!!!!!
#se1_dset_reg, sth = reg_images(se1_dset)
#del sth
se1_dset_reg = np.average(se1_dset,axis=0) # #
red1_dset_reg = np.average(red1_dset, axis=0)
red1_dset_reg_all = np.array(red1_dset)
se1_dset_reg = np.array(se1_dset_reg, dtype=np.float16)
red1_dset_reg = np.array(red1_dset_reg, dtype=np.float32)
red1_dset_reg_all = np.array(red1_dset_reg_all, dtype=np.float32)
#second step
#center_cl_index = 3 # # this index is going to be used as reference, should be no 82 in original vector
# end_left_index = 0#not used for the time being
# end_right_index = 0#not used for the time being
# new = reg_images_middle_cl(red1_dset_reg,center_cl_index,0,0)
# red1_dset_reg = np.array(new, dtype=np.float32)
## end of mock registration
del se1_dset, red1_dset
file1.close()
file2.close()
#file3.close()
#CUT DATASETS TO SUBSHAPE
###############################################################################
###############################################################################
###############################################################################
### cut only inside window: these are the base images!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#trying circular mask at center a,b
#a, b = 247,255 #y was 255 x was 243
#n = blue_dset_reg.shape[0] #not square matrix anymore; does not matter, only approximatively
#r = 160 #was 170
#y,x = np.ogrid[-a:n-a, -b:n-b]
#mask = x*x + y*y <= r*r
# cutting channels
red1_dset_cut = red1_dset_reg #np.empty([blue_dset_reg.shape[0],blue_dset_reg.shape[1]])
red1_dset_cut_all = red1_dset_reg_all
#red_dset_cut[:] = np.nan
#red_dset_cut[mask] = red_dset_reg[mask]
se1_dset_cut = se1_dset_reg #np.empty([blue_dset_reg.shape[0],blue_dset_reg.shape[1]])
#se_dset_cut[:] = np.nan
#se_dset_cut[mask] = se_dset_reg[mask]
se1_dset_cut = np.array(se1_dset_cut, dtype=np.float16)
red1_dset_cut = np.array(red1_dset_cut, dtype=np.float32)
red1_dset_cut_all = np.array(red1_dset_cut_all, dtype=np.float32)
del red1_dset_reg, red1_dset_reg_all, se1_dset_reg
gc.collect()
mycode = 'tekvPentaSEchannel = tempfile.NamedTemporaryFile(delete=False)'
exec(mycode)
np.savez('tekvPentaSEchannel', data = se1_dset_cut)
#
mycode = 'tekvPentaRedbright = tempfile.NamedTemporaryFile(delete=False)'
exec(mycode)
np.savez('tekvPentaRedbright', data = red1_dset_cut_all)
# mycode = 'tekvPentaBluebright = tempfile.NamedTemporaryFile(delete=False)'
# exec(mycode)
# np.savez('tekvPentaBluebright', data = red1_dset_cut_all)
#
####mock
#start_of_transient = 83 #time-bin 75 + 300ns/40ns = 75 + ~8 = 83
#calcdecay(red1_dset_cut[start_of_transient::,:,:], time_detail= Time_bin*1e-9*fastfactor,titulo=r'Cathodoluminescence rate decay, \n ' + titulo + ', SE `signal\' pixels',other_dset1=red1_dset_cut[start_of_transient::,:,:] ,other_dset2=red1_dset_cut[start_of_transient::,:,:])
#multipage(name_str[index] + fig_no + '.pdf',dpi=80)
#klklklk
###############################################################################
###############################################################################
###############################################################################
####################################################################### OPTIONAL
#want_gaussian_filter_correction_blue = False
#want_gaussian_filter_correction_red = False
#
#if want_gaussian_filter_correction_blue:
# sigma_blue = 1
# blue_dset_cut1 = gaussian_filter_correction(blue_dset_cut, 'Blue',sigma_blue)
# blue_dset_cut = blue_dset_cut1
#
#if want_gaussian_filter_correction_red:
# sigma_red = 1
# red_dset_cut1 = gaussian_filter_correction(red_dset_cut, 'Red',sigma_red)
# red_dset_cut = red_dset_cut1
#
################################################################ END OF OPTIONAL
#
####################################################################### OPTIONAL
#### Suggested:
## 1- Blue True, 3, [0] + Red False
## 2 - Blue True, 3, [2] + Red False
## 3 - Blue True, 3, [0] + Red True, 21, [1]
## 4 - Blue True, 3, [2] + Red True, 21, [1]
## 5 - Blue False, Red False
#
#want_background_correction_blue = False
#want_background_correction_red = False
#
#filterset = ['white_tophat','black_tophat','medfilt']
#
#if want_background_correction_blue:
# # Available algo types:
# # 'white_tophat' -> needs to change disk size
# # 'black_tophat' -> needs to change disk size
# # 'medfilt' -> needs to changer kernel size
#
# # New base dsets: blue_dset_cut, red_dset_cut
# size_blue = 3
# blue_dset_cut1 = background_correction(blue_dset_cut, filterset[0], 'Blue',size_blue)
# #blue_dset_cut2 = background_correction(blue_dset_cut, filterset[1], 'Blue',size_blue)
# blue_dset_cut3 = background_correction(blue_dset_cut, filterset[2], 'Blue',size_blue)
# #both [0] and [2] acceptable; min size_blue that makes sense = 3
#
# blue_dset_cut = blue_dset_cut1 #1 or 3
#
#if want_background_correction_red:
# size_red = 21
# #red_dset_cut1 = background_correction(red_dset_cut, filterset[0], 'Red',size_red)
# red_dset_cut2 = background_correction(red_dset_cut, filterset[1], 'Red',size_red)
# #red_dset_cut3 = background_correction(red_dset_cut, filterset[2], 'Red',size_red)
# # [1] can be good. Or no correction.
# red_dset_cut = red_dset_cut2
#
################################################################ END OF OPTIONAL
#
####TEST OTHER SEGMENTATION MODELS FOR SE, AND PLOT SE HISTOGRAM + SEGMENTATION IN THE FUTURE!!!!!
##plt.close("all")
#
#from CreateDatasets import *
#
#do_avg_dset = False
#do_median_dset = False
#do_arb_thr_one = False
do_gmmse_dset = True
#do_gmmboth_dset = False
#do_threshold_adaptive = False
#do_random_walker = False
#do_otsu = False
#
#### construct different datasets
#### 1) Simple average
#if do_avg_dset:
# print('doing avg')
# below_blue, above_blue, below_red, above_red = above_below_avg(blue_dset_cut, red_dset_cut)
# do_analysis(blue_dset_cut, red_dset_cut, below_blue, above_blue, below_red, above_red, 'YAP', 'Chlor','Above/Below avg', 'below avg', 'above avg',Pixel_size)
#
#### 1) Simple median
#if do_median_dset:
# print('doing median')
# belowm_blue, abovem_blue, belowm_red, abovem_red = above_below_median(blue_dset_cut, red_dset_cut)
# do_analysis(blue_dset_cut, red_dset_cut, belowm_blue, abovem_blue, belowm_red, abovem_red, 'YAP', 'Chlor','Above/Below median', 'below median', 'above median',Pixel_size)
#
#### 1) Arb thresh in red
#if do_arb_thr_one:
# print('doing arb thres')
# arb_threshold = 0.6 #fraction of max
# belowarb_blue, abovearb_blue, belowarb_red, abovearb_red = arb_thr_one(red_dset_cut, blue_dset_cut, arb_threshold)
# do_analysis(blue_dset_cut, red_dset_cut, belowarb_blue, abovearb_blue, belowarb_red, abovearb_red, 'YAP', 'Chlor','Above/Below arb thr = ' + str(arb_threshold) + ' of red max', 'below red thr', 'above red thr',Pixel_size)
###############################################################################
###############################################################################
###############################################################################
### 2) GMM with red mask, where red has been recognized as fluorescence
if do_gmmse_dset:
print('doing gmm se')
#Original
#gmmred_blue_dark_dset, gmmred_blue_bright_dset, gmmred_red_dark_dset, gmmred_red_bright_dset = gmmone(red_dset_cut, blue_dset_cut)
#do_analysis(blue_dset_cut, red_dset_cut, gmmred_blue_dark_dset, gmmred_blue_bright_dset, gmmred_red_dark_dset, gmmred_red_bright_dset, 'YAP', 'Chlor','GMM red', 'red dark spots', 'red bright spots',Pixel_size)
#Version for time-resolved
gc.collect()
gmmse_red1_darkse_dset, gmmse_red1_brightse_dset, gmmse_se1_dark_dset, gmmse_se1_bright_dset, darkse_pct, brightse_pct, means, covars, weights = gmmone_tr_in_masked_channel(se1_dset_cut, red1_dset_cut)
#gmmse_red1_darkse_dset, gmmse_red1_brightse_dset, gmmse_se1_dark_dset, gmmse_se1_bright_dset, darkse_pct, brightse_pct = thr_otsu_tr_in_masked_channel(se1_dset_cut, red1_dset_cut)
mycode = 'tekvPentaSEchannelGMM = tempfile.NamedTemporaryFile(delete=False)'
exec(mycode)
np.savez('tekvPentaSEchannelGMM', bright = gmmse_se1_bright_dset, means = means, covars = covars, weights = weights)
#klklklk
#do_ana0lysis(red1_dset_cut, se1_dset_cut, gmmse_red1_dark_dset, gmmse_red1_bright_dset, gmmse_se1_dark_dset, gmmse_se1_bright_dset, 'CL', 'SE','GMM SE', 'SE dark spots', 'SE bright spots',Pixel_size)
#only for plots of intensity
del gmmse_se1_dark_dset#, gmmse_se1_bright_dset
gmmse_red1_darkse_dset = np.array(gmmse_red1_darkse_dset, dtype=np.float32)
gmmse_red1_brightse_dset = np.array(gmmse_red1_brightse_dset, dtype=np.float32)
gmmse_se1_bright_dset = np.array(gmmse_se1_bright_dset, dtype=np.float32)
gc.collect()
gmmse_red1_darkse_dset_for_4D, gmmse_red1_brightse_dset_for_4D, blah, blup, darkse_pct2, brightse_pct2 = gmmone_tr_in_masked_channel(se1_dset_cut, red1_dset_cut_all, imagemasked_is_4D=True)
#gmmse_red1_darkse_dset_for_4D, gmmse_red1_brightse_dset_for_4D, blah, blup, darkse_pct2, brightse_pct2 = thr_otsu_tr_in_masked_channel(se1_dset_cut, red1_dset_cut_all, imagemasked_is_4D=True)
# mycode = 'Redbright = tempfile.NamedTemporaryFile(delete=False)'
# exec(mycode)
# np.savez('Redbright', data = gmmse_red1_brightse_dset_for_4D/brightse_pct2)
# mycode = 'Bluebright = tempfile.NamedTemporaryFile(delete=False)'
# exec(mycode)
# np.savez('Bluebright', data = gmmse_red1_brightse_dset_for_4D/brightse_pct2)
del blah, blup, darkse_pct2, brightse_pct2 #delete all SE masks
gc.collect()
gmmse_red1_darkse_dset_for_4D = np.array(gmmse_red1_darkse_dset_for_4D, dtype=np.float32)
gmmse_red1_brightse_dset_for_4D = np.array(gmmse_red1_brightse_dset_for_4D, dtype=np.float32)
else:
print('NOT doing gmm se') #assume all is bright in CL
gmmse_red1_brightse_dset = red1_dset_cut
gmmse_red1_darkse_dset = 0*red1_dset_cut #or could give 0 vector
darkse_pct = 1.0
brightse_pct = 0.0
gmmse_red1_darkse_dset_for_4D = np.array(red1_dset_cut_all, dtype=np.float32)
gmmse_red1_brightse_dset_for_4D = np.array(red1_dset_cut_all, dtype=np.float32)
# mycode = 'Bluebright = tempfile.NamedTemporaryFile(delete=False)'
# exec(mycode)
# np.savez('Bluebright', data = gmmse_red1_brightse_dset_for_4D/brightse_pct)
###############################################################################
###############################################################################
###############################################################################
#### 3) GMM with independent masks in both channels
#if do_gmmboth_dset:
# print('doing gmm both')
# gmmboth_blue_dark_dset, gmmboth_blue_bright_dset, gmmboth_red_dark_dset, gmmboth_red_bright_dset = gmmboth(red_dset_cut, blue_dset_cut)
# do_analysis(blue_dset_cut, red_dset_cut, gmmboth_blue_dark_dset, gmmboth_blue_bright_dset, gmmboth_red_dark_dset, gmmboth_red_bright_dset, 'YAP', 'Chlor','GMM both', 'dark spots', 'bright spots',Pixel_size)
#
#### 4) Threshold adapative
#if do_threshold_adaptive:
# print('doing thr adap')
# blocksize = 50
# offset = 0
# th_below_blue, th_above_blue, th_below_red, th_above_red = threshold_adaptive_dset(red_dset_cut, blue_dset_cut,blocksize, offset)
# do_analysis(blue_dset_cut, red_dset_cut, th_below_blue, th_above_blue, th_below_red, th_above_red, 'YAP', 'Chlor','Threshold adaptive' + '(blocksize, offset =' + str(blocksize) + ', ' + str(offset) + ')', 'below thr', 'above thr',Pixel_size)
#
#### 5) random_walker not yet working
### http://scikit-image.org/docs/dev/auto_examples/segmentation/plot_random_walker_segmentation.html#example-segmentation-plot-random-walker-segmentation-py
#if do_random_walker:
# print('doing random walk')
# cutofflow = 0.89
# cutoffhigh = 0.9
# rw_below_blue, rw_above_blue, rw_below_red, rw_above_red = random_walker_dset(red_dset_cut, blue_dset_cut,cutofflow, cutoffhigh)
# do_analysis(blue_dset_cut, red_dset_cut, rw_below_blue, rw_above_blue, rw_below_red, rw_above_red, 'YAP', 'Chlor','Random walker'+ '(cutoffs high, low =' + str(cutoffhigh) + ', ' + str(cutofflow) + ')', 'background', 'foreground',Pixel_size)
#
#### 6) Otsu thresholding
#if do_otsu:
# print('doing otsu')
# ot_below_blue, ot_above_blue, ot_below_red, ot_above_red = thr_otsu(red_dset_cut, blue_dset_cut)
# do_analysis(blue_dset_cut, red_dset_cut, ot_below_blue, ot_above_blue, ot_below_red, ot_above_red, 'YAP', 'Chlor','Otsu threshold', 'background', 'foreground',Pixel_size)
#
#log_dog_doh(blue_dset_cut)
#log_dog_doh(blue_dset_cut)
########### OUTPUT
###### HERE: I have red1_dset_cut, red1_dset_cut_all, se1_dset_cut, gmmse_red_darkse_dset, gmmse_red_brightse_dset, gmmse_se_darkse_dset, gmmse_se_brightse_dset
###FIG2
#frame to be shown in static frame
init_plot_no = center_cl_index #around 27 or 28
#plot_nonvideo_reg(titulo, gmmse_se1_bright_dset, red1_dset_cut,gmmse_red1_darkse_dset, gmmse_red1_brightse_dset, red1_dset_cut_all, se1_dset_cut, gmmse_red1_brightse_dset_for_4D, gmmse_red1_darkse_dset_for_4D, Time_bin,fastfactor,nominal_time_on,Pixel_size[index],darkse_pct, brightse_pct,name_str[index] ,init_plot_no,major_ticks,unit = 'kHz')
del se1_dset_cut
gc.collect()
###END FIG2
###FIG3
#fig_no = '-3plots'
#plot_expt_by_expt_behaviour(titulo + ', signal pixels', gmmse_red1_darkse_dset_for_4D/darkse_pct, Time_bin, nominal_time_on,fastfactor,'y',major_ticks,dark_dset=gmmse_red1_brightse_dset_for_4D/brightse_pct, plot_dark=True) #pass titulo as well
#two lines were uncommented; now trying to delete later
#del gmmse_red1_brightse_dset_for_4D
#gc.collect()
plot_expt_by_expt_behaviour(titulo + ', signal pixels', gmmse_red1_darkse_dset_for_4D/darkse_pct, Time_bin, nominal_time_on,fastfactor,'y',major_ticks,dark_dset=None, plot_dark=False,unit='kHz') #pass titulo as well
#del gmmse_red1_brightse_dset_for_4D
#multipage('ZZZ' + name_str[index] + fig_no + '.pdf',dpi=80)
###END FIG3
#del gmmse_red1_darkse_dset_for_4D
gc.collect()
###FIG4
fig_no = '-3plots'
start_of_transient = 82- cut_at_beginning + 1 #time-bin 75 + 300ns/40ns = 75 + ~8 = 83
last_pt_offset = -10 #sometimes use -1, last point, but sometimes this gives 0. -10 seems to work
# if index == 7000: ### 7 was a problem for red, added a thousand
# print('core only, sample B')
# init_guess = [np.average(gmmse_red1_darkse_dset[start_of_transient,:,:])/darkse_pct, 1.0, np.average(gmmse_red1_darkse_dset[last_pt_offset,:,:])/darkse_pct, np.average(gmmse_red1_darkse_dset[start_of_transient,:,:])/darkse_pct , 0.1] #e init was 0.5, d was zero before I made == a
# elif index == 10: #### 10 (oleci acid) was a single not double, added a thousand
# print("oleic")
# init_guess = [np.average(gmmse_red1_darkse_dset[start_of_transient,:,:])/darkse_pct,0.05, np.average(gmmse_red1_darkse_dset[last_pt_offset,:,:])/darkse_pct, np.average(gmmse_red1_darkse_dset[start_of_transient,:,:])/darkse_pct, 0.25] #e init was 0.5
# else:
init_guess = [np.average(gmmse_red1_darkse_dset[start_of_transient,:,:])/darkse_pct, 1.0, np.average(gmmse_red1_darkse_dset[last_pt_offset,:,:])/darkse_pct, np.average(gmmse_red1_darkse_dset[start_of_transient+50,:,:])/darkse_pct, 0.1] #e init was 0.5
#if do_gmmse_dset is False:
# brightse_pct = 0.01 #just so that I don't have a division by 0 in the function argument below!!!!
#b,e,be,ee = calcdecay(gmmse_red1_darkse_dset[start_of_transient::,:,:]/darkse_pct, time_detail= Time_bin*1e-9*fastfactor,titulo='Cathodoluminescence rate decay, bi-exponential fit, \n ' + titulo ,single=False,other_dset1=None ,other_dset2=None,init_guess=init_guess,unit='kHz')
#b,be, be, ee = calcdecay(gmmse_red1_darkse_dset[start_of_transient::,:,:]/darkse_pct, time_detail= Time_bin*1e-9*fastfactor,titulo='Cathodoluminescence rate decay, single exponential fit, \n ' + titulo ,single=False,other_dset1=red1_dset_cut[start_of_transient::,:,:]/1.0 ,other_dset2=gmmse_red1_brightse_dset[start_of_transient::,:,:]/brightse_pct,init_guess=init_guess,unit='kHz')
# tau_single[index] = b
# tau_single_error[index] = be
# tau_bi[index,:] = [b,e]
# tau_bi_error[index,:] = [be,ee]
#for plots of dose, in the Er 60%:
# if index >= 9: #8,9,10,11: the 4 from Er 60%
# mycode = 'ZZZZZZEr60' + str(index) + '= tempfile.NamedTemporaryFile(delete=False)'
# exec(mycode)
# np.savez('ZZZZZZEr60' + str(index), data = np.average(gmmse_red1_darkse_dset_for_4D/darkse_pct, axis=(1,2,3)))
#for plots of tau as a function of number of experiments in Er 2%, sample A (index0)
    if index == 0:
pass
# print('series')
# calcdecay_series(gmmse_red1_darkse_dset_for_4D[:,start_of_transient::,:,:]/darkse_pct, time_detail= Time_bin*1e-9*fastfactor,titulo='Cathodoluminescence rate decay, bi-exponential fit, \n ' + titulo ,single=False,nominal_time_on=nominal_time_on,fastfactor=fastfactor,other_dset1=red1_dset_cut_all[:,start_of_transient::,:,:]/1.0 ,other_dset2=gmmse_red1_brightse_dset_for_4D[:,start_of_transient::,:,:]/brightse_pct,init_guess=init_guess)
# print('each')
# calcdecay_each(gmmse_red1_darkse_dset_for_4D[:,start_of_transient::,:,:]/darkse_pct, time_detail= Time_bin*1e-9*fastfactor,titulo='Cathodoluminescence rate decay, bi-exponential fit, \n ' + titulo ,single=False,nominal_time_on=nominal_time_on,fastfactor=fastfactor,other_dset1=red1_dset_cut_all[:,start_of_transient::,:,:]/1.0 ,other_dset2=gmmse_red1_brightse_dset_for_4D[:,start_of_transient::,:,:]/brightse_pct,init_guess=init_guess)
del gmmse_red1_darkse_dset_for_4D, gmmse_red1_brightse_dset_for_4D #last one is trying to delete later than line372
gc.collect()
mycode = 'tekvZZZRedDecay = tempfile.NamedTemporaryFile(delete=False)'
exec(mycode)
np.savez('tekvZZZRedDecay', data = gmmse_red1_darkse_dset[start_of_transient::,:,:]/darkse_pct)
multipage('tkvZZZ.pdf',dpi=80)
###END FIG4
#plt.show()
plt.close('all')
#write a temporary file with all values
#outfile = tempfile.NamedTemporaryFile(delete=False)
#np.savez(outfile, tau_single=tau_single, tau_single_error=tau_single_error, tau_bi=tau_bi, tau_bi_error=tau_bi_error)
######################################## Plot with dose for different apertures
##files below exist
### 593 dichroic
se = np.load('tekvPentaSEchannel.npz')
segmm = np.load('tekvPentaSEchannelGMM.npz')
red = np.load('tekvPentaRedbright.npz')
blue = np.load('tekvPentaBluebright.npz')
fsizetit = 18 #22 #18
fsizepl = 16 #20 #16
sizex = 8 #10 #8
sizey = 6# 10 #6
dpi_no = 80
lw = 2
Pixel_size = 3.1e-9
length_scalebar = 100.0 #in nm (1000nm == 1mum)
scalebar_legend = '100 nm'
length_scalebar_in_pixels = np.ceil(length_scalebar/(Pixel_size/1.0e-9))
titulo = '150nm upconverting NPs (2.8kV, 30$\mu$m aperture, 40ns time bins, 36kX or 3.1nm pixels)'
fig40= plt.figure(figsize=(sizex, sizey), dpi=dpi_no)
fig40.set_size_inches(1200./fig40.dpi,900./fig40.dpi)
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rc('font', serif='Palatino')
plt.suptitle("Registration and segmentation (model: 2-GMM) of cathodoluminescence signal using SE channel, \n" + titulo,fontsize=fsizetit)
gc.collect()
ax1 = plt.subplot2grid((2,3), (0, 0), colspan=1)
ax1.set_title('SE channel (0.1ms per pixel)',fontsize=fsizepl)
plt.imshow(se['data'],cmap=cm.Greys_r)
sbar = sb.AnchoredScaleBar(ax1.transData, length_scalebar_in_pixels, scalebar_legend, style = 'bright', loc = 4)
ax1.add_artist(sbar)
plt.axis('off')
gc.collect()
ax1 = plt.subplot2grid((2,3), (0, 1), colspan=1)
ax1.set_title('SE channel, signal pixels',fontsize=fsizepl)
hlp = segmm['bright']
hlp[~np.isnan(hlp)] = 0.0
hlp[np.isnan(hlp)] = 1.0
im = plt.imshow(hlp,cmap=cm.Greys) #or 'OrRd'
sbar = sb.AnchoredScaleBar(ax1.transData, length_scalebar_in_pixels, scalebar_legend, style = 'bright', loc = 4)
ax1.add_artist(sbar)
plt.axis('off')
def make_gaussian(means, covars, weights, data, max_hist, no_pts=200):
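    # For each GMM component, build an x-grid spanning the data range and a
    # Gaussian curve scaled by the component weight and the histogram peak
    # (max_hist), so the mixture can be overlaid on the SE-image histogram.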
array_x = np.zeros([len(means), no_pts])
array_y = np.zeros([len(means), no_pts])
for j in np.arange(0,len(means)):
array_x[j,:] = np.linspace(np.min(data),np.max(data),no_pts)
array_y[j,:] = weights[j] * max_hist * np.exp( -(array_x[j,:] - means[j])**2/(2*covars[j]) )
return array_x, array_y
#box = ax1.get_position()
#ax1.set_position([box.x0, box.y0*1.00, box.width, box.height])
#axColor = plt.axes([box.x0, box.y0*1.1 , box.width,0.01*10 ])
#no_bins = 200
#n = axColor.hist(se['data'].flatten(),bins=no_bins)
#
#array_x, array_y = make_gaussian(segmm['means'][:,0], segmm['covars'][:,0],segmm['weights'], se['data'], np.max(n[0]), no_pts=200)
#
#axColor.plot(array_x[0],array_y[0])
#axColor.plot(array_x[1],array_y[1])
############
import skimage.morphology
from skimage.morphology import watershed
from skimage.feature import peak_local_max
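# Split the binary "signal" mask into individual nanoparticles: the Euclidean
# distance transform seeds local maxima, which in turn seed a watershed that
# assigns one label per particle.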
image = np.abs(1-hlp)#hlp.astype(bool) #np.logical_or(mask_circle1, mask_circle2)
from scipy import ndimage
distance = ndimage.distance_transform_edt(image)
local_maxi = peak_local_max(distance, num_peaks = 9, indices = False, footprint=np.ones((16,16)),labels=image) #footprint = min dist between maxima to find #footprint was 25,25
markers = skimage.morphology.label(local_maxi)
labels_ws = watershed(-distance, markers, mask=image)
#plt.figure()
#ax1 = plt.subplot2grid((1,4), (0,0))
#ax1.imshow(image)
#ax2 = plt.subplot2grid((1,4), (0,1))
#ax2.imshow(np.log(distance))
#ax2 = plt.subplot2grid((1,4), (0,2))
#ax2.imshow(markers)
#ax2 = plt.subplot2grid((1,4), (0,3))
#ax2.imshow(labels_ws)
#plt.show()
ax1 = plt.subplot2grid((2,3), (0, 2), colspan=1)
ax1.set_title('Segmented NPs',fontsize=fsizepl)
im = plt.imshow(labels_ws,cmap=cm.Greys) #or 'OrRd'
sbar = sb.AnchoredScaleBar(ax1.transData, length_scalebar_in_pixels, scalebar_legend, style = 'dark', loc = 4)
ax1.add_artist(sbar)
plt.axis('off')
ax1 = plt.subplot2grid((2,3), (1, 0), colspan=1)
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
plt.plot(np.arange(0,71)*Time_bin/1e3,np.average(red['data'],axis = (0,2,3)),c='r',label='Red photons ($>$ 593nm)',lw=3) #in mus, in MHz
plt.plot(np.arange(0,71)*Time_bin/1e3,np.average(blue['data'],axis = (0,2,3)),c='b',label='Blue photons ($<$ 593nm)',lw=3) #in mus, in MHz
ax1.axvspan(0.04, 0.16, alpha=0.25, color='yellow')
unit = 'kHz'
plt.ylabel("Average luminescence \n of each time bin, per pixel (" + unit + ")",fontsize=fsizepl)
plt.xlabel("Behaviour of e-beam during each experiment: \n 0.12-ON + OFF ($\mu$s)",fontsize=fsizepl)
plt.legend()
major_ticks0 = [1,2]
ax1.set_xticks(major_ticks0)
#ax1.set_yticks([15,30,45])
plt.xlim([0,2])
ax1 = plt.subplot2grid((2,3), (1, 1), colspan=1)
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
datared = np.average(red['data'], axis = (0))
datablue = np.average(blue['data'], axis = (0))
datared = datared[4:,:,:]
datablue = datablue[4:,:,:]
fastfactor = 1
last_pt_offset = -5 #sometimes use -1, last point, but sometimes this gives 0. -10 seems to work
init_guess = [np.average(datared[0,:,:]), 0.1, np.average(datared[last_pt_offset,:,:]), np.average(datared[-30,:,:]), 0.005] #e init was 0.5
#init_guess2 = [np.average(datablue[0,:,:]), 1.0, np.average(datablue[last_pt_offset,:,:]), np.average(datablue[-30,:,:]), 0.005] #e init was 0.5
init_guess2 = [ 0.08, 0.03, 0.17, 4.6, 0.01]
b,e,be,ee = calcdecay_subplot2(datared, time_detail= Time_bin*1e-9*fastfactor,titulo='Cathodoluminescence rate decay, bi-exponential fit, \n ' + titulo ,single=False,other_dset2=datablue ,other_dset1=None,init_guess=init_guess,unit='kHz',init_guess2=init_guess2)
plt.xlim([0,2])
major_ticks0 = [1,2]
plt.ylabel("Average luminescence \n of each time bin, per pixel (" + unit + ")",fontsize=fsizepl)
plt.xlabel(r'Time after blanking the electron beam ($\mu$s)', fontsize=fsizepl)
ax1.set_xticks(major_ticks0)
ax1 = plt.subplot2grid((2,3), (1, 2), colspan=1)
ax1.spines['right'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
dataALLred = red['data'][:,0:5,:,:]
dataALLblue = blue['data'][:,0:5,:,:]
nominal_time_on = 0.12
plt.plot(np.arange(1,51)*nominal_time_on*fastfactor,np.average(dataALLblue,axis=(1,2,3)),c='b', label='From blue photons ($<$ 593nm)',linestyle='None', marker='o',markersize=4) #in mus, in MHz
plt.plot(np.arange(1,51)*nominal_time_on*fastfactor,np.average(dataALLred,axis=(1,2,3)),c='r', label='From red photons ($>$ 593nm)',linestyle='None', marker='o',markersize=4) #in mus, in MHz
plt.ylabel("Average luminescence \n for each experiment, per pixel (kHz)",fontsize=fsizepl)
plt.xlabel("Cumulative e-beam exposure time \n per pixel (nominal, $\mu$s)",fontsize=fsizepl)
#major_ticks = [25,50,75,nominal_time_on*dset.shape[0]*fastfactor]
major_ticks = [2,4,6]
ax1.set_xticks(major_ticks)
#plt.legend()
plt.xlim([nominal_time_on,nominal_time_on*50*fastfactor])
multipage_longer('ZZZ28-Penta_plot.pdf',dpi=80)
| [
"[email protected]"
] | |
b5595d5d7348a72d9b7433a750d364865b2ef7b7 | 2ae420ff508e7e6799dbd0b9e0c71be96ef2ced9 | /pyre_extensions/__init__.py | c84f0ed282dd1e45df78c74712bcd532e404169e | [
"MIT"
] | permissive | njayinthehouse/pyre-check | 5c8ab3ee2048ad395652d2079c5dcbcee288fbfc | 14872ab61ffef3fe61490c4cf0e098954157a5ac | refs/heads/master | 2020-05-31T17:23:56.392622 | 2019-06-05T05:06:58 | 2019-06-05T05:10:37 | 186,618,634 | 0 | 0 | null | 2019-05-14T12:35:46 | 2019-05-14T12:35:46 | null | UTF-8 | Python | false | false | 1,059 | py | def ParameterSpecification(name):
"""This kind of type variable captures callable parameter specifications
(known as argspecs in the runtime and inspect library) instead of types,
allowing the typing of decorators which transform the return type of the
given callable.
For example:
from typing import TypeVar, Callable, List
from pyre_extensions import ParameterSpecification
Tparams = ParameterSpecification("Tparams")
Treturn = TypeVar("Treturn")
def unwrap(
            f: Callable[Tparams, List[Treturn]],
) -> Callable[Tparams, Treturn]: ...
@unwrap
def foo(x: int, y: str, z: bool = False) -> List[int]:
return [1, 2, 3]
decorates foo into a callable that returns int, but still has the same
parameters, including their names and whether they are required.
The empty list is required for backwards compatibility with the runtime
implementation for callables, which requires the first argument to be
a list of types
"""
return []
| [
"[email protected]"
] | |
d4c6454dd5d3cfb9f950a4eb640378ff7e298582 | 16197783050cb044729f8a4073f6a137658cf831 | /lesson23___/lesson20/iter_class.py | 68463ffc0b1a07c822ad9e774bf4970d94b2e3f0 | [] | no_license | AlexseyPivovarov/python_scripts | 04c053d531e1d36266e82b8b9dc75161a0bcdcf9 | 99d849c834c647cf669e55f5b8f32d984a288091 | refs/heads/master | 2020-04-07T02:26:31.520861 | 2018-11-17T11:21:02 | 2018-11-17T11:21:02 | 157,976,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | class Foo:
a=1
b=2
def __init__(self,a,b):
self.a = a
self.b = b
def __iter__(self):
yield self.a
yield self.b
# return (item for item in (self.a,self.b,))
def generator(self):
return iter(self)
class FooFoo(Foo):
def __init__(self,*args):
super().__init__(args[0],args[1])
self.d = args[2]
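    # __iter__ below first re-yields everything Foo.__iter__ produces (a, b)
    # via "yield from", then yields this subclass's extra attribute d.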
def __iter__(self):
yield from super().__iter__()
yield self.d
childFoo = Foo(7,10)
foofoo = FooFoo(*childFoo,5)
for item in foofoo:
print(item)
| [
"[email protected]"
] | |
3972ca7ba5760c2af411c970167427094f32caf0 | 51e6720d5bc219df3ce9bc899fe04ca71d6e86af | /Python/POO I/Cinema.py | 0140e3000714059ddb4dc507936c847a7d7b3a57 | [] | no_license | ThiagoCComelli/POO | 7466d8272ccc742400d55603e70a9b3be16e80a1 | 2f878d2d2674e11ea584f6c23b94bd27dea26d65 | refs/heads/master | 2020-06-03T15:29:04.704709 | 2020-01-10T01:39:04 | 2020-01-10T01:39:04 | 191,628,147 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,866 | py | # -*- coding: utf-8 -*-
class Cinema():
def __init__(self,nome,endereco):
self.__nome = nome
self.__endereco = endereco
self.__funcionarios = []
self.__salas = []
self.__filmes = []
self.__sessoes = []
def getNome(self):
return self.__nome
def getEndereco(self):
return self.__endereco
def getFuncionarios(self):
return self.__funcionarios
def getSalas(self):
return self.__salas
def getFilmes(self):
        return self.__filmes
def getSessaoMaisCara(self):
maior = 0
nome = 0
for i in self.__sessoes:
if i.getPreco() > maior:
maior = i.getPreco()
nome = i.getLocal()
return 'Sessao mais cara(preco): R$ {} Local(sala): {}'.format(str(maior),str(nome))
def getFilmeComedia(self):
filmes = 'Filmes de Comédia no Cinema: '
for i in self.__filmes:
if i.getGenero() == 'comedia':
filmes += i.getNome()
filmes += ' | '
return filmes
def getGeneroMaisFrequente(self):
lista = []
for i in self.__sessoes:
a = i.getFilmee()
lista.append(a.getGenero())
lista1 = set(lista)
genero = ''
count = 0
for i in lista1:
if lista.count(i) > count:
genero = i
count = lista.count(i)
return 'Genero Mais Frequente Nas Sessoes: {}'.format(genero)
def getMaiorLucro(self):
maior = 0
for i in self.__sessoes:
preco = i.getPreco()
sala = i.getLocall()
cap = sala.getCap()
tot = cap*preco
if tot>maior:
maior = tot
return 'Maior Lucro Possivel Sera de: R$ {}'.format(maior)
def setSessao(self,x):
self.__sessoes.append(x)
def setSala(self,x):
self.__salas.append(x)
def setFilme(self,x):
self.__filmes.append(x)
def setFuncionario(self,x):
self.__funcionarios.append(x)
class Pessoa():
def __init__(self,nome):
self.__nome = nome
def getNome(self):
return self.__nome
class Diretor(Pessoa):
def __init__(self,nome):
super().__init__(nome)
def getNome(self):
        return super().getNome()
class Funcionario(Pessoa):
def __init__(self,nome,idade,salario,sexo):
super().__init__(nome)
self.__idade = idade
self.__salario = salario
self.__sexo = sexo
def getNome(self):
        return super().getNome()
def getIdade(self):
return self.__idade
def getSalario(self):
return self.__salario
def getSexo(self):
return self.__sexo
class Sala():
def __init__(self,id,cap):
self.__id = id
self.__cap = cap
def getId(self):
return self.__id
def getCap(self):
return self.__cap
class Filme():
def __init__(self,nome,lancamento,diretor,genero,duracao):
self.__nome = nome
self.__lancamento = lancamento
self.__diretor = diretor
self.__genero = genero
self.__duracao = duracao
def getNome(self):
return self.__nome
def getLancamento(self):
return self.__lancamento
def getDiretor(self):
return self.__diretor
def getGenero(self):
return self.__genero
def getDuracao(self):
return self.__duracao
class Sessao():
def __init__(self,inicio,preco):
self.__inicio = inicio
self.__preco = preco
self.__local = ''
self.__filme = ''
def getIncio(self):
return self.__inicio
def getLocal(self):
return self.__local.getId()
def getLocall(self):
return self.__local
def getPreco(self):
return self.__preco
def getFilme(self):
return self.__filme.getNome()
def getFilmee(self):
return self.__filme
def setFilme(self,x):
self.__filme = x
def setLocal(self,x):
self.__local = x
def __repr__(self):
return 'Incio: {}\nPreco: {}\nLocal(sala): {}\nFilme: {}'.format(self.getIncio(),self.getPreco(),self.getLocal(),self.getFilme())
cine0 = Cinema('cine0','88130-000')
func0 = Funcionario('thiago',19,1900,'masculino')
dire0 = Diretor('lorenzo')
film0 = Filme('film0','08/2019',dire0,'comedia',130)
film1 = Filme('film1','08/2012',dire0,'acao',120)
film2 = Filme('film2','08/2011',dire0,'comedia',90)
film3 = Filme('film3','08/2014',dire0,'terror',200)
film4 = Filme('film4','08/2015',dire0,'suspense',100)
film5 = Filme('film5','08/2016',dire0,'comedia',45)
sala0 = Sala(0,100)
sala1 = Sala(1,101)
sala2 = Sala(2,102)
sala3 = Sala(3,103)
sala4 = Sala(4,104)
sala5 = Sala(5,105)
sess0 = Sessao('28/04/3010 - 13:30',12)
sess1 = Sessao('28/04/3011 - 13:30',20)
sess2 = Sessao('28/04/3012 - 13:30',18)
sess3 = Sessao('28/04/3013 - 13:30',121)
sess4 = Sessao('28/04/3014 - 13:30',32)
sess5 = Sessao('28/04/3015 - 13:30',122)
cine0.setFuncionario(func0)
sess0.setFilme(film0)
sess1.setFilme(film1)
sess2.setFilme(film2)
sess3.setFilme(film3)
sess4.setFilme(film4)
sess5.setFilme(film5)
sess0.setLocal(sala0)
sess1.setLocal(sala1)
sess2.setLocal(sala2)
sess3.setLocal(sala3)
sess4.setLocal(sala4)
sess5.setLocal(sala5)
cine0.setFilme(film0)
cine0.setFilme(film1)
cine0.setFilme(film2)
cine0.setFilme(film3)
cine0.setFilme(film4)
cine0.setFilme(film5)
cine0.setSessao(sess0)
cine0.setSessao(sess1)
cine0.setSessao(sess2)
cine0.setSessao(sess3)
cine0.setSessao(sess4)
cine0.setSessao(sess5)
cine0.setSala(sala0)
cine0.setSala(sala1)
cine0.setSala(sala2)
cine0.setSala(sala3)
cine0.setSala(sala4)
cine0.setSala(sala5)
print(cine0.getSessaoMaisCara())
print()
print(cine0.getFilmeComedia())
print()
print(cine0.getGeneroMaisFrequente())
print()
print(cine0.getMaiorLucro()) | [
"[email protected]"
] | |
429ac7ba8eb6de79c8e5918dfb728409774bf846 | 25f16d9e3416e186f677e425d7c3c19fb1b6b76a | /qt5_exercises/concurrent/bad_example_1.py | fccb43f8a4882055a4ed050ebd0b638cfb424564 | [] | no_license | amisaka/pyqt5Samples | 662b33255f9fd3522a8600e23b1f5e742eef769e | 40f295232a726b4d67cc3124dcf6ac46a2efe9c8 | refs/heads/master | 2023-08-27T18:11:58.162821 | 2021-11-03T14:06:10 | 2021-11-03T14:06:10 | 419,336,692 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | import sys
import time
from PyQt5.QtCore import QTimer
from PyQt5.QtWidgets import (
QApplication,
QLabel,
QMainWindow,
QPushButton,
QVBoxLayout,
QWidget,
)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.counter = 0
layout = QVBoxLayout()
self.l = QLabel("Start")
b = QPushButton("DANGER!")
b.pressed.connect(self.oh_no)
layout.addWidget(self.l)
layout.addWidget(b)
w = QWidget()
w.setLayout(layout)
self.setCentralWidget(w)
self.show()
self.timer = QTimer()
self.timer.setInterval(1000)
self.timer.timeout.connect(self.recurring_timer)
self.timer.start()
def oh_no(self):
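        # Sleeping on the main (GUI) thread blocks the Qt event loop, so the
        # window freezes and the QTimer-driven counter stops updating for the
        # full five seconds; this is the failure mode the button demonstrates.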
time.sleep(5)
def recurring_timer(self):
self.counter += 1
self.l.setText("Counter: %d" % self.counter)
app = QApplication(sys.argv)
window = MainWindow()
app.exec_()
| [
"[email protected]"
] | |
6dd4201175ab7bff2cd0e7bc706dc1287b58e4f8 | d0b67717a0b1378caa2b6590173153831e98fa3b | /w.py | 52c95e1be50b8987df6f99592363dabac89ae350 | [] | no_license | anukhandelwal26/python-codes | d1931bc98b109c6c7643340d269d65574d0604ea | 0f6096a267f3f58719995b4d65fa7b2b431f5e5d | refs/heads/master | 2020-03-20T21:06:18.115849 | 2018-06-26T11:10:58 | 2018-06-26T11:10:58 | 137,724,208 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 121 | py |
print"enter a number"
n=raw_input()
a=int(n)
f=1
i=1
while (i<=a):
f=f*i
i=i+1
print "factorial of",n,"is",f
| [
"[email protected]"
] | |
4d1b9324b39491d5d9bc18a2121746ef329ae04d | b34641367c20afd688050976339ebae3ca7220a0 | /somemart_auth/somemart_auth/bin/django-admin.py | 53e30b0ed682707f5bb3c5c3d3a6699aba4d164c | [] | no_license | DrShiz/learning4 | cee6e46f733badbe17ab410e5a89b2106e493d2e | 85fa372d3bdccf7da487d13d147c1b639ddceefd | refs/heads/master | 2023-03-06T00:20:00.048331 | 2021-02-16T16:39:35 | 2021-02-16T16:39:35 | 339,447,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | #!/Users/terekhovas/PycharmProjects/learning4/somemart_auth/somemart_auth/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
ffe7dd88fd8bc92e657aaa2d716937bfe5577bb7 | 016200d5593feb15bf7737389586bd161398a09c | /Database/venv/bin/rstpep2html.py | d3334da4c02e7abe3f073230dad54c21f6a5896b | [] | no_license | MarcPartensky/Python-2019 | d74e41710c9b48887e141ef5a8251f5e5d06026d | 1b29680292fdc48af25ae45ce0e9572b8c31427d | refs/heads/master | 2021-07-07T18:46:49.708387 | 2020-08-11T19:49:01 | 2020-08-11T19:49:01 | 166,604,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | #!/Users/marcpartensky/Programs/Python/Repository-2019/Database/venv/bin/python
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
+ default_description)
publish_cmdline(reader_name='pep', writer_name='pep_html',
description=description)
| [
"[email protected]"
] | |
c4f18d847f19ca495879aa38a6784637e2cdc09e | f34298735dbaee1a56da7dcc477f2734c6a1a305 | /src/tandlr/emails/views.py | 7e5be2847adb90189418ba321dbc33b1554a340c | [
"Apache-2.0"
] | permissive | shrmoud/schoolapp | 6f8e71b68cf42b6d2ac54acb42ed0a4664d5aaa9 | 7349ce18f56658d67daedf5e1abb352b5c15a029 | refs/heads/master | 2021-03-24T13:34:44.385285 | 2017-06-11T21:37:53 | 2017-06-11T21:37:53 | 94,031,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,020 | py | from django.http import Http404
from django.shortcuts import get_object_or_404
from django.template import TemplateDoesNotExist
from django.views.generic.base import TemplateView
from tandlr.scheduled_classes.models import Class
class StaticEmailView(TemplateView):
"""
View to render static email templates for development.
example:
tandlr.local/email-preview/registration/confirmation_email.html
"""
def get_template_names(self):
return 'email/%s' % self.kwargs['page']
def get(self, request, page):
try:
return self.render_to_response(self.get_context_data())
except TemplateDoesNotExist:
raise Http404
def get_context_data(self, **kwargs):
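        # When a session_id query parameter is present (e.g. ?session_id=42),
        # expose the matching Class booking to the template as "booking".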
context = super(StaticEmailView, self).get_context_data(**kwargs)
if self.request.GET.get('session_id'):
context['booking'] = get_object_or_404(
Class,
id=self.request.GET.get('session_id')
)
return context
| [
"[email protected]"
] | |
80d8b80562719182cb64b44da2035610d8f622f3 | 86206b05a6e0a425ba5401de50b8645bddf77780 | /Oper Python/SFDC Oper/Training Scripts/Selenium/Oper1/Sandbox/XLLib.py | c3f4c611f5f1340cccab5f36f85d28d7b2f13bba | [] | no_license | QuestTestAutomation/PersistentDesktop1python | 2e626ea16ce0fd4c697b156fdc2f9b3ca85bbd7b | ece25957edb6f87b2777b261b31914d22ebd99ad | refs/heads/master | 2021-03-10T21:55:25.450872 | 2020-03-27T09:45:14 | 2020-03-27T09:45:14 | 246,488,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,491 | py | import openpyxl
import os
import time
from openpyxl.utils import get_column_letter
from openpyxl import Workbook
from openpyxl.compat import range
def Create_Workbook(file,sheetname):
sheetexists = 0
if os.path.isfile(file):
mywb = openpyxl.load_workbook(file)
print("XL file exists")
else:
print("XL file does not exists")
mywb = openpyxl.Workbook()
time.sleep(5)
sheet = mywb.active
sheet.title = sheetname
mywb.save(file)
time.sleep(5)
def Create_Worksheets(file,sheetnames):
sheetexists = 0
i = 0
if os.path.isfile(file):
mywb = openpyxl.load_workbook(file)
print("XL file exists")
for sheetname in sheetnames:
mywb.create_sheet(index= i, title= sheetname)
mywb.save(file)
time.sleep(3)
else:
print("XL file does not exists")
mywb = openpyxl.Workbook()
time.sleep(10)
def print_XL_cell_values(file,sheetname,irow,icolumn):
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
    for row in range(1, mysheet.max_row + 1):
        for col in range(1, mysheet.max_column + 1):
            if mysheet.cell(column=col, row=row).value is not None:
                print mysheet.cell(column=col, row=row).value
def get_XL_column_letter(file,sheetname,columnvalue):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
    for col in range(1, mysheet.max_column + 1):
if (str(mysheet.cell(column=col, row=1).value)).upper() == (str(columnvalue)).upper():
id = format(get_column_letter(col))
break
return id
def get_XL_column_index(file,sheetname,columnvalue):
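    # Return the 1-based index of the column whose header cell (row 1) matches
    # columnvalue, compared case-insensitively; returns '0' if nothing matches.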
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
print "max column is " + str(mysheet.max_column)
    for col in range(1, mysheet.max_column + 1):
# print "***************"
# print str((mysheet.cell(column=col, row=1).value)).upper()
# print (str(columnvalue)).upper()
# print (str(mysheet.cell(column=col, row=1).value)).upper() == (str(columnvalue)).upper()
if (str(mysheet.cell(column=col, row=1).value)).upper() == (str(columnvalue)).upper():
id = col
break
return id
def get_XL_cell_value(file,sheetname,irow,icolumn):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
id = mysheet.cell(column=icolumn, row=irow).value
return id
def get_XL_cell_value_using_column_header(file,sheetname,irow,columnheader):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
icolumn = get_XL_column_index(file,sheetname,columnheader)
print "The Column is : " + str(icolumn)
id = mysheet.cell(column=int(icolumn), row=irow).value
id = str(id).strip()
return id
def set_XL_cell_value(file,sheetname,irow,icolumn,cellvalue):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
mysheet.cell(column=icolumn, row=irow).value = cellvalue
mywb.save(file)
time.sleep(5)
def set_XL_cell_value_using_column_header(file,sheetname,irow,columnheader,cellvalue):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
icolumn = get_XL_column_index(file,sheetname,columnheader)
mysheet.cell(column=int(icolumn), row=irow).value = cellvalue
mywb.save(file)
time.sleep(5)
def create_XL_header_lists(file,sheetname,lists):
col = 1
Create_Workbook(file, sheetname)
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
for list in lists :
mysheet.cell(column=int(col), row=1).value = list
col = int(col) + int(1)
mywb.save(file)
time.sleep(5)
def add_XL_header_column(file,sheetname,columnheader):
Create_Workbook(file, sheetname)
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
col = int(mysheet.max_column) + int(1)
mysheet.cell(column=int(col), row=1).value = columnheader
mywb.save(file)
time.sleep(5)
def copy_XL_workbook(sourcefile,targetfile):
    # Copy every worksheet from sourcefile into targetfile cell by cell,
    # because openpyxl's copy_worksheet() only works within a single workbook.
    if os.path.isfile(sourcefile):
        sourcewb = openpyxl.load_workbook(sourcefile)
        wslists = sourcewb.sheetnames
        if os.path.isfile(targetfile):
            mywb = openpyxl.load_workbook(targetfile)
            print "XL file exists"
        else:
            print "XL file does not exists"
            mywb = openpyxl.Workbook()
        for wslist in wslists:
            print "sheet name is : " + str(wslist)
            sourcews = sourcewb.get_sheet_by_name(str(wslist))
            myws = mywb.create_sheet(title=str(wslist))
            for row in sourcews.iter_rows():
                for cell in row:
                    myws[cell.coordinate] = cell.value
        mywb.save(targetfile)
        time.sleep(5)
def get_XL_row_count(file,sheetname):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
id = mysheet.max_row
return id
def get_XL_column_count(file,sheetname):
id = '0'
mywb = openpyxl.load_workbook(file)
mysheet = mywb.get_sheet_by_name(sheetname)
id = mysheet.max_column
return id
| [
"[email protected]"
] | |
a34584dbef4b37a0544599bc74d308186d99c177 | b7138d31e13920ad1bf6a82ff062a6f512c48983 | /cal_test.py | a9aa6c440b822b666232f96a284da723bc4b737a | [] | no_license | aaqqxx/ZA_cal | bcb6863a1aa882ed34fb18a4070ecb316568a3f1 | 2b604d2f1d472666d03c0a63a3c18f5710650a2e | refs/heads/master | 2020-05-04T14:03:49.846304 | 2019-04-03T01:05:04 | 2019-04-03T01:05:04 | 179,183,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,200 | py | # coding:utf-8
__author__ = 'XingHua'
"""
光栅尺位置VS出光学位置
参考点为d1前面的那个镜子,见90nm的ZA图纸。
从Excel中的光栅尺位置计算出光学位置d1,d2,d3,d5.
从光学位置计算出光栅尺位置。
"""
import matplotlib.pyplot as plt
import numpy as np
from glob import glob
import pandas as pd
import argparse
import sys
import math
def quadratic(a, b, c):
if not isinstance(a, (int, float)):
raise TypeError('a is not a number')
if not isinstance(b, (int, float)):
raise TypeError('b is not a number')
if not isinstance(c, (int, float)):
raise TypeError('c is not a number')
derta = b * b - 4 * a * c
if a == 0:
if b == 0:
if c == 0:
                return 'The equation holds for all real numbers'
else:
                return 'The equation has no roots'
else:
x1 = -c / b
x2 = x1
return x1, x2
else:
if derta < 0:
            return 'The equation has no roots'
else:
x1 = (-b + math.sqrt(derta)) / (2 * a)
x2 = (-b - math.sqrt(derta)) / (2 * a)
return x1, x2
print(quadratic(2, 3, 1))
print(quadratic(1, 3, -4))
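# Expected output: (-0.5, -1.0) for 2x^2 + 3x + 1 = 0 and (1.0, -4.0) for x^2 + 3x - 4 = 0.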
| [
"[email protected]"
] | |
0f47c9b1f5dd09360682347e38d451c90365764d | 3bb57eb1f7c1c0aced487e7ce88f3cb84d979054 | /semeval/corpora/semeval/test_tags.py | 9ba72eace0ee9a1f33302f8a869d091beb0940c0 | [] | no_license | ghpaetzold/phd-backup | e100cd0bbef82644dacc73a8d1c6b757b2203f71 | 6f5eee43e34baa796efb16db0bc8562243a049b6 | refs/heads/master | 2020-12-24T16:41:21.490426 | 2016-04-23T14:50:07 | 2016-04-23T14:50:07 | 37,981,094 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | f1 = open('semeval_test_clean.txt')
f2 = open('tagged_sents_semeval_test.txt')
c = 0
for line1 in f1:
line2 = f2.readline()
tokens = line1.strip().split('\t')[0].strip().split(' ')
tags = line2.strip().split(' ')
if len(tokens)!=len(tags):
print('Tokens: ' + str(tokens))
print('Tags: ' + str(tags))
print('')
c += 1
f1.close()
f2.close()
print(str(c))
| [
"[email protected]"
] | |
0ce313309436e63ed807ba4f071dae2e79b2cffc | c2f809fb0c3aaf5c92f2ec04c41df5e0e764a088 | /foolbox_custom/attacks/gen_attack.py | 139b84c528f4c13925e0c581059b53e805b9fe38 | [] | no_license | lavanova/adaptive-auto-attack | 7f4834cdc9dbeb6e161fc869f71bb284e854604a | 8ed8b33afc6757a334c4d3f046fcb7793dd2c873 | refs/heads/master | 2023-05-07T17:44:33.466128 | 2021-05-20T09:03:53 | 2021-05-20T09:03:53 | 369,143,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,739 | py | from typing import Optional, Any, Tuple, Union
import numpy as np
import eagerpy as ep
from ..devutils import atleast_kd
from ..models import Model
from attack.criteria import TargetedMisclassification
from ..distances import linf
from .base import FixedEpsilonAttack
from .base import T
from .base import get_channel_axis
from .base import raise_if_kwargs
import math
from .gen_attack_utils import rescale_images
class GenAttack(FixedEpsilonAttack):
"""A black-box algorithm for L-infinity adversarials. [#Alz18]_
    This attack performs a genetic search in order to find an adversarial
perturbation in a black-box scenario in as few queries as possible.
References:
.. [#Alz18] Moustafa Alzantot, Yash Sharma, Supriyo Chakraborty, Huan Zhang,
Cho-Jui Hsieh, Mani Srivastava,
"GenAttack: Practical Black-box Attacks with Gradient-Free
Optimization",
https://arxiv.org/abs/1805.11090
"""
def __init__(
self,
*,
steps: int = 1000,
population: int = 10,
mutation_probability: float = 0.10,
mutation_range: float = 0.15,
sampling_temperature: float = 0.3,
channel_axis: Optional[int] = None,
reduced_dims: Optional[Tuple[int, int]] = None,
):
self.steps = steps
self.population = population
self.min_mutation_probability = mutation_probability
self.min_mutation_range = mutation_range
self.sampling_temperature = sampling_temperature
self.channel_axis = channel_axis
self.reduced_dims = reduced_dims
distance = linf
def apply_noise(
self,
x: ep.TensorType,
noise: ep.TensorType,
epsilon: float,
channel_axis: Optional[int],
) -> ep.TensorType:
if noise.shape != x.shape and channel_axis is not None:
# upscale noise
noise = rescale_images(noise, x.shape, channel_axis)
return ep.clip(noise + x, -epsilon, +epsilon)
def choice(
self, a: int, size: Union[int, ep.TensorType], replace: bool, p: ep.TensorType
) -> Any:
p = p.numpy()
x = np.random.choice(a, size, replace, p)
return x
def run(
self,
model: Model,
inputs: T,
criterion: TargetedMisclassification,
*,
epsilon: float,
**kwargs: Any,
) -> T:
raise_if_kwargs(kwargs)
x, restore_type = ep.astensor_(inputs)
del inputs, kwargs
N = len(x)
if isinstance(criterion, TargetedMisclassification):
classes = criterion.target_classes
else:
raise ValueError("unsupported criterion")
if classes.shape != (N,):
raise ValueError(
f"expected target_classes to have shape ({N},), got {classes.shape}"
)
noise_shape: Union[Tuple[int, int, int, int], Tuple[int, ...]]
channel_axis: Optional[int] = None
if self.reduced_dims is not None:
if x.ndim != 4:
raise NotImplementedError(
"only implemented for inputs with two spatial dimensions"
" (and one channel and one batch dimension)"
)
if self.channel_axis is None:
maybe_axis = get_channel_axis(model, x.ndim)
if maybe_axis is None:
raise ValueError(
"cannot infer the data_format from the model, please"
" specify channel_axis when initializing the attack"
)
else:
channel_axis = maybe_axis
else:
channel_axis = self.channel_axis % x.ndim
if channel_axis == 1:
noise_shape = (x.shape[1], *self.reduced_dims)
elif channel_axis == 3:
noise_shape = (*self.reduced_dims, x.shape[3])
else:
raise ValueError(
"expected 'channel_axis' to be 1 or 3, got {channel_axis}"
)
else:
noise_shape = x.shape[1:] # pragma: no cover
def is_adversarial(logits: ep.TensorType) -> ep.TensorType:
return ep.argmax(logits, 1) == classes
num_plateaus = ep.zeros(x, len(x))
mutation_probability = (
ep.ones_like(num_plateaus) * self.min_mutation_probability
)
mutation_range = ep.ones_like(num_plateaus) * self.min_mutation_range
noise_pops = ep.uniform(
x, (N, self.population, *noise_shape), -epsilon, epsilon
)
def calculate_fitness(logits: ep.TensorType) -> ep.TensorType:
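            # GenAttack fitness: the log-domain margin of the target class,
            # i.e. logits[target] - log(sum of exp(logits) over the other classes).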
first = logits[range(N), classes]
second = ep.log(ep.exp(logits).sum(1) - first)
return first - second
n_its_wo_change = ep.zeros(x, (N,))
for step in range(self.steps):
fitness_l, is_adv_l = [], []
for i in range(self.population):
it = self.apply_noise(x, noise_pops[:, i], epsilon, channel_axis)
logits = model(it)
f = calculate_fitness(logits)
a = is_adversarial(logits)
fitness_l.append(f)
is_adv_l.append(a)
fitness = ep.stack(fitness_l)
is_adv = ep.stack(is_adv_l, 1)
elite_idxs = ep.argmax(fitness, 0)
elite_noise = noise_pops[range(N), elite_idxs]
is_adv = is_adv[range(N), elite_idxs]
# early stopping
if is_adv.all():
return restore_type( # pragma: no cover
self.apply_noise(x, elite_noise, epsilon, channel_axis)
)
probs = ep.softmax(fitness / self.sampling_temperature, 0)
parents_idxs = np.stack(
[
self.choice(
self.population,
2 * self.population - 2,
replace=True,
p=probs[:, i],
)
for i in range(N)
],
1,
)
mutations = [
ep.uniform(
x,
noise_shape,
-mutation_range[i].item() * epsilon,
mutation_range[i].item() * epsilon,
)
for i in range(N)
]
new_noise_pops = [elite_noise]
for i in range(0, self.population - 1):
parents_1 = noise_pops[range(N), parents_idxs[2 * i]]
parents_2 = noise_pops[range(N), parents_idxs[2 * i + 1]]
# calculate crossover
p = probs[parents_idxs[2 * i], range(N)] / (
probs[parents_idxs[2 * i], range(N)]
+ probs[parents_idxs[2 * i + 1], range(N)]
)
p = atleast_kd(p, x.ndim)
p = ep.tile(p, (1, *noise_shape))
crossover_mask = ep.uniform(p, p.shape, 0, 1) < p
children = ep.where(crossover_mask, parents_1, parents_2)
# calculate mutation
mutation_mask = ep.uniform(children, children.shape)
mutation_mask = mutation_mask <= atleast_kd(
mutation_probability, children.ndim
)
children = ep.where(mutation_mask, children + mutations[i], children)
# project back to epsilon range
children = ep.clip(children, -epsilon, epsilon)
new_noise_pops.append(children)
noise_pops = ep.stack(new_noise_pops, 1)
# increase num_plateaus if fitness does not improve
# for 100 consecutive steps
n_its_wo_change = ep.where(
elite_idxs == 0, n_its_wo_change + 1, ep.zeros_like(n_its_wo_change)
)
num_plateaus = ep.where(
n_its_wo_change >= 100, num_plateaus + 1, num_plateaus
)
n_its_wo_change = ep.where(
n_its_wo_change >= 100, ep.zeros_like(n_its_wo_change), n_its_wo_change
)
mutation_probability = ep.maximum(
self.min_mutation_probability,
0.5 * ep.exp(math.log(0.9) * ep.ones_like(num_plateaus) * num_plateaus),
)
mutation_range = ep.maximum(
self.min_mutation_range,
0.5 * ep.exp(math.log(0.9) * ep.ones_like(num_plateaus) * num_plateaus),
)
return restore_type(self.apply_noise(x, elite_noise, epsilon, channel_axis))
| [
"[email protected]"
] | |
923f733b0186d059380bc2c96093eb818d7f3e35 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02392/s188244302.py | 4cfcecde196f3237c6620b3b8ed79280894d91fd | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | a, b, c = [int(w) for w in input().split()]
print("Yes" if a < b and b < c else "No") | [
"[email protected]"
] | |
5b9f092d1f6e4f16af01d0261b62ddb5819e965a | dcea1a4a7df68b74e54f8c59aadd0d1b287c9727 | /F2M_train_V11.py | 047c38c3346074f642674286a7473dabe3ebf09a | [] | no_license | Kimyuhwanpeter/F2M_Version_V14 | 3dae9c8066efbac87afb7def24fb8938e3c3013f | eebba737f9f46a1b41b688a4e5d73a08579295e6 | refs/heads/main | 2023-07-02T19:01:24.695947 | 2021-08-06T04:59:34 | 2021-08-06T04:59:34 | 392,281,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,170 | py | # -*- coding:utf-8 -*-
#from F2M_model_V14 import *
from F2M_model_V14_2 import *
from random import shuffle, random
from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import easydict
import os
FLAGS = easydict.EasyDict({"img_size": 256,
"load_size": 276,
"tar_size": 256,
"tar_load_size": 276,
"batch_size": 1,
"epochs": 200,
"lr": 0.0002,
"A_txt_path": "D:/[1]DB/[2]third_paper_DB/[4]Age_and_gender/race_age_gender_generation/Morph_AFAD_16_63/first_fold/AFAD-F_Morph-M_16_39_40_63/train/female_16_39_train.txt",
"A_img_path": "D:/[1]DB/[1]second_paper_DB/AFAD_16_69_DB/backup/fix_AFAD/",
"B_txt_path": "D:/[1]DB/[2]third_paper_DB/[4]Age_and_gender/race_age_gender_generation/Morph_AFAD_16_63/first_fold/AFAD-F_Morph-M_40_63_16_39/train/male_16_39_train.txt",
"B_img_path": "D:/[1]DB/[2]third_paper_DB/[4]Age_and_gender/Morph/All/male_16_39/",
"age_range": [40, 64],
"n_classes": 256,
"train": True,
"pre_checkpoint": False,
"pre_checkpoint_path": "",
"save_checkpoint": "",
"sample_images": "C:/Users/Yuhwan/Pictures/img2",
"A_test_txt_path": "",
"A_test_img_path": "",
"B_test_txt_path": "",
"B_test_img_path": "",
"test_dir": "A2B",
"fake_B_path": "",
"fake_A_path": ""})
g_optim = tf.keras.optimizers.Adam(FLAGS.lr, beta_1=0.5)
d_optim = tf.keras.optimizers.Adam(FLAGS.lr, beta_1=0.5)
def input_func(A_data, B_data):
A_img = tf.io.read_file(A_data[0])
A_img = tf.image.decode_jpeg(A_img, 3)
A_img = tf.image.resize(A_img, [FLAGS.load_size, FLAGS.load_size])
A_img = tf.image.random_crop(A_img, [FLAGS.img_size, FLAGS.img_size, 3])
A_img = A_img / 127.5 - 1.
B_img = tf.io.read_file(B_data[0])
B_img = tf.image.decode_jpeg(B_img, 3)
B_img = tf.image.resize(B_img, [FLAGS.tar_load_size, FLAGS.tar_load_size])
B_img = tf.image.random_crop(B_img, [FLAGS.tar_size, FLAGS.tar_size, 3])
B_img = B_img / 127.5 - 1.
if random() > 0.5:
A_img = tf.image.flip_left_right(A_img)
B_img = tf.image.flip_left_right(B_img)
B_lab = int(B_data[1])
A_lab = int(A_data[1])
return A_img, A_lab, B_img, B_lab
def te_input_func(img, lab):
img = tf.io.read_file(img)
img = tf.image.decode_jpeg(img, 3)
img = tf.image.resize(img, [FLAGS.img_size, FLAGS.img_size])
lab = lab
return img, lab
#@tf.function
def model_out(model, images, training=True):
return model(images, training=training)
def decreas_func(x):
return tf.maximum(0, tf.math.exp(x * (-2.77 / 100)))
def increase_func(x):
x = tf.cast(tf.maximum(1, x), tf.float32)
return tf.math.log(x + 1e-7)
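# Rough shape of the two penalty terms used in the age-distance part of cal_loss below
# (the numbers are only illustrative):
#   decreas_func(0) ~ 1.0,   decreas_func(100) ~ exp(-2.77) ~ 0.06   (decreasing in the energy)
#   increase_func(1) ~ 0.0,  increase_func(100) ~ log(100)  ~ 4.6    (increasing in the energy)
# so minimising increase_func pulls same-age feature distances down, while minimising
# decreas_func pushes different-age feature distances apart.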
def cal_loss(A2B_G_model, B2A_G_model, A_discriminator, B_discriminator,
A_batch_images, B_batch_images, B_batch_labels, A_batch_labels):
with tf.GradientTape() as g_tape, tf.GradientTape() as d_tape:
fake_B = model_out(A2B_G_model, A_batch_images, True)
fake_A_ = model_out(B2A_G_model, tf.nn.tanh(fake_B[:, :, :, 0:3]), True)
fake_A = model_out(B2A_G_model, B_batch_images, True)
fake_B_ = model_out(A2B_G_model, tf.nn.tanh(fake_A[:, :, :, 0:3]), True)
        # identification # wouldn't it be good to add this as well?
#id_fake_A, _ = model_out(B2A_G_model, A_batch_images, True)
#id_fake_B, _ = model_out(A2B_G_model, B_batch_images, True)
DB_real = model_out(B_discriminator, B_batch_images, True)
DB_fake = model_out(B_discriminator, tf.nn.tanh(fake_B[:, :, :, 0:3]), True)
DA_real = model_out(A_discriminator, A_batch_images, True)
DA_fake = model_out(A_discriminator, tf.nn.tanh(fake_A[:, :, :, 0:3]), True)
################################################################################################
        # this is where the distance with respect to age is computed
return_loss = 0.
        for i in range(FLAGS.batch_size): # remember: to use the original data, one more compare label has to be created!!!!
energy_ft = tf.reduce_sum(tf.abs(tf.reduce_mean(fake_A[i, :, :, 3:], [0,1]) - tf.reduce_mean(fake_B[:, :, :, 3:], [1,2])), 1)
energy_ft2 = tf.reduce_sum(tf.abs(tf.reduce_mean(fake_A_[i, :, :, 3:], [0,1]) - tf.reduce_mean(fake_B_[:, :, :, 3:], [1,2])), 1)
compare_label = tf.subtract(A_batch_labels, B_batch_labels[i])
T = 4
label_buff = tf.less(tf.abs(compare_label), T)
label_cast = tf.cast(label_buff, tf.float32)
realB_fakeB_loss = label_cast * increase_func(energy_ft) \
+ (1 - label_cast) * 5 * decreas_func(energy_ft)
realA_fakeA_loss = label_cast * increase_func(energy_ft2) \
+ (1 - label_cast) * 5 * decreas_func(energy_ft2)
            # decreasing function when the ages of A and B differ, increasing function when they are the same
loss_buf = 0.
for j in range(FLAGS.batch_size):
loss_buf += realB_fakeB_loss[j] + realA_fakeA_loss[j]
loss_buf /= FLAGS.batch_size
return_loss += loss_buf
return_loss /= FLAGS.batch_size
################################################################################################
        # build the content loss
f_B = tf.nn.tanh(fake_B[:, :, :, 0:3])
f_B_x, f_B_y = tf.image.image_gradients(f_B)
f_B_m = tf.add(tf.abs(f_B_x), tf.abs(f_B_y))
f_B = tf.abs(f_B - f_B_m)
f_A = tf.nn.tanh(fake_A[:, :, :, 0:3])
f_A_x, f_A_y = tf.image.image_gradients(f_A)
f_A_m = tf.add(tf.abs(f_A_x), tf.abs(f_A_y))
f_A = tf.abs(f_A - f_A_m)
r_A = A_batch_images
r_A_x, r_A_y = tf.image.image_gradients(r_A)
r_A_m = tf.add(tf.abs(r_A_x), tf.abs(r_A_y))
r_A = tf.abs(r_A - r_A_m)
r_B = B_batch_images
r_B_x, r_B_y = tf.image.image_gradients(r_B)
r_B_m = tf.add(tf.abs(r_B_x), tf.abs(r_B_y))
r_B = tf.abs(r_B - r_B_m)
id_loss = tf.reduce_mean(tf.abs(f_B - r_A)) * 5.0 \
+ tf.reduce_mean(tf.abs(f_A - r_B)) * 5.0 # content loss
Cycle_loss = (tf.reduce_mean(tf.abs(tf.nn.tanh(fake_A_[:, :, :, 0:3]) - A_batch_images))) \
* 10.0 + (tf.reduce_mean(tf.abs(tf.nn.tanh(fake_B_[:, :, :, 0:3]) - B_batch_images))) * 10.0
G_gan_loss = tf.reduce_mean((DB_fake - tf.ones_like(DB_fake))**2) \
+ tf.reduce_mean((DA_fake - tf.ones_like(DA_fake))**2)
Adver_loss = (tf.reduce_mean((DB_real - tf.ones_like(DB_real))**2) + tf.reduce_mean((DB_fake - tf.zeros_like(DB_fake))**2)) / 2. \
+ (tf.reduce_mean((DA_real - tf.ones_like(DA_real))**2) + tf.reduce_mean((DA_fake - tf.zeros_like(DA_fake))**2)) / 2.
g_loss = Cycle_loss + G_gan_loss + return_loss + id_loss
d_loss = Adver_loss
g_grads = g_tape.gradient(g_loss, A2B_G_model.trainable_variables + B2A_G_model.trainable_variables)
d_grads = d_tape.gradient(d_loss, A_discriminator.trainable_variables + B_discriminator.trainable_variables)
g_optim.apply_gradients(zip(g_grads, A2B_G_model.trainable_variables + B2A_G_model.trainable_variables))
d_optim.apply_gradients(zip(d_grads, A_discriminator.trainable_variables + B_discriminator.trainable_variables))
return g_loss, d_loss
def main():
pre_trained_encoder1 = tf.keras.applications.ResNet50V2(include_top=False, input_shape=(FLAGS.img_size, FLAGS.img_size, 3))
pre_trained_encoder2 = tf.keras.applications.VGG16(include_top=False, input_shape=(FLAGS.img_size, FLAGS.img_size, 3))
pre_trained_encoder2.summary()
A2B_G_model = F2M_generator(input_shape=(FLAGS.img_size, FLAGS.img_size, 3))
B2A_G_model = F2M_generator(input_shape=(FLAGS.img_size, FLAGS.img_size, 3))
B_discriminator = F2M_discriminator(input_shape=(FLAGS.tar_size, FLAGS.tar_size, 3))
A_discriminator = F2M_discriminator(input_shape=(FLAGS.tar_size, FLAGS.tar_size, 3))
A2B_G_model.summary()
B_discriminator.summary()
if FLAGS.pre_checkpoint:
ckpt = tf.train.Checkpoint(A2B_G_model=A2B_G_model, B2A_G_model=B2A_G_model,
B_discriminator=B_discriminator,
g_optim=g_optim, d_optim=d_optim)
ckpt_manager = tf.train.CheckpointManager(ckpt, FLAGS.pre_checkpoint_path, 5)
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print("Restored!!")
else:
A2B_G_model.get_layer("conv_en_1").set_weights(pre_trained_encoder1.get_layer("conv1_conv").get_weights())
B2A_G_model.get_layer("conv_en_1").set_weights(pre_trained_encoder1.get_layer("conv1_conv").get_weights())
A2B_G_model.get_layer("conv_en_3").set_weights(pre_trained_encoder2.get_layer("block2_conv1").get_weights())
B2A_G_model.get_layer("conv_en_3").set_weights(pre_trained_encoder2.get_layer("block2_conv1").get_weights())
A2B_G_model.get_layer("conv_en_5").set_weights(pre_trained_encoder2.get_layer("block3_conv1").get_weights())
B2A_G_model.get_layer("conv_en_5").set_weights(pre_trained_encoder2.get_layer("block3_conv1").get_weights())
if FLAGS.train:
count = 0
A_images = np.loadtxt(FLAGS.A_txt_path, dtype="<U100", skiprows=0, usecols=0)
A_images = [FLAGS.A_img_path + data for data in A_images]
A_labels = np.loadtxt(FLAGS.A_txt_path, dtype=np.int32, skiprows=0, usecols=1)
B_images = np.loadtxt(FLAGS.B_txt_path, dtype="<U100", skiprows=0, usecols=0)
B_images = [FLAGS.B_img_path + data for data in B_images]
B_labels = np.loadtxt(FLAGS.B_txt_path, dtype=np.int32, skiprows=0, usecols=1)
for epoch in range(FLAGS.epochs):
min_ = min(len(A_images), len(B_images))
A = list(zip(A_images, A_labels))
B = list(zip(B_images, B_labels))
shuffle(B)
shuffle(A)
B_images, B_labels = zip(*B)
A_images, A_labels = zip(*A)
A_images = A_images[:min_]
A_labels = A_labels[:min_]
B_images = B_images[:min_]
B_labels = B_labels[:min_]
A_zip = np.array(list(zip(A_images, A_labels)))
B_zip = np.array(list(zip(B_images, B_labels)))
            # if we build a loss based on the distance between close ages, the end effect should be that the generated image keeps the age of the input image
gener = tf.data.Dataset.from_tensor_slices((A_zip, B_zip))
gener = gener.shuffle(len(B_images))
gener = gener.map(input_func)
gener = gener.batch(FLAGS.batch_size)
gener = gener.prefetch(tf.data.experimental.AUTOTUNE)
train_idx = min_ // FLAGS.batch_size
train_it = iter(gener)
for step in range(train_idx):
A_batch_images, A_batch_labels, B_batch_images, B_batch_labels = next(train_it)
g_loss, d_loss = cal_loss(A2B_G_model, B2A_G_model, A_discriminator, B_discriminator,
A_batch_images, B_batch_images, B_batch_labels, A_batch_labels)
print("Epoch = {}[{}/{}];\nStep(iteration) = {}\nG_Loss = {}, D_loss = {}".format(epoch,step,train_idx,
count+1,
g_loss, d_loss))
if count % 100 == 0:
fake_B = model_out(A2B_G_model, A_batch_images, False)
fake_A = model_out(B2A_G_model, B_batch_images, False)
plt.imsave(FLAGS.sample_images + "/fake_B_{}.jpg".format(count), tf.nn.tanh(fake_B[0, :, :, 0:3]) * 0.5 + 0.5)
plt.imsave(FLAGS.sample_images + "/fake_A_{}.jpg".format(count), tf.nn.tanh(fake_A[0, :, :, 0:3]) * 0.5 + 0.5)
plt.imsave(FLAGS.sample_images + "/real_B_{}.jpg".format(count), B_batch_images[0] * 0.5 + 0.5)
plt.imsave(FLAGS.sample_images + "/real_A_{}.jpg".format(count), A_batch_images[0] * 0.5 + 0.5)
#if count % 1000 == 0:
# num_ = int(count // 1000)
# model_dir = "%s/%s" % (FLAGS.save_checkpoint, num_)
# if not os.path.isdir(model_dir):
# print("Make {} folder to store the weight!".format(num_))
# os.makedirs(model_dir)
# ckpt = tf.train.Checkpoint(A2B_G_model=A2B_G_model, B2A_G_model=B2A_G_model,
# A_discriminator=A_discriminator, B_discriminator=B_discriminator,
# g_optim=g_optim, d_optim=d_optim)
# ckpt_dir = model_dir + "/F2M_V8_{}.ckpt".format(count)
# ckpt.save(ckpt_dir)
count += 1
else:
        if FLAGS.test_dir == "A2B": # the train data here must be B, not A
A_train_data = np.loadtxt(FLAGS.A_txt_path, dtype="<U100", skiprows=0, usecols=0)
A_train_data = [FLAGS.A_img_path + data for data in A_train_data]
A_train_label = np.loadtxt(FLAGS.A_txt_path, dtype=np.int32, skiprows=0, usecols=1)
A_test_data = np.loadtxt(FLAGS.A_test_txt_path, dtype="<U200", skiprows=0, usecols=0)
A_test_data = [FLAGS.A_test_img_path + data for data in A_test_data]
A_test_label = np.loadtxt(FLAGS.A_test_txt_path, dtype=np.int32, skiprows=0, usecols=1)
tr_gener = tf.data.Dataset.from_tensor_slices((A_train_data, A_train_label))
tr_gener = tr_gener.map(te_input_func)
tr_gener = tr_gener.batch(1)
tr_gener = tr_gener.prefetch(tf.data.experimental.AUTOTUNE)
te_gener = tf.data.Dataset.from_tensor_slices((A_test_data, A_test_label))
te_gener = te_gener.map(te_input_func)
te_gener = te_gener.batch(1)
te_gener = te_gener.prefetch(tf.data.experimental.AUTOTUNE)
tr_it = iter(tr_gener)
tr_idx = len(A_train_data) // 1
te_it = iter(te_gener)
te_idx = len(A_test_data) // 1
for i in range(te_idx):
te_A_images, te_A_labels = next(te_it)
fake_B, te_feature = model_out(A2B_G_model, te_A_images, False) # [1, 256]
te_features = te_feature[0]
dis = []
lab = []
for j in range(tr_idx):
tr_A_images, tr_A_labels = next(tr_it)
_, tr_feature = model_out(A2B_G_model, tr_A_images, False) # [1, 256]
tr_features = tr_feature[0]
d = tf.reduce_sum(tf.abs(tr_features - te_features), -1)
dis.append(d.numpy())
lab.append(tr_A_labels[0].numpy())
min_distance = np.argmin(dis, axis=-1)
generated_age = lab[min_distance]
name = (A_test_data[i].split("/")[-1]).split(".")[0]
plt.imsave(FLAGS.fake_B_path + "/" + name + "_{}".format(generated_age) + ".jpg", fake_B[0].numpy() * 0.5 + 0.5)
        if FLAGS.test_dir == "B2A": # the train data here must be A, not B
B_train_data = np.loadtxt(FLAGS.B_txt_path, dtype="<U100", skiprows=0, usecols=0)
B_train_data = [FLAGS.B_img_path + data for data in B_train_data]
B_train_label = np.loadtxt(FLAGS.B_txt_path, dtype=np.int32, skiprows=0, usecols=1)
B_test_data = np.loadtxt(FLAGS.B_test_txt_path, dtype="<U200", skiprows=0, usecols=0)
B_test_data = [FLAGS.B_test_img_path + data for data in B_test_data]
B_test_label = np.loadtxt(FLAGS.B_test_txt_path, dtype="<U200", skiprows=0, usecols=1)
tr_gener = tf.data.Dataset.from_tensor_slices((B_train_data, B_train_label))
tr_gener = tr_gener.map(te_input_func)
tr_gener = tr_gener.batch(1)
tr_gener = tr_gener.prefetch(tf.data.experimental.AUTOTUNE)
te_gener = tf.data.Dataset.from_tensor_slices((B_test_data, B_test_label))
te_gener = te_gener.map(te_input_func)
te_gener = te_gener.batch(1)
te_gener = te_gener.prefetch(tf.data.experimental.AUTOTUNE)
tr_it = iter(tr_gener)
tr_idx = len(B_train_data) // 1
te_it = iter(te_gener)
te_idx = len(B_test_data) // 1
for i in range(te_idx):
te_B_images, te_B_labels = next(te_it)
fake_A, te_feature = model_out(B2A_G_model, te_B_images, False) # [1, 256]
te_features = te_feature[0]
dis = []
lab = []
for j in range(tr_idx):
tr_B_images, tr_B_labels = next(tr_it)
_, tr_feature = model_out(B2A_G_model, tr_B_images, False) # [1, 256]
tr_features = tr_feature[0]
d = tf.reduce_sum(tf.abs(tr_features - te_features), -1)
dis.append(d.numpy())
lab.append(tr_B_labels[0].numpy())
min_distance = np.argmin(dis, axis=-1)
generated_age = lab[min_distance]
name = (B_test_data[i].split("/")[-1]).split(".")[0]
plt.imsave(FLAGS.fake_A_path + "/" + name + "_{}".format(generated_age) + ".jpg", fake_A[0].numpy() * 0.5 + 0.5)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
7130805026446d24d544f10d895736cf095e094e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02937/s210990064.py | f15af763a1739d3abe1ae11028c005fee5325924 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from bisect import bisect_left
import sys
s = input()
t = input()
# positions of every character inside s
d = {char: [] for char in 'abcdefghijklmnopqrstuvwxyz'}
for i in range(len(s)):
    d[s[i]].append(i)
cnt = 0    # number of complete copies of s consumed so far
index = 0  # current scan position inside the current copy of s
for char in t:
    if not d[char]:
        # char never occurs in s, so t can never be built as a subsequence
        print(-1)
        sys.exit()
    # first occurrence of char at or after the current position
    i = bisect_left(d[char], index)
    if i == len(d[char]):
        # no occurrence left in this copy of s: wrap around into the next copy
        cnt += 1
        index = d[char][0]
    else:
        index = d[char][i]
    index += 1
ans = cnt * len(s) + index
print(ans) | [
"[email protected]"
] | |
d8947f54da103676026570125e67410bffb7919f | 99d7765da35926279c4a4fd7313d55908786f4b8 | /1/7/17256/17256.py | d960171dbed858b264d23746c09f153f0475dfbb | [
"MIT"
] | permissive | chr0m3/boj-codes | b8294c5d4d10a5af25b5276427bccd74d0866ef5 | d71d0a22d0a3ae62c225f382442461275f56fe8f | refs/heads/master | 2021-08-16T15:24:57.733088 | 2021-03-22T13:13:10 | 2021-03-22T13:13:10 | 91,523,558 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | ax, ay, az = map(int, input().split())
cx, cy, cz = map(int, input().split())
bx = cx - az
by = int(cy / ay)
bz = cz - ax
print(f'{bx} {by} {bz}')
| [
"[email protected]"
] | |
bcfda7e647bf3de983700535dd5d34d12deef7d1 | a01fb7bb8e8738a3170083d84bc3fcfd40e7e44f | /python3/core/meta/define_class.py | f2363c09f30ec08eda430d475519793a0f9d812f | [] | no_license | jk983294/CommonScript | f07acf603611b4691b176aa4a02791ef7d4d9370 | 774bcbbae9c146f37312c771c9e867fb93a0c452 | refs/heads/master | 2023-08-21T17:50:19.036159 | 2023-08-16T00:22:03 | 2023-08-16T00:22:03 | 42,732,160 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | import types
# Example of making a class manually from parts
# Methods
def __init__(self, name, shares, price):
self.name = name
self.shares = shares
self.price = price
def cost(self):
return self.shares * self.price
cls_dict = {
'__init__': __init__,
'cost': cost,
}
# Make a class
Stock = types.new_class('Stock', (), {}, lambda ns: ns.update(cls_dict))
Stock.__module__ = __name__
s = Stock('ACME', 50, 91.1)
print(s.cost()) # 4555.0
| [
"[email protected]"
] | |
38fea3cb4a84ad737b0db2e9f0e3d3bd11cf0e52 | 0343de40021f8dd72fb9a6cb31b5d2f24ccd7971 | /utilities/wake_models_mean/array_efficiency_openMDAO.py | fc3e2db61bcf729ad72f210df061bd8098d2014a | [] | no_license | sebasanper/WINDOW_dev | 47ae9252e6fadb2a3b1a0aae3383681a7955f4ea | 3c6437a777f2fc3be1dfd3d53b5d2ed25281c55c | refs/heads/master | 2021-01-01T19:45:02.555727 | 2018-05-21T20:27:56 | 2018-05-21T20:27:56 | 98,670,270 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,855 | py | from numpy import array
from openmdao.api import Component, Group, Problem
from farm_energy.layout import read_layout
from power_models import power_v90 as power
from site_conditions.wind_conditions.windrose import read_windrose
from wake_models import jensen_1angle, ainslie_1angle, larsen_1angle
class JensenWindRose(Component):
def __init__(self):
super(JensenWindRose, self).__init__()
self.add_param('layout_x', shape=(9,))
self.add_param('layout_y', shape=(9,))
self.add_param('windrose_direction', shape=(4,))
self.add_param('windrose_speed', shape=(4,))
self.add_param('windrose_probability', shape=(4,))
self.add_output('array_efficiency', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
layout_x = params['layout_x']
layout_y = params['layout_y']
wind_direction = params['windrose_direction']
wind_speed = params['windrose_speed']
wind_frequency = params['windrose_probability']
efficiency = []
profit = []
summation = 0.0
nt = len(layout_y)
P = []
U = []
efficiency_proportion = []
for wind in range(len(wind_direction)):
U0 = wind_speed[wind] # Free stream wind speed
angle = wind_direction[wind]
# angle2 = - 270.0 - angle # To read windroses where N is 0 and E is 90
U.append(jensen_1angle(layout_x, layout_y, U0, angle, rotor_radius=40.0, k=0.04))
P.append([power(u) for u in U[-1]])
# Farm efficiency
profit.append(sum(P[-1]))
efficiency.append(profit[-1] * 100.0 / (float(nt) * max(P[-1]))) # same as using U0
efficiency_proportion.append(efficiency[-1] * wind_frequency[wind] / 100.0)
summation += efficiency_proportion[wind]
# print profit
# print efficiency
# print efficiency_proportion
# print U
# print P
unknowns['array_efficiency'] = summation
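        # Illustrative arithmetic for the windrose weighting above (assuming the
        # frequencies in the windrose file are given in percent): with two directions at
        # 60 / 40 and per-direction efficiencies of 90 % and 80 %,
        # array_efficiency = 90*60/100 + 80*40/100 = 86.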
class AinslieWindRose(Component):
def __init__(self):
super(AinslieWindRose, self).__init__()
self.add_param('layout_x', shape=(9,))
self.add_param('layout_y', shape=(9,))
self.add_param('windrose_direction', shape=(4,))
self.add_param('windrose_speed', shape=(4,))
self.add_param('windrose_probability', shape=(4,))
self.add_output('array_efficiency', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
layout_x = params['layout_x']
layout_y = params['layout_y']
wind_direction = params['windrose_direction']
wind_speed = params['windrose_speed']
wind_frequency = params['windrose_probability']
efficiency = []
profit = []
summation = 0.0
nt = len(layout_y)
P = []
U = []
efficiency_proportion = []
for wind in range(len(wind_direction)):
U0 = wind_speed[wind] # Free stream wind speed
angle = wind_direction[wind]
# angle2 = - 270.0 - angle # To read windroses where N is 0 and E is 90
U.append(ainslie_1angle(layout_x, layout_y, U0, angle, rotor_radius=40.0, TI=0.08))
P.append([power(u) for u in U[-1]])
# Farm efficiency
profit.append(sum(P[-1]))
efficiency.append(profit[-1] * 100.0 / (float(nt) * max(P[-1]))) # same as using U0
efficiency_proportion.append(efficiency[-1] * wind_frequency[wind] / 100.0)
summation += efficiency_proportion[wind]
# print profit
# print efficiency
# print efficiency_proportion
# print U
# print P
unknowns['array_efficiency'] = summation
class LarsenWindRose(Component):
def __init__(self):
super(LarsenWindRose, self).__init__()
self.add_param('layout_x', shape=(9,))
self.add_param('layout_y', shape=(9,))
self.add_param('windrose_direction', shape=(4,))
self.add_param('windrose_speed', shape=(4,))
self.add_param('windrose_probability', shape=(4,))
self.add_output('array_efficiency', val=0.0)
def solve_nonlinear(self, params, unknowns, resids):
layout_x = params['layout_x']
layout_y = params['layout_y']
wind_direction = params['windrose_direction']
wind_speed = params['windrose_speed']
wind_frequency = params['windrose_probability']
efficiency = []
profit = []
summation = 0.0
nt = len(layout_y)
P = []
U = []
efficiency_proportion = []
for wind in range(len(wind_direction)):
U0 = wind_speed[wind] # Free stream wind speed
angle = wind_direction[wind]
# angle2 = - 270.0 - angle # To read windroses where N is 0 and E is 90
U.append(larsen_1angle(layout_x, layout_y, U0, angle, rotor_radius=40.0, hub_height=100.0, TI=0.08))
P.append([power(u) for u in U[-1]])
# Farm efficiency
profit.append(sum(P[-1]))
efficiency.append(profit[-1] * 100.0 / (float(nt) * max(P[-1]))) # same as using U0
efficiency_proportion.append(efficiency[-1] * wind_frequency[wind] / 100.0)
summation += efficiency_proportion[wind]
# print profit
# print efficiency
# print efficiency_proportion
# print U
# print P
unknowns['array_efficiency'] = summation
if __name__ == '__main__':
layout_x, layout_y = read_layout('coordinates.dat')
windrose_direction, windrose_speed, windrose_probability = read_windrose('windrose.dat')
root = Group()
root.add('jensen', JensenWindRose())
root.add('ainslie', AinslieWindRose())
root.add('larsen', LarsenWindRose())
prob = Problem(root)
prob.setup()
prob['jensen.layout_x'] = prob['ainslie.layout_x'] = prob['larsen.layout_x'] = array(layout_x)
prob['jensen.layout_y'] = prob['ainslie.layout_y'] = prob['larsen.layout_y'] = array(layout_y)
prob['jensen.windrose_direction'] = prob['ainslie.windrose_direction'] = prob['larsen.windrose_direction'] = array(windrose_direction)
prob['jensen.windrose_speed'] = prob['ainslie.windrose_speed'] = prob['larsen.windrose_speed'] = array(windrose_speed)
prob['jensen.windrose_probability'] = prob['ainslie.windrose_probability'] = prob['larsen.windrose_probability'] = array(windrose_probability)
prob.run()
efficiency_jensen = prob['jensen.array_efficiency']
efficiency_ainslie = prob['ainslie.array_efficiency']
efficiency_larsen = prob['larsen.array_efficiency']
print 'Jensen'
print efficiency_jensen
print
print 'Ainslie'
print efficiency_ainslie
print
print 'Larsen'
print efficiency_larsen
| [
"[email protected]"
] | |
356538f816b7e65cf077f0dc5839f9ca201bca9f | edbb63696580638af0084ee318d2c9bc9e8c7e79 | /linkf.py | f95261c46757de6bb39993f4c951b15bf01b7640 | [] | no_license | awaddell77/Scrapers | fef34e34b8e039f4992497cae75135cdb57b2581 | 0a36fb2c99f2d7b90533834b29c0ba8f27c13a85 | refs/heads/master | 2020-05-21T13:44:06.524855 | 2020-03-16T23:00:45 | 2020-03-16T23:00:45 | 62,753,048 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | def linkF(s,n, base = 0, attrs= 0, default = '"'):#x is the item, n = tag takes link tag (MUST BE STRING) and extracts the link
    import re  # 're' is used below but never imported at module level in this snippet
    ln = ''
    x = s
    if isinstance(attrs, str):
        x = re.sub(attrs, '', x)
    elif attrs != 0:
        x = re.sub('<a', '', x)  # strips the tag from the string, helps in certain situations where the location of the link changes in between elements
    ln_s = x.split(default)
    for i in range(0, len(ln_s) - 1):
        if ln_s[i] == n or ln_s[i] == ' ' + n:
            if ln_s[i + 1] != 'javascript:void(0);':
                ln = ln_s[i + 1]  # ln is the link (still needs to be joined with the base URL)
    if not ln:  # no link found
        return ""
    if base != 0:
        ln = base + ln  # MAJOR WORKAROUND!!!! IN THE FUTURE THIS SHOULD CALL A FUNCTION THAT FINDS THE BASE
    return ln
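# Minimal usage sketch (the tag string and base URL are made-up examples; attrs=1 so the
# leading '<a' is stripped before splitting on the quote character):
#   linkF('<a href="/articles/1">story</a>', 'href=', base='http://example.com', attrs=1)
#   -> 'http://example.com/articles/1'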
| [
"[email protected]"
] | |
6a29db1aa6a6aa74ff51bb6790448fbd6080f5d9 | eb518a18d8055400c85d1b2f714fe9d4d654b941 | /_done/tests/test_skel_filter.py | 6c108e4ac6c2a32d3368595cbd6277d0b33f1378 | [] | no_license | ver228/single-worm-analysis | c755709354025f629f7c774749394743c7b9a46b | 8d0a442fb93ad25aa30743f6c31f883639524a4d | refs/heads/master | 2021-09-14T11:31:17.761390 | 2018-05-12T23:00:54 | 2018-05-12T23:00:54 | 79,457,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,786 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 21:57:30 2015
@author: ajaver
"""
import matplotlib.pylab as plt
import h5py
import pandas as pd
import cv2
import numpy as np
from skimage.filters import threshold_otsu
from scipy.signal import medfilt
import time
import os
from MWTracker.trackWorms.getSkeletonsTables import getWormMask, binaryMask2Contour, getWormROI
from MWTracker.trackWorms.segWormPython.cleanWorm import cleanWorm
from MWTracker.trackWorms.segWormPython.linearSkeleton import linearSkeleton
from MWTracker.trackWorms.segWormPython.getHeadTail import getHeadTail, rollHead2FirstIndex, isHeadTailTouching
from MWTracker.trackWorms.segWormPython.cythonFiles.segWorm_cython import circComputeChainCodeLengths
from MWTracker.trackWorms.segWormPython.cleanWorm import circSmooth, extremaPeaksCircDist
from MWTracker.trackWorms.segWormPython.cythonFiles.circCurvature import circCurvature
#file_mask = '/Users/ajaver/Desktop/Videos/03-03-11/MaskedVideos/03-03-11/N2 swimming_2011_03_03__16_36___3___10.hdf5'
#file_mask = '/Volumes/behavgenom_archive$/MaskedVideos/nas207-3/Data/from pc207-15/laura/09-07-10/3/egl-17 (e1313)X on food R_2010_07_09__11_43_13___2___4.hdf5'
#file_mask = '/Users/ajaver/Desktop/Videos/single_worm/agar_1/MaskedVideos/431 JU298 on food L_2011_03_17__12_02_58___2___3.hdf5'
#file_mask = '/Users/ajaver/Desktop/Videos/single_worm/agar_2/MaskedVideos/798 JU258 on food L_2011_03_22__16_26_58___1___12.hdf5'
#file_mask = '/Users/ajaver/Desktop/Videos/single_worm/agar_1/MaskedVideos/unc-7 (cb5) on food R_2010_09_10__12_27_57__4.hdf5'
#file_mask = '/Users/ajaver/Desktop/Videos/single_worm/agar_1/MaskedVideos/gpa-11 (pk349)II on food L_2010_02_25__11_24_39___8___6.hdf5'
#file_mask = '/Users/ajaver/Desktop/Videos/single_worm/switched_sample/unc-104 (e1265)III on food L_2011_10_18__11_29_31__1.hdf5'
#file_skel = file_mask.replace('MaskedVideos', 'Results').replace('.hdf5', '_skeletons.hdf5')
file_skel = file_mask.replace('.hdf5', '_skeletons.hdf5')
assert(os.path.exists(file_mask))
assert(os.path.exists(file_skel))
with pd.HDFStore(file_skel, 'r') as fid:
trajectories_data = fid['/trajectories_data']
#with pd.HDFStore(file_traj, 'r') as fid:
# plate_worms = fid['/plate_worms']
current_frame = 9027#17277
with h5py.File(file_mask, 'r') as fid:
img = fid['/mask'][current_frame]
row_data = trajectories_data[trajectories_data['frame_number'] == current_frame]
row_data = row_data.iloc[0]
worm_img, roi_corner = getWormROI(img, row_data['coord_x'], row_data['coord_y'], row_data['roi_size'])
min_mask_area = row_data['area']/2
worm_mask, contour, _ = getWormMask(worm_img, row_data['threshold'], 10, min_mask_area)
if contour.dtype != np.double:
contour = contour.astype(np.double)
ske_worm_segments = 24.;
cnt_worm_segments = 2. * ske_worm_segments;
#this line does not really seem to be useful
#contour = cleanWorm(contour, cnt_worm_segments)
#make sure the contours are in the counter-clockwise direction
#head tail indentification will not work otherwise
#x1y2 - x2y1(http://mathworld.wolfram.com/PolygonArea.html)
signed_area = np.sum(contour[:-1,0]*contour[1:,1]-contour[1:,0]*contour[:-1,1])/2
if signed_area>0:
contour = np.ascontiguousarray(contour[::-1,:])
#make sure the array is C_continguous. Several functions required this.
if not contour.flags['C_CONTIGUOUS']:
contour = np.ascontiguousarray(contour)
#% Compute the contour's local high/low-frequency curvature.
#% Note: worm body muscles are arranged and innervated as staggered pairs.
#% Therefore, 2 segments have one theoretical degree of freedom (i.e. one
#% approximation of a hinge). In the head, muscles are innervated
#% individually. Therefore, we sample the worm head's curvature at twice the
#% frequency of its body.
#% Note 2: we ignore Nyquist sampling theorem (sampling at twice the
#% frequency) since the worm's cuticle constrains its mobility and practical
#% degrees of freedom.
cnt_chain_code_len = circComputeChainCodeLengths(contour);
worm_seg_length = (cnt_chain_code_len[0] + cnt_chain_code_len[-1]) / cnt_worm_segments;
edge_len_hi_freq = worm_seg_length;
cnt_ang_hi_freq = circCurvature(contour, edge_len_hi_freq, cnt_chain_code_len);
edge_len_low_freq = 2 * edge_len_hi_freq;
cnt_ang_low_freq = circCurvature(contour, edge_len_low_freq, cnt_chain_code_len);
#% Blur the contour's local high-frequency curvature.
#% Note: on a small scale, noise causes contour imperfections that shift an
#% angle from its correct location. Therefore, blurring angles by averaging
#% them with their neighbors can localize them better.
worm_seg_size = contour.shape[0] / cnt_worm_segments;
blur_size_hi_freq = np.ceil(worm_seg_size / 2);
cnt_ang_hi_freq = circSmooth(cnt_ang_hi_freq, blur_size_hi_freq)
#% Compute the contour's local high/low-frequency curvature maxima.
maxima_hi_freq, maxima_hi_freq_ind = \
extremaPeaksCircDist(1, cnt_ang_hi_freq, edge_len_hi_freq, cnt_chain_code_len)
maxima_low_freq, maxima_low_freq_ind = \
extremaPeaksCircDist(1, cnt_ang_low_freq, edge_len_low_freq, cnt_chain_code_len)
head_ind, tail_ind = \
getHeadTail(cnt_ang_low_freq, maxima_low_freq_ind, cnt_ang_hi_freq, maxima_hi_freq_ind, cnt_chain_code_len)
#one of the sides is too short so it might be touching itself (coiling)
err_msg = isHeadTailTouching(head_ind, tail_ind, cnt_chain_code_len);
#change arrays so the head correspond to the first position
head_ind, tail_ind, contour, cnt_chain_code_len, cnt_ang_low_freq, maxima_low_freq_ind = \
rollHead2FirstIndex(head_ind, tail_ind, contour, cnt_chain_code_len, cnt_ang_low_freq, maxima_low_freq_ind)
#% Compute the contour's local low-frequency curvature minima.
minima_low_freq, minima_low_freq_ind = \
extremaPeaksCircDist(-1, cnt_ang_low_freq, edge_len_low_freq, cnt_chain_code_len);
#% Compute the worm's skeleton.
skeleton, cnt_widths = linearSkeleton(head_ind, tail_ind, minima_low_freq, minima_low_freq_ind, \
maxima_low_freq, maxima_low_freq_ind, contour.copy(), worm_seg_length, cnt_chain_code_len);
#The head must be in position 0
assert head_ind == 0
# Get the contour for each side.
cnt_side1 = contour[:tail_ind+1, :].copy()
cnt_side2 = np.vstack([contour[0,:], contour[:tail_ind-1:-1,:]])
#%%
bend_low_freq_ind = minima_low_freq_ind[minima_low_freq < -30];
#if bend_low_freq_ind.size>0:
# '''% Find concavities near the head. If there are any concavities
# % near the tail, the head may be portruding from a coil; in
# % which case, the width at the end of the head may be
# % inaccurate.'''
# if hlcBounds(1) < hrcBounds(2)
# hBendI = lfCBendI(lfCBendI > hlcBounds(1) & ...
# lfCBendI < hrcBounds(2));
# else
# hBendI = lfCBendI(lfCBendI > hlcBounds(1) | ...
# lfCBendI < hrcBounds(2));
# end
#
# % Does the worm more than double its width from the head?
# % Note: if the worm coils, its width will grow to more than
# % double that at the end of the head.
# maxWidth = max(cWidths);
# if isempty(hBendI)
# if maxWidth / cWidths(hsBounds(2)) > 2 / bodyScale
# errNum = 107;
# errMsg = ['The worm more than doubles its width ' ...
# 'from end of its head. Therefore, the worm is ' ...
# 'coiled, laid an egg, and/or is significantly ' ...
# 'obscured and cannot be segmented.'];
#
# % Organize the available worm information.
# if verbose
# warning('segWorm:DoubleHeadWidth', ...
# ['Frame %d: ' errMsg], frame);
# vWorm = worm2struct(frame, contour, [], [], [], ...
# lfCAngles, headI, tailI, cCCLengths, [], [], ...
# [], [], [], [], [], [], [], [], [], [], [], [], ...
# [], [], [], [], [], [], [], [], [], [], [], [], ...
# [], [], [], [], [], [], [], [], [], 0, [], [], ...
# 0, [], []);
# else
# return;
# end
# end
# end
#
# % Find concavities near the tail. If there are any concavities near
# % the tail, the tail may be portruding from a coil; in which case,
# % the width at the end of the tail may be inaccurate.
# if trcBounds(1) < tlcBounds(2)
# tBendI = lfCBendI(lfCBendI > trcBounds(1) & ...
# lfCBendI < tlcBounds(2));
# else
# tBendI = lfCBendI(lfCBendI > trcBounds(1) | ...
# lfCBendI < tlcBounds(2));
# end
#
# % Does the worm more than double its width from the tail?
# % If the worm coils, its width will grow to more than double
# % that at the end of the tail.
# if isempty(tBendI)
# if maxWidth / cWidths(tsBounds(1)) > 2 / bodyScale
# errNum = 108;
# errMsg = ['The worm more than doubles its width ' ...
# 'from end of its tail. Therefore, the worm is ' ...
# 'coiled, laid an egg, and/or is significantly ' ...
# 'obscured and cannot be segmented.'];
#
# % Organize the available worm information.
# if verbose
# warning('segWorm:DoubleTailWidth', ...
# ['Frame %d: ' errMsg], frame);
# vWorm = worm2struct(frame, contour, [], [], [], ...
# lfCAngles, headI, tailI, cCCLengths, [], [], ...
# [], [], [], [], [], [], [], [], [], [], [], [], ...
# [], [], [], [], [], [], [], [], [], [], [], [], ...
# [], [], [], [], [], [], [], [], [], 0, [], [], ...
# 0, [], []);
# else
# return;
# end
# end
# end
#
# % Use the most accurate estimate of head/tail width to
# % determine whether the width of the body is more than double
# % that at the end of the head/tail; in which case; the worm is
# % coiled.
# if ~(isempty(hBendI) && isempty(tBendI))
#
# % Find the distances of bends near the head.
# hBendDist = abs(headI - hBendI);
# hBendDist = min(hBendDist, abs(hBendDist - length(lfCAngles)));
#
# % Find the distances of bends near the tail.
# tBendDist = abs(tailI - tBendI);
# tBendDist = min(tBendDist, abs(tBendDist - length(lfCAngles)));
#
# % The bend near the head is furthest and, therefore, the
# % width at the end of the head is our most accurate
# % estimate of the worm's width.
# if min(hBendDist) >= min(tBendDist)
# if maxWidth / cWidths(hsBounds(2)) > 2 / bodyScale
# errNum = 107;
# errMsg = ['The worm more than doubles its width ' ...
# 'from end of its head. Therefore, the worm is ' ...
# 'coiled, laid an egg, and/or is significantly ' ...
# 'obscured and cannot be segmented.'];
#
# % Organize the available worm information.
# if verbose
# warning('segWorm:DoubleHeadWidth', ...
# ['Frame %d: ' errMsg], frame);
# vWorm = worm2struct(frame, contour, [], [], [], ...
# lfCAngles, headI, tailI, cCCLengths, [], ...
# [], [], [], [], [], [], [], [], [], [], [], ...
# [], [], [], [], [], [], [], [], [], [], [], ...
# [], [], [], [], [], [], [], [], [], [], [], ...
# [], 0, [], [], 0, [], []);
# else
# return;
# end
# end
#
# % The bend near the tail is furthest and, therefore, the
# % width at the end of the tail is our most accurate
# % estimate of the worm's width.
# else
# if maxWidth / cWidths(tsBounds(1)) > 2 / bodyScale
# errNum = 108;
# errMsg = ['The worm more than doubles its width ' ...
# 'from end of its tail. Therefore, the worm is ' ...
# 'coiled, laid an egg, and/or is significantly ' ...
# 'obscured and cannot be segmented.'];
#
# % Organize the available worm information.
# if verbose
# warning('segWorm:DoubleTailWidth', ...
# ['Frame %d: ' errMsg], frame);
# vWorm = worm2struct(frame, contour, [], [], [], ...
# lfCAngles, headI, tailI, cCCLengths, [], ...
# [], [], [], [], [], [], [], [], [], [], [], ...
# [], [], [], [], [], [], [], [], [], [], [], ...
# [], [], [], [], [], [], [], [], [], [], [], ...
# [], 0, [], [], 0, [], []);
# else
# return;
# end
# end
# end
# end
# end
#
#
#%%
plt.figure()
plt.imshow(worm_mask, interpolation = 'none', cmap = 'gray')
plt.plot(contour[:,0], contour[:,1], 'r')
plt.xlim([0, worm_mask.shape[1]])
plt.ylim([0, worm_mask.shape[0]])
plt.grid('off')
| [
"[email protected]"
] | |
8f9fac60b55576c80814a327c149e2c7951c05eb | 1b3addbc9473b6ffb999665601470ccc4d1153b0 | /libs/magic.py | 53a91fd47331bd4941f6ce36bfa0ea6caa24c08f | [] | no_license | weijia/approot | e1f712fa92c4c3200210eb95d251d890295769ba | 15fac5b31a4d619d1bdede3d1131f5e6e57cd43b | refs/heads/master | 2020-04-15T13:15:01.956721 | 2014-08-26T14:02:17 | 2014-08-26T14:02:17 | 11,049,975 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,745 | py | """
magic is a wrapper around the libmagic file identification library.
See README for more information.
Usage:
>>> import magic
>>> magic.from_file("testdata/test.pdf")
'PDF document, version 1.2'
>>> magic.from_file("testdata/test.pdf", mime=True)
'application/pdf'
>>> magic.from_buffer(open("testdata/test.pdf").read(1024))
'PDF document, version 1.2'
>>>
"""
import os.path
import ctypes
import ctypes.util
from ctypes import c_char_p, c_int, c_size_t, c_void_p
class MagicException(Exception): pass
class Magic:
"""
Magic is a wrapper around the libmagic C library.
"""
def __init__(self, mime=False, magic_file=None, mime_encoding=False):
"""
Create a new libmagic wrapper.
mime - if True, mimetypes are returned instead of textual descriptions
mime_encoding - if True, codec is returned
magic_file - use a mime database other than the system default
"""
flags = MAGIC_NONE
if mime:
flags |= MAGIC_MIME
elif mime_encoding:
flags |= MAGIC_MIME_ENCODING
self.cookie = magic_open(flags)
magic_load(self.cookie, magic_file)
def from_buffer(self, buf):
"""
Identify the contents of `buf`
"""
return magic_buffer(self.cookie, buf)
def from_file(self, filename):
"""
Identify the contents of file `filename`
raises IOError if the file does not exist
"""
if not os.path.exists(filename):
raise IOError("File does not exist: " + filename)
return magic_file(self.cookie, filename)
def __del__(self):
if self.cookie:
magic_close(self.cookie)
self.cookie = None
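# Minimal usage sketch of the Magic class itself (the module-level from_file/from_buffer
# helpers below wrap it); the file path is the same illustrative test file as in the
# module docstring:
#   m = Magic(mime=True)
#   m.from_file("testdata/test.pdf")  # -> 'application/pdf'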
_magic_mime = None
_magic = None
def _get_magic_mime():
global _magic_mime
if not _magic_mime:
_magic_mime = Magic(mime=True)
return _magic_mime
def _get_magic():
global _magic
if not _magic:
_magic = Magic()
return _magic
def _get_magic_type(mime):
if mime:
return _get_magic_mime()
else:
return _get_magic()
def from_file(filename, mime=False):
m = _get_magic_type(mime)
return m.from_file(filename)
def from_buffer(buffer, mime=False):
m = _get_magic_type(mime)
return m.from_buffer(buffer)
libmagic = None
# Let's try to find magic or magic1
dll = ctypes.util.find_library('magic') or ctypes.util.find_library('magic1')
# This is necessary because find_library returns None if it doesn't find the library
if dll:
libmagic = ctypes.CDLL(dll)
if not libmagic or not libmagic._name:
import sys
platform_to_lib = {'darwin': '/opt/local/lib/libmagic.dylib',
'win32': 'magic1.dll'}
if sys.platform in platform_to_lib:
try:
libmagic = ctypes.CDLL(platform_to_lib[sys.platform])
except OSError:
pass
if not libmagic or not libmagic._name:
# It is better to raise an ImportError since we are importing magic module
raise ImportError('failed to find libmagic. Check your installation')
magic_t = ctypes.c_void_p
def errorcheck(result, func, args):
err = magic_error(args[0])
if err is not None:
raise MagicException(err)
else:
return result
magic_open = libmagic.magic_open
magic_open.restype = magic_t
magic_open.argtypes = [c_int]
magic_close = libmagic.magic_close
magic_close.restype = None
magic_close.argtypes = [magic_t]
magic_error = libmagic.magic_error
magic_error.restype = c_char_p
magic_error.argtypes = [magic_t]
magic_errno = libmagic.magic_errno
magic_errno.restype = c_int
magic_errno.argtypes = [magic_t]
magic_file = libmagic.magic_file
magic_file.restype = c_char_p
magic_file.argtypes = [magic_t, c_char_p]
magic_file.errcheck = errorcheck
_magic_buffer = libmagic.magic_buffer
_magic_buffer.restype = c_char_p
_magic_buffer.argtypes = [magic_t, c_void_p, c_size_t]
_magic_buffer.errcheck = errorcheck
def magic_buffer(cookie, buf):
return _magic_buffer(cookie, buf, len(buf))
magic_load = libmagic.magic_load
magic_load.restype = c_int
magic_load.argtypes = [magic_t, c_char_p]
magic_load.errcheck = errorcheck
magic_setflags = libmagic.magic_setflags
magic_setflags.restype = c_int
magic_setflags.argtypes = [magic_t, c_int]
magic_check = libmagic.magic_check
magic_check.restype = c_int
magic_check.argtypes = [magic_t, c_char_p]
magic_compile = libmagic.magic_compile
magic_compile.restype = c_int
magic_compile.argtypes = [magic_t, c_char_p]
MAGIC_NONE = 0x000000 # No flags
MAGIC_DEBUG = 0x000001 # Turn on debugging
MAGIC_SYMLINK = 0x000002 # Follow symlinks
MAGIC_COMPRESS = 0x000004 # Check inside compressed files
MAGIC_DEVICES = 0x000008 # Look at the contents of devices
MAGIC_MIME = 0x000010 # Return a mime string
MAGIC_MIME_ENCODING = 0x000400 # Return the MIME encoding
MAGIC_CONTINUE = 0x000020 # Return all matches
MAGIC_CHECK = 0x000040 # Print warnings to stderr
MAGIC_PRESERVE_ATIME = 0x000080 # Restore access time on exit
MAGIC_RAW = 0x000100 # Don't translate unprintable chars
MAGIC_ERROR = 0x000200 # Handle ENOENT etc as real errors
MAGIC_NO_CHECK_COMPRESS = 0x001000 # Don't check for compressed files
MAGIC_NO_CHECK_TAR = 0x002000 # Don't check for tar files
MAGIC_NO_CHECK_SOFT = 0x004000 # Don't check magic entries
MAGIC_NO_CHECK_APPTYPE = 0x008000 # Don't check application type
MAGIC_NO_CHECK_ELF = 0x010000 # Don't check for elf details
MAGIC_NO_CHECK_ASCII = 0x020000 # Don't check for ascii files
MAGIC_NO_CHECK_TROFF = 0x040000 # Don't check ascii/troff
MAGIC_NO_CHECK_FORTRAN = 0x080000 # Don't check ascii/fortran
MAGIC_NO_CHECK_TOKENS = 0x100000 # Don't check ascii/tokens
| [
"[email protected]"
] | |
b4b7ad28601012a65664678ac79d4ce0ccda5787 | 6c4dc59ca37a2106774cc7bdd3d6ab4f5abb4794 | /data/v8-0-7.37_ForApproval/CutOpCard/OPTIMIZED_171108_ElEl_HighMass_Bin2/MakeSystCycle_ElEl.py | 367262c206330a6fff83fc3effe2a9f9712eb023 | [] | no_license | jalmond/HNL_Type1_Plotter | 8a2c4ccd9e7ae3cf8b9d60b2fb3dd46ec56a8111 | 595143893ec6d1b12efd20713b127ca2a71ba019 | refs/heads/main | 2023-08-01T06:09:47.955278 | 2021-09-09T22:30:37 | 2021-09-09T22:30:37 | 404,880,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,960 | py | import os
CutCardDirName = "OPTIMIZED_171108_ElEl_HighMass_Bin2"
## Bin1 : TwoJet_NoFatJet
## Bin2 : OneJet / OneFatJet
DoBin1 = False
os.system("mkdir -p Cycles/")
masses = [90, 100, 125, 150, 200, 250, 300, 400, 500, 600, 700, 800, 900, 1000, 1100, 1200, 1300, 1400, 1500]
for mass in masses:
filename = "HNElEl_"+str(mass)
if not DoBin1:
filename = "Bin2_"+filename
out = open("Cycles/"+filename+".C","w")
TreeSkim = ""
SkimmedTreeDir = ""
if mass <= 80:
if DoBin1:
TreeSkim = "Low_TwoJet_NoFatjet_SS"
SkimmedTreeDir = "Skimmed_Low_TwoJet_NoFatjet"
else:
TreeSkim = "Low_OneJet_NoFatjet_SS"
SkimmedTreeDir = "Skimmed_Low_OneJet_NoFatjet"
else:
if DoBin1:
TreeSkim = "High_TwoJet_NoFatjet_SS"
SkimmedTreeDir = "Skimmed_High_TwoJet_NoFatjet"
else:
TreeSkim = "High_OneFatJet_SS"
SkimmedTreeDir = "Skimmed_High_OneFatJet"
SignalSampleName = "HNElEl_"+str(mass)
print>>out,'''#include "RunNtupleBase.C"
void {4}(){{
bool DoDebug = false;
TString CutOpCardDir = "{3}";
//==== Get Envrionment Variables
TString WORKING_DIR = getenv("PLOTTER_WORKING_DIR");
TString catversion = getenv("CATVERSION");
TString dataset = getenv("CATANVERSION");
TString ENV_PLOT_PATH = getenv("FILE_PATH");
//==== Declare Object
RunNtupleBase m;
m.DoDebug = DoDebug;
m.RunSystematic = true;
//==== Skim selection for tree (tree name : Ntp_<skim>)
m.treeskim = "{0}";
//==== Dataset/channel
m.DataPD = "DoubleEG";
//==== Filename info
m.filename_prefix = "DiLeptonAnalyzer";
m.filename_suffix = "cat_"+catversion+".root";
//==== Input/Output
m.filepath = WORKING_DIR+"/rootfiles/"+dataset+"/Ntuple/{1}/";
m.plotpath = ENV_PLOT_PATH+"/"+dataset+"/cutop/";
//==== Signal Info
m.preselection = "Preselection_SS";
m.signals = {{
"HNElEl_{2}",
}};
m.MinEffPresel = 0.;
m.AddSamplesToList( m.signals );
//==== Backgrounds
vector<TString> bkgs = {{
"fake_Dijet",
"chargeflip",
"TG", "TTG",
"ZGto2LG", "WGtoLNuG",
"WZTo3LNu_powheg",
"ZZTo4L_powheg", "ggZZto2e2mu", "ggZZto2e2nu", "ggZZto2e2tau", "ggZZto2mu2nu", "ggZZto2mu2tau", "ggZZto4e", "ggZZto4mu", "ggZZto4tau", "ggHtoZZ",
"WWW", "WWZ", "WZZ", "ZZZ",
"ttW", "ttZ", "ttH_nonbb",
"WWTo2L2Nu_DS", "WpWpEWK", "WpWpQCD",
}};
m.AddSamplesToList( bkgs );
if(DoDebug) m.PrintSampleList();
//==== Fill MCNorm SF
m.analysisInputs.SetMCSF(WORKING_DIR+"/data/"+dataset+"/MCSF.txt", m.samples);
//==== Get Systematics
m.analysisInputs.SetCalculatedSysts(WORKING_DIR+"/data/"+dataset+"/Syst.txt");
m.SetSourceSystematics();
//==== Set CutCard
TString cutfilename = "HNElEl_{2}.txt";
m.SetCutCard(WORKING_DIR+"/data/"+dataset+"/CutOpCard/"+CutOpCardDir+"/"+cutfilename);
vector<TString> systs = {{
"",
"_MuonEn_up", "_MuonEn_down",
"_MuonIDSF_up", "_MuonIDSF_down",
"_ElectronEn_up", "_ElectronEn_down",
"_ElectronIDSF_up", "_ElectronIDSF_down",
"_TriggerSF_up", "_TriggerSF_down",
"_PU_up", "_PU_down",
"_JetEn_up", "_JetEn_down",
"_JetRes_up", "_JetRes_down",
"_Unclustered_up", "_Unclustered_down",
"_BTagSFEff_up", "_BTagSFEff_down",
"_BTagSFMiss_up", "_BTagSFMiss_down",
"_JetMass_up", "_JetMass_down",
"_JetMassRes_up", "_JetMassRes_down",
}};
vector<double> y_total_bkgs, y_fake_bkgs, y_prompt_bkgs, y_cf_bkgs, y_signals;
vector<TString> reldiff_sources, reldiff_sig_sources;
vector<double> reldiff_means, reldiff_prompt_means, reldiff_fake_means, reldiff_cf_means, reldiff_sig_means;
double y_bkg_central, y_fake_central, y_prompt_central, y_cf_central, y_signal_central;
double eff_signal_central;
double reldiff_up, reldiff_down;
double reldiff_prompt_up, reldiff_prompt_down;
double reldiff_fake_up, reldiff_fake_down;
double reldiff_cf_up, reldiff_cf_down;
double reldiff_sig_up, reldiff_sig_down;
//==== Add up errors
//==== THIS INCLUDES STAT ERRORS
//==== Systematic only will be calculated later..
double syst_total_bkg(0.);
double syst_sig(0.);
double prompt_stat(0.);
double fake_stat(0.); // sumw2+FR-propagation
double cf_stat(0.);
double signal_stat(0.);
double prompt_calculated_syst(0.); // MCSF
double fake_calculated_syst(0.); // Fake Syst
double cf_calculated_syst(0.); // CF Syst
double signal_calculated_syst(0.); // Pdf Syst
double prompt_tau21_syst(0.);
double signal_tau21_syst(0.);
for(unsigned int i=0; i<systs.size(); i++){{
TString this_syst = systs.at(i);
m.channel = "DiElectron"+this_syst;
m.channel_for_jetres = "DiElectron";
m.ClearSignalInfo();
m.FillSignalInfo();
m.cutrangeinfo.k_end = false;
m.Run();
y_total_bkgs.push_back( m.total_bkgs );
y_fake_bkgs.push_back( m.fake_bkgs );
y_prompt_bkgs.push_back( m.prompt_bkgs );
y_cf_bkgs.push_back( m.cf_bkgs );
y_signals.push_back( m.signal_rate.at(0) );
if(i==0){{
this_syst = "Central";
y_bkg_central = m.total_bkgs;
y_fake_central = m.fake_bkgs;
y_prompt_central = m.prompt_bkgs;
y_cf_central = m.cf_bkgs;
eff_signal_central = m.signal_eff.at(0);
//==== m.total_bkgs_err with Central Ntuple contains;
//==== 1) Prompt
//==== - Stat
//==== - Lumi
//==== - MCSF
//==== 2) Fake
//==== - Stat + FR-Stat-Propagation
//==== - Systematic
//==== 3) CF
//==== - Stat
//==== - Systematic
double CalculatedSyst = m.total_bkgs_err/m.total_bkgs;
syst_total_bkg += (CalculatedSyst)*(CalculatedSyst);
//==== Signal
//==== - Stat
//==== - Lumi
y_signal_central = m.signal_rate.at(0);
syst_sig += (m.signal_err.at(0)/m.signal_rate.at(0))*(m.signal_err.at(0)/m.signal_rate.at(0));
m.pdfsyst.Yield_Central = y_signal_central;
//m.pdfsyst.CalculatePdfSystematic();
prompt_stat = m.prompt_bkgs_stat;
fake_stat = m.fake_bkgs_stat;
cf_stat = m.cf_bkgs_stat;
signal_stat = m.signal_stat.at(0);
prompt_calculated_syst = m.prompt_bkgs_syst;
fake_calculated_syst = m.fake_bkgs_syst;
cf_calculated_syst = m.cf_bkgs_syst;
//signal_calculated_syst = (m.pdfsyst.Syst_Pdf_Total)*y_signal_central;
signal_calculated_syst = (2.5*0.01)*y_signal_central;
//==== tau21
prompt_tau21_syst = m.prompt_bkgs_tau21_syst-y_prompt_central;
signal_tau21_syst = m.signal_tau21_syst.at(0)-y_signal_central;
cout << "prompt_tau21_syst = " << prompt_tau21_syst << endl;
cout << "signal_tau21_syst = " << signal_tau21_syst << endl;
}}
double reldiff = (m.total_bkgs-y_bkg_central)/y_bkg_central;
double reldiff_prompt = (m.prompt_bkgs-y_prompt_central)/y_prompt_central;
double reldiff_fake = (m.fake_bkgs-y_fake_central)/m.fake_bkgs;
double reldiff_cf = (m.cf_bkgs-y_cf_central)/m.cf_bkgs;
double reldiff_sig = (m.signal_rate.at(0)-y_signal_central)/y_signal_central;
if(i!=0){{
//==== i=1 : up / i=2 : down
if(i%2==1){{
reldiff_up = reldiff;
reldiff_prompt_up = reldiff_prompt;
reldiff_fake_up = reldiff_fake;
reldiff_cf_up = reldiff_cf;
reldiff_sig_up = reldiff_sig;
}}
else{{
reldiff_down = reldiff;
reldiff_prompt_down = reldiff_prompt;
reldiff_fake_down = reldiff_fake;
reldiff_cf_down = reldiff_cf;
reldiff_sig_down = reldiff_sig;
double reldiff_mean = sqrt( 0.5 * (reldiff_up*reldiff_up+reldiff_down*reldiff_down) );
double reldiff_prompt_mean = sqrt( 0.5 * (reldiff_prompt_up*reldiff_prompt_up+reldiff_prompt_down*reldiff_prompt_down) );
double reldiff_fake_mean = sqrt( 0.5 * (reldiff_fake_up*reldiff_fake_up+reldiff_fake_down*reldiff_fake_down) );
double reldiff_cf_mean = sqrt( 0.5 * (reldiff_cf_up*reldiff_cf_up+reldiff_cf_down*reldiff_cf_down) );
double reldiff_sig_mean = sqrt( 0.5 * (reldiff_sig_up*reldiff_sig_up+reldiff_sig_down*reldiff_sig_down) );
reldiff_sources.push_back( this_syst );
reldiff_means.push_back( reldiff_mean );
reldiff_prompt_means.push_back( reldiff_prompt_mean );
reldiff_fake_means.push_back( reldiff_fake_mean );
reldiff_cf_means.push_back( reldiff_cf_mean );
reldiff_sig_means.push_back( reldiff_sig_mean );
}}
}} // Not Central, add systematics
//==== Print
cout << this_syst << "\t" << m.prompt_bkgs << "\t" << m.fake_bkgs << "\t" << m.cf_bkgs << endl;
}}
/*
vector<TString> sourcealias = {{
"Muon Energy Scale",
"Muon ID",
"Electron Energy Scale",
"Electron ID",
"Trigger SF",
"PU",
"Jet Energy Scale",
"Jet Energy Resolution",
"Unclustered Energy",
"B-tagging Eff",
"B-taggin Miss",
"Jet Mass Scale",
"Jet Mass Resolution",
}};
*/
vector<TString> sourcealias = {{
"MuonPt",
"MuonID",
"ElectronE",
"ElectronID",
"Trigger",
"PU",
"JES",
"JER",
"Uncl",
"BEff",
"BMiss",
"JMS",
"JMR",
}};
cout << endl;
cout << "################ Sources ################" << endl;
cout << "Source\tSystTotal\tSystPrompt\tSystFake\tSignal" << endl;
double ONLYSYST_bkg(0.), ONLYSYST_prompt(0.), ONLYSYST_fake(0.), ONLYSYST_cf(0.), ONLYSYST_sig(0.);
double syst_lumi = m.analysisInputs.CalculatedSysts["Luminosity"];
cout << "Stat" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*sqrt(prompt_stat*prompt_stat+fake_stat*fake_stat)/y_bkg_central << " ";
cout << std::fixed<<std::setprecision(2) << 100.*prompt_stat/y_prompt_central << " ";
cout << std::fixed<<std::setprecision(2) << 100.*fake_stat/y_fake_central << " ";
cout << std::fixed<<std::setprecision(2) << 100.*cf_stat/y_cf_central << " ";
cout << std::fixed<<std::setprecision(2) << 100.*signal_stat/y_signal_central << endl;
cout << "Lumi" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*syst_lumi*y_prompt_central/y_bkg_central << " ";
cout << std::fixed<<std::setprecision(2) << 100.*syst_lumi << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*syst_lumi << endl;
ONLYSYST_bkg += (syst_lumi*y_prompt_central/y_bkg_central)*(syst_lumi*y_prompt_central/y_bkg_central);
ONLYSYST_prompt += syst_lumi*syst_lumi;
ONLYSYST_sig += syst_lumi*syst_lumi;
cout << "PDF" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*signal_calculated_syst/y_signal_central << endl;
ONLYSYST_sig += (signal_calculated_syst/y_signal_central)*(signal_calculated_syst/y_signal_central);
syst_sig += (signal_calculated_syst/y_signal_central)*(signal_calculated_syst/y_signal_central);
for(unsigned int i=0; i<reldiff_means.size(); i++){{
TString this_syst = sourcealias.at(i);
//cout << this_syst << " " << y_total_bkgs.at(i) << " " << std::fixed<<std::setprecision(2) << 100.*reldiff_means.at(i) << " %" << endl;
cout << this_syst << " ";
cout << std::fixed<<std::setprecision(2) << 100.*reldiff_means.at(i) << " ";
cout << std::fixed<<std::setprecision(2) << 100.*reldiff_prompt_means.at(i) << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*reldiff_sig_means.at(i) << endl;
ONLYSYST_bkg += reldiff_means.at(i)*reldiff_means.at(i);
ONLYSYST_prompt += reldiff_prompt_means.at(i)*reldiff_prompt_means.at(i);
ONLYSYST_fake += reldiff_fake_means.at(i)*reldiff_fake_means.at(i);
ONLYSYST_cf += reldiff_cf_means.at(i)*reldiff_cf_means.at(i);
ONLYSYST_sig += reldiff_sig_means.at(i)*reldiff_sig_means.at(i);
syst_total_bkg += reldiff_means.at(i)*reldiff_means.at(i);
syst_sig += reldiff_sig_means.at(i)*reldiff_sig_means.at(i);
}}
cout << "Tau21" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*prompt_tau21_syst/y_bkg_central << " ";
cout << std::fixed<<std::setprecision(2) << 100.*prompt_tau21_syst/y_prompt_central << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*signal_tau21_syst/y_signal_central << endl;
ONLYSYST_bkg += (prompt_tau21_syst/y_bkg_central)*(prompt_tau21_syst/y_bkg_central);
ONLYSYST_prompt += (prompt_tau21_syst/y_prompt_central)*(prompt_tau21_syst/y_prompt_central);
syst_total_bkg += (prompt_tau21_syst/y_bkg_central)*(prompt_tau21_syst/y_bkg_central);
syst_sig += (signal_tau21_syst/y_signal_central)*(signal_tau21_syst/y_signal_central);
cout << "MCNorm" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*prompt_calculated_syst/y_bkg_central << " ";
cout << std::fixed<<std::setprecision(2) << 100.*prompt_calculated_syst/y_prompt_central << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << endl;
ONLYSYST_bkg += (prompt_calculated_syst/y_bkg_central)*(prompt_calculated_syst/y_bkg_central);
ONLYSYST_prompt += (prompt_calculated_syst/y_prompt_central)*(prompt_calculated_syst/y_prompt_central);
cout << "Fake" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*fake_calculated_syst/y_bkg_central << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*fake_calculated_syst/y_fake_central << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << endl;
ONLYSYST_bkg += (fake_calculated_syst/y_bkg_central)*(fake_calculated_syst/y_bkg_central);
ONLYSYST_fake += (fake_calculated_syst/y_fake_central)*(fake_calculated_syst/y_fake_central);
cout << "CF" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*cf_calculated_syst/y_bkg_central << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << "-" << " ";
cout << std::fixed<<std::setprecision(2) << 100.*cf_calculated_syst/y_cf_central << " ";
cout << std::fixed<<std::setprecision(2) << "-" << endl;
ONLYSYST_cf += (cf_calculated_syst/y_cf_central)*(cf_calculated_syst/y_cf_central);
ONLYSYST_bkg = sqrt(ONLYSYST_bkg);
ONLYSYST_prompt = sqrt(ONLYSYST_prompt);
ONLYSYST_fake = sqrt(ONLYSYST_fake);
ONLYSYST_cf = sqrt(ONLYSYST_cf);
ONLYSYST_sig = sqrt(ONLYSYST_sig);
cout << "Total Systematic" << " " << 100.*ONLYSYST_bkg << " " << 100.*ONLYSYST_prompt << " " << 100.*ONLYSYST_fake << " " << 100.*ONLYSYST_cf << " " << 100.*ONLYSYST_sig << endl;
cout << endl;
cout << "Total Background" << "\t" << std::fixed<<std::setprecision(3) << y_bkg_central << endl;
cout << "- Prompt" << "\t" << std::fixed<<std::setprecision(3) << y_prompt_central << endl;
cout << "- Fake" << "\t" << std::fixed<<std::setprecision(3) << y_fake_central << endl;
cout << "- CF" << "\t" << std::fixed<<std::setprecision(3) << y_cf_central << endl;
//===============
//==== Finalize
//===============
cout << endl;
cout << "####### Result #######" << endl;
syst_total_bkg = sqrt(syst_total_bkg);
syst_sig = sqrt(syst_sig);
cout << "Signal Eff\t"<<std::fixed<<std::setprecision(6)<<eff_signal_central<<endl;
cout << y_bkg_central << " " << std::fixed<<std::setprecision(6) << 1.+syst_total_bkg << endl;
cout << y_signal_central << " " << std::fixed<<std::setprecision(6) << 1.+syst_sig << endl;
}}'''.format(TreeSkim, SkimmedTreeDir, str(mass), CutCardDirName, filename)
| [
"[email protected]"
] | |
4a08e521718dcfe3e876c959cc6199e63c1e9c0e | 8d92a27a87b452b22c94adfe66ce45c84083fdf9 | /myproject/settings.py | f46b3505cb921eb07a55a7331d81e6bbbfe6846c | [] | no_license | dev-fahim/t_app | 336c707fed31aecaabb757c8283d4c84fa1cf88f | a0faeac3d0ce45d1f557c7c52c4bec3fffa6310e | refs/heads/master | 2022-12-15T20:09:55.857431 | 2018-11-21T06:56:18 | 2018-11-21T06:56:18 | 158,504,246 | 0 | 0 | null | 2022-12-08T01:18:29 | 2018-11-21T06:55:37 | Python | UTF-8 | Python | false | false | 3,097 | py | """
Django settings for myproject project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b^-@ytduu=^xjedb2z0wic)%@4g)g-z@wwxosh#^)g0j!p!-1@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'myproject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'myproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
1d4c22ebff1083d8a40dc2c2f66e9ac41fe68e8e | 40ca9c74e41047b4f057923894f664653c2317c8 | /002_celery/django/sth/views.py | e25e6e38ffa47e47f99b2dd7b42f50441edad371 | [
"MIT"
] | permissive | MeNsaaH/Task-Queues-Django | 1207856fd382baa63f018b60e5c6b79fc0c52b31 | 73bebc9b755e2943455939817325aa63a1aa7f62 | refs/heads/master | 2022-11-21T09:44:57.856364 | 2018-10-11T11:16:50 | 2018-10-11T11:16:50 | 152,408,206 | 0 | 0 | MIT | 2022-11-10T12:56:38 | 2018-10-10T10:49:40 | Python | UTF-8 | Python | false | false | 323 | py | from django.http import HttpResponse
from sth.tasks import some_task
# Create your views here.
def some_view(request):
result = some_task.delay()
# Other Methods on Celery AsyncResult
# result.ready()
# result.get(timeout=1)
# result.traceback
    return HttpResponse('some task has been started')
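# A minimal sketch (not part of the original app) of checking on the queued task later,
# assuming the id of the result returned by some_view is passed back in, e.g. via the URL conf.
def check_task(request, task_id):
    result = some_task.AsyncResult(task_id)
    if result.ready():
        return HttpResponse('task finished: %s' % result.get(timeout=1))
    return HttpResponse('task %s is still running' % task_id)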
| [
"="
] | = |
906d663716faf84d5efa71adc0baced6ba2e9174 | 5ecdbc6e79c408ed45327ffeae11eae91c4b8ce6 | /ilusiones_sales/models/contract_number.py | f898d4746618b0bb0ccf7fbf890269a839a2dc93 | [] | no_license | rosalesdc/ejemplos_addons | c6ee5cf9a10935b38165eca84c07a84d4d9c2538 | 8c3e74a3d0145b74cb8288772763e88f39979e6f | refs/heads/master | 2022-12-22T07:21:16.238052 | 2020-09-25T06:18:34 | 2020-09-25T06:18:34 | 298,184,002 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class ContractNumber(models.Model):
_name = 'contract.number'
_description = "Numero de conrato"
name = fields.Char(string='Numero de serie', required=True)
date = fields.Date(string="Fecha del contrato")
| [
"[email protected]"
] | |
b101bda0005c03561d755e2270489d08ff015499 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/layout/scene/zaxis/_categoryarraysrc.py | b38055e7ec20ace3ec74c3b976ac52a75b5fd63d | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 445 | py | import _plotly_utils.basevalidators
class CategoryarraysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="categoryarraysrc", parent_name="layout.scene.zaxis", **kwargs
):
super(CategoryarraysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
| [
"[email protected]"
] | |
1f84c1a411609c8c5b853c69c2baa18edb4f027e | 05148c0ea223cfc7ed9d16234ab3e6bb40885e9d | /Packages/matplotlib-2.2.2/lib/mpl_examples/pyplots/whats_new_98_4_fill_between.py | ed4b7f4ac7d55386bb003fc91a8e1dfea59642c1 | [
"MIT"
] | permissive | NightKirie/NCKU_NLP_2018_industry3 | 9ee226e194287fd9088429f87c58c874e050a8b3 | 23ac13644b140587e23cfeffb114c7c6f46f17a2 | refs/heads/master | 2021-06-05T05:33:09.510647 | 2018-07-05T10:19:47 | 2018-07-05T10:19:47 | 133,680,341 | 1 | 4 | MIT | 2020-05-20T16:29:54 | 2018-05-16T14:43:38 | Python | UTF-8 | Python | false | false | 456 | py | """
=============================
Whats New 0.98.4 Fill Between
=============================
"""
import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0.0, 2, 0.01)
y1 = np.sin(2*np.pi*x)
y2 = 1.2*np.sin(4*np.pi*x)
fig, ax = plt.subplots()
ax.plot(x, y1, x, y2, color='black')
ax.fill_between(x, y1, y2, where=y2>y1, facecolor='green')
ax.fill_between(x, y1, y2, where=y2<=y1, facecolor='red')
ax.set_title('fill between where')
plt.show()
| [
"[email protected]"
] | |
19dd60844691611bae70a8df07c9fc9cd60b0efb | 23f534a67f2d58ea556885b93584590f57f47123 | /src/tutorials/fluentpython/concurrency_futures/Standardflags.py | 319a5ec9b067726a606f19bac52621071e8c93a1 | [] | no_license | manas-mukherjee/MLTools | 2cb3e735d4967f4c60c08739c86e07224977a182 | eb18e6d1c6a8900ed47332a7dfb1ceaccc789deb | refs/heads/master | 2022-12-07T22:46:13.419343 | 2020-03-10T09:46:03 | 2020-03-10T09:46:03 | 98,766,117 | 2 | 0 | null | 2022-11-22T01:27:34 | 2017-07-30T00:36:56 | Jupyter Notebook | UTF-8 | Python | false | false | 976 | py | import os
import time
import sys
import requests
POP20_CC = ('CN IN US ID BR PK NG BD RU JP '
'MX PH VN ET EG DE IR TR CD FR').split()
BASE_URL = 'http://flupy.org/data/flags'
DEST_DIR = 'downloads/'
def save_flag(img, filename):
path = os.path.join(DEST_DIR, filename)
with open(path, 'wb') as fp:
fp.write(img)
def get_flag(cc):
url = '{}/{cc}/{cc}.gif'.format(BASE_URL, cc=cc.lower())
resp = requests.get(url)
return resp.content
def show(text):
print(text, end=' ')
sys.stdout.flush()
def download_many(cc_list):
for cc in sorted(cc_list):
image = get_flag(cc)
show(cc)
save_flag(image, cc.lower() + '.gif')
return len(cc_list)
def main(download_many):
t0 = time.time()
count = download_many(POP20_CC)
elapsed = time.time() - t0
msg = '\n{} flags downloaded in {:.2f}s'
print(msg.format(count, elapsed))
if __name__ == '__main__':
main(download_many)
| [
"[email protected]"
] | |
432e56502ab6f191dd680864cf5673b011f9a73b | f6f632bee57875e76e1a2aa713fdbe9f25e18d66 | /python/_1001_1500/1434_number-of-ways-to-wear-different-hats-to-each-other.py | 6e4134899e9fb8ff1f3ae283f20327e4315b3a62 | [] | no_license | Wang-Yann/LeetCodeMe | b50ee60beeeb3661869bb948bef4fbe21fc6d904 | 44765a7d89423b7ec2c159f70b1a6f6e446523c2 | refs/heads/master | 2023-08-07T05:31:23.428240 | 2021-09-30T15:33:53 | 2021-09-30T15:33:53 | 253,497,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,244 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Rock Wayne
# @Created : 2020-06-19 08:00:00
# @Last Modified : 2020-06-19 08:00:00
# @Mail : [email protected]
# @Version : alpha-1.0
"""
# There are n people and 40 types of hats labeled from 1 to 40.
#
# You are given a list of lists of integers, hats, where hats[i] is the list of all hats preferred by the i-th person.
#
# Return the number of ways that the n people can wear different hats from each other.
#
# Since the answer may be too large, return it modulo 10^9 + 7.
#
# Example 1:
#
# Input: hats = [[3,4],[4,5],[5]]
# Output: 1
# Explanation: There is only one way to choose hats given the conditions.
# The first person chooses hat 3, the second person hat 4, and the last one hat 5.
#
# Example 2:
#
# Input: hats = [[3,5,1],[3,5]]
# Output: 4
# Explanation: There are 4 ways to choose hats:
# (3,5), (5,3), (1,3) and (1,5)
#
# Example 3:
#
# Input: hats = [[1,2,3,4],[1,2,3,4],[1,2,3,4],[1,2,3,4]]
# Output: 24
# Explanation: Each person can choose a hat labeled from 1 to 4.
# The number of permutations of (1,2,3,4) is 24.
#
# Example 4:
#
# Input: hats = [[1,2,3],[2,3,5,6],[1,3,7,9],[1,8,9],[2,5,7]]
# Output: 111
#
# Constraints:
#
# n == hats.length
# 1 <= n <= 10
# 1 <= hats[i].length <= 40
# 1 <= hats[i][j] <= 40
# hats[i] contains a list of unique integers.
#
# Related Topics: Bit Manipulation, Dynamic Programming
"""
import functools
from typing import List
import pytest
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
def numberWays(self, hats: List[List[int]]) -> int:
        # N is the total number of people
MOD = 10 ** 9 + 7
N = len(hats)
@functools.lru_cache(None)
def dp(cur, pos):
            # cur: the hat currently being considered (hats are tried in order)
            # pos: bitmask of which people already wear a hat
            # if everyone already wears a hat, this is one complete valid assignment
if pos == (1 << N) - 1:
return 1
            # not everyone has a hat yet, but there are no hats left: no valid assignment
if cur > 40:
return 0
            # option 1: nobody takes this hat, so the count is dp(cur+1, pos)
res = dp(cur + 1, pos)
            # option 2: give this hat to someone who likes it
for i in range(N):
                # person i likes this hat and is not wearing one yet (bit i of pos is 0)
if cur in hats[i] and not pos & (1 << i):
                    # put the hat on person i (set bit i) and recurse on the next hat
res += dp(cur + 1, pos + (1 << i))
return res % MOD
return dp(0, 0)
# leetcode submit region end(Prohibit modification and deletion)
class Solution1:
def numberWays(self, hats: List[List[int]]) -> int:
""" 状压DP"""
MOD = 10 ** 9 + 7
HAT_SIZE = 40
N = len(hats)
hat_to_people = [[] for _ in range(HAT_SIZE)]
for i in range(N):
for h in hats[i]:
hat_to_people[h - 1].append(i)
dp = [0] * (1 << N)
dp[0] = 1
for people in hat_to_people:
for mask in range(len(dp) - 1, -1, -1):
for p in people:
if mask & (1 << p):
continue
dp[mask | (1 << p)] += dp[mask]
dp[mask | (1 << p)] %= MOD
# print(dp)
return dp[-1]
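# A small worked trace of the bitmask DP in Solution1 (assumed input: hats = [[3, 4], [4, 5], [5]], so N = 3).
# dp[mask] counts assignments that cover exactly the people in bitmask `mask`:
#   after hat 3 (liked by person 0):        dp[0b001] = 1
#   after hat 4 (liked by persons 0 and 1): dp[0b001] = 2, dp[0b010] = 1, dp[0b011] = 1
#   after hat 5 (liked by persons 1 and 2): dp[0b111] = 1, which is what numberWays returns.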
@pytest.mark.parametrize("kw,expected", [
[dict(hats=[[3, 4], [4, 5], [5]]), 1],
[dict(hats=[[3, 5, 1], [3, 5]]), 4],
[dict(hats=[[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]), 24],
[dict(hats=[[1, 2, 3], [2, 3, 5, 6], [1, 3, 7, 9], [1, 8, 9], [2, 5, 7]]), 111],
])
def test_solutions(kw, expected):
assert Solution().numberWays(**kw) == expected
assert Solution1().numberWays(**kw) == expected
if __name__ == '__main__':
pytest.main(["-q", "--color=yes", "--capture=no", __file__])
| [
"[email protected]"
] | |
6318b848bbab904688fa14511ab9a2f88c7fd1b4 | 7a5b729a660a35d0d80c9836202025a719f026fb | /general codes/p18.py | 303da159416192105ea9e6922a2a6bf78a450f29 | [] | no_license | Harshit2009/My-Programs- | 7a05eb3369b98010805752a0234867b726c4ac0e | 1ac60faeb0ba514f2c35bcb82be43654b5cef785 | refs/heads/master | 2023-01-13T18:58:26.088714 | 2020-11-19T08:31:43 | 2020-11-19T08:31:43 | 269,538,702 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 50 | py | for i in range(20,0,-2) :
print(i)
| [
"[email protected]"
] | |
c26155b2c52ac0f93816f82789a0620fa8778e43 | 30319ef38df7cdd57a0e31d32009dfaaa6803ce2 | /zeijemol/views/startup.py | c2f4684c1e5c34e34f66fd847260a1c3cc6652bf | [] | no_license | neurospin/zeijemol | 9a064562387604bfbd4cd619687719d99074736c | d023e6f7907b73810c4496687d63dead5836138c | refs/heads/master | 2020-04-11T03:38:09.852328 | 2017-09-12T11:44:46 | 2017-09-12T11:44:46 | 50,499,760 | 1 | 4 | null | 2017-08-31T13:21:45 | 2016-01-27T10:26:44 | JavaScript | UTF-8 | Python | false | false | 7,706 | py | ##########################################################################
# NSAp - Copyright (C) CEA, 2013
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
# System import
from __future__ import division
import os
import collections
# Cubicweb import
from cubicweb import _
from cubicweb.web.views.startup import IndexView
from cubicweb.web.httpcache import NoHTTPCacheManager
from cubicweb.view import View
from cubicweb.predicates import authenticated_user
from cubicweb.predicates import match_user_groups
class ZEIJEMOLNotRaterIndexView(IndexView):
""" Class that defines the index view.
"""
__regid__ = "index"
__select__ = authenticated_user() & ~match_user_groups(
"managers", "moderators")
title = _("Index")
def call(self, **kwargs):
""" Create the loggedin 'index' page of our site.
"""
# Format template
template = self._cw.vreg.template_env.get_template("startup.logged.jinja2")
html = template.render(
header_url=self._cw.data_url("creative/img/neurospin.jpg"),
moderator=False,
waves_progress={})
self.w(html)
class ZEIJEMOLRaterIndexView(IndexView):
""" Class that defines the index view.
"""
__regid__ = "index"
__select__ = authenticated_user() & match_user_groups(
"managers", "moderators")
title = _("Index")
http_cache_manager = NoHTTPCacheManager
def call(self, **kwargs):
""" Create the loggedin 'index' page of our site.
"""
# Get information to display a summary table with one progress bar
# for each wave
rset = self._cw.execute(
"Any S, W, N, C Where S is SnapSet, S wave W, W name N, "
"W category C")
struct = {}
for index, row in enumerate(rset):
wave_name = row[2]
category = row[3]
struct.setdefault(category, {}).setdefault(wave_name, []).append(
rset.get_entity(index, 0))
waves_progress = {}
for category, wave_struct in struct.items():
for wave_name, snapset in wave_struct.items():
                nb_of_snapset = len(snapset)
nb_rates = 0
for entity in snapset:
scores = [
e for e in entity.scores
if e.scored_by[0].login == self._cw.session.login]
if len(scores) == 1:
nb_rates += 1
elif len(scores) > 1:
raise Exception(
"We expect one score per user for one snap.")
waves_progress.setdefault(category, []).append(
(wave_name, int(nb_rates / nb_of_snapset * 100)))
# Format template
template = self._cw.vreg.template_env.get_template("startup.logged.jinja2")
html = template.render(
header_url=self._cw.data_url("creative/img/neurospin.jpg"),
moderator=True,
waves_progress=waves_progress)
self.w(html)
class ZEIJEMOLIndexView(IndexView):
""" Class that defines the index view.
"""
__regid__ = "index"
__select__ = ~authenticated_user()
title = _("Index")
templatable = False
def call(self, **kwargs):
""" Create the anonymous 'index' page of our site.
"""
# Get additional resources links
css = []
for path in ("creative/vendor/bootstrap/css/bootstrap.min.css",
"creative/vendor/font-awesome/css/font-awesome.min.css",
"creative/vendor/magnific-popup/magnific-popup.css",
"creative/css/creative.css"):
css.append(self._cw.data_url(path))
js = []
for path in ("creative/vendor/jquery/jquery.min.js",
"creative/vendor/bootstrap/js/bootstrap.min.js",
"creative/vendor/scrollreveal/scrollreveal.min.js",
"creative/vendor/magnific-popup/jquery.magnific-popup.min.js",
"creative/js/creative.js"):
js.append(self._cw.data_url(path))
# Format template
template = self._cw.vreg.template_env.get_template("startup.jinja2")
html = template.render(
header_url=self._cw.data_url("creative/img/neurospin.jpg"),
login_url=self._cw.build_url(
"login", __message=u"Please login with your account."),
contact_email=self._cw.vreg.config.get(
"administrator-emails", "[email protected]"),
css_url=css,
js_url=js)
self.w(html)
class PieChart(View):
""" Create a pie chart representing the user rates with HighCharts.
"""
__regid__ = "pie-highcharts"
paginable = False
div_id = "pie-highcharts"
def call(self, data, title, container_id=0,
highcharts_js="https://code.highcharts.com/highcharts.js",
exporting_js="https://code.highcharts.com/modules/exporting.js"):
""" Method that will create a pie chart from the user rates.
Parameters
----------
data: dict
            a dictionary with titles as keys and occurrence (in percent) as
values.
title: str
a title for the chart.
container_id: int
an identifier for the chart container.
"""
# Add js resources
self._cw.add_js(highcharts_js, localfile=False)
self._cw.add_js(exporting_js, localfile=False)
# Create the highcharts string representation of the data
sdata = '['
for key, value in data.items():
sdata += '["{0}", {1}], '.format(key, value)
sdata += ']'
# Generate the script
# > headers
self.w(u'<script type="text/javascript">')
self.w(u'$(function () {{ $("#hc_container_{0}").highcharts({{'.format(
container_id))
# > configure credit
self.w(u'credits : {enabled : false}, ')
# > configure chart
self.w(u'chart: {plotBackgroundColor: null, plotBorderWidth: 1, '
'plotShadow: false}, ')
# > configure title
self.w(u'title: {{text: "{0}"}}, '.format(title))
# > configure tooltip
self.w(u'tooltip: {pointFormat: "{series.name}: '
'<b>{point.percentage:.1f}%</b>" }, ')
# > configure plot
self.w(u'plotOptions: {')
self.w(u'pie: {allowPointSelect: true, cursor: "pointer", '
'dataLabels: { enabled: true, format: "<b>{point.name}</b>: '
'{point.percentage:.1f} %", style: {color: (Highcharts.theme '
'&& Highcharts.theme.contrastTextColor) || "black"}}}')
self.w(u'}, ')
# > configure series
self.w(u'series: [{{type: "pie", name: "Rate", '
'data: {0}}}] '.format(sdata))
# > close headers
self.w(u'}); ')
self.w(u'}); ')
self.w(u'</script>')
# Add a container in the body to display the pie chart
self.w(u'<div id="hc_container_{0}" class="hc_container">'
'</div>'.format(container_id))
def registration_callback(vreg):
#vreg.register_and_replace(SnapIndexView, IndexView)
vreg.register_and_replace(ZEIJEMOLIndexView, IndexView)
vreg.register(ZEIJEMOLRaterIndexView)
vreg.register(ZEIJEMOLNotRaterIndexView)
vreg.register(PieChart)
| [
"[email protected]"
] | |
d65628556268853325cd60b2ae9d5b468ddd607c | f9acdde88dbb70a2844e058f6c53c016fc8407c1 | /lfc/utils/middleware.py | 73fd4101ae5c997956b80590d97c5c105c676f5e | [] | no_license | yzl11/django-lfc | 536daccae82351af66f3894c38c8f2702691af75 | 75c900d672b4d36705fb8fa4833c446bbb78efea | refs/heads/master | 2021-01-15T13:14:37.192773 | 2015-05-03T15:03:12 | 2015-05-03T15:03:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | # python imports
import logging
# permissions imports
from permissions.exceptions import Unauthorized
# lfc imports
import lfc.utils
# Load logger
logger = logging.getLogger(__name__)
class LFCMiddleware:
"""LFC specific middleware.
"""
def process_exception(self, request, exception):
"""Catches Unauthorized exceptions to display the login form.
"""
if isinstance(exception, Unauthorized):
logger.info(u"Unauthorized: %s" % exception.message)
return lfc.utils.login_form(next=request.META.get("PATH_INFO"))
| [
"[email protected]"
] | |
8b1fab387a52af792c6fd36ad12169f7f23c2915 | 1fe0b680ce53bb3bb9078356ea2b25e572d9cfdc | /venv/lib/python2.7/site-packages/ansible/modules/system/dconf.py | 8183b07648d47b187e110da34bdfeb449350dd45 | [
"MIT"
] | permissive | otus-devops-2019-02/devopscourses_infra | 1929c4a9eace3fdb0eb118bf216f3385fc0cdb1c | e42e5deafce395af869084ede245fc6cff6d0b2c | refs/heads/master | 2020-04-29T02:41:49.985889 | 2019-05-21T06:35:19 | 2019-05-21T06:35:19 | 175,780,457 | 0 | 1 | MIT | 2019-05-21T06:35:20 | 2019-03-15T08:35:54 | HCL | UTF-8 | Python | false | false | 13,220 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Branko Majic <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
module: dconf
author:
- "Branko Majic (@azaghal)"
short_description: Modify and read dconf database
description:
- This module allows modifications and reading of dconf database. The module
is implemented as a wrapper around dconf tool. Please see the dconf(1) man
page for more details.
- Since C(dconf) requires a running D-Bus session to change values, the module
will try to detect an existing session and reuse it, or run the tool via
C(dbus-run-session).
notes:
- This module depends on C(psutil) Python library (version 4.0.0 and upwards),
C(dconf), C(dbus-send), and C(dbus-run-session) binaries. Depending on
distribution you are using, you may need to install additional packages to
have these available.
- Detection of existing, running D-Bus session, required to change settings
via C(dconf), is not 100% reliable due to implementation details of D-Bus
daemon itself. This might lead to running applications not picking-up
changes on the fly if options are changed via Ansible and
C(dbus-run-session).
- Keep in mind that the C(dconf) CLI tool, which this module wraps around,
utilises an unusual syntax for the values (GVariant). For example, if you
wanted to provide a string value, the correct syntax would be
C(value="'myvalue'") - with single quotes as part of the Ansible parameter
value.
- The easiest way to figure out exact syntax/value you need to provide for a
key is by making the configuration change in application affected by the
key, and then having a look at value set via commands C(dconf dump
/path/to/dir/) or C(dconf read /path/to/key).
version_added: "2.4"
options:
key:
required: true
description:
- A dconf key to modify or read from the dconf database.
value:
required: false
description:
- Value to set for the specified dconf key. Value should be specified in
GVariant format. Due to complexity of this format, it is best to have a
look at existing values in the dconf database. Required for
C(state=present).
state:
required: false
default: present
choices:
- read
- present
- absent
description:
- The action to take upon the key/value.
"""
RETURN = """
value:
description: value associated with the requested key
returned: success, state was "read"
type: str
sample: "'Default'"
"""
EXAMPLES = """
- name: Configure available keyboard layouts in Gnome
dconf:
key: "/org/gnome/desktop/input-sources/sources"
value: "[('xkb', 'us'), ('xkb', 'se')]"
state: present
- name: Read currently available keyboard layouts in Gnome
dconf:
key: "/org/gnome/desktop/input-sources/sources"
state: read
register: keyboard_layouts
- name: Reset the available keyboard layouts in Gnome
dconf:
key: "/org/gnome/desktop/input-sources/sources"
state: absent
- name: Configure available keyboard layouts in Cinnamon
dconf:
key: "/org/gnome/libgnomekbd/keyboard/layouts"
value: "['us', 'se']"
state: present
- name: Read currently available keyboard layouts in Cinnamon
dconf:
key: "/org/gnome/libgnomekbd/keyboard/layouts"
state: read
register: keyboard_layouts
- name: Reset the available keyboard layouts in Cinnamon
dconf:
key: "/org/gnome/libgnomekbd/keyboard/layouts"
state: absent
- name: Disable desktop effects in Cinnamon
dconf:
key: "/org/cinnamon/desktop-effects"
value: "false"
state: present
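# Added illustration (not from the original module docs): a plain string value must
# carry its own GVariant quotes, as described in the notes above.
- name: Set the Gnome wallpaper scaling to zoom (example key, assumed present)
  dconf:
    key: "/org/gnome/desktop/background/picture-options"
    value: "'zoom'"
    state: present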
"""
import os
import traceback
PSUTIL_IMP_ERR = None
try:
import psutil
psutil_found = True
except ImportError:
PSUTIL_IMP_ERR = traceback.format_exc()
psutil_found = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class DBusWrapper(object):
"""
Helper class that can be used for running a command with a working D-Bus
session.
If possible, command will be run against an existing D-Bus session,
otherwise the session will be spawned via dbus-run-session.
Example usage:
dbus_wrapper = DBusWrapper(ansible_module)
dbus_wrapper.run_command(["printenv", "DBUS_SESSION_BUS_ADDRESS"])
"""
def __init__(self, module):
"""
Initialises an instance of the class.
:param module: Ansible module instance used to signal failures and run commands.
:type module: AnsibleModule
"""
# Store passed-in arguments and set-up some defaults.
self.module = module
# Try to extract existing D-Bus session address.
self.dbus_session_bus_address = self._get_existing_dbus_session()
# If no existing D-Bus session was detected, check if dbus-run-session
# is available.
if self.dbus_session_bus_address is None:
self.module.get_bin_path('dbus-run-session', required=True)
def _get_existing_dbus_session(self):
"""
Detects and returns an existing D-Bus session bus address.
:returns: string -- D-Bus session bus address. If a running D-Bus session was not detected, returns None.
"""
# We'll be checking the processes of current user only.
uid = os.getuid()
# Go through all the pids for this user, try to extract the D-Bus
# session bus address from environment, and ensure it is possible to
# connect to it.
self.module.debug("Trying to detect existing D-Bus user session for user: %d" % uid)
for pid in psutil.pids():
process = psutil.Process(pid)
process_real_uid, _, _ = process.uids()
try:
if process_real_uid == uid and 'DBUS_SESSION_BUS_ADDRESS' in process.environ():
dbus_session_bus_address_candidate = process.environ()['DBUS_SESSION_BUS_ADDRESS']
self.module.debug("Found D-Bus user session candidate at address: %s" % dbus_session_bus_address_candidate)
command = ['dbus-send', '--address=%s' % dbus_session_bus_address_candidate, '--type=signal', '/', 'com.example.test']
rc, _, _ = self.module.run_command(command)
if rc == 0:
self.module.debug("Verified D-Bus user session candidate as usable at address: %s" % dbus_session_bus_address_candidate)
return dbus_session_bus_address_candidate
# This can happen with things like SSH sessions etc.
except psutil.AccessDenied:
pass
self.module.debug("Failed to find running D-Bus user session, will use dbus-run-session")
return None
def run_command(self, command):
"""
Runs the specified command within a functional D-Bus session. Command is
effectively passed-on to AnsibleModule.run_command() method, with
modification for using dbus-run-session if necessary.
:param command: Command to run, including parameters. Each element of the list should be a string.
:type module: list
:returns: tuple(result_code, standard_output, standard_error) -- Result code, standard output, and standard error from running the command.
"""
if self.dbus_session_bus_address is None:
self.module.debug("Using dbus-run-session wrapper for running commands.")
command = ['dbus-run-session'] + command
rc, out, err = self.module.run_command(command)
if self.dbus_session_bus_address is None and rc == 127:
self.module.fail_json(msg="Failed to run passed-in command, dbus-run-session faced an internal error: %s" % err)
else:
extra_environment = {'DBUS_SESSION_BUS_ADDRESS': self.dbus_session_bus_address}
rc, out, err = self.module.run_command(command, environ_update=extra_environment)
return rc, out, err
class DconfPreference(object):
def __init__(self, module, check_mode=False):
"""
Initialises instance of the class.
:param module: Ansible module instance used to signal failures and run commands.
:type module: AnsibleModule
:param check_mode: Specify whether to only check if a change should be made or if to actually make a change.
:type check_mode: bool
"""
self.module = module
self.check_mode = check_mode
def read(self, key):
"""
Retrieves current value associated with the dconf key.
If an error occurs, a call will be made to AnsibleModule.fail_json.
:returns: string -- Value assigned to the provided key. If the value is not set for specified key, returns None.
"""
command = ["dconf", "read", key]
rc, out, err = self.module.run_command(command)
if rc != 0:
self.module.fail_json(msg='dconf failed while reading the value with error: %s' % err)
if out == '':
value = None
else:
value = out.rstrip('\n')
return value
def write(self, key, value):
"""
Writes the value for specified key.
If an error occurs, a call will be made to AnsibleModule.fail_json.
:param key: dconf key for which the value should be set. Should be a full path.
:type key: str
:param value: Value to set for the specified dconf key. Should be specified in GVariant format.
:type value: str
:returns: bool -- True if a change was made, False if no change was required.
"""
# If no change is needed (or won't be done due to check_mode), notify
# caller straight away.
if value == self.read(key):
return False
elif self.check_mode:
return True
# Set-up command to run. Since DBus is needed for write operation, wrap
# dconf command dbus-launch.
command = ["dconf", "write", key, value]
# Run the command and fetch standard return code, stdout, and stderr.
dbus_wrapper = DBusWrapper(self.module)
rc, out, err = dbus_wrapper.run_command(command)
if rc != 0:
            self.module.fail_json(msg='dconf failed while writing the value with error: %s' % err)
# Value was changed.
return True
def reset(self, key):
"""
        Resets value for the specified key (removes it from user configuration).
If an error occurs, a call will be made to AnsibleModule.fail_json.
:param key: dconf key to reset. Should be a full path.
:type key: str
:returns: bool -- True if a change was made, False if no change was required.
"""
# Read the current value first.
current_value = self.read(key)
# No change was needed, key is not set at all, or just notify user if we
# are in check mode.
if current_value is None:
return False
elif self.check_mode:
return True
# Set-up command to run. Since DBus is needed for reset operation, wrap
# dconf command dbus-launch.
command = ["dconf", "reset", key]
# Run the command and fetch standard return code, stdout, and stderr.
dbus_wrapper = DBusWrapper(self.module)
rc, out, err = dbus_wrapper.run_command(command)
if rc != 0:
            self.module.fail_json(msg='dconf failed while resetting the value with error: %s' % err)
# Value was changed.
return True
def main():
# Setup the Ansible module
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent', 'read']),
key=dict(required=True, type='str'),
value=dict(required=False, default=None, type='str'),
),
supports_check_mode=True
)
if not psutil_found:
module.fail_json(msg=missing_required_lib("psutil"), exception=PSUTIL_IMP_ERR)
# If present state was specified, value must be provided.
if module.params['state'] == 'present' and module.params['value'] is None:
module.fail_json(msg='State "present" requires "value" to be set.')
# Create wrapper instance.
dconf = DconfPreference(module, module.check_mode)
# Process based on different states.
if module.params['state'] == 'read':
value = dconf.read(module.params['key'])
module.exit_json(changed=False, value=value)
elif module.params['state'] == 'present':
changed = dconf.write(module.params['key'], module.params['value'])
module.exit_json(changed=changed)
elif module.params['state'] == 'absent':
changed = dconf.reset(module.params['key'])
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
171445b5eb900e95286b527051d51c2668e76e2d | 35e11785421b1831fb19fef8fb03c804fafbad44 | /tests/common_app.py | 4ec8a40249e2c75286b1b661d9dc93b0ad666831 | [
"MIT"
] | permissive | bwisgood/FRF | fff4e264ceeb7397da58564aaca6be6d66c29430 | c520cb98bd2fca5f29964bcc475d84c011d43954 | refs/heads/master | 2023-01-10T19:17:01.396435 | 2019-08-26T11:23:37 | 2019-08-26T11:23:37 | 155,576,545 | 3 | 0 | MIT | 2022-12-27T15:36:47 | 2018-10-31T15:03:07 | Python | UTF-8 | Python | false | false | 2,447 | py | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from v3.wrapper import FlaskRestFramework
from v3.views import GetView, PostView, PutView, RetrieveView, DeleteView
from v3.serializer import Serializer
db = SQLAlchemy()
pwd = os.environ.get("FRF_MYSQL_PASSWORD") or ""
def config():
global db
app = Flask(__name__)
class Config(object):
# 数据库配置
SQLALCHEMY_DATABASE_URI = r'mysql+pymysql://root:{}@127.0.0.1:3306/test'.format(pwd)
SQLALCHEMY_TRACK_MODIFICATIONS = False
engine = create_engine(SQLALCHEMY_DATABASE_URI)
Base = declarative_base(engine)
TESTING = True
app.config.from_object(Config)
db.init_app(app)
frf = FlaskRestFramework()
frf.init_app(app)
return app
def test_without_db():
# db = SQLAlchemy()
app = Flask(__name__)
app.config["SQLALCHEMY_DATABASE_URL"] = r'mysql+pymysql://root:{}@127.0.0.1:3306/test'.format(pwd)
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config['TESTING'] = True
# db.init_app(app)
frf = FlaskRestFramework()
frf.init_app(app)
return app
class Person(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30))
gender = db.Column(db.String(30))
class PersonSerializer(Serializer):
model_class = Person
logical_delete = None
class PersonView(GetView):
serializer = PersonSerializer
look_up = ("name",)
class PersonPostView(PostView):
serializer = PersonSerializer
class PersonPutView(PutView):
serializer = PersonSerializer
class PersonDeleteView(DeleteView):
serializer = PersonSerializer
from v3.mixins import AllMethodMixin, ReadOnlyMixin
class PersonRetrieveView(AllMethodMixin):
serializer = PersonSerializer
app = config()
# app.add_url_rule('/persons', view_func=PersonView.as_view("person_view"))
# app.add_url_rule('/persons', view_func=PersonPostView.as_view("person_view_post"))
# app.add_url_rule('/persons', view_func=PersonPutView.as_view("person_view_put"))
# app.add_url_rule('/persons', view_func=PersonDeleteView.as_view("person_view_delete"))
app.add_url_rule('/persons', view_func=PersonRetrieveView.as_view("person_view_re"))
if __name__ == '__main__':
ap = app.test_client()
ap.post()
ap.get()
app.run(debug=True)
| [
"[email protected]"
] | |
1c67e00090b8ef676b62937c7101be1ca71afa72 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /kxrhqiE5so3AMXWS7_5.py | 854f9fb64387e018368202a96adcf4e5dab2b54d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py |
def get_number_of_apples(n, p):
p = int(p[:-1])/10
ans = int(n * (10-p)/10)
return ans if ans > 0 else "The children didn't get any apples"
| [
"[email protected]"
] | |
7cd01337d6972f928bcb5624b0c3eda164203938 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_55/558.py | 30c1531227e7121c4e261664bebf108bef953b37 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | #!/usr/bin/python
def precompute(capacity, groups, people):
packing = [[0, 0],]*groups
pointer = 0
for i in range(groups):
pointer = i
total = 0
while (total + people[i]) <= capacity:
total += people[i]
i += 1
if i >= groups:
i = 0
if i == pointer:
break
packing[pointer] = [total, i]
return packing
def coaster(rides, capacity, groups, people):
pointer = 0
income = 0
if groups < 1:
return income
packing = precompute(capacity, groups, people)
while rides > 0:
if packing[pointer][0] == 0:
return income
income += packing[pointer][0]
pointer = packing[pointer][1]
rides -= 1
return income
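# Worked example (illustration; the classic sample case of this problem is assumed here):
# coaster(rides=4, capacity=6, groups=4, people=[1, 4, 2, 1]) loads 5, 4, 6 and 6
# riders on the four runs, so the total income returned is 21.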
def rl():
return sys.stdin.readline().strip()
import sys
cases = int(rl())
for case in range(1, cases+1):
rides, cap, groups = [int(x) for x in rl().split(' ')]
people = [int(x) for x in rl().split(' ')]
if len(people) != groups:
raise Exception('Wrong input')
print 'Case #%s: %s' % (case, coaster(rides, cap, groups, people))
| [
"[email protected]"
] | |
a529f6fb1e0b2d70e2d519ca9bf9cc233dec5c97 | 10c459a49cbc8ee2dc3bc2a8353c48b5a96f0c1d | /spider/day05/01.threading_斗图网.py | 069c486f5ef724f2c44ff447e8feb6a0e9c1c0c7 | [] | no_license | alinzel/Demo | 1a5d0e4596ab4c91d7b580da694b852495c4ddcc | cc22bbcdbd77190014e9c26e963abd7a9f4f0829 | refs/heads/master | 2020-03-10T22:26:30.247695 | 2018-04-15T15:37:28 | 2018-04-15T15:37:28 | 129,619,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,071 | py | # -*- coding: utf-8 -*-
# @Time : 18-3-2 下午6:15
# @Author : Zwl
# @Email : [email protected]
# @File : 01.threading_斗图网.py
# @Software: PyCharm
import threading
import requests
from bs4 import BeautifulSoup
import time
import os
# TODO prepare the request data
base_url = 'https://www.doutula.com/photo/list/?page='
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'
}
# initialise the container that stores page urls
url_list = []
# build urls for the first three pages
for i in range(1, 4):
url = base_url + str(i)
    # append the assembled link to url_list
url_list.append(url)
# initialise the container that stores image urls
img_url_list = []
# initialise the lock
glock = threading.Lock()
# TODO producer thread: fetches and parses the listing pages
class Producer(threading.Thread):
    # override run(); start() executes this method
def run(self):
        print('current producer thread is %s' % threading.current_thread())
        # loop while there are still page urls to process
while len(url_list) > 0:
            # TODO lock while one thread takes a url
glock.acquire()
            # pop one link from url_list (taken from the end; pop removes and returns it)
request_url = url_list.pop()
            # TODO release the lock once done so other threads can proceed
glock.release()
            # request the page and get the response
page = requests.get(request_url, headers=header)
            # extract the page source from the response
html = page.text
            # parse the page
parse_html = BeautifulSoup(html, 'lxml')
            # select the image tags that carry the picture urls
img_urls = parse_html.select('.img-responsive.lazy.image_dta')
            # acquire the lock
glock.acquire()
            # iterate over the matched image tags
for img_url_item in img_urls:
                # get the image link
img_url = img_url_item.attrs['data-original']
                # if the link does not start with http, prepend the scheme
if not img_url.startswith('http'):
img_url = 'http:' + img_url
img_url_list.append(img_url)
else:
img_url_list.append(img_url)
            # release the lock
glock.release()
# TODO consumer thread: writes the images to local disk
class Consumer(threading.Thread):
    # override the parent method
def run(self):
        print('current consumer thread is %s' % threading.current_thread())
        # TODO sleep so the consumer waits two seconds for the producer; the list is empty at startup
time.sleep(2)
        # loop while img_url_list still has data
while len(img_url_list) > 0:
glock.acquire()
img_url = img_url_list.pop()
glock.release()
            # download the image from its url
img = requests.get(img_url, headers=header).content
            # directory where images are stored
dire = os.getcwd() + '/images/'
            # create the directory if it does not exist
if not os.path.exists(dire):
os.mkdir('images')
            # derive the image file name
img_name = img_url[-14:-4]
            # build the image path
path = dire + img_name
            # open the path for writing
with open(path, 'wb') as f:
                # write the data
f.write(img)
if __name__ == '__main__':
for i in range(1, 3):
Producer().start()
for i in range(1, 3):
Consumer().start()
| [
"[email protected]"
] | |
875345586355a9355bce7e92772c02104b0791a4 | 3faf4b9fb76145b2326446bc6bc190a5712b3b62 | /Algorithms/0143 Reorder List.py | 4af27de2eeddeed16f4dcef88dcca1c8ce33e0ab | [] | no_license | cravo123/LeetCode | b93c18f3e4ca01ea55f4fdebceca76ccf664e55e | 4c1288c99f78823c7c3bac0ceedd532e64af1258 | refs/heads/master | 2021-07-12T11:10:26.987657 | 2020-06-02T12:24:29 | 2020-06-02T12:24:29 | 152,670,206 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,358 | py | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
# Solution 1, fast and slow pointers, reverse linked list techniques
class Solution:
def cut_half(self, node):
slow = fast = node
prev = None
while fast and fast.next:
fast = fast.next.next
prev = slow
slow = slow.next
# make sure to null first half tail
prev.next = None
prev, curr = None, slow
while curr:
tmp = curr.next
curr.next = prev
prev, curr = curr, tmp
return node, prev
def reorderList(self, head: ListNode) -> None:
"""
Do not return anything, modify head in-place instead.
"""
if head is None or head.next is None:
return head
# cut in half
p, q = self.cut_half(head)
# paste half lists
dummy = curr = ListNode(0)
while p or q:
if p:
curr.next = p
curr = curr.next
p = p.next
if q:
curr.next = q
curr = curr.next
q = q.next
return dummy.next
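# Worked example (illustration only): for the list 1->2->3->4->5, cut_half returns
# (1->2, 5->4->3) and the merge loop above interleaves them into 1->5->2->4->3.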
| [
"[email protected]"
] | |
32b0be4decbdf1dcd2ea8ee3b247ad2f88aa2dc2 | cd9931a0f21480fedf63a989ba8ebdda98e05e49 | /Perper/Code/tensorflow/tfrecord_test.py | 0406d69de960a8ababf08eafb504ab4ce907bd00 | [] | no_license | WenHui-Zhou/recommand-system | 7c429203268d0ac58560c122ae7b1834ca89472f | fb97229da61aed0a90be97026d42e7a03600382b | refs/heads/main | 2023-05-10T01:04:45.424493 | 2021-06-10T01:58:04 | 2021-06-10T01:58:04 | 321,671,553 | 3 | 1 | null | 2020-12-24T03:33:27 | 2020-12-15T13:10:43 | null | UTF-8 | Python | false | false | 1,119 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author : perperzhou <[email protected]>
# Create Time : 2021-02-09
# Copyright (C)2021 All rights reserved.
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
def _int64_features(value):
return tf.train.Feature(int64_list = tf.train.Int64List(value=[value]))
def _bytes_features(value):
return tf.train.Feature(bytes_list = tf.train.BytesList(value=[value]))
mnist = input_data.read_data_sets(
"./data",dtype=tf.uint8,one_hot=True
)
images = mnist.train.images
labels = mnist.train.labels
pixels = images.shape[1]
num_examples = mnist.train.num_examples
filename = './output.tfrecords'
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
image_raw = images[index].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
        'pixels': _int64_features(pixels),
'label' : _int64_features(np.argmax(labels[index])),
'image_raw': _bytes_features(image_raw)
}))
writer.write(example.SerializeToString())
writer.close()
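# A minimal sketch (not part of the original script) of reading the records back,
# assuming the same feature keys and the TF 1.x API used above.
def parse_example(serialized_example):
    features = tf.parse_single_example(serialized_example, features={
        'pixels': tf.FixedLenFeature([], tf.int64),
        'label': tf.FixedLenFeature([], tf.int64),
        'image_raw': tf.FixedLenFeature([], tf.string),
    })
    # recover the flattened uint8 image from its raw bytes
    image = tf.decode_raw(features['image_raw'], tf.uint8)
    return image, features['label'], features['pixels']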
| [
"[email protected]"
] | |
97193a752d09cf751187c8a863e2c05beba510b1 | 2c0af32f3c1486fb15bc2c0374de2043577cc634 | /modeling/strong_baseline.py | f7e3b52d6417bcf55bacb9a5ebc1e872730c66f7 | [] | no_license | ArronHZG/reid-baseline | 02b210fc3922f4bdc14352979e7ba3c51700ab90 | d20943b117573e25c75338a55cb6219d90d3d2f0 | refs/heads/master | 2021-07-07T09:01:21.784144 | 2021-01-05T08:40:43 | 2021-01-05T08:40:43 | 248,293,171 | 6 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | from torch import nn
from modeling.backbone.resnet import resnet18, resnet50, resnet101, resnet152, \
resnext50_32x4d, resnext101_32x8d, \
wide_resnet50_2, wide_resnet101_2, resnet34
from modeling.base import Base
from utils.data import Data
from modeling.model_initial import weights_init_kaiming, weights_init_classifier
model_map = {'resnet18': resnet18,
'resnet34': resnet34,
'resnet50': resnet50,
'resnet101': resnet101,
'resnet152': resnet152,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x8d': resnext101_32x8d,
'wide_resnet50_2': wide_resnet50_2,
'wide_resnet101_2': wide_resnet101_2}
class Baseline(nn.Module):
def __init__(self,
num_classes,
last_stride,
model_name,
pretrain_choice,
se=False,
ibn_a=False,
ibn_b=False):
super(Baseline, self).__init__()
self.base = model_map[model_name](last_stride=last_stride,
pretrained=True if pretrain_choice == 'imagenet' else False,
se=se,
ibn_a=ibn_a,
ibn_b=ibn_b)
self.GAP = nn.AdaptiveAvgPool2d(1)
self.num_classes = num_classes
self.in_planes = 512 * self.base.block.expansion
self.bottleneck = nn.BatchNorm1d(self.in_planes)
self.bottleneck.bias.requires_grad_(False)
self.bottleneck.apply(weights_init_kaiming)
self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)
self.classifier.apply(weights_init_classifier)
def forward(self, x) -> Data:
x = self.base(x)
feat_t = self.GAP(x).view(x.size(0), -1)
feat_c = self.bottleneck(feat_t) # normalize for angular softmax
data = Data()
data.feat_t = feat_t
data.feat_c = feat_c
if self.training:
cls_score = self.classifier(feat_c)
data.cls_score = cls_score # global feature for triplet loss
return data | [
"[email protected]"
] | |
8550448371992efeb2431d1bd8ee6f8f0de91d3f | a1657a0c5c8f3f8b51b98074293e2f2e9b16e6f4 | /libs/pipeline_model/tensorflow_serving/apis/input_pb2.py | 7d04447c2fcdf2d9c3e04f3d1e60c2ccc6c68f68 | [
"Apache-2.0"
] | permissive | PipelineAI/pipeline | e8067636f5844dea0653aef84bd894ca2e700fc6 | 0f26e3eaad727c1d10950f592fe1949ece8153aa | refs/heads/master | 2023-01-07T15:27:33.741088 | 2022-10-25T23:01:51 | 2022-10-25T23:01:51 | 38,730,494 | 2,596 | 512 | Apache-2.0 | 2020-01-30T23:00:08 | 2015-07-08T03:49:23 | Jsonnet | UTF-8 | Python | false | true | 7,736 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow_serving/apis/input.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.example import example_pb2 as tensorflow_dot_core_dot_example_dot_example__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow_serving/apis/input.proto',
package='tensorflow.serving',
syntax='proto3',
serialized_pb=_b('\n#tensorflow_serving/apis/input.proto\x12\x12tensorflow.serving\x1a%tensorflow/core/example/example.proto\"4\n\x0b\x45xampleList\x12%\n\x08\x65xamples\x18\x01 \x03(\x0b\x32\x13.tensorflow.Example\"e\n\x16\x45xampleListWithContext\x12%\n\x08\x65xamples\x18\x01 \x03(\x0b\x32\x13.tensorflow.Example\x12$\n\x07\x63ontext\x18\x02 \x01(\x0b\x32\x13.tensorflow.Example\"\xa1\x01\n\x05Input\x12;\n\x0c\x65xample_list\x18\x01 \x01(\x0b\x32\x1f.tensorflow.serving.ExampleListB\x02(\x01H\x00\x12S\n\x19\x65xample_list_with_context\x18\x02 \x01(\x0b\x32*.tensorflow.serving.ExampleListWithContextB\x02(\x01H\x00\x42\x06\n\x04kindB\x03\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_example_dot_example__pb2.DESCRIPTOR,])
_EXAMPLELIST = _descriptor.Descriptor(
name='ExampleList',
full_name='tensorflow.serving.ExampleList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='examples', full_name='tensorflow.serving.ExampleList.examples', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=150,
)
_EXAMPLELISTWITHCONTEXT = _descriptor.Descriptor(
name='ExampleListWithContext',
full_name='tensorflow.serving.ExampleListWithContext',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='examples', full_name='tensorflow.serving.ExampleListWithContext.examples', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='context', full_name='tensorflow.serving.ExampleListWithContext.context', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=152,
serialized_end=253,
)
_INPUT = _descriptor.Descriptor(
name='Input',
full_name='tensorflow.serving.Input',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='example_list', full_name='tensorflow.serving.Input.example_list', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('(\001'))),
_descriptor.FieldDescriptor(
name='example_list_with_context', full_name='tensorflow.serving.Input.example_list_with_context', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('(\001'))),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='kind', full_name='tensorflow.serving.Input.kind',
index=0, containing_type=None, fields=[]),
],
serialized_start=256,
serialized_end=417,
)
_EXAMPLELIST.fields_by_name['examples'].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE
_EXAMPLELISTWITHCONTEXT.fields_by_name['examples'].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE
_EXAMPLELISTWITHCONTEXT.fields_by_name['context'].message_type = tensorflow_dot_core_dot_example_dot_example__pb2._EXAMPLE
_INPUT.fields_by_name['example_list'].message_type = _EXAMPLELIST
_INPUT.fields_by_name['example_list_with_context'].message_type = _EXAMPLELISTWITHCONTEXT
_INPUT.oneofs_by_name['kind'].fields.append(
_INPUT.fields_by_name['example_list'])
_INPUT.fields_by_name['example_list'].containing_oneof = _INPUT.oneofs_by_name['kind']
_INPUT.oneofs_by_name['kind'].fields.append(
_INPUT.fields_by_name['example_list_with_context'])
_INPUT.fields_by_name['example_list_with_context'].containing_oneof = _INPUT.oneofs_by_name['kind']
DESCRIPTOR.message_types_by_name['ExampleList'] = _EXAMPLELIST
DESCRIPTOR.message_types_by_name['ExampleListWithContext'] = _EXAMPLELISTWITHCONTEXT
DESCRIPTOR.message_types_by_name['Input'] = _INPUT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ExampleList = _reflection.GeneratedProtocolMessageType('ExampleList', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLELIST,
__module__ = 'tensorflow_serving.apis.input_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ExampleList)
))
_sym_db.RegisterMessage(ExampleList)
ExampleListWithContext = _reflection.GeneratedProtocolMessageType('ExampleListWithContext', (_message.Message,), dict(
DESCRIPTOR = _EXAMPLELISTWITHCONTEXT,
__module__ = 'tensorflow_serving.apis.input_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.ExampleListWithContext)
))
_sym_db.RegisterMessage(ExampleListWithContext)
Input = _reflection.GeneratedProtocolMessageType('Input', (_message.Message,), dict(
DESCRIPTOR = _INPUT,
__module__ = 'tensorflow_serving.apis.input_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.serving.Input)
))
_sym_db.RegisterMessage(Input)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\370\001\001'))
_INPUT.fields_by_name['example_list'].has_options = True
_INPUT.fields_by_name['example_list']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('(\001'))
_INPUT.fields_by_name['example_list_with_context'].has_options = True
_INPUT.fields_by_name['example_list_with_context']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('(\001'))
try:
# THESE ELEMENTS WILL BE DEPRECATED.
# Please use the generated *_pb2_grpc.py files instead.
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
except ImportError:
pass
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
ef93fed2fe484369ac4b364a7b254ed5ed3ceaa9 | ac227cc22d5f5364e5d029a2cef83816a6954590 | /applications/physbam/physbam-lib/Scripts/Archives/log/formatlog.py | a347029839b15cbecce0c24bc25f1497c218db85 | [
"BSD-3-Clause"
] | permissive | schinmayee/nimbus | 597185bc8bac91a2480466cebc8b337f5d96bd2e | 170cd15e24a7a88243a6ea80aabadc0fc0e6e177 | refs/heads/master | 2020-03-11T11:42:39.262834 | 2018-04-18T01:28:23 | 2018-04-18T01:28:23 | 129,976,755 | 0 | 0 | BSD-3-Clause | 2018-04-17T23:33:23 | 2018-04-17T23:33:23 | null | UTF-8 | Python | false | false | 1,148 | py | #!/usr/bin/python
import os
import sys
from elementtree.ElementTree import parse
if len(sys.argv)!=2:
print>>sys.stderr, "Usage: formatlog <dir|logfile>"
sys.exit(1)
log=sys.argv[1]
if os.path.isdir(log): log=os.path.join(log,'log.txt')
tree=parse(open(log))
root=tree.getroot()
RED=chr(27) + '[1;31m'
#BRIGHTRED=chr(27) + '[1;31m'
GREEN=chr(27) + '[1;32m'
BLUE=chr(27) + '[1;34m'
CLEAR=chr(27) + '[00m'
def display(root,indent):
print "%s%*s%-*s %s s%s"%(GREEN,2*indent,"",80-2*indent,root.attrib["name"],root[-1].attrib["value"],CLEAR)
#print "%*s%s"%(5,"","hiu")
#if len(root)==1: print " %s"%(root[-1].attrib["value"])
#print " "
for child in root:
if child.tag=="time":
pass
elif child.tag=="stat":
print "%*s%s%s = %s%s"%(2*indent+2,"",BLUE,child.attrib["name"],child.attrib["value"],CLEAR)
pass
elif child.tag=="print":
print "%*s%s%s%s"%(2*indent+2,"",RED,child.text,CLEAR)
pass
elif child.tag=="error":
print child.text
pass
else:
display(child,indent+1)
display(root,0)
| [
"[email protected]"
] | |
90ff4b62719875007ffdd04e86f56aa88f14d205 | eade1861db1968645e0e17dfaa5250a4b8245b98 | /steel/chenchenglong/model.py | 747955953ab33570df66258a199d30dea61f87d8 | [] | no_license | piupiuup/competition | 5b5da56fed336e07cf99cef8f5bfe89a8f771900 | 076c30df3d2647cb3580c543e604375e84590ca7 | refs/heads/master | 2022-09-30T14:47:01.244084 | 2020-05-30T12:56:02 | 2020-05-30T12:56:02 | 268,074,180 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,059 | py | # https://github.com/junfu1115/DANet
from common import *
from dataset import *
from efficientnet import *
# overwrite ...
from dataset import null_collate as null_collate0
def null_collate(batch):
input, truth_label, truth_mask, infor = null_collate0(batch)
with torch.no_grad():
arange = torch.FloatTensor([1, 2, 3, 4]).to(truth_mask.device).view(1, 4, 1, 1).long()
truth_attention = truth_mask.repeat(1, 4, 1, 1)
truth_attention = (truth_attention == arange).float()
truth_attention = F.avg_pool2d(truth_attention, kernel_size=(32, 32), stride=(32, 32))
truth_attention = (truth_attention > 0 / (32 * 32)).float()
return input, truth_label, truth_mask, truth_attention, infor
####################################################################################################
class ConvGnUp2d(nn.Module):
def __init__(self, in_channel, out_channel, num_group=32, kernel_size=3, padding=1, stride=1):
super(ConvGnUp2d, self).__init__()
self.conv = nn.Conv2d(in_channel, out_channel, kernel_size=kernel_size, padding=padding, stride=stride,
bias=False)
self.gn = nn.GroupNorm(num_group, out_channel)
def forward(self, x):
x = self.conv(x)
x = self.gn(x)
x = F.relu(x, inplace=True)
x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
return x
def upsize_add(x, lateral):
return F.interpolate(x, size=lateral.shape[2:], mode='nearest') + lateral
def upsize(x, scale_factor=2):
x = F.interpolate(x, scale_factor=scale_factor, mode='nearest')
return x
'''
model.py: calling main function ...
stem torch.Size([10, 48, 128, 128])
block1 torch.Size([10, 24, 128, 128])
block2 torch.Size([10, 40, 64, 64])
block3 torch.Size([10, 64, 32, 32])
block4 torch.Size([10, 128, 16, 16])
block5 torch.Size([10, 176, 16, 16])
block6 torch.Size([10, 304, 8, 8])
block7 torch.Size([10, 512, 8, 8])
last torch.Size([10, 2048, 8, 8])
sucess!
'''
class Net(nn.Module):
def load_pretrain(self, skip=['logit.'], is_print=True):
load_pretrain(self, skip, pretrain_file=PRETRAIN_FILE, conversion=CONVERSION, is_print=is_print)
def __init__(self, num_class=4, drop_connect_rate=0.2):
super(Net, self).__init__()
e = EfficientNetB5(drop_connect_rate)
self.stem = e.stem
self.block1 = e.block1
self.block2 = e.block2
self.block3 = e.block3
self.block4 = e.block4
self.block5 = e.block5
self.block6 = e.block6
self.block7 = e.block7
self.last = e.last
e = None # dropped
# ---
self.lateral0 = nn.Conv2d(2048, 64, kernel_size=1, padding=0, stride=1)
self.lateral1 = nn.Conv2d(176, 64, kernel_size=1, padding=0, stride=1)
self.lateral2 = nn.Conv2d(64, 64, kernel_size=1, padding=0, stride=1)
self.lateral3 = nn.Conv2d(40, 64, kernel_size=1, padding=0, stride=1)
self.top1 = nn.Sequential(
ConvGnUp2d(64, 64),
ConvGnUp2d(64, 64),
ConvGnUp2d(64, 64),
)
self.top2 = nn.Sequential(
ConvGnUp2d(64, 64),
ConvGnUp2d(64, 64),
)
self.top3 = nn.Sequential(
ConvGnUp2d(64, 64),
)
self.top4 = nn.Sequential(
nn.Conv2d(64 * 3, 64, kernel_size=3, stride=1, padding=1, bias=False),
BatchNorm2d(64),
nn.ReLU(inplace=True),
)
self.logit_mask = nn.Conv2d(64, num_class + 1, kernel_size=1)
def forward(self, x):
batch_size, C, H, W = x.shape
x = self.stem(x) # ; print('stem ',x.shape)
x = self.block1(x);
x0 = x # ; print('block1',x.shape)
x = self.block2(x);
x1 = x # ; print('block2',x.shape)
x = self.block3(x);
x2 = x # ; print('block3',x.shape)
x = self.block4(x) # ; print('block4',x.shape)
x = self.block5(x);
x3 = x # ; print('block5',x.shape)
x = self.block6(x) # ; print('block6',x.shape)
x = self.block7(x) # ; print('block7',x.shape)
x = self.last(x);
x4 = x # ; print('last ',x.shape)
# segment
t0 = self.lateral0(x4)
t1 = upsize_add(t0, self.lateral1(x3)) # 16x16
t2 = upsize_add(t1, self.lateral2(x2)) # 32x32
t3 = upsize_add(t2, self.lateral3(x1)) # 64x64
t1 = self.top1(t1) # 128x128
t2 = self.top2(t2) # 128x128
t3 = self.top3(t3) # 128x128
t = torch.cat([t1, t2, t3], 1)
t = self.top4(t)
logit_mask = self.logit_mask(t)
logit_mask = F.interpolate(logit_mask, scale_factor=2.0, mode='bilinear', align_corners=False)
return logit_mask
#########################################################################
# use topk
# def criterion_label(logit, truth, weight=None):
# batch_size,num_class,H,W = logit.shape
# K=5
#
# logit = logit.view(batch_size,num_class,-1)
# value, index = logit.topk(K)
#
# logit_k = torch.gather(logit,dim=2,index=index)
# truth_k = truth.view(batch_size,num_class,1).repeat(1,1,5)
#
#
# if weight is None: weight=[1,1,1,1]
# weight = torch.FloatTensor(weight).to(truth.device).view(1,-1,1)
#
#
# loss = F.binary_cross_entropy_with_logits(logit_k, truth_k, reduction='none')
# #https://arxiv.org/pdf/1909.07829.pdf
# if 1:
# gamma=2.0
# p = torch.sigmoid(logit_k)
# focal = (truth_k*(1-p) + (1-truth_k)*(p))**gamma
# weight = weight*focal /focal.sum().item()
#
# loss = loss*weight
# loss = loss.mean()
# return loss
# use top only
# def criterion_label(logit, truth, weight=None):
# batch_size,num_class,H,W = logit.shape
# logit = F.adaptive_max_pool2d(logit,1).view(-1,4)
# truth = truth.view(-1,4)
#
# if weight is None: weight=[1,1,1,1]
# weight = torch.FloatTensor(weight).to(truth.device).view(1,-1)
#
# loss = F.binary_cross_entropy_with_logits(logit, truth, reduction='none')
# loss = loss*weight
# loss = loss.mean()
# return loss
# https://discuss.pytorch.org/t/numerical-stability-of-bcewithlogitsloss/8246
def criterion_attention(logit, truth, weight=None):
batch_size, num_class, H, W = logit.shape
if weight is None: weight = [1, 1, 1, 1]
weight = torch.FloatTensor(weight).to(truth.device).view(1, -1, 1, 1)
loss = F.binary_cross_entropy_with_logits(logit, truth, reduction='none')
# ---
# https://arxiv.org/pdf/1909.07829.pdf
if 0:
gamma = 2.0
p = torch.sigmoid(logit)
focal = (truth * (1 - p) + (1 - truth) * (p)) ** gamma
weight = weight * focal / focal.sum().item() * H * W
# ---
loss = loss * weight
loss = loss.mean()
return loss
#
# def criterion_mask(logit, truth, weight=None):
# if weight is not None: weight = torch.FloatTensor([1]+weight).cuda()
# batch_size,num_class,H,W = logit.shape
#
# logit = logit.permute(0, 2, 3, 1).contiguous().view(batch_size,-1, 5)
# log_probability = -F.log_softmax(logit,-1)
#
#
# truth = truth.permute(0, 2, 3, 1).contiguous().view(-1,1)
# onehot = torch.FloatTensor(batch_size*H*W, 5).to(truth.device)
# onehot.zero_()
# onehot.scatter_(1, truth, 1)
# onehot = onehot.view(batch_size,-1, 5)
#
# #loss = F.cross_entropy(logit, truth, weight=weight, reduction='none')
# loss = log_probability*onehot
#
# loss = loss*weight
# loss = loss.mean()
# return loss
# focal loss
def criterion_mask(logit, truth, weight=None):
if weight is None: weight = [1, 1, 1, 1]
weight = torch.FloatTensor([1] + weight).to(truth.device).view(1, -1)
batch_size, num_class, H, W = logit.shape
logit = logit.permute(0, 2, 3, 1).contiguous().view(-1, 5)
truth = truth.permute(0, 2, 3, 1).contiguous().view(-1)
# return F.cross_entropy(logit, truth, reduction='mean')
log_probability = -F.log_softmax(logit, -1)
probability = F.softmax(logit, -1)
onehot = torch.zeros(batch_size * H * W, num_class).to(truth.device)
onehot.scatter_(dim=1, index=truth.view(-1, 1), value=1) # F.one_hot(truth,5).float()
loss = log_probability * onehot
# ---
if 1: # image based focusing
probability = probability.view(batch_size, H * W, 5)
truth = truth.view(batch_size, H * W, 1)
weight = weight.view(1, 1, 5)
alpha = 2
focal = torch.gather(probability, dim=-1, index=truth.view(batch_size, H * W, 1))
focal = (1 - focal) ** alpha
focal_sum = focal.sum(dim=[1, 2], keepdim=True)
# focal_sum = focal.sum().view(1,1,1)
weight = weight * focal / focal_sum.detach() * H * W
weight = weight.view(-1, 5)
loss = loss * weight
loss = loss.mean()
return loss
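# Toy illustration of the image-based focusing above (numbers are made up):
# with alpha=2, a pixel whose true-class softmax probability is 0.9 gets a
# focal factor of (1-0.9)**2 = 0.01, while one at 0.1 gets (1-0.1)**2 = 0.81,
# so after the per-image normalisation the hard pixels dominate the loss.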
# ----
def logit_mask_to_probability_label(logit):
batch_size, num_class, H, W = logit.shape
probability = F.softmax(logit, 1)
# probability = F.avg_pool2d(probability, kernel_size=16,stride=16)
probability = probability.permute(0, 2, 3, 1).contiguous().view(batch_size, -1, 5)
value, index = probability.max(1)
probability = value[:, 1:]
return probability
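# In other words, the image-level "label" probability returned here for class c
# is the maximum pixel-wise softmax probability of c over the whole image, with
# the background channel (index 0) dropped.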
def metric_label(probability, truth, threshold=0.5):
batch_size = len(truth)
with torch.no_grad():
probability = probability.view(batch_size, 4)
truth = truth.view(batch_size, 4)
# ----
neg_index = (truth == 0).float()
pos_index = 1 - neg_index
num_neg = neg_index.sum(0)
num_pos = pos_index.sum(0)
# ----
p = (probability > threshold).float()
t = (truth > 0.5).float()
tp = ((p + t) == 2).float() # True positives
tn = ((p + t) == 0).float() # True negatives
tn = tn.sum(0)
tp = tp.sum(0)
# ----
tn = tn.data.cpu().numpy()
tp = tp.data.cpu().numpy()
num_neg = num_neg.data.cpu().numpy().astype(np.int32)
num_pos = num_pos.data.cpu().numpy().astype(np.int32)
return tn, tp, num_neg, num_pos
def truth_to_onehot(truth, num_class=4):
onehot = truth.repeat(1, num_class, 1, 1)
arange = torch.arange(1, num_class + 1).view(1, num_class, 1, 1).to(truth.device)
onehot = (onehot == arange).float()
return onehot
def predict_to_onehot(predict, num_class=4):
value, index = torch.max(predict, 1, keepdim=True)
value = value.repeat(1, num_class, 1, 1)
index = index.repeat(1, num_class, 1, 1)
arange = torch.arange(1, num_class + 1).view(1, num_class, 1, 1).to(predict.device)
onehot = (index == arange).float()
value = value * onehot
return value
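# predict_to_onehot keeps, per pixel, only the arg-max class: its probability is
# stored in that class's channel and every other channel is zeroed (a pixel whose
# arg-max is the background channel ends up all zero), so the thresholding in
# metric_mask sees at most one candidate class per pixel.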
def metric_mask(logit, truth, threshold=0.5, sum_threshold=100):
with torch.no_grad():
probability = torch.softmax(logit, 1)
truth = truth_to_onehot(truth)
probability = predict_to_onehot(probability)
batch_size, num_class, H, W = truth.shape
probability = probability.view(batch_size, num_class, -1)
truth = truth.view(batch_size, num_class, -1)
p = (probability > threshold).float()
t = (truth > 0.5).float()
t_sum = t.sum(-1)
p_sum = p.sum(-1)
d_neg = (p_sum < sum_threshold).float()
d_pos = 2 * (p * t).sum(-1) / ((p + t).sum(-1) + 1e-12)
neg_index = (t_sum == 0).float()
pos_index = 1 - neg_index
num_neg = neg_index.sum(0)
num_pos = pos_index.sum(0)
dn = (neg_index * d_neg).sum(0)
dp = (pos_index * d_pos).sum(0)
# ----
dn = dn.data.cpu().numpy()
dp = dp.data.cpu().numpy()
num_neg = num_neg.data.cpu().numpy().astype(np.int32)
num_pos = num_pos.data.cpu().numpy().astype(np.int32)
return dn, dp, num_neg, num_pos
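# d_pos above is the per-class Dice coefficient 2*|P∩T| / (|P| + |T|); with toy
# numbers, a prediction of 100 pixels that overlaps 80 of a 100-pixel truth mask
# scores 2*80 / (100 + 100) = 0.8. d_neg just checks that a class with empty
# truth predicts fewer than sum_threshold pixels.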
##############################################################################################
def make_dummy_data(batch_size=8):
image_id = np.array([
i + '.jpg' for i in [
'0a8fddf7a', '0a29ef6f9', '0a46cc4bf', '0a058fcb6', '0a65bd8d4', '0a427a066', '0a6324223', '0b89f99d7',
'00ac8372f', '1ae56dead', '1b7bec2ba', '1bdb7f26f', '1cac6e1f3', '1d34ad26c', '1d83b44be', '1e75373b2',
'0b4c8e681', '0b5018316', '2b01fd731', '0cb590f8e', '0d4866e3c', '0e106d482', '0ebdc1277', '1bed9264f',
'0a9aaba9a', '0a26aceb2', '0a405b396', '0aa7955fd', '0bda9a0eb', '0c2522533', '0cd22bad5', '0ce3a145f',
'0adc17f1d', '0b56da4ff', '0be9bad7b', '0c888ecb5', '0d4eae8de', '0d78ac743', '0d51538b9', '0ddbc9fb5',
]
]).reshape(5, -1).T.reshape(-1).tolist()
DATA_DIR = '/root/share/project/kaggle/2019/steel/data'
folder = 'train_images'
df = pd.read_csv(DATA_DIR + '/train.csv').fillna('')
df = df_loc_by_list(df, 'ImageId_ClassId', [i + '_%d' % c for i in image_id for c in [1, 2, 3, 4]])
df = df.reset_index(drop=True)
# print(df)
# exit(0)
batch = []
for b in range(0, batch_size):
num_image = len(df) // 4
i = b % num_image
image_id = df['ImageId_ClassId'].values[i * 4][:-2]
rle = df['EncodedPixels'].values[i * 4:(i + 1) * 4:]
image = cv2.imread(DATA_DIR + '/%s/%s' % (folder, image_id), cv2.IMREAD_COLOR)
label = [0 if r == '' else 1 for r in rle]
mask = np.array([run_length_decode(r, height=256, width=1600, fill_value=c) for c, r in zip([1, 2, 3, 4], rle)])
# ---
# crop to 256x400
w = 400
mask_sum = mask.sum(1).sum(0)
mask_sum = mask_sum.cumsum()
mask_sum = mask_sum[w:] - mask_sum[:-w]
x = np.argmax(mask_sum)
image = image[:, x:x + w]
mask = mask[:, :, x:x + w]
zz = 0
# ---
mask = mask.max(0, keepdims=0)
infor = Struct(
index=i,
folder=folder,
image_id=image_id,
)
batch.append([image, label, mask, infor])
input, truth_label, truth_mask, truth_attention, infor = null_collate(batch)
input = input.cuda()
truth_label = truth_label.cuda()
truth_mask = truth_mask.cuda()
truth_attention = truth_attention.cuda()
return input, truth_label, truth_mask, truth_attention, infor
#########################################################################
def run_check_basenet():
net = Net()
print(net)
net.load_pretrain(skip=['logit'])
def run_check_net():
batch_size = 1
C, H, W = 3, 256, 400
num_class = 4
    input = np.random.uniform(-1, 1, (batch_size, C, H, W))
input = torch.from_numpy(input).float().cuda()
net = Net(num_class=num_class).cuda()
net.eval()
with torch.no_grad():
logit = net(input)
print('')
print('input: ', input.shape)
print('logit: ', logit.shape)
# print(net)
def run_check_train():
loss_weight = [1, 1, 1, 1]
if 1:
input, truth_label, truth_mask, truth_attention, infor = make_dummy_data(batch_size=10)
batch_size, C, H, W = input.shape
print('input: ', input.shape)
print('truth_label: ', truth_label.shape)
print('(count) : ', truth_label.sum(0))
print('truth_mask: ', truth_mask.shape)
print('truth_attention: ', truth_attention.shape)
print('')
# ---
net = Net().cuda()
net.load_pretrain(is_print=False) #
net = net.eval()
with torch.no_grad():
logit_mask = net(input)
print('input: ', input.shape)
print('logit_mask: ', logit_mask.shape)
print('')
loss = criterion_mask(logit_mask, truth_mask, loss_weight)
probability_label = logit_mask_to_probability_label(logit_mask)
tn, tp, num_neg, num_pos = metric_label(probability_label, truth_label)
dn, dp, num_neg, num_pos = metric_mask(logit_mask, truth_mask)
print('loss = %0.5f' % loss.item())
print('tn,tp = [%0.3f,%0.3f,%0.3f,%0.3f], [%0.3f,%0.3f,%0.3f,%0.3f] ' % (*(tn / num_neg), *(tp / num_pos)))
        print('dn,dp = [%0.3f,%0.3f,%0.3f,%0.3f], [%0.3f,%0.3f,%0.3f,%0.3f] ' % (*(dn / num_neg), *(dp / num_pos)))
        print('num_neg,num_pos = [%d,%d,%d,%d], [%d,%d,%d,%d] ' % (*num_neg, *num_pos))
print('')
# exit(0)
# dummy sgd to see if it can converge ...
optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
lr=0.1, momentum=0.9, weight_decay=0.0001)
# optimizer = optim.Adam(filter(lambda p: p.requires_grad, net.parameters()),lr=0.001)
print('batch_size =', batch_size)
print('---------------------------------------------------------------------------------------------------------')
print('[iter ] loss | tn, [tp1,tp2,tp3,tp4] | dn, [dp1,dp2,dp3,dp4] ')
print('---------------------------------------------------------------------------------------------------------')
# [00000] 1.91935, 0.27055 | 0.533, [1.000,0.500,0.000,0.000] | 0.000, [0.003,0.016,0.102,0.073]
i = 0
optimizer.zero_grad()
while i <= 150:
net.train()
optimizer.zero_grad()
logit_mask = net(input)
loss = criterion_mask(logit_mask, truth_mask, loss_weight)
probability_label = logit_mask_to_probability_label(logit_mask)
tn, tp, num_neg, num_pos = metric_label(probability_label, truth_label)
dn, dp, num_neg, num_pos = metric_mask(logit_mask, truth_mask)
(loss).backward()
optimizer.step()
if i % 10 == 0:
print(
'[%05d] %8.5f | [%0.2f,%0.2f,%0.2f,%0.2f], [%0.2f,%0.2f,%0.2f,%0.2f] | [%0.2f,%0.2f,%0.2f,%0.2f], [%0.2f,%0.2f,%0.2f,%0.2f] ' % (
i,
loss.item(),
*(tn / num_neg), *(tp / num_pos),
*(dn / num_neg), *(dp / num_pos),
))
i = i + 1
print('')
# exit(0)
if 1:
# net.eval()
logit_mask = net(input)
probability_label = logit_mask_to_probability_label(logit_mask)
probability_mask = F.softmax(logit_mask, 1)
probability_label = probability_label.data.cpu().numpy()
probability_mask = probability_mask.data.cpu().numpy()
truth_label = truth_label.data.cpu().numpy()
truth_mask = truth_mask.data.cpu().numpy()
image = input_to_image(input)
for b in range(batch_size):
print('%2d ------ ' % (b))
result = draw_predict_result(
image[b], truth_label[b], truth_mask[b], probability_label[b], probability_mask[b])
image_show('result', result, resize=0.5)
cv2.waitKey(0)
# main #################################################################
if __name__ == '__main__':
print('%s: calling main function ... ' % os.path.basename(__file__))
# run_check_basenet()
# run_check_net()
run_check_train()
    print('\nsuccess!') | [
"[email protected]"
] | |
e472cd61c73f11e2e79ab123037f39914acf0739 | acef5161a1eeb107b116f9763114bb9f77d701b4 | /pytorch/深度学习之PyTorch入门/2.Intermediate/CNN_Net.py | 378dd4e6966f71b6f77a0e4fbd6285eb685de30e | [] | no_license | lingxiao00/PyTorch_Tutorials | aadb68582edbaa093ab200724c670b36763156b7 | 285bcfb0c60860e47343485daeb54947cd715f97 | refs/heads/master | 2021-10-20T16:56:21.275740 | 2019-03-01T02:46:42 | 2019-03-01T02:46:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,240 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-12-06 19:41:51
# @Author : cdl ([email protected])
# @Link : https://github.com/cdlwhm1217096231/python3_spider
# @Version : $Id$
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Configure the device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyperparameters
num_epochs = 5
num_classes = 10
batch_size = 100
learning_rate = 0.001
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(
root="./datasets/", train=True, transform=transforms.ToTensor(), download=True)
test_dataset = torchvision.datasets.MNIST(
root="./datasets/", train=False, transform=transforms.ToTensor())
# Data Loader
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
dataset=test_dataset, batch_size=batch_size, shuffle=False)
# Define the model
class CNN_Net(nn.Module):
def __init__(self, num_classes=10):
super(CNN_Net, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=16,
kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(in_channels=16, out_channels=32,
kernel_size=5, stride=1, padding=2),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.fc = nn.Linear(7 * 7 * 32, out_features=num_classes)
def forward(self, x):
z1 = self.layer1(x)
a1 = self.layer2(z1)
        z2 = a1.reshape(a1.size(0), -1)  # flatten the pooled feature maps before the fully connected layer
a2 = self.fc(z2)
return a2
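# Shape walk-through for a 28x28 MNIST input (standard sizes, assumed here):
# (N,1,28,28) -> layer1 (5x5 conv, pad 2, then 2x2 max-pool) -> (N,16,14,14)
# -> layer2 -> (N,32,7,7) -> flatten -> (N, 7*7*32) -> fc -> (N, num_classes),
# which is why self.fc takes 7 * 7 * 32 input features.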
model = CNN_Net(num_classes).to(device)
# Define the loss and the optimization algorithm
loss_func = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Start training
total_step = len(train_loader)
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
loss = loss_func(outputs, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 100 == 0:
print("Epoch[{}/{}], Step[{}/{}], Loss:{:.4f}".format(epoch +
1, num_epochs, i + 1, total_step, loss.item()))
# Test the model
model.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
images = images.to(device)
labels = labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print("在测试集上的精度:{}%".format(100 * correct / total))
# Save the model
torch.save(model.state_dict(), "CNN_Net.model")
| [
"[email protected]"
] | |
b380164b0b9a3be449ed86f3869ccf1544d02d13 | 8b2e251d8ffbce4fe3987c0245d7af9aedb21c15 | /tests/test_github.py | 07cb2df9ad0ed812993750e4a9ece01778075dda | [
"BSD-3-Clause"
] | permissive | xlevus/json-spec | 0f93bbf1ceab27d3d64349782e2e593bc7c4e58e | 86db73bfbfb4c5b476b37b304060b32022a257c6 | refs/heads/master | 2021-01-21T18:51:01.112087 | 2015-06-29T10:46:47 | 2015-06-29T10:46:47 | 38,238,544 | 0 | 0 | null | 2015-06-29T09:11:22 | 2015-06-29T09:11:22 | null | UTF-8 | Python | false | false | 1,146 | py | """
tests.tests_github
~~~~~~~~~~~~~~~~~~
"""
import pytest
from jsonspec.validators import load, ValidationError
def test_issue4():
validator = load({
'$schema': 'http://json-schema.org/draft-04/schema#',
'type': 'object',
'properties': {
'props': {
'type': 'array',
'items': {
'oneOf': [
{'type': 'string'},
{'type': 'number'}
]
}
}
}
})
assert {'props': ['hello']} == validator.validate({'props': ['hello']})
assert {'props': [42, 'you']} == validator.validate({'props': [42, 'you']})
with pytest.raises(ValidationError):
validator.validate({
'props': [None]
})
with pytest.raises(ValidationError):
validator.validate({
'props': None
})
with pytest.raises(ValidationError):
validator.validate({
'props': 'hello'
})
with pytest.raises(ValidationError):
validator.validate({
'props': 42
})
| [
"[email protected]"
] | |
69924ca5d5756dc11957610d4dc812203f1ab906 | d6c117812a618ff34055488337aaffea8cf81ca1 | /ui/TabbedView.py | 47f85165a496eb4a5ad28aa23e0898bec01b89b3 | [] | no_license | c0ns0le/Pythonista | 44829969f28783b040dd90b46d08c36cc7a1f590 | 4caba2d48508eafa2477370923e96132947d7b24 | refs/heads/master | 2023-01-21T19:44:28.968799 | 2016-04-01T22:34:04 | 2016-04-01T22:34:04 | 55,368,932 | 3 | 0 | null | 2023-01-22T01:26:07 | 2016-04-03T21:04:40 | Python | UTF-8 | Python | false | false | 2,328 | py | # @ui
# https://gist.github.com/jsbain/fcadaffff4be09c4ec78
import ui
class TabbedView(ui.View):
def __init__(self,tablist=[], frame=(0,0)+ui.get_screen_size()):
'''takes an iterable of Views, using the view name as the tab selector.
        empty views are just given generic names'''
self.tabcounter=0 #unique counter, for name disambiguation
self.buttonheight=30 #height of buttonbar
#setup button bar
self.tabbuttons=ui.SegmentedControl(frame=(0,0,self.width, self.buttonheight))
self.tabbuttons.action=self.tab_action
self.tabbuttons.flex='W'
self.tabbuttons.segments=[]
self.add_subview(self.tabbuttons)
for tab in tablist:
self.addtab(tab)
def tab_action(self,sender):
if sender.selected_index >= 0:
tabname=sender.segments[sender.selected_index]
self[tabname].bring_to_front()
def focus_tab_by_index(self,index):
self.tabbuttons.selected_index=index
self.tab_action(self.tabbuttons)
def focus_tab_by_name(self,tabname):
self.tabbuttons.selected_index=self.tabbuttons.segments.index(tabname)
self.tab_action(self.tabbuttons)
def addtab(self,tab):
if not tab.name:
tab.name='tab{}'.format(self.tabcounter)
if tab.name in self.tabbuttons.segments:
#append unique counter to name
tab.name+=str(self.tabcounter)
self.tabcounter+=1
self.tabbuttons.segments+=(tab.name,)
tab.frame=(0,self.buttonheight,self.width,self.height-self.buttonheight)
tab.flex='WH'
self.add_subview(tab)
self.focus_tab_by_name(tab.name)
def removetab(self,tabname):
self.tabbuttons.segments=[x for x in self.tabbuttons.segments if x != tabname]
self.remove_subview(tabname)
# if tab was top tab, think about updating selected tab to whatever is on top
def layout(self):
pass # maybe set tabbuttons size
if __name__=='__main__':
v=TabbedView()
v.addtab(ui.View(name='red',bg_color='red'))
v.addtab(ui.View(bg_color='blue'))
v.addtab(ui.View(name='green',bg_color='green'))
v.addtab(ui.View(name='green',bg_color='green'))
v.present() | [
"[email protected]"
] | |
b61f1bbafd7a413e534ad4fbe0731bb79f7ec53e | 32676887b51845624748e5debb61a01e3942a7ee | /venv/lib/python3.8/site-packages/fanficfare/story.py | b171bfad1c3365bb2b547bb5b6569eacbd606cdb | [] | no_license | DylanB5402/SummerProject2.5 | 078589fc49cecb402656942710ce5e33e9194f88 | 1cd5fe75f859c82122cf1cc5b5371d0cb73b898a | refs/heads/master | 2022-12-03T21:49:30.392293 | 2020-08-19T04:33:58 | 2020-08-19T04:33:58 | 288,633,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59,866 | py | # -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team, 2020 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import os, re, sys
from collections import defaultdict
import string
import json
import datetime
from math import floor
from functools import partial
import logging
logger = logging.getLogger(__name__)
# py2 vs py3 transition
from . import six
from .six.moves.urllib.parse import (urlparse, urlunparse)
from .six import text_type as unicode
from .six import string_types as basestring
from .six.moves import map
import bs4
from . import exceptions
from .htmlcleanup import conditionalRemoveEntities, removeEntities, removeAllEntities
from .configurable import Configurable, re_compile
from .htmlheuristics import was_run_marker
SPACE_REPLACE=r'\s'
SPLIT_META=r'\,'
# Create convert_image method depending on which graphics lib we can
# load. Preferred: calibre, PIL, none
imagetypes = {
'jpg':'image/jpeg',
'jpeg':'image/jpeg',
'png':'image/png',
'gif':'image/gif',
'svg':'image/svg+xml',
}
try:
from calibre.utils.magick import Image
convtype = {'jpg':'JPG', 'png':'PNG'}
def get_image_size(data):
img = Image()
img.load(data)
owidth, oheight = img.size
return owidth, oheight
def convert_image(url,data,sizes,grayscale,
removetrans,imgtype="jpg",background='#ffffff'):
# logger.debug("calibre convert_image called")
if url.lower().endswith('.svg'):
raise exceptions.RejectImage("Calibre image processing chokes on SVG images.")
export = False
img = Image()
img.load(data)
owidth, oheight = img.size
nwidth, nheight = sizes
scaled, nwidth, nheight = fit_image(owidth, oheight, nwidth, nheight)
if scaled:
img.size = (nwidth, nheight)
export = True
if normalize_format_name(img.format) != imgtype:
export = True
if removetrans and img.has_transparent_pixels():
canvas = Image()
canvas.create_canvas(int(img.size[0]), int(img.size[1]), unicode(background))
canvas.compose(img)
img = canvas
export = True
if grayscale and img.type != "GrayscaleType":
img.type = "GrayscaleType"
export = True
if export:
return (img.export(convtype[imgtype]),imgtype,imagetypes[imgtype])
else:
# logger.debug("image used unchanged")
return (data,imgtype,imagetypes[imgtype])
except:
# No calibre routines, try for Pillow for CLI.
try:
from PIL import Image
from .six import BytesIO
convtype = {'jpg':'JPEG', 'png':'PNG'}
def get_image_size(data):
img = Image.open(BytesIO(data))
owidth, oheight = img.size
return owidth, oheight
def convert_image(url,data,sizes,grayscale,
removetrans,imgtype="jpg",background='#ffffff'):
# logger.debug("Pillow convert_image called")
export = False
img = Image.open(BytesIO(data))
owidth, oheight = img.size
nwidth, nheight = sizes
scaled, nwidth, nheight = fit_image(owidth, oheight, nwidth, nheight)
if scaled:
img = img.resize((nwidth, nheight),Image.ANTIALIAS)
export = True
if normalize_format_name(img.format) != imgtype:
if img.mode == "P":
# convert pallete gifs to RGB so jpg save doesn't fail.
img = img.convert("RGB")
export = True
if removetrans and img.mode == "RGBA":
background = Image.new('RGBA', img.size, background)
# Paste the image on top of the background
background.paste(img, img)
img = background.convert('RGB')
export = True
if grayscale and img.mode != "L":
img = img.convert("L")
export = True
if export:
outsio = BytesIO()
img.save(outsio,convtype[imgtype])
return (outsio.getvalue(),imgtype,imagetypes[imgtype])
else:
# logger.debug("image used unchanged")
return (data,imgtype,imagetypes[imgtype])
except:
# No calibre or PIL, simple pass through with mimetype.
def convert_image(url,data,sizes,grayscale,
removetrans,imgtype="jpg",background='#ffffff'):
# logger.debug("NO convert_image called")
return no_convert_image(url,data)
## also used for explicit no image processing.
def no_convert_image(url,data):
parsedUrl = urlparse(url)
ext=parsedUrl.path[parsedUrl.path.rfind('.')+1:].lower()
if ext not in imagetypes:
# not found at end of path, try end of whole URL in case of
# parameter.
ext = url[url.rfind('.')+1:].lower()
if ext not in imagetypes:
logger.info("no_convert_image url:%s - no known extension -- using .jpg"%url)
# doesn't have extension? use jpg.
ext='jpg'
return (data,ext,imagetypes[ext])
def normalize_format_name(fmt):
if fmt:
fmt = fmt.lower()
if fmt == 'jpeg':
fmt = 'jpg'
return fmt
def fit_image(width, height, pwidth, pheight):
'''
Fit image in box of width pwidth and height pheight.
@param width: Width of image
@param height: Height of image
@param pwidth: Width of box
@param pheight: Height of box
@return: scaled, new_width, new_height. scaled is True iff new_width and/or new_height is different from width or height.
'''
scaled = height > pheight or width > pwidth
if height > pheight:
corrf = pheight/float(height)
width, height = floor(corrf*width), pheight
if width > pwidth:
corrf = pwidth/float(width)
width, height = pwidth, floor(corrf*height)
if height > pheight:
corrf = pheight/float(height)
width, height = floor(corrf*width), pheight
return scaled, int(width), int(height)
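# Illustrative call (numbers chosen for the example): fit_image(1600, 256, 580, 725)
# scales the width down to 580 and the height to floor(256 * 580/1600) = 92,
# returning (True, 580, 92); an image already inside the box comes back unchanged
# as (False, width, height).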
try:
from calibre.library.comments import sanitize_comments_html
except:
def sanitize_comments_html(t):
## should only be called by Calibre version, so this shouldn't
## trip.
# logger.debug("fake sanitize called...")
return t
# The list comes from ffnet, the only multi-language site we support
# at the time of writing. Values are taken largely from pycountry,
# but with some corrections and guesses.
langs = {
"English":"en",
"Spanish":"es",
"French":"fr",
"German":"de",
"Chinese":"zh",
"Japanese":"ja",
"Dutch":"nl",
"Portuguese":"pt",
"Russian":"ru",
"Italian":"it",
"Bulgarian":"bg",
"Polish":"pl",
"Hungarian":"hu",
"Hebrew":"he",
"Arabic":"ar",
"Swedish":"sv",
"Norwegian":"no",
"Danish":"da",
"Finnish":"fi",
"Filipino":"fil",
"Esperanto":"eo",
"Hindi":"hi",
"Punjabi":"pa",
"Farsi":"fa",
"Greek":"el",
"Romanian":"ro",
"Albanian":"sq",
"Serbian":"sr",
"Turkish":"tr",
"Czech":"cs",
"Indonesian":"id",
"Croatian":"hr",
"Catalan":"ca",
"Latin":"la",
"Korean":"ko",
"Vietnamese":"vi",
"Thai":"th",
"Devanagari":"hi",
## These are from/for AO3:
u'العربية':'ar',
u'беларуская':'be',
u'Български език':'bg',
u'Català':'ca',
u'Čeština':'cs',
u'Cymraeg':'cy',
u'Dansk':'da',
u'Deutsch':'de',
u'Ελληνικά':'el',
u'English':'en',
u'Esperanto':'eo',
u'Español':'es',
u'eesti keel':'et',
u'فارسی':'fa',
u'Suomi':'fi',
u'Wikang Filipino':'fil',
u'Français':'fr',
u'Gaeilge':'ga',
u'Gàidhlig':'gd',
u'עִבְרִית':'he',
u'हिन्दी':'hi',
u'Hrvatski':'hr',
u'Magyar':'hu',
u'Bahasa Indonesia':'id',
u'Íslenska':'is',
u'Italiano':'it',
u'日本語':'ja',
u'한국말':'ko',
u'Lingua latina':'la',
u'Lietuvių':'lt',
u'Latviešu valoda':'lv',
u'मराठी':'mr',
u'بهاس ملايو ':'ms',
u'Nederlands':'nl',
u'Norsk':'no',
u'ਪੰਜਾਬੀ':'pa',
u'Polski':'pl',
u'Português':'pt',
u'Quenya':'qya',
u'Română':'ro',
u'Русский':'ru',
u'Slovenčina':'sk',
u'Shqip':'sq',
u'српски':'sr',
u'Svenska':'sv',
u'ไทย':'th',
u'tlhIngan-Hol':'tlh', # Klingon. Has a real ISO 639-2 code.
#'Thermian':'', # Alien language from Galaxy Quest.
    u'Türkçe':'tr',
u'українська':'uk',
u'Tiếng Việt':'vi',
u'中文':'zh',
u'Bahasa Malaysia':'zsm',
}
class InExMatch:
keys = []
regex = None
match = None
negate = False
def __init__(self,line):
if "=>" in line: # for back-compat when used with replace_metadata conditionals.
(self.keys,self.match) = line.split("=>")
self.match = self.match.replace(SPACE_REPLACE,' ')
self.regex = re_compile(self.match,line)
elif "=~" in line:
(self.keys,self.match) = line.split("=~")
self.match = self.match.replace(SPACE_REPLACE,' ')
self.regex = re_compile(self.match,line)
elif "!~" in line:
(self.keys,self.match) = line.split("!~")
self.match = self.match.replace(SPACE_REPLACE,' ')
self.regex = re_compile(self.match,line)
self.negate = True
elif "==" in line:
(self.keys,self.match) = line.split("==")
self.match = self.match.replace(SPACE_REPLACE,' ')
elif "!=" in line:
(self.keys,self.match) = line.split("!=")
self.match = self.match.replace(SPACE_REPLACE,' ')
self.negate = True
self.keys = [x.strip() for x in self.keys.split(",")]
# For conditional, only one key
def is_key(self,key):
return key == self.keys[0]
# For conditional, only one key
def key(self):
return self.keys[0]
def in_keys(self,key):
return key in self.keys
def is_match(self,param):
if not isinstance(param,list):
param = [param]
retval = False
# print(param)
for value in param:
if self.regex:
if self.regex.search(value):
retval |= True
#print(">>>>>>>>>>>>>%s=~%s r: %s,%s=%s"%(self.match,value,self.negate,retval,self.negate != retval))
else:
retval |= self.match == value
#print(">>>>>>>>>>>>>%s==%s r: %s,%s=%s"%(self.match,value,self.negate,retval, self.negate != retval))
return self.negate != retval
def __str__(self):
if self.negate:
f='!'
else:
f='='
if self.regex:
s='~'
else:
s='='
return u'InExMatch(%s %s%s %s)'%(self.keys,f,s,self.match)
## metakey[,metakey]=~pattern
## metakey[,metakey]==string
## *Four*-part lines take effect only when the trailing conditional key=~regexp matches
## metakey[,metakey]=~pattern[&&metakey=~regexp]
## metakey[,metakey]==string[&&metakey=~regexp]
## metakey[,metakey]=~pattern[&&metakey==string]
## metakey[,metakey]==string[&&metakey==string]
def set_in_ex_clude(setting):
dest = []
# print("set_in_ex_clude:"+setting)
for line in setting.splitlines():
full_line=line
if line:
(match,condmatch)=(None,None)
if "&&" in line:
(line,conditional) = line.split("&&")
condmatch = InExMatch(conditional)
match = InExMatch(line)
dest.append([full_line,match,condmatch])
return dest
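# Illustrative ini lines this accepts (keys and values invented for the example):
#   category,genre=~(?i)\bromance\b
#   status==In-Progress&&site==fanfiction.net
# The first is a regexp test on the category/genre values; the second is an exact
# match that is only applied when the trailing &&-conditional itself matches.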
## Two or three part lines. Two-part lines affect everything.
## Three-part lines affect only the listed key(s).
## pattern=>replacement
## metakey,metakey=>pattern=>replacement
## *Five*-part lines take effect only when the trailing conditional key=>regexp matches
## metakey[,metakey]=>pattern=>replacement[&&metakey=>regexp]
def make_replacements(replace):
retval=[]
for repl_line in replace.splitlines():
line=repl_line
try:
(metakeys,regexp,replacement,cond_match)=(None,None,None,None)
if "&&" in line:
(line,conditional) = line.split("&&")
cond_match = InExMatch(conditional)
if "=>" in line:
parts = line.split("=>")
if len(parts) > 2:
metakeys = [x.strip() for x in parts[0].split(",")]
(regexp,replacement)=parts[1:]
else:
(regexp,replacement)=parts
if regexp:
regexp = re_compile(regexp,line)
# A way to explicitly include spaces in the
# replacement string. The .ini parser eats any
# trailing spaces.
replacement=replacement.replace(SPACE_REPLACE,' ')
retval.append([repl_line,metakeys,regexp,replacement,cond_match])
except Exception as e:
logger.error("Problem with Replacement Line:%s"%repl_line)
raise exceptions.PersonalIniFailed(e,'replace_metadata unpacking failed',repl_line)
# raise
# print("replace lines:%s"%len(retval))
return retval
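# Illustrative replace_metadata lines (example patterns only):
#   (?i)\bhp\b=>Harry Potter                     -- two parts: applies to every metadata key
#   genre=>Angst=>Drama                          -- three parts: applies only to the genre entries
#   category=>Sci-Fi=>Science Fiction&&site=~archiveofourown  -- with a trailing conditional
# Each line unpacks above into (line, metakeys, regexp, replacement, cond_match).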
class Story(Configurable):
def __init__(self, configuration):
Configurable.__init__(self, configuration)
try:
## calibre plugin will set externally to match PI version.
self.metadata = {'version':os.environ['CURRENT_VERSION_ID']}
except:
self.metadata = {'version':'unknown'}
self.metadata['python_version']=sys.version
self.replacements = []
self.in_ex_cludes = {}
self.chapters = [] # chapters will be dict containing(url,title,html,etc)
self.chapter_first = None
self.chapter_last = None
self.imgurls = []
self.imgtuples = []
# save processed metadata, dicts keyed by 'key', then (removeentities,dorepl)
# {'key':{(removeentities,dorepl):"value",(...):"value"},'key':... }
self.processed_metadata_cache = {}
self.processed_metadata_list_cache = {}
self.cover=None # *href* of new cover image--need to create html.
self.oldcover=None # (oldcoverhtmlhref,oldcoverhtmltype,oldcoverhtmldata,oldcoverimghref,oldcoverimgtype,oldcoverimgdata)
self.calibrebookmark=None # cheesy way to carry calibre bookmark file forward across update.
self.logfile=None # cheesy way to carry log file forward across update.
self.replacements_prepped = False
def prepare_replacements(self):
if not self.replacements_prepped and not self.is_lightweight():
# logger.debug("prepare_replacements")
# logger.debug("sections:%s"%self.configuration.sectionslist)
## Look for config parameter, split and add each to metadata field.
for (config,metadata) in [("extracategories","category"),
("extragenres","genre"),
("extracharacters","characters"),
("extraships","ships"),
("extrawarnings","warnings")]:
for val in self.getConfigList(config):
self.addToList(metadata,val)
self.replacements = make_replacements(self.getConfig('replace_metadata'))
in_ex_clude_list = ['include_metadata_pre','exclude_metadata_pre',
'include_metadata_post','exclude_metadata_post']
for ie in in_ex_clude_list:
ies = self.getConfig(ie)
# print("%s %s"%(ie,ies))
if ies:
iel = []
self.in_ex_cludes[ie] = set_in_ex_clude(ies)
self.replacements_prepped = True
def set_chapters_range(self,first=None,last=None):
self.chapter_first=first
self.chapter_last=last
def join_list(self, key, vallist):
return self.getConfig("join_string_"+key,u", ").replace(SPACE_REPLACE,' ').join([ unicode(x) for x in vallist if x is not None ])
def setMetadata(self, key, value, condremoveentities=True):
# delete cached replace'd value.
if key in self.processed_metadata_cache:
del self.processed_metadata_cache[key]
        # Fixing everything downstream to handle bool primitives is a
# pain.
if isinstance(value,bool):
value = unicode(value)
# keep as list type, but set as only value.
if self.isList(key):
self.addToList(key,value,condremoveentities=condremoveentities,clear=True)
else:
## still keeps < < and &
if condremoveentities:
self.metadata[key]=conditionalRemoveEntities(value)
else:
self.metadata[key]=value
if key == "language":
try:
# getMetadata not just self.metadata[] to do replace_metadata.
self.setMetadata('langcode',langs[self.getMetadata(key)])
except:
self.setMetadata('langcode','en')
if key == 'dateUpdated' and value:
# Last Update tags for Bill.
self.addToList('lastupdate',value.strftime("Last Update Year/Month: %Y/%m"),clear=True)
self.addToList('lastupdate',value.strftime("Last Update: %Y/%m/%d"))
if key == 'sectionUrl' and value:
self.addUrlConfigSection(value) # adapter/writer share the
# same configuration.
# ignored if config
# is_lightweight()
self.replacements_prepped = False
def getMetadataForConditional(self,key,seen_list={}):
if self.getConfig("conditionals_use_lists",True) and not key.endswith("_LIST"):
condval = self.getList(key,seen_list=seen_list)
else:
condval = self.getMetadata(key.replace("_LIST",""),seen_list=seen_list)
return condval
def do_in_ex_clude(self,which,value,key,seen_list):
if value and which in self.in_ex_cludes:
include = 'include' in which
keyfound = False
found = False
for (line,match,cond_match) in self.in_ex_cludes[which]:
keyfndnow = False
if match.in_keys(key):
if line in seen_list:
logger.info("Skipping %s key(%s) value(%s) line(%s) to prevent infinite recursion."%(which,key,value,line))
continue
# key in keys and either no conditional, or conditional matched
if cond_match == None or cond_match.is_key(key):
keyfndnow = True
else:
new_seen_list = dict(seen_list)
new_seen_list[line]=True
# print(cond_match)
condval = self.getMetadataForConditional(cond_match.key(),seen_list=new_seen_list)
keyfndnow = cond_match.is_match(condval)
# print("match:%s %s\ncond_match:%s %s\n\tkeyfound:%s\n\tfound:%s"%(
# match,value,cond_match,condval,keyfound,found))
keyfound |= keyfndnow
if keyfndnow:
found = isinstance(value,basestring) and match.is_match(value)
if found:
# print("match:%s %s\n\tkeyfndnow:%s\n\tfound:%s"%(
# match,value,keyfndnow,found))
if not include:
value = None
break
if include and keyfound and not found:
value = None
return value
def doReplacements(self,value,key,return_list=False,seen_list={}):
# logger.debug("doReplacements(%s,%s,%s)"%(value,key,seen_list))
# sets self.replacements and self.in_ex_cludes if needed
self.prepare_replacements()
value = self.do_in_ex_clude('include_metadata_pre',value,key,seen_list)
value = self.do_in_ex_clude('exclude_metadata_pre',value,key,seen_list)
retlist = [value]
for replaceline in self.replacements:
(repl_line,metakeys,regexp,replacement,cond_match) = replaceline
# logger.debug("replacement tuple:%s"%replaceline)
# logger.debug("key:%s value:%s"%(key,value))
# logger.debug("value class:%s"%value.__class__.__name__)
if (metakeys == None or key in metakeys) \
and isinstance(value,basestring) \
and regexp.search(value):
# recursion on pattern, bail -- Compare by original text
# line because I saw an issue with duplicate lines in a
# huuuge replace list cause a problem. Also allows dict()
# instead of list() for quicker lookups.
if repl_line in seen_list:
logger.info("Skipping replace_metadata line %s to prevent infinite recursion."%repl_line)
continue
doreplace=True
if cond_match and cond_match.key() != key: # prevent infinite recursion.
new_seen_list = dict(seen_list)
new_seen_list[repl_line]=True
# print(cond_match)
condval = self.getMetadataForConditional(cond_match.key(),seen_list=new_seen_list)
doreplace = condval != None and cond_match.is_match(condval)
if doreplace:
# split into more than one list entry if
# SPLIT_META present in replacement string. Split
# first, then regex sub, then recurse call replace
# on each. Break out of loop, each split element
# handled individually by recursion call.
if SPLIT_META in replacement:
retlist = []
for splitrepl in replacement.split(SPLIT_META):
try:
tval = regexp.sub(splitrepl,value)
except:
logger.error("Exception with replacement line,value:(%s),(%s)"%(repl_line,value))
raise
new_seen_list = dict(seen_list)
new_seen_list[repl_line]=True
retlist.extend(self.doReplacements(tval,
key,
return_list=True,
seen_list=new_seen_list))
break
else:
# print("replacement,value:%s,%s->%s"%(replacement,value,regexp.sub(replacement,value)))
try:
value = regexp.sub(replacement,value)
retlist = [value]
except:
logger.error("Exception with replacement line,value:(%s),(%s)"%(repl_line,value))
raise
for val in retlist:
retlist = [ self.do_in_ex_clude('include_metadata_post',x,key=key,seen_list=seen_list) for x in retlist ]
retlist = [ self.do_in_ex_clude('exclude_metadata_post',x,key=key,seen_list=seen_list) for x in retlist ]
if return_list:
return retlist
else:
return self.join_list(key,retlist)
# for saving an html-ified copy of metadata.
def dump_html_metadata(self):
lines=[]
for k,v in sorted(six.iteritems(self.metadata)):
#logger.debug("k:%s v:%s"%(k,v))
classes=['metadata']
if isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
classes.append("datetime")
val = v.isoformat()
elif isinstance(v,list):
classes.append("list")
if '' in v:
v.remove('')
if None in v:
v.remove(None)
#logger.debug("k:%s v:%s"%(k,v))
# force ints/floats to strings.
val = "<ul>\n<li>%s</li>\n</ul>" % "</li>\n<li>".join([ "%s"%x for x in v ])
elif isinstance(v, (int)):
classes.append("int")
val = v
else:
val = v
# don't include items passed in for calibre cols, etc.
if not k.startswith('calibre_') and k not in ['output_css']:
lines.append("<p><span class='label'>%s</span>: <div class='%s' id='%s'>%s</div><p>\n"%(
self.get_label(k),
" ".join(classes),
k,val))
return "\n".join(lines)
# for loading an html-ified copy of metadata.
def load_html_metadata(self,data):
soup = bs4.BeautifulSoup(data,'html5lib')
for tag in soup.find_all('div','metadata'):
val = None
if 'datetime' in tag['class']:
v = tag.string
try:
val = datetime.datetime.strptime(v, '%Y-%m-%dT%H:%M:%S.%f')
except ValueError:
try:
val = datetime.datetime.strptime(v, '%Y-%m-%dT%H:%M:%S')
except ValueError:
try:
val = datetime.datetime.strptime(v, '%Y-%m-%d')
except ValueError:
pass
elif 'list' in tag['class']:
val = []
for i in tag.find_all('li'):
# keeps & but removes <li></li> because BS4
# halps by converting NavigableString to string
# (losing entities)
val.append(unicode(i)[4:-5])
elif 'int' in tag['class']:
# Python reports true when asked isinstance(<bool>, (int))
# bools now converted to unicode when set.
if tag.string in ('True','False'):
val = tag.string
else:
val = int(tag.string)
else:
val = unicode("\n".join([ unicode(c) for c in tag.contents ]))
#logger.debug("key(%s)=val(%s)"%(tag['id'],val))
if val != None:
self.metadata[tag['id']]=val
# self.metadata = json.loads(s, object_hook=datetime_decoder)
def getChapterCount(self):
## returns chapter count adjusted for start-end range.
url_chapters = value = int(self.getMetadata("numChapters").replace(',',''))
if self.chapter_first:
value = url_chapters - (int(self.chapter_first) - 1)
if self.chapter_last:
value = value - (url_chapters - int(self.chapter_last))
return value
def getMetadataRaw(self,key):
if self.isValidMetaEntry(key) and key in self.metadata:
return self.metadata[key]
def getMetadata(self, key,
removeallentities=False,
doreplacements=True,
seen_list={}):
# check for a cached value to speed processing
if key in self.processed_metadata_cache \
and (removeallentities,doreplacements) in self.processed_metadata_cache[key]:
return self.processed_metadata_cache[key][(removeallentities,doreplacements)]
value = None
if not self.isValidMetaEntry(key):
pass # cache not valid entry, too.
# return value
elif self.isList(key):
# join_string = self.getConfig("join_string_"+key,u", ").replace(SPACE_REPLACE,' ')
# value = join_string.join(self.getList(key, removeallentities, doreplacements=True))
value = self.join_list(key,self.getList(key, removeallentities, doreplacements=True,seen_list=seen_list))
if doreplacements:
value = self.doReplacements(value,key+"_LIST",seen_list=seen_list)
elif key in self.metadata:
value = self.metadata[key]
if value:
if key in ["numWords","numChapters"]+self.getConfigList("comma_entries",[]):
try:
value = commaGroups(unicode(value))
except Exception as e:
logger.warning("Failed to add commas to %s value:(%s) exception(%s)"%(key,value,e))
if key in ("dateCreated"):
value = value.strftime(self.getConfig(key+"_format","%Y-%m-%d %H:%M:%S"))
if key in ("datePublished","dateUpdated"):
value = value.strftime(self.getConfig(key+"_format","%Y-%m-%d"))
if isinstance(value, (datetime.date, datetime.datetime, datetime.time)) and self.hasConfig(key+"_format"):
# logger.info("DATE: %s"%key)
value = value.strftime(self.getConfig(key+"_format"))
if key == "title" and (self.chapter_first or self.chapter_last) and self.getConfig("title_chapter_range_pattern"):
first = self.chapter_first or "1"
last = self.chapter_last or self.getMetadata("numChapters")
templ = string.Template(self.getConfig("title_chapter_range_pattern"))
value = templ.substitute({'title':value,
'first':commaGroups(first),
'last':commaGroups(last)})
if doreplacements:
value=self.doReplacements(value,key,seen_list=seen_list)
if removeallentities and value != None:
value = removeAllEntities(value)
else: #if self.getConfig("default_value_"+key):
value = self.getConfig("default_value_"+key)
# save a cached value to speed processing
if key not in self.processed_metadata_cache:
self.processed_metadata_cache[key] = {}
self.processed_metadata_cache[key][(removeallentities,doreplacements)] = value
return value
def getAllMetadata(self,
removeallentities=False,
doreplacements=True,
keeplists=False):
'''
All single value *and* list value metadata as strings (unless
keeplists=True, then keep lists).
'''
allmetadata = {}
# special handling for authors/authorUrls
linkhtml="<a class='%slink' href='%s'>%s</a>"
if self.isList('author'): # more than one author, assume multiple authorUrl too.
htmllist=[]
for i, v in enumerate(self.getList('author')):
if len(self.getList('authorUrl')) <= i:
aurl = None
else:
aurl = self.getList('authorUrl')[i]
auth = v
# make sure doreplacements & removeallentities are honored.
if doreplacements:
aurl=self.doReplacements(aurl,'authorUrl')
auth=self.doReplacements(auth,'author')
if removeallentities:
aurl=removeAllEntities(aurl)
auth=removeAllEntities(auth)
htmllist.append(linkhtml%('author',aurl,auth))
self.setMetadata('authorHTML',self.join_list("join_string_authorHTML",htmllist))
else:
self.setMetadata('authorHTML',linkhtml%('author',self.getMetadata('authorUrl', removeallentities, doreplacements),
self.getMetadata('author', removeallentities, doreplacements)))
self.setMetadata('titleHTML',linkhtml%('title',
self.getMetadata('storyUrl', removeallentities, doreplacements),
self.getMetadata('title', removeallentities, doreplacements)))
self.extendList("extratags",self.getConfigList("extratags"))
if self.getMetadataRaw('seriesUrl'):
self.setMetadata('seriesHTML',linkhtml%('series',
self.getMetadata('seriesUrl', removeallentities, doreplacements),
self.getMetadata('series', removeallentities, doreplacements)))
elif self.getMetadataRaw('series'):
self.setMetadata('seriesHTML',self.getMetadataRaw('series'))
# logger.debug("make_linkhtml_entries:%s"%self.getConfig('make_linkhtml_entries'))
for k in self.getConfigList('make_linkhtml_entries'):
# Assuming list, because it has to be site specific and
# they are all lists. Bail if kUrl list not the same
# length.
# logger.debug("\nk:%s\nlist:%s\nlistURL:%s"%(k,self.getList(k),self.getList(k+'Url')))
if len(self.getList(k+'Url')) != len(self.getList(k)):
continue
htmllist=[]
for i, v in enumerate(self.getList(k)):
url = self.getList(k+'Url')[i]
# make sure doreplacements & removeallentities are honored.
if doreplacements:
url=self.doReplacements(url,k+'Url')
v=self.doReplacements(v,k)
if removeallentities:
url=removeAllEntities(url)
v=removeAllEntities(v)
htmllist.append(linkhtml%(k,url,v))
# join_string = self.getConfig("join_string_"+k+"HTML",u", ").replace(SPACE_REPLACE,' ')
self.setMetadata(k+'HTML',self.join_list("join_string_"+k+"HTML",htmllist))
for k in self.getValidMetaList():
if self.isList(k) and keeplists:
allmetadata[k] = self.getList(k, removeallentities, doreplacements)
else:
allmetadata[k] = self.getMetadata(k, removeallentities, doreplacements)
return allmetadata
def get_sanitized_description(self):
'''
For calibre version so this code can be consolidated between
fff_plugin.py and jobs.py
'''
description = self.getMetadata("description")
# logger.debug("description:%s"%description)
if not description:
description = ''
else:
if not self.getConfig('keep_summary_html'):
## because of the html->MD text->html dance, text only
## (or MD/MD-like) descs come out better.
description = sanitize_comments_html(description)
# lengthy FFF_replace_br_with_p_has_been_run" causes
# problems with EpubSplit and EpubMerge comments
description = description.replace(u'<!-- ' +was_run_marker+ u' -->\n',u'')
description = description.replace(u'<div id="' +was_run_marker+ u'">\n',u'<div>')
return description
# just for less clutter in adapters.
def extendList(self,listname,l):
for v in l:
self.addToList(listname,v.strip())
def addToList(self,listname,value,condremoveentities=True,clear=False):
if listname in self.processed_metadata_list_cache:
del self.processed_metadata_list_cache[listname]
if value==None:
return
if condremoveentities:
value = conditionalRemoveEntities(value)
if clear or not self.isList(listname) or not listname in self.metadata:
# Calling addToList to a non-list meta will overwrite it.
self.metadata[listname]=[]
# prevent duplicates.
if not value in self.metadata[listname]:
self.metadata[listname].append(value)
def isList(self,listname):
'Everything set with an include_in_* is considered a list.'
return self.isListType(listname) or \
( self.isValidMetaEntry(listname) and listname in self.metadata \
and isinstance(self.metadata[listname],list) )
def getList(self,listname,
removeallentities=False,
doreplacements=True,
includelist=[],
skip_cache=False,
seen_list={}):
#print("getList(%s,%s)"%(listname,includelist))
retlist = []
# check for a cached value to speed processing
if not skip_cache and listname in self.processed_metadata_list_cache \
and (removeallentities,doreplacements) in self.processed_metadata_list_cache[listname]:
return self.processed_metadata_list_cache[listname][(removeallentities,doreplacements)]
if not self.isValidMetaEntry(listname):
retlist = []
else:
# includelist prevents infinite recursion of include_in_'s
if self.hasConfig("include_in_"+listname) and listname not in includelist:
for k in self.getConfigList("include_in_"+listname):
ldorepl = doreplacements
if k.endswith('.NOREPL'):
k = k[:-len('.NOREPL')]
ldorepl = False
retlist.extend(self.getList(k,removeallentities=False,
doreplacements=ldorepl,includelist=includelist+[listname],
skip_cache=True,
seen_list=seen_list))
else:
if not self.isList(listname):
retlist = [self.getMetadata(listname,removeallentities=False,
doreplacements=doreplacements,
seen_list=seen_list)]
else:
retlist = self.getMetadataRaw(listname)
if retlist is None:
retlist = []
# reorder ships so b/a and c/b/a become a/b and a/b/c. Only on '/',
# use replace_metadata to change separator first if needed.
# ships=>[ ]*(/|&|&)[ ]*=>/
if listname == 'ships' and self.getConfig('sort_ships') and doreplacements and retlist:
# retlist = [ '/'.join(sorted(x.split('/'))) for x in retlist ]
## empty default of /=>/
sort_ships_splits = self.getConfig('sort_ships_splits',"/=>/")
for line in sort_ships_splits.splitlines():
if line:
## logger.debug("sort_ships_splits:%s"%line)
## logger.debug(retlist)
(splitre,splitmerge) = line.split("=>")
splitmerge = splitmerge.replace(SPACE_REPLACE,' ')
newretlist = []
for x in retlist:
curlist = []
for y in re.split(splitre,x):
## logger.debug("x:(%s) y:(%s)"%(x,y))
## for SPLIT_META(\,)
if x != y and doreplacements: # doreplacements always true here (currently)
y = self.doReplacements(y,'ships_CHARS',return_list=True,
seen_list=seen_list)
else:
## needs to be a list to extend curlist.
y=[x]
if y[0]: ## skip if empty
curlist.extend(y)
## logger.debug("curlist:%s"%(curlist,))
newretlist.append( splitmerge.join(sorted(curlist)) )
retlist = newretlist
## logger.debug(retlist)
## Add value of add_genre_when_multi_category to genre if
## there's more than one category value. Does not work
## consistently well if you try to include_in_ chain genre
## back into category--breaks with fandoms sites like AO3
if listname == 'genre' and self.getConfig('add_genre_when_multi_category') and len(self.getList('category',
removeallentities=False,
# to avoid inf loops if genre/cat substs
includelist=includelist+[listname],
doreplacements=False,
skip_cache=True,
seen_list=seen_list
)) > 1:
retlist.append(self.getConfig('add_genre_when_multi_category'))
if retlist:
if doreplacements:
newretlist = []
for val in retlist:
newretlist.extend(self.doReplacements(val,listname,return_list=True,
seen_list=seen_list))
retlist = newretlist
if removeallentities:
retlist = [ removeAllEntities(x) for x in retlist ]
retlist = [x for x in retlist if x!=None and x!='']
if retlist:
if listname in ('author','authorUrl','authorId') or self.getConfig('keep_in_order_'+listname):
# need to retain order for author & authorUrl so the
# two match up.
retlist = unique_list(retlist)
else:
# remove dups and sort.
retlist = sorted(list(set(retlist)))
## Add value of add_genre_when_multi_category to
## category if there's more than one category
## value (before this, obviously). Applied
## *after* doReplacements. For normalization
## crusaders who want Crossover as a category
## instead of genre. Moved after dedup'ing so
## consolidated category values don't count.
if listname == 'category' and self.getConfig('add_category_when_multi_category') and len(retlist) > 1:
retlist.append(self.getConfig('add_category_when_multi_category'))
else:
retlist = []
if not skip_cache:
if listname not in self.processed_metadata_list_cache:
self.processed_metadata_list_cache[listname] = {}
self.processed_metadata_list_cache[listname][(removeallentities,doreplacements)] = retlist
return retlist
def getSubjectTags(self, removeallentities=False):
# set to avoid duplicates subject tags.
subjectset = set()
tags_list = self.getConfigList("include_subject_tags") + self.getConfigList("extra_subject_tags")
# metadata all go into dc:subject tags, but only if they are configured.
for (name,value) in six.iteritems(self.getAllMetadata(removeallentities=removeallentities,keeplists=True)):
if name+'.SPLIT' in tags_list:
flist=[]
if isinstance(value,list):
for tag in value:
flist.extend(tag.split(','))
else:
flist.extend(value)
for tag in flist:
subjectset.add(tag)
elif name in tags_list:
if isinstance(value,list):
for tag in value:
subjectset.add(tag)
else:
subjectset.add(value)
if None in subjectset:
subjectset.remove(None)
if '' in subjectset:
subjectset.remove('')
return list(subjectset)
def addChapter(self, chap, newchap=False):
# logger.debug("addChapter(%s,%s)"%(chap,newchap))
chapter = defaultdict(unicode,chap) # default unknown to empty string
chapter['html'] = removeEntities(chapter['html'])
if self.getConfig('strip_chapter_numbers') and \
self.getConfig('chapter_title_strip_pattern'):
chapter['title'] = re.sub(self.getConfig('chapter_title_strip_pattern'),"",chapter['title'])
chapter.update({'origtitle':chapter['title'],
'toctitle':chapter['title'],
'new':newchap,
'number':len(self.chapters)+1,
'index04':"%04d"%(len(self.chapters)+1)})
## Due to poor planning on my part, chapter_title_*_pattern
## expect index==1 while output settings expected index=0001.
## index04 is to disambiguate, but index is kept for users'
## pre-existing settings.
chapter['index']=chapter['index04']
self.chapters.append(chapter)
def getChapters(self,fortoc=False):
"Chapters will be defaultdicts(unicode)"
retval = []
## only add numbers if more than one chapter. Ditto (new) marks.
addnums = len(self.chapters) > 1 and (
self.getConfig('add_chapter_numbers') == "true"
or (self.getConfig('add_chapter_numbers') == "toconly" and fortoc) )
marknew = len(self.chapters) > 1 and self.getConfig('mark_new_chapters') # true or latestonly
defpattern = self.getConfig('chapter_title_def_pattern','${title}') # default val in case of missing defaults.ini
if addnums and marknew:
pattern = self.getConfig('chapter_title_add_pattern')
newpattern = self.getConfig('chapter_title_addnew_pattern')
elif addnums:
pattern = self.getConfig('chapter_title_add_pattern')
newpattern = pattern
elif marknew:
pattern = defpattern
newpattern = self.getConfig('chapter_title_new_pattern')
else:
pattern = defpattern
newpattern = pattern
if self.getConfig('add_chapter_numbers') in ["true","toconly"]:
tocpattern = self.getConfig('chapter_title_add_pattern')
else:
tocpattern = defpattern
# logger.debug("Patterns: (%s)(%s)"%(pattern,newpattern))
templ = string.Template(pattern)
newtempl = string.Template(newpattern)
toctempl = string.Template(tocpattern)
for index, chap in enumerate(self.chapters):
if chap['new']:
usetempl = newtempl
else:
usetempl = templ
# logger.debug("chap(%s)"%chap)
chapter = defaultdict(unicode,chap)
## Due to poor planning on my part,
## chapter_title_*_pattern expect index==1 not
## index=0001 like output settings. index04 is now
## used, but index is still included for backward
## compatibility.
chapter['index'] = chapter['number']
chapter['chapter'] = usetempl.substitute(chapter)
chapter['origtitle'] = templ.substitute(chapter)
chapter['toctitle'] = toctempl.substitute(chapter)
# set after, otherwise changes origtitle and toctitle
chapter['title'] = chapter['chapter']
retval.append(chapter)
return retval
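# Produce a copy of the metadata with filename-unsafe characters replaced by
# '_' (except the file extension, which is set separately); the pattern is
# configurable via output_filename_safepattern.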
def get_filename_safe_metadata(self,pattern=None):
origvalues = self.getAllMetadata()
values={}
if not pattern:
pattern = re_compile(self.getConfig("output_filename_safepattern",
r"(^\.|/\.|[^a-zA-Z0-9_\. \[\]\(\)&'-]+)"),
"output_filename_safepattern")
for k in origvalues.keys():
if k == 'formatext': # don't do file extension--we set it anyway.
values[k]=self.getMetadata(k)
else:
values[k]=re.sub(pattern,'_', removeAllEntities(self.getMetadata(k)))
return values
def formatFileName(self,template,allowunsafefilename=True):
# fall back default:
if not template:
template="${title}-${siteabbrev}_${storyId}${formatext}"
if allowunsafefilename:
values = self.getAllMetadata()
else:
values = self.get_filename_safe_metadata()
return string.Template(template).substitute(values) #.encode('utf8')
# fetch is passed in from the adapter in case we need the cookies it has
# collected; base_story itself has no fetcher of its own.
def addImgUrl(self,parenturl,url,fetch,cover=False,coverexclusion=None):
# Skip explicit covers entirely when never_make_cover is set -- otherwise
# the image would be saved into the epub even though it isn't used anywhere.
if cover and self.getConfig('never_make_cover'):
return (None,None)
url = url.strip() # ran across an image with a space in the
# src. Browser handled it, so we'd better, too.
## Mistakenly ended up with some // in image urls, like:
## https://forums.spacebattles.com//styles/default/xenforo/clear.png
## Removing one /, but not ://
if not url.startswith("file"): # keep file:///
url = re.sub(r"([^:])//",r"\1/",url)
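# Resolve the image URL against the parent page URL: absolute http/file URLs
# are used as-is, scheme-relative ("//...") and root-relative ("/...") URLs
# borrow the parent's scheme and/or netloc, and anything else is joined onto
# the parent's directory path.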
if url.startswith("http") or url.startswith("file") or parenturl == None:
imgurl = url
else:
parsedUrl = urlparse(parenturl)
if url.startswith("//") :
imgurl = urlunparse(
(parsedUrl.scheme,
'',
url,
'','',''))
elif url.startswith("/") :
imgurl = urlunparse(
(parsedUrl.scheme,
parsedUrl.netloc,
url,
'','',''))
else:
toppath=""
if parsedUrl.path.endswith("/"):
toppath = parsedUrl.path
else:
toppath = parsedUrl.path[:parsedUrl.path.rindex('/')+1]
imgurl = urlunparse(
(parsedUrl.scheme,
parsedUrl.netloc,
toppath + url,
'','',''))
# logger.debug("\n===========\nparsedUrl.path:%s\ntoppath:%s\nimgurl:%s\n\n"%(parsedUrl.path,toppath,imgurl))
# apply coverexclusion to explicit covers, too. Primarily for ffnet image URLs.
#print("[[[[[\n\n %s %s \n\n]]]]]]]"%(imgurl,coverexclusion))
if cover and coverexclusion and re.search(coverexclusion,imgurl):
return (None,None)
prefix='ffdl'
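# Each distinct image URL is fetched and converted only once: imgurls holds
# the original URLs and imgtuples the matching {'newsrc','mime','data'}
# entries in the same order, so repeated references reuse the earlier result.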
if imgurl not in self.imgurls:
try:
if imgurl.endswith('failedtoload'):
return ("failedtoload","failedtoload")
parsedUrl = urlparse(imgurl)
if self.getConfig('no_image_processing'):
(data,ext,mime) = no_convert_image(imgurl,
fetch(imgurl,referer=parenturl))
else:
try:
sizes = [ int(x) for x in self.getConfigList('image_max_size',['580', '725']) ]
except Exception as e:
raise exceptions.FailedToDownload("Failed to parse image_max_size from personal.ini:%s\nException: %s"%(self.getConfigList('image_max_size'),e))
grayscale = self.getConfig('grayscale_images')
imgtype = self.getConfig('convert_images_to')
if not imgtype:
imgtype = "jpg"
removetrans = self.getConfig('remove_transparency')
removetrans = removetrans or grayscale or imgtype=="jpg"
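# JPEG has no alpha channel, so transparency has to be flattened onto the
# background color whenever converting to jpg (and when grayscaling).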
if 'ffdl-' in imgurl:
raise exceptions.FailedToDownload("ffdl image is internal only...")
bgcolor = self.getConfig('background_color','ffffff')
if not bgcolor or len(bgcolor)<3 or len(bgcolor)>6 or not re.match(r"^[0-9a-fA-F]+$",bgcolor):
logger.info("background_color(%s) needs to be a hexidecimal color--using ffffff instead."%bgcolor)
bgcolor = 'ffffff'
(data,ext,mime) = convert_image(imgurl,
fetch(imgurl,referer=parenturl),
sizes,
grayscale,
removetrans,
imgtype,
background="#"+bgcolor)
except Exception as e:
try:
logger.info("Failed to load or convert image, \nparent:%s\nskipping:%s\nException: %s"%(parenturl,imgurl,e))
except:
logger.info("Failed to load or convert image, \nparent:%s\nskipping:%s\n(Exception output also caused exception)"%(parenturl,imgurl))
return ("failedtoload","failedtoload")
cover_big_enough = True
try:
sizes = [ int(x) for x in self.getConfigList('cover_min_size') ]
if sizes:
owidth, oheight = get_image_size(data)
cover_big_enough = owidth >= sizes[0] and oheight >= sizes[1]
# logger.debug("cover_big_enough:%s %s>=%s, %s>=%s"%(cover_big_enough,owidth,sizes[0],oheight,sizes[1]))
except Exception as e:
raise exceptions.FailedToDownload("Failed to process cover_min_size from personal.ini:%s\nException: %s"%(self.getConfigList('cover_min_size'),e))
# explicit cover, make the first image.
if cover and cover_big_enough:
if len(self.imgtuples) > 0 and 'cover' in self.imgtuples[0]['newsrc']:
# remove existing cover, if there is one.
del self.imgurls[0]
del self.imgtuples[0]
self.imgurls.insert(0,imgurl)
newsrc = "images/cover.%s"%ext
self.cover=newsrc
self.setMetadata('cover_image','specific')
self.imgtuples.insert(0,{'newsrc':newsrc,'mime':mime,'data':data})
else:
self.imgurls.append(imgurl)
# First image: keep a copy rather than a link, because calibre will replace it with its own cover.
# Only if: No cover already AND
# make_firstimage_cover AND
# NOT never_make_cover AND
# either no coverexclusion OR coverexclusion doesn't match
if self.cover == None and \
self.getConfig('make_firstimage_cover') and \
not self.getConfig('never_make_cover') and \
not (coverexclusion and re.search(coverexclusion,imgurl)) and \
cover_big_enough:
newsrc = "images/cover.%s"%ext
self.cover=newsrc
self.setMetadata('cover_image','first')
self.imgtuples.append({'newsrc':newsrc,'mime':mime,'data':data})
self.imgurls.append(imgurl)
newsrc = "images/%s-%s.%s"%(
prefix,
self.imgurls.index(imgurl),
ext)
self.imgtuples.append({'newsrc':newsrc,'mime':mime,'data':data})
#logger.debug("\nimgurl:%s\nnewsrc:%s\nimage size:%d\n"%(imgurl,newsrc,len(data)))
else:
newsrc = self.imgtuples[self.imgurls.index(imgurl)]['newsrc']
#print("===============\n%s\nimg url:%s\n============"%(newsrc,self.imgurls[-1]))
return (newsrc, imgurl)
def getImgUrls(self):
retlist = []
for i, url in enumerate(self.imgurls):
#parsedUrl = urlparse(url)
retlist.append(self.imgtuples[i])
return retlist
def __str__(self):
return "Metadata: " +unicode(self.metadata)
def commaGroups(s):
groups = []
while s and s[-1].isdigit():
groups.append(s[-3:])
s = s[:-3]
return s + ','.join(reversed(groups))
# http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order
def unique_list(seq):
seen = set()
seen_add = seen.add
try:
return [x for x in seq if not (x in seen or seen_add(x))]
except:
logger.debug("unique_list exception seq:%s"%seq)
raise
| [
"[email protected]"
] | |
5909b171a789cfadf92b5428de5babd92ef70753 | 519b1185421f33d43a0917ffe5ec582099c000eb | /src/wtfjson/fields/unbound_field.py | 3f98a69b0b9992b3dcacb13c542767d1caa166ce | [
"MIT"
] | permissive | binary-butterfly/wtfjson | d58f710b228149a34570a4677262cc3b16ffab6d | 551ad07c895ce3c94ac3015b6b5ecc2102599b56 | refs/heads/main | 2023-08-11T04:13:15.907617 | 2021-10-11T09:21:34 | 2021-10-11T09:21:34 | 359,063,547 | 0 | 0 | MIT | 2021-10-11T09:21:34 | 2021-04-18T06:29:17 | Python | UTF-8 | Python | false | false | 559 | py | # encoding: utf-8
"""
binary butterfly validator
Copyright (c) 2021, binary butterfly GmbH
Use of this source code is governed by an MIT-style license that can be found in the LICENSE.txt.
"""
class UnboundField:
def __init__(self, field_class, *args, name=None, **kwargs):
self.field_class = field_class
self.args = args
self.name = name
self.kwargs = kwargs
def bind(self, form, field_name, **kwargs):
return self.field_class(*self.args, **dict(form=form, field_name=field_name, **self.kwargs, **kwargs))
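# Illustrative use (names are hypothetical): a declarative form attribute such
# as `email = StringField()` is stored as an UnboundField and later bound with
# unbound.bind(form=self, field_name='email') during form construction.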
| [
"[email protected]"
] | |
591869b47c31b2765711783418bef12a1fc2b8a5 | 96ca1945a32c5ea708d4871e320a2f19b557e68b | /test_testing_utils.py | d3215a6df0258b199b4e80cc6c0f361735494524 | [] | no_license | u8sand/backup | 81922941296c0b1e9f1fc4e2b851e3691421b8cc | 9d44bd541b36069acc9610a9719eff8ea9c2e0d7 | refs/heads/master | 2021-05-11T15:33:28.376210 | 2018-01-30T22:23:20 | 2018-01-30T22:23:20 | 117,734,883 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,071 | py | #!/usr/bin/env python3
from testing_utils import ExtendedTestCase
class TestTestingUtils(ExtendedTestCase):
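# ExtendedTestCase (from the project's testing_utils module) is assumed to
# provide sandboxed filesystem helpers (mkdir, touch, copy, execute, ...) and
# the matching assertIs*/assertFileIs assertions exercised below.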
def test_mkdir_rmdir(self):
self.mkdir('test_dir/test_dir_2')
self.assertIsDir('test_dir')
self.assertIsDir('test_dir/test_dir_2')
self.rmdir('test_dir')
self.assertIsNotDir('test_dir')
def test_touch_remove(self):
self.touch('test_file_1')
self.assertIsFile('test_file_1')
self.touch('test_dir/test_file_2', 'Test')
self.assertIsFile('test_dir/test_file_2')
self.assertFileIs('test_dir/test_file_2', 'Test')
self.remove('test_dir/test_file_2')
self.assertIsNotFile('test_dir/test_file_2')
def test_add_copy(self):
self.add('test_testing_utils.py')
self.assertIsFile('test_testing_utils.py')
self.copy('test_testing_utils.py', 'test_testing_utils_2.py')
self.assertIsFile('test_testing_utils_2.py')
def test_execute(self):
self.touch('test.sh', 'exit 0')
self.execute('test.sh')
def test_recursive_touch(self):
pass # TODO
def test_assert_paths(self):
pass # TODO
| [
"[email protected]"
] | |
a7032e64cfe9697f723ec0d06e52281280064da1 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5686313294495744_0/Python/biran0079/c.py | f78ae9b6e91a9374279f6467852dac2df252b4a1 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | def dfs(i):
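# Brute-force search over which pairs to drop: pair i = (a, b) may be removed
# only while its first word a and its second word b each still appear in more
# than one remaining pair; dfs(i) returns the maximum number of removable
# pairs among l[i:] given the current fst/snd counts.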
if i == len(l): return 0
res=dfs(i+1)
a,b=l[i]
if fst[a]>1 and snd[b] > 1:
fst[a]-=1
snd[b]-=1
res=max(res, 1+dfs(i+1))
fst[a]+=1
snd[b]+=1
return res
for t in range(int(raw_input())):
n=int(raw_input())
l=[]
fst={}
snd={}
for i in range(n):
s=raw_input()
a,b=s.split()
l.append((a,b))
fst[a] = fst.get(a,0)+1
snd[b] = snd.get(b,0)+1
res=dfs(0)
print "Case #{}: {}".format(t+1, res)
| [
"[email protected]"
] |