blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea3e0f49513c78b97324da1060849c420f61e5f5 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v5/googleads-py/google/ads/googleads/v5/errors/types/media_upload_error.py | 39578088e187607b8c8b6434f4bac6a2a12de12a | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v5.errors',
marshal='google.ads.googleads.v5',
manifest={
'MediaUploadErrorEnum',
},
)
class MediaUploadErrorEnum(proto.Message):
r"""Container for enum describing possible media uploading
errors.
"""
class MediaUploadError(proto.Enum):
r"""Enum describing possible media uploading errors."""
UNSPECIFIED = 0
UNKNOWN = 1
FILE_TOO_BIG = 2
UNPARSEABLE_IMAGE = 3
ANIMATED_IMAGE_NOT_ALLOWED = 4
FORMAT_NOT_ALLOWED = 5
EXTERNAL_URL_NOT_ALLOWED = 6
INVALID_URL_REFERENCE = 7
MISSING_PRIMARY_MEDIA_BUNDLE_ENTRY = 8
ANIMATED_VISUAL_EFFECT = 9
ANIMATION_TOO_LONG = 10
ASPECT_RATIO_NOT_ALLOWED = 11
AUDIO_NOT_ALLOWED_IN_MEDIA_BUNDLE = 12
CMYK_JPEG_NOT_ALLOWED = 13
FLASH_NOT_ALLOWED = 14
FRAME_RATE_TOO_HIGH = 15
GOOGLE_WEB_DESIGNER_ZIP_FILE_NOT_PUBLISHED = 16
IMAGE_CONSTRAINTS_VIOLATED = 17
INVALID_MEDIA_BUNDLE = 18
INVALID_MEDIA_BUNDLE_ENTRY = 19
INVALID_MIME_TYPE = 20
INVALID_PATH = 21
LAYOUT_PROBLEM = 22
MALFORMED_URL = 23
MEDIA_BUNDLE_NOT_ALLOWED = 24
MEDIA_BUNDLE_NOT_COMPATIBLE_TO_PRODUCT_TYPE = 25
MEDIA_BUNDLE_REJECTED_BY_MULTIPLE_ASSET_SPECS = 26
TOO_MANY_FILES_IN_MEDIA_BUNDLE = 27
UNSUPPORTED_GOOGLE_WEB_DESIGNER_ENVIRONMENT = 28
UNSUPPORTED_HTML5_FEATURE = 29
URL_IN_MEDIA_BUNDLE_NOT_SSL_COMPLIANT = 30
VIDEO_FILE_NAME_TOO_LONG = 31
VIDEO_MULTIPLE_FILES_WITH_SAME_NAME = 32
VIDEO_NOT_ALLOWED_IN_MEDIA_BUNDLE = 33
__all__ = tuple(sorted(__protobuf__.manifest))
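# Illustrative note (not part of the generated file): proto-plus enums behave
# like ints, so e.g. MediaUploadErrorEnum.MediaUploadError.FILE_TOO_BIG == 2.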
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
138cd4d7a0466e2e968191704239ebbd70fe4987 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-9546.py | 28e7816ada82f82a7f2f50fe38323cbbc05079be | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,766 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
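    # Illustration (assuming the default doubling_limit of 1000): capacities
    # grow 1 -> 2 -> 4 -> ... -> 512 by doubling; past that, each resize
    # falls back to adding a single slot.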
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
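# Example: vrange(2, 6) yields a vector holding 2, 3, 4, 5.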
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
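# Example: applying sieve to vrange(2, 10) leaves exactly the primes 2, 3, 5, 7.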
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
    while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
96aee52e2dbc339afedeaad6b4a7001d55c9621b | 6a34b039ededb2e1dcdc07c6976475654ca0ae0a | /code_all/day10/demo03.py | 561ac03fe84aaec58c94e3a79c7cb3f0a4d52359 | [
"MIT"
] | permissive | testcg/python | 57c62671ab1aad18205c1dee4457b55009cef098 | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | refs/heads/main | 2023-07-09T13:19:24.740751 | 2021-08-11T09:25:20 | 2021-08-11T09:25:20 | 394,932,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | """
Create a Dog class.
Data:
    breed, nickname, height, weight
Behavior:
    eat (weight increases by 1)
Instantiate two objects and call their functions.
Draw the memory diagram.
"""
# Instance members are accessed through an object.
# Inside the class, that object is usually `self`;
# outside the class, it is "variable = ClassName(...)".
class Dog:
def __init__(self, species="", pet_name="", height=0.0, weight=0):
self.species = species
self.pet_name = pet_name
self.height = height
self.weight = weight
self.eat()
def eat(self):
self.weight += 1
print("吃饭饭~")
mx = Dog("拉布拉多", "米咻", 0.6, 60)
print(mx.weight)
mx.eat()
print(mx.weight)
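# A second instance, since the exercise asks for two objects
# (these particular values are illustrative, not from the original):
dd = Dog("Husky", "Snowball", 0.7, 55)
print(dd.weight)  # 56: __init__ calls eat(), which adds 1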
| [
"[email protected]"
] | |
0396e30832d2d1418b62cb25f64b70bb01309eaa | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /fact_and_few_work/time_or_able_fact/great_thing/say_group_with_woman.py | 40dc5a750776c8f9410e3b4497b53ed7b31e59d6 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py |
#! /usr/bin/env python
def find_other_part(str_arg):
person(str_arg)
print('new_point')
def person(str_arg):
print(str_arg)
if __name__ == '__main__':
find_other_part('thing_or_part')
| [
"[email protected]"
] | |
56245421e92559dca2ccf80a331a8974c2d78296 | b028b595769e1a6aa24b999ff715486154bddaad | /project_wiki/project_wiki/settings.py | d2af995dc2604b2f680433e6589ee9d5b2c948de | [] | no_license | bhaveshagarwal1697/login-and-register-using-user-authentication | bce48f359264474855b10a51db9d93b72b181f36 | 5ab5e3ccb0f2a3695a7ce82fa9976fc5c126f44d | refs/heads/master | 2020-07-31T00:01:22.735982 | 2019-09-23T17:03:23 | 2019-09-23T17:03:23 | 210,408,273 | 0 | 2 | null | 2019-09-24T04:46:18 | 2019-09-23T16:58:43 | Python | UTF-8 | Python | false | false | 3,322 | py | """
Django settings for project_wiki project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(v(bkjjnz07ynv^_yju5)zd3-mp4ct57zc((*8**8tx!sw8085'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'user_activities',
'rest_framework',
'rest_framework.authtoken',
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
}
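# With TokenAuthentication enabled, clients authenticate by sending a header
# of the form "Authorization: Token <key>" (standard DRF behaviour).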
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_wiki.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_wiki.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
f0a97f932cf0cba3b3e6e0b9beaa99fd5971dcd3 | 8ac156c3bfeb4ce28836a1820cb88959424dab14 | /extrasetup.py | f2fa29ce6a348bce4cc10fdfc0827986a7f941d2 | [
"Apache-2.0"
] | permissive | Cloudmersive/Cloudmersive.APIClient.Python.OCR | 7b593464d31d3038663bedca3c085a161e356f20 | 90acf41a9b307213ef79f63ea4c749469ef61006 | refs/heads/master | 2023-04-03T06:03:41.917713 | 2023-03-27T05:30:38 | 2023-03-27T05:30:38 | 138,450,272 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
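# A sketch of how this value is typically consumed by a setup script
# (the package metadata below is illustrative, not from this repository):
#
#     from setuptools import setup
#     setup(
#         name='example-package',
#         long_description=long_description,
#         long_description_content_type='text/markdown',
#     )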
| [
"[email protected]"
] | |
801390d86c22e90f56c6049da8ccba2df82514a0 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/ip0.py | 816ddff0358a21889858a3315bc7a9c77b35831f | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'iP0':
printFunction(data[1:])
else:
print 'ERROR'
return
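# Example input file (hypothetical) that this script accepts, one command
# per line; quote characters must be space-separated tokens:
#   iP0 " Hello World "   ->  prints "Hello World"
#   iP0 " "               ->  prints an empty line
# Usage: python ip0.py program.txt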
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
2fa106d583cc79bc5e2e47d65b1a0202c51dbdb8 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /018_dictionaries/_exercises/dictionary_002.py | 4563ea55f370405d300adbbf05e0be69fdea6790 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 4,958 | py | # # -*- coding: utf-8 -*-
# You can check whether a key exists with the `in` operator. If the key is
# found, True is returned, otherwise False.
d = {"a": 1, "b": 2}
print("a" in d)  # The key exists
# True
print("c" in d)  # The key does not exist
# False

# The `not in` operator checks whether a key is absent from the dictionary.
# If the key is absent, True is returned, otherwise False.
d = {"a": 1, "b": 2}
print("c" not in d)  # The key does not exist
# True
print("a" not in d)  # The key exists
# False

# get(<key>[, <default>])
# avoids raising KeyError when the requested key is absent. If the key is
# present, the method returns the corresponding value. If the key is absent,
# it returns None or the default given as the second argument.
d = {"a": 1, "b": 2}
print(d.get("a"), d.get("c"), d.get("c", 800))
# 1 None 800

# setdefault(<key>[, <default>])
# If the key is present in the dictionary, the method returns the
# corresponding value. If the key is absent, a new element is created with
# the value given as the second argument; with no second argument the new
# element's value is None.
d = {"a": 1, "b": 2}
print(d.setdefault("a"), d.setdefault("c"), d.setdefault("d", 0))
# 1 None 0
print(d)
# {'a': 1, 'c': None, 'b': 2, 'd': 0}

# Changing an element by key
d = {"a": 1, "b": 2}
d["a"] = 800       # change an existing element by key
d["c"] = "string"  # a new element is added
print(d)
# {'a': 800, 'c': 'string', 'b': 2}

# len
d = {"a": 1, "b": 2}
print(len(d))  # Get the number of keys in the dictionary
# 2

# del
d = {"a": 1, "b": 2}
del d["b"]; print(d)  # Delete the element with key "b" and print the dict
# {'a': 1}

# Iterating over dictionary elements
d = {"x": 1, "y": 2, "z": 3}
for key in d.keys():
    print("{0} => {1}".format(key, d[key]), end=" ")
# Prints y => 2 x => 1 z => 3

for key in d:
    print("{0} => {1} ".format(key, d[key]), end=" ")
# Prints y => 2 x => 1 z => 3

# Getting a sorted list of keys
d = {"x": 1, "y": 2, "z": 3}
k = list(d.keys())  # Get the list of keys
k.sort()            # Sort the list of keys
for key in k:
    print(" {0} => {1} ".format(key, d[key]), end=" ")
# Prints x => 1 y => 2 z => 3

# sorted
d = {"x": 1, "y": 2, "z": 3}
for key in sorted(d.keys()):
    print(" {0} => {1} ".format(key, d[key]), end=" ")
# Prints x => 1 y => 2 z => 3

# Since each iteration yields a dictionary key, the dictionary object itself
# can be passed to sorted() instead of the result of the keys() method.
d = {"x": 1, "y": 2, "z": 3}
for key in sorted(d):
    print(" {0} => {1} ".format(key, d[key]), end=" ")
# Prints x => 1 y => 2 z => 3

# Dictionary methods
# keys() returns a dict_keys object containing all keys of the dictionary.
# This object supports iteration as well as set operations.
d1, d2 = {"a": 1, "b": 2}, {"a": 3, "c": 4, "d": 5}
print(d1.keys(), d2.keys())  # Get the dict_keys objects
# dict_keys(['a', 'b']) dict_keys(['a', 'c', 'd'])
print(list(d1.keys()), list(d2.keys()))  # Get the lists of keys
# ['a', 'b'] ['a', 'c', 'd']
for k in d1.keys():
    print(k, end=" ")

# keys() - union
d1, d2 = {"a": 1, "b": 2}, {"a": 3, "c": 4, "d": 5}
print(d1.keys() | d2.keys())
# {'a', 'c', 'b', 'd'}

# keys() - difference
d1, d2 = {"a": 1, "b": 2}, {"a": 3, "c": 4, "d": 5}
print(d1.keys() - d2.keys())
# {'b'}
print(d2.keys() - d1.keys())
# {'c', 'd'} | [
] | |
12ebcf942db94b34208f8ee84921e68b379daeac | 34733b8a98ac7d3518e02efdc414b45a8c12c805 | /openspeech/encoders/openspeech_encoder.py | f40b876bb33a4a53381586b7f9c514178c4ae5d0 | [
"MIT",
"LicenseRef-scancode-secret-labs-2011",
"Unlicense",
"HPND",
"BSD-3-Clause",
"ISC",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | SoYoungCho/openspeech-1 | 4945427d1953f469f01e687dc5ac5c19779f864d | 12eb432ea869288e097a5836236a6b658c40bb1b | refs/heads/main | 2023-05-12T13:14:55.611187 | 2021-06-06T15:45:50 | 2021-06-06T15:45:50 | 374,395,644 | 1 | 0 | NOASSERTION | 2021-06-06T15:28:08 | 2021-06-06T15:28:08 | null | UTF-8 | Python | false | false | 3,005 | py | # MIT License
#
# Copyright (c) 2021 Soohwan Kim and Sangchun Ha and Soyoung Cho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch.nn as nn
from torch import Tensor
from openspeech.modules import DeepSpeech2Extractor, VGGExtractor, Swish, Conv2dSubsampling
class OpenspeechEncoder(nn.Module):
r"""
Base Interface of Openspeech Encoder.
Inputs:
inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
"""
supported_activations = {
'hardtanh': nn.Hardtanh(0, 20, inplace=True),
'relu': nn.ReLU(inplace=True),
'elu': nn.ELU(inplace=True),
'leaky_relu': nn.LeakyReLU(inplace=True),
'gelu': nn.GELU(),
'swish': Swish(),
}
supported_extractors = {
'ds2': DeepSpeech2Extractor,
'vgg': VGGExtractor,
'conv2d_subsample': Conv2dSubsampling,
}
def __init__(self):
super(OpenspeechEncoder, self).__init__()
def count_parameters(self) -> int:
r""" Count parameters of encoders """
        return sum(p.numel() for p in self.parameters())
def update_dropout(self, dropout_p: float) -> None:
r""" Update dropout probability of encoders """
for name, child in self.named_children():
if isinstance(child, nn.Dropout):
child.p = dropout_p
def forward(self, inputs: Tensor, input_lengths: Tensor):
r"""
Forward propagate for encoders training.
Inputs:
inputs (torch.FloatTensor): A input sequence passed to encoders. Typically for inputs this will be a padded
`FloatTensor` of size ``(batch, seq_length, dimension)``.
input_lengths (torch.LongTensor): The length of input tensor. ``(batch)``
"""
raise NotImplementedError
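
# A minimal sketch (not part of the library) of how a concrete encoder would
# subclass this interface; the layer choice and sizes are illustrative
# assumptions, not the project's actual encoders.
class _SketchEncoder(OpenspeechEncoder):
    def __init__(self, input_dim: int = 80, hidden_dim: int = 256):
        super(_SketchEncoder, self).__init__()
        self.rnn = nn.LSTM(input_dim, hidden_dim, batch_first=True)

    def forward(self, inputs: Tensor, input_lengths: Tensor):
        # Encode the padded batch; lengths pass through unchanged here.
        outputs, _ = self.rnn(inputs)
        return outputs, input_lengths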
| [
"[email protected]"
] | |
62a850a7ef8dd5d6ae2de39d74521905b6cdf375 | b605b3dade1aca21b634f37308ac120cce4c7315 | /scripts/future_pred_asymmetric_with_bypass_diff_lossmultiple_power.py | 01abb9f2d06ac46a9a7554eb96b5998ba8e8a1f7 | [
"Apache-2.0"
] | permissive | dicarlolab/curiosity | 8db6dc35b31c2426246a9dd816054720d4d5e021 | 469dc4a652b6a0f62a6ccb2ecc595f55fdeb5f6c | refs/heads/master | 2020-04-05T18:55:42.852376 | 2016-07-20T14:10:56 | 2016-07-20T14:10:56 | 55,555,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,629 | py | """
image diffs
loss multiple of 1
diff power of 0.5
"""
import os
import copy
import numpy as np
import curiosity.utils.base as base
import curiosity.models.future_pred_asymmetric_with_bypass as modelsource
import curiosity.datasources.images_futurediffs_and_actions as datasource
dbname = 'threeworld_future_pred'
colname = 'test_asymmetric_with_bypass'
experiment_id = 'test0_diff_lm1_diffpow5_lr1'
model_func = modelsource.get_model
model_func_kwargs = {"host": "18.93.3.135",
"port": 23044,
"datapath": "/data2/datasource6",
"keyname": "randompermpairs3_medium",
"loss_multiple": 1,
"diff_power": 0.5}
data_func = datasource.getNextBatch
data_func_kwargs = copy.deepcopy(model_func_kwargs)
data_func_kwargs.pop('loss_multiple')
data_func_kwargs.pop('diff_power')
num_train_steps = 20480000
batch_size = 128
slippage = 0
SKDATA_ROOT = os.environ['SKDATA_ROOT']
CODE_ROOT = os.environ['CODE_ROOT']
cfgfile = os.path.join(CODE_ROOT,
'curiosity/curiosity/configs/normals_config_winner0.cfg')
savedir = os.path.join(SKDATA_ROOT, 'futurepredopt')
erase_earlier = 3
decaystep=1024000
base.run(dbname,
colname,
experiment_id,
model_func,
model_func_kwargs,
data_func,
data_func_kwargs,
num_train_steps,
batch_size,
slippage=slippage,
cfgfile=cfgfile,
savedir=savedir,
erase_earlier=erase_earlier,
base_learningrate=1.0,
loss_threshold=10000,
decaystep=decaystep)
| [
"[email protected]"
] | |
02aee538c4869755c1fb25b6a0126b3dda67eba6 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/equiLeader_20200827132415.py | bae1c1e0de8c2e0371cd2122dbb9f59a0ecd1480 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | def equi(A):
    # Return the number of equi-leaders: split points S such that the same
    # value is the leader of (occurs in more than half of) both A[:S+1]
    # and A[S+1:].
    # First find the leader of the whole array, then count its occurrences
    # on both sides of every split point.
    store = {}
    candidate = -1
    for i in A:
        if i in store:
            store[i] += 1
        else:
            store[i] = 1
    for i in store:
        if store[i] > (len(A) // 2):
            candidate = i
    total = store.get(candidate, 0)
    countA = 0  # occurrences of the candidate in the prefix A[:i+1]
    countB = 0  # number of equi-leader split points found so far
    for i in range(len(A)):
        if A[i] == candidate:
            countA += 1
        if countA > (i + 1) // 2 and (total - countA) > (len(A) - i - 1) // 2:
            countB += 1
    return countB

print(equi([4, 3, 4, 4, 4, 2]))  # 2 | [
"[email protected]"
] | |
e557bcf32fce35a7de6c78be75bb238a6ce9ce11 | 86a904f19f480377ed4b13729023af70d0f7d49c | /bear/__init__.py | d5e1a2dc6e6d84061c8866e6ede2cc30e3613725 | [
"MIT"
] | permissive | toxinu/bear | 830fa26f7fb6eff4e6b7d1630759c274b4c73f4d | f1d36e61c87531162a70b2210def5d061b4a8ff6 | refs/heads/master | 2021-06-01T10:45:15.672376 | 2021-03-26T02:07:31 | 2021-03-26T02:07:31 | 111,761,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | # -*- coding: utf-8 -*-
__version__ = "0.0.1"
from .core import Bear
| [
"[email protected]"
] | |
7e89226ff3da368cb2b0f2ad7926269f3528fd8b | c19ca6779f247572ac46c6f95327af2374135600 | /offer/offer 16 leetcode 50 Pow(x, n).py | 5a5e276821632a9217a1e3ef03892e43c6b6b71f | [] | no_license | clhchtcjj/Algorithm | aae9c90d945030707791d9a98d1312e4c07705f8 | aec68ce90a9fbceaeb855efc2c83c047acbd53b5 | refs/heads/master | 2021-01-25T14:24:08.037204 | 2018-06-11T14:31:38 | 2018-06-11T14:31:38 | 123,695,313 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # -- coding: utf-8 --
__author__ = 'CLH'
# Implement pow(x, n).
class Solution(object):
def myPow(self, x, n):
"""
:type x: float
:type n: int
:rtype: float
"""
        # Compute recursively (fast exponentiation by squaring).
        # Handle the case of a negative exponent.
        # A negative exponent with a zero base must raise an error.
ans = 1.0
exponent = abs(n)
if n < 0 and x == 0:
raise ZeroDivisionError("float division by zero")
else:
ans *= self.calculatePow(x,exponent)
if n < 0:
return 1.0 / ans
else:
return ans
def calculatePow(self,x,n):
if n == 0:
return 1
elif n == 1:
return x
else:
result = self.calculatePow(x, n>>1)
result *= result
if n & 1 == 1:
result *= x
return result
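
# Expected behaviour (illustrative checks, not from the original file):
#   myPow(2.0, 10)  ->  1024.0
#   myPow(2.0, -2)  ->  0.25
#   myPow(0.0, -1)  ->  raises ZeroDivisionError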
if __name__ == "__main__":
S = Solution()
print(S.myPow(2.1,3)) | [
"[email protected]"
] | |
070ca4e3aae333ececde78ecfbb3fba935a48243 | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-Security/PyObjCTest/test_oidscert.py | 848ceba55a68b006ccafe4478ed07f76adfebb38 | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,255 | py | from PyObjCTools.TestSupport import *
import Security
class Testoidscert(TestCase):
def test_unsuppported(self):
self.assertFalse(hasattr(Security, "INTEL_X509V3_CERT_R08"))
self.assertFalse(hasattr(Security, "INTEL_X509V3_CERT_R08_LENGTH"))
self.assertFalse(hasattr(Security, "INTEL_X509V3_CERT_PRIVATE_EXTENSIONS"))
self.assertFalse(
hasattr(Security, "INTEL_X509V3_CERT_PRIVATE_EXTENSIONS_LENGTH")
)
self.assertFalse(hasattr(Security, "INTEL_X509V3_SIGN_R08"))
self.assertFalse(hasattr(Security, "INTEL_X509V3_SIGN_R08_LENGTH"))
self.assertFalse(hasattr(Security, "INTEL_X509_C_DATATYPE"))
self.assertFalse(hasattr(Security, "INTEL_X509_LDAPSTRING_DATATYPE"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V3SignedCertificate"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V3SignedCertificateCStruct"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V3Certificate"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V3CertificateCStruct"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1Version"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SerialNumber"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1IssuerName"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1IssuerNameStd"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1IssuerNameCStruct"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1IssuerNameLDAP"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1ValidityNotBefore"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1ValidityNotAfter"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SubjectName"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SubjectNameStd"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SubjectNameCStruct"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SubjectNameLDAP"))
self.assertFalse(hasattr(Security, "CSSMOID_CSSMKeyStruct"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SubjectPublicKeyCStruct"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SubjectPublicKeyAlgorithm"))
self.assertFalse(
hasattr(Security, "CSSMOID_X509V1SubjectPublicKeyAlgorithmParameters")
)
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SubjectPublicKey"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1CertificateIssuerUniqueId"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1CertificateSubjectUniqueId"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V3CertificateExtensionsStruct"))
self.assertFalse(
hasattr(Security, "CSSMOID_X509V3CertificateExtensionsCStruct")
)
self.assertFalse(
hasattr(Security, "CSSMOID_X509V3CertificateNumberOfExtensions")
)
self.assertFalse(hasattr(Security, "CSSMOID_X509V3CertificateExtensionStruct"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V3CertificateExtensionCStruct"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V3CertificateExtensionId"))
self.assertFalse(
hasattr(Security, "CSSMOID_X509V3CertificateExtensionCritical")
)
self.assertFalse(hasattr(Security, "CSSMOID_X509V3CertificateExtensionType"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V3CertificateExtensionValue"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SignatureStruct"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SignatureCStruct"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SignatureAlgorithm"))
self.assertFalse(hasattr(Security, "CSSMOID_X509V1SignatureAlgorithmTBS"))
self.assertFalse(
hasattr(Security, "CSSMOID_X509V1SignatureAlgorithmParameters")
)
self.assertFalse(hasattr(Security, "CSSMOID_X509V1Signature"))
self.assertFalse(hasattr(Security, "CSSMOID_SubjectSignatureBitmap"))
self.assertFalse(hasattr(Security, "CSSMOID_SubjectPicture"))
self.assertFalse(hasattr(Security, "CSSMOID_SubjectEmailAddress"))
self.assertFalse(hasattr(Security, "CSSMOID_UseExemptions"))
self.assertFalse(hasattr(Security, "CSSMOID_SubjectDirectoryAttributes"))
self.assertFalse(hasattr(Security, "CSSMOID_SubjectKeyIdentifier"))
self.assertFalse(hasattr(Security, "CSSMOID_KeyUsage"))
self.assertFalse(hasattr(Security, "CSSMOID_PrivateKeyUsagePeriod"))
self.assertFalse(hasattr(Security, "CSSMOID_SubjectAltName"))
self.assertFalse(hasattr(Security, "CSSMOID_IssuerAltName"))
self.assertFalse(hasattr(Security, "CSSMOID_BasicConstraints"))
self.assertFalse(hasattr(Security, "CSSMOID_CrlNumber"))
self.assertFalse(hasattr(Security, "CSSMOID_CrlReason"))
self.assertFalse(hasattr(Security, "CSSMOID_HoldInstructionCode"))
self.assertFalse(hasattr(Security, "CSSMOID_InvalidityDate"))
self.assertFalse(hasattr(Security, "CSSMOID_DeltaCrlIndicator"))
self.assertFalse(hasattr(Security, "CSSMOID_IssuingDistributionPoint"))
self.assertFalse(hasattr(Security, "CSSMOID_IssuingDistributionPoints"))
self.assertFalse(hasattr(Security, "CSSMOID_CertIssuer"))
self.assertFalse(hasattr(Security, "CSSMOID_NameConstraints"))
self.assertFalse(hasattr(Security, "CSSMOID_CrlDistributionPoints"))
self.assertFalse(hasattr(Security, "CSSMOID_CertificatePolicies"))
self.assertFalse(hasattr(Security, "CSSMOID_PolicyMappings"))
self.assertFalse(hasattr(Security, "CSSMOID_PolicyConstraints"))
self.assertFalse(hasattr(Security, "CSSMOID_AuthorityKeyIdentifier"))
self.assertFalse(hasattr(Security, "CSSMOID_ExtendedKeyUsage"))
self.assertFalse(hasattr(Security, "CSSMOID_InhibitAnyPolicy"))
self.assertFalse(hasattr(Security, "CSSMOID_AuthorityInfoAccess"))
self.assertFalse(hasattr(Security, "CSSMOID_BiometricInfo"))
self.assertFalse(hasattr(Security, "CSSMOID_QC_Statements"))
self.assertFalse(hasattr(Security, "CSSMOID_SubjectInfoAccess"))
self.assertFalse(hasattr(Security, "CSSMOID_ExtendedKeyUsageAny"))
self.assertFalse(hasattr(Security, "CSSMOID_ServerAuth"))
self.assertFalse(hasattr(Security, "CSSMOID_ClientAuth"))
self.assertFalse(hasattr(Security, "CSSMOID_ExtendedUseCodeSigning"))
self.assertFalse(hasattr(Security, "CSSMOID_EmailProtection"))
self.assertFalse(hasattr(Security, "CSSMOID_TimeStamping"))
self.assertFalse(hasattr(Security, "CSSMOID_OCSPSigning"))
self.assertFalse(hasattr(Security, "CSSMOID_KERBv5_PKINIT_KP_CLIENT_AUTH"))
self.assertFalse(hasattr(Security, "CSSMOID_KERBv5_PKINIT_KP_KDC"))
self.assertFalse(hasattr(Security, "CSSMOID_EKU_IPSec"))
self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_EXTENSION"))
self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_IDENTITY"))
self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_EMAIL_SIGN"))
self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_EMAIL_ENCRYPT"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_CERT_POLICY"))
self.assertFalse(hasattr(Security, "CSSMOID_DOTMAC_CERT_POLICY"))
self.assertFalse(hasattr(Security, "CSSMOID_ADC_CERT_POLICY"))
self.assertFalse(hasattr(Security, "CSSMOID_MACAPPSTORE_CERT_POLICY"))
self.assertFalse(hasattr(Security, "CSSMOID_MACAPPSTORE_RECEIPT_CERT_POLICY"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLEID_CERT_POLICY"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLEID_SHARING_CERT_POLICY"))
self.assertFalse(hasattr(Security, "CSSMOID_MOBILE_STORE_SIGNING_POLICY"))
self.assertFalse(hasattr(Security, "CSSMOID_TEST_MOBILE_STORE_SIGNING_POLICY"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EKU_CODE_SIGNING"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EKU_CODE_SIGNING_DEV"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EKU_RESOURCE_SIGNING"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EKU_ICHAT_SIGNING"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EKU_ICHAT_ENCRYPTION"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EKU_SYSTEM_IDENTITY"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EKU_PASSBOOK_SIGNING"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EKU_PROFILE_SIGNING"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EKU_QA_PROFILE_SIGNING"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION_CODE_SIGNING"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION_APPLE_SIGNING"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION_ADC_DEV_SIGNING"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION_ADC_APPLE_SIGNING"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION_PASSBOOK_SIGNING"))
self.assertFalse(
hasattr(Security, "CSSMOID_APPLE_EXTENSION_MACAPPSTORE_RECEIPT")
)
self.assertFalse(
hasattr(Security, "CSSMOID_APPLE_EXTENSION_INTERMEDIATE_MARKER")
)
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION_WWDR_INTERMEDIATE"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION_ITMS_INTERMEDIATE"))
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION_AAI_INTERMEDIATE"))
self.assertFalse(
hasattr(Security, "CSSMOID_APPLE_EXTENSION_APPLEID_INTERMEDIATE")
)
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION_APPLEID_SHARING"))
self.assertFalse(
hasattr(Security, "CSSMOID_APPLE_EXTENSION_SYSINT2_INTERMEDIATE")
)
self.assertFalse(
hasattr(Security, "CSSMOID_APPLE_EXTENSION_DEVELOPER_AUTHENTICATION")
)
self.assertFalse(
hasattr(Security, "CSSMOID_APPLE_EXTENSION_SERVER_AUTHENTICATION")
)
self.assertFalse(hasattr(Security, "CSSMOID_APPLE_EXTENSION_ESCROW_SERVICE"))
self.assertFalse(
hasattr(Security, "CSSMOID_APPLE_EXTENSION_PROVISIONING_PROFILE_SIGNING")
)
self.assertFalse(hasattr(Security, "CSSMOID_NetscapeCertType"))
self.assertFalse(hasattr(Security, "CSSMOID_NetscapeCertSequence"))
self.assertFalse(hasattr(Security, "CSSMOID_NetscapeSGC"))
self.assertFalse(hasattr(Security, "CSSMOID_MicrosoftSGC"))
self.assertFalse(hasattr(Security, "CE_NCT_SSL_Client"))
self.assertFalse(hasattr(Security, "CE_NCT_SSL_Server"))
self.assertFalse(hasattr(Security, "CE_NCT_SMIME"))
self.assertFalse(hasattr(Security, "CE_NCT_ObjSign"))
self.assertFalse(hasattr(Security, "CE_NCT_Reserved"))
self.assertFalse(hasattr(Security, "CE_NCT_SSL_CA"))
self.assertFalse(hasattr(Security, "CE_NCT_SMIME_CA"))
self.assertFalse(hasattr(Security, "CE_NCT_ObjSignCA"))
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
bb77ba5829b46af2e085ab307b7fb5a4937e8fd4 | d7e4d46db1cfda7fb417ba4d185be0639d2d1280 | /lib/analyze_results.py | 0728baa0a3be9b858c3eba34b55c7673ec366a63 | [] | no_license | enewe101/relational-nouns-LREC-2018 | 4f830c7dc129ce988bef486b3e393228bdee4cd5 | d6d1689b9107401c12cb74e3a68dd75cda45266d | refs/heads/master | 2021-09-14T07:45:13.386635 | 2018-05-10T04:14:47 | 2018-05-10T04:14:47 | 105,477,180 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | from collections import defaultdict
import json
import os
import sys
sys.path.append('..')
from SETTINGS import DATA_DIR
RESULTS_PATH = os.path.join(
    DATA_DIR, 'crowdflower', 'results-binary-comprehensive.json')
def read_raw_results(results_path=RESULTS_PATH):
    # One JSON-encoded result record per line of the results file.
    with open(results_path) as f:
        return [json.loads(line) for line in f]
def results_by_contributor():
    raw_results = read_raw_results()
    contributor_results = defaultdict(list)
    for result in raw_results:
        for judgment in result['results']['judgments']:
            user = judgment['worker_id']
            # Group each judgment under its contributor (assumed intent;
            # the source called append() with no argument).
            contributor_results[user].append(judgment)
    return contributor_results
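# Hypothetical usage sketch; the results-file layout (each record holding a
# 'results' dict with a 'judgments' list) is inferred from the parsing code
# above, not documented here:
#     by_user = results_by_contributor()
#     print({user: len(judgments) for user, judgments in by_user.items()})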
| [
"[email protected]"
] | |
c38a22db05427b0493e281f998d27db898e6738c | d771e2173ec0b84f28a4bec80dd4dedaf6c48021 | /rest/app.py | 34b7c9838c27e868624f819a2e245659df14e1eb | [
"Apache-2.0"
] | permissive | markmcdowall/mg-rest-auth-test | 8675abdb63b314aae3e3cee1124354a9d3713120 | 1ce3027480c9846187f0a22afcdbdbab6d3ef2eb | refs/heads/master | 2021-01-20T01:17:59.616252 | 2017-10-26T16:04:02 | 2017-10-26T16:04:02 | 101,283,488 | 0 | 0 | null | 2017-10-26T16:04:03 | 2017-08-24T10:32:08 | Python | UTF-8 | Python | false | false | 1,510 | py | """
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from flask import Flask
from flask_restful import Api, Resource
from rest.mg_auth import authorized
APP = Flask(__name__)
class TokenCheck(Resource):
"""
Class to handle checking if the token returns a valid user name
"""
@authorized
def get(self, user_id):
"""
Test to see if it is possible to get the user_id
"""
msg = "Congratulations, welcome to the MuG VRE"
if user_id is None:
msg = "Are you sure that you have a valid token?"
        return {
            'user_id': user_id,
            'message': msg,
        }
# Define the URIs and their matching methods
REST_API = Api(APP)
# Token Checker
REST_API.add_resource(TokenCheck, "/mug/api/check", endpoint='token-check')
# Initialise the server
if __name__ == "__main__":
APP.run(port=5000, debug=True, use_reloader=False)
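# A minimal sketch of exercising the endpoint with Flask's test client.
# The Bearer-token header below is an assumption based on the name of the
# `authorized` decorator; the real header format lives in rest.mg_auth.
#     with APP.test_client() as client:
#         rv = client.get('/mug/api/check',
#                         headers={'Authorization': 'Bearer <token>'})
#         print(rv.data)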
| [
"[email protected]"
] | |
5185ae361d901346d73a7cb998e7b6d406662ddc | c33496682b760deac61fedecba3e82ce4e41dfde | /scripts/e240.py | 0d4ee2ac36fae167814fac3924acaa0790845cd8 | [
"MIT"
] | permissive | ferasalsaab/neuralnilm_prototype | c5e9cde02d475ac499b15fea62143e76adff07d0 | 2119292e7d5c8a137797ad3c9abf9f37e7f749af | refs/heads/master | 2020-04-16T14:38:03.615279 | 2018-01-29T15:30:43 | 2018-01-29T15:30:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,994 | py | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from lasagne.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
from copy import deepcopy
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 1000
GRADIENT_STEPS = 100
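# Number of timesteps to backpropagate through, passed to Lasagne's recurrent
# layers as their `gradient_steps` argument (truncated BPTT).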
"""
e233
based on e131c but with:
* lag=32
* pool
e234
* init final layer and conv layer
235
no lag
236
should be exactly as 131c: no pool, no lag, no init for final and conv layer
237
putting the pool back
238
seems pooling hurts us! disable pooling.
enable lag = 32
239
BLSTM
lag = 20
240
LSTM not BLSTM
various lags
ideas for next TODO:
* 3 LSTM layers with smaller conv between them
* why does pooling hurt us?
"""
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=10,
subsample_target=5,
include_diff=False,
clip_appliance_power=True,
lag=0
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(25),
'b': Uniform(25)
},
{
'type': DenseLayer,
'num_units': 50,
'nonlinearity': sigmoid,
'W': Uniform(10),
'b': Uniform(10)
},
{
'type': LSTMLayer,
'num_units': 40,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 20,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
# 'W': Uniform(1)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
# {
# 'type': FeaturePoolLayer,
# 'ds': 5, # number of feature maps to be pooled together
# 'axis': 1 # pool over the time axis
# },
{
'type': LSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
}
]
)
def exp_a(name):
# like 239 but LSTM not BLSTM and no lag and clip appliance power
    # RESULTS: awful
source = RealApplianceSource(**source_dict)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_b(name):
# as A but lag = 10
source_dict_copy = deepcopy(source_dict)
source_dict_copy['lag'] = 10
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_c(name):
# as A but lag = 20
source_dict_copy = deepcopy(source_dict)
source_dict_copy['lag'] = 20
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_d(name):
# as A but lag = 40
# possibly the best of this e240 lot
source_dict_copy = deepcopy(source_dict)
source_dict_copy['lag'] = 40
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def exp_e(name):
# as A but lag = 80
source_dict_copy = deepcopy(source_dict)
source_dict_copy['lag'] = 80
source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(experiment_name=name, source=source))
net_dict_copy['layers_config'].append(
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
)
net = Net(**net_dict_copy)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('abcde'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=10000)
except KeyboardInterrupt:
break
except TrainingError as exception:
print("EXCEPTION:", exception)
except Exception as exception:
print("EXCEPTION:", exception)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
be5d17d61c2ffb7c47f6328b101ff4469f32c018 | 661ee30b27b2893930d4a8db1db0c08538653dc5 | /standalone_django_project/settings.py | 2f4519f5c0963bc3708692a4867f3e026d8bddb8 | [
"BSD-3-Clause"
] | permissive | 350dotorg/aktivator | fc67aed167fb204ff327448a86c37d69ef566964 | bb37cc50212a1797315c99037495a83bc9ff2b01 | refs/heads/master | 2016-09-09T21:51:23.371940 | 2014-07-11T13:33:19 | 2014-07-11T13:33:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,121 | py | import os
PROJECT_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
ROOT_URLCONF = 'standalone_django_project.urls'
WSGI_APPLICATION = 'standalone_django_project.wsgi.application'
SITE_ID = 1
SITE_NAME = os.environ.get("SITE_NAME")
SITE_DOMAIN = os.environ['SITE_DOMAIN']
HEROKU_DOMAIN = os.environ.get('HEROKU_DOMAIN')
import actionkit_usersearch
GEONAMES_API_USERNAME = actionkit_usersearch.SETTINGS['GEONAMES_API_USERNAME']
ALLOWED_HOSTS = [SITE_DOMAIN]
if HEROKU_DOMAIN:
ALLOWED_HOSTS.append(HEROKU_DOMAIN)
if os.environ.get('DJANGO_DEBUG'):
DEBUG = True
else:
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ACTIONKIT_DATABASE_NAME = os.environ['ACTIONKIT_DATABASE_NAME']
ACTIONKIT_DATABASE_USER = os.environ['ACTIONKIT_DATABASE_USER']
ACTIONKIT_DATABASE_PASSWORD = os.environ['ACTIONKIT_DATABASE_PASSWORD']
import dj_database_url
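# dj_database_url.config() builds the default connection settings from the
# DATABASE_URL environment variable (the Heroku convention).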
DATABASES = {
'default': dj_database_url.config(),
'ak': {
'ENGINE': "django.db.backends.mysql",
'NAME': ACTIONKIT_DATABASE_NAME,
'USER': ACTIONKIT_DATABASE_USER,
'PASSWORD': ACTIONKIT_DATABASE_PASSWORD,
'HOST': "client-db.actionkit.com",
'PORT': "",
}
}
DATABASES['dummy'] = actionkit_usersearch.DATABASES['dummy']
SECRET_KEY = os.environ["DJANGO_SECRET"]
ACTIONKIT_API_HOST = os.environ['ACTIONKIT_API_HOST']
ACTIONKIT_API_USER = os.environ['ACTIONKIT_API_USER']
ACTIONKIT_API_PASSWORD = os.environ['ACTIONKIT_API_PASSWORD']
TEMPLATE_LOADERS = (
'dbtemplates.loader.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'gunicorn',
'south',
'django.contrib.flatpages',
'dbtemplates',
'djangohelpers',
'standalone_django_project', # For the template finder
'actionkit',
'actionkit_usersearch',
'actionkit_userdetail',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.request",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"standalone_django_project.context_processors.globals",
)
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
]
OAUTH_REDIRECT_URI_ENFORCE_PREFIX_ONLY = True
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.transaction.TransactionMiddleware',
"djangohelpers.middleware.AuthRequirementMiddleware",
)
ANONYMOUS_PATHS = (
"/static/",
"/admin/",
"/accounts/",
)
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
if os.environ.get('DJANGO_DEBUG_TOOLBAR'):
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
INSTALLED_APPS += (
'debug_toolbar',
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
INTERNAL_IPS = os.environ.get("INTERNAL_IPS")
if INTERNAL_IPS is None:
INTERNAL_IPS = []
elif INTERNAL_IPS.strip() in ("*", "0.0.0.0"):
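    # Wildcard: a list whose membership test always succeeds, so every
    # address is treated as internal (e.g. by the debug toolbar).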
class AllIPS(list):
def __contains__(self, item):
return True
INTERNAL_IPS = AllIPS()
else:
INTERNAL_IPS = [i.strip() for i in INTERNAL_IPS.split()]
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'collected_static')
| [
"[email protected]"
] | |
a2b165fab8d6e4f886c3ec1ffd2c2aa7e4488b98 | 56cce3fee2e3d69d60958eb2aacc4f65fc3d2230 | /tests/test_directed_graph.py | c8ba0e69667c52f75f1566337eabb47ba0a6a063 | [
"BSD-3-Clause"
] | permissive | nokia/PyBGL | 52c2f175d1dbccb15519f8a16de141845d0abaf3 | 707f2df32ede7d9a992ea217a4791da34f13e138 | refs/heads/master | 2023-08-08T04:46:24.931627 | 2023-08-03T16:31:35 | 2023-08-03T16:31:35 | 148,536,169 | 12 | 3 | BSD-3-Clause | 2023-08-03T16:31:36 | 2018-09-12T20:11:36 | Python | UTF-8 | Python | false | false | 2,434 | py | #!/usr/bin/env pytest-3
# -*- coding: utf-8 -*-
from pybgl.graph import *
(u, v, w) = (0, 1, 2)
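# The tests below assume add_vertex() assigns integer descriptors 0, 1, 2
# in insertion order.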
def make_g1() -> DirectedGraph:
g1 = DirectedGraph()
add_vertex(g1) # u
add_vertex(g1) # v
add_vertex(g1) # w
return g1
def make_g2() -> DirectedGraph:
g2 = make_g1()
add_edge(u, v, g2)
add_edge(u, v, g2) # parallel edge
add_edge(u, w, g2)
add_edge(v, w, g2)
add_edge(w, w, g2)
return g2
def test_directed_graph_num_vertices():
g1 = make_g1()
assert num_vertices(g1) == 3
def test_directed_graph_node_add_edge():
# Make graph
g = make_g1()
assert out_degree(u, g) == 0
assert num_edges(g) == 0
# Add e1
(e1, added1) = add_edge(u, v, g)
assert added1
(e, found) = edge(u, v, g)
assert found
assert e == e1
assert out_degree(u, g) == 1
assert num_edges(g) == 1
# No arc
(e, found) = edge(u, w, g)
assert not found
assert {e for e in out_edges(u, g)} == {e1}
# Add e2
(e2, added2) = add_edge(u, w, g)
assert added2
assert {e for e in out_edges(u, g)} == {e1, e2}
assert out_degree(u, g) == 2
assert num_edges(g) == 2
def test_directed_graph_add_vertex():
g = make_g2()
assert num_vertices(g) == 3
assert num_edges(g) == 5
# Add vertex x
x = add_vertex(g)
assert num_vertices(g) == 4
# Add edge (v -> x)
(e1, found) = edge(v, w, g)
assert found
(e2, added) = add_edge(v, x, g)
assert num_edges(g) == 6
assert {e for e in out_edges(v, g)} == {e1, e2}
def test_directed_graph_remove_edge():
g = make_g2()
assert num_edges(g) == 5
(e, found) = edge(v, w, g)
remove_edge(e, g)
assert num_edges(g) == 4
(e, found) = edge(w, w, g)
remove_edge(e, g)
assert num_edges(g) == 3
def test_directed_graph_iterators():
g = make_g2()
m = 0
for _ in vertices(g):
m += 1
assert m == num_vertices(g)
assert m == 3
n = 0
for _ in edges(g):
n += 1
assert n == num_edges(g)
assert n == 5
def test_directed_graph_remove_vertex():
g = make_g2()
assert num_vertices(g) == 3
assert num_edges(g) == 5
remove_vertex(v, g)
assert num_vertices(g) == 2
assert num_edges(g) == 2
remove_vertex(w, g)
assert num_vertices(g) == 1
assert num_edges(g) == 0
remove_vertex(u, g)
assert num_vertices(g) == 0
assert num_edges(g) == 0
| [
"[email protected]"
] | |
aed61c0eda1210a477c70bfb73244ce07ed5e7a4 | 2c5bd933813b173aa69d9a829f530f7520509ba3 | /4.python_接口自动化_excel中写用例/tools/HTMLTestRunner_cn.py | 171e17747cd9d33e799a4b41bc5f3e831e6a7f4a | [] | no_license | ferry-luo/AutoTest | b4193d48cc7908edebf583d8700aa4c14e6c8823 | e4212743704d50b2834fd0e28f2cf1e69d6dabef | refs/heads/main | 2022-12-27T00:21:26.506484 | 2020-10-18T01:02:01 | 2020-10-18T01:02:01 | 304,996,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 89,913 | py | #-*- coding: utf-8 -*-
"""
A TestRunner for use with the Python unit testing framework. It
generates a HTML report to show the result at a glance.
The simplest way to use this is to invoke its main method. E.g.
import unittest
import HTMLTestRunner
... define your tests ...
if __name__ == '__main__':
HTMLTestRunner.main()
For more customization options, instantiates a HTMLTestRunner object.
HTMLTestRunner is a counterpart to unittest's TextTestRunner. E.g.
# output to a file
    fp = open('my_report.html', 'wb')
runner = HTMLTestRunner.HTMLTestRunner(
stream=fp,
title='My unit test',
description='This demonstrates the report output by HTMLTestRunner.'
)
# Use an external stylesheet.
# See the Template_mixin class for more customizable options
runner.STYLESHEET_TMPL = '<link rel="stylesheet" href="my_stylesheet.css" type="text/css">'
# run the test
runner.run(my_test_suite)
------------------------------------------------------------------------
Copyright (c) 2004-2007, Wai Yip Tung
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name Wai Yip Tung nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# URL: http://tungwaiyip.info/software/HTMLTestRunner.html
__author__ = "Wai Yip Tung"
__version__ = "0.8.3"
"""
Change History
Version 0.8.4 by GoverSky
* Add support for Python 3.x
* Add pie chart to the result report
* Add screenshots for Selenium test cases
* Add retry on failure
Version 0.8.3
* Prevent crash on class or module-level exceptions (Darren Wurf).
Version 0.8.2
* Show output inline instead of popup window (Viorel Lupu).
Version 0.8.1
* Validated XHTML (Wolfgang Borgert).
* Added description of test classes and test cases.
Version 0.8.0
* Define Template_mixin class for customization.
* Work around an IE 6 bug where it does not treat a <script> block as CDATA.
Version 0.7.1
* Back port to Python 2.3 (Frank Horowitz).
* Fix missing scroll bars in detail log (Podi).
"""
# TODO: color stderr
# TODO: simplify javascript using more than 1 class in the class attribute?
import datetime
import sys
import unittest
import copy
import threading
from xml.sax import saxutils
from functools import cmp_to_key
PY3K = (sys.version_info[0] > 2)
if PY3K:
import io as StringIO
else:
import StringIO
# ------------------------------------------------------------------------
# The redirectors below are used to capture output during testing. Output
# sent to sys.stdout and sys.stderr are automatically captured. However
# in some cases sys.stdout is already cached before HTMLTestRunner is
# invoked (e.g. calling logging.basicConfig). In order to capture that
# output, use the redirectors for the cached stream.
#
# e.g.
# >>> logging.basicConfig(stream=HTMLTestRunner.stdout_redirector)
# >>>
class OutputRedirector(object):
""" Wrapper to redirect stdout or stderr """
def __init__(self, fp):
self.fp = fp
def write(self, s):
self.fp.write(s)
def writelines(self, lines):
self.fp.writelines(lines)
def flush(self):
self.fp.flush()
stdout_redirector = OutputRedirector(sys.stdout)
stderr_redirector = OutputRedirector(sys.stderr)
# ----------------------------------------------------------------------
# Template
class Template_mixin(object):
"""
Define a HTML template for report customerization and generation.
Overall structure of an HTML report
HTML
+------------------------+
|<html> |
| <head> |
| |
| STYLESHEET |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </head> |
| |
| <body> |
| |
| HEADING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| REPORT |
| +----------------+ |
| | | |
| +----------------+ |
| |
| ENDING |
| +----------------+ |
| | | |
| +----------------+ |
| |
| </body> |
|</html> |
+------------------------+
"""
STATUS = {
        0: u'通过',  # "Pass"
        1: u'失败',  # "Fail"
        2: u'错误',  # "Error"
        3: u'跳过',  # "Skip"
}
DEFAULT_TITLE = 'Unit Test Report'
DEFAULT_DESCRIPTION = ''
# ------------------------------------------------------------------------
# HTML Template
HTML_TMPL = r"""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>%(title)s</title>
<meta name="generator" content="%(generator)s"/>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
%(stylesheet)s
</head>
<body>
<script language="javascript" type="text/javascript">
output_list = Array();
    /* level - 0:Summary; 1:Passed; 2:Failed; 3:Errored; 4:Skipped; 5:All */
function showCase(level,channel) {
trs = document.getElementsByTagName("tr");
for (var i = 0; i < trs.length; i++) {
tr = trs[i];
id = tr.id;
if (["ft","pt","et","st"].indexOf(id.substr(0,2))!=-1){
if ( level ==0 && id.substr(2,1)==channel ) {
tr.className = 'hiddenRow';
}
}
if (id.substr(0,3) == 'pt'+channel) {
if ( level==1){
tr.className = '';
}
else if (level>3 && id.substr(2,1)==channel ){
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
if (id.substr(0,3) == 'ft'+channel) {
if (level ==2) {
tr.className = '';
}
else if (level>3 && id.substr(2,1)==channel ){
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
if (id.substr(0,3) == 'et'+channel) {
if (level ==3) {
tr.className = '';
}
else if (level>3 && id.substr(2,1)==channel ){
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
if (id.substr(0,3) == 'st'+channel) {
if (level ==3) {
tr.className = '';
}
else if (level>3 && id.substr(2,1)==channel ){
tr.className = '';
}
else {
tr.className = 'hiddenRow';
}
}
}
}
function showClassDetail(cid, count) {
var id_list = Array(count);
var toHide = 1;
for (var i = 0; i < count; i++) {
tid0 = 't' + cid.substr(1) + '.' + (i+1);
tid = 'f' + tid0;
tr = document.getElementById(tid);
if (!tr) {
tid = 'p' + tid0;
tr = document.getElementById(tid);
}
if (!tr) {
tid = 'e' + tid0;
tr = document.getElementById(tid);
}
if (!tr) {
tid = 's' + tid0;
tr = document.getElementById(tid);
}
id_list[i] = tid;
if (tr.className) {
toHide = 0;
}
}
for (var i = 0; i < count; i++) {
tid = id_list[i];
if (toHide) {
document.getElementById(tid).className = 'hiddenRow';
}
else {
document.getElementById(tid).className = '';
}
}
}
function showTestDetail(div_id){
var details_div = document.getElementById(div_id)
var displayState = details_div.style.display
// alert(displayState)
if (displayState != 'block' ) {
displayState = 'block'
details_div.style.display = 'block'
}
else {
details_div.style.display = 'none'
}
}
function html_escape(s) {
s = s.replace(/&/g,'&');
s = s.replace(/</g,'<');
s = s.replace(/>/g,'>');
return s;
}
function drawCircle(circle,pass, fail, error){
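        // Paint a pass/fail/error pie onto the named canvas, plus a small
        // colour-keyed legend with the raw counts to its right.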
var color = ["#6c6","#c60","#c00"];
var data = [pass,fail,error];
var text_arr = ["Pass", "Fail", "Error"];
var canvas = document.getElementById(circle);
var ctx = canvas.getContext("2d");
var startPoint=0;
var width = 20, height = 10;
var posX = 112 * 2 + 20, posY = 30;
var textX = posX + width + 5, textY = posY + 10;
for(var i=0;i<data.length;i++){
ctx.fillStyle = color[i];
ctx.beginPath();
ctx.moveTo(112,84);
ctx.arc(112,84,84,startPoint,startPoint+Math.PI*2*(data[i]/(data[0]+data[1]+data[2])),false);
ctx.fill();
startPoint += Math.PI*2*(data[i]/(data[0]+data[1]+data[2]));
ctx.fillStyle = color[i];
ctx.fillRect(posX, posY + 20 * i, width, height);
ctx.moveTo(posX, posY + 20 * i);
ctx.font = 'bold 14px';
ctx.fillStyle = color[i];
var percent = text_arr[i] + ":"+data[i];
ctx.fillText(percent, textX, textY + 20 * i);
}
}
function show_img(obj) {
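        // Reveal the screenshot carousel next to the clicked link, build one
        // indicator dot per <img>, and pause/resume auto-rotation on hover.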
var obj1 = obj.nextElementSibling
obj1.style.display='block'
        var index = 0;//index of the currently shown image
var len = obj1.getElementsByTagName('img').length;
var imgyuan = obj1.getElementsByClassName('imgyuan')[0]
//var start=setInterval(autoPlay,500);
        obj1.onmouseover=function(){//pause the carousel while the cursor is over the image
clearInterval(start);
}
        obj1.onmouseout=function(){//resume the carousel when the cursor leaves the image
start=setInterval(autoPlay,1000);
}
for (var i = 0; i < len; i++) {
var font = document.createElement('font')
imgyuan.appendChild(font)
}
        var lis = obj1.getElementsByTagName('font');//grab all the indicator dots
changeImg(0)
var funny = function (i) {
lis[i].onmouseover = function () {
index=i
changeImg(i)
}
}
for (var i = 0; i < lis.length; i++) {
funny(i);
}
function autoPlay(){
if(index>len-1){
index=0;
                clearInterval(start); //stop after one full cycle
}
changeImg(index++);
}
imgyuan.style.width= 25*len +"px";
        //keep the dots and the images in sync
function changeImg(index) {
var list = obj1.getElementsByTagName('img');
var list1 = obj1.getElementsByTagName('font');
for (i = 0; i < list.length; i++) {
list[i].style.display = 'none';
list1[i].style.backgroundColor = 'white';
}
list[index].style.display = 'block';
list1[index].style.backgroundColor = 'blue';
}
}
function hide_img(obj){
obj.parentElement.style.display = "none";
obj.parentElement.getElementsByClassName('imgyuan')[0].innerHTML = "";
}
</script>
%(heading)s
<div class="piechart">
<div>
            <canvas id="circle%(channel)s" width="350" height="168"></canvas>
</div>
</div>
%(report)s
%(ending)s
</body>
</html>
"""
    # variables: (title, generator, stylesheet, heading, report, ending, channel)
# ------------------------------------------------------------------------
# Stylesheet
#
# alternatively use a <link> for external style sheet, e.g.
# <link rel="stylesheet" href="$url" type="text/css">
STYLESHEET_TMPL = """
<style type="text/css" media="screen">
body { font-family: verdana, arial, helvetica, sans-serif; font-size: 80%; }
table { font-size: 100%; }
pre {
white-space: pre-wrap;
word-wrap: break-word;
}
/* -- heading ---------------------------------------------------------------------- */
h1 {
font-size: 16pt;
color: gray;
}
.heading {
float:left;
margin-top: 0ex;
margin-bottom: 1ex;
}
.heading .attribute {
margin-top: 1ex;
margin-bottom: 0;
}
.heading .description {
margin-top: 4ex;
margin-bottom: 6ex;
}
/* -- css div popup ------------------------------------------------------------------------ */
a.popup_link {
}
a.popup_link:hover {
color: red;
}
.img{
height: 100%;
border-collapse: collapse;
border: 2px solid #777;
}
.screenshots {
z-index: 100;
position:fixed;
height: 80%;
left: 50%;
top: 50%;
transform: translate(-50%,-50%);
display: none;
}
.imgyuan{
height: 20px;
border-radius: 12px;
background-color: red;
padding-left: 13px;
margin: 0 auto;
position: relative;
top: -40px;
background-color: rgba(1, 150, 0, 0.3);
}
.imgyuan font{
border:1px solid white;
width:11px;
height:11px;
border-radius:50%;
margin-right: 9px;
margin-top: 4px;
display: block;
float: left;
background-color: white;
}
.close_shots {
background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJYAAACWCAYAAAA8AXHiAAAACXBIWXMAAAsTAAALEwEAmpwYAAAKT2lDQ1BQaG90b3Nob3AgSUNDIHByb2ZpbGUAAHjanVNnVFPpFj333vRCS4iAlEtvUhUIIFJCi4AUkSYqIQkQSoghodkVUcERRUUEG8igiAOOjoCMFVEsDIoK2AfkIaKOg6OIisr74Xuja9a89+bN/rXXPues852zzwfACAyWSDNRNYAMqUIeEeCDx8TG4eQuQIEKJHAAEAizZCFz/SMBAPh+PDwrIsAHvgABeNMLCADATZvAMByH/w/qQplcAYCEAcB0kThLCIAUAEB6jkKmAEBGAYCdmCZTAKAEAGDLY2LjAFAtAGAnf+bTAICd+Jl7AQBblCEVAaCRACATZYhEAGg7AKzPVopFAFgwABRmS8Q5ANgtADBJV2ZIALC3AMDOEAuyAAgMADBRiIUpAAR7AGDIIyN4AISZABRG8lc88SuuEOcqAAB4mbI8uSQ5RYFbCC1xB1dXLh4ozkkXKxQ2YQJhmkAuwnmZGTKBNA/g88wAAKCRFRHgg/P9eM4Ors7ONo62Dl8t6r8G/yJiYuP+5c+rcEAAAOF0ftH+LC+zGoA7BoBt/qIl7gRoXgugdfeLZrIPQLUAoOnaV/Nw+H48PEWhkLnZ2eXk5NhKxEJbYcpXff5nwl/AV/1s+X48/Pf14L7iJIEyXYFHBPjgwsz0TKUcz5IJhGLc5o9H/LcL//wd0yLESWK5WCoU41EScY5EmozzMqUiiUKSKcUl0v9k4t8s+wM+3zUAsGo+AXuRLahdYwP2SycQWHTA4vcAAPK7b8HUKAgDgGiD4c93/+8//UegJQCAZkmScQAAXkQkLlTKsz/HCAAARKCBKrBBG/TBGCzABhzBBdzBC/xgNoRCJMTCQhBCCmSAHHJgKayCQiiGzbAdKmAv1EAdNMBRaIaTcA4uwlW4Dj1wD/phCJ7BKLyBCQRByAgTYSHaiAFiilgjjggXmYX4IcFIBBKLJCDJiBRRIkuRNUgxUopUIFVIHfI9cgI5h1xGupE7yAAygvyGvEcxlIGyUT3UDLVDuag3GoRGogvQZHQxmo8WoJvQcrQaPYw2oefQq2gP2o8+Q8cwwOgYBzPEbDAuxsNCsTgsCZNjy7EirAyrxhqwVqwDu4n1Y8+xdwQSgUXACTYEd0IgYR5BSFhMWE7YSKggHCQ0EdoJNwkDhFHCJyKTqEu0JroR+cQYYjIxh1hILCPWEo8TLxB7iEPENyQSiUMyJ7mQAkmxpFTSEtJG0m5SI+ksqZs0SBojk8naZGuyBzmULCAryIXkneTD5DPkG+Qh8lsKnWJAcaT4U+IoUspqShnlEOU05QZlmDJBVaOaUt2ooVQRNY9aQq2htlKvUYeoEzR1mjnNgxZJS6WtopXTGmgXaPdpr+h0uhHdlR5Ol9BX0svpR+iX6AP0dwwNhhWDx4hnKBmbGAcYZxl3GK+YTKYZ04sZx1QwNzHrmOeZD5lvVVgqtip8FZHKCpVKlSaVGyovVKmqpqreqgtV81XLVI+pXlN9rkZVM1PjqQnUlqtVqp1Q61MbU2epO6iHqmeob1Q/pH5Z/YkGWcNMw09DpFGgsV/jvMYgC2MZs3gsIWsNq4Z1gTXEJrHN2Xx2KruY/R27iz2qqaE5QzNKM1ezUvOUZj8H45hx+Jx0TgnnKKeX836K3hTvKeIpG6Y0TLkxZVxrqpaXllirSKtRq0frvTau7aedpr1Fu1n7gQ5Bx0onXCdHZ4/OBZ3nU9lT3acKpxZNPTr1ri6qa6UbobtEd79up+6Ynr5egJ5Mb6feeb3n+hx9L/1U/W36p/VHDFgGswwkBtsMzhg8xTVxbzwdL8fb8VFDXcNAQ6VhlWGX4YSRudE8o9VGjUYPjGnGXOMk423GbcajJgYmISZLTepN7ppSTbmmKaY7TDtMx83MzaLN1pk1mz0x1zLnm+eb15vft2BaeFostqi2uGVJsuRaplnutrxuhVo5WaVYVVpds0atna0l1rutu6cRp7lOk06rntZnw7Dxtsm2qbcZsOXYBtuutm22fWFnYhdnt8Wuw+6TvZN9un2N/T0HDYfZDqsdWh1+c7RyFDpWOt6azpzuP33F9JbpL2dYzxDP2DPjthPLKcRpnVOb00dnF2e5c4PziIuJS4LLLpc+Lpsbxt3IveRKdPVxXeF60vWdm7Obwu2o26/uNu5p7ofcn8w0nymeWTNz0MPIQ+BR5dE/C5+VMGvfrH5PQ0+BZ7XnIy9jL5FXrdewt6V3qvdh7xc+9j5yn+M+4zw33jLeWV/MN8C3yLfLT8Nvnl+F30N/I/9k/3r/0QCngCUBZwOJgUGBWwL7+Hp8Ib+OPzrbZfay2e1BjKC5QRVBj4KtguXBrSFoyOyQrSH355jOkc5pDoVQfujW0Adh5mGLw34MJ4WHhVeGP45wiFga0TGXNXfR3ENz30T6RJZE3ptnMU85ry1KNSo+qi5qPNo3ujS6P8YuZlnM1VidWElsSxw5LiquNm5svt/87fOH4p3iC+N7F5gvyF1weaHOwvSFpxapLhIsOpZATIhOOJTwQRAqqBaMJfITdyWOCnnCHcJnIi/RNtGI2ENcKh5O8kgqTXqS7JG8NXkkxTOlLOW5hCepkLxMDUzdmzqeFpp2IG0yPTq9MYOSkZBxQqohTZO2Z+pn5mZ2y6xlhbL+xW6Lty8elQfJa7OQrAVZLQq2QqboVFoo1yoHsmdlV2a/zYnKOZarnivN7cyzytuQN5zvn//tEsIS4ZK2pYZLVy0dWOa9rGo5sjxxedsK4xUFK4ZWBqw8uIq2Km3VT6vtV5eufr0mek1rgV7ByoLBtQFr6wtVCuWFfevc1+1dT1gvWd+1YfqGnRs+FYmKrhTbF5cVf9go3HjlG4dvyr+Z3JS0qavEuWTPZtJm6ebeLZ5bDpaql+aXDm4N2dq0Dd9WtO319kXbL5fNKNu7g7ZDuaO/PLi8ZafJzs07P1SkVPRU+lQ27tLdtWHX+G7R7ht7vPY07NXbW7z3/T7JvttVAVVN1WbVZftJ+7P3P66Jqun4lvttXa1ObXHtxwPSA/0HIw6217nU1R3SPVRSj9Yr60cOxx++/p3vdy0NNg1VjZzG4iNwRHnk6fcJ3/ceDTradox7rOEH0x92HWcdL2pCmvKaRptTmvtbYlu6T8w+0dbq3nr8R9sfD5w0PFl5SvNUyWna6YLTk2fyz4ydlZ19fi753GDborZ752PO32oPb++6EHTh0kX/i+c7vDvOXPK4dPKy2+UTV7hXmq86X23qdOo8/pPTT8e7nLuarrlca7nuer21e2b36RueN87d9L158Rb/1tWeOT3dvfN6b/fF9/XfFt1+cif9zsu72Xcn7q28T7xf9EDtQdlD3YfVP1v+3Njv3H9qwHeg89HcR/cGhYPP/pH1jw9DBY+Zj8uGDYbrnjg+OTniP3L96fynQ89kzyaeF/6i/suuFxYvfvjV69f
O0ZjRoZfyl5O/bXyl/erA6xmv28bCxh6+yXgzMV70VvvtwXfcdx3vo98PT+R8IH8o/2j5sfVT0Kf7kxmTk/8EA5jz/GMzLdsAAD+3aVRYdFhNTDpjb20uYWRvYmUueG1wAAAAAAA8P3hwYWNrZXQgYmVnaW49Iu+7vyIgaWQ9Ilc1TTBNcENlaGlIenJlU3pOVGN6a2M5ZCI/Pgo8eDp4bXBtZXRhIHhtbG5zOng9ImFkb2JlOm5zOm1ldGEvIiB4OnhtcHRrPSJBZG9iZSBYTVAgQ29yZSA1LjYtYzA2NyA3OS4xNTc3NDcsIDIwMTUvMDMvMzAtMjM6NDA6NDIgICAgICAgICI+CiAgIDxyZGY6UkRGIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyI+CiAgICAgIDxyZGY6RGVzY3JpcHRpb24gcmRmOmFib3V0PSIiCiAgICAgICAgICAgIHhtbG5zOnhtcE1NPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvbW0vIgogICAgICAgICAgICB4bWxuczpzdEV2dD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL3NUeXBlL1Jlc291cmNlRXZlbnQjIgogICAgICAgICAgICB4bWxuczpzdFJlZj0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL3NUeXBlL1Jlc291cmNlUmVmIyIKICAgICAgICAgICAgeG1sbnM6ZGM9Imh0dHA6Ly9wdXJsLm9yZy9kYy9lbGVtZW50cy8xLjEvIgogICAgICAgICAgICB4bWxuczpwaG90b3Nob3A9Imh0dHA6Ly9ucy5hZG9iZS5jb20vcGhvdG9zaG9wLzEuMC8iCiAgICAgICAgICAgIHhtbG5zOnhtcD0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wLyIKICAgICAgICAgICAgeG1sbnM6dGlmZj0iaHR0cDovL25zLmFkb2JlLmNvbS90aWZmLzEuMC8iCiAgICAgICAgICAgIHhtbG5zOmV4aWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vZXhpZi8xLjAvIj4KICAgICAgICAgPHhtcE1NOkRvY3VtZW50SUQ+YWRvYmU6ZG9jaWQ6cGhvdG9zaG9wOjk4NDVkYzlhLTM2NTEtMTFlOC1hMDRjLWMzZmRjNzFmNjFkZDwveG1wTU06RG9jdW1lbnRJRD4KICAgICAgICAgPHhtcE1NOkluc3RhbmNlSUQ+eG1wLmlpZDo3YzQ4OTMyZS0wM2FjLTIxNDctYTJiZi1iNmViOWU4ZDY2Y2Q8L3htcE1NOkluc3RhbmNlSUQ+CiAgICAgICAgIDx4bXBNTTpPcmlnaW5hbERvY3VtZW50SUQ+MEIzOTNDRjk1RDQ0RDlGMDNFQjEzQkZEQ0UxRDA5MjM8L3htcE1NOk9yaWdpbmFsRG9jdW1lbnRJRD4KICAgICAgICAgPHhtcE1NOkhpc3Rvcnk+CiAgICAgICAgICAgIDxyZGY6U2VxPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5zYXZlZDwvc3RFdnQ6YWN0aW9uPgogICAgICAgICAgICAgICAgICA8c3RFdnQ6aW5zdGFuY2VJRD54bXAuaWlkOmQ0ZjMzNDFjLTRkYjctZjc0YS1iZTAxLWYxMGEwMzNhNjg4ZDwvc3RFdnQ6aW5zdGFuY2VJRD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OndoZW4+MjAxOC0wNC0wMlQxNjo0MToxMCswODowMDwvc3RFdnQ6d2hlbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OnNvZnR3YXJlQWdlbnQ+QWRvYmUgUGhvdG9zaG9wIEVsZW1lbnRzIDE0LjAgKFdpbmRvd3MpPC9zdEV2dDpzb2Z0d2FyZUFnZW50PgogICAgICAgICAgICAgICAgICA8c3RFdnQ6Y2hhbmdlZD4vPC9zdEV2dDpjaGFuZ2VkPgogICAgICAgICAgICAgICA8L3JkZjpsaT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+Y29udmVydGVkPC9zdEV2dDphY3Rpb24+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpwYXJhbWV0ZXJzPmZyb20gaW1hZ2UvanBlZyB0byBpbWFnZS9wbmc8L3N0RXZ0OnBhcmFtZXRlcnM+CiAgICAgICAgICAgICAgIDwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpIHJkZjpwYXJzZVR5cGU9IlJlc291cmNlIj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmFjdGlvbj5kZXJpdmVkPC9zdEV2dDphY3Rpb24+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpwYXJhbWV0ZXJzPmNvbnZlcnRlZCBmcm9tIGltYWdlL2pwZWcgdG8gaW1hZ2UvcG5nPC9zdEV2dDpwYXJhbWV0ZXJzPgogICAgICAgICAgICAgICA8L3JkZjpsaT4KICAgICAgICAgICAgICAgPHJkZjpsaSByZGY6cGFyc2VUeXBlPSJSZXNvdXJjZSI+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDphY3Rpb24+c2F2ZWQ8L3N0RXZ0OmFjdGlvbj4KICAgICAgICAgICAgICAgICAgPHN0RXZ0Omluc3RhbmNlSUQ+eG1wLmlpZDo3YzQ4OTMyZS0wM2FjLTIxNDctYTJiZi1iNmViOWU4ZDY2Y2Q8L3N0RXZ0Omluc3RhbmNlSUQ+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDp3aGVuPjIwMTgtMDQtMDJUMTY6NDE6MTArMDg6MDA8L3N0RXZ0OndoZW4+CiAgICAgICAgICAgICAgICAgIDxzdEV2dDpzb2Z0d2FyZUFnZW50PkFkb2JlIFBob3Rvc2hvcCBFbGVtZW50cyAxNC4wIChXaW5kb3dzKTwvc3RFdnQ6c29mdHdhcmVBZ2VudD4KICAgICAgICAgICAgICAgICAgPHN0RXZ0OmNoYW5nZWQ+Lzwvc3RFdnQ6Y2hhbmdlZD4KICAgICAgICAgICAgICAgPC9yZGY6bGk+CiAgICAgICAgICAgIDwvcmRmOlNlcT4KICAgICAgICAgPC94bXBNTTpIaXN0b3J5PgogICAgICAgICA8eG1wTU06RGVyaXZlZEZyb20gcmRmOnBhcnNlVHlwZT0iUmVzb3VyY2UiPgogICAgICAgICAgICA8c3RSZWY6aW5zdGFuY2VJRD
54bXAuaWlkOmQ0ZjMzNDFjLTRkYjctZjc0YS1iZTAxLWYxMGEwMzNhNjg4ZDwvc3RSZWY6aW5zdGFuY2VJRD4KICAgICAgICAgICAgPHN0UmVmOmRvY3VtZW50SUQ+MEIzOTNDRjk1RDQ0RDlGMDNFQjEzQkZEQ0UxRDA5MjM8L3N0UmVmOmRvY3VtZW50SUQ+CiAgICAgICAgICAgIDxzdFJlZjpvcmlnaW5hbERvY3VtZW50SUQ+MEIzOTNDRjk1RDQ0RDlGMDNFQjEzQkZEQ0UxRDA5MjM8L3N0UmVmOm9yaWdpbmFsRG9jdW1lbnRJRD4KICAgICAgICAgPC94bXBNTTpEZXJpdmVkRnJvbT4KICAgICAgICAgPGRjOmZvcm1hdD5pbWFnZS9wbmc8L2RjOmZvcm1hdD4KICAgICAgICAgPHBob3Rvc2hvcDpDb2xvck1vZGU+MzwvcGhvdG9zaG9wOkNvbG9yTW9kZT4KICAgICAgICAgPHBob3Rvc2hvcDpJQ0NQcm9maWxlPnNSR0IgSUVDNjE5NjYtMi4xPC9waG90b3Nob3A6SUNDUHJvZmlsZT4KICAgICAgICAgPHhtcDpDcmVhdGVEYXRlPjIwMTgtMDQtMDJUMTY6MjM6NTUrMDg6MDA8L3htcDpDcmVhdGVEYXRlPgogICAgICAgICA8eG1wOk1vZGlmeURhdGU+MjAxOC0wNC0wMlQxNjo0MToxMCswODowMDwveG1wOk1vZGlmeURhdGU+CiAgICAgICAgIDx4bXA6TWV0YWRhdGFEYXRlPjIwMTgtMDQtMDJUMTY6NDE6MTArMDg6MDA8L3htcDpNZXRhZGF0YURhdGU+CiAgICAgICAgIDx4bXA6Q3JlYXRvclRvb2w+QWRvYmUgUGhvdG9zaG9wIEVsZW1lbnRzIDE0LjAgKFdpbmRvd3MpPC94bXA6Q3JlYXRvclRvb2w+CiAgICAgICAgIDx0aWZmOkltYWdlV2lkdGg+MjU0PC90aWZmOkltYWdlV2lkdGg+CiAgICAgICAgIDx0aWZmOkltYWdlTGVuZ3RoPjI1NDwvdGlmZjpJbWFnZUxlbmd0aD4KICAgICAgICAgPHRpZmY6Qml0c1BlclNhbXBsZT4KICAgICAgICAgICAgPHJkZjpTZXE+CiAgICAgICAgICAgICAgIDxyZGY6bGk+ODwvcmRmOmxpPgogICAgICAgICAgICAgICA8cmRmOmxpPjg8L3JkZjpsaT4KICAgICAgICAgICAgICAgPHJkZjpsaT44PC9yZGY6bGk+CiAgICAgICAgICAgIDwvcmRmOlNlcT4KICAgICAgICAgPC90aWZmOkJpdHNQZXJTYW1wbGU+CiAgICAgICAgIDx0aWZmOlBob3RvbWV0cmljSW50ZXJwcmV0YXRpb24+MjwvdGlmZjpQaG90b21ldHJpY0ludGVycHJldGF0aW9uPgogICAgICAgICA8dGlmZjpPcmllbnRhdGlvbj4xPC90aWZmOk9yaWVudGF0aW9uPgogICAgICAgICA8dGlmZjpTYW1wbGVzUGVyUGl4ZWw+MzwvdGlmZjpTYW1wbGVzUGVyUGl4ZWw+CiAgICAgICAgIDx0aWZmOlhSZXNvbHV0aW9uPjcyMDAwMC8xMDAwMDwvdGlmZjpYUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6WVJlc29sdXRpb24+NzIwMDAwLzEwMDAwPC90aWZmOllSZXNvbHV0aW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8ZXhpZjpFeGlmVmVyc2lvbj4wMjIxPC9leGlmOkV4aWZWZXJzaW9uPgogICAgICAgICA8ZXhpZjpDb2xvclNwYWNlPjE8L2V4aWY6Q29sb3JTcGFjZT4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjE1MDwvZXhpZjpQaXhlbFhEaW1lbnNpb24+CiAgICAgICAgIDxleGlmOlBpeGVsWURpbWVuc2lvbj4xNTA8L2V4aWY6UGl4ZWxZRGltZW5zaW9uPgogICAgICA8L3JkZjpEZXNjcmlwdGlvbj4KICAgPC9yZGY6UkRGPgo8L3g6eG1wbWV0YT4KICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI
AogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAg
ICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICA
gICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgIC
AgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgI
CAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIAogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAKPD94cGFja2V0IGVuZD0idyI/Pu2egpoAAAAgY0hSTQAAeiUAAICDAAD5/wAAgOkAAHUwAADqYAAAOpgAABdvkl/FRgAATH9JREFUeNrsvXecHNd1Jvqde2+lDtMTMYNBBgEQgQBIkARJgDkomrJkW1a2ZdkKKwet465lv5Vl++3bfSu9nzd4HVfRFJUlizYVKAYxZ4JEznmQJvd0qKob3h+3qrtnpgeBwCDQKP7qR0zq7rr33BO+c8536E9/93mc64to4nc4IjkAZYrwnBZwngEZH+XqETiOA8HaoDWBsQiAg0gOwhGtKFePIOPPQKlyBBm/F1INwxHtkHoEgloBqsDoDBjXiOQQHNYBYgpRPATBukDMQJsiimPH0Jq7EqCYNIUUhoOI4pi3BAtbtYm7ifQMUNwjVaW3Go30Vioj7WFYCkqV4Xy1UsrEsuooHXMlJQNAXLjSdfzY9zLVIMiXspn2kUym7YTrBCOCZQ8TOYMw/Jg2dIhI9MfyeOS6GTDyjdHcGEMAK4HBh9YcYXwYgd8FrR3E6jg8pwNaMYBVAOMjVoMQrBVj5f1obVkEJTUYVzDKgaEqYlWEKzpgdAVEORCLUY1OQEmDXHYOtB5DNRoEmQCe2wlDCgwm2Swg/acxdM5kQOCNfREABoBprVoM4lWa4qXamNnVcKh3646/W/3qlh9eAyPguh5c14UQApw7kFLCGAPOOTjnICJorUHJqTHGQCkFrTW01gAAxghhRUIrA8flCAKOVct//uGZ3cs3CO7uMBQfNEbvIHKOElAFoOvb+sa6xBtPkIgAMGN0Tsl4qSF5XRiN3vDMtr9ZtXXno6tcNwNDAIEhk8mgd+aCREg0YACtNZRSYMQBAhgxGA0oraCUgjEGRATGGDgXcASrCZvWGr4jEmGzQvfKxh/c9eKG796llIQxGr7vYv3aj3yns+PKpzmxVwnuRgBDAKlE0C4L1kUkTwwgboyaK+PqlXFcun33oWeuffSp/3On72WQy7WAMYaWfAeEcME4QxzHMBoYK5asIDEHQvBEaKxwWIEziXln4JzV3lFrjThWANQ4LeY6DICBMQaMEzj3AHjJa0kAhCee/fIvjo0Vf9GYGOtu+MBjc2Ze9aoQlWcIwdMAHX0jCNmlLFhEREIb3RLF5RsiOXbznr5nr3/08X+8O5crwPM89HTPTTYZiakykDIGFCXmi4GIw3EEHMeBUgpxLEEEENE4gTHGTHxzEFHt++nXcRwjfUNjCEpFtZ8zxqAV4PsBfD+AMQbbdz10+4sbvnW7lOGn1qz6xWcWL1z3OMF/hlPwJCMaNYbkpWguxaUpT+RoLefGcektAyMbb/7xI//9PcYAhUIr2tq74bq+1RBaQ2szYfN5Ys74uFeN49iGGVwAhgGUuj8mCUZ04uimDq6ZEKgkX/PUE6bExeNA4iorxWCgEEYagK4JYzbbDhCw+8CTN7229Uc3SSlx+7qPPDir5+ofccZ+TET7AYouJQETl5ZAMU/K6tJIVe7dve/he5589mu3dHZ2oLu7F6VSCUoaOI5b84UatUtt+82p9sYkQtUsDjAAmQbhmuolpvg5aRAmfpZE8MnUzHB7exueeem+t42M/PXbbljzC88vXXL7j10efJuI7SKi6qVgJsUlIE6MiPlSVldEqvyO1zZ/997N23+62vcDzJw5G2EYov/EQBLRuahUqrUobiqhInqdYfXphONnGLKnESXIfrZsJgclFYIgh7a2Duze/8TaF17+3tprVv3cO5ZccdOzlcrINxyRfZ6IVQ2gLgvW6wjwGOPZOK5eE8vqezZs+trtm7Y+clVXZye6Z8xCHMeIIwnOHORyPqSUUErD8wJIGdWit9d/mXNoeXSCemCcsmnUnib5dqUSgnMOrTUqpQqCIEBPzyzsO/j06u27H1ntcP/jb7rj33+hJTf3+65oeRREpYvRRF6UgsUYD7SO58dR9dde2/Xju7ft/Nk1mYyHrq4eVCoh4uIIMpkAAEMYRiAicO6AMVa7m2mp0xO0ZgLV6FdRw9cn+3tqIlyTtHEtsEg/G+cMUsoET/NQrYTwPA++1wLH8cEYwz//6C8+0tk+60O3r//tv8oG3fczxrcBVLmY9pDfetNHzwPyzqB0BQYRBPfAmAOCQCzHwDkHowDGEDiDMEBXuTrwif2Hn/3tR578u/ePlA725rI5SGlDfs/z4LoOtFa1DfA8D1orRFEIpeQ4UzhxI88Oaz2VUJ3J79XXKhX6VNCICK7rAgCUVMnzAGElAhFDEGQxVhrlm7Y+st512cp8rrvbdbJ9jDAGOIpIQ6oyjAZctwBjIkhVAcGB4Bnrz008M5O/uPQ1FhEDZ5SphINvHR7d985HnvqH95QrQ05HezeMBkZHixDCheM4GBsbgePYRRfCfnwpJYgInueBqBbtX6SXnnToGk2i57kIwxDlchmccwhhMTUpJRhnCdpPaGvrgDEKL2z41rpN235y3S03fviajtYl/5wJMt8GsbF/8xrLEa6AEfNK1SMf/emT/+3/e+nV718XBFmeyRQQRzGUVvD9AEQGcRzB973ayUpPeQpmAhbxJmKTfJiz01bn8jITzDSN+6xKKTiOUzPnjnDs88HAddwadlatVhFFIbLZFhAD37j5p0uPnth0z6yZiz3HyR81CEe00upCaawLKljahDkiunXPoWc++5PH/scnpAzdXK4TWqfRkjUpWtsNIGLQumnkmNysJlTjv08Xkcaqm0r7eWnS3egXamsbQYxBG1P3AIlABEitQQCyuRZUoqK7Y8+z63w/e2U+21lkzDnkiHz0b8YUEjFGhK5KdeRj23c//s59B59dnc350IohimxUdPk69eE1hkBJoKCVgcM9gGm2YfN37zp8ZOvsq696+5rAn/H3jPED5ztyPO8ay3UyAkZcPTC841M/fuzzv7Xv0IvzWgudLA4VqtUqgiBzmkDmv2WhogmRrl0rrTS00nA9l46f2NF54NArNxQKXZ25oLPPID4Bw9Ub0hQypn1A3LX34HN/+NTz//jeWJX9jraZCKMIcazgeV6tXOXiMl8X
p3DVBYtS3A+cWwgml20FWMR37Hr2akZsRVfn/H5G3n7GfPmGEiwildEmfNfm7Y/+8cuvfe82JgwPvDyKY2OQsYbreA1+0mWhel3xpjbg3AHAMDY2Bt/LwEBhcPjQjCisriwUenTgtW8BTEyNhX6XpmAROBfZ4ljfb2zc+uPf3r3vmZVckDCKI6xG8D0fQggopVJwFG/Q2rfphJRrGssYwBgN1/VgDBLg2Ihd+17oHhrad3N394JyJujaApiqzX1egoJFxMA4bxkt7v53jz7z15/Zvf/ZeRm/hXPmQGkNxjgY54jjGEQMrhsgDKsgumwKTxu8qOUmra9ljMX3jAGkVLXfyWVbMTR8yBsY3HVje9ssJ5Pp2kZgxfGH+BIQLCIOImo/Prjpd5984R/+ZHD4YK6Q7wFMCh+k6lsnQCdDHIfwfX+cQ3r5aiZMk4WBiACTwC3GwMCAcwatjbUIWiOXa8Hw8GHv0JFNN7QWur1CftZmIhqtv95FLlg2X8c7jxx/6T8+/eL/+YOR0WNe4HdAKzUlxkRk/84YdVlyzsBxp7QYO6kbownZJLumBjApwJxFqTLCjw3sXNOS78wV8r2bATbcPL95EQkWEYPgvLPv+Mt/8NwrX/mdsdKA57o5sOSJpVTjksTNAcTL1+tc/UnrN96lMCAy8P0syuVhfrx/+6pcriPbWpi9CcDIuUR4zrFgMQghWo4Pbvq9p1/4wqdGRvoy+VwnolhCKg3BHXCeVnBeFoNpig0TN0LXBMn6XiopgrS51Uwmj9FiP+8f2HNVW6GHt7bM2mQMiufKBTlHgmW7rDgXmaHR7R9/6vl//MORkUP5lpYulCsRMpkcCAQp4yTlclmwpsn7avBNzaQIXSlZixyjKEI+V8BYaUAMDO9b2VboEflsz6sEVjLnwCyenWARQKRhIME5c0bH9n30yRf+4TNHju1obS3MhIwkiPi4tikrVJel6nz6Y6mg2ZY1B0QMcWyB6Ewmj+GRPndg5OCqttaZ0vOyzxrEkohs89PrFazb1n205vS9rpvFIBY5sRx528sb//nPdu19pretpRdaaTAuAAK0SdUyA2O4rK3Om5/ViMyjFo3bchwrYJVKFfl8KwaGDrhShkvb2zr6OKdtnHNFJGD069ssdraPwZjgSus7d+557g8P9W3uKeR7wLgAI9v5EscSjuPAcVwYI6G1bdy8fE2nUDW7rW+rtd0DItvJbStWY+SCVoyMHPH2H9ryB1qbexkTZ1WgcFaCxYiTAVbvPfDCp17b8i/XK1POEHEorSCVBhccjusklZ1xreHzcn75wvhfnueBMQ4pbfUtY6j1U4I4qvFI28YtP1m2d/8rnzLG3ETE2XkXLAIDEbqPHHvtYxu3PnCb0mXXET4YM1BSgjiHSaIRK1AmKbvll32s84h1NX6tlK1xS0u3bXe2DahsJ5oDYtrZsvOhNYeOvPZJwMyn1+lnsdcr/VwYMTC08wPPvPTV95XKJzJBUIBO+A9sM+hltXTpmM8EqNAxXCdAGI9kn3v5a/cODu/6AOc8eD0RIktA2TO6OWciksN3bd/zyG8cH9jdEmTaEUUxOBdQ+jKUcMkiYNqi84GXx3CxL/vChq99Yqx8+N2CczHtGsuWB5t5h/o2ffJA34Z5He2zIOO4xtRSj0QuX5falbbOhWGIrvbZ2Hfw5d4dex/599qEy06eLTkHgsUZc4eL+9/76tYf3KFMJSiPVcGIQQgHcWwBOPshLpvCS0+wCK7rQEqF0WIZHe1zsG3Xk8uOD2z/NYBazsQksjN7Y0GlyrE3P7fhK785MHQoz+HDEQ5c14dSGq9DY16+Lqa40ega2ZxtO2OIoqL/3Ctf+5VSpe+dXDBuS5pOjW+y0wdDCUR69omBTR/Zs/fpmdmgHXEs4fk+qtUIaaGZZXcZ7xRevho37+KOJOM4hhAOOBOQsUQu24G+41s6Dh558deNia9gjCX1cie/T1tjcQ5nuLj/l1/d+qObO9rnwRgNIVzEsWzIAWoYY3v7pmdTzKRwenJjwekt4Ov52Zls0FSX4/DaGtlOGz3u86d0lKlP09gSpvV0A8usJvhS2Q5sKSUK+RnYvO3hawaH93wIpNw0I3my+7SiQoDIQC473r/l5w8e2dLJmGdbjwg11ZnWUTF2Lsk0mgtXvUF1/PebEaRdiFN/sp9JGcMYW3xnT3/9syulxnV3NwoSEdV+dq6xrsZDyZgtDmQcMDDQWsFowshYX/7wkZd/TutoNYGTMYST3aehsQhELBgp9v3Sa1seWjGjczaiKKxt5mQgjk3rpqULnLadN3YQ16tRT1/zncnPTtfUnew14tiWrxAxRFEMY+okJinLTCO/VypcFuBU034o0kIBrXRNezqOQCHfhc3bf7ZgcOTAhwxM2yl1HzGNk92MEwuj4bXPbfjqh4dGDrW7CeNJShjWbBEvBNwwXsCnV2ueAg2a8r2NMXBdSxFgqwvqCXmrhZHwoVryk1TQajSTenqfqb6fqYNuaviW1kAYjRSeffEr7wmjoTuIGD+ZLWQnd8IAxpQ7Utz/y9t3PD2ns6MHlUrlFJs6/ZeUMRxHwPPc2pO4riWnDcPqhKccr30nBhVnbj7Nad560nunZiKK4hpzTvoZOLdOcfpMWiswZt2NOI4gZQwh2PQeiQZq8TRgSxmloyhCa8sMHDj02ozhkd2/BKOzJ3stcbLojYixWEbLDh/dfGMu0w6lTCK9zaGFs2bMO83Lcx1UqmOJSTYNAYaA52YbyRgbGPbqQmXobE++OUNNkL53XdiJbOGdlLFlI3Q4isUhhFHCSJh8Zke4cBwfICR+7PmgH2g8bJSUOxHiWKGzfQ6ee/lb97z5jitu8Zz2HxnTPFITU08jIBDgjJX63r1j95NL8oU2hNUInuchiqqJataTiCzOlUqeWjgJxbF+9HQtO1po6R3LZFrGCKSrYTkzPNrXcqhvQ6/v5cC520RbTe1PnbPu6yk5SuuVnUJYOiJK/i1lhLHyKGb1rD6cy3SW8/n2IRjDx8rDraVKf3D4yGu9jDlwHB80jdYw9fWM0Q0HgJLcL4NUGrlsDnv2b+woVY68y3PaHrbU4aaJxppCsAgMUVydvWP3428Po9Es8daEcVhP6aBPZ2u8sQ4hKtUi5s1ec3j1il/8okH4UDbTWiJiulodC4i8ddsLvR/pH9jfOjiyb6bg3qSjcl48rybClTI1GyPhug7i2AZAvudgeGQIy6+8c8eKxW/9qlSlx1pyM4pKK1Yc629xndzN27KdHz5weEsrsUpnHE1foWR976jWtGPPnvX/tNaIwhjdXXPw1PNffvubb/uTr3he7kljJuMg/LYpSpMZI1GuHv3VJ5774nt9P8stvEBJmUVKw3OuWfPqJ8diY7xWI+9wjrGx41i08Ia916764B+2t8391vDowS2MsSNay6NhWD7Y2b5kSzabe64l37N4YPBQWxyPZbhwoY0GYwyxjCAcUQNwx7dQ0Smfwf6uaXoM0zuFaKz5sGXZlqHPgYwjS0ekFXw/A6VDCN4ytHzJbXtXL/ul38vnZjwwPHJ
wG+fuMSmjo9VwbH93x7Ktrstedt3MtQcOvdTrubmao88YT0pe7LwB20V+tsc39b7rGiu1agQCF/Y9jvXvzF25aF0xE7T/FICehLxPibeouPdo/6Z7DUI3FapG0zdd4GKqii3GYyMhYgxSVtE786r916z4hS/ngpnfUSo+NsmpV9FwHFef7Gif/Z9XL3/bS57bNiRVCDAOYgRiQCRDpC3pjSW8pwM5WEd/okCNfy3GHDAS0JpqgUEcR1AqBuMAZzyprI3QWpjbt3zJ3ZsWzrvhow5v+aFS8YmJ76m07M9l5j60cO4tX+rtXnUglpaNOxWuRhyMztpONk5sqj9TevhAhFhJgICWfCf2HXpxnZRqIYETDEPjzZrFMwDxMBq864lnv3ZdobWr5kMZo+pvMq3RiWW1S5FpIoZyuYSFc6/fmc/OvM8YKAY+ZYmzNurJnp5Ff37Nync8xXluRCsbKTLG7YycCS1S5w6aSP1OeyiU0nAcB57nQSkNRhzEDVzPwUjxKBbOuW5v94x5fxrFY09rI5uuA2MCgdeDbKbnO4sW3LgtCquwxXpUm9dTO+zn5DGoiRZLIkVWh0McN8Arm364IowH7rX6jdB4s2YvrI1qGy0dvKcaDefsa6bYRqrqp1ey0lSCTWxzaCWhtUIu1zFUDvsPVaIjKIXH4Xm5k6Dz6unZs5b/+ZqV9z4neG5Eqios6w2dRJhOl5SWmmi8Ovug0VSjDbAMMDy1AlAqRDUcxDUr37pz3uwbPxu4Mx/PB/PHDRZIr0LLDMRyFLEehDZjo/l89wFtFITgSVRuageGak0r50KwqKmJZIwgBEO5XAFnAq5DweEjG96slJo30fObBJAyZlgcj61+5oX73jyrZwFKpVKCcOsp2YinIzqxSLpJWsTt9wQPKrEsObEcRhgNQwinVv6stWwmXC/M6V3+Z2tWvfM5z2kbNojgCNEQ9ZhTntjJONfUQgWYpGlEJ4fCouUpLJLJZjAyegRLF9+2Z9XSd3/cczoe8pw2eG577fWUjhO2mACBl4c2VRhTAUxEDs8O2gkWNvyfCGEYmGkQqrqJ1Folg6YAowmtrV14/uXv3ih16fqJ6cEmUaFhsRy69cix3e3dM+Ym6G/d7zgfWFWaMzOGEszMgrxSloPAmxuNf3j70K7TMUlOLGeBfmZ277LPcC7+5JWN37+5Eg60cuZOiNzGC8fZbYSGNhocSOr9Ca7roVQexsDAcaxZ+fNbVl75i7/j8NZHm5ny9sISOMIDZ/bQcKq/Uyz72izhh4JWjeS4ds4P4WzN4VTPb98nrY/3fS+ZPaRRqQznquHx9Q5r+S4aJmWwiU6X0mb20Oiha/K5NlSrVbiumyDAVENlpzvLboxpcN51jZBtrDTY5vDcla7TylxRgL1b4DoFBH5HDQ6ZrHH0s7N7l/3l6qt+7gXfbR+MVcUu4imc3Yk41+n4WLGMwRggBNkJGdCQuoq2wrzD16x855aVS9/9O57T+XBz/5CQ8TohWA6AB4IPgg+GADAiOzp2bC4lkabNG6rEAJ2rQ25OYgrrhYC2CVlDa6CtrQuPP/2P75EqXEBkAyRigCCmGnEeFoWl5Y8//U+3BkEeRIQoCpFGhekGT7ePJYRAFMU2wkqiKN8PsHHrg9fP6Lziw75X+L1axGJ1BDhzoLVs6nclwvVcb8+Vn2Wgz27a/tOrRor7u4UITvPEnoa2MkmEaAjENKTUMAAcR6B/YB9W3fT2w11dC/9QsOzjuimjDsGYEBpxQ1CRNJrCoBoOfeSVjT+40fezkzrKDcw5xhCTZyM9btpZ6vtKCXieX9OYR47umAGqrDPk74KxXjmbUO7ApCleO1o8WiBKi/YInIvE15GJep/uqBBIC8rsqTTgTKBcHizsO/DqWm3CTyhdgdQVKF2FVGVoE0LpCjhzksivqVl8qqd78Z+tWvbWDdmgZ0CpqCGDABgT1TAhO/ZtfO409WfSnFpKE27LYADGFYLAjv2VSiKbzeBE/x6sWfmOrfNn3/InOX/+482Fl2BQSt47hmm8SYEx/d6de5/9ueGRYwWRdJfbxHTipuhEsM/6wDdiAxNfz9QgDs9za4M9iTH4mSyq4cB1xmg3/XN+200fawD39IwTA5s/duT4lqWc+/YPk8x6CjckOcTpB69rp89uOgHg3MVo8UiHNqxrRsd8YYx5mWr4kZhQNMfAmEhWPa0OUAD0gXyuc1/Gb13eP3iwXaqyrxUl9VF14ozm5sXUAwnh1goabdivauG/MQZBEGB45BBWL3/b9quXv/c3Paf9Ycbc5tqBKgDsTKDxwRUDY/wXNmx88Lf3HXp2jbDRisWvQA0gb1KJcE7NYjPXoI4IpK4B5zZkGBjauWD+7Fu+xpkYIaJ6VMiYIakqi3/21JfuzGXz4wjQ0lKK9HSe32K68Y56GI9ld+z+2bU79zz/ASHcX59aMAFj4inMonlyVu/S/7Rm5TuedXh+RKlKUtHJwbnTUFrNEkxqfAWAMayW7uBcwHXdROMDjHNwAfQPHMCKJW/effWK933SFW2PaKOa4G7MChXJKWAX552vbnrwN7ftfviGSnk0Q2Tr0I1mMCZ1SRjqwwimOVrntpAzjsOaxoxjBd/NYMfujV0EuZJYTMQiMKMJRhO0JmYQrhwePZKfOAGiWbXm+RWsBkQXLjhDsG3nw2s3bX7sw5yLXz+Z862NnrTgCdzw5KyZy//ymqvufTGX6RxRqgpjTEIMV9dY49B2shgbYwQlNezamaTUWIJzAa0r8JzCwI1r3v/K6uW//EmHW6Fq5ugbKp1MqH7htS0//M29h59c7wWui8RhJvDkTtak7mVhuvsMTOLXWRzNQh5KAb7vw+E+Qjm41mjGjObgt974scTGG7dc7fvQvoPPX8+YAzPJFzATMt7nt5iv8d2qkQQx44yWjnQLFuQ62md7IHrFatnJAzDrI3sVUtYbwICROJTPdezN5boWjYwczVfCoazvZRJ4BZOen5E1uQCBkc2bydjiTlwwCG7gOYX+pYvv2DN31lV/7LtdP22eNWMwNAZA1j9n6nwTQQjnHRu3PvTxrbt+epsxyotDCSGcBNcT9VCL0s1uxOSmb190Ml6FcwGlbKGCSmrjHcHR0dY7mM/O+xeARfzWdR8FiKB11PX4s3/1t0rGAYigzUTBakzWXgjBqufkhOMgjmN4viuOHtvVS8wtdM9YqJSKNjYTrIkCkpoNAjOAOZjLdRwMvMLywaFDhTAeDer+2sTn5zWcDUkhpNYKQeAB0OgfOICVy9+0Z86sq/5Aq+gxRxQ0I2eyUGGspqkmCpZwvLdt2Pjg7+w59MRtMMovjpaRyWRt7blBrU6+NkY48UNrhx7T6P8SwWiASCCOVM0vjaIK/MDH4b5Ns5cuetP3OOPHGSOAEQjQ8/uO7m7DKZLMF6bLmU0AUGM4joPSWBnaxMHu/Y+v37Xn+fcL4X3kZKdN6agZsq4N9OM93Vf855VL37w543UMxvFYQ8sbS6osBIwRNvHPbOODjCM7lUxGGCkexeoVb9175YK3fDrnL3gsGyxUjHkThJ
NBowggbvoZHcd7+6atD/27zTt+clulUvKF8JDN5m0innjStBIlvmNj5MrOy94w4jZI0vXGDmsObY3+0PCJPEguAZMQBhUrihStMJrBcVzEcQRA1HGSCT7VhW2ht7CH3QgnSd1EwSubHrjDGM0Wzr/WSBl9sZktNUpDk6rRKTUIlzJGP9rTs5iIiT/euPUnVxVLfZ2um7W4naaEQoCgEwiEMdvUCSIMjxzC8ivv3Hftivf/rud2/tBorYk5TYRqLBEqaoLdeT//2uYHP75t76N3ZrKuW63EKJUqENy1vhVLIz/r09lolzW0v7FJuvncZ0QMiNmoRQgHYViFcAiOKyzvmeLQJp4NHRO/9cZPAIbxSnj0fTv2PrXO9wPEcrzDO3Fmy/lvRk3UPtWnvddTTNZEKC2d4dHDvQ7LZjvbZ7vaqA2NEaKNaFXiPFEN2TcJJ7qxVDn78/muPs/LLS+XB3NhNBaEoYTwPYRxCKVtZMeEdeAF5xguHsXyRXfsufaqD/6h53Y+oLWWk7eXwaAEIEqNag1WICK4jv/mbTt/9huvbP7ne4yGbwyrfW5CI46W/nUzDTUd+zJxz1mtysEYBQMFIg3GrXl2XR+dnfMG8pm53xXJhE9WqY52pcnfi3tI0uQ0izEGnHGEUSm7eeePb1cmpiVX3CjjuPpPU0WLVr50gmvrREC1BPDwnFnL5Gjx6F/3bfp+e2tLL5SU8HwPxAxMUvNPMBgcOoorF918cM3K9ydCpZoIFYfGKGgK8+e6mbtffvUHv71j76N3BBnXj8LxLV+nSpKf76vRitlEuKmB2kQMY2ND7aZdtPLb1n0UWssZjz33V//bGO0abc4LAHouT1VtxAoIUoXO0PDBWWSclu6uBUwp+do4jZUsDmceHNEFRhkIloMQLXBEKwRv0S4rHOjsWLC7Jdcxe3jkaKEajQaxVJBKwSgFZjQq4TCWLbntwLUrP/Ap3+36F61V3FSo9BA0RWANTRCpxnKd4G0bXvvX39q666G7paraoOEiZtGsr2O9ISRNL1lB4zh2Yues5Yvf9E1GZAikZx8/fjCXNoJeejRErHYTBIyJM9t3P3rrjj3P/7Lj+B+YUiDhgMjejNzaDQjFufuThfOv+4uZMxb1K11E4HEIBmQyHsZKxzF/znUH16x83++cVKjMMDSqTWutXCd4y7adT314255H7hIO+dlMHqWx0sV9hCmppGhYd+uDotbdPVoczIDUPAFSAKm5aVtX2up9pnxI59UYTjDVEzkdwjCG7/Ps1h2P3MFJqIUL1iCKw/umzo1N/K4Cgyc56/7ZquXv+g+thd5/v3n7wyuj+Hgbo8LodavfvfPKhXd93hMd/9rM/BEEYnUChqKm4b/nBG/bsPFHn9jb99RdjFNQHK0gn+dwhHeJHGRd87dq7oiu5y+NCXuFNiEZE8/wfT8JHxmUuhS5rVhtW13XRxzH4FxmN23/0d3aaLZo4Q2yosJvnCHiLxlzHpjduzwSjve7O/Y8N2/WjCuH585e8efE3B8Zg6aOusIwtKmASDTxqYK3b935+K/t2vfYXbGOMmEYI5vNo1yuIAgyUCq+iA+0Ggf91FN9qJVVeZ6LSI31CKWqUCqekc5h5oxPG1vM+cK54rgK1/URhhE8L85u2fnQnXFU9ZYsudGPwvKXz2wxtdRG/aSna5Eo5Hs+AZgvGyN/ZIyOm34OqgImaupke272rS+99sAn9+5//PZYVjNKcwR+zqaKmHuJUGzqGhluvR2fgTHrZzmOg3JlsFeE8RgpFbdZXiQfjC5xqkfDIIRAtVqB5wWQUsJ1w+yOPY+tZ0KoK+avgZTxGQqXiYmxB13Xf0zKqGyMbCJUHNqMgEg1FSrXzbx507affuzA4SfuqFSLgetkwbhAtRpCCAdCCIRViYuZu+5kaEGqsbTRGBrqmymKxRPQWs5kTIAzDqWSojE4STjeaFcvEv3E2ISHbTRHMnEknVrFQhwDxoTZXXueuNl1stHc2cu00fjqGb5tbIwZmUpjajMKjQo4vGaO+r2btj/6ke17HrkniuNAOAGU0TAmSspOQlspIQiqwbQyMyHnac5BydVZrbsYRxxi4QaW+OUmKVvOYnjkeIeoVEehtcqmzY414I30RX1yThdvqX/NUAlH8pu2/uvdleqwv3LZmyJo+sbZ40IM2hRhUGka/TlO8As79zz3/h17Hr67XB7JcuY0gKMYV39ljLmoiRBPxS6UAtfV6pgrisUBMka3ppQ5zcuOL/4RJeMjw8bIpR7BEHFIXc5u3/vQ3X1Htyx7y52fCktj/d830OBMIJZhsjgKrtOOwJt1CveeQZlhJGmxSRGm5wX3vrrxoY8eOvb8rdXqaMYWB5pawnmqyHaqw3ExH+j0eaSUKJVHAj5r5jIWRuVPVsKBmVo10mkbXFieqdf/sPX9oElovdaA1pJpE/uVSnXmzO6FkZTRJmJUa20CCFKVARhw5kLqMTAmkgoDi9Y7ogBiOknVNJbo2OEJnIt379nz0q/u2PfY7WFYzBI5kxj6mm2OadyoZpj7BZS1yVZg/MGwUIMGwR8S1eoYAOMzxhDr8aXIl/6lJ0SNLMk3ChCkd3zgtev37W8J585Z4WitvjrRdIbxABDbBUs31GiNwC9AOBrajDXdaUd4b9+5+5n3bdn9k7ukKufiWEPrEExwCMYntdJNNCcXO0g6VeFno3MvpJJISxFTp8z+X580pL+40OBT+V2pObQNAlLFiOIYleh4HuaJ9cSYnjdnZTmOw+9MBlAb6uCNgetm4TjZKdv7XSe4dcv2Rz+y88Cjb4qisWwcayhNKLQWMDpWrC3+pXhwT/a56zXwHCACu/uW34dSkqWDEmsnZlLPnb7oH/rkNUlJ14kBOHeQz+VhtMHw6LGWbbsfuvnAwU2/7jje+87mM7hO5q1bdz76+xu3/cubBweOZpUEpNTw/QAjo6NwRForb07zM186QVMjOS4ACCImUibitDSmNrP5Qse3p2HrT3ryqZE5JRGuxH2slEOAGBxPwHVdVg3HfKViR6qoGUhqSc8YRxSXAKim76t0tVVJ6WcyeUMMkLGB52cQSwmlYttHoAkEmmT6LgUNNtVnnEiJrmTMmdZKEhg4E0mHq0HaeDleqC6kKdQT7okPyibdxhAYeWDkIY4t1uV6BlKWIFUE4RgYFcJ3suXermtfmD9v1RdjGX6lmVAFfhuCoM2av5Nolyiu3r9o4fV/t3Th3Q8L7o0KxyCslsCgwMEgyAGjiUWGqt7LSAosoaZiTfbwQp/xifTdjRToaXtgHIdwXS/mRlcYF+LjlXCoC0lxvC071c1ikgt1ViZhUpOJOsYLg6W1tlqYc1YbIGXrqrglyBVc57Pd/WtWvuO/RFHpPlvxUP8PMPDcHDJ+q+W5khVoo8CZM+WaELC1UOh2BMsU+gcPzmRcuUK4CReFPbTjZbPeS0AXpIjyHB5/rSEEB5E/KDw/B2N0qEdVEi7W6YouVYCUMQHXtW36Whm4niVCi6IIFBsIhyMXFErLFt3z6Pw5133H4TPud8XMpn6ZNiVoUx6XU
NZGIXBmQpkQUTwwqX4tjsOvLpi3JhJuprpp27/eVS4Pt6QkabZplY3zW415Y4yH0doS5ikZc+F7GRijwxT/uVQjlnG+TjJMgEEg1jYCZLVadauNA78wPH/O6q/FMrzf4c2puw1KMBgDQTQxzhK+0w0YjUgOT3LApQy/MW/2ChZWi/7OfY/dWiqNZG1KpFEDX/yg6Jlc6Vg6xlwlgiAPbVQlddovdQwr/exRFCEIOFwuEuBTQzgCXBCy/oxD99z6+38EnbtfMFVrzmj0JzXGoM0Y6CT01wYKntsDkEEUj0yyYlFUuf/KRTcapQ32HPzZbVpFmTjW46iu30iX7SIyCIJsKFoL3UYrNTiZmvrSFCgAEMLyNjAGhGEIx+FgnKFUHkNP4Yq+u2/5/T+Gzt1vjGxu/jAGZYoWRTenet8YnjMzcd6Hmzj0la+vXHZbJFXMd+x57BYYGYw3neyMzPzFDvkopdDWOnNY5HMzoJQ8GsdRUsMUn/NhQOf7kjJKJpNFCMMKGPcglcTcWUv7brvhU38E03Jfc6ECjClDm9Fac2rDsgkiygKwDB5NhMt2/FQnvWYYlb+7evmdnis8uXPfY7dF8Wi2RsQyqfaNXeKCJdHePrOfeW7OeG520NJGs4u8Q6fZyaYmLIMErSWIgFwuC98L4pbczCPrrv/EZ8gU7ptKUxnESe8fmxiFcqXju8Kocp8x5peIJrU3w5gYvjMTjmhpCiZHceX+JVes/eayRfc84ohMSWtb3qOha+H69I+NOz8RfDbTNiI8twVSxfssqVYMz3MS+qJmWFJjeD+dwqdP6SRGUQQiAddxEccxiBm4roswLENKiUwmgFQxQKK0YPYtT61a8eYvkW67X5tTlf6Or1IgYpwxduOOPS/87r4DL1+5eOENH+/pXjhCxH6CCS3NBhKCdUKyCMZUm+FcX75iwbVSG4mtO396F5HMlMMYnHMoFYMLDqOaU3Ger3Eyk9ciTZIygFTCYk01vgitNUDGcpJpAxggn+06LJJysnIYhhBJMX+9uuHijfo8z85MVDoGYwQDk0x7sGPQoihEa1tXed7M9Y8tvuLab1aqxfsDt3CGqp1xRuzmXXuf+/SWHT+6RepisHXnaFs1vOHTi+bf7pKgHxgzfuSHMTF8txexPAGpi5MOYBRV7lu2+JaqjCOxbfcjt/kOz8SNlNqEk6LbF87OTT3VDIZqqaowrELwoF/AOCCj9wiH1bz6dMLCmWiR84uXKEiJGtWS6zpQKnHUXQbPc6EUL8/suOapRQuu/U4UVb5C5J6pv8A54+v27n3pj7bvfuhmqcpBpQwIGis8//J964ZHDn/uxmt+tcp57ifGGDW+ySCGIzphpIbSpSaaq/ydlSvuYoxxtW33w3dpXQmEcGA7plVTJ/7CCNbJotd0oHy9XZALgipLcJbdxqx5ZwcLha6yVnGNI6r5C503ROSkt+OIhKeJgXM7HsWab9vbRsSrSxbc9vhVy27/pyiufPHMnVDGAbp5/8HX/uO2PT+9aWjkaCYKDTKZDBh30do6C9t2P7bw1W3f+6tqfPhWbUJhYYnG0ScSHu8G58256KOo8q2rlt35xYVz1z8euNmKIEIYhrUUz+R0yYWaHptWuuimyiUFjuspHg1j+H5+640fBcBUS651+d4DL67wvCBhN2kc5WEm+B8XJvVQnzqqoJROmPQsF6jrChA38L18padz1fPXrHjTF8O4/E8pdELE4fCWk5h4AqAAijkRrT9w6LU/eWHDt26thsMtrpsDkmkT1TCEVhKeF6BULhJnzqrWwpw+xpw9SkfGMlJbs6ERgVMWGmFNWNLeOxtgqK2zZy0va83ajg/sme05zLFrn5LXsgmMN+wCuyiNQHKdnM4+E8EYjba27tEr5t35fwuQBhGi1sLMvZEMkeMF28qj0VD3zi64KZxcXGYpqeNYwnU5iBmQdEoLZq17+srFN349jCtfO7lzPrm6lBHnYOLGvQde/ePXtv7oBsdFRsscytUKgmwGUko4ngBpglYSUhbbn3rxS+vCaOxzS664pRrJys8AoxzugTEOqSI7QcKgaY9horm+u2zJLY5SMdu686e3A8av41xUW/96FcSFEiQk1S6JPNTYBOu0BVEU4abrPnw/Iz4qiIUAmPG81m1RGELKZD5L+scXrKmiriknCpXrujUGX2vjBbTildUr3v74/NmrvhdFlS9MnldYQ7kaxMnURs0x4iR15db9+5//vS07H72hWDxeACwduB9kkhEsEkYBnAkorcDhoNAyA3v2v9KTzXb/eU/XFZ81pB4h4soywlhaR5CB0hGIeFNG5ziufuOqpXeSMZrt2vfErVFU9ifmH9Mk+vlztZoIVdNfYyBmD2oYVpDxWw+CjBEE3/6V1rsZs0RarDEJnYSZ5xdoswOO0sJDa7eBtOurWq1CCJ6YFAPGvOri+bc+Pbt3+QOxDP9hqtc0JoLGSDKYQAJGg9mZhgyEa7fufOgvHnvqb9d3z7gShUInyuVKbQxcOiGDE4OMZY2byxiOyAy3bdv5yJVE7LM9MxaAiB5Bw5SGGk6mFUzNxZggXLL69ZXL7iSHB3Lzzh/eGUUVn3O3llu0dOIE4hfijDeMmGvIc6b8/0iYZ7RR4Dy7k1hcI7fVMGxfb8+CYRVXYUycFPppWFPJUOc3x7T7V0QWOmjkk9faEqZxnoS2yUBsx8mU58+64bkli264X6nob5rmsJgH1wkS3s90YxMSEeJExNfuPbDhL3bte2pld/dCAAyVStkKslGA1uAgy6evTZKZqPNFMXIwWuzr3Lb90SuOH9/7nwDcSlNQ9qREuM2uWEb3L128/ivLFt39hO/lylpb7lFjbDkK4/okjjSd4wCrEcOytXn16SQGhnRSesSgVARGGr5bKBrjvQrj6cR5B4hxPaNz7uxNWx++NpfLI5aNuMX4ov/pDn2tEPFaOxHnTm34udaWSBbQ4Myvrlz21icWX3HDN5WK/rFx+qvVZhqMBHyvBdqo2gFJI5jEMb5+/6FXP71hyw9uC+PhPMHF5JqpU4f9nLuoRIPZ/oGDLdlM55UtLV17jTEHJlaIMsYRRZUkl8knra8xevOMrgXadXLZvqM75jCmHTsoQDfJ4TYjXps+J0xpBV7rP7XMyUbboRJSxrjlpl99sCU37xsAKrUp9oyoEgSdG8fGRpo4tudXsOxGCcSxhNawD2AMoiiElBEYI3DhVxbOXf/83Fkrvydl+PdNQQuyw8ebNT4QERGxtYf6Nn/6pde+86ZYjuW09JISmZOX4DYP/Q0E8xGq4dZXNz+4uu/Ijs8QsVuoyWIR2cGeU2kuKaP7Fsxd882rr3r7z7TWVS4MtJa40ARAaQdX+rx2KIPlawjDCjra521hzAwRaTBiBsmtGYJXWlu7x7SWYDxtXW829Oh8hCb2g7uuSOrFrVkIMgHGxoaweN4tLyxfsv6ftI7/NnU0TcNgSyIGxh3oKRpBidjaw0e2fmbjth/eEutqoKUDz8skNOTNI9Fmh6sRY1JawXNyiOLhtlc3P7iq7+j2/0xEt0zVQxjHlSmF
S2n51SsXrfvK0kV3P1epVEPHZRe8yiYF0FMQPWWPtpmOMlynbQORApEEv+WGj9Vm6RBYOZ/Prd1/8JUrHOHZLWqSlJ5ejZU2jlJiDpFQEjEIh0HKEIHfPrDu2g/+LwP+d0QCjASIOSAIcJYQqDGq4VdErEZ+n2Au1x4+suXTr2x6YP1wsa+D4FmqQyagoUHQTf3Ieqqrcd4hw8RBmEK4COPRTH//gZZCvmdRLtu+x0AfhDENs3ssPhXLsMaHioaS6MRObOrqnGdkrGcPDPe1C0GOnd9zYUwhMYKSEgCDEA6kjGELYQzy+c6hxfPf9JeMvEEYAX7bul+vDyJipITDVmzY9MD6IGhJeTmbCtN0C1f6+ikJnO+7GBkdhO+1Dv/cPX/8F54z838KnoPg+drtiAIcXrBdy6Y8Dlg0iQZkxK452LfpT55/5Rt3Rmq4zeFZRLFEJp+35jaswOF8ypTKxFb+8S1clqZaSgnBHShT9YeHj+V8r7Ayl23bA+DA+FF5VhDiuALXKQBQ4wQr4ZF/tbtzPofh3YePbpxbr7c//4JlkgAuHXJuO8UJ5fII7rrlt74e+D33p3jOuAmrjBvlO51PZrPtZZClpKmTa+nz1geXOu4pViUEQyQjZDOtI3ff+ttf8sXM/1FPfajaxAmDGJEcQqSOT9I4iUa4ev/BVz7z4qvfvLtc7S9wcqGUQiYIoKREFIfwPK+p+az7bTSBRXq88EVRFUJYuMR1shgt9XW/vPEH1xzv3/sXBHZrc7PIwKnNdhWxAERe7WbkgZj391fMX/tI4LWPXUjkPRUqxuwsRhulM5TKo2jJzX6SMYpqU+yNdpDeWjmKseDlq1f8/AtxHKbzpRKHjTC+/3D6UXbGWOK4AyMjQ1i94u0vtrUs/IINgBnS2w6gLILIoFw9OKmcODE71xw8vPFPn9/wzXvCeKSlJd8JKe3zhHEIKUMQLJ96OpmmBhFSA5NFEy1V97U0HMetzbIOwyo496FMsXXztoeWnhg88BlGfF3z9dNwWCuUBBzRAUe0Q1ArHNEJz5mBTDDrS8uvvOelKKo0rFPj8+EcaSszQQOO/6zC4bWeAp50O/XMWNRvjP8CGehaMJhwkCa3hHD48Vm9K58bHe6H57pJDTzVnLXz4binYGR98azDmM91DsayuCNWRSP1GKQeQ6yK0KaEcvWQXYYJqRPrU4mVBw+/9qfPv/yNNxmEGUZZVKvSTmN3OAQjcAJE4kMYolpbpb0bvh4/33GSQ68UAIha/4CVRoHh0b7uTdseXt53dNefgeiWZj6cgcZIcReUGoPWFShTgdJlSFUCUXy0q2POnjCqTkj6mob6KHUOhKphXmGtRzOhPOesNj7PTloFBocGcfv6T35TcH+nTocsaAJLQdDaDaNcXngin+8uOkIkw7KjmgbxfT+ZBzz9wtVYFxYEPjwnU6mEJ9xKdBiV8DCq8VHE6gQidaJpLi7RDNfs2f/in27Y9P27KuFQ3rIi1/Gg9JzUKzjP/uDYTR7v/ziOj5HRAz2vbv6Xa/sHDnyWMb6+aYDAHETxAKQeAVC23FumCGPKcJ38sTQb0ejzpUM5p+vM1+f9EMJqhCAIQGRHzygdwvc6n2CMxpVrjzOFRjvQmivHyT+7fu37f9Y/cARaaziOY9MZnMMWBE5vTXydq6sOZsaxRBiXM47IS9cpwHVbEwwl07STxg5jFGuOHNv+6ede/qd3hNFQIZdpbUieTg2fjJvFQHRG8zgaJ7FOfEUhfFSjwfbN2x5afuLEvs8wJm6iptGnnW4bRSEEy4FTDkQBoqhUaNRU05ejpQnPUB/Z7DguoiiCUgrV6hiuWrr+ACPvJWOgU/+KyKapMfHmnA91dix9UIhcKZv1YWBPRGoK+TTP7k27PdL3srlDhWJpoC3wOmdm/V7K+jPtMO/m4CeIsbXH+nd9euP2f73NIPajiECMIVZqaoFKupFrbe6gxkkroLMi3EvhCAel6onuV7f88Oq+ozv+jHOxbiqzNDJ2GIwyIMrBGLdlcOTAirSpuF4ONH7S6jlL40ziR0unn1lQ1A9cALxyw5pf+9+OCPam0z7SmzVKWcPkWOU6bQ9dfdVbN5YrowlFt4RSCo7jJBPUpzv6YDUBIwIymSz2H3rpirHS0Q+WKseM0vHU6RUmru8fOPAHL7zy7VtGx/q6QK4dlJ203J+saYEmLMTZOcBNokrGwZmPajzQtWHTD647fGTHZx3h31CfQ9j4uwKAQhgPoFI9/q7DR7fMcRyvyXOfKw1GJ30OYyxO53kewrCMxVes3yV4/odNEpeYMlHqcOfA7N41P9HKqfierYV3XbcWEUx3VGgrQa1gKanhexnsO/Di/L0Hn31TsXzobs6cQpN8XeAI/4bj/fv+42tbHlw3PHp4hpYOOBNwPdt0YSPcKbTVOYl2TQOIOnnTiBiq1SqM5pB6tH3Ljoeu2b7nib8Lo8G7OHdyE/+GMZEN4+N37D307If3Hnx+gdsgWJPR/3OpsfSE52CWrsC1Puqx44ewcO7aJ4UQWwGlJw5m57ev+41JM55sdEXa4ZkThsKb+45tmSm4XwMsp9sUpoFC+l4mwdAYMURyzC2Xi2/JBO3McYXKBIUWzkQPAfNK5ZGf23PwqT/buvPxZYPDB2ZmM60Jg46EUjaqUSoG52IS9c70+Sv1M5xuvOMIVKtlOK4HUDWzZ9+LPQbVdfnczIhzsIzfWhDC7YrjytJqtfT2HXse+09Hj++YGUWVHBeNTbSNkMfZd/GkETiNq8GrR4jGYi1QKkJH++yBq5a880859/dbUtXxAiSmKuAy0EY4zs5ZM1c98fizX14zZ9bSpA5KnJeRKKlWTBOdWht4bgbFsWM9hw5vw8jIsd/r6lz4K76XGzBGCynDoH/owIy+Y6/OhPHgudkaRbTWBMaQzGxWp8we0ElO/unxd1BTK2APZgyAIZMJEMUxjCbkMh3YtvPxRZVy6Y9yuY6i62QHichUo9Hu0eLR/L4DL/Xk823J57dVnCnrYt23OnvU3fq0KbSUuiPJVFmtEyCXMDh6HHeu/8SDrsi+rCVZ8G/CJU7GdKLBolzQ+91F89a8t1gZ6HZdL+nnm+56LGoQKl1DKLVhgAnQ3pZB/+C+riPHd3QBGgSCNtomqYNWGMVr6QdAJ4MvbWRpYYmTb8DZkb807wdML8exmkspDZYMOAIIrpPHgcMbZisdQWmbKxVcwPV8FAozEMeRbcKF2+C0o0n0eTZWgmpUS5bD3Q6/tGg7wIwBEGNGx4KBlvz8bxPjVTOFW1SbYt/s1tKYwG99Ze01731waLivNqa1MTI5v50j6RxkBtfNIBMUkMu2I5ttQy7bBt/L2c82LklcTxo3q9Y439dUy2WLFgO0tHShtaUbLblOZIJWCB7U5jBbZ940pNnOLbM158w2/xJqkJLW0g5igIHnuTh+4hDWXvueR1y38JQ2WjX3o05VckgGMKycz8395qIFNxwaHjkO33chZYSJAykbJxZMdz5LKUuqZgxqwGBqLrW6tEk1ZKyglElowwGd/JsRB2cC9fay5hm
As7lsUWWKV0YgYnBdD4CluqxGJcydvbK/rbDw2wxs6KTa75S+jlHK89oev371B74UeG1jjBNYLS+lm5xIOi9EYmkFZgpL2Iw7b358cCHHDk8VedGEFrsU50rXr055qTUavtf8mey6n/2nsz60hpSWS8zCPYQg42N4pA/Xrn7nQ77X+bA2J4cG2Gnq73I+1/vt5Utu23Fi4BAcz0W9Y6QxacnOy+aljmvaU2hvlbSESUwMfSfeF4tgpcLVeNvDwpPINW0YYYlWPrm2O1vf1yb8rVC5rlvrTPI8H8PDx7Bw/vWH2lsXf4lOoa1gMxbmlBthjIZg7vZ5c9Z+Z2bX0mNhmDQaoJHs9HyajEYMUyfREepMzzjVffFexqikIVfWyoEYS1F203QNztXap9yttqqEI44j+J4HjRie21a8btUHv+i77T/TWulTLXFT5H3ybWCMqbYXFnx91Yq3vjg0fAzE6qWyxjQKGTsP5qaxBDkp02BUuy/lq24FTMMzUm0fUs02mXrg7C1FY6SZ5oiJMRw+tAtXX/WWl9oKc7+ujY5Oy1WZ6AROdWttYDTtm9G29MuLFlx/bKzUb0+Q0XAda4vTU8Y5JTmjyaH36UWRJz8O4/kMWHInAm7YReVjTaSwPp3fn1jBUF+7M3zvCXlOTKBCMpO+RkMvp22SKJWHMHvWysE5M6//KpGzzRhjTkcZscbE4clvglRKZzMzH7h+9fu+0JLvGVMqgjIaSisIwSEES7SbQmP/3pnb/pML1ni/JN0AdlLn9uJx3k/tP041q+Z01ubsQOk68G0picpw3ZbyzTf86jdzmd4HlNT6dFfvjOFzo3W1s23xl5dececLA4N9CDIZSBmjWq3C8wII4SKKZK2ZYXrycZev09orGn+jSd9CY2pLa8s0ZAwQBAFGiseweP6NO2e0X/kFIvSfieCesWBpo0Fwdi6Yve5/Ll64vq842g/X9UDEUCqVE5IOr8FU6Vpt1RtjothF7qOdxhJPJDJObwvZWIUwMnocC+evOb5k4R1/Q+S9orU2pxMWNfYunfEVx1Lnsr0P3rTmV/66tWX2SKU6htbWtqSSUaORF7RZWuPydWE016m8AM45XNdDGBYR+O3l6656zxdacnO+rpSRZ7qD7PUeC6Vk2Nqy8CtXr3jXdxg5uhoWEQSBBfkSUK3Zffk6f1rLANA0njqtnsCe7NNprVCpFOG5LeW1V7/7Xzvbl3xRaz3yeny3112iYIyGgTk0p/fqv7lmxTt/qmIWxnEFQkzu2D3taV2Xr3Ovrc7ApTfGYGjkCK5YuHbPgrnX/i/GaOdUcxmnTbBSCRfcfXnxFTf9ryWLbtsJ45ZqbHYate6OFCK47GtdTHjZ+MNu2QpHsGjhdcUrF975OULmaSm1OSPHqhEgPWuoUseaSD90xYI1/8+smVcOjhZPJInqCqSMklJmm9vj3EFjl/NJHvuShQsu9KVhamU/BDSMqSMYpcCYDQ5t+ZOttQoCD2Pl4+hon1teu/pD/1dLZu7XpdTybHK+/NabPnqWupbBaC59t21bPt9lBof23jI0fMjJZvPjhKRarSYj3sQ4Op7m/OWXBetsNFHzG+DckqMppeH72STHKlGNRtBW6K2uXf3Bz3d3XPXftdbVs8XEzl6wQCASIBI6G3Ruam/tZcOjB9eMFE+4rhPUKhFTBjybaJ1c7XjZsT8P/lZSYOg4rgW8ZYxYlpDLdoZrr/7A/5ndc93nldZD5hzUrJ0DwWp0ExG15Hs25XIdwbH+XdeNlQZ4Nsgn6R47G9l20sqGasXLwnXuBWhy1GeHVNpiwShKhi6YEL6bU9eu+uX75s+58b8ajYPanJtCyHMoWDXhKudzPZtb811e/+Duq0uVQeE6QcIfb/sAp2rGuCxY584pn4iqpwUCQgiACGFYRD7fUb121bu/Pn/Ojf8vge04l91X51iwrHARUbGQ793U0tLlHTq65epSpV9kMznb2St4rUHisnBNf7SXfo8xS8oLGMh4LNFU7/7Wwnnr/ysRban1FlysgkWEpOacRltys17taOtl/UN7rikWjzvZTB5KS1vDPUVAelmwzo0ZnAzpEIQgVCtDKLT0lG689kNfmTvr+s8B2Gw7l+jiFyzUaoporCU36+XOttl0vH/X9SPFPpEJMjaSNI0YCl0WrHN0pdUJjWbQFu4JVMNhtLZ0V9eu+ZUvze699nOA2V5vh7tEBCup8IExqGQzM16e2b24BGI9A/2HW43R3BjLfieEqAGndrSdHFe6kc4ZTmuEzgc/14U1Zaah2K8+maJx41NH3PKYxskMIZ6sTzqHx/YBCGHnUI+VhtDeOqu6/vpf/3xP58rPG2MO2PImXIqCldaW8aoj2IbO9lmDUmHR8MjhGVpHZMPflBmZJ2S27rihkBOR+ino099IxqwmTM1oKYWwh6+xoyZl4wFSagJWY96L4xCjxaPo7Vk6cttNn/wPbS2L/hZAv5nEpXWJCVaKtMdxMQKinbN6VuzzvZauE4O75hOTjAvHUlJqDSl1rcy53oGTtnfJCcx1J7svdY3WfKyfMbphViNqPZ5EDJw7cBwHURTXhmxVqkUEfr56zcp3vHD18nf+t3x27pe0UqV6Sen0CdZ5Hf6stKx4XPxo6aI7jwVBbuCp57/w3igaodbWLowVK/A9F7FUCSmIZYyzaQUFY+rTMRh7I2utxqECuiZUqfa3fZQmwaHqo5bjWCKODVzXAWccI8V+eG4+XHfdBx/o7lr899DiSa1U1YwjHJ/Gpzg/GotBqTK0CcG5pwX3j+Qzs59bOO/6o4eObryhr2+nl8m2JOReupa8TjtW0rFlqWt1ah/rUtZojUwyphHCqTnhUWQnyXJuGZrTik/H4VAqQrE0gEJ+5ujb7vqDz3e1LfkfykSvaKUizgLU2AynWWOdd8ESIrApIIhRz23bsGDu9VsyQUv7wNDehcqEEMIBo/pwppQ2KVX5xuAcEYxdzD5WvTunvoZ1jZbygabTzxzHciuUK6MwGuaaFfc+deOaD/yXXGbOP4D0EaUrGobwhhcsxuy8Gq0Ru66zs6tj8Svz56wa27X3hTWVcMDxXB+cERivd/qkwyOtYJ0qSW0uYcHSaTTdxO+yLDC2W1mCc4ZcLgdtIhw/cRAtue7KW+74vb+Z03vN5xzhPWSMU7G06iH+TQmWMQSDWHMujnui49kF86/f09U+x9l36LVF5eoIuY5XiwQ550klBZL5eCcDVy9dwaoX1jUKV70jKU3oKyXhOByl8jCqlVjdtu4jP75+9fs+l8vM+Vtieq/SsWTkgy6QYImLZDGN0aaY8Vvun9t786Ndb17yoV37nn7ry6/+YH1LoQVaWRYUz81C6wgw3DZteLw2YEBriUqlDN/3a5yl2tTbmbQyNb6H+oYZjB9ElTaH8nGQh30NNgnRTjuT698bX685ka4bqBPX1amaLC6V0m8SS37XWPPHmC0zUso63ZxszsLzGOJI6mWL7nlx4dy1TxZa5n2ByOw0GtHFMBj+gmsskEoWksMgUkTeCGP6xe6Opa9dsWBNcd/BF9cWiw
Msk8nbKelagyUcA47jgsigWCyCiNDS0gqlFCoVm5N0EkyHEmJWxgSMZqiXhUwWrObFrc0bPCdzYaFhhnMdAW8kK+Gc1Xi/4jiu+Yycc3ieWyttSRPHlpyDWV51RshmfRw7dhCMeeruW3/rq/NmX/85YuY+z2nt0zpUBBegGMZo/JvXWBOdV61VxXWC51uyize+7c7PPjNWPvyWnz7x1x853n8IM7p6wRkl7WYj8DwfuVwOSimEYYxSqYp8Pp/Miw4bwFcOJQFtJDg/BfFaQ74tRfzr32dJy4upjay1mkmPw6DqJm18f2UcpxqLQ4g6uVwUxZDSclB4XoBKpQytFQotBWhjUCoVoRGhPMZw71v+w9faWhb+wHUKP9YoF7VW6mLjpLgIBau2udoYlHwv9z2HL33wnW/6L18vR8fe8fKrD9yze99zV3Z2dqBQyCMMo3ETM7LZLKqVyLaIuw4cx6aMZGzxL1e4MIhO2q5uTKN2qp/m8YldakAuDOo0RPUhBBN5LAgcnKM20DP9mTW1wvJSRTEojsEYRy6fA0jhxLGjWLRw/dZli29+uTW/8DuuyD3CBUpaMWmMvij376IVrAb/SwJCBn7Lw9KMPHnL2o+tvv7qd71rx+6f3fnSaw9c19rahiCTQxzJZPycgOMm+UdNUEnVqhDJjBwjgSQfN0lQwMbhRs0dfWMrM5iZ4D/Z5ty6IDYGESkAZztM0re1M7hZLVeqtURLNgttJKKoihPHjmDRFTdtvvUdv/Vwxu35ymh537ZMUKjK2Chj5EW9bxe9YI3XYLriiOB5A/+V1Svee99VS996z6EjG+567KkvvNX3AgjHheAC2tjhjHGsEEVh4qPYgUJRHCcpEXPSQZcTHfVxvhXpps66FaC6xjKmLnDpzx0hAGOglAY0YEjCaA3BGTzPw+DQccRRjNvW/+pDc3rXPMR5/iexOrFdCD80RptLpcPpkhGsBhHTxpiQc3ej4NmtC+fc9f25777x2krUf9Puvc/e+OxL374xlyvA9zJwXR+OwxDHEaSMwTjgC9eaodSEGWoI8+uVFI1J8MYSFJsZ0FNEhQ1OsaEJJG8s8bEs87Tn27mDUkaoVq0/JWWAd73tT/9n4HW/wHn2OaLKXsCTkTTmYuf1egMIVoMOMyYm4ntcN7tPmcr3Vy/75QUrl9579Wj50F0HDm9c/szz31yfCXLI5bOJo62TagBqgmlNHRXVhSvVQCoRsvHCZIypZQ1qwlhj7Uumu5KLOI4wNDQAKSNcvfLtLy6Ye91rhZbZT0J7z3Pu7mVchUZzrY0xF56B8N+cYDXmQIwCjGJM7GDM3+k6me+tWvpLPSuX3LtS6eLqI8c3rX38mb//+TBUcB0fXiYAq7Wi2RdxuAuQHWBpqytMDV+yGknVcnWALV2x7Wx8XO7ScThkHCfjhy0uJWWIsdKY5ZpnHDdd/8Efz+pd9bwrWl9m8DcVy/sPCBEoaFfVO0Qv7euNIFgThcwAiBjxAyScQ6SDH83pucN/3ztvmcFIL1Zm7KpydWBR37Ety1545Ru3V6uWuS4IcrbhI7Z1TmkRoue543AnQCfFh6IGRShl66MMgNJYCCktKs64wcrlb3t6zsxVm3LZrp2c5bcYzXcTiROahkcFdzQM1839tcuCdTFfOgn7S5w5exnpfUbznzoix5Yu+IXMlfPv7SHSPSDVo3W1uxIXe6uV4a4wHMuOlYcLI6MnCqXSSCaOQ6EhGYmYu64be64fAyDXzUSZTGul0NIxVMh3H/e8/LDn5A9z5g3COIeNYQeIxPFyeLDCmNCcO8YQN+eDVfpCX///AJZI8x1p7Qe4AAAAAElFTkSuQmCC);
background-size: 22px 22px;
-moz-background-size: 22px 22px;
background-repeat: no-repeat;
position: absolute;
top: 5px;
right: 5px;
height: 22px;
z-index: 99;
width: 22px;
}
.popup_window {
display: none;
position: relative;
left: 0px;
top: 0px;
padding: 10px;
background-color: #E6E6D6;
font-family: "Lucida Console", "Courier New", Courier, monospace;
text-align: left;
font-size: 8pt;
}
}
/* -- report ------------------------------------------------------------------------ */
#show_detail_line {
float:left;
width:100%;
margin-top: 3ex;
margin-bottom: 1ex;
}
#result_table {
margin: 1em 0;
width: 100%;
overflow: hidden;
background: #FFF;
color: #024457;
border-radius: 10px;
border: 1px solid #167F92;
}
#result_table th {
border: 1px solid #FFFFFF;
background-color: #167F92;
color: #FFF;
padding: 0.5em;
&:first-child {
display: table-cell;
text-align: center;
}
&:nth-child(2) {
display: table-cell;
span {display:none;}
&:after {content:attr(data-th);}
}
@media (min-width: 480px) {
&:nth-child(2) {
span {display: block;}
&:after {display: none;}
}
}
}
#result_table td {
word-wrap: break-word;
max-width: 7em;
padding: 0.3em;
&:first-child {
display: table-cell;
text-align: center;
}
@media (min-width: 400px) {
border: 1px solid #D9E4E6;
}
}
#result_table th, td {
margin: .5em 1em;
@media (min-width: 400px) {
display: table-cell;
padding: 1em;
}
}
#total_row { font-weight: bold; }
.passClass { background-color: #6c6 !important; }
.failClass { background-color: #c60 !important; }
.errorClass { background-color: #c00 !important; }
.passCase { color: #6c6; }
.failCase { color: #c60; font-weight: bold; }
.errorCase { color: #c00; font-weight: bold; }
tr[id^=pt] td { background-color: rgba(73,204,144,.3) !important; }
tr[id^=ft] td { background-color: rgba(252,161,48,.3) !important; }
tr[id^=et] td { background-color: rgba(249,62,62,.3) !important; }
.hiddenRow { display: none; }
.testcase { margin-left: 2em; }
/* -- ending ---------------------------------------------------------------------- */
#ending {
}
.detail_button {
width: 130px;
text-decoration: none;
line-height: 38px;
text-align: center;
font-weight: bold;
color: #fff;
border-radius: 6px;
padding: 5px 10px 5px 10px;
position: relative;
overflow: hidden;
}
.detail_button.abstract{background-color: #4dbee8;}
.detail_button.passed{ background-color: #66cc66;}
.detail_button.failed{ background-color: #cc6600;}
.detail_button.errored{ background-color: #f54f4f;}
.detail_button.skiped{ background-color: gray;}
.detail_button.all{ background-color: blue;}
.piechart{
width: 200px;
float: left;
display: inline;
}
</style>
"""
# ------------------------------------------------------------------------
# Heading
#
HEADING_TMPL = """<div class='heading'>
<h1>%(title)s</h1>
%(parameters)s
<p class='description'>%(description)s</p>
</div>
""" # variables: (title, parameters, description)
HEADING_ATTRIBUTE_TMPL = """<p class='attribute'><strong>%(name)s:</strong> %(value)s</p>
""" # variables: (name, value)
# ------------------------------------------------------------------------
# Report
#
REPORT_TMPL = """
<div id='show_detail_line' style=" float: left; width: 100%%;">
<a class="abstract detail_button" href='javascript:showCase(0,%(channel)s)'>概要[%(Pass_p).2f%%]</a>
<a class="passed detail_button" href='javascript:showCase(1,%(channel)s)'>通过[%(Pass)s]</a>
<a class="failed detail_button" href='javascript:showCase(2,%(channel)s)'>失败[%(fail)s]</a>
<a class="errored detail_button" href='javascript:showCase(3,%(channel)s)'>错误[%(error)s]</a>
<!--<a class="skiped detail_button" href='javascript:showCase(4,%(channel)s)'>跳过[%(skip)s]</a>-->
<a class="all detail_button" href='javascript:showCase(5,%(channel)s)'>所有[%(total)s]</a>
</div>
<table id='result_table'>
<colgroup>
<col align='left' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
<col align='right' />
</colgroup>
<tr id='header_row'>
<th>测试组/测试用例</th>
<th>总数</th>
<th>通过</th>
<th>失败</th>
<th>错误</th>
<th>视图</th>
</tr>
%(test_list)s
<tr id='total_row'>
<th>统计</th>
<th>%(count)s</th>
<th>%(Pass)s</th>
<th>%(fail)s</th>
<th>%(error)s</th>
<th> </th>
</tr>
</table>
<script>
showCase(0,%(channel)s);
drawCircle('circle%(channel)s',%(Pass)s, %(fail)s, %(error)s);
</script>
"""
# variables: (test_list, count, Pass, fail, error)
REPORT_CLASS_TMPL = r"""
<tr class='%(style)s'>
<td>%(desc)s</td>
<td>%(count)s</td>
<td>%(Pass)s</td>
<td>%(fail)s</td>
<td>%(error)s</td>
<td><a href="javascript:showClassDetail('%(cid)s',%(count)s)">详情</a></td>
</tr>
""" # variables: (style, desc, count, Pass, fail, error, cid)
REPORT_TEST_WITH_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td ><div class='testcase'>%(desc)s</div></td>
<td colspan='5' align='center'>
<!--css div popup start-->
<span class='status %(style)s'>
<a class="popup_link" onfocus='this.blur();' href="javascript:showTestDetail('div_%(tid)s')" >
%(status)s</a></span>
<div id='div_%(tid)s' class="popup_window">
<div style='text-align: right; color:red;cursor:pointer'>
<a onfocus='this.blur();' onclick="document.getElementById('div_%(tid)s').style.display = 'none' " >
[x]</a>
</div>
<pre>
%(script)s
</pre>
</div>
<!--css div popup end-->
</td>
</tr>
""" # variables: (tid, Class, style, desc, status,img)
REPORT_TEST_NO_OUTPUT_TMPL = r"""
<tr id='%(tid)s' class='%(Class)s'>
<td><div class='testcase'>%(desc)s</div></td>
<td colspan='5' align='center'><span class='status %(style)s'>%(status)s</span></td>
</tr>
""" # variables: (tid, Class, style, desc, status,img)
REPORT_TEST_OUTPUT_TMPL = r"""
%(id)s: %(output)s
""" # variables: (id, output)
IMG_TMPL = r"""
<a onfocus='this.blur();' href="javascript:void(0);" onclick="show_img(this)">显示截图</a>
<div align="center" class="screenshots" style="display:none">
<a class="close_shots" onclick="hide_img(this)"></a>
%(imgs)s
<div class="imgyuan"></div>
</div>
"""
# ------------------------------------------------------------------------
# ENDING
#
ENDING_TMPL = """<div id='ending'> </div>"""
# -------------------- The end of the Template class -------------------
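# Py2 compatibility shim: the template strings above are stored as UTF-8
# byte strings, so decode them to unicode on attribute access under Python 2;
# under Python 3 they are already str and are returned unchanged.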
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if PY3K:
return value
else:
if isinstance(value, str):
return value.decode("utf-8")
else:
return value
TestResult = unittest.TestResult
class _TestResult(TestResult):
# note: _TestResult is a pure representation of results.
# It lacks the output and reporting abilities of unittest._TextTestResult.
def __init__(self, verbosity=1, retry=0, save_last_try=False):
TestResult.__init__(self)
self.stdout0 = None
self.stderr0 = None
self.success_count = 0
self.failure_count = 0
self.error_count = 0
self.skip_count = 0
self.verbosity = verbosity
# result is a list of result in 4 tuple
# (
# result code (0: success; 1: fail; 2: error;3:skip),
# TestCase object,
# Test output (byte string),
# stack trace,
# )
self.result = []
self.retry = retry
self.trys = 0
self.status = 0
self.save_last_try = save_last_try
self.outputBuffer = StringIO.StringIO()
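# startTest/complete_output implement per-test capture: stdout and stderr
# are pointed at self.outputBuffer while a test runs, and complete_output()
# restores the real streams and returns whatever the test printed.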
def startTest(self, test):
# test.imgs = []
test.imgs = getattr(test, "imgs", [])
# TestResult.startTest(self, test)
self.outputBuffer.seek(0)
self.outputBuffer.truncate()
stdout_redirector.fp = self.outputBuffer
stderr_redirector.fp = self.outputBuffer
self.stdout0 = sys.stdout
self.stderr0 = sys.stderr
sys.stdout = stdout_redirector
sys.stderr = stderr_redirector
def complete_output(self):
"""
Disconnect output redirection and return buffer.
Safe to call multiple times.
"""
if self.stdout0:
sys.stdout = self.stdout0
sys.stderr = self.stderr0
self.stdout0 = None
self.stderr0 = None
return self.outputBuffer.getvalue()
def stopTest(self, test):
# Usually one of addSuccess, addError or addFailure would have been called.
# But there are some paths in unittest that would bypass this.
# We must disconnect stdout in stopTest(), which is guaranteed to be called.
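# Retry flow: while the last test failed (status == 1) and retries remain,
# optionally drop the failed result (save_last_try), tag the docstring with
# "_retry:<n>" and re-run a copy of the test against this same result object.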
if self.retry and self.retry>=1:
if self.status == 1:
self.trys += 1
if self.trys <= self.retry:
if self.save_last_try:
t = self.result.pop(-1)
if t[0]==1:
self.failure_count -=1
else:
self.error_count -= 1
test=copy.copy(test)
sys.stderr.write("Retesting... ")
sys.stderr.write(str(test))
sys.stderr.write('..%d \n' % self.trys)
doc = getattr(test,'_testMethodDoc',u"") or u''
if doc.find('_retry')!=-1:
doc = doc[:doc.find('_retry')]
desc ="%s_retry:%d" %(doc, self.trys)
if not PY3K:
if isinstance(desc, str):
desc = desc.decode("utf-8")
test._testMethodDoc = desc
test(self)
else:
self.status = 0
self.trys = 0
self.complete_output()
def addSuccess(self, test):
self.success_count += 1
self.status = 0
TestResult.addSuccess(self, test)
output = self.complete_output()
self.result.append((0, test, output, ''))
if self.verbosity > 1:
sys.stderr.write('P ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('P')
def addFailure(self, test, err):
self.failure_count += 1
self.status = 1
# Debugging hook for this framework: if the test exposes a Selenium-style
# "driver" attribute, keep a reference so a screenshot can be attached below.
a = getattr(test, "driver", "")
TestResult.addFailure(self, test, err)
_, _exc_str = self.failures[-1]
output = self.complete_output()
#print(test,output,_exc_str)
self.result.append((1, test, output, _exc_str))
if a:
try:
test.imgs.append(a.get_screenshot_as_base64())
except Exception as e:
print(e)
if self.verbosity > 1:
sys.stderr.write('F ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('F')
def addError(self, test, err):
self.error_count += 1
self.status = 1
TestResult.addError(self, test, err)
_, _exc_str = self.errors[-1]
output = self.complete_output()
self.result.append((2, test, output, _exc_str))
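# As with failures, attach a screenshot when the test carries a live webdriver.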
if getattr(test, "driver", ""):
try:
driver = getattr(test, "driver")
test.imgs.append(driver.get_screenshot_as_base64())
except Exception:
pass
if self.verbosity > 1:
sys.stderr.write('E ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('E')
def addSkip(self, test, reason):
self.skip_count += 1
self.status = 0
TestResult.addSkip(self, test,reason)
output = self.complete_output()
self.result.append((3, test, output, reason))
if self.verbosity > 1:
sys.stderr.write('K ')
sys.stderr.write(str(test))
sys.stderr.write('\n')
else:
sys.stderr.write('K')
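# HTMLTestRunner drives a test run and renders the collected _TestResult
# into the HTML report via the Template_mixin templates above. Typical use:
# with open('report.html', 'wb') as fp:
# HTMLTestRunner(stream=fp, verbosity=2, title='Suite').run(suite)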
class HTMLTestRunner(Template_mixin):
def __init__(self, stream=sys.stdout, verbosity=1, title=None, description=None, is_thread=False, retry=0, save_last_try=True):
self.stream = stream
self.retry = retry
self.is_thread=is_thread
self.threads= 5
self.save_last_try=save_last_try
self.verbosity = verbosity
self.run_times=0
if title is None:
self.title = self.DEFAULT_TITLE
else:
self.title = title
if description is None:
self.description = self.DEFAULT_DESCRIPTION
else:
self.description = description
def run(self, test):
"Run the given test case or test suite."
self.startTime = datetime.datetime.now()
result = _TestResult(self.verbosity, self.retry, self.save_last_try)
test(result)
self.stopTime = datetime.datetime.now()
self.generateReport(test, result)
if PY3K:
# for python3
# print('\nTime Elapsed: %s' % (self.stopTime - self.startTime),file=sys.stderr)
output = '\nTime Elapsed: %s' % (self.stopTime - self.startTime)
sys.stderr.write(output)
else:
print >> sys.stderr, '\nTime Elapsed: %s' % (self.stopTime - self.startTime)
return result
def sortResult(self, result_list):
# unittest does not seem to run tests in any particular order.
# Here at least we want to group them together by class.
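# rmap maps each TestCase class to its result tuples; `classes` records
# first-seen order, then results are sorted by test id within each class.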
rmap = {}
classes = []
for n, t, o, e in result_list:
cls = t.__class__
if not cls in rmap:
rmap[cls] = []
classes.append(cls)
rmap[cls].append((n, t, o, e))
for cls in classes:
rmap[cls].sort(key=cmp_to_key(lambda a, b: 1 if a[1].id() > b[1].id() else (0 if a[1].id() == b[1].id() else -1)))
r = [(cls, rmap[cls]) for cls in classes]
# name = t.id().split('.')[-1]
r.sort(key=cmp_to_key(lambda a, b: 1 if a[0].__name__ > b[0].__name__ else -1))
return r
def getReportAttributes(self, result):
"""
Return report attributes as a list of (name, value).
Override this to add custom attributes.
"""
startTime = str(self.startTime)[:19]
duration = str(self.stopTime - self.startTime)
status = []
if result.success_count:
status.append(u'<span class="tj passCase">Pass</span>:%s' % result.success_count)
if result.failure_count:
status.append(u'<span class="tj failCase">Failure</span>:%s' % result.failure_count)
if result.error_count:
status.append(u'<span class="tj errorCase">Error</span>:%s' % result.error_count)
if result.skip_count:
status.append(u'<span class="tj errorCase">Skip</span>:%s' % result.skip_count)
total = result.success_count + result.failure_count + result.error_count + result.skip_count
if total>0:
passed = result.success_count*1.000/total*100
else:
passed =0.0
status.append(u'<span class="tj">通过率</span>:%.1f%%' % passed)
if status:
status = u' '.join(status)
else:
status = 'none'
return [
(u'开始时间', startTime),
(u'耗时', duration),
(u'状态', status),
]
def generateReport(self, test, result):
report_attrs = self.getReportAttributes(result)
generator = 'HTMLTestRunner %s' % __version__
stylesheet = self._generate_stylesheet()
heading = self._generate_heading(report_attrs)
report = self._generate_report(result)
ending = self._generate_ending()
output = self.HTML_TMPL % dict(
title=saxutils.escape(self.title),
generator=generator,
stylesheet=stylesheet,
heading=heading,
report=report,
ending=ending,
channel=self.run_times,
)
if PY3K:
self.stream.write(output.encode())
else:
self.stream.write(output.encode('utf8'))
def _generate_stylesheet(self):
return self.STYLESHEET_TMPL
def _generate_heading(self, report_attrs):
a_lines = []
for name, value in report_attrs:
line = self.HEADING_ATTRIBUTE_TMPL % dict(
name=name,
value=value,
)
a_lines.append(line)
heading = self.HEADING_TMPL % dict(
title=saxutils.escape(self.title),
parameters=''.join(a_lines),
description=saxutils.escape(self.description),
)
return heading
def _generate_report(self, result):
rows = []
sortedResult = self.sortResult(result.result)
for cid, (cls, cls_results) in enumerate(sortedResult):
# subtotal for a class
np = nf = ne = ns = 0
for n, t, o, e in cls_results:
if n == 0:
np += 1
elif n == 1:
nf += 1
elif n==2:
ne += 1
else:
ns +=1
# format class description
if cls.__module__ == "__main__":
name = cls.__name__
else:
name = "%s.%s" % (cls.__module__, cls.__name__)
doc = cls.__doc__ and cls.__doc__.split("\n")[0] or ""
desc = doc and '%s: %s' % (name, doc) or name
if not PY3K:
if isinstance(desc, str):
desc = desc.decode("utf-8")
row = self.REPORT_CLASS_TMPL % dict(
style=ne > 0 and 'errorClass' or nf > 0 and 'failClass' or 'passClass',
desc=desc,
count=np + nf + ne,
Pass=np,
fail=nf,
error=ne,
cid='c%s.%s' % (self.run_times,cid + 1),
)
rows.append(row)
for tid, (n, t, o, e) in enumerate(cls_results):
self._generate_report_test(rows, cid, tid, n, t, o, e)
total = result.success_count + result.failure_count + result.error_count+result.skip_count
report = self.REPORT_TMPL % dict(
test_list=u''.join(rows),
count=str(total),
Pass=str(result.success_count),
Pass_p=result.success_count*1.00/total*100 if total else 0.0,
fail=str(result.failure_count),
error=str(result.error_count),
skip=str(result.skip_count),
total=str(total),
channel=str(self.run_times),
)
return report
def _generate_report_test(self, rows, cid, tid, n, t, o, e):
# e.g. 'pt1.1', 'ft1.1', etc
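# Result code n picks the tid prefix (0->p, 1->f, 2->e, else s); the full id
# "<prefix>t<run>.<class>.<test>" is what the report CSS keys row colors on.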
has_output = bool(o or e)
if n==0:
tmp="p"
elif n==1:
tmp="f"
elif n==2:
tmp = "e"
else:
tmp = "s"
tid = tmp + 't%d.%d.%d' % (self.run_times,cid + 1, tid + 1)
name = t.id().split('.')[-1]
if self.verbosity > 1:
doc = getattr(t,'_testMethodDoc',"") or ''
else:
doc = ""
desc = doc and ('%s: %s' % (name, doc)) or name
if not PY3K:
if isinstance(desc, str):
desc = desc.decode("utf-8")
tmpl = has_output and self.REPORT_TEST_WITH_OUTPUT_TMPL or self.REPORT_TEST_NO_OUTPUT_TMPL
# o and e may be byte strings because they are collected from redirected stdout/stderr.
if isinstance(o, str):
# uo = unicode(o.encode('string_escape'))
if PY3K:
uo = o
else:
uo = o.decode('utf-8', 'ignore')
else:
uo = o
if isinstance(e, str):
# ue = unicode(e.encode('string_escape'))
if PY3K:
ue = e
elif e.find("Error") != -1 or e.find("Exception") != -1:
es = e.decode('utf-8', 'ignore').split('\n')
try:
if es[-2].find("\\u") != -1 or es[-2].find('"\\u') != -1:
es[-2] = es[-2].decode('unicode_escape')
except Exception:
pass
ue = u"\n".join(es)
else:
ue = e.decode('utf-8', 'ignore')
else:
ue = e
script = self.REPORT_TEST_OUTPUT_TMPL % dict(
id=tid,
output=saxutils.escape(uo + ue),
)
if getattr(t,'imgs',[]):
# If the test collected screenshots, embed them (first visible, the rest hidden).
tmp = u""
for i, img in enumerate(t.imgs):
if i==0:
tmp+=""" <img src="data:image/jpg;base64,%s" style="display: block;" class="img"/>\n""" % img
else:
tmp+=""" <img src="data:image/jpg;base64,%s" style="display: none;" class="img"/>\n""" % img
imgs = self.IMG_TMPL % dict(imgs=tmp)
else:
imgs = u"""无截图"""
row = tmpl % dict(
tid=tid,
Class=(n == 0 and 'hiddenRow' or 'none'),
style=n == 2 and 'errorCase' or (n == 1 and 'failCase' or 'passCase'),
desc=desc,
script=script,
status=self.STATUS[n],
img=imgs,
)
rows.append(row)
if not has_output:
return
def _generate_ending(self):
return self.ENDING_TMPL
##############################################################################
# Facilities for running tests from the command line
##############################################################################
# Note: Reuse unittest.TestProgram to launch test. In the future we may
# build our own launcher to support more specific command line
# parameters like test title, CSS, etc.
class TestProgram(unittest.TestProgram):
"""
A variation of the unittest.TestProgram. Please refer to the base
class for command line parameters.
"""
def runTests(self):
# Pick HTMLTestRunner as the default test runner.
# base class's testRunner parameter is not useful because it means
# we have to instantiate HTMLTestRunner before we know self.verbosity.
if self.testRunner is None:
self.testRunner = HTMLTestRunner(verbosity=self.verbosity)
unittest.TestProgram.runTests(self)
main = TestProgram
##############################################################################
# Executing this module from the command line
##############################################################################
if __name__ == "__main__":
main(module=None)
| [
"[email protected]"
] | |
c89449e9d8e482494c12bfe7bc8ea37ebb1327d9 | cd1d5b7fc9e01f093d6c652876cab24aa8fe7ce6 | /nodes/pub_and_sub_node.py | 54a5df4c1707e7b49194d6ae34b6a4bac1cbb7e1 | [] | no_license | birlrobotics/gps_dnn_policy_training_and_testing_pkg | 1dd2c4b241af4e8d432d61f4fcfa59c1a7318275 | cba2b03e9cc096cb2b7133074640bb503a3e326c | refs/heads/master | 2020-07-04T09:15:46.366874 | 2019-08-14T21:34:58 | 2019-08-14T21:34:58 | 202,237,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | #!/usr/bin/env python
import rospy
from gps_dnn_policy_training_and_testing_pkg.CONSTANT import training_request_topic, training_response_topic
from gps_dnn_policy_training_and_testing_pkg.dnn_policy import DnnPolicy
from std_msgs.msg import String
import pickle
import tempfile
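# This node receives training requests as paths to pickle files holding
# (obs, tgt_mu, tgt_prc, tgt_wt), builds a DnnPolicy sized to the target
# action dimension, pickles it to a temp file and publishes that path back.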
def cb(msg):
rospy.loginfo('received %s'%msg)
with open(msg.data, 'rb') as f:
req = pickle.load(f)
obs = req['obs']
tgt_mu = req['tgt_mu']
tgt_prc = req['tgt_prc']
tgt_wt = req['tgt_wt']
dU = tgt_mu.shape[1]
pol = DnnPolicy(dU)
f = tempfile.NamedTemporaryFile(delete=False, suffix='.pkl')
pickle.dump(pol, f)
f.close()
rospy.sleep(1)
pub.publish(String(data=f.name))
rospy.loginfo('sent %s'%f.name)
if __name__ == '__main__':
rospy.init_node('pub_and_sub_node')
# Create the publisher before subscribing so `pub` exists when cb first fires.
pub = rospy.Publisher(training_response_topic, String, queue_size=10)
rospy.Subscriber(training_request_topic, String, cb)
rospy.spin()
| [
"[email protected]"
] | |
96a16b9351a209200123b2d892c8e48ed55f7fe9 | 78d7d7aeb78a8cea6d0e10b89fc4aa6c46c95227 | /2569.py | 85dab1e43ac70617d702eec9863e9e8dff8536ec | [] | no_license | GenryEden/kpolyakovName | 97db13ef93061a8c2afc6cc5acd91337f79063f1 | c5d7f631ae7ec8770e56170574b82ea2b7d8a4d9 | refs/heads/master | 2023-05-23T21:22:51.983756 | 2021-06-21T08:56:49 | 2021-06-21T08:56:49 | 350,466,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py | def getDels(x):
for i in range(1, x+1):
if x % i == 0:
yield i
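# Scan the interval for numbers with exactly six divisors and print the two
# largest divisors of each hit (the divisor list includes the number itself).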
for x in range(180131, 180179):
dels = list(getDels(x))
if len(dels) == 6:
print(dels[-2], dels[-1]) | [
"[email protected]"
] | |
5e2d9e53c5300c3f446558b3ca275cbf8bdae43f | 5cb3b2d2fe6cf136296ed206f021061774edf305 | /apps/hixny/apps.py | eb4ffc4ce600680adf7666ed49ec759ef350057a | [
"Apache-2.0"
] | permissive | whytheplatypus/sharemyhealth | 002e6a4b3633d8f5aaedbd9add0b9109723d7e5d | 79ac694686ebd7a9a121741e473afbd35f25cea5 | refs/heads/master | 2020-03-30T12:59:42.841594 | 2019-05-01T19:01:30 | 2019-05-01T19:01:30 | 151,251,593 | 0 | 0 | Apache-2.0 | 2018-10-02T12:35:16 | 2018-10-02T12:35:15 | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class HixnyConfig(AppConfig):
name = 'hixny'
| [
"[email protected]"
] | |
1322c3248b9ce3d2ab9caded7adaf73a004cd69c | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/exist.py | 49bd057ec8f18667a6b8b8648441b05b46ed0af4 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 3,005 | py | ii = [('BentJDO2.py', 64), ('EmerRN.py', 15), ('CookGHP3.py', 57), ('LyelCPG2.py', 68), ('MarrFDI.py', 18), ('RogePAV2.py', 137), ('CoolWHM2.py', 52), ('KembFFF.py', 1), ('GodwWSL2.py', 95), ('ChanWS.py', 20), ('RogePAV.py', 143), ('SadlMLP.py', 284), ('FerrSDO3.py', 10), ('WilbRLW.py', 16), ('WilbRLW4.py', 7), ('RennJIT.py', 47), ('ProuWCM.py', 382), ('AubePRP2.py', 92), ('CookGHP.py', 39), ('ShawHDE.py', 1), ('MartHSI2.py', 33), ('LeakWTI2.py', 86), ('KembFJ1.py', 24), ('WilkJMC3.py', 17), ('WilbRLW5.py', 13), ('LeakWTI3.py', 60), ('PettTHE.py', 15), ('MarrFDI3.py', 26), ('TennAP.py', 1), ('PeckJNG.py', 62), ('BailJD2.py', 2), ('AubePRP.py', 54), ('ChalTPW2.py', 52), ('GellWPT.py', 36), ('AdamWEP.py', 36), ('FitzRNS3.py', 33), ('WilbRLW2.py', 25), ('ClarGE2.py', 64), ('GellWPT2.py', 49), ('WilkJMC2.py', 26), ('CarlTFR.py', 87), ('SeniNSP.py', 81), ('LyttELD.py', 15), ('CoopJBT2.py', 14), ('TalfTAC.py', 3), ('GrimSLE.py', 27), ('RoscTTI3.py', 11), ('AinsWRR3.py', 14), ('CookGHP2.py', 34), ('KiddJAE.py', 139), ('AdamHMM.py', 6), ('BailJD1.py', 8), ('RoscTTI2.py', 22), ('CoolWHM.py', 88), ('MarrFDI2.py', 28), ('CrokTPS.py', 26), ('ClarGE.py', 85), ('LandWPA.py', 10), ('BuckWGM.py', 386), ('IrviWVD.py', 17), ('LyelCPG.py', 143), ('GilmCRS.py', 11), ('DaltJMA.py', 32), ('WestJIT2.py', 126), ('DibdTRL2.py', 25), ('AinsWRR.py', 15), ('CrocDNL.py', 3), ('MedwTAI.py', 14), ('LandWPA2.py', 15), ('WadeJEB.py', 92), ('FerrSDO2.py', 10), ('TalfTIT.py', 1), ('NewmJLP.py', 71), ('GodwWLN.py', 29), ('CoopJBT.py', 11), ('KirbWPW2.py', 33), ('SoutRD2.py', 15), ('BackGNE.py', 10), ('LeakWTI4.py', 77), ('LeakWTI.py', 32), ('MedwTAI2.py', 32), ('BachARE.py', 76), ('SoutRD.py', 13), ('DickCSG.py', 2), ('BuckWGM2.py', 29), ('WheeJPT.py', 167), ('MereHHB3.py', 165), ('HowiWRL2.py', 47), ('BailJD3.py', 4), ('MereHHB.py', 149), ('WilkJMC.py', 44), ('HogaGMM.py', 17), ('MartHRW.py', 18), ('MackCNH.py', 25), ('WestJIT.py', 88), ('BabbCEM.py', 73), ('FitzRNS4.py', 120), ('CoolWHM3.py', 57), ('DequTKM.py', 5), ('FitzRNS.py', 48), ('BentJRP.py', 55), ('EdgeMHT.py', 4), ('BowrJMM.py', 3), ('LyttELD3.py', 5), ('FerrSDO.py', 9), ('RoscTTI.py', 17), ('ThomGLG.py', 39), ('StorJCC.py', 163), ('KembFJ2.py', 24), ('LewiMJW.py', 17), ('BabbCRD.py', 36), ('MackCNH2.py', 36), ('BellCHM.py', 60), ('JacoWHI2.py', 66), ('SomeMMH.py', 94), ('HaliTBC.py', 69), ('WilbRLW3.py', 23), ('AinsWRR2.py', 11), ('MereHHB2.py', 112), ('BrewDTO.py', 34), ('JacoWHI.py', 50), ('ClarGE3.py', 71), ('RogeSIP.py', 6), ('MartHRW2.py', 23), ('DibdTRL.py', 38), ('FitzRNS2.py', 72), ('HogaGMM2.py', 18), ('MartHSI.py', 43), ('EvarJSP.py', 149), ('DwigTHH.py', 88), ('NortSTC.py', 2), ('SadlMLP2.py', 340), ('BowrJMM2.py', 12), ('LyelCPG3.py', 113), ('BowrJMM3.py', 10), ('BeckWRE.py', 7), ('TaylIF.py', 60), ('WordWYR.py', 8), ('DibdTBR.py', 5), ('ChalTPW.py', 52), ('ThomWEC.py', 22), ('KeigTSS.py', 28), ('KirbWPW.py', 67), ('WaylFEP.py', 88), ('BentJDO.py', 76), ('ClarGE4.py', 48), ('AdamJOA.py', 11), ('HowiWRL.py', 52)] | [
"[email protected]"
] | |
42b78dceab23e4ffb753bc7e07b1b91e276e9a59 | 8195e6ea99ee441ba2c23dd9dba7ceecfece37b7 | /rev2/cifar10/generate_gs_pgd.py | 076cd5ccb813b81cff16bd5dbf4928328bc94526 | [] | no_license | msglbqbqb/adv2 | f2693576dd15c73c1b0322a0bf75972a75e97f70 | e3472df42197fe6dbe035412d43a9205ede880c2 | refs/heads/main | 2023-06-03T05:31:02.820935 | 2021-06-17T16:06:59 | 2021-06-17T16:06:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | #!/usr/bin/env python
import argparse
import numpy as np
import torch
import torch.nn as nn
from rev2.cifar10.model_utils import resnet50, CIFAR10_RESNET50_CKPT_PATH
from rev2.gs.generate_gs import generate_gs
from rev2.cifar10.data_utils import cifar10_normalize
from rev2.cifar10.generate_gs_benign import cifar10_resize_postfn
def load_model(config):
model = resnet50()
nn.DataParallel(model).load_state_dict(
torch.load(CIFAR10_RESNET50_CKPT_PATH, lambda storage, location: storage)['net']
)
model.to(config.device)
model.train(False)
return model, cifar10_normalize
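# main() pairs each saved PGD adversarial image with its target label and
# computes gs maps for them in batches with the CIFAR-10 ResNet-50 model.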
def main(config):
model_tup = load_model(config)
dobj = np.load(config.data_path)
adv_dobj = np.load(config.adv_data_path)
img_x, img_yt = adv_dobj['pgd_step_1500_adv_x'], dobj['img_yt']
pgd_gs = generate_gs(model_tup, img_x, img_yt, cifar10_resize_postfn, False, batch_size=config.batch_size)
save_dobj = {'pgd_x': img_x, 'img_yt': img_yt, 'pgd_gs': pgd_gs}
np.savez(config.save_path, **save_dobj)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('data_path')
parser.add_argument('adv_data_path')
parser.add_argument('save_path')
parser.add_argument('-d', '--device', dest='device', choices=['cpu', 'cuda'])
parser.add_argument('-b', '--batch-size', dest='batch_size', type=int, default=50)
config = parser.parse_args()
if config.device is None:
if torch.cuda.is_available():
config.device = 'cuda'
else:
config.device = 'cpu'
print('configuration:', config)
main(config)
| [
"[email protected]"
] | |
9d995f3d206d6831f1d5324f3cf2a42613c66e8c | 8021f835426c5db8ed9b1763a2b71cb8f94a3357 | /scripts/forage_tracer.py | bcec97892278a7afaa1faa49bde095f421852704 | [
"BSD-3-Clause"
] | permissive | natcap/rangeland_production | 3859bcf3042bda0d7a64df426aceaaa0a5a8dfe1 | 89acd25cb90c2bd42f55973d7d22b294c80dfc1a | refs/heads/develop | 2022-12-23T17:30:53.660595 | 2021-04-11T01:28:32 | 2021-04-11T01:28:32 | 223,495,475 | 7 | 8 | NOASSERTION | 2022-12-09T04:35:37 | 2019-11-22T22:16:47 | Python | UTF-8 | Python | false | false | 2,492 | py | """Tracer code for Forage model development."""
import os
import natcap.invest.forage
import logging
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger('forage_tracer')
POSSIBLE_DROPBOX_LOCATIONS = [
r'D:\Dropbox',
r'C:\Users\Rich\Dropbox',
r'C:\Users\rpsharp\Dropbox',
r'E:\Dropbox']
LOGGER.info("checking dropbox locations")
for dropbox_path in POSSIBLE_DROPBOX_LOCATIONS:
print(dropbox_path)
if os.path.exists(dropbox_path):
BASE_DROPBOX_DIR = dropbox_path
break
LOGGER.info("found %s", BASE_DROPBOX_DIR)
def main():
"""Entry point."""
args = {
'workspace_dir': 'forage_tracer_workspace',
'starting_year': '1998',
'starting_month': '5',
'n_months': '29',
'aoi_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'soums_monitoring_area_dissolve.shp'),
'bulk_density_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'bldfie_sl3.tif'),
'clay_proportion_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'clyppt_sl3.tif'),
'silt_proportion_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'sltppt_sl3.tif'),
'sand_proportion_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'sndppt_sl3.tif'),
'monthly_precip_path_pattern': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'chirps-v2.0.<year>.<month>.tif'),
'monthly_temperature_path_pattern': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'wc2.0_30s_tmax_<month>.tif'),
'veg_spatial_composition_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs', 'veg.tif'),
'animal_inputs_path': os.path.join(
BASE_DROPBOX_DIR, 'forage_model_development_data',
'sample_dev_inputs',
'sheep_units_density_2016_monitoring_area.shp')
}
LOGGER.info('launching forage model')
natcap.invest.forage.execute(args)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f2ebc53681e61b9b3362d7242a13a47f51b55401 | 456433ac78b70cb8ae076ae166a85e349f181d7f | /systems/KURSSKLAD/KURSTERM/REFILLSLOT/templates/index.py | 3ec1fb8a10419dd5197a336406e95d43e64d2b25 | [] | no_license | shybkoi/WMS-Demo | 854c1679b121c68323445b60f3992959f922be8d | 2525559c4f56654acfbc21b41b3f5e40387b89e0 | refs/heads/master | 2021-01-23T01:51:20.074825 | 2017-03-23T11:51:18 | 2017-03-23T11:51:18 | 85,937,726 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 8,762 | py | #!/usr/bin/env python
# -*- coding: cp1251 -*-
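# NOTE: autogenerated by Cheetah from REFILLSLOT/templates/index.tmpl (see
# __CHEETAH_src__ below); regenerate from the .tmpl rather than hand-editing.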
##################################################
## DEPENDENCIES
import sys
import os
import os.path
from os.path import getmtime, exists
import time
import types
import __builtin__
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import DummyTransaction
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from systems.KURSSKLAD.KURSTERM.templates.main import main
##################################################
## MODULE CONSTANTS
try:
True, False
except NameError:
True, False = (1==1), (1==0)
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.0rc8'
__CHEETAH_versionTuple__ = (2, 0, 0, 'candidate', 8)
__CHEETAH_genTime__ = 1482336169.927
__CHEETAH_genTimestamp__ = 'Wed Dec 21 18:02:49 2016'
__CHEETAH_src__ = 'systems\\KURSSKLAD\\KURSTERM\\REFILLSLOT\\templates\\index.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Dec 21 09:10:13 2016'
__CHEETAH_docstring__ = 'Autogenerated by CHEETAH: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class index(main):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
main.__init__(self, *args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def mainData(self, **KWS):
## CHEETAH: generated from #def mainData at line 4, col 1.
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write(''' <form action=rfsMain>
\xd8\xca: <input type=text name=barcode id=\':scan\' title="''')
if False:
_('Row')
_v = VFFSL(SL,"_",False)('Row') # "$_('Row')" on line 6, col 61
if _v is not None: write(_filter(_v, rawExpr="$_('Row')")) # from line 6, col 61.
write('''">
</form>
''')
if VFFSL(SL,"varExists",False)('$datalist') and VFFSL(SL,"datalist",True): # generated from line 9, col 5
write(''' <br>
<table>
<tr>
<th>''')
if False:
_('Ряд')
_v = VFFSL(SL,"_",False)('Ряд') # "$_('\xd0\xff\xe4')" on line 13, col 17
if _v is not None: write(_filter(_v, rawExpr="$_('\xd0\xff\xe4')")) # from line 13, col 17.
write('''</th>
<th>''')
if False:
_('Всего')
_v = VFFSL(SL,"_",False)('Всего') # "$_('\xc2\xf1\xe5\xe3\xee')" on line 14, col 17
if _v is not None: write(_filter(_v, rawExpr="$_('\xc2\xf1\xe5\xe3\xee')")) # from line 14, col 17.
write('''</th>
<th>''')
if False:
_('Важно')
_v = VFFSL(SL,"_",False)('Важно') # "$_('\xc2\xe0\xe6\xed\xee')" on line 15, col 17
if _v is not None: write(_filter(_v, rawExpr="$_('\xc2\xe0\xe6\xed\xee')")) # from line 15, col 17.
write('''</th>
</tr>
''')
for item in VFFSL(SL,"datalist",True): # generated from line 17, col 9
write(''' <tr>
<td>
''')
if VFFSL(SL,"item.cnttask",True)!=0: # generated from line 20, col 15
write(''' <a href="rfsRow?id=''')
_v = VFFSL(SL,"item.rowid",True) # '$item.rowid' on line 21, col 36
if _v is not None: write(_filter(_v, rawExpr='$item.rowid')) # from line 21, col 36.
write('''">''')
_v = VFFSL(SL,"item.rowname",True) # '$item.rowname' on line 21, col 49
if _v is not None: write(_filter(_v, rawExpr='$item.rowname')) # from line 21, col 49.
write('''</a>
''')
else : # generated from line 22, col 15
write(''' ''')
_v = VFFSL(SL,"item.rowname",True) # '$item.rowname' on line 23, col 17
if _v is not None: write(_filter(_v, rawExpr='$item.rowname')) # from line 23, col 17.
write('''
''')
write(''' </td>
<td>''')
_v = VFFSL(SL,"item.cnttask",True) # '$item.cnttask' on line 26, col 17
if _v is not None: write(_filter(_v, rawExpr='$item.cnttask')) # from line 26, col 17.
write('''</td>
''')
if VFFSL(SL,"item.cntactual",True) > 0: # generated from line 27, col 13
write(''' <td class="red">''')
_v = VFFSL(SL,"item.cntactual",True) # '$item.cntactual' on line 28, col 33
if _v is not None: write(_filter(_v, rawExpr='$item.cntactual')) # from line 28, col 33.
write('''</td>
''')
else: # generated from line 29, col 13
write(''' <td>''')
_v = VFFSL(SL,"item.cntactual",True) # '$item.cntactual' on line 30, col 21
if _v is not None: write(_filter(_v, rawExpr='$item.cntactual')) # from line 30, col 21.
write('''</td>
''')
write(''' </tr>
''')
write(''' </table>
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
def writeBody(self, **KWS):
## CHEETAH: main method generated for this template
trans = KWS.get("trans")
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
write('''
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_index= 'writeBody'
## END CLASS DEFINITION
if not hasattr(index, '_initCheetahAttributes'):
templateAPIClass = getattr(index, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(index)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=index()).run()
| [
"[email protected]"
] | |
dada884103b980d1aff01dc194cce6f238446e3d | a9f97f77d30e35c6627f353e49fe2683bf7d51ed | /jiayuan/rnn_ner/rnn_ner/model.py | 0d4a500c38ebfd76e76425dbed26e9babb1efab4 | [
"MIT"
] | permissive | breezedeus/char-rnn-tensorflow | 4c3c5e27e21b4bfb077a399f6707c3ec256d2eac | 0ef7bf9e5b108ae161011f9db3705993e1b0103e | refs/heads/master | 2021-01-17T08:32:44.452317 | 2016-06-18T12:35:56 | 2016-06-18T12:35:56 | 52,412,436 | 0 | 0 | null | 2016-02-24T03:47:42 | 2016-02-24T03:47:42 | null | UTF-8 | Python | false | false | 4,936 | py | # coding=utf8
import tensorflow as tf
from tensorflow.models.rnn import rnn_cell
from tensorflow.models.rnn import seq2seq
import numpy as np
class Model():
def __init__(self, args, infer=False):
self.args = args
if infer:
args.batch_size = 1
args.seq_length = 1
if args.model == 'rnn':
cell_fn = rnn_cell.BasicRNNCell
elif args.model == 'gru':
cell_fn = rnn_cell.GRUCell
elif args.model == 'lstm':
cell_fn = rnn_cell.BasicLSTMCell
else:
raise Exception("model type not supported: {}".format(args.model))
cell = cell_fn(args.rnn_size)
self.cell = cell = rnn_cell.MultiRNNCell([cell] * args.num_layers)
self.input_data = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
self.initial_state = cell.zero_state(args.batch_size, tf.float32)
with tf.variable_scope('rnnlm'):
softmax_w = tf.get_variable("softmax_w", [args.rnn_size, args.y_vocab_size])
softmax_b = tf.get_variable("softmax_b", [args.y_vocab_size])
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
inputs = tf.split(1, args.seq_length, tf.nn.embedding_lookup(embedding, self.input_data))
# len(inputs)==args.seq_length, shape(inputs[0])==(args.batch_size, args.rnn_size)
inputs = [tf.squeeze(input_, [1]) for input_ in inputs]
def loop(prev, _):
prev = tf.nn.xw_plus_b(prev, softmax_w, softmax_b)
prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
return tf.nn.embedding_lookup(embedding, prev_symbol)
# len(outputs)==args.seq_length, shape(outputs[0])==(args.batch_size, args.rnn_size)
outputs, states = seq2seq.rnn_decoder(inputs, self.initial_state, cell, loop_function=loop if infer else None, scope='rnnlm')
output = tf.reshape(tf.concat(1, outputs), [-1, args.rnn_size])
# shape(logits) = (batch_size*seq_length, vocab_size)
self.logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
self.probs = tf.nn.softmax(self.logits)
loss = seq2seq.sequence_loss_by_example([self.logits],
[tf.reshape(self.targets, [-1])],
[tf.ones([args.batch_size * args.seq_length])],
args.y_vocab_size)  # logits range over the label vocabulary, not the input vocabulary
self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length
self.final_state = states
self.lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), args.grad_clip)
optimizer = tf.train.AdamOptimizer(self.lr)
self.train_op = optimizer.apply_gradients(zip(grads, tvars))
def sample(self, sess, chars, vocab, num=200, prime='我 们'):
state = self.cell.zero_state(1, tf.float32).eval()
#prime = prime.decode('utf-8')
print('prime: ' + prime)
prime = prime.split(' ')
for char in prime[:-1]:
x = np.zeros((1, 1))
x[0, 0] = vocab[char]
feed = {self.input_data: x, self.initial_state:state}
[state] = sess.run([self.final_state], feed)
def weighted_pick(weights):
t = np.cumsum(weights)
s = np.sum(weights)
return(int(np.searchsorted(t, np.random.rand(1)*s)))
ret = ''.join(prime)
char = prime[-1]
for n in xrange(num):
x = np.zeros((1, 1))
x[0, 0] = vocab[char]
feed = {self.input_data: x, self.initial_state:state}
[probs, state] = sess.run([self.probs, self.final_state], feed)
p = probs[0]
# sample = int(np.random.choice(len(p), p=p))
sample = weighted_pick(p)
pred = chars[sample]
ret += pred
char = pred
return ret
def predict(self, x, max_length, sess, x_vocab, idx2classid):
state = self.cell.zero_state(1, tf.float32).eval()
x_list = x.split()
def pad_line(x_list, pad):
if len(x_list) >= max_length:
x_list = x_list[:max_length]
else:
x_list += [pad] * (max_length-len(x_list))
return x_list
x_list = pad_line(x_list=x_list, pad='<PAD>')
x = np.matrix([map(lambda w: x_vocab.get(w, 0), x_list)])  # token -> id; unknown tokens fall back to 0 (renamed lambda arg to avoid shadowing x)
print(x[0])
feed = {self.input_data: x, self.initial_state: state}
[probs, _] = sess.run([self.probs, self.final_state], feed)
print(probs)
output = np.argmax(probs, axis=1)
idx2classid = np.array(idx2classid)
output = idx2classid[output]
#print(output)
ret = ' '.join(output)
return ret
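# The inner weighted_pick above is inverse-CDF sampling: np.cumsum turns the
# (possibly unnormalized) probability vector into a monotone staircase, and
# searchsorted finds where a uniform draw in [0, s) lands. Below is a minimal,
# self-contained sketch of the same idea; `demo_weighted_pick` is our name,
# for illustration only, and is not called anywhere in this module.
def demo_weighted_pick(weights):
    t = np.cumsum(weights)  # partial sums: staircase of the CDF
    s = np.sum(weights)     # total mass; weights need not sum to 1
    return int(np.searchsorted(t, np.random.rand(1) * s))
# e.g. demo_weighted_pick([0.1, 0.7, 0.2]) returns index 1 about 70% of the time.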
| [
"[email protected]"
] | |
75adc57efa8196a7a552998412254bf7156fa4ad | fe6f6d11dde2a3205ae9758c7d4eb1f824b84102 | /venv/lib/python2.7/site-packages/PIL/ImageCms.py | 20ba6a11f1b0b627beb58d25502f24e8648ba846 | [
"MIT"
] | permissive | mutaihillary/mycalculator | ebf12a5ac90cb97c268b05606c675d64e7ccf8a6 | 55685dd7c968861f18ae0701129f5af2bc682d67 | refs/heads/master | 2023-01-10T14:56:11.780045 | 2016-09-20T12:30:21 | 2016-09-20T12:30:21 | 68,580,251 | 0 | 0 | MIT | 2022-12-26T20:15:21 | 2016-09-19T07:27:48 | Python | UTF-8 | Python | false | false | 34,964 | py | #
# The Python Imaging Library.
# $Id$
#
# optional color management support, based on Kevin Cazabon's PyCMS
# library.
#
# History:
# 2009-03-08 fl Added to PIL.
#
# Copyright (C) 2002-2003 Kevin Cazabon
# Copyright (c) 2009 by Fredrik Lundh
#
# See the README file for information on usage and redistribution. See
# below for the original description.
#
from __future__ import print_function
DESCRIPTION = """
pyCMS
a Python / PIL interface to the littleCMS ICC Color Management System
Copyright (C) 2002-2003 Kevin Cazabon
[email protected]
http://www.cazabon.com
pyCMS home page: http://www.cazabon.com/pyCMS
littleCMS home page: http://www.littlecms.com
(littleCMS is Copyright (C) 1998-2001 Marti Maria)
Originally released under LGPL. Graciously donated to PIL in
March 2009, for distribution under the standard PIL license
The pyCMS.py module provides a "clean" interface between Python/PIL and
pyCMSdll, taking care of some of the more complex handling of the direct
pyCMSdll functions, as well as error-checking and making sure that all
relevant data is kept together.
While it is possible to call pyCMSdll functions directly, it's not highly
recommended.
Version History:
1.0.0 pil Oct 2013 Port to LCMS 2.
0.1.0 pil mod March 10, 2009
Renamed display profile to proof profile. The proof
profile is the profile of the device that is being
simulated, not the profile of the device which is
actually used to display/print the final simulation
(that'd be the output profile) - also see LCMSAPI.txt
input colorspace -> using 'renderingIntent' -> proof
colorspace -> using 'proofRenderingIntent' -> output
colorspace
Added LCMS FLAGS support.
Added FLAGS["SOFTPROOFING"] as default flag for
buildProofTransform (otherwise the proof profile/intent
would be ignored).
0.1.0 pil March 2009 - added to PIL, as PIL.ImageCms
0.0.2 alpha Jan 6, 2002
Added try/except statements around type() checks of
potential CObjects... Python won't let you use type()
on them, and raises a TypeError (stupid, if you ask me!)
Added buildProofTransformFromOpenProfiles() function.
Additional fixes in DLL, see DLL code for details.
0.0.1 alpha first public release, Dec. 26, 2002
Known to-do list with current version (of Python interface, not pyCMSdll):
none
"""
VERSION = "1.0.0 pil"
# --------------------------------------------------------------------.
from PIL import Image
from PIL import _imagingcms
from PIL._util import isStringType
core = _imagingcms
#
# intent/direction values
INTENT_PERCEPTUAL = 0
INTENT_RELATIVE_COLORIMETRIC = 1
INTENT_SATURATION = 2
INTENT_ABSOLUTE_COLORIMETRIC = 3
DIRECTION_INPUT = 0
DIRECTION_OUTPUT = 1
DIRECTION_PROOF = 2
#
# flags
FLAGS = {
"MATRIXINPUT": 1,
"MATRIXOUTPUT": 2,
"MATRIXONLY": (1|2),
"NOWHITEONWHITEFIXUP": 4, # Don't hot fix scum dot
"NOPRELINEARIZATION": 16, # Don't create prelinearization tables on precalculated transforms (internal use)
"GUESSDEVICECLASS": 32, # Guess device class (for transform2devicelink)
"NOTCACHE": 64, # Inhibit 1-pixel cache
"NOTPRECALC": 256,
"NULLTRANSFORM": 512, # Don't transform anyway
"HIGHRESPRECALC": 1024, # Use more memory to give better accurancy
"LOWRESPRECALC": 2048, # Use less memory to minimize resouces
"WHITEBLACKCOMPENSATION": 8192,
"BLACKPOINTCOMPENSATION": 8192,
"GAMUTCHECK": 4096, # Out of Gamut alarm
"SOFTPROOFING": 16384, # Do softproofing
"PRESERVEBLACK": 32768, # Black preservation
"NODEFAULTRESOURCEDEF": 16777216, # CRD special
"GRIDPOINTS": lambda n: ((n) & 0xFF) << 16 # Gridpoints
}
_MAX_FLAG = 0
for flag in FLAGS.values():
if isinstance(flag, int):
_MAX_FLAG = _MAX_FLAG | flag
# --------------------------------------------------------------------.
# Experimental PIL-level API
# --------------------------------------------------------------------.
##
# Profile.
class ImageCmsProfile:
def __init__(self, profile):
# accepts a string (filename), a file-like object, or a low-level
# profile object
if isStringType(profile):
self._set(core.profile_open(profile), profile)
elif hasattr(profile, "read"):
self._set(core.profile_frombytes(profile.read()))
else:
self._set(profile) # assume it's already a profile
def _set(self, profile, filename=None):
self.profile = profile
self.filename = filename
if profile:
self.product_name = None #profile.product_name
self.product_info = None #profile.product_info
else:
self.product_name = None
self.product_info = None
##
# Transform. This can be used with the procedural API, or with the
# standard {@link Image.point} method.
class ImageCmsTransform(Image.ImagePointHandler):
def __init__(self, input, output, input_mode, output_mode,
intent=INTENT_PERCEPTUAL,
proof=None, proof_intent=INTENT_ABSOLUTE_COLORIMETRIC, flags=0):
if proof is None:
self.transform = core.buildTransform(
input.profile, output.profile,
input_mode, output_mode,
intent,
flags
)
else:
self.transform = core.buildProofTransform(
input.profile, output.profile, proof.profile,
input_mode, output_mode,
intent, proof_intent,
flags
)
# Note: inputMode and outputMode are for pyCMS compatibility only
self.input_mode = self.inputMode = input_mode
self.output_mode = self.outputMode = output_mode
def point(self, im):
return self.apply(im)
def apply(self, im, imOut=None):
im.load()
if imOut is None:
imOut = Image.new(self.output_mode, im.size, None)
result = self.transform.apply(im.im.id, imOut.im.id)
return imOut
def apply_in_place(self, im):
im.load()
if im.mode != self.output_mode:
raise ValueError("mode mismatch") # wrong output mode
result = self.transform.apply(im.im.id, im.im.id)
return im
##
# (experimental) Fetches the profile for the current display device.
# @return None if the profile is not known.
def get_display_profile(handle=None):
import sys
if sys.platform == "win32":
from PIL import ImageWin
if isinstance(handle, ImageWin.HDC):
profile = core.get_display_profile_win32(handle, 1)
else:
profile = core.get_display_profile_win32(handle or 0)
else:
try:
get = _imagingcms.get_display_profile
except AttributeError:
return None
else:
profile = get()
return ImageCmsProfile(profile)
# --------------------------------------------------------------------.
# pyCMS compatible layer
# --------------------------------------------------------------------.
##
# (pyCMS) Exception class. This is used for all errors in the pyCMS API.
class PyCMSError(Exception):
pass
##
# (pyCMS) Applies an ICC transformation to a given image, mapping from
# inputProfile to outputProfile.
#
# If the input or output profiles specified are not valid filenames, a
# PyCMSError will be raised. If inPlace == TRUE and outputMode != im.mode,
# a PyCMSError will be raised. If an error occurs during application of
# the profiles, a PyCMSError will be raised. If outputMode is not a mode
# supported by the outputProfile (or by pyCMS), a PyCMSError will be
# raised.
#
# This function applies an ICC transformation to im from inputProfile's
# color space to outputProfile's color space using the specified rendering
# intent to decide how to handle out-of-gamut colors.
#
# OutputMode can be used to specify that a color mode conversion is to
# be done using these profiles, but the specified profiles must be able
# to handle that mode. I.e., if converting im from RGB to CMYK using
# profiles, the input profile must handle RGB data, and the output
# profile must handle CMYK data.
#
# @param im An open PIL image object (i.e. Image.new(...) or Image.open(...), etc.)
# @param inputProfile String, as a valid filename path to the ICC input profile
# you wish to use for this image, or a profile object
# @param outputProfile String, as a valid filename path to the ICC output
# profile you wish to use for this image, or a profile object
# @param renderingIntent Integer (0-3) specifying the rendering intent you wish
# to use for the transform
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @param outputMode A valid PIL mode for the output image (i.e. "RGB", "CMYK",
# etc.). Note: if rendering the image "inPlace", outputMode MUST be the
# same mode as the input, or omitted completely. If omitted, the outputMode
# will be the same as the mode of the input image (im.mode)
# @param inPlace Boolean (1 = True, None or 0 = False). If True, the original
# image is modified in-place, and None is returned. If False (default), a
# new Image object is returned with the transform applied.
# @param flags Integer (0-...) specifying additional flags
# @return Either None or a new PIL image object, depending on value of inPlace
# @exception PyCMSError
def profileToProfile(im, inputProfile, outputProfile, renderingIntent=INTENT_PERCEPTUAL, outputMode=None, inPlace=0, flags=0):
if outputMode is None:
outputMode = im.mode
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
raise PyCMSError("flags must be an integer between 0 and %s" % _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
transform = ImageCmsTransform(
inputProfile, outputProfile, im.mode, outputMode, renderingIntent, flags=flags
)
if inPlace:
transform.apply_in_place(im)
imOut = None
else:
imOut = transform.apply(im)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
return imOut
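# A minimal usage sketch for profileToProfile (the .icc paths below are
# placeholders, not files shipped with PIL). Wrapped in a function so that
# importing this module stays side-effect free; it is not called anywhere.
def _example_profileToProfile(path="in.jpg"):
    im = Image.open(path)
    # convert RGB -> CMYK through two ICC profiles, perceptual intent
    return profileToProfile(im, "sRGB.icc", "USWebCoatedSWOP.icc",
                            renderingIntent=INTENT_PERCEPTUAL,
                            outputMode="CMYK")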
##
# (pyCMS) Opens an ICC profile file.
#
# The PyCMSProfile object can be passed back into pyCMS for use in creating
# transforms and such (as in ImageCms.buildTransformFromOpenProfiles()).
#
# If profileFilename is not a valid filename for an ICC profile, a PyCMSError
# will be raised.
#
# @param profileFilename String, as a valid filename path to the ICC profile you
# wish to open, or a file-like object.
# @return A CmsProfile class object.
# @exception PyCMSError
def getOpenProfile(profileFilename):
try:
return ImageCmsProfile(profileFilename)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Builds an ICC transform mapping from the inputProfile to the
# outputProfile. Use applyTransform to apply the transform to a given
# image.
#
# If the input or output profiles specified are not valid filenames, a
# PyCMSError will be raised. If an error occurs during creation of the
# transform, a PyCMSError will be raised.
#
# If inMode or outMode are not a mode supported by the outputProfile (or
# by pyCMS), a PyCMSError will be raised.
#
# This function builds and returns an ICC transform from the inputProfile
# to the outputProfile using the renderingIntent to determine what to do
# with out-of-gamut colors. It will ONLY work for converting images that
# are in inMode to images that are in outMode color format (PIL mode,
# i.e. "RGB", "RGBA", "CMYK", etc.).
#
# Building the transform is a fair part of the overhead in
# ImageCms.profileToProfile(), so if you're planning on converting multiple
# images using the same input/output settings, this can save you time.
# Once you have a transform object, it can be used with
# ImageCms.applyProfile() to convert images without the need to re-compute
# the lookup table for the transform.
#
# The reason pyCMS returns a class object rather than a handle directly
# to the transform is that it needs to keep track of the PIL input/output
# modes that the transform is meant for. These attributes are stored in
# the "inMode" and "outMode" attributes of the object (which can be
# manually overridden if you really want to, but I don't know of any
# time that would be of use, or would even work).
#
# @param inputProfile String, as a valid filename path to the ICC input profile
# you wish to use for this transform, or a profile object
# @param outputProfile String, as a valid filename path to the ICC output
# profile you wish to use for this transform, or a profile object
# @param inMode String, as a valid PIL mode that the appropriate profile also
# supports (i.e. "RGB", "RGBA", "CMYK", etc.)
# @param outMode String, as a valid PIL mode that the appropriate profile also
# supports (i.e. "RGB", "RGBA", "CMYK", etc.)
# @param renderingIntent Integer (0-3) specifying the rendering intent you
# wish to use for the transform
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @param flags Integer (0-...) specifying additional flags
# @return A CmsTransform class object.
# @exception PyCMSError
def buildTransform(inputProfile, outputProfile, inMode, outMode, renderingIntent=INTENT_PERCEPTUAL, flags=0):
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
raise PyCMSError("flags must be an integer between 0 and %s" % _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
return ImageCmsTransform(inputProfile, outputProfile, inMode, outMode, renderingIntent, flags=flags)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
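# Sketch: building the transform dominates the cost of profileToProfile, so
# one transform can be reused across many images. Profile paths below are
# placeholders; applyTransform is defined further down in this module, and
# this helper is illustrative only (never called).
def _example_buildTransform(filenames):
    xform = buildTransform("sRGB.icc", "monitor.icc", "RGB", "RGB")
    return [applyTransform(Image.open(f), xform) for f in filenames]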
##
# (pyCMS) Builds an ICC transform mapping from the inputProfile to the
# outputProfile, but tries to simulate the result that would be
# obtained on the proofProfile device.
#
# If the input, output, or proof profiles specified are not valid
# filenames, a PyCMSError will be raised.
#
# If an error occurs during creation of the transform, a PyCMSError will
# be raised.
#
# If inMode or outMode are not a mode supported by the outputProfile
# (or by pyCMS), a PyCMSError will be raised.
#
# This function builds and returns an ICC transform from the inputProfile
# to the outputProfile, but tries to simulate the result that would be
# obtained on the proofProfile device using renderingIntent and
# proofRenderingIntent to determine what to do with out-of-gamut
# colors. This is known as "soft-proofing". It will ONLY work for
# converting images that are in inMode to images that are in outMode
# color format (PIL mode, i.e. "RGB", "RGBA", "CMYK", etc.).
#
# Usage of the resulting transform object is exactly the same as with
# ImageCms.buildTransform().
#
# Proof profiling is generally used when using an output device to get a
# good idea of what the final printed/displayed image would look like on
# the proofProfile device when it's quicker and easier to use the
# output device for judging color. Generally, this means that the
# output device is a monitor, or a dye-sub printer (etc.), and the simulated
# device is something more expensive, complicated, or time consuming
# (making it difficult to make a real print for color judgement purposes).
#
# Soft-proofing basically functions by adjusting the colors on the
# output device to match the colors of the device being simulated. However,
# when the simulated device has a much wider gamut than the output
# device, you may obtain marginal results.
#
# @param inputProfile String, as a valid filename path to the ICC input profile
# you wish to use for this transform, or a profile object
# @param outputProfile String, as a valid filename path to the ICC output
# (monitor, usually) profile you wish to use for this transform, or a
# profile object
# @param proofProfile String, as a valid filename path to the ICC proof profile
# you wish to use for this transform, or a profile object
# @param inMode String, as a valid PIL mode that the appropriate profile also
# supports (i.e. "RGB", "RGBA", "CMYK", etc.)
# @param outMode String, as a valid PIL mode that the appropriate profile also
# supports (i.e. "RGB", "RGBA", "CMYK", etc.)
# @param renderingIntent Integer (0-3) specifying the rendering intent you
# wish to use for the input->proof (simulated) transform
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @param proofRenderingIntent Integer (0-3) specifying the rendering intent you
# wish to use for proof->output transform
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @param flags Integer (0-...) specifying additional flags
# @return A CmsTransform class object.
# @exception PyCMSError
def buildProofTransform(inputProfile, outputProfile, proofProfile, inMode, outMode, renderingIntent=INTENT_PERCEPTUAL, proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC, flags=FLAGS["SOFTPROOFING"]):
if not isinstance(renderingIntent, int) or not (0 <= renderingIntent <= 3):
raise PyCMSError("renderingIntent must be an integer between 0 and 3")
if not isinstance(flags, int) or not (0 <= flags <= _MAX_FLAG):
raise PyCMSError("flags must be an integer between 0 and %s" % _MAX_FLAG)
try:
if not isinstance(inputProfile, ImageCmsProfile):
inputProfile = ImageCmsProfile(inputProfile)
if not isinstance(outputProfile, ImageCmsProfile):
outputProfile = ImageCmsProfile(outputProfile)
if not isinstance(proofProfile, ImageCmsProfile):
proofProfile = ImageCmsProfile(proofProfile)
return ImageCmsTransform(inputProfile, outputProfile, inMode, outMode, renderingIntent, proofProfile, proofRenderingIntent, flags)
except (IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
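# Sketch of soft-proofing with the function above: render on a monitor
# profile while simulating a press profile. FLAGS["SOFTPROOFING"] is already
# the default flag here; all profile paths are placeholders and this helper
# is illustrative only (never called).
def _example_buildProofTransform():
    return buildProofTransform("sRGB.icc", "monitor.icc",
                               "USWebCoatedSWOP.icc", "RGB", "RGB",
                               renderingIntent=INTENT_PERCEPTUAL,
                               proofRenderingIntent=INTENT_ABSOLUTE_COLORIMETRIC)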
buildTransformFromOpenProfiles = buildTransform
buildProofTransformFromOpenProfiles = buildProofTransform
##
# (pyCMS) Applies a transform to a given image.
#
# If im.mode != transform.inMode, a PyCMSError is raised.
#
# If inPlace == TRUE and transform.inMode != transform.outMode, a
# PyCMSError is raised.
#
# If im.mode, transfer.inMode, or transfer.outMode is not supported by
# pyCMSdll or the profiles you used for the transform, a PyCMSError is
# raised.
#
# If an error occurs while the transform is being applied, a PyCMSError
# is raised.
#
# This function applies a pre-calculated transform (from
# ImageCms.buildTransform() or ImageCms.buildTransformFromOpenProfiles()) to an
# image. The transform can be used for multiple images, saving
# considerable calculation time if doing the same conversion multiple times.
#
# If you want to modify im in-place instead of receiving a new image as
# the return value, set inPlace to TRUE. This can only be done if
# transform.inMode and transform.outMode are the same, because we can't
# change the mode in-place (the buffer sizes for some modes are
# different). The default behavior is to return a new Image object of
# the same dimensions in mode transform.outMode.
#
# @param im A PIL Image object, and im.mode must be the same as the inMode
# supported by the transform.
# @param transform A valid CmsTransform class object
# @param inPlace Bool (1 == True, 0 or None == False). If True, im is modified
# in place and None is returned, if False, a new Image object with the
# transform applied is returned (and im is not changed). The default is False.
# @return Either None, or a new PIL Image object, depending on the value of inPlace
# @exception PyCMSError
def applyTransform(im, transform, inPlace=0):
try:
if inPlace:
transform.apply_in_place(im)
imOut = None
else:
imOut = transform.apply(im)
except (TypeError, ValueError) as v:
raise PyCMSError(v)
return imOut
##
# (pyCMS) Creates a profile.
#
# If colorSpace not in ["LAB", "XYZ", "sRGB"], a PyCMSError is raised
#
# If using LAB and colorTemp != a positive integer, a PyCMSError is raised.
#
# If an error occurs while creating the profile, a PyCMSError is raised.
#
# Use this function to create common profiles on-the-fly instead of
# having to supply a profile on disk and knowing the path to it. It
# returns a normal CmsProfile object that can be passed to
# ImageCms.buildTransformFromOpenProfiles() to create a transform to apply
# to images.
#
# @param colorSpace String, the color space of the profile you wish to create.
# Currently only "LAB", "XYZ", and "sRGB" are supported.
# @param colorTemp Positive integer for the white point for the profile, in
# degrees Kelvin (i.e. 5000, 6500, 9600, etc.). The default is for D50
# illuminant if omitted (5000k). colorTemp is ONLY applied to LAB profiles,
# and is ignored for XYZ and sRGB.
# @return A CmsProfile class object
# @exception PyCMSError
def createProfile(colorSpace, colorTemp=-1):
if colorSpace not in ["LAB", "XYZ", "sRGB"]:
raise PyCMSError("Color space not supported for on-the-fly profile creation (%s)" % colorSpace)
if colorSpace == "LAB":
try:
colorTemp = float(colorTemp)
except (TypeError, ValueError):
raise PyCMSError("Color temperature must be numeric, \"%s\" not valid" % colorTemp)
try:
return core.createProfile(colorSpace, colorTemp)
except (TypeError, ValueError) as v:
raise PyCMSError(v)
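# Sketch: build an sRGB -> LAB transform entirely from profiles created
# on the fly, with no profile files on disk; 6500 gives a D65-ish white
# point for the LAB profile. Illustrative only, never called.
def _example_createProfile():
    srgb = createProfile("sRGB")
    lab = createProfile("LAB", 6500)
    return buildTransformFromOpenProfiles(srgb, lab, "RGB", "LAB")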
##
# (pyCMS) Gets the internal product name for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised If an error occurs while trying to obtain the
# name tag, a PyCMSError is raised.
#
# Use this function to obtain the INTERNAL name of the profile (stored
# in an ICC tag in the profile itself), usually the one used when the
# profile was originally created. Sometimes this tag also contains
# additional information supplied by the creator.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal name of the profile as stored in an
# ICC tag.
# @exception PyCMSError
def getProfileName(profile):
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# do it in python, not c.
# // name was "%s - %s" (model, manufacturer) || Description ,
# // but if the Model and Manufacturer were the same or the model
# // was long, Just the model, in 1.x
model = profile.profile.product_model
manufacturer = profile.profile.product_manufacturer
if not (model or manufacturer):
return profile.profile.product_description+"\n"
if not manufacturer or len(model) > 30:
return model + "\n"
return "%s - %s\n" % (model, manufacturer)
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Gets the internal product information for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the info tag, a PyCMSError
# is raised
#
# Use this function to obtain the information stored in the profile's
# info tag. This often contains details about the profile, and how it
# was created, as supplied by the creator.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal profile information stored in an ICC
# tag.
# @exception PyCMSError
def getProfileInfo(profile):
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# add an extra newline to preserve pyCMS compatibility
# Python, not C. the white point bits weren't working well, so skipping.
# // info was description \r\n\r\n copyright \r\n\r\n K007 tag \r\n\r\n whitepoint
description = profile.profile.product_description
cpright = profile.profile.product_copyright
arr = []
for elt in (description, cpright):
if elt:
arr.append(elt)
return "\r\n\r\n".join(arr)+"\r\n\r\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Gets the copyright for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the copyright tag, a PyCMSError
# is raised
#
# Use this function to obtain the information stored in the profile's
# copyright tag.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal profile information stored in an ICC
# tag.
# @exception PyCMSError
def getProfileCopyright(profile):
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_copyright + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Gets the manufacturer for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the manufacturer tag, a PyCMSError
# is raised
#
# Use this function to obtain the information stored in the profile's
# manufacturer tag.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal profile information stored in an ICC
# tag.
# @exception PyCMSError
def getProfileManufacturer(profile):
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_manufacturer + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Gets the model for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the model tag, a PyCMSError
# is raised
#
# Use this function to obtain the information stored in the profile's
# model tag.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal profile information stored in an ICC
# tag.
# @exception PyCMSError
def getProfileModel(profile):
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_model + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Gets the description for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the description tag, a PyCMSError
# is raised
#
# Use this function to obtain the information stored in the profile's
# description tag.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return A string containing the internal profile information stored in an ICC
# tag.
# @exception PyCMSError
def getProfileDescription(profile):
try:
# add an extra newline to preserve pyCMS compatibility
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.product_description + "\n"
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
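# Sketch: dump the human-readable tags of a profile in one place (the
# profile path argument is a placeholder). Each getter above appends a
# trailing newline for pyCMS compatibility, hence the strip(). Illustrative
# only, never called.
def _example_profile_tags(profile_path):
    p = getOpenProfile(profile_path)
    return {
        "name": getProfileName(p).strip(),
        "info": getProfileInfo(p).strip(),
        "copyright": getProfileCopyright(p).strip(),
        "manufacturer": getProfileManufacturer(p).strip(),
        "model": getProfileModel(p).strip(),
        "description": getProfileDescription(p).strip(),
    }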
##
# (pyCMS) Gets the default intent name for the given profile.
#
# If profile isn't a valid CmsProfile object or filename to a profile,
# a PyCMSError is raised.
#
# If an error occurs while trying to obtain the default intent, a
# PyCMSError is raised.
#
# Use this function to determine the default (and usually best optimized)
# rendering intent for this profile. Most profiles support multiple
# rendering intents, but are intended mostly for one type of conversion.
# If you wish to use a different intent than returned, use
# ImageCms.isIntentSupported() to verify it will work first.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @return Integer 0-3 specifying the default rendering intent for this profile.
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @exception PyCMSError
def getDefaultIntent(profile):
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
return profile.profile.rendering_intent
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
##
# (pyCMS) Checks if a given intent is supported.
#
# Use this function to verify that you can use your desired
# renderingIntent with profile, and that profile can be used for the
# input/output/proof profile as you desire.
#
# Some profiles are created specifically for one "direction", and cannot
# be used for others. Some profiles can only be used for certain
# rendering intents... so it's best to either verify this before trying
# to create a transform with them (using this function), or catch the
# potential PyCMSError that will occur if they don't support the modes
# you select.
#
# @param profile EITHER a valid CmsProfile object, OR a string of the filename
# of an ICC profile.
# @param intent Integer (0-3) specifying the rendering intent you wish to use
# with this profile
#
# INTENT_PERCEPTUAL = 0 (DEFAULT) (ImageCms.INTENT_PERCEPTUAL)
# INTENT_RELATIVE_COLORIMETRIC = 1 (ImageCms.INTENT_RELATIVE_COLORIMETRIC)
# INTENT_SATURATION = 2 (ImageCms.INTENT_SATURATION)
# INTENT_ABSOLUTE_COLORIMETRIC = 3 (ImageCms.INTENT_ABSOLUTE_COLORIMETRIC)
#
# see the pyCMS documentation for details on rendering intents and what they do.
# @param direction Integer specifying if the profile is to be used for input,
# output, or proof
#
# INPUT = 0 (or use ImageCms.DIRECTION_INPUT)
# OUTPUT = 1 (or use ImageCms.DIRECTION_OUTPUT)
# PROOF = 2 (or use ImageCms.DIRECTION_PROOF)
#
# @return 1 if the intent/direction are supported, -1 if they are not.
# @exception PyCMSError
def isIntentSupported(profile, intent, direction):
try:
if not isinstance(profile, ImageCmsProfile):
profile = ImageCmsProfile(profile)
# FIXME: I get different results for the same data w. different
# compilers. Bug in LittleCMS or in the binding?
if profile.profile.is_intent_supported(intent, direction):
return 1
else:
return -1
except (AttributeError, IOError, TypeError, ValueError) as v:
raise PyCMSError(v)
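# Sketch: verify an intent/direction pair before building a transform with
# it (the profile path argument is a placeholder). Note the return
# convention of isIntentSupported: 1 if supported, -1 if not -- not a
# boolean. Illustrative only, never called.
def _example_isIntentSupported(profile_path):
    ok = isIntentSupported(profile_path,
                           INTENT_RELATIVE_COLORIMETRIC,
                           DIRECTION_OUTPUT)
    return ok == 1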
##
# (pyCMS) Fetches versions.
def versions():
import sys
return (
VERSION, core.littlecms_version, sys.version.split()[0], Image.VERSION
)
# --------------------------------------------------------------------
if __name__ == "__main__":
# create a cheap manual from the __doc__ strings for the functions above
from PIL import ImageCms
print(__doc__)
for f in dir(ImageCms):
print("=" * 80)
print("%s" % f)
try:
doc = getattr(ImageCms, f).__doc__
if doc and "pyCMS" in doc:
# so we don't get the __doc__ string for imported modules
print(doc)
except AttributeError:
pass
| [
"[email protected]"
] | |
fac1370427122efedc91019afd32e3d4c7c4a48a | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-4750.py | 61e596ad7605038272c3423838a093b5fb059e38 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,755 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
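# A short usage sketch for Vector above (not part of the original benchmark,
# and never called): append grows the backing list one slot per element.
def vector_demo() -> int:
    v:Vector = None
    v = Vector()
    v.append_all([2, 3, 5, 7])
    v.remove_at(1) # drops the 3
    return v.get(1) + v.length() # 5 + 3 = 8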
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
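    # Note: while capacity stays below doubling_limit, "items + items"
    # doubles the backing list, so appends are amortized O(1); past the
    # limit each growth step adds a single slot and appends degrade to
    # O(n) copies.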
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
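    # Note: "not really" because instead of marking multiples of each
    # prime, this deletes every later element divisible by v[i]; the
    # repeated remove_at shifts make it roughly O(n^2) rather than the
    # classic O(n log log n) sieve.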
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
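# Expected output: the primes below 50 (2, 3, 5, ..., 47), one per line.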
| [
"[email protected]"
] | |
147c2d90ce5537ee9f661bf45932eeda21e86596 | e233d3d5ad19bb17a7dce7ff8d96404a17b3b705 | /src/programy/parser/template/nodes/vocabulary.py | ebaad3a280d6196ae7ad85d1cb65c2696d9876d8 | [
"MIT"
] | permissive | jaimecamacaro/program-y | 2559fb0cb70150b147c090c611931f84fd276867 | 5f31608290faddf8da9a52587ec892b258ec11d4 | refs/heads/master | 2021-06-26T20:26:53.778763 | 2017-09-13T09:47:14 | 2017-09-13T09:47:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,461 | py | """
Copyright (c) 2016-17 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from programy.parser.template.nodes.base import TemplateNode
class TemplateVocabularyNode(TemplateNode):
def __init__(self):
TemplateNode.__init__(self)
def resolve_to_string(self, bot, clientid):
set_words = bot.brain.sets.count_words_in_sets()
pattern_words = bot.brain.aiml_parser.pattern_parser.count_words_in_patterns()
resolved = "%d" % (set_words + pattern_words)
if logging.getLogger().isEnabledFor(logging.DEBUG): logging.debug("[%s] resolved to [%s]", self.to_string(),
resolved)
return resolved
def resolve(self, bot, clientid):
try:
return self.resolve_to_string(bot, clientid)
except Exception as excep:
logging.exception(excep)
return ""
def to_string(self):
return "VOCABULARY"
def to_xml(self, bot, clientid):
xml = "<vocabulary>"
xml += self.children_to_xml(bot, clientid)
xml += "</vocabulary>"
return xml
#######################################################################################################
    # <vocabulary/>
def add_default_star(self):
return True
def parse_expression(self, graph, expression):
self._parse_node(graph, expression)
| [
"[email protected]"
] | |
2904a73fe26296f364a3e698b6c66d370b6ebc3c | 62f59fe1e0246b33c84412ee2a60e77938a05a15 | /proj/my_lib/Common/img_hash.py | ac6c0aa4cb2dd704168d823abfde9bea4dd890fd | [] | no_license | 20113261/platform_service | 02676d2654f5c7bde2c7eafdadbf55fe7253a7b0 | bc903168bd7cbc499892f24c2b1cc82c38180c01 | refs/heads/dev | 2022-08-01T02:30:05.004852 | 2018-04-29T05:39:37 | 2018-04-29T05:39:37 | 131,576,306 | 1 | 0 | null | 2022-07-08T19:13:32 | 2018-04-30T09:14:54 | Python | UTF-8 | Python | false | false | 1,210 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/10/31 下午7:34
# @Author : Hou Rong
# @Site :
# @File : img_hash.py
# @Software: PyCharm
import imagehash
from PIL import Image
from proj.my_lib.logger import get_logger, func_time_logger
logger = get_logger("img_hash")
@func_time_logger
def _img_p_hash(f_obj):
f_obj.seek(0)
try:
img_obj = Image.open(f_obj)
except Exception as exc:
logger.exception(msg="[error img]", exc_info=exc)
return None
try:
_hash = imagehash.phash(img_obj)
except Exception as exc:
logger.exception(msg="[could not calculate phash]", exc_info=exc)
return None
f_obj.seek(0)
return _hash
def img_p_hash(f_obj):
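    # Wrapper: attempt the hash up to 4 times and return the first
    # non-None result as a string, otherwise None.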
_retry_times = 4
while _retry_times:
_retry_times -= 1
_res = _img_p_hash(f_obj)
if _res:
return str(_res)
return None
if __name__ == '__main__':
    f = open('/tmp/1/035211ab53d76b051376f9292ca9623d.jpg', 'rb')  # open in binary mode for image data
print(img_p_hash(f))
print(img_p_hash(f))
print(img_p_hash(f))
print(img_p_hash(f))
print(img_p_hash(f))
    f = open('/tmp/1/b8c88852a915cf32e1eeed20ec7d3cc7.jpg', 'rb')  # open in binary mode for image data
print(img_p_hash(f))
| [
"[email protected]"
] | |
2ef775fa9ffa8db94d0c44a35f38777947ee452a | 1b8a99a4ff80da51dc81dd8354bf9bf1cbd25a8b | /2022/shift_2d_grid.py | 6ddd3b4a793d02f3fddf9b911c48410e32e74f17 | [] | no_license | eronekogin/leetcode | ea639eebe0cd70af9eb4cba59bc68f636d7b3e0c | edb870f83f0c4568cce0cacec04ee70cf6b545bf | refs/heads/master | 2023-08-16T10:35:57.164176 | 2023-08-14T11:25:33 | 2023-08-14T11:25:33 | 163,679,450 | 0 | 0 | null | 2021-09-09T12:04:44 | 2018-12-31T15:33:06 | Python | UTF-8 | Python | false | false | 528 | py | """
https://leetcode.com/problems/shift-2d-grid/
"""
class Solution:
def shiftGrid(self, grid: list[list[int]], k: int) -> list[list[int]]:
R, C = len(grid), len(grid[0])
newGrid = [[0] * C for _ in range(R)]
for r, row in enumerate(grid):
for c, v in enumerate(row):
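                # Shifting by k moves column c to c + k; divmod splits the
                # overflow into extra rows (dr) and the new column (nc).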
dr, nc = divmod(c + k, C)
nr = (r + dr) % R
newGrid[nr][nc] = v
return newGrid
print(Solution().shiftGrid([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
], 1))
| [
"[email protected]"
] | |
b3e9af807b979f922b4629836eb98bb6efebee19 | 673e829dda9583c8dd2ac8d958ba1dc304bffeaf | /data/multilingual/Latn.TPI/Mono_8/pdf_to_json_test_Latn.TPI_Mono_8.py | 618c37de8897c8b6e934ba4f5610096e2ef4829b | [
"BSD-3-Clause"
] | permissive | antoinecarme/pdf_to_json_tests | 58bab9f6ba263531e69f793233ddc4d33b783b7e | d57a024fde862e698d916a1178f285883d7a3b2f | refs/heads/master | 2021-01-26T08:41:47.327804 | 2020-02-27T15:54:48 | 2020-02-27T15:54:48 | 243,359,934 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.TPI/Mono_8/udhr_Latn.TPI_Mono_8.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| [
"[email protected]"
] | |
b59ec1cd512b6ef11af45128bfc21a60e6b82ece | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2655/60749/257037.py | d69187901e074eb96757b5e40eb320ae97fbe4d1 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | n=int(input())
res=[]
for _ in range(n):
res.append(int(input()))
def findcloset(n):
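    # Returns the smallest power of two that is >= n.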
k=0
while n>=pow(2,k):
if n<pow(2,k+1):
if n==pow(2,k):
return pow(2,k)
else:
return pow(2,k+1)
k+=1
for t in res:
print(findcloset(t))
| [
"[email protected]"
] | |
db9c2d2a18762a017bc99282713b6486c15730a0 | 7be8a902f968ecd74fdf028d758f8777df6120c7 | /daxuan/Taiwan/yahoo/yahoo_news.py | be5a2c9a07719bf638955a598131a9bea4d0b0c5 | [
"Apache-2.0"
] | permissive | BingquLee/spiders | 51142f848d52a7f8a98563e17b5c582a7e18b46c | 66e42b59aa692ab531e6ca347708d46b189c0047 | refs/heads/master | 2020-03-22T00:49:59.079429 | 2018-06-30T17:55:07 | 2018-06-30T17:55:07 | 139,268,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,797 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-04-11 14:45:12
# @Author : guangqiang_xu ([email protected])
# @Link : http://www.treenewbee.com/
# @Version : $Id$
import requests
from lxml import etree
from retry import retry
import time
import json
import hashlib
import re
import urllib, urllib2
from readability.readability import Document
from elasticsearch import Elasticsearch
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
es = Elasticsearch()
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1"}
def searchData(index, type ,body):
query_body = {"query": {"query_string": {"query": body}}}
results = es.search(index=index, doc_type=type, body=query_body)
date_list = results['hits']['hits']
return date_list
from langconv import *
def Traditional2Simplified(sentence):
sentence = Converter('zh-hans').convert(sentence)
return sentence
@retry(tries=3)
def get_content(lis, keyword, timest):
i = 1
for li in lis:
item = {}
source = "yahoo"
print li
        news_url = li.xpath('./div/div[1]/h3/a/@href')[0]  # take the first matched href (requests needs a string URL)
print 11111111111111111111111, news_url, 1111111111111111111111111111111
title = ''.join(li.xpath('./div/div[1]/h3/a//text()'))
print title
summary = ''.join(li.xpath('./div/div[2]/p//text()'))
# user_name = li.xpath('./div/div[3]/p/span[1]/text()')[0]
# print user_name
date = li.xpath('./div/div[3]/p/span[2]/text()')[0]
print date
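        # Dates arrive in a Chinese format (e.g. "4月11日 10:30AM"); the next
        # lines normalize them to "2018-M-D H:M" (the year is hardcoded).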
strdate = '2018-' + date.replace('AM','').replace('PM','').replace('月','-').replace('日','')
timeArray = time.strptime(strdate, "%Y-%m-%d %H:%M")
timestamp = int(time.mktime(timeArray))
if timestamp < timest:
continue
response1 = requests.get(news_url, timeout=10, headers=headers)
response1.coding = 'utf-8'
txt1 = response1.content
new_url = re.findall(r'URL=(.*?)">',txt1)[0].replace("'",'')
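        # Use the MD5 of the resolved article URL as a stable document id.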
hash_md5 = hashlib.md5(new_url)
Id = hash_md5.hexdigest()
response = requests.get(new_url, timeout=10, headers=headers)
response.coding = 'utf-8'
txt = response.content
readable_article = Document(txt).summary()
html = etree.HTML(readable_article)
context = ''.join(html.xpath('//p//text()')).replace('\r','').replace('\n','').replace('\t','')
if context in "":
news_html = etree.HTML(txt)
context = ''.join(news_html.xpath('//p//text()'))
timesyear = time.localtime(timestamp).tm_year
stringDate = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
images = ''
kname = urllib.quote(str(title))
try:
Imageurl = "https://image.baidu.com/search/index?tn=baiduimage&ipn=r&ct=201326592&cl=2&lm=-1&st=-1&fm=result&fr=&sf=1&fmq=1502779395291_R&pv=&ic=0&nc=1&z=&se=1&showtab=0&fb=0&width=&height=&face=0&istype=2&ie=utf-8&word=" + kname
req = urllib2.urlopen(Imageurl, timeout=10)
html = req.read()
images = re.search(r'https://.*?\.jpg', html).group()
except:
pass
summary = Traditional2Simplified(summary.decode("utf-8"))
keyword = Traditional2Simplified(keyword.decode("utf-8"))
context = Traditional2Simplified(context.decode("utf-8"))
tittle = Traditional2Simplified(title.decode("utf-8"))
item['summary'] = summary
item['keyword'] = keyword
item['candidate'] = keyword
item['source'] = source
item['timestamps'] = timestamp
item['date'] = date
item['lang'] = 'cn'
item['images'] = images
item['context'] = context
item['timesyear'] = timesyear
item['time'] = stringDate
item['title'] = tittle
item['url'] = new_url
item['id'] = Id
with open('yahoo_news.json', 'a') as f:
f.write(json.dumps(item, ensure_ascii=False) + '\n')
def crawl_yahoo(keyword, strdate):
timeArray = time.strptime(strdate, "%Y-%m-%d")
timest = int(time.mktime(timeArray))
kname = urllib.quote(str(keyword))
page = 1
while 1:
url = "https://tw.search.yahoo.com/search;?fr2=sb-top-tw.search&p={}&b={}".format(kname, page)
# url = "https://tw.news.search.yahoo.com/search;_ylt=AwrtXGtDr81aAG4A2CVw1gt.;_ylu=X3oDMTEwOG1tc2p0BGNvbG8DBHBvcwMxBHZ0aWQDBHNlYwNwYWdpbmF0aW9u?p={}&ei=UTF-8&flt=ranking%3Adate%3B&fr=yfp-search-sb&b={}&pz=10&bct=0&xargs=0".format(kname, page)
print url
response = requests.get(url, headers=headers)
txt = response.text
html = etree.HTML(txt)
lis = html.xpath('//ol[@class="mb-15 reg searchCenterMiddle"]/li')
# lis = html.xpath('//div[@id="web"]/ol[2]/li')
# print "lis", lis
i = 1
for li in lis:
item = {}
source = "yahoo"
print li.xpath('./div/div[1]/h3/a/@href')[0]
print "*************"
title = ''.join(li.xpath('./div/div[1]/h3/a//text()'))
print title
print "+++++++++++++"
summary = ''.join(li.xpath('./div/div[2]/p//text()'))
print summary
print "------------"
date = li.xpath('./div/div[3]/p/span[2]/text()')[0]
print date
print 00000000000000
if len(lis) <= 0:
break
get_content(lis, keyword, timest)
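        # Yahoo's "b" parameter is a 1-based result offset (10 hits per
        # page), so stepping by 10 and stopping at 81 fetches at most 8 pages.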
page += 10
if page == 81:
break
if __name__ == '__main__':
crawl_yahoo('盧秀燕', '2018-01-01')
crawl_yahoo('林佳龍', '2018-01-01') | [
"[email protected]"
] | |
4ea70871b269b1e8653582ef88c2497f5e928abc | 96740c0a9ff1467f0897253c79a059b5ba6a1949 | /test_webscoket.py | 02c07101a77d1393e2894d4e4843fafdb61c1326 | [] | no_license | Cola1995/soho1 | a876990cd3adfb9534eb3630e24a9bf90bdf8363 | fad8f13d6c789e7c37eba5cfd94a9cb609c8db1d | refs/heads/master | 2020-07-27T07:50:38.299692 | 2019-09-17T10:02:34 | 2019-09-17T10:02:34 | 209,020,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,594 | py | import asyncio
import logging
from datetime import datetime
from aiowebsocket.converses import AioWebSocket
import json
async def startup(uri):
async with AioWebSocket(uri) as aws:
converse = aws.manipulator
        # the client sends subscription messages to the server
        await converse.send('{"event":"pusher:subscribe","data":{"channel":"exchange_market"}}')  # subscribe to all markets
        await converse.send('{"event":"pusher:subscribe","data":{"channel":"exchange_market_bid_ask"}}')
        # subscribe to the btc_usdt websocket channel
# await converse.send('{"event":"pusher:subscribe","data":{"channel":"exchange_ticker"}}')
# await converse.send('{"event":"pusher:subscribe","data":{"channel":"exchange_eth-usdt"}}')
# await converse.send('{"event":"pusher:subscribe","data":{"channel":"exchange_bqqq-usdt"}}')
# await converse.send('{"event":"pusher:subscribe","data":{"auth":"5174598ab656e4da66dc:1c303fad7f188e3a9f130235ecffc1a2052da5bd9645d572b8b6020f1d154032","channel":"private-exchange==abbd73ed-2cde-416f-8ce1-3217e0472205"}}') # 监听所有市场
while True:
mes = await converse.receive()
print('{time}-Client receive: {rec}'
.format(time=datetime.now().strftime('%Y-%m-%d %H:%M:%S'), rec=mes))
print(type(mes))
            # unpack the payload and extract the desired fields
# mes = json.loads(mes.decode("utf-8"))
# print(mes)
# if mes["data"]["marketPriceDto"]["marketSymbol"]=="NEO-BTC":
# print(mes["data"])
# m1 = json.loads(mes["data"])
# print(m1.get("message").get("marketPriceDto").get("volume"))
# print(m1)
# if m1.get("message")!=None:
# if m1["message"]["marketPriceDto"]["marketSymbol"]==market:
# print("{0}:市场:{1},chang24:{2}, percentageChange24:{3}".format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'),m1["message"]["marketPriceDto"]["marketSymbol"],m1["message"]["marketPriceDto"]["change24"],m1["message"]["marketPriceDto"]["percentageChange24"]))
if __name__ == '__main__':
# remote = 'wss://wssprod.bitsdaq.com/app/167bca97db7a84f1c98b?protocol=7&client=js&version=4.3.1&flash=false' # 线上环境
market = "ETH-BTC" # 配置需要监听的市场/币对
remote ="wss://wss-dev-15.bitsdaq.io/app/d4796efce047f9e6443a?protocol=7&client=js&version=4.4.0&flash=false" # dev环境通用
try:
asyncio.get_event_loop().run_until_complete(startup(remote))
except KeyboardInterrupt as exc:
logging.info('Quit.') | [
"[email protected]"
] | |
f73e8cee4387922b60f25f6d68bcaedf74ab873d | de479d4a8af0e070b2bcae4186b15a8eb74971fb | /cn/iceknc/study/c_python_pygame/c_pygame_window.py | 6ba43b45b4a1518b0fc99459cb90ef2ca9434385 | [] | no_license | iceknc/python_study_note | 1d8f6e38be57e4dc41a661c0a84d6ee223c5a878 | 730a35890b77ecca3d267fc875a68e96febdaa85 | refs/heads/master | 2020-05-19T18:44:55.957392 | 2019-09-27T01:15:54 | 2019-09-27T01:15:54 | 185,160,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | import pygame
pygame.init()
# create the game window
screen = pygame.display.set_mode((480, 700))
bg = pygame.image.load("./images/background.png")
screen.blit(bg, (0, 0))
hero = pygame.image.load("./images/me1.png")
screen.blit(hero, (200, 500))
pygame.display.update()
pygame.quit()
| [
"[email protected]"
] | |
c39eeaf948feb9a12175b5eca02bab3bdadc0f3b | b6559791bc33d1d44ab867dcbd7ca93243540e4f | /爆米花视频/baomihua/middlewares.py | 1a013cea298e85bf4650717d5609fa0e1304794d | [] | no_license | RichardcLee/Spiders | 7b51f68a255354bd8b06fca627491d68e55f7cd7 | e0f5c060ea85472e374fd8a16fecf5bdd418a572 | refs/heads/master | 2021-10-22T08:34:34.139022 | 2021-10-12T08:09:04 | 2021-10-12T08:09:04 | 148,959,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,601 | py | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class BaomihuaSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class BaomihuaDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either;
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| [
"[email protected]"
] | |
ca4b09083eb46a4afe2e3fcc2d2303319053a314 | bbe447a740929eaee1955bd9c1517cf760dd5cb9 | /keygrabber/adwords/adwords_api_python_14.2.1/build/lib.linux-x86_64-2.7/adspygoogle/adwords/zsi/v200909/CampaignCriterionService_services.py | efe456bdfc5fa3c46319a37ae0a627525a085719 | [
"Apache-2.0"
] | permissive | MujaahidSalie/aranciulla | f3d32e7dd68ecfca620fe4d3bf22ecb4762f5893 | 34197dfbdb01479f288611a0cb700e925c4e56ce | refs/heads/master | 2020-09-07T02:16:25.261598 | 2011-11-01T21:20:46 | 2011-11-01T21:20:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,437 | py | ##################################################
# CampaignCriterionService_services.py
# generated by ZSI.generate.wsdl2python
##################################################
from CampaignCriterionService_services_types import *
import urlparse, types
from ZSI.TCcompound import ComplexType, Struct
from ZSI import client
import ZSI
# Locator
class CampaignCriterionServiceLocator:
CampaignCriterionServiceInterface_address = "https://adwords.google.com:443/api/adwords/cm/v200909/CampaignCriterionService"
def getCampaignCriterionServiceInterfaceAddress(self):
return CampaignCriterionServiceLocator.CampaignCriterionServiceInterface_address
def getCampaignCriterionServiceInterface(self, url=None, **kw):
return CampaignCriterionServiceSoapBindingSOAP(url or CampaignCriterionServiceLocator.CampaignCriterionServiceInterface_address, **kw)
# Methods
class CampaignCriterionServiceSoapBindingSOAP:
def __init__(self, url, **kw):
kw.setdefault("readerclass", None)
kw.setdefault("writerclass", None)
# no resource properties
self.binding = client.Binding(url=url, **kw)
# no ws-addressing
# get: getCampaignCriterion
def getCampaignCriterion(self, request):
if isinstance(request, getCampaignCriterionRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(getCampaignCriterionResponse.typecode)
return response
    # mutate: mutateCampaignCriterion
def mutateCampaignCriterion(self, request):
if isinstance(request, mutateCampaignCriterionRequest) is False:
raise TypeError, "%s incorrect request type" % (request.__class__)
kw = {}
# no input wsaction
self.binding.Send(None, None, request, soapaction="", **kw)
# no output wsaction
response = self.binding.Receive(mutateCampaignCriterionResponse.typecode)
return response
getCampaignCriterionRequest = ns0.getCampaignCriterion_Dec().pyclass
getCampaignCriterionResponse = ns0.getCampaignCriterionResponse_Dec().pyclass
mutateCampaignCriterionRequest = ns0.mutateCampaignCriterion_Dec().pyclass
mutateCampaignCriterionResponse = ns0.mutateCampaignCriterionResponse_Dec().pyclass
| [
"[email protected]"
] | |
0d0a072bf4bc60c77f25558e40e4222f8ca8679c | 496e05014492b4bbecf9f15c40ae416c21e27a46 | /src/outpost/django/video/migrations/0009_epiphansource.py | 1354f113997a6042355a8bf0539191a4f9fc69c3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | medunigraz/outpost_deprecated | b1ff802054c04cf989b3b660e132fa6a1c2a078c | bc88eaa3bb504d394fdf13f1131e40db27759c89 | refs/heads/master | 2022-01-23T15:46:34.859095 | 2019-05-21T08:38:11 | 2019-05-21T08:38:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 895 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-08 08:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import imagekit.models.fields
from ...base.utils import Uuid4Upload
class Migration(migrations.Migration):
dependencies = [
('video', '0008_zipstreamexport'),
]
operations = [
migrations.CreateModel(
name='EpiphanSource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('number', models.PositiveSmallIntegerField()),
('preview', imagekit.models.fields.ProcessedImageField(upload_to=Uuid4Upload)),
('epiphan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='video.Epiphan')),
],
),
]
| [
"[email protected]"
] | |
432d2dd90f323a1682a499fc0227a6ee553ff9f0 | 3040a2c43eedbc6ba32e6e67efe9ab170c4f336a | /personalservices/apps.py | 5edf4f9af6986ed956e0c63561988f400918ce6f | [] | no_license | leandrocl2005/minidashboard | ff6917260656a1561c60bf19c45f8cde2c491991 | 0ae9bf783e6fb08616d772ad5bc2f24f1c1e2740 | refs/heads/main | 2023-05-03T01:04:12.809159 | 2021-05-20T21:23:48 | 2021-05-20T21:23:48 | 369,221,649 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from django.apps import AppConfig
class PersonalservicesConfig(AppConfig):
name = 'personalservices'
| [
"[email protected]"
] | |
09485a57811f74f6320ac2d4290643cdd57572c4 | e96deed00dd14a1f6d1ed7825991f12ea8c6a384 | /106. Construct Binary Tree from Inorder and Postor.py | 65b1c9034c369dcb1878ffa66b112d2c2d6b2c93 | [] | no_license | borisachen/leetcode | 70b5c320abea8ddfa299b2e81f886cfeb39345c1 | 15e36b472a5067d17482dbd0d357336d31b35ff4 | refs/heads/master | 2021-01-19T17:07:46.726320 | 2020-11-16T04:30:52 | 2020-11-16T04:30:52 | 88,306,634 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,059 | py | 106. Construct Binary Tree from Inorder and Postorder Traversal
Given inorder and postorder traversal of a tree, construct the binary tree.
Note:
You may assume that duplicates do not exist in the tree.
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# Postorder traversal implies the last element is the root node.
# Find that element in inorder, say at position j:
#   the elements right of j in inorder belong to the right subtree,
#   the elements left of j in inorder belong to the left subtree.
# Since we pop the root off the end of postorder, the tail of postorder
# describes the right subtree, so we must build the right subtree first.
class Solution(object):
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
if not inorder or not postorder:
return None
root = TreeNode(postorder.pop())
j = inorder.index(root.val)
root.right = self.buildTree(inorder[j+1:], postorder)
root.left = self.buildTree(inorder[:j], postorder)
return root
| [
"[email protected]"
] | |
6e22abb1ceff8ee09df97b9ab40f2f1c3fc0ff35 | 32bbe94e77deced5e58de97eb19e7c6126b001df | /backend/src/carts/admin/carts.py | 3c0c975c778d172c9434ee052c504b97c3071014 | [] | no_license | 3asyPe/astudy | 16d8adacc3bee9f2667c0a5f1be8228868440c6a | 0643a33a294c410523738f59f95c8d205dd63dc5 | refs/heads/master | 2023-06-25T11:23:39.500361 | 2021-07-28T13:33:48 | 2021-07-28T13:33:48 | 336,819,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 593 | py | from django.contrib import admin
from app.admin import UserFilter
from carts.models import Cart
@admin.register(Cart)
class CartAdmin(admin.ModelAdmin):
list_display = [
'id',
'user',
'total',
'active',
]
list_display_links = [
'id',
'user',
'total',
'active'
]
fields = [
"user",
"courses",
"subtotal",
"total",
"active",
]
readonly_fields = [
"subtotal",
"total",
]
list_filter = [
'active',
UserFilter
]
| [
"[email protected]"
] | |
ab7072e29c1fc3f1e960114459f71b5d0e4b47c7 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /095_os_and_sys/examples/nuke/path.py | d2c4af2cb8323f944b77ddabab9ee7ba30329f67 | [] | no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 16,611 | py | # path.py
#acl All:read
#format PYTHON
# -*- coding: iso-8859-1 -*-
""" path.py - An object representing a path to a file or directory.
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.2 or later.
URL: http://www.jorendorff.com/articles/python/path
Author: Jason Orendorff <[email protected]> (and others - see the url!)
Date: 7 Mar 2004
Adapted for stdlib by: Reinhold Birkenfeld, July 2005
Modified by Bjorn Lindqvist <[email protected]>, January 2006
"""
# TODO
# - Better error message in listdir() when self isn't a
# directory. (On Windows, the error message really sucks.)
# - Make sure everything has a good docstring.
# - Add methods for regex find and replace.
# - Perhaps support arguments to touch().
# - Could add split() and join() methods that generate warnings.
# - Note: __add__() technically has a bug, I think, where
# it doesn't play nice with other types that implement
# __radd__(). Test this.
from snitcher import snitch
snitch()
import fnmatch
import glob
import os
import shutil
import sys
__all__ = ['path']
__version__ = '2.0.4'
# Universal newline support
_textmode = 'r'
if hasattr(file, 'newlines'):
_textmode = 'U'
# Use unicode strings if possible
_base = str
if os.path.supports_unicode_filenames:
_base = unicode
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __new__(typ, *args):
"""
Creates a new path object concatenating the *args. *args
may only contain Path objects or strings. If *args is
empty, Path(os.curdir) is created.
"""
if not args:
return typ(os.curdir)
for arg in args:
if not isinstance(arg, basestring):
raise ValueError("%s() arguments must be Path, str or "
"unicode" % typ.__name__)
if len(args) == 1:
return _base.__new__(typ, *args)
return typ(os.path.join(*args))
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _base(self))
# Adding a path and a string yields a path.
def __add__(self, more):
return self.__class__(_base(self) + more)
def __radd__(self, other):
return self.__class__(other + _base(self))
@classmethod
def cwd(cls):
""" Return the current working directory as a path object. """
return path(os.getcwd())
# --- Operations on path strings.
def abspath(self):
return self.__class__(os.path.abspath(self))
def normcase(self):
return self.__class__(os.path.normcase(self))
def normpath(self):
return self.__class__(os.path.normpath(self))
def realpath(self):
return self.__class__(os.path.realpath(self))
def expanduser(self):
return self.__class__(os.path.expanduser(self))
def expandvars(self):
return self.__class__(os.path.expandvars(self))
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
def _get_dirname(self):
return self.__class__(os.path.dirname(self))
parent = property(
_get_dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
os.path.basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return path(os.path.splitext(self)[0])
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def splitall(self):
""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, '/' or 'C:\\'). The other items in
the list will be strings.
path.path(*result) will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
loc = self.__class__(loc)
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
return self.__class__.cwd().relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
return self.__class__(os.curdir)
else:
return self.__class__(os.path.join(*segments))
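        # e.g. path('/home/a/b').relpathto('/home/a/c/d') yields path('../c/d')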
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [path(self, child) for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, d.dirs('build-*').
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
d.files('*.pyc').
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
"""
for child in self.listdir():
if pattern is None or child.match(pattern):
yield child
if child.isdir():
for item in child.walk(pattern):
yield item
def walkdirs(self, pattern=None):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional 'pattern' argument, this yields only
directories whose names match the given pattern. For
example, mydir.walkdirs('*test') yields only directories
with names ending in 'test'.
"""
for child in self.dirs():
if pattern is None or child.match(pattern):
yield child
for subsubdir in child.walkdirs(pattern):
yield subsubdir
def walkfiles(self, pattern=None):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
for child in self.listdir():
if child.isfile():
if pattern is None or child.match(pattern):
yield child
elif child.isdir():
for f in child.walkfiles(pattern):
yield f
def match(self, pattern):
""" Return True if self.name matches the given pattern.
pattern - A filename pattern with wildcards,
for example '*.py'.
"""
return fnmatch.fnmatch(self.name, pattern)
def matchcase(self, pattern):
""" Test whether the path matches pattern, returning true or
false; the comparison is always case-sensitive.
"""
return fnmatch.fnmatchcase(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
return map(path, glob.glob(_base(path(self, pattern))))
# --- Methods for querying the filesystem.
exists = os.path.exists
isabs = os.path.isabs
isdir = os.path.isdir
isfile = os.path.isfile
islink = os.path.islink
ismount = os.path.ismount
if hasattr(os.path, 'samefile'):
samefile = os.path.samefile
def atime(self):
"""Last access time of the file."""
return os.path.getatime(self)
def mtime(self):
"""Last-modified time of the file."""
return os.path.getmtime(self)
def ctime(self):
"""
Return the system's ctime which, on some systems (like Unix)
is the time of the last change, and, on others (like Windows),
is the creation time for path.
The return value is a number giving the number of seconds
since the epoch (see the time module). Raise os.error if the
file does not exist or is inaccessible.
"""
return os.path.getctime(self)
def size(self):
"""Size of the file, in bytes."""
return os.path.getsize(self)
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
# --- Modifying operations on files and directories
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
def chmod(self, mode):
os.chmod(self, mode)
if hasattr(os, 'chown'):
def chown(self, uid, gid):
os.chown(self, uid, gid)
def rename(self, new):
os.rename(self, new)
def renames(self, new):
os.renames(self, new)
# --- Create/delete operations on directories
def mkdir(self, mode=0777):
os.mkdir(self, mode)
def makedirs(self, mode=0777):
os.makedirs(self, mode)
def rmdir(self):
os.rmdir(self)
def removedirs(self):
os.removedirs(self)
# --- Modifying operations on files
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
def remove(self):
os.remove(self)
def unlink(self):
os.unlink(self)
# --- Links
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return self.__class__(self.parent, p).abspath()
# --- High-level functions from shutil
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
move = shutil.move
rmtree = shutil.rmtree
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
| [
"[email protected]"
] | |
808ad659be53616086cf7608e29444f522b05378 | b723ecb64c86657751cafd21030de2b3c64886f7 | /unchained/community/teacher/views.py | 85d8abe0ce48b865972359f11ec8b82f08ac1969 | [] | no_license | mohinderps/community | 56dffc11d56d704e8c8c6b1e052741da2eb6d1ce | 1d4b5aa357d41c2e75768f359118103a58da43e1 | refs/heads/master | 2020-04-01T07:27:24.744768 | 2018-10-14T15:45:04 | 2018-10-14T15:45:04 | 152,990,736 | 0 | 0 | null | 2018-10-14T15:47:23 | 2018-10-14T15:47:23 | null | UTF-8 | Python | false | false | 2,818 | py | from django.shortcuts import render
from rest_framework import generics
from rest_framework import mixins
from django.contrib.auth.models import User
from rest_framework import permissions
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework import renderers
from rest_framework import viewsets
# Create your views here.
from rest_framework.decorators import action
from rest_framework.response import Response
from community.csrfsession import CsrfExemptSessionAuthentication
from .serializers import TeacherSerializer
from .models import Teacher
from rest_framework.exceptions import PermissionDenied
from community.permissions import isInstitutionAdmin, getUserInstitution, belongsToInstitution, canUpdateProfile
from community.filters import applyUserFilters
class TeacherViewSet(viewsets.ModelViewSet):
"""
This viewset automatically provides `list`, `create`, `retrieve`,
`update` and `destroy` actions.
Additionally we also provide an extra `highlight` action.
"""
queryset = Teacher.objects.all()
serializer_class = TeacherSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
authentication_classes = (CsrfExemptSessionAuthentication, )
def list(self, request, *args, **kwargs):
if not belongsToInstitution(request, getUserInstitution(request)):
raise PermissionDenied(detail='User does not belong to the institution', code=None)
if request.user.is_superuser:
self.queryset = applyUserFilters(request, Teacher)
else:
self.queryset = applyUserFilters(request, Teacher, institution=getUserInstitution(request))
return super(TeacherViewSet, self).list(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
if not isInstitutionAdmin(request, getUserInstitution(request)):
raise PermissionDenied(detail='User is not an admin_user', code=None)
return super(TeacherViewSet, self).create(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
if not belongsToInstitution(request, self.get_object().institution):
raise PermissionDenied(detail='User does not belong to the institution', code=None)
return super(TeacherViewSet, self).retrieve(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
if not canUpdateProfile(request, self.get_object().institution, self.get_object()):
raise PermissionDenied(detail='User can not update other profiles', code=None)
return super(TeacherViewSet, self).update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
if not isInstitutionAdmin(request, self.get_object().institution):
raise PermissionDenied(detail='User is not an admin_user', code=None)
return super(TeacherViewSet, self).destroy(request, *args, **kwargs)
| [
"[email protected]"
] | |
1541e195b8051b431436d8b87ef862ecd8ed011e | 8d593cdc89bac4a993f776c9b11b9339f035744b | /PHYS613 A2 Exercise2.14 SquareWell.py | 593a104bcfb57c763726d4aa6407b37e28574582 | [] | no_license | Global19-atlassian-net/ComputationalPhysics | 21026c748801d07324620ca02dbc56b9a55a0abd | 9c50c302706c5015b588ac12980c5f96a414575f | refs/heads/master | 2021-05-30T00:50:58.746447 | 2015-11-27T15:31:20 | 2015-11-27T15:31:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,512 | py | """
Created on Fri Sep 06 21:03:27 2013
PHYS 613, Assignment 2
Nick Crump
"""
# Exercise 2.13
# Exercise 2.14
"""
From Computational Physics by Devries
"""
from math import sin,cos,exp,sqrt
import numpy as np
import matplotlib.pyplot as plt
# define function - even state solutions of 1D finite square well potential
#***********************************************************************
def evenFunc(a,m,V0,E):
hbarSq = 0.076199682 # eV(nm**2)(Melectron)
alpha = (2.0*m*E/hbarSq)**0.5
beta = ((2.0*m*(V0-E))/hbarSq)**0.5
fEven = beta*cos(alpha*a) - alpha*sin(alpha*a)
return fEven
#***********************************************************************
# define function - odd state solutions of 1D finite square well potential
#***********************************************************************
def oddFunc(a,m,V0,E):
hbarSq = 0.076199682 # eV(nm**2)(Melectron)
alpha = (2.0*m*E/hbarSq)**0.5
beta = ((2.0*m*(V0-E))/hbarSq)**0.5
fOdd = alpha*cos(alpha*a) + beta*sin(alpha*a)
return fOdd
#***********************************************************************
# enter root finding algorithm by Bisection method
#***********************************************************************
def rootBisection(f, xI, xF, Tol, nMax):
# initialize variables
error = 1
n = 1
xiMid = 0 # initial midpoint value to store the n-1 value
# loop until error is less than input tolerance
    while error > Tol and n <= nMax:  # also stop once the iteration cap is reached
xMid = 0.5*(xI+xF)
# set up main Bisection method:
# make bracket interval smaller each iteration until root is found
# check conditions and update bracket points
if f(xI)*f(xMid) > 0:
xI = xMid
error = abs(xMid - xiMid) # calculate approx error
n = n + 1
xiMid = xMid # store the n-1 midpoint
elif f(xI)*f(xMid) < 0:
xF = xMid
error = abs(xMid - xiMid) # calculate approx error
n = n + 1
xiMid = xMid # store the n-1 midpoint
# output results to user
return round(xMid,5)
# end rootBisection function
#***********************************************************************
# main program that calls functions, finds roots and does plotting
#***********************************************************************
# setup root finder routine
#--------------------------
a = 0.3 # nm
m = 1.0 # Melectron
V0 = 10.0 # eV
hbarSq = 0.076199682 # eV(nm**2)(Melectron)
sfEven = lambda E: ((2.0*m*(V0-E)/hbarSq)**0.5)*cos(((2.0*m*E/hbarSq)**0.5)*a) - ((2.0*m*E/hbarSq)**0.5)*sin(((2.0*m*E/hbarSq)**0.5)*a)
sfOdd = lambda E: ((2.0*m*E/hbarSq)**0.5)*cos(((2.0*m*E/hbarSq)**0.5)*a) + ((2.0*m*(V0-E)/hbarSq)**0.5)*sin(((2.0*m*E/hbarSq)**0.5)*a)
Eeven = rootBisection(sfEven, 0, 2.0, 10e-5, 30)
Eodd = rootBisection(sfOdd, 2.0, 4.0, 10e-5, 30)
print 'Eigenvalues = ', Eeven, Eodd
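# With a = 0.3 nm, m = 1 electron mass and V0 = 10 eV this prints the two
# ground-state eigenvalues, approximately 0.71545 eV (even) and 2.82139 eV
# (odd), matching the values annotated on the plot below.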
# setup plotting of allowed energy equation as function of energy
#--------------------------
E = np.arange(0,10.1,0.1)
evenF = []
oddF = []
for i in E:
fEven = evenFunc(0.3,1.0,10.0,i)
fOdd = oddFunc(0.3,1.0,10.0,i)
evenF.append(fEven)
oddF.append(fOdd)
plt.figure(1)
plt.plot(E,evenF,'b',label='Even States')
plt.plot(E,oddF,'r',label='Odd States')
plt.plot(Eeven,0,'bo',Eodd,0,'ro')
plt.xlabel('Energy (eV)')
plt.ylabel('$f\ (E)$')
plt.legend(loc=9)
# setup wavefunction plotting as function of distance & plot potential well
#--------------------------
# x arrays for regions around well
R1 = np.arange(-0.6,-0.29,0.01) # region 1 left of well
R2 = np.arange(-0.3,0.301,0.01) # region 2 inside well
R3 = np.arange(0.3,0.601,0.01) # region 3 right of well
# alpha & beta values for even states
alphEven = sqrt(2*m*Eeven/hbarSq)
betaEven = sqrt(2*m*(V0-Eeven)/hbarSq)
# even state wavefunctions for 3 regions (arbitrary normalization coefficients)
# wavefunctions shifted to make energy eigenvalues the zero baseline
psiR1even = [30*exp(betaEven*i)+Eeven for i in R1]
psiR2even = [cos(alphEven*i)+Eeven for i in R2]
psiR3even = [30*exp(-betaEven*i)+Eeven for i in R3]
# alpha & beta values for odd states
alphOdd = sqrt(2*m*Eodd/hbarSq)
betaOdd = sqrt(2*m*(V0-Eodd)/hbarSq)
# odd state wavefunctions for 3 regions (arbitrary normalization coefficients)
# wavefunctions shifted to make energy eigenvalues the zero baseline
psiR1odd = [-30*exp(betaOdd*i)+Eodd for i in R1]
psiR2odd = [sin(alphOdd*i)+Eodd for i in R2]
psiR3odd = [30*exp(-betaOdd*i)+Eodd for i in R3]
plt.figure(2)
# plot lines for potential V(x)
plt.plot([-0.6,-0.3],[10,10],'k',linewidth='4')
plt.plot([-0.3,-0.3],[10,0],'k',linewidth='4')
plt.plot([-0.3,0.3],[0,0], 'k',linewidth='4')
plt.plot([0.3,0.3], [0,10], 'k',linewidth='4')
plt.plot([0.3,0.6],[10,10], 'k',linewidth='4')
plt.xticks([-0.6,-0.4,-0.2,0,0.2,0.4,0.6])
plt.annotate('$V_0$',fontsize=16,xy=(0.23,0.82),xycoords='figure fraction')
# plot lines for energy eigenvalues
plt.plot([-0.6,0.6],[Eeven,Eeven],'g',linewidth='2',linestyle='--')
plt.plot([-0.6,0.6],[Eodd,Eodd],'g',linewidth='2',linestyle='--')
plt.annotate('Ground State Energies',fontsize=12,xy=(0.39,0.27),xycoords='figure fraction')
plt.annotate('$E_{even}=0.71545$',fontsize=12,xy=(0.75,0.20),xycoords='figure fraction')
plt.annotate('$E_{odd}=2.82139$',fontsize=12,xy=(0.755,0.40),xycoords='figure fraction')
# plot wavefunctions for each ground state energy
plt.plot(R1,psiR1even,'b',label='$\psi_{even}\ ({x})$')
plt.plot(R2,psiR2even,'b')
plt.plot(R3,psiR3even,'b')
plt.plot(R1,psiR1odd,'r',label='$\psi_{odd}\ ({x})$')
plt.plot(R2,psiR2odd,'r')
plt.plot(R3,psiR3odd,'r')
plt.annotate(r'$\psi_{1}=C\ \exp({\beta x})$',fontsize=12,xy=(0.15,0.625),xycoords='figure fraction')
plt.annotate(r'$\psi_{2odd}=A\ \sin({\alpha x})$',fontsize=12,xy=(0.42,0.65),xycoords='figure fraction')
plt.annotate(r'$\psi_{2even}=B\ \cos({\alpha x})$',fontsize=12,xy=(0.42,0.60),xycoords='figure fraction')
plt.annotate(r'$\psi_{3}=F\ \exp({-\beta x})$',fontsize=12,xy=(0.73,0.625),xycoords='figure fraction')
plt.yticks(range(-2,14,2))
# set titles
plt.xlabel('Distance (nm)')
plt.ylabel('Ground State Wavefunctions')
plt.legend(loc=9)
#*********************************************************************** | [
"[email protected]"
] | |
90a3c28ab285b4ec923d578de72c4b25d4bf8d2b | 0329e8b521fc14aaa8fda785e93e45c0e9ac7026 | /seleniumbase/console_scripts/run.py | 75618875a0f52b767f5be079a50b4f252ae14d64 | [
"MIT"
] | permissive | devopstoday11/SeleniumBase | e89a6dbb6085a568e8dc24240a731c1c6cd4c1ee | 3bfa7ed196b5d7724848981ce56b81ec64b8653c | refs/heads/master | 2022-12-31T12:31:27.572589 | 2020-10-11T22:53:54 | 2020-10-11T22:53:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,318 | py | """
SeleniumBase console scripts runner
Usage:
seleniumbase [COMMAND] [PARAMETERS]
OR sbase [COMMAND] [PARAMETERS]
Examples:
sbase install chromedriver
sbase mkdir ui_tests
sbase mkfile new_test.py
sbase options
sbase convert webdriver_unittest_file.py
sbase print my_first_test.py -n
sbase translate my_first_test.py --zh -p
sbase extract-objects my_first_test.py
sbase inject-objects my_first_test.py
sbase objectify my_first_test.py
sbase revert-objects my_first_test.py
sbase encrypt
sbase decrypt
sbase download server
sbase grid-hub start
sbase grid-node start --hub=127.0.0.1
"""
import colorama
import sys
colorama.init(autoreset=True)
def show_usage():
show_basic_usage()
sc = ("")
sc += (' Type "sbase help [COMMAND]" for specific command info.\n')
sc += (' For info on all commands, type: "seleniumbase --help".\n')
sc += (' * (Use "pytest" for running tests) *\n')
if "linux" not in sys.platform:
c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
c4 = colorama.Fore.MAGENTA + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = sc.replace("seleniumbase", c1 + "selenium" + c2 + "base" + cr)
sc = sc.replace("sbase", c1 + "s" + c2 + "base" + cr)
sc = sc.replace("pytest", c3 + "pytest" + cr)
sc = sc.replace("--help", c4 + "--help" + cr)
sc = sc.replace("help", c4 + "help" + cr)
print(sc)
def show_basic_usage():
from seleniumbase.console_scripts import logo_helper
seleniumbase_logo = logo_helper.get_seleniumbase_logo()
print(seleniumbase_logo)
print("%s" % get_version()[0:1])
print("")
sc = ("")
sc += (' * USAGE: "seleniumbase [COMMAND] [PARAMETERS]"\n')
sc += (' * OR: "sbase [COMMAND] [PARAMETERS]"\n')
sc += ("\n")
sc += ("COMMANDS:\n")
sc += (" install [DRIVER] [OPTIONS]\n")
sc += (" mkdir [DIRECTORY]\n")
sc += (" mkfile [FILE.py]\n")
sc += (" options (List common pytest options)\n")
sc += (" print [FILE] [OPTIONS]\n")
sc += (" translate [SB_FILE.py] [LANG] [ACTION]\n")
sc += (" convert [WEBDRIVER_UNITTEST_FILE.py]\n")
sc += (" extract-objects [SB_FILE.py]\n")
sc += (" inject-objects [SB_FILE.py] [OPTIONS]\n")
sc += (" objectify [SB_FILE.py] [OPTIONS]\n")
sc += (" revert-objects [SB_FILE.py]\n")
sc += (" encrypt (OR: obfuscate)\n")
sc += (" decrypt (OR: unobfuscate)\n")
sc += (" download server (Selenium Server JAR file)\n")
sc += (" grid-hub [start|stop] [OPTIONS]\n")
sc += (" grid-node [start|stop] --hub=[HOST/IP]\n")
sc += (' * (EXAMPLE: "sbase install chromedriver latest") *\n')
sc += ("")
if "linux" not in sys.platform:
c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
cr = colorama.Style.RESET_ALL
sc = sc.replace("seleniumbase", c1 + "selenium" + c2 + "base" + cr)
sc = sc.replace("sbase", c1 + "s" + c2 + "base" + cr)
print(sc)
def show_install_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "install" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase install [DRIVER_NAME] [OPTIONS]")
print(" OR: sbase install [DRIVER_NAME] [OPTIONS]")
print(" (Drivers: chromedriver, geckodriver, edgedriver")
print(" iedriver, operadriver)")
print(" Options:")
print(" VERSION Specify the version.")
print(" (Default Chromedriver version = 2.44)")
print(' Use "latest" for the latest version.')
print(" -p OR --path Also copy the driver to /usr/local/bin")
print(" Example:")
print(" sbase install chromedriver")
print(" sbase install geckodriver")
print(" sbase install edgedriver")
print(" sbase install chromedriver 85")
print(" sbase install chromedriver 85.0.4183.87")
print(" sbase install chromedriver latest")
print(" sbase install chromedriver -p")
print(" sbase install chromedriver latest -p")
print(" sbase install edgedriver 85.0.564.68")
print(" Output:")
print(" Installs the chosen webdriver to seleniumbase/drivers/")
print(" (chromedriver is required for Chrome automation)")
print(" (geckodriver is required for Firefox automation)")
print(" (edgedriver is required for Microsoft Edge automation)")
print(" (iedriver is required for InternetExplorer automation)")
print(" (operadriver is required for Opera Browser automation)")
print("")
def show_mkdir_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "mkdir" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase mkdir [DIRECTORY_NAME]")
print(" OR: sbase mkdir [DIRECTORY_NAME]")
print(" Example:")
print(" sbase mkdir browser_tests")
print(" Output:")
print(" Creates a new folder for running SBase scripts.")
print(" The new folder contains default config files,")
print(" sample tests for helping new users get started,")
print(" and Python boilerplates for setting up customized")
print(" test frameworks.")
print("")
def show_mkfile_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "mkfile" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase mkfile [FILE.py]")
print(" OR: sbase mkfile [FILE.py]")
print(" Example:")
print(" sbase mkfile new_test.py")
print(" Options:")
print(" -b / --basic (Basic boilerplate / single-line test)")
print(" Language Options:")
print(" --en / --English | --zh / --Chinese")
print(" --nl / --Dutch | --fr / --French")
print(" --it / --Italian | --ja / --Japanese")
print(" --ko / --Korean | --pt / --Portuguese")
print(" --ru / --Russian | --es / --Spanish")
print(" Output:")
print(" Creates a new SBase test file with boilerplate code.")
print(" If the file already exists, an error is raised.")
print(" By default, uses English mode and creates a")
print(" boilerplate with the 5 most common SeleniumBase")
print(' methods, which are "open", "type", "click",')
print(' "assert_element", and "assert_text". If using the')
print(' basic boilerplate option, only the "open" method')
print(' is included.')
print("")
def show_convert_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "convert" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase convert [WEBDRIVER_UNITTEST_FILE.py]")
print(" OR: sbase convert [WEBDRIVER_UNITTEST_FILE.py]")
print(" Output:")
print(" Converts a Selenium IDE exported WebDriver unittest")
print(" file into a SeleniumBase file. Adds _SB to the new")
print(" file name while keeping the original file intact.")
print(" Works with Katalon Recorder scripts.")
print(" See: http://www.katalon.com/automation-recorder")
print("")
def show_print_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "print" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase print [FILE] [OPTIONS]")
print(" OR: sbase print [FILE] [OPTIONS]")
print(" Options:")
print(" -n (Add line Numbers to the rows)")
print(" Output:")
print(" Prints the code/text of any file")
print(" with syntax-highlighting.")
print("")
def show_translate_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "translate" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase translate [SB_FILE.py] [LANGUAGE] [ACTION]")
print(" OR: sbase translate [SB_FILE.py] [LANGUAGE] [ACTION]")
print(" Languages:")
print(" --en / --English | --zh / --Chinese")
print(" --nl / --Dutch | --fr / --French")
print(" --it / --Italian | --ja / --Japanese")
print(" --ko / --Korean | --pt / --Portuguese")
print(" --ru / --Russian | --es / --Spanish")
print(" Actions:")
print(" -p / --print (Print translation output to the screen)")
print(" -o / --overwrite (Overwrite the file being translated)")
print(" -c / --copy (Copy the translation to a new .py file)")
print(" Options:")
print(" -n (include line Numbers when using the Print action)")
print(" Output:")
print(" Translates a SeleniumBase Python file into the language")
print(' specified. Method calls and "import" lines get swapped.')
print(" Both a language and an action must be specified.")
print(' The "-p" action can be paired with one other action.')
print(' When running with "-c" (or "--copy"), the new file name')
    print('       will be the original name appended with an underscore')
print(" plus the 2-letter language code of the new language.")
print(' (Example: Translating "test_1.py" into Japanese with')
print(' "-c" will create a new file called "test_1_ja.py".)')
print("")
def show_extract_objects_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "extract-objects" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase extract-objects [SB_FILE.py]")
print(" OR: sbase extract-objects [SB_FILE.py]")
print(" Output:")
print(" Creates page objects based on selectors found in a")
print(" seleniumbase Python file and saves those objects to the")
print(' "page_objects.py" file in the same folder as the tests.')
print("")
def show_inject_objects_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "inject-objects" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase inject-objects [SB_FILE.py]")
print(" OR: sbase inject-objects [SB_FILE.py]")
print(" Options:")
print(" -c, --comments (Add object selectors to the comments.)")
print(" (Default: No added comments.)")
print(" Output:")
print(' Takes the page objects found in the "page_objects.py"')
print(' file and uses those to replace matching selectors in')
print(' the selected seleniumbase Python file.')
print("")
def show_objectify_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "objectify" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase objectify [SB_FILE.py]")
print(" OR: sbase objectify [SB_FILE.py]")
print(" Options:")
print(" -c, --comments (Add object selectors to the comments.)")
print(" (Default: No added comments.)")
print(" Output:")
print(' A modified version of the file where the selectors')
print(' have been replaced with variable names defined in')
print(' "page_objects.py", supporting the Page Object Pattern.')
print("")
print(' (seleniumbase "objectify" has the same outcome as')
print(' combining "extract-objects" with "inject-objects")')
print("")
def show_revert_objects_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "revert-objects" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase revert-objects [SB_FILE.py]")
print(" OR: sbase revert-objects [SB_FILE.py]")
print(" Options:")
print(" -c, --comments (Keep existing comments for the lines.)")
print(" (Default: No comments are kept.)")
print(" Output:")
print(' Reverts the changes made by "seleniumbase objectify" or')
print(' "seleniumbase inject-objects" when run against a')
print(' seleniumbase Python file. Objects will get replaced by')
print(' selectors stored in the "page_objects.py" file.')
print("")
def show_encrypt_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "encrypt OR obfuscate" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase encrypt || seleniumbase obfuscate")
print(" --OR--")
print(" sbase encrypt || sbase obfuscate")
print(" Output:")
print(" Runs the password encryption/obfuscation tool.")
print(" (Where you can enter a password to encrypt/obfuscate.)")
print("")
def show_decrypt_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "decrypt OR unobfuscate" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase decrypt || seleniumbase unobfuscate")
print(" --OR--")
print(" sbase decrypt || sbase unobfuscate")
print(" Output:")
print(" Runs the password decryption/unobfuscation tool.")
print(" (Where you can enter an encrypted password to decrypt.)")
print("")
def show_download_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "download" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase download server")
print(" OR: sbase download server")
print(" Output:")
print(" Downloads the Selenium Standalone Server.")
print(" (Server is required for using your own Selenium Grid.)")
print("")
def show_grid_hub_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "grid-hub" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase grid-hub {start|stop|restart} [OPTIONS]")
print(" OR: sbase grid-hub {start|stop|restart} [OPTIONS]")
print(" Options:")
print(" -v, --verbose (Increase verbosity of logging output.)")
print(" (Default: Quiet logging / not verbose.)")
print(" --timeout=TIMEOUT (Close idle browser after TIMEOUT.)")
print(" (The default TIMEOUT: 230 seconds.)")
print(" (Use --timeout=0 to skip timeouts.)")
print(" Example:")
print(" seleniumbase grid-hub start")
print(" Output:")
print(" Controls the Selenium Grid Hub Server, which allows")
print(" for running tests on multiple machines in parallel")
print(" to speed up test runs and reduce the total time")
print(" of test suite execution.")
print(' You can "start" or "stop" the Grid Hub server.')
print("")
def show_grid_node_usage():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = (" " + c2 + "** " + c3 + "grid-node" + c2 + " **" + cr)
print(sc)
print("")
print(" Usage:")
print(" seleniumbase grid-node {start|stop|restart} [OPTIONS]")
print(" OR: sbase grid-node {start|stop|restart} [OPTIONS]")
print(" Options:")
print(" --hub=[HOST/IP] (The Grid Hub Hostname / IP Address.)")
print(" (Default: 127.0.0.1 if not set.)")
print(" -v, --verbose (Increase verbosity of logging output.)")
print(" (Default: Quiet logging / Not verbose.)")
print(" Example:")
print(" seleniumbase grid-node start --hub=127.0.0.1")
print(" Output:")
print(" Controls the Selenium Grid node, which serves as a")
print(" worker machine for your Selenium Grid Hub server.")
print(' You can "start" or "stop" the Grid node.')
print("")
def get_version():
import pkg_resources
version_info = None
try:
version_info = pkg_resources.require("seleniumbase")[0:1]
except Exception:
version_info = ["ERROR: Cannot detect version! Please reinstall!"]
return version_info
def show_version_info():
version = get_version()
print('\n%s\n' % version)
def show_options():
c1 = colorama.Fore.BLUE + colorama.Back.LIGHTCYAN_EX
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
sc = ("\n " + c2 + " ** " + c3 + " pytest CLI Options " + c2 + " ** " + cr)
print(sc)
print("")
line = "Here are some common pytest options to use with SeleniumBase:"
line = c1 + line + cr
print(line)
print("")
print('--browser=BROWSER (The web browser to use. Default: "chrome".)')
print('--headless (Run tests headlessly. Default mode on Linux OS.)')
print('--demo (Slow down and visually see test actions as they occur.)')
print('--slow (Slow down the automation. Faster than using Demo Mode.)')
print('--reuse-session / --rs (Reuse the browser session between tests.)')
print('--crumbs (Delete all cookies between tests reusing a session.)')
print('--maximize (Start tests with the web browser window maximized.)')
print("--incognito (Enable Chrome's Incognito mode.)")
print("--guest (Enable Chrome's Guest mode.)")
print('-m MARKER (Run tests with the specified pytest marker.)')
print('-n NUM (Multithread the tests using that many threads.)')
print('-v (Verbose mode. Prints the full names of each test run.)')
print('--html=report.html (Create a detailed pytest-html report.)')
print('--collect-only / --co (Only show discovered tests. No run.)')
print('--co -q (Only show full names of discovered tests. No run.)')
print('--trace (Enter Debug Mode immediately after starting any test.')
print(' n: Next line of method. s: Step through. c: Continue.)')
print('--pdb (Enter Debug Mode if a test fails. h: Help. c: Continue.')
print(' where: Stacktrace location. u: Up stack. d: Down stack.')
print(' longlist / ll: See code. dir(): List namespace objects.)')
print('-x (Stop running the tests after the first failure is reached.)')
print('--archive-logs (Archive old log files instead of deleting them.)')
print('--save-screenshot (Save a screenshot at the end of each test.)')
print('--check-js (Check for JavaScript errors after page loads.)')
print('--start-page=URL (The browser start page when tests begin.)')
print("--agent=STRING (Modify the web browser's User-Agent string.)")
print('--mobile (Use the mobile device emulator while running tests.)')
print('--metrics=STRING (Set mobile "CSSWidth,CSSHeight,PixelRatio".)')
print('--ad-block (Block some types of display ads after page loads.)')
print('--settings-file=FILE (Override default SeleniumBase settings.)')
print('--env=ENV (Set the test env. Access with "self.env" in tests.)')
print('--data=DATA (Extra test data. Access with "self.data" in tests.)')
print('--disable-csp (Disable the Content Security Policy of websites.)')
print('--server=SERVER (The Selenium Grid server/IP used for tests.)')
print('--port=PORT (The Selenium Grid port used by the test server.)')
print('--proxy=SERVER:PORT (Connect to a proxy server:port for tests.)')
print('--proxy=USER:PASS@SERVER:PORT (Use authenticated proxy server.)')
print("")
line = 'For the full list of ' + c2 + 'command-line options' + cr
line += ', type: "' + c1 + 'pytest' + cr + ' ' + c3 + '--help' + cr + '".'
print(line)
print("")
def show_detailed_help():
c2 = colorama.Fore.BLUE + colorama.Back.LIGHTGREEN_EX
c3 = colorama.Fore.BLUE + colorama.Back.LIGHTYELLOW_EX
c6 = colorama.Back.CYAN
cr = colorama.Style.RESET_ALL
show_basic_usage()
print(c6 + " " + c2 + " Commands: " + c6 + " ")
print(cr)
show_install_usage()
show_mkdir_usage()
show_mkfile_usage()
show_convert_usage()
show_print_usage()
show_translate_usage()
show_extract_objects_usage()
show_inject_objects_usage()
show_objectify_usage()
show_revert_objects_usage()
show_encrypt_usage()
show_decrypt_usage()
show_download_usage()
show_grid_hub_usage()
show_grid_node_usage()
print('* (Use "' + c3 + 'pytest' + cr + '" for running tests) *\n')
def main():
command = None
command_args = None
num_args = len(sys.argv)
if num_args == 1:
show_usage()
return
elif num_args == 2:
command = sys.argv[1]
command_args = []
elif num_args > 2:
command = sys.argv[1]
command_args = sys.argv[2:]
command = command.lower()
if command == "install":
if len(command_args) >= 1:
from seleniumbase.console_scripts import sb_install
sb_install.main()
else:
show_basic_usage()
show_install_usage()
elif command == "mkdir":
if len(command_args) >= 1:
from seleniumbase.console_scripts import sb_mkdir
sb_mkdir.main()
else:
show_basic_usage()
show_mkdir_usage()
elif command == "mkfile":
if len(command_args) >= 1:
from seleniumbase.console_scripts import sb_mkfile
sb_mkfile.main()
else:
show_basic_usage()
show_mkfile_usage()
elif command == "convert":
if len(command_args) == 1:
from seleniumbase.utilities.selenium_ide import convert_ide
convert_ide.main()
else:
show_basic_usage()
show_convert_usage()
elif command == "print":
if len(command_args) >= 1:
if sys.version_info[0] == 2:
c5 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
msg = '"sbase print" does NOT support Python 2! '
msg += 'Try using the Unix "cat" command instead!'
message = "\n" + c5 + msg + cr + "\n"
print("")
raise Exception(message)
from seleniumbase.console_scripts import sb_print
sb_print.main()
else:
show_basic_usage()
show_print_usage()
elif command == "translate":
if len(command_args) >= 1:
if sys.version_info[0] == 2:
c5 = colorama.Fore.RED + colorama.Back.LIGHTYELLOW_EX
cr = colorama.Style.RESET_ALL
msg = "The SeleniumBase Translator does NOT support Python 2!"
message = "\n" + c5 + msg + cr + "\n"
print("")
raise Exception(message)
from seleniumbase.translate import translator
translator.main()
else:
show_basic_usage()
show_translate_usage()
elif command == "extract-objects" or command == "extract_objects":
if len(command_args) >= 1:
from seleniumbase.console_scripts import objectify
objectify.extract_objects()
else:
show_basic_usage()
show_extract_objects_usage()
elif command == "inject-objects" or command == "inject_objects":
if len(command_args) >= 1:
from seleniumbase.console_scripts import objectify
objectify.inject_objects()
else:
show_basic_usage()
show_inject_objects_usage()
elif command == "objectify":
if len(command_args) >= 1:
from seleniumbase.console_scripts import objectify
objectify.objectify()
else:
show_basic_usage()
show_objectify_usage()
elif command == "revert-objects" or command == "revert_objects":
if len(command_args) >= 1:
from seleniumbase.console_scripts import objectify
objectify.revert_objects()
else:
show_basic_usage()
show_revert_objects_usage()
elif command == "encrypt" or command == "obfuscate":
if len(command_args) >= 0:
from seleniumbase.common import obfuscate
obfuscate.main()
else:
show_basic_usage()
show_encrypt_usage()
elif command == "decrypt" or command == "unobfuscate":
if len(command_args) >= 0:
from seleniumbase.common import unobfuscate
unobfuscate.main()
else:
show_basic_usage()
show_decrypt_usage()
elif command == "download":
if len(command_args) >= 1 and command_args[0].lower() == "server":
from seleniumbase.utilities.selenium_grid import (
download_selenium_server)
download_selenium_server.main(force_download=True)
else:
show_basic_usage()
show_download_usage()
elif command == "grid-hub" or command == "grid_hub":
if len(command_args) >= 1:
from seleniumbase.utilities.selenium_grid import grid_hub
grid_hub.main()
else:
show_basic_usage()
show_grid_hub_usage()
elif command == "grid-node" or command == "grid_node":
if len(command_args) >= 1:
from seleniumbase.utilities.selenium_grid import grid_node
grid_node.main()
else:
show_basic_usage()
show_grid_node_usage()
elif command == "version" or command == "--version":
if len(command_args) == 0:
show_version_info()
else:
show_basic_usage()
elif command == "options" or command == "--options":
show_options()
elif command == "help" or command == "--help":
if len(command_args) >= 1:
if command_args[0] == "install":
print("")
show_install_usage()
return
elif command_args[0] == "mkdir":
print("")
show_mkdir_usage()
return
elif command_args[0] == "mkfile":
print("")
show_mkfile_usage()
return
elif command_args[0] == "convert":
print("")
show_convert_usage()
return
elif command_args[0] == "print":
print("")
show_print_usage()
return
elif command_args[0] == "translate":
print("")
show_translate_usage()
return
elif command_args[0] == "extract-objects":
print("")
show_extract_objects_usage()
return
elif command_args[0] == "inject-objects":
print("")
show_inject_objects_usage()
return
elif command_args[0] == "objectify":
print("")
show_objectify_usage()
return
elif command_args[0] == "revert-objects":
print("")
show_revert_objects_usage()
return
elif command_args[0] == "encrypt":
print("")
show_encrypt_usage()
return
elif command_args[0] == "obfuscate":
print("")
show_encrypt_usage()
return
elif command_args[0] == "decrypt":
print("")
show_decrypt_usage()
return
elif command_args[0] == "unobfuscate":
print("")
show_decrypt_usage()
return
elif command_args[0] == "download":
print("")
show_download_usage()
return
elif command_args[0] == "grid-hub":
print("")
show_grid_hub_usage()
return
elif command_args[0] == "grid-node":
print("")
show_grid_node_usage()
return
show_detailed_help()
else:
show_usage()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
92a1970509df326c8143755cf9819a2206306d83 | 797403a06a463b571ceeaf49d7763b90d32ecf02 | /manage.py | 78e5dff096d42cfc04ea53f924a48507f45dd7d2 | [] | no_license | crowdbotics/anand-crowdbotics-16 | cd090cbcc8e0326391f88348de353f397e1b2cd1 | 6ee9424e94f0d3438b966b137811ab4495051753 | refs/heads/master | 2021-04-03T08:33:24.279500 | 2018-03-09T17:50:50 | 2018-03-09T17:50:50 | 124,574,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "anand_crowdbotics_16.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
fc33d8e7379198696b815ebc07274d16e233a466 | 9c2ba4f1a2d75b1916e6f20fa95c5fb32d0497d9 | /ScrapingWithPython2/code/crawler_script/userAgents.py | d8b7443d1f35f892e33a3ec4385bab0c16310377 | [] | no_license | PowerDG/DgCoreInit | abe4b15e38b730c25424f71e6927db982af27a72 | 84e6b7833ddc083b90fcc172c3812dd6f8b51e3d | refs/heads/master | 2023-07-19T11:58:09.220460 | 2019-06-07T14:43:24 | 2019-06-07T14:43:24 | 163,091,619 | 0 | 1 | null | 2023-07-06T21:20:15 | 2018-12-25T14:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 4,255 | py | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
__author__ = 'hstking [email protected]'
pcUserAgent = {
"safari 5.1 – MAC":"User-Agent:Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"safari 5.1 – Windows":"User-Agent:Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
"IE 9.0":"User-Agent:Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0);",
"IE 8.0":"User-Agent:Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"IE 7.0":"User-Agent:Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"IE 6.0":"User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Firefox 4.0.1 – MAC":"User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Firefox 4.0.1 – Windows":"User-Agent:Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Opera 11.11 – MAC":"User-Agent:Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
"Opera 11.11 – Windows":"User-Agent:Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
"Chrome 17.0 – MAC":"User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Maxthon":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
"Tencent TT":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)",
"The World 2.x":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)",
"The World 3.x":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
"sogou 1.x":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)",
"360":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)",
"Avant":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)",
"Green Browser":"User-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)"
}
mobileUserAgent = {
"iOS 4.33 – iPhone":"User-Agent:Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"iOS 4.33 – iPod Touch":"User-Agent:Mozilla/5.0 (iPod; U; CPU iPhone OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"iOS 4.33 – iPad":"User-Agent:Mozilla/5.0 (iPad; U; CPU OS 4_3_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8J2 Safari/6533.18.5",
"Android N1":"User-Agent: Mozilla/5.0 (Linux; U; Android 2.3.7; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Android QQ":"User-Agent: MQQBrowser/26 Mozilla/5.0 (Linux; U; Android 2.3.7; zh-cn; MB200 Build/GRJ22; CyanogenMod-7) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Android Opera ":"User-Agent: Opera/9.80 (Android 2.3.4; Linux; Opera Mobi/build-1107180945; U; en-GB) Presto/2.8.149 Version/11.10",
"Android Pad Moto Xoom":"User-Agent: Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"BlackBerry":"User-Agent: Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1+ (KHTML, like Gecko) Version/6.0.0.337 Mobile Safari/534.1+",
"WebOS HP Touchpad":"User-Agent: Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.0; U; en-US) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/233.70 Safari/534.6 TouchPad/1.0",
"Nokia N97":"User-Agent: Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/20.0.019; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) BrowserNG/7.1.18124",
"Windows Phone Mango":"User-Agent: Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0; HTC; Titan)",
"UC":"User-Agent: UCWEB7.0.2.37/28/999",
"UC standard":"User-Agent: NOKIA5700/ UCWEB7.0.2.37/28/999",
"UCOpenwave":"User-Agent: Openwave/ UCWEB7.0.2.37/28/999",
"UC Opera":"User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; ) Opera/UCWEB7.0.2.37/28/999"
}
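# Hypothetical usage sketch: each value embeds the "User-Agent:" prefix, so
# strip it before placing the string in a request header, e.g.
#   import random
#   agent = random.choice(list(pcUserAgent.values())).split(":", 1)[1].strip()
#   headers = {"User-Agent": agent}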
| [
"[email protected]"
] | |
fb363a89cd15293a0bed822eb4c5966d9e1ac713 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02697/s900893925.py | cfd061b2464153e0333019f32cf31aa1b124ef34 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | n, m = map(int, input().split())
i = 0
c = 0
# take pairs from opposite ends of 1..n while the pair's span n-2i-1 still
# exceeds n/2; every span produced by this loop is therefore distinct
while (i + 1) + i < (n - i - (i + 1)) and c < m:
    print(i + 1, n - i)
    c += 1
    i += 1
# fan the remaining pairs out symmetrically around the midpoint a,
# giving even spans 2, 4, 6, ...
a = n // 2 + n % 2
for i in range(m - c):
    print(a - i - 1, a + i + 1)
"[email protected]"
] | |
04489e971a9cf6a6d19f42d7c96e28cf0b5067a7 | 4e1e7c9d3848e4eed4111be11f22436ef3143e6d | /python/p146.py | 3c8f6e11949b34bf6a7404c4066e639241fd4cb1 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | rsafarov/Project-Euler-solutions | d2e3bc7ed2bb05e935b1f0e9404eec4a2dcecacd | e5061b8358ddbe9f6563c32ef82e135c233257fe | refs/heads/master | 2021-01-12T06:44:12.461955 | 2016-12-26T22:55:11 | 2016-12-26T22:55:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,405 | py | #
# Solution to Project Euler problem 146
# by Project Nayuki
#
# https://www.nayuki.io/page/project-euler-solutions
# https://github.com/nayuki/Project-Euler-solutions
#
import eulerlib
# Right off the bat, we can exclude 90% of the candidates by the following observations:
# - If n = 1 mod 2, then n^2 + 1 = 0 mod 2 which is composite.
# - Thus we require n = 0 mod 2.
# - If n = 1 mod 5, then n^2 + 9 = 0 mod 5 which is composite.
# - If n = 2 mod 5, then n^2 + 1 = 0 mod 5 which is composite.
# - If n = 3 mod 5, then n^2 + 1 = 0 mod 5 which is composite.
# - If n = 4 mod 5, then n^2 + 9 = 0 mod 5 which is composite.
# - Thus we require n = 0 mod 5.
# - Taking these two together and using the Chinese remainder theorem (CRT), we require n = 0 mod 10.
#
# For each value of n, after we generate the set {n^2 + 1, n^2 + 3, ..., n^2 + 27}, it's more efficient to take each
# prime number and test whether it divides any number, rather than take each number and test it against all prime numbers.
# This is because some numbers in this set are prime so the latter method tests some numbers against all the primes;
# the former method will bail out early as soon as ~any~ number in the set has a small prime factor.
#
# The rest of the algorithm is implemented straightforwardly.
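# Illustrative sanity check of the mod-2/mod-5 argument above (a minimal
# sketch; compute() does not rely on it): every n with n % 10 != 0 has
# n^2 + 1 or n^2 + 9 divisible by 2 or 5, hence composite for n > 2.
assert all(any((n * n + k) % p == 0 for p in (2, 5) for k in (1, 9))
           for n in range(1, 1000) if n % 10 != 0)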
def compute():
LIMIT = 150000000
INCREMENTS = [1, 3, 7, 9, 13, 27] # Must be in non-decreasing order
NON_INCREMENTS = set(i for i in range(INCREMENTS[-1]) if i not in INCREMENTS)
maxnumber = LIMIT**2 + INCREMENTS[-1]
primes = eulerlib.list_primes(eulerlib.sqrt(maxnumber))
def has_consecutive_primes(n):
# Generate the set of numbers to test for primality
n2 = n**2
temp = [(n2 + k) for k in INCREMENTS]
# Test that each number is prime.
# Note: The nesting of the loops can be reversed, but this way is much faster.
if any((x != p and x % p == 0)
for p in primes
for x in temp):
return False
# Test that each number that is not an increment is composite.
# This checks that the prime numbers we found are in fact consecutive.
return all((not is_prime(n2 + k)) for k in NON_INCREMENTS)
def is_prime(n):
end = eulerlib.sqrt(n)
for p in primes:
if p > end:
break
if n % p == 0:
return False
return True
ans = sum(n for n in range(0, LIMIT, 10) if has_consecutive_primes(n))
return str(ans)
if __name__ == "__main__":
print(compute())
| [
"[email protected]"
] | |
57f4fd86ef61862a8603a69e948aeba72ff1531f | 13d3724f5e2de71cd41177e73ea331bb02b2c6fe | /network.py | c63b259bcea27a068b7ffc7cadc7e322fb8bee07 | [] | no_license | chengyang317/deep_encode_decode | db87a2a5f1b6d0f86fbb4ff93812ceff2394b3cf | b2d09e3768b26f9a831b0d738f4e03feed80471a | refs/heads/master | 2021-01-01T04:33:53.003522 | 2016-05-19T01:01:25 | 2016-05-19T01:01:25 | 59,162,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | import tensorflow as tf
import prettytensor as pt
import numpy as np
class NetWork(object):
    """
    Network for the encode_decode architecture.
    """
    def __init__(self, batch_size, data_size, num_classes):
        # placeholders for a batch of flattened inputs and their one-hot labels
        self.input_tensor = tf.placeholder(tf.float32, shape=(batch_size, data_size))
        self.label_tensor = tf.placeholder(tf.float32, shape=(batch_size, num_classes))
        # wrap the input so prettytensor layers can be chained onto it
        self.pretty_input = pt.wrap(self.input_tensor)
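# Hypothetical usage sketch (sizes illustrative, e.g. flattened MNIST):
#   net = NetWork(batch_size=128, data_size=784, num_classes=10)
#   encoder = net.pretty_input.fully_connected(256)  # chain prettytensor layers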
| [
"[email protected]"
] | |
47a89012a2c09dd20a597a64d4632ba171432975 | 3aab11d445011f4a0de1376886dd3899aba44e68 | /opps/contrib/notifications/migrations/0001_initial.py | a2ce3a26f6a442aeb6d8856594359592dcadb7e2 | [
"MIT"
] | permissive | opps/opps | 4ba6a08ac5aa31be48c245b2e8f9d9a714a5e473 | 5552924fa34ea40d24febeac5046bd59f62e0e4f | refs/heads/master | 2023-08-24T21:09:23.489540 | 2023-05-22T20:07:33 | 2023-05-22T20:07:33 | 7,712,379 | 166 | 76 | MIT | 2022-01-06T22:53:23 | 2013-01-20T03:56:15 | Python | UTF-8 | Python | false | false | 16,534 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Notification'
db.create_table(u'notifications_notification', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('date_insert', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('date_update', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['%s.%s' % (User._meta.app_label, User._meta.object_name)])),
('site', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['sites.Site'])),
('site_iid', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True, max_length=4, null=True, blank=True)),
('site_domain', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=100, null=True, blank=True)),
('date_available', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True, db_index=True)),
('published', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
('container', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['containers.Container'])),
('action', self.gf('django.db.models.fields.CharField')(default='message', max_length=75)),
('type', self.gf('django.db.models.fields.CharField')(default='json', max_length=10)),
('message', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'notifications', ['Notification'])
def backwards(self, orm):
# Deleting model 'Notification'
db.delete_table(u'notifications_notification')
models = {
u'%s.%s' % (User._meta.app_label, User._meta.module_name): {
'Meta': {'object_name': User.__name__},
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'channels.channel': {
'Meta': {'ordering': "['name', 'parent__id', 'published']", 'unique_together': "(('site', 'long_slug', 'slug', 'parent'),)", 'object_name': 'Channel'},
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'include_in_main_rss': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'layout': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '250', 'db_index': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'long_slug': ('django.db.models.fields.SlugField', [], {'max_length': '250'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'subchannel'", 'null': 'True', 'to': u"orm['channels.Channel']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'show_in_menu': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
},
u'containers.container': {
'Meta': {'ordering': "['-date_available']", 'unique_together': "(('site', 'channel_long_slug', 'slug'),)", 'object_name': 'Container'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['channels.Channel']"}),
'channel_long_slug': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '250', 'null': 'True', 'blank': 'True'}),
'channel_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '140', 'null': 'True', 'blank': 'True'}),
'child_app_label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
'child_class': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '30', 'null': 'True', 'blank': 'True'}),
'child_module': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '120', 'null': 'True', 'blank': 'True'}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'hat': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['images.Image']", 'null': 'True', 'through': u"orm['containers.ContainerImage']", 'blank': 'True'}),
'json': ('opps.db.models.fields.jsonf.JSONField', [], {'null': 'True', 'blank': 'True'}),
'main_image': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'containers_container_mainimage'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['images.Image']"}),
'main_image_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_containers.container_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'show_on_root_channel': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
},
u'containers.containerimage': {
'Meta': {'ordering': "('order',)", 'object_name': 'ContainerImage'},
'caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.Container']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['images.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'images.image': {
'Meta': {'object_name': 'Image'},
'archive': ('django.db.models.fields.files.FileField', [], {'max_length': '255'}),
'crop_example': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'crop_x1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_x2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_y1': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'crop_y2': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fit_in': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'flop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'halign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150'}),
'smart': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.CharField', [], {'max_length': '4000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)}),
'valign': ('django.db.models.fields.CharField', [], {'default': 'False', 'max_length': '6', 'null': 'True', 'blank': 'True'})
},
u'notifications.notification': {
'Meta': {'object_name': 'Notification'},
'action': ('django.db.models.fields.CharField', [], {'default': "'message'", 'max_length': '75'}),
'container': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['containers.Container']"}),
'date_available': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': u"orm['sites.Site']"}),
'site_domain': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'site_iid': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'max_length': '4', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'json'", 'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['notifications'] | [
"[email protected]"
] | |
379642818204d5baebc8e7103b88c69cdf947053 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /S4uZaKhcDa7pJ33nu_24.py | ca56f99a3e15d892f27398e2d0cc7c9148315d09 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py |
from datetime import datetime as dt, timedelta as td
def week_after(d):
    # parse a "dd/mm/yyyy" date string, add seven days, and format it back the same way
    return (dt.strptime(d, '%d/%m/%Y') + td(days=7)).strftime('%d/%m/%Y')
| [
"[email protected]"
] | |
a8038e69cb1168e45a240fb3af467e8a4f54c72c | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2021_09_01_preview/models/_models_py3.py | a3645379c0479487204e3a3056c25d6c3bce9200 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 153,493 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
import msrest.serialization
from ._app_platform_management_client_enums import *
class ApplicationInsightsAgentVersions(msrest.serialization.Model):
"""Application Insights agent versions properties payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar java: Indicates the version of application insight java agent.
:vartype java: str
"""
_validation = {
'java': {'readonly': True},
}
_attribute_map = {
'java': {'key': 'java', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ApplicationInsightsAgentVersions, self).__init__(**kwargs)
self.java = None
class Resource(msrest.serialization.Model):
"""The core properties of ARM resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
"""The resource model definition for a ARM proxy resource. It will have everything other than required location and tags.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(ProxyResource, self).__init__(**kwargs)
class AppResource(ProxyResource):
"""App resource payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar properties: Properties of the App resource.
:vartype properties: ~azure.mgmt.appplatform.v2021_09_01_preview.models.AppResourceProperties
:ivar identity: The Managed Identity type of the app resource.
:vartype identity: ~azure.mgmt.appplatform.v2021_09_01_preview.models.ManagedIdentityProperties
    :ivar location: The GEO location of the application, always the same as its parent resource.
:vartype location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'AppResourceProperties'},
'identity': {'key': 'identity', 'type': 'ManagedIdentityProperties'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
*,
properties: Optional["AppResourceProperties"] = None,
identity: Optional["ManagedIdentityProperties"] = None,
location: Optional[str] = None,
**kwargs
):
"""
:keyword properties: Properties of the App resource.
:paramtype properties: ~azure.mgmt.appplatform.v2021_09_01_preview.models.AppResourceProperties
:keyword identity: The Managed Identity type of the app resource.
:paramtype identity:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ManagedIdentityProperties
        :keyword location: The GEO location of the application, always the same as its parent
         resource.
:paramtype location: str
"""
super(AppResource, self).__init__(**kwargs)
self.properties = properties
self.identity = identity
self.location = location
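# Illustrative sketch (not part of the generated module): building an App resource
# payload from the models above. The location and flag values are placeholders
# chosen for illustration, not taken from the source.
#
#   app = AppResource(
#       location="eastus",
#       properties=AppResourceProperties(public=True, https_only=True),
#   )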
class AppResourceCollection(msrest.serialization.Model):
"""Object that includes an array of App resources and a possible link for next set.
:ivar value: Collection of App resources.
:vartype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.AppResource]
:ivar next_link: URL client should use to fetch the next page (per server side paging).
It's null for now, added for future use.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AppResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["AppResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: Collection of App resources.
:paramtype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.AppResource]
:keyword next_link: URL client should use to fetch the next page (per server side paging).
It's null for now, added for future use.
:paramtype next_link: str
"""
super(AppResourceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class AppResourceProperties(msrest.serialization.Model):
"""App resource properties payload.
Variables are only populated by the server, and will be ignored when sending a request.
    :ivar public: Indicates whether the App exposes a public endpoint.
:vartype public: bool
:ivar url: URL of the App.
:vartype url: str
:ivar provisioning_state: Provisioning state of the App. Possible values include: "Succeeded",
"Failed", "Creating", "Updating".
:vartype provisioning_state: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.AppResourceProvisioningState
:ivar active_deployment_name: Name of the active deployment of the App.
:vartype active_deployment_name: str
    :ivar fqdn: Fully qualified DNS name.
:vartype fqdn: str
    :ivar https_only: Indicates whether only HTTPS is allowed.
:vartype https_only: bool
:ivar created_time: Date time when the resource is created.
:vartype created_time: ~datetime.datetime
:ivar temporary_disk: Temporary disk settings.
:vartype temporary_disk: ~azure.mgmt.appplatform.v2021_09_01_preview.models.TemporaryDisk
:ivar persistent_disk: Persistent disk settings.
:vartype persistent_disk: ~azure.mgmt.appplatform.v2021_09_01_preview.models.PersistentDisk
:ivar custom_persistent_disks: List of custom persistent disks.
:vartype custom_persistent_disks:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.CustomPersistentDiskResource]
    :ivar enable_end_to_end_tls: Indicates whether end-to-end TLS is enabled.
:vartype enable_end_to_end_tls: bool
:ivar loaded_certificates: Collection of loaded certificates.
:vartype loaded_certificates:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.LoadedCertificate]
"""
_validation = {
'url': {'readonly': True},
'provisioning_state': {'readonly': True},
'created_time': {'readonly': True},
}
_attribute_map = {
'public': {'key': 'public', 'type': 'bool'},
'url': {'key': 'url', 'type': 'str'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'active_deployment_name': {'key': 'activeDeploymentName', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
'https_only': {'key': 'httpsOnly', 'type': 'bool'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'temporary_disk': {'key': 'temporaryDisk', 'type': 'TemporaryDisk'},
'persistent_disk': {'key': 'persistentDisk', 'type': 'PersistentDisk'},
'custom_persistent_disks': {'key': 'customPersistentDisks', 'type': '[CustomPersistentDiskResource]'},
'enable_end_to_end_tls': {'key': 'enableEndToEndTLS', 'type': 'bool'},
'loaded_certificates': {'key': 'loadedCertificates', 'type': '[LoadedCertificate]'},
}
def __init__(
self,
*,
public: Optional[bool] = None,
active_deployment_name: Optional[str] = None,
fqdn: Optional[str] = None,
https_only: Optional[bool] = False,
temporary_disk: Optional["TemporaryDisk"] = None,
persistent_disk: Optional["PersistentDisk"] = None,
custom_persistent_disks: Optional[List["CustomPersistentDiskResource"]] = None,
enable_end_to_end_tls: Optional[bool] = False,
loaded_certificates: Optional[List["LoadedCertificate"]] = None,
**kwargs
):
"""
        :keyword public: Indicates whether the App exposes a public endpoint.
:paramtype public: bool
:keyword active_deployment_name: Name of the active deployment of the App.
:paramtype active_deployment_name: str
        :keyword fqdn: Fully qualified DNS name.
:paramtype fqdn: str
        :keyword https_only: Indicates whether only HTTPS is allowed.
:paramtype https_only: bool
:keyword temporary_disk: Temporary disk settings.
:paramtype temporary_disk: ~azure.mgmt.appplatform.v2021_09_01_preview.models.TemporaryDisk
:keyword persistent_disk: Persistent disk settings.
:paramtype persistent_disk: ~azure.mgmt.appplatform.v2021_09_01_preview.models.PersistentDisk
:keyword custom_persistent_disks: List of custom persistent disks.
:paramtype custom_persistent_disks:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.CustomPersistentDiskResource]
        :keyword enable_end_to_end_tls: Indicates whether end-to-end TLS is enabled.
:paramtype enable_end_to_end_tls: bool
:keyword loaded_certificates: Collection of loaded certificates.
:paramtype loaded_certificates:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.LoadedCertificate]
"""
super(AppResourceProperties, self).__init__(**kwargs)
self.public = public
self.url = None
self.provisioning_state = None
self.active_deployment_name = active_deployment_name
self.fqdn = fqdn
self.https_only = https_only
self.created_time = None
self.temporary_disk = temporary_disk
self.persistent_disk = persistent_disk
self.custom_persistent_disks = custom_persistent_disks
self.enable_end_to_end_tls = enable_end_to_end_tls
self.loaded_certificates = loaded_certificates
class AvailableOperations(msrest.serialization.Model):
"""Available operations of the service.
:ivar value: Collection of available operation details.
:vartype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.OperationDetail]
:ivar next_link: URL client should use to fetch the next page (per server side paging).
It's null for now, added for future use.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[OperationDetail]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["OperationDetail"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: Collection of available operation details.
:paramtype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.OperationDetail]
:keyword next_link: URL client should use to fetch the next page (per server side paging).
It's null for now, added for future use.
:paramtype next_link: str
"""
super(AvailableOperations, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class AvailableRuntimeVersions(msrest.serialization.Model):
"""AvailableRuntimeVersions.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A list of all supported runtime versions.
:vartype value:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.SupportedRuntimeVersion]
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SupportedRuntimeVersion]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(AvailableRuntimeVersions, self).__init__(**kwargs)
self.value = None
class CustomPersistentDiskProperties(msrest.serialization.Model):
"""Custom persistent disk resource payload.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AzureFileVolume.
All required parameters must be populated in order to send to Azure.
    :ivar type: Required. The type of the underlying resource to mount as a persistent
     disk. Constant filled by server. Possible values include: "AzureFileVolume".
:vartype type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.Type
:ivar mount_path: Required. The mount path of the persistent disk.
:vartype mount_path: str
:ivar read_only: Indicates whether the persistent disk is a readOnly one.
:vartype read_only: bool
:ivar mount_options: These are the mount options for a persistent disk.
:vartype mount_options: list[str]
"""
_validation = {
'type': {'required': True},
'mount_path': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'mount_path': {'key': 'mountPath', 'type': 'str'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'mount_options': {'key': 'mountOptions', 'type': '[str]'},
}
_subtype_map = {
'type': {'AzureFileVolume': 'AzureFileVolume'}
}
def __init__(
self,
*,
mount_path: str,
read_only: Optional[bool] = None,
mount_options: Optional[List[str]] = None,
**kwargs
):
"""
:keyword mount_path: Required. The mount path of the persistent disk.
:paramtype mount_path: str
:keyword read_only: Indicates whether the persistent disk is a readOnly one.
:paramtype read_only: bool
:keyword mount_options: These are the mount options for a persistent disk.
:paramtype mount_options: list[str]
"""
super(CustomPersistentDiskProperties, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.mount_path = mount_path
self.read_only = read_only
self.mount_options = mount_options
class AzureFileVolume(CustomPersistentDiskProperties):
"""The properties of the Azure File volume. Azure File shares are mounted as volumes.
All required parameters must be populated in order to send to Azure.
    :ivar type: Required. The type of the underlying resource to mount as a persistent
     disk. Constant filled by server. Possible values include: "AzureFileVolume".
:vartype type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.Type
:ivar mount_path: Required. The mount path of the persistent disk.
:vartype mount_path: str
:ivar read_only: Indicates whether the persistent disk is a readOnly one.
:vartype read_only: bool
:ivar mount_options: These are the mount options for a persistent disk.
:vartype mount_options: list[str]
:ivar share_name: Required. The share name of the Azure File share.
:vartype share_name: str
"""
_validation = {
'type': {'required': True},
'mount_path': {'required': True},
'share_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'mount_path': {'key': 'mountPath', 'type': 'str'},
'read_only': {'key': 'readOnly', 'type': 'bool'},
'mount_options': {'key': 'mountOptions', 'type': '[str]'},
'share_name': {'key': 'shareName', 'type': 'str'},
}
def __init__(
self,
*,
mount_path: str,
share_name: str,
read_only: Optional[bool] = None,
mount_options: Optional[List[str]] = None,
**kwargs
):
"""
:keyword mount_path: Required. The mount path of the persistent disk.
:paramtype mount_path: str
:keyword read_only: Indicates whether the persistent disk is a readOnly one.
:paramtype read_only: bool
:keyword mount_options: These are the mount options for a persistent disk.
:paramtype mount_options: list[str]
:keyword share_name: Required. The share name of the Azure File share.
:paramtype share_name: str
"""
super(AzureFileVolume, self).__init__(mount_path=mount_path, read_only=read_only, mount_options=mount_options, **kwargs)
self.type = 'AzureFileVolume' # type: str
self.share_name = share_name
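# Illustrative sketch (not part of the generated module): an Azure File share
# mounted read-only. The share name, mount path, and mount options below are
# placeholder values, not taken from the source.
#
#   volume = AzureFileVolume(
#       mount_path="/mnt/azurefile",
#       share_name="my-share",
#       read_only=True,
#       mount_options=["uid=0", "gid=0"],
#   )
# __init__ fills the polymorphic 'type' discriminator with "AzureFileVolume".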
class BindingResource(ProxyResource):
"""Binding resource payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar properties: Properties of the Binding resource.
:vartype properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.BindingResourceProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'BindingResourceProperties'},
}
def __init__(
self,
*,
properties: Optional["BindingResourceProperties"] = None,
**kwargs
):
"""
:keyword properties: Properties of the Binding resource.
:paramtype properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.BindingResourceProperties
"""
super(BindingResource, self).__init__(**kwargs)
self.properties = properties
class BindingResourceCollection(msrest.serialization.Model):
"""Object that includes an array of Binding resources and a possible link for next set.
:ivar value: Collection of Binding resources.
:vartype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.BindingResource]
:ivar next_link: URL client should use to fetch the next page (per server side paging).
It's null for now, added for future use.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[BindingResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["BindingResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: Collection of Binding resources.
:paramtype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.BindingResource]
:keyword next_link: URL client should use to fetch the next page (per server side paging).
It's null for now, added for future use.
:paramtype next_link: str
"""
super(BindingResourceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class BindingResourceProperties(msrest.serialization.Model):
"""Binding resource properties payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar resource_name: The name of the bound resource.
:vartype resource_name: str
:ivar resource_type: The standard Azure resource type of the bound resource.
:vartype resource_type: str
:ivar resource_id: The Azure resource id of the bound resource.
:vartype resource_id: str
:ivar key: The key of the bound resource.
:vartype key: str
:ivar binding_parameters: Binding parameters of the Binding resource.
:vartype binding_parameters: dict[str, any]
    :ivar generated_properties: The generated Spring Boot property file for this binding. The
     secret will be redacted.
:vartype generated_properties: str
:ivar created_at: Creation time of the Binding resource.
:vartype created_at: str
:ivar updated_at: Update time of the Binding resource.
:vartype updated_at: str
"""
_validation = {
'resource_name': {'readonly': True},
'resource_type': {'readonly': True},
'generated_properties': {'readonly': True},
'created_at': {'readonly': True},
'updated_at': {'readonly': True},
}
_attribute_map = {
'resource_name': {'key': 'resourceName', 'type': 'str'},
'resource_type': {'key': 'resourceType', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'key': {'key': 'key', 'type': 'str'},
'binding_parameters': {'key': 'bindingParameters', 'type': '{object}'},
'generated_properties': {'key': 'generatedProperties', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'str'},
'updated_at': {'key': 'updatedAt', 'type': 'str'},
}
def __init__(
self,
*,
resource_id: Optional[str] = None,
key: Optional[str] = None,
binding_parameters: Optional[Dict[str, Any]] = None,
**kwargs
):
"""
:keyword resource_id: The Azure resource id of the bound resource.
:paramtype resource_id: str
:keyword key: The key of the bound resource.
:paramtype key: str
:keyword binding_parameters: Binding parameters of the Binding resource.
:paramtype binding_parameters: dict[str, any]
"""
super(BindingResourceProperties, self).__init__(**kwargs)
self.resource_name = None
self.resource_type = None
self.resource_id = resource_id
self.key = key
self.binding_parameters = binding_parameters
self.generated_properties = None
self.created_at = None
self.updated_at = None
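# Illustrative sketch (not part of the generated module): only resourceId, key,
# and bindingParameters are writable; the remaining fields are server-populated.
# The resource id, key, and parameter names below are illustrative assumptions.
#
#   binding_props = BindingResourceProperties(
#       resource_id="/subscriptions/.../databaseAccounts/my-cosmos",
#       key="<primary-key>",
#       binding_parameters={"databaseName": "db"},
#   )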
class CertificateProperties(msrest.serialization.Model):
"""Certificate resource payload.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ContentCertificateProperties, KeyVaultCertificateProperties.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :ivar type: Required. The type of the certificate source. Constant filled by server.
:vartype type: str
:ivar thumbprint: The thumbprint of certificate.
:vartype thumbprint: str
:ivar issuer: The issuer of certificate.
:vartype issuer: str
:ivar issued_date: The issue date of certificate.
:vartype issued_date: str
:ivar expiration_date: The expiration date of certificate.
:vartype expiration_date: str
:ivar activate_date: The activate date of certificate.
:vartype activate_date: str
:ivar subject_name: The subject name of certificate.
:vartype subject_name: str
:ivar dns_names: The domain list of certificate.
:vartype dns_names: list[str]
"""
_validation = {
'type': {'required': True},
'thumbprint': {'readonly': True},
'issuer': {'readonly': True},
'issued_date': {'readonly': True},
'expiration_date': {'readonly': True},
'activate_date': {'readonly': True},
'subject_name': {'readonly': True},
'dns_names': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'issuer': {'key': 'issuer', 'type': 'str'},
'issued_date': {'key': 'issuedDate', 'type': 'str'},
'expiration_date': {'key': 'expirationDate', 'type': 'str'},
'activate_date': {'key': 'activateDate', 'type': 'str'},
'subject_name': {'key': 'subjectName', 'type': 'str'},
'dns_names': {'key': 'dnsNames', 'type': '[str]'},
}
_subtype_map = {
'type': {'ContentCertificate': 'ContentCertificateProperties', 'KeyVaultCertificate': 'KeyVaultCertificateProperties'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(CertificateProperties, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.thumbprint = None
self.issuer = None
self.issued_date = None
self.expiration_date = None
self.activate_date = None
self.subject_name = None
self.dns_names = None
class CertificateResource(ProxyResource):
"""Certificate resource payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar properties: Properties of the certificate resource payload.
:vartype properties: ~azure.mgmt.appplatform.v2021_09_01_preview.models.CertificateProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'CertificateProperties'},
}
def __init__(
self,
*,
properties: Optional["CertificateProperties"] = None,
**kwargs
):
"""
:keyword properties: Properties of the certificate resource payload.
:paramtype properties: ~azure.mgmt.appplatform.v2021_09_01_preview.models.CertificateProperties
"""
super(CertificateResource, self).__init__(**kwargs)
self.properties = properties
class CertificateResourceCollection(msrest.serialization.Model):
"""Collection compose of certificate resources list and a possible link for next page.
:ivar value: The certificate resources list.
:vartype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.CertificateResource]
:ivar next_link: The link to next page of certificate list.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CertificateResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["CertificateResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: The certificate resources list.
:paramtype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.CertificateResource]
:keyword next_link: The link to next page of certificate list.
:paramtype next_link: str
"""
super(CertificateResourceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class CloudErrorBody(msrest.serialization.Model):
"""An error response from the service.
:ivar code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:vartype code: str
:ivar message: A message describing the error, intended to be suitable for display in a user
interface.
:vartype message: str
:ivar target: The target of the particular error. For example, the name of the property in
error.
:vartype target: str
:ivar details: A list of additional details about the error.
:vartype details: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.CloudErrorBody]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[CloudErrorBody]'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
target: Optional[str] = None,
details: Optional[List["CloudErrorBody"]] = None,
**kwargs
):
"""
        :keyword code: An identifier for the error. Codes are invariant and are intended to be
         consumed programmatically.
:paramtype code: str
        :keyword message: A message describing the error, intended to be suitable for display in a
         user interface.
:paramtype message: str
:keyword target: The target of the particular error. For example, the name of the property in
error.
:paramtype target: str
:keyword details: A list of additional details about the error.
:paramtype details: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.CloudErrorBody]
"""
super(CloudErrorBody, self).__init__(**kwargs)
self.code = code
self.message = message
self.target = target
self.details = details
class ClusterResourceProperties(msrest.serialization.Model):
"""Service properties payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: Provisioning state of the Service. Possible values include:
"Creating", "Updating", "Starting", "Stopping", "Deleting", "Deleted", "Succeeded", "Failed",
"Moving", "Moved", "MoveFailed".
:vartype provisioning_state: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.ProvisioningState
:ivar network_profile: Network profile of the Service.
:vartype network_profile: ~azure.mgmt.appplatform.v2021_09_01_preview.models.NetworkProfile
:ivar version: Version of the Service.
:vartype version: int
:ivar service_id: ServiceInstanceEntity GUID which uniquely identifies a created resource.
:vartype service_id: str
:ivar power_state: Power state of the Service. Possible values include: "Running", "Stopped".
:vartype power_state: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.PowerState
"""
_validation = {
'provisioning_state': {'readonly': True},
'version': {'readonly': True},
'service_id': {'readonly': True},
'power_state': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'network_profile': {'key': 'networkProfile', 'type': 'NetworkProfile'},
'version': {'key': 'version', 'type': 'int'},
'service_id': {'key': 'serviceId', 'type': 'str'},
'power_state': {'key': 'powerState', 'type': 'str'},
}
def __init__(
self,
*,
network_profile: Optional["NetworkProfile"] = None,
**kwargs
):
"""
:keyword network_profile: Network profile of the Service.
:paramtype network_profile: ~azure.mgmt.appplatform.v2021_09_01_preview.models.NetworkProfile
"""
super(ClusterResourceProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.network_profile = network_profile
self.version = None
self.service_id = None
self.power_state = None
class ConfigServerGitProperty(msrest.serialization.Model):
"""Property of git.
All required parameters must be populated in order to send to Azure.
:ivar repositories: Repositories of git.
:vartype repositories:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.GitPatternRepository]
:ivar uri: Required. URI of the repository.
:vartype uri: str
:ivar label: Label of the repository.
:vartype label: str
:ivar search_paths: Searching path of the repository.
:vartype search_paths: list[str]
:ivar username: Username of git repository basic auth.
:vartype username: str
:ivar password: Password of git repository basic auth.
:vartype password: str
:ivar host_key: Public sshKey of git repository.
:vartype host_key: str
:ivar host_key_algorithm: SshKey algorithm of git repository.
:vartype host_key_algorithm: str
    :ivar private_key: Private sshKey of git repository.
:vartype private_key: str
:ivar strict_host_key_checking: Strict host key checking or not.
:vartype strict_host_key_checking: bool
"""
_validation = {
'uri': {'required': True},
}
_attribute_map = {
'repositories': {'key': 'repositories', 'type': '[GitPatternRepository]'},
'uri': {'key': 'uri', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'search_paths': {'key': 'searchPaths', 'type': '[str]'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'host_key': {'key': 'hostKey', 'type': 'str'},
'host_key_algorithm': {'key': 'hostKeyAlgorithm', 'type': 'str'},
'private_key': {'key': 'privateKey', 'type': 'str'},
'strict_host_key_checking': {'key': 'strictHostKeyChecking', 'type': 'bool'},
}
def __init__(
self,
*,
uri: str,
repositories: Optional[List["GitPatternRepository"]] = None,
label: Optional[str] = None,
search_paths: Optional[List[str]] = None,
username: Optional[str] = None,
password: Optional[str] = None,
host_key: Optional[str] = None,
host_key_algorithm: Optional[str] = None,
private_key: Optional[str] = None,
strict_host_key_checking: Optional[bool] = None,
**kwargs
):
"""
:keyword repositories: Repositories of git.
:paramtype repositories:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.GitPatternRepository]
:keyword uri: Required. URI of the repository.
:paramtype uri: str
:keyword label: Label of the repository.
:paramtype label: str
:keyword search_paths: Searching path of the repository.
:paramtype search_paths: list[str]
:keyword username: Username of git repository basic auth.
:paramtype username: str
:keyword password: Password of git repository basic auth.
:paramtype password: str
:keyword host_key: Public sshKey of git repository.
:paramtype host_key: str
:keyword host_key_algorithm: SshKey algorithm of git repository.
:paramtype host_key_algorithm: str
        :keyword private_key: Private sshKey of git repository.
:paramtype private_key: str
:keyword strict_host_key_checking: Strict host key checking or not.
:paramtype strict_host_key_checking: bool
"""
super(ConfigServerGitProperty, self).__init__(**kwargs)
self.repositories = repositories
self.uri = uri
self.label = label
self.search_paths = search_paths
self.username = username
self.password = password
self.host_key = host_key
self.host_key_algorithm = host_key_algorithm
self.private_key = private_key
self.strict_host_key_checking = strict_host_key_checking
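# Illustrative sketch (not part of the generated module): a public git backend
# for the config server. The URI and label below are placeholders.
#
#   git_property = ConfigServerGitProperty(
#       uri="https://github.com/Azure-Samples/piggymetrics-config",
#       label="master",
#       search_paths=["/"],
#   )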
class ConfigServerProperties(msrest.serialization.Model):
"""Config server git properties payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: State of the config server. Possible values include: "NotAvailable",
"Deleted", "Failed", "Succeeded", "Updating".
:vartype provisioning_state: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerState
    :ivar error: Error when applying config server settings.
:vartype error: ~azure.mgmt.appplatform.v2021_09_01_preview.models.Error
:ivar config_server: Settings of config server.
:vartype config_server: ~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerSettings
"""
_validation = {
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'config_server': {'key': 'configServer', 'type': 'ConfigServerSettings'},
}
def __init__(
self,
*,
error: Optional["Error"] = None,
config_server: Optional["ConfigServerSettings"] = None,
**kwargs
):
"""
        :keyword error: Error when applying config server settings.
:paramtype error: ~azure.mgmt.appplatform.v2021_09_01_preview.models.Error
:keyword config_server: Settings of config server.
:paramtype config_server:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerSettings
"""
super(ConfigServerProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.error = error
self.config_server = config_server
class ConfigServerResource(ProxyResource):
"""Config Server resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar properties: Properties of the Config Server resource.
:vartype properties: ~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ConfigServerProperties'},
}
def __init__(
self,
*,
properties: Optional["ConfigServerProperties"] = None,
**kwargs
):
"""
:keyword properties: Properties of the Config Server resource.
:paramtype properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerProperties
"""
super(ConfigServerResource, self).__init__(**kwargs)
self.properties = properties
class ConfigServerSettings(msrest.serialization.Model):
"""The settings of config server.
:ivar git_property: Property of git environment.
:vartype git_property:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerGitProperty
"""
_attribute_map = {
'git_property': {'key': 'gitProperty', 'type': 'ConfigServerGitProperty'},
}
def __init__(
self,
*,
git_property: Optional["ConfigServerGitProperty"] = None,
**kwargs
):
"""
:keyword git_property: Property of git environment.
:paramtype git_property:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerGitProperty
"""
super(ConfigServerSettings, self).__init__(**kwargs)
self.git_property = git_property
class ConfigServerSettingsErrorRecord(msrest.serialization.Model):
"""Error record of the config server settings.
:ivar name: The name of the config server settings error record.
:vartype name: str
:ivar uri: The uri of the config server settings error record.
:vartype uri: str
:ivar messages: The detail error messages of the record.
:vartype messages: list[str]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'messages': {'key': 'messages', 'type': '[str]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
uri: Optional[str] = None,
messages: Optional[List[str]] = None,
**kwargs
):
"""
:keyword name: The name of the config server settings error record.
:paramtype name: str
:keyword uri: The uri of the config server settings error record.
:paramtype uri: str
:keyword messages: The detail error messages of the record.
:paramtype messages: list[str]
"""
super(ConfigServerSettingsErrorRecord, self).__init__(**kwargs)
self.name = name
self.uri = uri
self.messages = messages
class ConfigServerSettingsValidateResult(msrest.serialization.Model):
"""Validation result for config server settings.
    :ivar is_valid: Indicates whether the config server settings are valid.
:vartype is_valid: bool
:ivar details: The detail validation results.
:vartype details:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerSettingsErrorRecord]
"""
_attribute_map = {
'is_valid': {'key': 'isValid', 'type': 'bool'},
'details': {'key': 'details', 'type': '[ConfigServerSettingsErrorRecord]'},
}
def __init__(
self,
*,
is_valid: Optional[bool] = None,
details: Optional[List["ConfigServerSettingsErrorRecord"]] = None,
**kwargs
):
"""
        :keyword is_valid: Indicates whether the config server settings are valid.
:paramtype is_valid: bool
:keyword details: The detail validation results.
:paramtype details:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ConfigServerSettingsErrorRecord]
"""
super(ConfigServerSettingsValidateResult, self).__init__(**kwargs)
self.is_valid = is_valid
self.details = details
class ContentCertificateProperties(CertificateProperties):
"""Properties of certificate imported from key vault.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :ivar type: Required. The type of the certificate source. Constant filled by server.
:vartype type: str
:ivar thumbprint: The thumbprint of certificate.
:vartype thumbprint: str
:ivar issuer: The issuer of certificate.
:vartype issuer: str
:ivar issued_date: The issue date of certificate.
:vartype issued_date: str
:ivar expiration_date: The expiration date of certificate.
:vartype expiration_date: str
:ivar activate_date: The activate date of certificate.
:vartype activate_date: str
:ivar subject_name: The subject name of certificate.
:vartype subject_name: str
:ivar dns_names: The domain list of certificate.
:vartype dns_names: list[str]
:ivar content: The content of uploaded certificate.
:vartype content: str
"""
_validation = {
'type': {'required': True},
'thumbprint': {'readonly': True},
'issuer': {'readonly': True},
'issued_date': {'readonly': True},
'expiration_date': {'readonly': True},
'activate_date': {'readonly': True},
'subject_name': {'readonly': True},
'dns_names': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'issuer': {'key': 'issuer', 'type': 'str'},
'issued_date': {'key': 'issuedDate', 'type': 'str'},
'expiration_date': {'key': 'expirationDate', 'type': 'str'},
'activate_date': {'key': 'activateDate', 'type': 'str'},
'subject_name': {'key': 'subjectName', 'type': 'str'},
'dns_names': {'key': 'dnsNames', 'type': '[str]'},
'content': {'key': 'content', 'type': 'str'},
}
def __init__(
self,
*,
content: Optional[str] = None,
**kwargs
):
"""
:keyword content: The content of uploaded certificate.
:paramtype content: str
"""
super(ContentCertificateProperties, self).__init__(**kwargs)
self.type = 'ContentCertificate' # type: str
self.content = content
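# Illustrative sketch (not part of the generated module): wrapping an uploaded
# certificate in a CertificateResource. The content value is a placeholder.
#
#   cert = CertificateResource(
#       properties=ContentCertificateProperties(content="<base64-encoded-pfx>"),
#   )
# __init__ sets the polymorphic 'type' discriminator to "ContentCertificate".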
class CustomContainer(msrest.serialization.Model):
"""Custom container payload.
:ivar server: The name of the registry that contains the container image.
:vartype server: str
    :ivar container_image: Container image of the custom container. This should be in the form of
     :code:`<repository>:<tag>` without the server name of the registry.
:vartype container_image: str
:ivar command: Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is
used if this is not provided.
:vartype command: list[str]
:ivar args: Arguments to the entrypoint. The docker image's CMD is used if this is not
provided.
:vartype args: list[str]
:ivar image_registry_credential: Credential of the image registry.
:vartype image_registry_credential:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ImageRegistryCredential
"""
_attribute_map = {
'server': {'key': 'server', 'type': 'str'},
'container_image': {'key': 'containerImage', 'type': 'str'},
'command': {'key': 'command', 'type': '[str]'},
'args': {'key': 'args', 'type': '[str]'},
'image_registry_credential': {'key': 'imageRegistryCredential', 'type': 'ImageRegistryCredential'},
}
def __init__(
self,
*,
server: Optional[str] = None,
container_image: Optional[str] = None,
command: Optional[List[str]] = None,
args: Optional[List[str]] = None,
image_registry_credential: Optional["ImageRegistryCredential"] = None,
**kwargs
):
"""
:keyword server: The name of the registry that contains the container image.
:paramtype server: str
        :keyword container_image: Container image of the custom container. This should be in the
         form of :code:`<repository>:<tag>` without the server name of the registry.
:paramtype container_image: str
:keyword command: Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT
is used if this is not provided.
:paramtype command: list[str]
:keyword args: Arguments to the entrypoint. The docker image's CMD is used if this is not
provided.
:paramtype args: list[str]
:keyword image_registry_credential: Credential of the image registry.
:paramtype image_registry_credential:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ImageRegistryCredential
"""
super(CustomContainer, self).__init__(**kwargs)
self.server = server
self.container_image = container_image
self.command = command
self.args = args
self.image_registry_credential = image_registry_credential
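# Illustrative sketch (not part of the generated module): a custom container
# source. The registry server, image, and command values are placeholders;
# ImageRegistryCredential is defined elsewhere in this module.
#
#   container = CustomContainer(
#       server="myregistry.azurecr.io",
#       container_image="hello-world:latest",
#       command=["/bin/sh"],
#       args=["-c", "echo hello"],
#   )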
class CustomDomainProperties(msrest.serialization.Model):
"""Custom domain of app resource payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar thumbprint: The thumbprint of bound certificate.
:vartype thumbprint: str
:ivar app_name: The app name of domain.
:vartype app_name: str
:ivar cert_name: The bound certificate name of domain.
:vartype cert_name: str
"""
_validation = {
'app_name': {'readonly': True},
}
_attribute_map = {
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'app_name': {'key': 'appName', 'type': 'str'},
'cert_name': {'key': 'certName', 'type': 'str'},
}
def __init__(
self,
*,
thumbprint: Optional[str] = None,
cert_name: Optional[str] = None,
**kwargs
):
"""
:keyword thumbprint: The thumbprint of bound certificate.
:paramtype thumbprint: str
:keyword cert_name: The bound certificate name of domain.
:paramtype cert_name: str
"""
super(CustomDomainProperties, self).__init__(**kwargs)
self.thumbprint = thumbprint
self.app_name = None
self.cert_name = cert_name
class CustomDomainResource(ProxyResource):
"""Custom domain resource payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar properties: Properties of the custom domain resource.
:vartype properties: ~azure.mgmt.appplatform.v2021_09_01_preview.models.CustomDomainProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'CustomDomainProperties'},
}
def __init__(
self,
*,
properties: Optional["CustomDomainProperties"] = None,
**kwargs
):
"""
:keyword properties: Properties of the custom domain resource.
:paramtype properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.CustomDomainProperties
"""
super(CustomDomainResource, self).__init__(**kwargs)
self.properties = properties
class CustomDomainResourceCollection(msrest.serialization.Model):
"""Collection compose of a custom domain resources list and a possible link for next page.
:ivar value: The custom domain resources list.
:vartype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.CustomDomainResource]
:ivar next_link: The link to next page of custom domain list.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[CustomDomainResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["CustomDomainResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: The custom domain resources list.
:paramtype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.CustomDomainResource]
:keyword next_link: The link to next page of custom domain list.
:paramtype next_link: str
"""
super(CustomDomainResourceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class CustomDomainValidatePayload(msrest.serialization.Model):
"""Custom domain validate payload.
All required parameters must be populated in order to send to Azure.
:ivar name: Required. Name to be validated.
:vartype name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
**kwargs
):
"""
:keyword name: Required. Name to be validated.
:paramtype name: str
"""
super(CustomDomainValidatePayload, self).__init__(**kwargs)
self.name = name
class CustomDomainValidateResult(msrest.serialization.Model):
"""Validation result for custom domain.
    :ivar is_valid: Indicates whether the domain name is valid.
:vartype is_valid: bool
    :ivar message: Message explaining why the domain name is invalid.
:vartype message: str
"""
_attribute_map = {
'is_valid': {'key': 'isValid', 'type': 'bool'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
is_valid: Optional[bool] = None,
message: Optional[str] = None,
**kwargs
):
"""
        :keyword is_valid: Indicates whether the domain name is valid.
:paramtype is_valid: bool
        :keyword message: Message explaining why the domain name is invalid.
:paramtype message: str
"""
super(CustomDomainValidateResult, self).__init__(**kwargs)
self.is_valid = is_valid
self.message = message
class CustomPersistentDiskResource(msrest.serialization.Model):
"""Custom persistent disk resource payload.
All required parameters must be populated in order to send to Azure.
:ivar custom_persistent_disk_properties: Properties of the custom persistent disk resource
payload.
:vartype custom_persistent_disk_properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.CustomPersistentDiskProperties
:ivar storage_id: Required. The resource id of Azure Spring Cloud Storage resource.
:vartype storage_id: str
"""
_validation = {
'storage_id': {'required': True},
}
_attribute_map = {
'custom_persistent_disk_properties': {'key': 'customPersistentDiskProperties', 'type': 'CustomPersistentDiskProperties'},
'storage_id': {'key': 'storageId', 'type': 'str'},
}
def __init__(
self,
*,
storage_id: str,
custom_persistent_disk_properties: Optional["CustomPersistentDiskProperties"] = None,
**kwargs
):
"""
:keyword custom_persistent_disk_properties: Properties of the custom persistent disk resource
payload.
:paramtype custom_persistent_disk_properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.CustomPersistentDiskProperties
:keyword storage_id: Required. The resource id of Azure Spring Cloud Storage resource.
:paramtype storage_id: str
"""
super(CustomPersistentDiskResource, self).__init__(**kwargs)
self.custom_persistent_disk_properties = custom_persistent_disk_properties
self.storage_id = storage_id
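# Illustrative sketch (not part of the generated module): combining the
# AzureFileVolume defined earlier with a storage resource id. The truncated
# storage id and mount values below are placeholders.
#
#   disk = CustomPersistentDiskResource(
#       storage_id="/subscriptions/.../storages/my-storage",
#       custom_persistent_disk_properties=AzureFileVolume(
#           mount_path="/mnt/azurefile",
#           share_name="my-share",
#       ),
#   )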
class DeploymentInstance(msrest.serialization.Model):
"""Deployment instance payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the deployment instance.
:vartype name: str
:ivar status: Status of the deployment instance.
:vartype status: str
:ivar reason: Failed reason of the deployment instance.
:vartype reason: str
:ivar discovery_status: Discovery status of the deployment instance.
:vartype discovery_status: str
:ivar start_time: Start time of the deployment instance.
:vartype start_time: str
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'reason': {'readonly': True},
'discovery_status': {'readonly': True},
'start_time': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'reason': {'key': 'reason', 'type': 'str'},
'discovery_status': {'key': 'discoveryStatus', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(DeploymentInstance, self).__init__(**kwargs)
self.name = None
self.status = None
self.reason = None
self.discovery_status = None
self.start_time = None
class DeploymentResource(ProxyResource):
"""Deployment resource payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar properties: Properties of the Deployment resource.
:vartype properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentResourceProperties
:ivar sku: Sku of the Deployment resource.
:vartype sku: ~azure.mgmt.appplatform.v2021_09_01_preview.models.Sku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'DeploymentResourceProperties'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
properties: Optional["DeploymentResourceProperties"] = None,
sku: Optional["Sku"] = None,
**kwargs
):
"""
:keyword properties: Properties of the Deployment resource.
:paramtype properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentResourceProperties
:keyword sku: Sku of the Deployment resource.
:paramtype sku: ~azure.mgmt.appplatform.v2021_09_01_preview.models.Sku
"""
super(DeploymentResource, self).__init__(**kwargs)
self.properties = properties
self.sku = sku
class DeploymentResourceCollection(msrest.serialization.Model):
"""Object that includes an array of App resources and a possible link for next set.
:ivar value: Collection of Deployment resources.
:vartype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentResource]
:ivar next_link: URL client should use to fetch the next page (per server side paging).
It's null for now, added for future use.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[DeploymentResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["DeploymentResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: Collection of Deployment resources.
:paramtype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentResource]
:keyword next_link: URL client should use to fetch the next page (per server side paging).
It's null for now, added for future use.
:paramtype next_link: str
"""
super(DeploymentResourceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class DeploymentResourceProperties(msrest.serialization.Model):
"""Deployment resource properties payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar source: Uploaded source information of the deployment.
:vartype source: ~azure.mgmt.appplatform.v2021_09_01_preview.models.UserSourceInfo
:ivar app_name: App name of the deployment.
:vartype app_name: str
:ivar deployment_settings: Deployment settings of the Deployment.
:vartype deployment_settings:
~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentSettings
:ivar provisioning_state: Provisioning state of the Deployment. Possible values include:
"Creating", "Updating", "Succeeded", "Failed".
:vartype provisioning_state: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentResourceProvisioningState
:ivar status: Status of the Deployment. Possible values include: "Unknown", "Stopped",
"Running", "Failed", "Allocating", "Upgrading", "Compiling".
:vartype status: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentResourceStatus
:ivar active: Indicates whether the Deployment is active.
:vartype active: bool
:ivar created_time: Date time when the resource is created.
:vartype created_time: ~datetime.datetime
    :ivar instances: Collection of instances belonging to the Deployment.
:vartype instances: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentInstance]
"""
_validation = {
'app_name': {'readonly': True},
'provisioning_state': {'readonly': True},
'status': {'readonly': True},
'active': {'readonly': True},
'created_time': {'readonly': True},
'instances': {'readonly': True},
}
_attribute_map = {
'source': {'key': 'source', 'type': 'UserSourceInfo'},
'app_name': {'key': 'appName', 'type': 'str'},
'deployment_settings': {'key': 'deploymentSettings', 'type': 'DeploymentSettings'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'active': {'key': 'active', 'type': 'bool'},
'created_time': {'key': 'createdTime', 'type': 'iso-8601'},
'instances': {'key': 'instances', 'type': '[DeploymentInstance]'},
}
def __init__(
self,
*,
source: Optional["UserSourceInfo"] = None,
deployment_settings: Optional["DeploymentSettings"] = None,
**kwargs
):
"""
:keyword source: Uploaded source information of the deployment.
:paramtype source: ~azure.mgmt.appplatform.v2021_09_01_preview.models.UserSourceInfo
:keyword deployment_settings: Deployment settings of the Deployment.
:paramtype deployment_settings:
~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentSettings
"""
super(DeploymentResourceProperties, self).__init__(**kwargs)
self.source = source
self.app_name = None
self.deployment_settings = deployment_settings
self.provisioning_state = None
self.status = None
self.active = None
self.created_time = None
self.instances = None
class DeploymentSettings(msrest.serialization.Model):
"""Deployment settings payload.
:ivar cpu: Required CPU. This should be 1 for Basic tier, and in range [1, 4] for Standard
tier. This is deprecated starting from API version 2021-09-01-preview. Please use the
resourceRequests field to set the CPU size.
:vartype cpu: int
    :ivar memory_in_gb: Required Memory size in GB. This should be in range [1, 2] for Basic tier,
     and in range [1, 8] for Standard tier. This is deprecated starting from API version
     2021-09-01-preview. Please use the resourceRequests field to set the memory size.
:vartype memory_in_gb: int
    :ivar resource_requests: The requested resource quantity for required CPU and Memory. It is
     recommended to use this field to represent the required CPU and memory; the old fields cpu
     and memoryInGB will be deprecated later.
:vartype resource_requests: ~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceRequests
:ivar jvm_options: JVM parameter.
:vartype jvm_options: str
:ivar net_core_main_entry_path: The path to the .NET executable relative to zip root.
:vartype net_core_main_entry_path: str
:ivar environment_variables: Collection of environment variables.
:vartype environment_variables: dict[str, str]
:ivar runtime_version: Runtime version. Possible values include: "Java_8", "Java_11",
"NetCore_31". Default value: "Java_8".
:vartype runtime_version: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.RuntimeVersion
:ivar container_probe_settings: Container liveness and readiness probe settings.
:vartype container_probe_settings:
~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentSettingsContainerProbeSettings
"""
_attribute_map = {
'cpu': {'key': 'cpu', 'type': 'int'},
'memory_in_gb': {'key': 'memoryInGB', 'type': 'int'},
'resource_requests': {'key': 'resourceRequests', 'type': 'ResourceRequests'},
'jvm_options': {'key': 'jvmOptions', 'type': 'str'},
'net_core_main_entry_path': {'key': 'netCoreMainEntryPath', 'type': 'str'},
'environment_variables': {'key': 'environmentVariables', 'type': '{str}'},
'runtime_version': {'key': 'runtimeVersion', 'type': 'str'},
'container_probe_settings': {'key': 'containerProbeSettings', 'type': 'DeploymentSettingsContainerProbeSettings'},
}
def __init__(
self,
*,
cpu: Optional[int] = 1,
memory_in_gb: Optional[int] = 1,
resource_requests: Optional["ResourceRequests"] = None,
jvm_options: Optional[str] = None,
net_core_main_entry_path: Optional[str] = None,
environment_variables: Optional[Dict[str, str]] = None,
runtime_version: Optional[Union[str, "RuntimeVersion"]] = "Java_8",
container_probe_settings: Optional["DeploymentSettingsContainerProbeSettings"] = None,
**kwargs
):
"""
:keyword cpu: Required CPU. This should be 1 for Basic tier, and in range [1, 4] for Standard
tier. This is deprecated starting from API version 2021-09-01-preview. Please use the
resourceRequests field to set the CPU size.
:paramtype cpu: int
        :keyword memory_in_gb: Required Memory size in GB. This should be in range [1, 2] for Basic
         tier, and in range [1, 8] for Standard tier. This is deprecated starting from API version
         2021-09-01-preview. Please use the resourceRequests field to set the memory size.
:paramtype memory_in_gb: int
        :keyword resource_requests: The requested resource quantity for required CPU and Memory. It
         is recommended to use this field to represent the required CPU and memory; the old fields
         cpu and memoryInGB will be deprecated later.
:paramtype resource_requests:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceRequests
:keyword jvm_options: JVM parameter.
:paramtype jvm_options: str
:keyword net_core_main_entry_path: The path to the .NET executable relative to zip root.
:paramtype net_core_main_entry_path: str
:keyword environment_variables: Collection of environment variables.
:paramtype environment_variables: dict[str, str]
:keyword runtime_version: Runtime version. Possible values include: "Java_8", "Java_11",
"NetCore_31". Default value: "Java_8".
:paramtype runtime_version: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.RuntimeVersion
:keyword container_probe_settings: Container liveness and readiness probe settings.
:paramtype container_probe_settings:
~azure.mgmt.appplatform.v2021_09_01_preview.models.DeploymentSettingsContainerProbeSettings
"""
super(DeploymentSettings, self).__init__(**kwargs)
self.cpu = cpu
self.memory_in_gb = memory_in_gb
self.resource_requests = resource_requests
self.jvm_options = jvm_options
self.net_core_main_entry_path = net_core_main_entry_path
self.environment_variables = environment_variables
self.runtime_version = runtime_version
self.container_probe_settings = container_probe_settings
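# Illustrative sketch (not part of the generated module): per the docstring
# above, prefer resourceRequests over the deprecated cpu/memoryInGB fields.
# Assumes ResourceRequests (defined elsewhere in this module) takes string
# cpu/memory quantities; all values below are placeholders.
#
#   settings = DeploymentSettings(
#       resource_requests=ResourceRequests(cpu="1", memory="2Gi"),
#       jvm_options="-Xms512m -Xmx1024m",
#       environment_variables={"SPRING_PROFILES_ACTIVE": "prod"},
#       runtime_version="Java_11",
#   )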
class DeploymentSettingsContainerProbeSettings(msrest.serialization.Model):
"""Container liveness and readiness probe settings.
    :ivar disable_probe: Indicates whether to disable the liveness and readiness probe.
:vartype disable_probe: bool
"""
_attribute_map = {
'disable_probe': {'key': 'disableProbe', 'type': 'bool'},
}
def __init__(
self,
*,
disable_probe: Optional[bool] = None,
**kwargs
):
"""
    :keyword disable_probe: Indicates whether to disable the liveness and readiness probe.
:paramtype disable_probe: bool
"""
super(DeploymentSettingsContainerProbeSettings, self).__init__(**kwargs)
self.disable_probe = disable_probe
class DiagnosticParameters(msrest.serialization.Model):
"""Diagnostic parameters of diagnostic operations.
:ivar app_instance: App instance name.
:vartype app_instance: str
    :ivar file_path: Your target file path in your own BYOS (Bring Your Own Storage).
    :vartype file_path: str
    :ivar duration: Duration of your JFR (Java Flight Recorder) capture. 1 min can be represented
     by 1m or 60s.
    :vartype duration: str
"""
_attribute_map = {
'app_instance': {'key': 'appInstance', 'type': 'str'},
'file_path': {'key': 'filePath', 'type': 'str'},
'duration': {'key': 'duration', 'type': 'str'},
}
def __init__(
self,
*,
app_instance: Optional[str] = None,
file_path: Optional[str] = None,
duration: Optional[str] = None,
**kwargs
):
"""
:keyword app_instance: App instance name.
:paramtype app_instance: str
    :keyword file_path: Your target file path in your own BYOS (Bring Your Own Storage).
    :paramtype file_path: str
    :keyword duration: Duration of your JFR (Java Flight Recorder) capture. 1 min can be
     represented by 1m or 60s.
    :paramtype duration: str
"""
super(DiagnosticParameters, self).__init__(**kwargs)
self.app_instance = app_instance
self.file_path = file_path
self.duration = duration
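
# A minimal sketch of requesting a 60-second JFR capture (all values below are
# hypothetical):
#
#     params = DiagnosticParameters(
#         app_instance="demo-app-default-7-abc123",
#         file_path="/byos/diagnostics",
#         duration="60s",
#     )
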
class Error(msrest.serialization.Model):
"""The error code compose of code and message.
:ivar code: The code of error.
:vartype code: str
:ivar message: The message of error.
:vartype message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
"""
    :keyword code: The code of the error.
    :paramtype code: str
    :keyword message: The message of the error.
    :paramtype message: str
"""
super(Error, self).__init__(**kwargs)
self.code = code
self.message = message
class GitPatternRepository(msrest.serialization.Model):
"""Git repository property payload.
All required parameters must be populated in order to send to Azure.
    :ivar name: Required. Name of the repository.
    :vartype name: str
    :ivar pattern: Collection of patterns of the repository.
    :vartype pattern: list[str]
    :ivar uri: Required. URI of the repository.
    :vartype uri: str
    :ivar label: Label of the repository.
    :vartype label: str
    :ivar search_paths: Search paths of the repository.
    :vartype search_paths: list[str]
    :ivar username: Username for basic auth of the git repository.
    :vartype username: str
    :ivar password: Password for basic auth of the git repository.
    :vartype password: str
    :ivar host_key: Public sshKey of the git repository.
    :vartype host_key: str
    :ivar host_key_algorithm: SshKey algorithm of the git repository.
    :vartype host_key_algorithm: str
    :ivar private_key: Private sshKey of the git repository.
    :vartype private_key: str
    :ivar strict_host_key_checking: Whether to enable strict host key checking.
    :vartype strict_host_key_checking: bool
"""
_validation = {
'name': {'required': True},
'uri': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'pattern': {'key': 'pattern', 'type': '[str]'},
'uri': {'key': 'uri', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'search_paths': {'key': 'searchPaths', 'type': '[str]'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'host_key': {'key': 'hostKey', 'type': 'str'},
'host_key_algorithm': {'key': 'hostKeyAlgorithm', 'type': 'str'},
'private_key': {'key': 'privateKey', 'type': 'str'},
'strict_host_key_checking': {'key': 'strictHostKeyChecking', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
uri: str,
pattern: Optional[List[str]] = None,
label: Optional[str] = None,
search_paths: Optional[List[str]] = None,
username: Optional[str] = None,
password: Optional[str] = None,
host_key: Optional[str] = None,
host_key_algorithm: Optional[str] = None,
private_key: Optional[str] = None,
strict_host_key_checking: Optional[bool] = None,
**kwargs
):
"""
    :keyword name: Required. Name of the repository.
    :paramtype name: str
    :keyword pattern: Collection of patterns of the repository.
    :paramtype pattern: list[str]
    :keyword uri: Required. URI of the repository.
    :paramtype uri: str
    :keyword label: Label of the repository.
    :paramtype label: str
    :keyword search_paths: Search paths of the repository.
    :paramtype search_paths: list[str]
    :keyword username: Username for basic auth of the git repository.
    :paramtype username: str
    :keyword password: Password for basic auth of the git repository.
    :paramtype password: str
    :keyword host_key: Public sshKey of the git repository.
    :paramtype host_key: str
    :keyword host_key_algorithm: SshKey algorithm of the git repository.
    :paramtype host_key_algorithm: str
    :keyword private_key: Private sshKey of the git repository.
    :paramtype private_key: str
    :keyword strict_host_key_checking: Whether to enable strict host key checking.
    :paramtype strict_host_key_checking: bool
"""
super(GitPatternRepository, self).__init__(**kwargs)
self.name = name
self.pattern = pattern
self.uri = uri
self.label = label
self.search_paths = search_paths
self.username = username
self.password = password
self.host_key = host_key
self.host_key_algorithm = host_key_algorithm
self.private_key = private_key
self.strict_host_key_checking = strict_host_key_checking
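
# A minimal sketch of a config-server git repository entry (the URI, label, and
# search paths below are placeholders):
#
#     repo = GitPatternRepository(
#         name="default",
#         uri="https://github.com/contoso/spring-config.git",
#         label="main",
#         search_paths=["app1", "app2"],
#     )
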
class ImageRegistryCredential(msrest.serialization.Model):
"""Credential of the image registry.
:ivar username: The username of the image registry credential.
:vartype username: str
:ivar password: The password of the image registry credential.
:vartype password: str
"""
_attribute_map = {
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(
self,
*,
username: Optional[str] = None,
password: Optional[str] = None,
**kwargs
):
"""
:keyword username: The username of the image registry credential.
:paramtype username: str
:keyword password: The password of the image registry credential.
:paramtype password: str
"""
super(ImageRegistryCredential, self).__init__(**kwargs)
self.username = username
self.password = password
class KeyVaultCertificateProperties(CertificateProperties):
"""Properties of certificate imported from key vault.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
    :ivar type: Required. The type of the certificate source. Constant filled by server.
:vartype type: str
:ivar thumbprint: The thumbprint of certificate.
:vartype thumbprint: str
:ivar issuer: The issuer of certificate.
:vartype issuer: str
:ivar issued_date: The issue date of certificate.
:vartype issued_date: str
:ivar expiration_date: The expiration date of certificate.
:vartype expiration_date: str
:ivar activate_date: The activate date of certificate.
:vartype activate_date: str
:ivar subject_name: The subject name of certificate.
:vartype subject_name: str
:ivar dns_names: The domain list of certificate.
:vartype dns_names: list[str]
    :ivar vault_uri: Required. The vault uri of the user key vault.
:vartype vault_uri: str
:ivar key_vault_cert_name: Required. The certificate name of key vault.
:vartype key_vault_cert_name: str
:ivar cert_version: The certificate version of key vault.
:vartype cert_version: str
    :ivar exclude_private_key: Optional. If set to true, the private key will not be imported
     from the key vault.
:vartype exclude_private_key: bool
"""
_validation = {
'type': {'required': True},
'thumbprint': {'readonly': True},
'issuer': {'readonly': True},
'issued_date': {'readonly': True},
'expiration_date': {'readonly': True},
'activate_date': {'readonly': True},
'subject_name': {'readonly': True},
'dns_names': {'readonly': True},
'vault_uri': {'required': True},
'key_vault_cert_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'thumbprint': {'key': 'thumbprint', 'type': 'str'},
'issuer': {'key': 'issuer', 'type': 'str'},
'issued_date': {'key': 'issuedDate', 'type': 'str'},
'expiration_date': {'key': 'expirationDate', 'type': 'str'},
'activate_date': {'key': 'activateDate', 'type': 'str'},
'subject_name': {'key': 'subjectName', 'type': 'str'},
'dns_names': {'key': 'dnsNames', 'type': '[str]'},
'vault_uri': {'key': 'vaultUri', 'type': 'str'},
'key_vault_cert_name': {'key': 'keyVaultCertName', 'type': 'str'},
'cert_version': {'key': 'certVersion', 'type': 'str'},
'exclude_private_key': {'key': 'excludePrivateKey', 'type': 'bool'},
}
def __init__(
self,
*,
vault_uri: str,
key_vault_cert_name: str,
cert_version: Optional[str] = None,
exclude_private_key: Optional[bool] = False,
**kwargs
):
"""
    :keyword vault_uri: Required. The vault uri of the user key vault.
:paramtype vault_uri: str
:keyword key_vault_cert_name: Required. The certificate name of key vault.
:paramtype key_vault_cert_name: str
:keyword cert_version: The certificate version of key vault.
:paramtype cert_version: str
    :keyword exclude_private_key: Optional. If set to true, the private key will not be imported
     from the key vault.
:paramtype exclude_private_key: bool
"""
super(KeyVaultCertificateProperties, self).__init__(**kwargs)
self.type = 'KeyVaultCertificate' # type: str
self.vault_uri = vault_uri
self.key_vault_cert_name = key_vault_cert_name
self.cert_version = cert_version
self.exclude_private_key = exclude_private_key
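
# A minimal sketch of importing a certificate from Key Vault (the vault URI and
# certificate name are placeholders):
#
#     cert = KeyVaultCertificateProperties(
#         vault_uri="https://contoso-kv.vault.azure.net",
#         key_vault_cert_name="my-tls-cert",
#         exclude_private_key=False,
#     )
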
class LoadedCertificate(msrest.serialization.Model):
"""Loaded certificate payload.
All required parameters must be populated in order to send to Azure.
:ivar resource_id: Required. Resource Id of loaded certificate.
:vartype resource_id: str
    :ivar load_trust_store: Indicates whether the certificate will be loaded into the default
     trust store; only works for the Java runtime.
:vartype load_trust_store: bool
"""
_validation = {
'resource_id': {'required': True},
}
_attribute_map = {
'resource_id': {'key': 'resourceId', 'type': 'str'},
'load_trust_store': {'key': 'loadTrustStore', 'type': 'bool'},
}
def __init__(
self,
*,
resource_id: str,
load_trust_store: Optional[bool] = False,
**kwargs
):
"""
:keyword resource_id: Required. Resource Id of loaded certificate.
:paramtype resource_id: str
    :keyword load_trust_store: Indicates whether the certificate will be loaded into the default
     trust store; only works for the Java runtime.
:paramtype load_trust_store: bool
"""
super(LoadedCertificate, self).__init__(**kwargs)
self.resource_id = resource_id
self.load_trust_store = load_trust_store
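
# A minimal sketch referencing a certificate to load into an app (the resource
# Id is a hypothetical placeholder):
#
#     loaded = LoadedCertificate(
#         resource_id="/subscriptions/.../certificates/my-tls-cert",
#         load_trust_store=True,  # only effective for the Java runtime
#     )
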
class LogFileUrlResponse(msrest.serialization.Model):
"""Log file URL payload.
All required parameters must be populated in order to send to Azure.
:ivar url: Required. URL of the log file.
:vartype url: str
"""
_validation = {
'url': {'required': True},
}
_attribute_map = {
'url': {'key': 'url', 'type': 'str'},
}
def __init__(
self,
*,
url: str,
**kwargs
):
"""
:keyword url: Required. URL of the log file.
:paramtype url: str
"""
super(LogFileUrlResponse, self).__init__(**kwargs)
self.url = url
class LogSpecification(msrest.serialization.Model):
"""Specifications of the Log for Azure Monitoring.
:ivar name: Name of the log.
:vartype name: str
:ivar display_name: Localized friendly display name of the log.
:vartype display_name: str
:ivar blob_duration: Blob duration of the log.
:vartype blob_duration: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
blob_duration: Optional[str] = None,
**kwargs
):
"""
:keyword name: Name of the log.
:paramtype name: str
:keyword display_name: Localized friendly display name of the log.
:paramtype display_name: str
:keyword blob_duration: Blob duration of the log.
:paramtype blob_duration: str
"""
super(LogSpecification, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.blob_duration = blob_duration
class ManagedIdentityProperties(msrest.serialization.Model):
"""Managed identity properties retrieved from ARM request headers.
:ivar type: Type of the managed identity. Possible values include: "None", "SystemAssigned",
"UserAssigned", "SystemAssigned,UserAssigned".
:vartype type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.ManagedIdentityType
:ivar principal_id: Principal Id.
:vartype principal_id: str
:ivar tenant_id: Tenant Id.
:vartype tenant_id: str
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ManagedIdentityType"]] = None,
principal_id: Optional[str] = None,
tenant_id: Optional[str] = None,
**kwargs
):
"""
:keyword type: Type of the managed identity. Possible values include: "None", "SystemAssigned",
"UserAssigned", "SystemAssigned,UserAssigned".
:paramtype type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.ManagedIdentityType
:keyword principal_id: Principal Id.
:paramtype principal_id: str
:keyword tenant_id: Tenant Id.
:paramtype tenant_id: str
"""
super(ManagedIdentityProperties, self).__init__(**kwargs)
self.type = type
self.principal_id = principal_id
self.tenant_id = tenant_id
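
# A minimal sketch; `type` accepts either the ManagedIdentityType enum or its
# string value:
#
#     identity = ManagedIdentityProperties(type="SystemAssigned")
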
class MetricDimension(msrest.serialization.Model):
"""Specifications of the Dimension of metrics.
:ivar name: Name of the dimension.
:vartype name: str
:ivar display_name: Localized friendly display name of the dimension.
:vartype display_name: str
:ivar to_be_exported_for_shoebox: Whether this dimension should be included for the Shoebox
export scenario.
:vartype to_be_exported_for_shoebox: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
to_be_exported_for_shoebox: Optional[bool] = None,
**kwargs
):
"""
:keyword name: Name of the dimension.
:paramtype name: str
:keyword display_name: Localized friendly display name of the dimension.
:paramtype display_name: str
:keyword to_be_exported_for_shoebox: Whether this dimension should be included for the Shoebox
export scenario.
:paramtype to_be_exported_for_shoebox: bool
"""
super(MetricDimension, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.to_be_exported_for_shoebox = to_be_exported_for_shoebox
class MetricSpecification(msrest.serialization.Model):
"""Specifications of the Metrics for Azure Monitoring.
:ivar name: Name of the metric.
:vartype name: str
:ivar display_name: Localized friendly display name of the metric.
:vartype display_name: str
:ivar display_description: Localized friendly description of the metric.
:vartype display_description: str
:ivar unit: Unit that makes sense for the metric.
:vartype unit: str
:ivar category: Name of the metric category that the metric belongs to. A metric can only
belong to a single category.
:vartype category: str
:ivar aggregation_type: Only provide one value for this field. Valid values: Average, Minimum,
Maximum, Total, Count.
:vartype aggregation_type: str
:ivar supported_aggregation_types: Supported aggregation types.
:vartype supported_aggregation_types: list[str]
:ivar supported_time_grain_types: Supported time grain types.
:vartype supported_time_grain_types: list[str]
    :ivar fill_gap_with_zero: Optional. If set to true, then zero will be returned for the time
     duration where no metric is emitted/published.
:vartype fill_gap_with_zero: bool
:ivar dimensions: Dimensions of the metric.
:vartype dimensions: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.MetricDimension]
:ivar source_mdm_namespace: Name of the MDM namespace. Optional.
:vartype source_mdm_namespace: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'category': {'key': 'category', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
display_description: Optional[str] = None,
unit: Optional[str] = None,
category: Optional[str] = None,
aggregation_type: Optional[str] = None,
supported_aggregation_types: Optional[List[str]] = None,
supported_time_grain_types: Optional[List[str]] = None,
fill_gap_with_zero: Optional[bool] = None,
dimensions: Optional[List["MetricDimension"]] = None,
source_mdm_namespace: Optional[str] = None,
**kwargs
):
"""
:keyword name: Name of the metric.
:paramtype name: str
:keyword display_name: Localized friendly display name of the metric.
:paramtype display_name: str
:keyword display_description: Localized friendly description of the metric.
:paramtype display_description: str
:keyword unit: Unit that makes sense for the metric.
:paramtype unit: str
:keyword category: Name of the metric category that the metric belongs to. A metric can only
belong to a single category.
:paramtype category: str
:keyword aggregation_type: Only provide one value for this field. Valid values: Average,
Minimum, Maximum, Total, Count.
:paramtype aggregation_type: str
:keyword supported_aggregation_types: Supported aggregation types.
:paramtype supported_aggregation_types: list[str]
:keyword supported_time_grain_types: Supported time grain types.
:paramtype supported_time_grain_types: list[str]
    :keyword fill_gap_with_zero: Optional. If set to true, then zero will be returned for the
     time duration where no metric is emitted/published.
:paramtype fill_gap_with_zero: bool
:keyword dimensions: Dimensions of the metric.
:paramtype dimensions: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.MetricDimension]
:keyword source_mdm_namespace: Name of the MDM namespace. Optional.
:paramtype source_mdm_namespace: str
"""
super(MetricSpecification, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.display_description = display_description
self.unit = unit
self.category = category
self.aggregation_type = aggregation_type
self.supported_aggregation_types = supported_aggregation_types
self.supported_time_grain_types = supported_time_grain_types
self.fill_gap_with_zero = fill_gap_with_zero
self.dimensions = dimensions
self.source_mdm_namespace = source_mdm_namespace
class MonitoringSettingProperties(msrest.serialization.Model):
"""Monitoring Setting properties payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar provisioning_state: State of the Monitoring Setting. Possible values include:
"NotAvailable", "Failed", "Succeeded", "Updating".
:vartype provisioning_state: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.MonitoringSettingState
    :ivar error: Error when applying Monitoring Setting changes.
    :vartype error: ~azure.mgmt.appplatform.v2021_09_01_preview.models.Error
    :ivar trace_enabled: Indicates whether to enable the trace functionality. This is deprecated
     since API version 2020-11-01-preview. Please use appInsightsInstrumentationKey to indicate
     whether monitoringSettings is enabled or not.
    :vartype trace_enabled: bool
    :ivar app_insights_instrumentation_key: Target Application Insights instrumentation key. A
     null, empty, or whitespace-only value disables monitoringSettings.
    :vartype app_insights_instrumentation_key: str
    :ivar app_insights_sampling_rate: Sampling rate of the Application Insights agent; should be
     in range [0.0, 100.0].
    :vartype app_insights_sampling_rate: float
    :ivar app_insights_agent_versions: Versions of the Application Insights agent.
    :vartype app_insights_agent_versions:
     ~azure.mgmt.appplatform.v2021_09_01_preview.models.ApplicationInsightsAgentVersions
"""
_validation = {
'provisioning_state': {'readonly': True},
'app_insights_sampling_rate': {'maximum': 100, 'minimum': 0},
}
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'error': {'key': 'error', 'type': 'Error'},
'trace_enabled': {'key': 'traceEnabled', 'type': 'bool'},
'app_insights_instrumentation_key': {'key': 'appInsightsInstrumentationKey', 'type': 'str'},
'app_insights_sampling_rate': {'key': 'appInsightsSamplingRate', 'type': 'float'},
'app_insights_agent_versions': {'key': 'appInsightsAgentVersions', 'type': 'ApplicationInsightsAgentVersions'},
}
def __init__(
self,
*,
error: Optional["Error"] = None,
trace_enabled: Optional[bool] = None,
app_insights_instrumentation_key: Optional[str] = None,
app_insights_sampling_rate: Optional[float] = None,
app_insights_agent_versions: Optional["ApplicationInsightsAgentVersions"] = None,
**kwargs
):
"""
    :keyword error: Error when applying Monitoring Setting changes.
    :paramtype error: ~azure.mgmt.appplatform.v2021_09_01_preview.models.Error
    :keyword trace_enabled: Indicates whether to enable the trace functionality. This is
     deprecated since API version 2020-11-01-preview. Please use appInsightsInstrumentationKey
     to indicate whether monitoringSettings is enabled or not.
    :paramtype trace_enabled: bool
    :keyword app_insights_instrumentation_key: Target Application Insights instrumentation key. A
     null, empty, or whitespace-only value disables monitoringSettings.
    :paramtype app_insights_instrumentation_key: str
    :keyword app_insights_sampling_rate: Sampling rate of the Application Insights agent; should
     be in range [0.0, 100.0].
    :paramtype app_insights_sampling_rate: float
    :keyword app_insights_agent_versions: Versions of the Application Insights agent.
    :paramtype app_insights_agent_versions:
     ~azure.mgmt.appplatform.v2021_09_01_preview.models.ApplicationInsightsAgentVersions
"""
super(MonitoringSettingProperties, self).__init__(**kwargs)
self.provisioning_state = None
self.error = error
self.trace_enabled = trace_enabled
self.app_insights_instrumentation_key = app_insights_instrumentation_key
self.app_insights_sampling_rate = app_insights_sampling_rate
self.app_insights_agent_versions = app_insights_agent_versions
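
# A minimal sketch enabling monitoring via an Application Insights key (the key
# below is a placeholder; provisioning_state is server-populated and not
# settable):
#
#     monitoring = MonitoringSettingProperties(
#         app_insights_instrumentation_key="00000000-0000-0000-0000-000000000000",
#         app_insights_sampling_rate=10.0,
#     )
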
class MonitoringSettingResource(ProxyResource):
"""Monitoring Setting resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar properties: Properties of the Monitoring Setting resource.
:vartype properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.MonitoringSettingProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'MonitoringSettingProperties'},
}
def __init__(
self,
*,
properties: Optional["MonitoringSettingProperties"] = None,
**kwargs
):
"""
:keyword properties: Properties of the Monitoring Setting resource.
:paramtype properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.MonitoringSettingProperties
"""
super(MonitoringSettingResource, self).__init__(**kwargs)
self.properties = properties
class NameAvailability(msrest.serialization.Model):
"""Name availability result payload.
:ivar name_available: Indicates whether the name is available.
:vartype name_available: bool
:ivar reason: Reason why the name is not available.
:vartype reason: str
    :ivar message: Message explaining why the name is not available.
:vartype message: str
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
name_available: Optional[bool] = None,
reason: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
"""
:keyword name_available: Indicates whether the name is available.
:paramtype name_available: bool
:keyword reason: Reason why the name is not available.
:paramtype reason: str
    :keyword message: Message explaining why the name is not available.
:paramtype message: str
"""
super(NameAvailability, self).__init__(**kwargs)
self.name_available = name_available
self.reason = reason
self.message = message
class NameAvailabilityParameters(msrest.serialization.Model):
"""Name availability parameters payload.
All required parameters must be populated in order to send to Azure.
:ivar type: Required. Type of the resource to check name availability.
:vartype type: str
:ivar name: Required. Name to be checked.
:vartype name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
type: str,
name: str,
**kwargs
):
"""
:keyword type: Required. Type of the resource to check name availability.
:paramtype type: str
:keyword name: Required. Name to be checked.
:paramtype name: str
"""
super(NameAvailabilityParameters, self).__init__(**kwargs)
self.type = type
self.name = name
class NetworkProfile(msrest.serialization.Model):
"""Service network profile payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar service_runtime_subnet_id: Fully qualified resource Id of the subnet to host Azure Spring
Cloud Service Runtime.
:vartype service_runtime_subnet_id: str
:ivar app_subnet_id: Fully qualified resource Id of the subnet to host Azure Spring Cloud Apps.
:vartype app_subnet_id: str
:ivar service_cidr: Azure Spring Cloud service reserved CIDR.
:vartype service_cidr: str
:ivar service_runtime_network_resource_group: Name of the resource group containing network
resources of Azure Spring Cloud Service Runtime.
:vartype service_runtime_network_resource_group: str
:ivar app_network_resource_group: Name of the resource group containing network resources of
Azure Spring Cloud Apps.
:vartype app_network_resource_group: str
:ivar outbound_i_ps: Desired outbound IP resources for Azure Spring Cloud instance.
:vartype outbound_i_ps:
~azure.mgmt.appplatform.v2021_09_01_preview.models.NetworkProfileOutboundIPs
:ivar required_traffics: Required inbound or outbound traffics for Azure Spring Cloud instance.
:vartype required_traffics:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.RequiredTraffic]
"""
_validation = {
'outbound_i_ps': {'readonly': True},
'required_traffics': {'readonly': True},
}
_attribute_map = {
'service_runtime_subnet_id': {'key': 'serviceRuntimeSubnetId', 'type': 'str'},
'app_subnet_id': {'key': 'appSubnetId', 'type': 'str'},
'service_cidr': {'key': 'serviceCidr', 'type': 'str'},
'service_runtime_network_resource_group': {'key': 'serviceRuntimeNetworkResourceGroup', 'type': 'str'},
'app_network_resource_group': {'key': 'appNetworkResourceGroup', 'type': 'str'},
'outbound_i_ps': {'key': 'outboundIPs', 'type': 'NetworkProfileOutboundIPs'},
'required_traffics': {'key': 'requiredTraffics', 'type': '[RequiredTraffic]'},
}
def __init__(
self,
*,
service_runtime_subnet_id: Optional[str] = None,
app_subnet_id: Optional[str] = None,
service_cidr: Optional[str] = None,
service_runtime_network_resource_group: Optional[str] = None,
app_network_resource_group: Optional[str] = None,
**kwargs
):
"""
:keyword service_runtime_subnet_id: Fully qualified resource Id of the subnet to host Azure
Spring Cloud Service Runtime.
:paramtype service_runtime_subnet_id: str
:keyword app_subnet_id: Fully qualified resource Id of the subnet to host Azure Spring Cloud
Apps.
:paramtype app_subnet_id: str
:keyword service_cidr: Azure Spring Cloud service reserved CIDR.
:paramtype service_cidr: str
:keyword service_runtime_network_resource_group: Name of the resource group containing network
resources of Azure Spring Cloud Service Runtime.
:paramtype service_runtime_network_resource_group: str
:keyword app_network_resource_group: Name of the resource group containing network resources of
Azure Spring Cloud Apps.
:paramtype app_network_resource_group: str
"""
super(NetworkProfile, self).__init__(**kwargs)
self.service_runtime_subnet_id = service_runtime_subnet_id
self.app_subnet_id = app_subnet_id
self.service_cidr = service_cidr
self.service_runtime_network_resource_group = service_runtime_network_resource_group
self.app_network_resource_group = app_network_resource_group
self.outbound_i_ps = None
self.required_traffics = None
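
# A minimal VNet-injection sketch (the subnet resource Ids and CIDR below are
# placeholders; outbound_i_ps and required_traffics are read-only and populated
# by the server):
#
#     profile = NetworkProfile(
#         service_runtime_subnet_id="/subscriptions/.../subnets/runtime-subnet",
#         app_subnet_id="/subscriptions/.../subnets/apps-subnet",
#         service_cidr="10.0.0.0/16,10.1.0.0/16,10.2.0.0/16",
#     )
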
class NetworkProfileOutboundIPs(msrest.serialization.Model):
"""Desired outbound IP resources for Azure Spring Cloud instance.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar public_i_ps: A list of public IP addresses.
:vartype public_i_ps: list[str]
"""
_validation = {
'public_i_ps': {'readonly': True},
}
_attribute_map = {
'public_i_ps': {'key': 'publicIPs', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(NetworkProfileOutboundIPs, self).__init__(**kwargs)
self.public_i_ps = None
class OperationDetail(msrest.serialization.Model):
"""Operation detail payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the operation.
:vartype name: str
:ivar is_data_action: Indicates whether the operation is a data action.
:vartype is_data_action: bool
:ivar display: Display of the operation.
:vartype display: ~azure.mgmt.appplatform.v2021_09_01_preview.models.OperationDisplay
    :ivar action_type: Enum. Indicates the action type. "Internal" refers to actions that are for
     internal-only APIs. Possible values include: "Internal".
:vartype action_type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.ActionType
:ivar origin: Origin of the operation.
:vartype origin: str
:ivar properties: Properties of the operation.
:vartype properties: ~azure.mgmt.appplatform.v2021_09_01_preview.models.OperationProperties
"""
_validation = {
'action_type': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'action_type': {'key': 'actionType', 'type': 'str'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'OperationProperties'},
}
def __init__(
self,
*,
name: Optional[str] = None,
is_data_action: Optional[bool] = None,
display: Optional["OperationDisplay"] = None,
origin: Optional[str] = None,
properties: Optional["OperationProperties"] = None,
**kwargs
):
"""
:keyword name: Name of the operation.
:paramtype name: str
:keyword is_data_action: Indicates whether the operation is a data action.
:paramtype is_data_action: bool
:keyword display: Display of the operation.
:paramtype display: ~azure.mgmt.appplatform.v2021_09_01_preview.models.OperationDisplay
:keyword origin: Origin of the operation.
:paramtype origin: str
:keyword properties: Properties of the operation.
:paramtype properties: ~azure.mgmt.appplatform.v2021_09_01_preview.models.OperationProperties
"""
super(OperationDetail, self).__init__(**kwargs)
self.name = name
self.is_data_action = is_data_action
self.display = display
self.action_type = None
self.origin = origin
self.properties = properties
class OperationDisplay(msrest.serialization.Model):
"""Operation display payload.
:ivar provider: Resource provider of the operation.
:vartype provider: str
:ivar resource: Resource of the operation.
:vartype resource: str
:ivar operation: Localized friendly name for the operation.
:vartype operation: str
:ivar description: Localized friendly description for the operation.
:vartype description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
"""
:keyword provider: Resource provider of the operation.
:paramtype provider: str
:keyword resource: Resource of the operation.
:paramtype resource: str
:keyword operation: Localized friendly name for the operation.
:paramtype operation: str
:keyword description: Localized friendly description for the operation.
:paramtype description: str
"""
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationProperties(msrest.serialization.Model):
"""Extra Operation properties.
:ivar service_specification: Service specifications of the operation.
:vartype service_specification:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ServiceSpecification
"""
_attribute_map = {
'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
*,
service_specification: Optional["ServiceSpecification"] = None,
**kwargs
):
"""
:keyword service_specification: Service specifications of the operation.
:paramtype service_specification:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ServiceSpecification
"""
super(OperationProperties, self).__init__(**kwargs)
self.service_specification = service_specification
class PersistentDisk(msrest.serialization.Model):
"""Persistent disk payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar size_in_gb: Size of the persistent disk in GB.
:vartype size_in_gb: int
:ivar used_in_gb: Size of the used persistent disk in GB.
:vartype used_in_gb: int
:ivar mount_path: Mount path of the persistent disk.
:vartype mount_path: str
"""
_validation = {
'size_in_gb': {'maximum': 50, 'minimum': 0},
'used_in_gb': {'readonly': True, 'maximum': 50, 'minimum': 0},
}
_attribute_map = {
'size_in_gb': {'key': 'sizeInGB', 'type': 'int'},
'used_in_gb': {'key': 'usedInGB', 'type': 'int'},
'mount_path': {'key': 'mountPath', 'type': 'str'},
}
def __init__(
self,
*,
size_in_gb: Optional[int] = None,
mount_path: Optional[str] = None,
**kwargs
):
"""
:keyword size_in_gb: Size of the persistent disk in GB.
:paramtype size_in_gb: int
:keyword mount_path: Mount path of the persistent disk.
:paramtype mount_path: str
"""
super(PersistentDisk, self).__init__(**kwargs)
self.size_in_gb = size_in_gb
self.used_in_gb = None
self.mount_path = mount_path
class RegenerateTestKeyRequestPayload(msrest.serialization.Model):
"""Regenerate test key request payload.
All required parameters must be populated in order to send to Azure.
:ivar key_type: Required. Type of the test key. Possible values include: "Primary",
"Secondary".
:vartype key_type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.TestKeyType
"""
_validation = {
'key_type': {'required': True},
}
_attribute_map = {
'key_type': {'key': 'keyType', 'type': 'str'},
}
def __init__(
self,
*,
key_type: Union[str, "TestKeyType"],
**kwargs
):
"""
:keyword key_type: Required. Type of the test key. Possible values include: "Primary",
"Secondary".
:paramtype key_type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.TestKeyType
"""
super(RegenerateTestKeyRequestPayload, self).__init__(**kwargs)
self.key_type = key_type
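
# A minimal sketch; key_type accepts the TestKeyType enum or its string value:
#
#     payload = RegenerateTestKeyRequestPayload(key_type="Primary")
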
class RequiredTraffic(msrest.serialization.Model):
"""Required inbound or outbound traffic for Azure Spring Cloud instance.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar protocol: The protocol of required traffic.
:vartype protocol: str
:ivar port: The port of required traffic.
:vartype port: int
:ivar ips: The ip list of required traffic.
:vartype ips: list[str]
:ivar fqdns: The FQDN list of required traffic.
:vartype fqdns: list[str]
:ivar direction: The direction of required traffic. Possible values include: "Inbound",
"Outbound".
:vartype direction: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.TrafficDirection
"""
_validation = {
'protocol': {'readonly': True},
'port': {'readonly': True},
'ips': {'readonly': True},
'fqdns': {'readonly': True},
'direction': {'readonly': True},
}
_attribute_map = {
'protocol': {'key': 'protocol', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'ips': {'key': 'ips', 'type': '[str]'},
'fqdns': {'key': 'fqdns', 'type': '[str]'},
'direction': {'key': 'direction', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
"""
"""
super(RequiredTraffic, self).__init__(**kwargs)
self.protocol = None
self.port = None
self.ips = None
self.fqdns = None
self.direction = None
class ResourceRequests(msrest.serialization.Model):
"""Deployment resource request payload.
:ivar cpu: Required CPU. 1 core can be represented by 1 or 1000m. This should be 500m or 1 for
Basic tier, and {500m, 1, 2, 3, 4} for Standard tier.
:vartype cpu: str
:ivar memory: Required memory. 1 GB can be represented by 1Gi or 1024Mi. This should be {512Mi,
1Gi, 2Gi} for Basic tier, and {512Mi, 1Gi, 2Gi, ..., 8Gi} for Standard tier.
:vartype memory: str
"""
_attribute_map = {
'cpu': {'key': 'cpu', 'type': 'str'},
'memory': {'key': 'memory', 'type': 'str'},
}
def __init__(
self,
*,
cpu: Optional[str] = None,
memory: Optional[str] = None,
**kwargs
):
"""
:keyword cpu: Required CPU. 1 core can be represented by 1 or 1000m. This should be 500m or 1
for Basic tier, and {500m, 1, 2, 3, 4} for Standard tier.
:paramtype cpu: str
:keyword memory: Required memory. 1 GB can be represented by 1Gi or 1024Mi. This should be
{512Mi, 1Gi, 2Gi} for Basic tier, and {512Mi, 1Gi, 2Gi, ..., 8Gi} for Standard tier.
:paramtype memory: str
"""
super(ResourceRequests, self).__init__(**kwargs)
self.cpu = cpu
self.memory = memory
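
# A minimal sketch of the Kubernetes-style quantity strings this model expects
# (the values are illustrative):
#
#     requests = ResourceRequests(cpu="500m", memory="1Gi")
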
class ResourceSku(msrest.serialization.Model):
"""Describes an available Azure Spring Cloud SKU.
:ivar resource_type: Gets the type of resource the SKU applies to.
:vartype resource_type: str
:ivar name: Gets the name of SKU.
:vartype name: str
:ivar tier: Gets the tier of SKU.
:vartype tier: str
:ivar capacity: Gets the capacity of SKU.
:vartype capacity: ~azure.mgmt.appplatform.v2021_09_01_preview.models.SkuCapacity
:ivar locations: Gets the set of locations that the SKU is available.
:vartype locations: list[str]
:ivar location_info: Gets a list of locations and availability zones in those locations where
the SKU is available.
:vartype location_info:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuLocationInfo]
    :ivar restrictions: Gets the restrictions due to which the SKU cannot be used. This is
     empty if there are no restrictions.
:vartype restrictions:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuRestrictions]
"""
_attribute_map = {
'resource_type': {'key': 'resourceType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'SkuCapacity'},
'locations': {'key': 'locations', 'type': '[str]'},
'location_info': {'key': 'locationInfo', 'type': '[ResourceSkuLocationInfo]'},
'restrictions': {'key': 'restrictions', 'type': '[ResourceSkuRestrictions]'},
}
def __init__(
self,
*,
resource_type: Optional[str] = None,
name: Optional[str] = None,
tier: Optional[str] = None,
capacity: Optional["SkuCapacity"] = None,
locations: Optional[List[str]] = None,
location_info: Optional[List["ResourceSkuLocationInfo"]] = None,
restrictions: Optional[List["ResourceSkuRestrictions"]] = None,
**kwargs
):
"""
:keyword resource_type: Gets the type of resource the SKU applies to.
:paramtype resource_type: str
:keyword name: Gets the name of SKU.
:paramtype name: str
:keyword tier: Gets the tier of SKU.
:paramtype tier: str
:keyword capacity: Gets the capacity of SKU.
:paramtype capacity: ~azure.mgmt.appplatform.v2021_09_01_preview.models.SkuCapacity
:keyword locations: Gets the set of locations that the SKU is available.
:paramtype locations: list[str]
:keyword location_info: Gets a list of locations and availability zones in those locations
where the SKU is available.
:paramtype location_info:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuLocationInfo]
    :keyword restrictions: Gets the restrictions due to which the SKU cannot be used. This is
     empty if there are no restrictions.
:paramtype restrictions:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuRestrictions]
"""
super(ResourceSku, self).__init__(**kwargs)
self.resource_type = resource_type
self.name = name
self.tier = tier
self.capacity = capacity
self.locations = locations
self.location_info = location_info
self.restrictions = restrictions
class ResourceSkuCapabilities(msrest.serialization.Model):
"""ResourceSkuCapabilities.
:ivar name: Gets an invariant to describe the feature.
:vartype name: str
:ivar value: Gets an invariant if the feature is measured by quantity.
:vartype value: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
value: Optional[str] = None,
**kwargs
):
"""
:keyword name: Gets an invariant to describe the feature.
:paramtype name: str
:keyword value: Gets an invariant if the feature is measured by quantity.
:paramtype value: str
"""
super(ResourceSkuCapabilities, self).__init__(**kwargs)
self.name = name
self.value = value
class ResourceSkuCollection(msrest.serialization.Model):
"""Object that includes an array of Azure Spring Cloud SKU and a possible link for next set.
:ivar value: Collection of resource SKU.
:vartype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSku]
    :ivar next_link: URL the client should use to fetch the next page (per server-side paging).
     It's null for now, added for future use.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ResourceSku]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ResourceSku"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: Collection of resource SKU.
:paramtype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSku]
    :keyword next_link: URL the client should use to fetch the next page (per server-side
     paging). It's null for now, added for future use.
:paramtype next_link: str
"""
super(ResourceSkuCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ResourceSkuLocationInfo(msrest.serialization.Model):
"""Locations and availability zones where the SKU is available.
:ivar location: Gets location of the SKU.
:vartype location: str
:ivar zones: Gets list of availability zones where the SKU is supported.
:vartype zones: list[str]
:ivar zone_details: Gets details of capabilities available to a SKU in specific zones.
:vartype zone_details:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuZoneDetails]
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'zones': {'key': 'zones', 'type': '[str]'},
'zone_details': {'key': 'zoneDetails', 'type': '[ResourceSkuZoneDetails]'},
}
def __init__(
self,
*,
location: Optional[str] = None,
zones: Optional[List[str]] = None,
zone_details: Optional[List["ResourceSkuZoneDetails"]] = None,
**kwargs
):
"""
:keyword location: Gets location of the SKU.
:paramtype location: str
:keyword zones: Gets list of availability zones where the SKU is supported.
:paramtype zones: list[str]
:keyword zone_details: Gets details of capabilities available to a SKU in specific zones.
:paramtype zone_details:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuZoneDetails]
"""
super(ResourceSkuLocationInfo, self).__init__(**kwargs)
self.location = location
self.zones = zones
self.zone_details = zone_details
class ResourceSkuRestrictionInfo(msrest.serialization.Model):
"""Information about the restriction where the SKU cannot be used.
:ivar locations: Gets locations where the SKU is restricted.
:vartype locations: list[str]
:ivar zones: Gets list of availability zones where the SKU is restricted.
:vartype zones: list[str]
"""
_attribute_map = {
'locations': {'key': 'locations', 'type': '[str]'},
'zones': {'key': 'zones', 'type': '[str]'},
}
def __init__(
self,
*,
locations: Optional[List[str]] = None,
zones: Optional[List[str]] = None,
**kwargs
):
"""
:keyword locations: Gets locations where the SKU is restricted.
:paramtype locations: list[str]
:keyword zones: Gets list of availability zones where the SKU is restricted.
:paramtype zones: list[str]
"""
super(ResourceSkuRestrictionInfo, self).__init__(**kwargs)
self.locations = locations
self.zones = zones
class ResourceSkuRestrictions(msrest.serialization.Model):
"""Restrictions where the SKU cannot be used.
    :ivar type: Gets the type of restrictions. Possible values include: "Location", "Zone".
    :vartype type: str or
     ~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuRestrictionsType
    :ivar values: Gets the value of restrictions. If the restriction type is set to
     location, this would be the different locations where the SKU is restricted.
    :vartype values: list[str]
    :ivar restriction_info: Gets the information about the restriction where the SKU cannot be
     used.
    :vartype restriction_info:
     ~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuRestrictionInfo
    :ivar reason_code: Gets the reason for restriction. Possible values include: "QuotaId",
     "NotAvailableForSubscription".
    :vartype reason_code: str or
     ~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuRestrictionsReasonCode
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'values': {'key': 'values', 'type': '[str]'},
'restriction_info': {'key': 'restrictionInfo', 'type': 'ResourceSkuRestrictionInfo'},
'reason_code': {'key': 'reasonCode', 'type': 'str'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ResourceSkuRestrictionsType"]] = None,
values: Optional[List[str]] = None,
restriction_info: Optional["ResourceSkuRestrictionInfo"] = None,
reason_code: Optional[Union[str, "ResourceSkuRestrictionsReasonCode"]] = None,
**kwargs
):
"""
    :keyword type: Gets the type of restrictions. Possible values include: "Location", "Zone".
    :paramtype type: str or
     ~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuRestrictionsType
    :keyword values: Gets the value of restrictions. If the restriction type is set to
     location, this would be the different locations where the SKU is restricted.
    :paramtype values: list[str]
    :keyword restriction_info: Gets the information about the restriction where the SKU cannot be
     used.
    :paramtype restriction_info:
     ~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuRestrictionInfo
    :keyword reason_code: Gets the reason for restriction. Possible values include: "QuotaId",
     "NotAvailableForSubscription".
    :paramtype reason_code: str or
     ~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuRestrictionsReasonCode
"""
super(ResourceSkuRestrictions, self).__init__(**kwargs)
self.type = type
self.values = values
self.restriction_info = restriction_info
self.reason_code = reason_code
class ResourceSkuZoneDetails(msrest.serialization.Model):
"""Details of capabilities available to a SKU in specific zones.
:ivar name: Gets the set of zones that the SKU is available in with the
specified capabilities.
:vartype name: list[str]
:ivar capabilities: Gets a list of capabilities that are available for the SKU in the
specified list of zones.
:vartype capabilities:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuCapabilities]
"""
_attribute_map = {
'name': {'key': 'name', 'type': '[str]'},
'capabilities': {'key': 'capabilities', 'type': '[ResourceSkuCapabilities]'},
}
def __init__(
self,
*,
name: Optional[List[str]] = None,
capabilities: Optional[List["ResourceSkuCapabilities"]] = None,
**kwargs
):
"""
:keyword name: Gets the set of zones that the SKU is available in with the
specified capabilities.
:paramtype name: list[str]
:keyword capabilities: Gets a list of capabilities that are available for the SKU in the
specified list of zones.
:paramtype capabilities:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ResourceSkuCapabilities]
"""
super(ResourceSkuZoneDetails, self).__init__(**kwargs)
self.name = name
self.capabilities = capabilities
class ResourceUploadDefinition(msrest.serialization.Model):
"""Resource upload definition payload.
:ivar relative_path: Source relative path.
:vartype relative_path: str
:ivar upload_url: Upload URL.
:vartype upload_url: str
"""
_attribute_map = {
'relative_path': {'key': 'relativePath', 'type': 'str'},
'upload_url': {'key': 'uploadUrl', 'type': 'str'},
}
def __init__(
self,
*,
relative_path: Optional[str] = None,
upload_url: Optional[str] = None,
**kwargs
):
"""
:keyword relative_path: Source relative path.
:paramtype relative_path: str
:keyword upload_url: Upload URL.
:paramtype upload_url: str
"""
super(ResourceUploadDefinition, self).__init__(**kwargs)
self.relative_path = relative_path
self.upload_url = upload_url
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar location: The GEO location of the resource.
:vartype location: str
    :ivar tags: A set of tags. Tags of the service, which is a list of key-value pairs that
     describe the resource.
:vartype tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
"""
:keyword location: The GEO location of the resource.
:paramtype location: str
    :keyword tags: A set of tags. Tags of the service, which is a list of key-value pairs that
     describe the resource.
:paramtype tags: dict[str, str]
"""
super(TrackedResource, self).__init__(**kwargs)
self.location = location
self.tags = tags
class ServiceResource(TrackedResource):
"""Service resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar location: The GEO location of the resource.
:vartype location: str
    :ivar tags: A set of tags. Tags of the service, which is a list of key-value pairs that
     describe the resource.
:vartype tags: dict[str, str]
:ivar properties: Properties of the Service resource.
:vartype properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ClusterResourceProperties
:ivar sku: Sku of the Service resource.
:vartype sku: ~azure.mgmt.appplatform.v2021_09_01_preview.models.Sku
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'ClusterResourceProperties'},
'sku': {'key': 'sku', 'type': 'Sku'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
properties: Optional["ClusterResourceProperties"] = None,
sku: Optional["Sku"] = None,
**kwargs
):
"""
:keyword location: The GEO location of the resource.
:paramtype location: str
    :keyword tags: A set of tags. Tags of the service, which is a list of key-value pairs that
     describe the resource.
:paramtype tags: dict[str, str]
:keyword properties: Properties of the Service resource.
:paramtype properties:
~azure.mgmt.appplatform.v2021_09_01_preview.models.ClusterResourceProperties
:keyword sku: Sku of the Service resource.
:paramtype sku: ~azure.mgmt.appplatform.v2021_09_01_preview.models.Sku
"""
super(ServiceResource, self).__init__(location=location, tags=tags, **kwargs)
self.properties = properties
self.sku = sku
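
# A minimal sketch of a Service resource for a create/update call (the
# location, tags, and SKU values are illustrative):
#
#     service = ServiceResource(
#         location="eastus",
#         tags={"env": "dev"},
#         sku=Sku(name="S0", tier="Standard"),
#     )
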
class ServiceResourceList(msrest.serialization.Model):
"""Object that includes an array of Service resources and a possible link for next set.
:ivar value: Collection of Service resources.
:vartype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ServiceResource]
    :ivar next_link: URL the client should use to fetch the next page (per server-side paging).
     It's null for now, added for future use.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[ServiceResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["ServiceResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: Collection of Service resources.
:paramtype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.ServiceResource]
    :keyword next_link: URL the client should use to fetch the next page (per server-side
     paging). It's null for now, added for future use.
:paramtype next_link: str
"""
super(ServiceResourceList, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class ServiceSpecification(msrest.serialization.Model):
"""Service specification payload.
:ivar log_specifications: Specifications of the Log for Azure Monitoring.
:vartype log_specifications:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.LogSpecification]
:ivar metric_specifications: Specifications of the Metrics for Azure Monitoring.
:vartype metric_specifications:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.MetricSpecification]
"""
_attribute_map = {
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
}
def __init__(
self,
*,
log_specifications: Optional[List["LogSpecification"]] = None,
metric_specifications: Optional[List["MetricSpecification"]] = None,
**kwargs
):
"""
:keyword log_specifications: Specifications of the Log for Azure Monitoring.
:paramtype log_specifications:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.LogSpecification]
:keyword metric_specifications: Specifications of the Metrics for Azure Monitoring.
:paramtype metric_specifications:
list[~azure.mgmt.appplatform.v2021_09_01_preview.models.MetricSpecification]
"""
super(ServiceSpecification, self).__init__(**kwargs)
self.log_specifications = log_specifications
self.metric_specifications = metric_specifications
class Sku(msrest.serialization.Model):
"""Sku of Azure Spring Cloud.
:ivar name: Name of the Sku.
:vartype name: str
:ivar tier: Tier of the Sku.
:vartype tier: str
:ivar capacity: Current capacity of the target resource.
:vartype capacity: int
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'capacity': {'key': 'capacity', 'type': 'int'},
}
def __init__(
self,
*,
name: Optional[str] = "S0",
tier: Optional[str] = "Standard",
capacity: Optional[int] = None,
**kwargs
):
"""
:keyword name: Name of the Sku.
:paramtype name: str
:keyword tier: Tier of the Sku.
:paramtype tier: str
:keyword capacity: Current capacity of the target resource.
:paramtype capacity: int
"""
super(Sku, self).__init__(**kwargs)
self.name = name
self.tier = tier
self.capacity = capacity
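
# A minimal sketch; name/tier default to "S0"/"Standard", so often only the
# capacity needs to be set (the value below is illustrative):
#
#     sku = Sku(capacity=2)
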
class SkuCapacity(msrest.serialization.Model):
"""The SKU capacity.
All required parameters must be populated in order to send to Azure.
:ivar minimum: Required. Gets or sets the minimum.
:vartype minimum: int
:ivar maximum: Gets or sets the maximum.
:vartype maximum: int
:ivar default: Gets or sets the default.
:vartype default: int
:ivar scale_type: Gets or sets the type of the scale. Possible values include: "None",
"Manual", "Automatic".
:vartype scale_type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.SkuScaleType
"""
_validation = {
'minimum': {'required': True},
}
_attribute_map = {
'minimum': {'key': 'minimum', 'type': 'int'},
'maximum': {'key': 'maximum', 'type': 'int'},
'default': {'key': 'default', 'type': 'int'},
'scale_type': {'key': 'scaleType', 'type': 'str'},
}
def __init__(
self,
*,
minimum: int,
maximum: Optional[int] = None,
default: Optional[int] = None,
scale_type: Optional[Union[str, "SkuScaleType"]] = None,
**kwargs
):
"""
:keyword minimum: Required. Gets or sets the minimum.
:paramtype minimum: int
:keyword maximum: Gets or sets the maximum.
:paramtype maximum: int
:keyword default: Gets or sets the default.
:paramtype default: int
:keyword scale_type: Gets or sets the type of the scale. Possible values include: "None",
"Manual", "Automatic".
:paramtype scale_type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.SkuScaleType
"""
super(SkuCapacity, self).__init__(**kwargs)
self.minimum = minimum
self.maximum = maximum
self.default = default
self.scale_type = scale_type
class StorageProperties(msrest.serialization.Model):
"""Storage resource payload.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: StorageAccount.
All required parameters must be populated in order to send to Azure.
    :ivar storage_type: Required. The type of the storage. Constant filled by server. Possible
values include: "StorageAccount".
:vartype storage_type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.StorageType
"""
_validation = {
'storage_type': {'required': True},
}
_attribute_map = {
'storage_type': {'key': 'storageType', 'type': 'str'},
}
_subtype_map = {
'storage_type': {'StorageAccount': 'StorageAccount'}
}
def __init__(
self,
**kwargs
):
"""
"""
super(StorageProperties, self).__init__(**kwargs)
self.storage_type = None # type: Optional[str]
class StorageAccount(StorageProperties):
"""storage resource of type Azure Storage Account.
All required parameters must be populated in order to send to Azure.
    :ivar storage_type: Required. The type of the storage. Constant filled by server. Possible
values include: "StorageAccount".
:vartype storage_type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.StorageType
:ivar account_name: Required. The account name of the Azure Storage Account.
:vartype account_name: str
:ivar account_key: Required. The account key of the Azure Storage Account.
:vartype account_key: str
"""
_validation = {
'storage_type': {'required': True},
'account_name': {'required': True},
'account_key': {'required': True},
}
_attribute_map = {
'storage_type': {'key': 'storageType', 'type': 'str'},
'account_name': {'key': 'accountName', 'type': 'str'},
'account_key': {'key': 'accountKey', 'type': 'str'},
}
def __init__(
self,
*,
account_name: str,
account_key: str,
**kwargs
):
"""
:keyword account_name: Required. The account name of the Azure Storage Account.
:paramtype account_name: str
:keyword account_key: Required. The account key of the Azure Storage Account.
:paramtype account_key: str
"""
super(StorageAccount, self).__init__(**kwargs)
self.storage_type = 'StorageAccount' # type: str
self.account_name = account_name
self.account_key = account_key
class StorageResource(ProxyResource):
"""Storage resource payload.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar properties: Properties of the storage resource payload.
:vartype properties: ~azure.mgmt.appplatform.v2021_09_01_preview.models.StorageProperties
:ivar system_data: Metadata pertaining to creation and last modification of the resource.
:vartype system_data: ~azure.mgmt.appplatform.v2021_09_01_preview.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'StorageProperties'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
*,
properties: Optional["StorageProperties"] = None,
**kwargs
):
"""
:keyword properties: Properties of the storage resource payload.
:paramtype properties: ~azure.mgmt.appplatform.v2021_09_01_preview.models.StorageProperties
"""
super(StorageResource, self).__init__(**kwargs)
self.properties = properties
self.system_data = None
class StorageResourceCollection(msrest.serialization.Model):
"""Collection compose of storage resources list and a possible link for next page.
:ivar value: The storage resources list.
:vartype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.StorageResource]
:ivar next_link: The link to next page of storage list.
:vartype next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[StorageResource]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["StorageResource"]] = None,
next_link: Optional[str] = None,
**kwargs
):
"""
:keyword value: The storage resources list.
:paramtype value: list[~azure.mgmt.appplatform.v2021_09_01_preview.models.StorageResource]
:keyword next_link: The link to next page of storage list.
:paramtype next_link: str
"""
super(StorageResourceCollection, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class SupportedRuntimeVersion(msrest.serialization.Model):
"""Supported deployment runtime version descriptor.
:ivar value: The raw value which could be passed to deployment CRUD operations. Possible values
include: "Java_8", "Java_11", "NetCore_31".
:vartype value: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.SupportedRuntimeValue
:ivar platform: The platform of this runtime version (possible values: "Java" or ".NET").
Possible values include: "Java", ".NET Core".
:vartype platform: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.SupportedRuntimePlatform
:ivar version: The detailed version (major.minor) of the platform.
:vartype version: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'platform': {'key': 'platform', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[Union[str, "SupportedRuntimeValue"]] = None,
platform: Optional[Union[str, "SupportedRuntimePlatform"]] = None,
version: Optional[str] = None,
**kwargs
):
"""
:keyword value: The raw value which could be passed to deployment CRUD operations. Possible
values include: "Java_8", "Java_11", "NetCore_31".
:paramtype value: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.SupportedRuntimeValue
:keyword platform: The platform of this runtime version (possible values: "Java" or ".NET").
Possible values include: "Java", ".NET Core".
:paramtype platform: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.SupportedRuntimePlatform
:keyword version: The detailed version (major.minor) of the platform.
:paramtype version: str
"""
super(SupportedRuntimeVersion, self).__init__(**kwargs)
self.value = value
self.platform = platform
self.version = version
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:ivar created_by: The identity that created the resource.
:vartype created_by: str
:ivar created_by_type: The type of identity that created the resource. Possible values include:
"User", "Application", "ManagedIdentity", "Key".
:vartype created_by_type: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.CreatedByType
:ivar created_at: The timestamp of resource creation (UTC).
:vartype created_at: ~datetime.datetime
:ivar last_modified_by: The identity that last modified the resource.
:vartype last_modified_by: str
:ivar last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:vartype last_modified_by_type: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.CreatedByType
:ivar last_modified_at: The timestamp of resource last modification (UTC).
:vartype last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
*,
created_by: Optional[str] = None,
created_by_type: Optional[Union[str, "CreatedByType"]] = None,
created_at: Optional[datetime.datetime] = None,
last_modified_by: Optional[str] = None,
last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None,
last_modified_at: Optional[datetime.datetime] = None,
**kwargs
):
"""
:keyword created_by: The identity that created the resource.
:paramtype created_by: str
:keyword created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:paramtype created_by_type: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.CreatedByType
:keyword created_at: The timestamp of resource creation (UTC).
:paramtype created_at: ~datetime.datetime
:keyword last_modified_by: The identity that last modified the resource.
:paramtype last_modified_by: str
:keyword last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:paramtype last_modified_by_type: str or
~azure.mgmt.appplatform.v2021_09_01_preview.models.CreatedByType
:keyword last_modified_at: The timestamp of resource last modification (UTC).
:paramtype last_modified_at: ~datetime.datetime
"""
super(SystemData, self).__init__(**kwargs)
self.created_by = created_by
self.created_by_type = created_by_type
self.created_at = created_at
self.last_modified_by = last_modified_by
self.last_modified_by_type = last_modified_by_type
self.last_modified_at = last_modified_at
class TemporaryDisk(msrest.serialization.Model):
"""Temporary disk payload.
:ivar size_in_gb: Size of the temporary disk in GB.
:vartype size_in_gb: int
:ivar mount_path: Mount path of the temporary disk.
:vartype mount_path: str
"""
_validation = {
'size_in_gb': {'maximum': 5, 'minimum': 0},
}
_attribute_map = {
'size_in_gb': {'key': 'sizeInGB', 'type': 'int'},
'mount_path': {'key': 'mountPath', 'type': 'str'},
}
def __init__(
self,
*,
size_in_gb: Optional[int] = None,
mount_path: Optional[str] = "/tmp",
**kwargs
):
"""
:keyword size_in_gb: Size of the temporary disk in GB.
:paramtype size_in_gb: int
:keyword mount_path: Mount path of the temporary disk.
:paramtype mount_path: str
"""
super(TemporaryDisk, self).__init__(**kwargs)
self.size_in_gb = size_in_gb
self.mount_path = mount_path
class TestKeys(msrest.serialization.Model):
"""Test keys payload.
:ivar primary_key: Primary key.
:vartype primary_key: str
:ivar secondary_key: Secondary key.
:vartype secondary_key: str
:ivar primary_test_endpoint: Primary test endpoint.
:vartype primary_test_endpoint: str
:ivar secondary_test_endpoint: Secondary test endpoint.
:vartype secondary_test_endpoint: str
    :ivar enabled: Indicates whether the test endpoint feature is enabled or not.
:vartype enabled: bool
"""
_attribute_map = {
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
'primary_test_endpoint': {'key': 'primaryTestEndpoint', 'type': 'str'},
'secondary_test_endpoint': {'key': 'secondaryTestEndpoint', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'bool'},
}
def __init__(
self,
*,
primary_key: Optional[str] = None,
secondary_key: Optional[str] = None,
primary_test_endpoint: Optional[str] = None,
secondary_test_endpoint: Optional[str] = None,
enabled: Optional[bool] = None,
**kwargs
):
"""
:keyword primary_key: Primary key.
:paramtype primary_key: str
:keyword secondary_key: Secondary key.
:paramtype secondary_key: str
:keyword primary_test_endpoint: Primary test endpoint.
:paramtype primary_test_endpoint: str
:keyword secondary_test_endpoint: Secondary test endpoint.
:paramtype secondary_test_endpoint: str
        :keyword enabled: Indicates whether the test endpoint feature is enabled or not.
:paramtype enabled: bool
"""
super(TestKeys, self).__init__(**kwargs)
self.primary_key = primary_key
self.secondary_key = secondary_key
self.primary_test_endpoint = primary_test_endpoint
self.secondary_test_endpoint = secondary_test_endpoint
self.enabled = enabled
class UserSourceInfo(msrest.serialization.Model):
"""Source information for a deployment.
:ivar type: Type of the source uploaded. Possible values include: "Jar", "NetCoreZip",
"Source", "Container".
:vartype type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.UserSourceType
:ivar relative_path: Relative path of the storage which stores the source.
:vartype relative_path: str
:ivar version: Version of the source.
:vartype version: str
    :ivar artifact_selector: Selector for the artifact to be used for the deployment for
     multi-module projects. This should be the relative path to the target module/project.
:vartype artifact_selector: str
:ivar custom_container: Custom container payload.
:vartype custom_container: ~azure.mgmt.appplatform.v2021_09_01_preview.models.CustomContainer
"""
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'relative_path': {'key': 'relativePath', 'type': 'str'},
'version': {'key': 'version', 'type': 'str'},
'artifact_selector': {'key': 'artifactSelector', 'type': 'str'},
'custom_container': {'key': 'customContainer', 'type': 'CustomContainer'},
}
def __init__(
self,
*,
type: Optional[Union[str, "UserSourceType"]] = None,
relative_path: Optional[str] = None,
version: Optional[str] = None,
artifact_selector: Optional[str] = None,
custom_container: Optional["CustomContainer"] = None,
**kwargs
):
"""
:keyword type: Type of the source uploaded. Possible values include: "Jar", "NetCoreZip",
"Source", "Container".
:paramtype type: str or ~azure.mgmt.appplatform.v2021_09_01_preview.models.UserSourceType
:keyword relative_path: Relative path of the storage which stores the source.
:paramtype relative_path: str
:keyword version: Version of the source.
:paramtype version: str
        :keyword artifact_selector: Selector for the artifact to be used for the deployment for
         multi-module projects. This should be the relative path to the target module/project.
:paramtype artifact_selector: str
:keyword custom_container: Custom container payload.
:paramtype custom_container: ~azure.mgmt.appplatform.v2021_09_01_preview.models.CustomContainer
"""
super(UserSourceInfo, self).__init__(**kwargs)
self.type = type
self.relative_path = relative_path
self.version = version
self.artifact_selector = artifact_selector
self.custom_container = custom_container
| [
"[email protected]"
] | |
34d6e385193258946d08caaf1d3f6609ea5a9b5d | 8b441f592a6deb9b0a515cbd92bb4663ad79ffe4 | /churn/models/fbb_churn_amdocs/fbb_churn_eval_sep.py | 85e5496351a6f2707dc3e7bbf0e4fcf1caa8c12c | [] | no_license | carnaum2/use-cases | 0d391a6a10bb70b60a4025152a278b0e4c595d01 | 24920e3828234da691ab643b6dd9a0aa0a5c0df5 | refs/heads/master | 2022-12-07T03:41:34.299274 | 2020-09-07T10:20:32 | 2020-09-07T10:20:32 | 293,249,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,797 | py | # coding: utf-8
import sys
from common.src.main.python.utils.hdfs_generic import *
import argparse
import os
import sys
import time
from pyspark.sql.functions import (udf,
col,
decode,
when,
lit,
lower,
concat,
translate,
count,
sum as sql_sum,
max as sql_max,
min as sql_min,
avg as sql_avg,
greatest,
least,
isnull,
isnan,
struct,
substring,
size,
length,
year,
month,
dayofmonth,
unix_timestamp,
date_format,
from_unixtime,
datediff,
to_date,
desc,
asc,
countDistinct,
row_number)
from pyspark.sql import Row, DataFrame, Column, Window
from pyspark.sql.types import DoubleType, StringType, IntegerType, DateType, ArrayType, LongType
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer, VectorAssembler, SQLTransformer, OneHotEncoder
from pyspark.ml.evaluation import MulticlassClassificationEvaluator, BinaryClassificationEvaluator
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from datetime import datetime
from itertools import chain
import numpy as np
from functools import reduce
from utils_general import *
from utils_model import *
from metadata_fbb_churn import *
from feature_selection_utils import *
import subprocess
#from date_functions import get_next_cycle
def set_paths():
'''
Deployment should be something like "dirs/dir1/use-cases"
This function adds to the path "dirs/dir1/use-cases" and "dirs/dir1/"
:return:
'''
import imp
from os.path import dirname
import os
USE_CASES = "/var/SP/data/home/asaezco/src/devel2/use-cases"#dirname(os.path.abspath(imp.find_module('churn')[1]))
if USE_CASES not in sys.path:
sys.path.append(USE_CASES)
print("Added '{}' to path".format(USE_CASES))
# if deployment is correct, this path should be the one that contains "use-cases", "pykhaos", ...
# FIXME another way of doing it more general?
DEVEL_SRC = os.path.dirname(USE_CASES) # dir before use-cases dir
if DEVEL_SRC not in sys.path:
sys.path.append(DEVEL_SRC)
print("Added '{}' to path".format(DEVEL_SRC))
####################################
### Creating Spark Session
###################################
def get_spark_session(app_name="default name", log_level='INFO', min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "32g", driver_memory="32g"):
HOME_SRC = os.path.join(os.environ.get('BDA_USER_HOME', ''), "src")
if HOME_SRC not in sys.path:
sys.path.append(HOME_SRC)
setting_bdp(app_name=app_name, min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores, executor_memory = executor_memory, driver_memory=driver_memory)
from common.src.main.python.utils.hdfs_generic import run_sc
sc, spark, sql_context = run_sc(log_level=log_level)
return sc, spark, sql_context
# set BDP parameters
def setting_bdp(min_n_executors = 1, max_n_executors = 15, n_cores = 8, executor_memory = "16g", driver_memory="8g",
app_name = "Python app", driver_overhead="1g", executor_overhead='3g'):
MAX_N_EXECUTORS = max_n_executors
MIN_N_EXECUTORS = min_n_executors
N_CORES_EXECUTOR = n_cores
EXECUTOR_IDLE_MAX_TIME = 120
EXECUTOR_MEMORY = executor_memory
DRIVER_MEMORY = driver_memory
N_CORES_DRIVER = 1
MEMORY_OVERHEAD = N_CORES_EXECUTOR * 2048
QUEUE = "root.BDPtenants.es.medium"
BDA_CORE_VERSION = "1.0.0"
SPARK_COMMON_OPTS = os.environ.get('SPARK_COMMON_OPTS', '')
SPARK_COMMON_OPTS += " --executor-memory %s --driver-memory %s" % (EXECUTOR_MEMORY, DRIVER_MEMORY)
SPARK_COMMON_OPTS += " --conf spark.shuffle.manager=tungsten-sort"
SPARK_COMMON_OPTS += " --queue %s" % QUEUE
# Dynamic allocation configuration
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled=true"
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.maxExecutors=%s" % (MAX_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.minExecutors=%s" % (MIN_N_EXECUTORS)
SPARK_COMMON_OPTS += " --conf spark.executor.cores=%s" % (N_CORES_EXECUTOR)
SPARK_COMMON_OPTS += " --conf spark.dynamicAllocation.executorIdleTimeout=%s" % (EXECUTOR_IDLE_MAX_TIME)
# SPARK_COMMON_OPTS += " --conf spark.ui.port=58235"
SPARK_COMMON_OPTS += " --conf spark.port.maxRetries=100"
SPARK_COMMON_OPTS += " --conf spark.app.name='%s'" % (app_name)
SPARK_COMMON_OPTS += " --conf spark.submit.deployMode=client"
SPARK_COMMON_OPTS += " --conf spark.ui.showConsoleProgress=true"
SPARK_COMMON_OPTS += " --conf spark.sql.broadcastTimeout=1200"
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.memoryOverhead={}".format(executor_overhead)
SPARK_COMMON_OPTS += " --conf spark.yarn.executor.driverOverhead={}".format(driver_overhead)
SPARK_COMMON_OPTS += " --conf spark.shuffle.service.enabled = true"
BDA_ENV = os.environ.get('BDA_USER_HOME', '')
# Attach bda-core-ra codebase
SPARK_COMMON_OPTS+=" --files {}/scripts/properties/red_agent/nodes.properties,{}/scripts/properties/red_agent/nodes-de.properties,{}/scripts/properties/red_agent/nodes-es.properties,{}/scripts/properties/red_agent/nodes-ie.properties,{}/scripts/properties/red_agent/nodes-it.properties,{}/scripts/properties/red_agent/nodes-pt.properties,{}/scripts/properties/red_agent/nodes-uk.properties".format(*[BDA_ENV]*7)
os.environ["SPARK_COMMON_OPTS"] = SPARK_COMMON_OPTS
os.environ["PYSPARK_SUBMIT_ARGS"] = "%s pyspark-shell " % SPARK_COMMON_OPTS
#os.environ["SPARK_EXTRA_CONF_PARAMETERS"] = '--conf spark.yarn.jars=hdfs:///data/raw/public/lib_spark_2_1_0_jars_SPARK-18971/*'
def initialize(app_name, min_n_executors = 1, max_n_executors = 15, n_cores = 4, executor_memory = "16g", driver_memory="8g"):
import time
start_time = time.time()
print("_initialize spark")
#import pykhaos.utils.pyspark_configuration as pyspark_config
sc, spark, sql_context = get_spark_session(app_name=app_name, log_level="OFF", min_n_executors = min_n_executors, max_n_executors = max_n_executors, n_cores = n_cores,
executor_memory = executor_memory, driver_memory=driver_memory)
print("Ended spark session: {} secs | default parallelism={}".format(time.time() - start_time,
sc.defaultParallelism))
return spark
def getFbbChurnLabeledCarCycles_both(spark, origin, yearmonthday, selcols, horizon = 4):
cycle = 0
fini_tmp = yearmonthday
while cycle < horizon:
yearmonthday_target = get_next_cycle(fini_tmp, str_fmt="%Y%m%d")
cycle = cycle + 1
fini_tmp = yearmonthday_target
yearmonth = yearmonthday[0:6]
trfeatdf = getCarNumClienteDf(spark, origin, yearmonthday)
print("[Info getFbbChurnLabeledCar] " + time.ctime() + " Samples for month " + yearmonthday + ": " + str(trfeatdf.count()))
# Loading port-out requests and DXs
# # labmonthlisttr = getMonthSeq(initportmonthtr, lastportmonthtr)
    # Fiber disconnections (bajas) can come from two sources:
    # - Port-out requests of the associated fixed line
fixporttr = getFixPortRequestsForCycleList(spark, yearmonthday, yearmonthday_target)
    # - The customer dropping out of the client list (disconnection)
    fixdxtr = getFbbDxsForCycleList(spark, yearmonthday, yearmonthday_target)
# Labeling: FBB service is labeled as 1 if, during the next time window specified by the horizon, either the associated fixed service requested to be ported out or the FBB was disconnected
window = Window.partitionBy("num_cliente")
unbaltrdf = trfeatdf\
.join(fixporttr, ['msisdn_d'], "left_outer")\
.na.fill({'label_srv': 0.0})\
.join(fixdxtr, ['msisdn'], "left_outer")\
.na.fill({'label_dx': 0.0})\
.withColumn('tmp', when((col('label_dx')==1.0), 1.0).otherwise(0.0))\
.withColumn('label_bajas', sql_max('tmp').over(window))\
.withColumn('tmp2', when((col('label_srv')==1.0), 1.0).otherwise(0.0))\
.withColumn('label_port', sql_max('tmp2').over(window))\
.filter(col("rgu")=="fbb")\
.select(selcols + ['label_port', 'label_bajas'])
print("[Info getFbbChurnLabeledCar] " + time.ctime() + " Labeled samples for month " + yearmonth + ": " + str(unbaltrdf.count()))
return unbaltrdf
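# Illustrative sketch (not called by the pipeline below): the labeling above hinges
# on Window.partitionBy('num_cliente') + sql_max, so that if ANY service of a
# customer churns, every row of that customer gets label 1. The customer ids and
# values here are made up.
def _toy_window_labeling_example(spark):
    df = spark.createDataFrame(
        [('c1', 'fbb', 0.0), ('c1', 'fixed', 1.0), ('c2', 'fbb', 0.0)],
        ['num_cliente', 'rgu', 'label_srv'])
    w = Window.partitionBy('num_cliente')
    # Both rows of customer c1 end up with label 1.0; c2 keeps 0.0
    return df.withColumn('label', sql_max('label_srv').over(w))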
if __name__ == "__main__":
set_paths()
from pykhaos.utils.date_functions import *
from utils_fbb_churn import *
# create Spark context with Spark configuration
print '[' + time.ctime() + ']', 'Process started'
global sqlContext
spark = initialize("VF_ES AMDOCS FBB Churn Prediction ", executor_memory="16g", min_n_executors=10)
print('Spark Configuration used', spark.sparkContext.getConf().getAll())
selcols = getIdFeats() + getCrmFeats() + getBillingFeats() + getMobSopoFeats() + getOrdersFeats()
now = datetime.now()
date_name = str(now.year) + str(now.month).rjust(2, '0') + str(now.day).rjust(2, '0')
origin = '/user/hive/warehouse/tests_es.db/jvmm_amdocs_ids_'
## ARGUMENTS
###############
parser = argparse.ArgumentParser(
description='Generate score table for fbb model',
epilog='Please report bugs and issues to Álvaro <[email protected]>')
parser.add_argument('-s', '--training_day', metavar='<TRAINING_DAY>', type=str, required=True,
help='Training day YYYYMMDD. Date of the CAR taken to train the model.')
parser.add_argument('-p', '--prediction_day', metavar='<PREDICTION_DAY>', type=str, required=True,
help='Prediction day YYYYMMDD.')
parser.add_argument('-o', '--horizon', metavar='<horizon>', type=int, required=True,
help='Number of cycles used to gather the portability requests from the training day.')
args = parser.parse_args()
print(args)
# Cycle used for CAR and Extra Feats in the training set
    trcycle_ini = args.training_day  # e.g. '20181130'
    # Number of cycles to gather dismiss requests
    horizon = args.horizon  # e.g. 4
    # Cycle used for CAR and Extra Feats in the test set
    ttcycle_ini = args.prediction_day  # e.g. '20181231'
tr_ttdates = trcycle_ini + '_' + ttcycle_ini
########################
### 1. TRAINING DATA ###
########################
# 1.1. Loading training data
#path = '/data/udf/vf_es/churn/fbb_tmp/inittrdf_ini_' + tr_ttdates
inittrdf_ini = getFbbChurnLabeledCarCycles_both(spark, origin, trcycle_ini, selcols, horizon)
#print "[Info Main FbbChurn] " + time.ctime() + " Saving inittrdf_ini to HDFS"
#path = '/data/udf/vf_es/churn/fbb_tmp/inittrdf_' + tr_ttdates
    print 'Dxs in training df: {}'.format(inittrdf_ini.where(col('label_bajas') > 0).count())
    print 'Ports in training df: {}'.format(inittrdf_ini.where(col('label_port') > 0).count())
## Reading the Extra Features
dfExtraFeat = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(trcycle_ini[0:4]), int(trcycle_ini[4:6]), int(trcycle_ini[6:8])))
# Taking only the clients with a fbb service
dfExtraFeatfbb = dfExtraFeat.join(inittrdf_ini, ["num_cliente"], "leftsemi")
dfExtraFeatfbb = dfExtraFeatfbb.cache()
#print "[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats: ", dfExtraFeatfbb.count()
# Taking the Extra Features of interest and adding their values for num_client when necessary
dfExtraFeatSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb)
#print "[Info Main FbbChurn] " + time.ctime() + " Calculating the total value of the extra feats for each number client"
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeatSel.columns:
dfillNa.pop(kkey, None)
inittrdf = inittrdf_ini.join(dfExtraFeatSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
#print "[Info Main FbbChurn] " + time.ctime() + " Saving inittrdf to HDFS " +str(inittrdf.count())
#path_p = '/data/udf/vf_es/churn/fbb_tmp/inittrdf_20181130_p'
#inittrdf_port.repartition(200).write.save(path_p, format='parquet', mode='overwrite')
[unbaltrdf_, valdf] = inittrdf.randomSplit([0.8, 0.2], 1234)
[unbaltrdf, ensembdf] = unbaltrdf_.randomSplit([0.8, 0.2], 1234)
unbaltrdf = unbaltrdf.cache()
valdf = valdf.cache()
unbaltrdf.groupBy('label_bajas').agg(count('*')).show()
unbaltrdf.groupBy('label_port').agg(count('*')).show()
trdf_port = balance_df2(unbaltrdf, 'label_port')
trdf_dx = balance_df2(unbaltrdf, 'label_bajas')
trdf_dx.groupBy('label_bajas').agg(count('*')).show()
trdf_port.groupBy('label_port').agg(count('*')).show()
allFeats = trdf_dx.columns
# Getting only the numeric variables
catCols = [item[0] for item in trdf_dx.dtypes if item[1].startswith('string')]
numerical_feats = list(set(allFeats) - set(list(
set().union(getIdFeats(), getIdFeats_tr(), getNoInputFeats(), catCols, [c + "_enc" for c in getCatFeatsCrm()],
["label"]))))
noninf_feats = getNonInfFeats(trdf_dx, numerical_feats)
#unbaltrdf.repartition(300).write.save(path1,format='parquet', mode='overwrite')
#valdf.repartition(300).write.save(path2,format='parquet', mode='overwrite')
# 1.2. Balanced df for training
####################
### 2. TEST DATA ###
####################
    ttdf_ini = getFbbChurnLabeledCarCycles_both(spark, origin, ttcycle_ini, selcols, horizon)
#print "[Info Main FbbChurn] " + time.ctime() + " Saving ttdf_ini to HDFS "
#ttdf_ini.repartition(200).write.save(path,format='parquet', mode='overwrite')
#ttdf_ini.describe('label').show()
#path = "/data/udf/vf_es/churn/fbb_tmp/ttdf_" + tr_ttdates
dfExtraFeat_tt = spark.read.parquet('/data/udf/vf_es/churn/extra_feats_mod/extra_feats/year={}/month={}/day={}'
.format(int(ttcycle_ini[0:4]), int(ttcycle_ini[4:6]), int(ttcycle_ini[6:8])))
dfExtraFeatfbb_tt = dfExtraFeat_tt.join(ttdf_ini.select('num_cliente'), on='num_cliente', how='leftsemi')
#print(dfExtraFeatfbb_tt.select('num_cliente').distinct().count(), ttdf_ini.select('num_cliente').distinct().count())
dfExtraFeatfbb_tt = dfExtraFeatfbb_tt.cache()
#print("[Info Main FbbChurn] " + time.ctime() + " Count of the ExtraFeats ", dfExtraFeatfbb_tt.count())
dfExtraFeat_ttSel, selColumnas = addExtraFeatsEvol(dfExtraFeatfbb_tt)
dfillNa = fillNa(spark)
for kkey in dfillNa.keys():
if kkey not in dfExtraFeat_ttSel.columns:
dfillNa.pop(kkey, None)
ttdf = ttdf_ini.join(dfExtraFeat_ttSel, ["msisdn", "num_cliente", 'rgu'], how="left").na.fill(dfillNa)
####################
### 3. MODELLING ###
####################
featCols = list(set(numerical_feats) - set(noninf_feats + ['label_bajas','label_port']))
for f in featCols:
print "[Info Main FbbChurn] Input feat: " + f
assembler = VectorAssembler(inputCols=featCols, outputCol="features")
classifier_port = RandomForestClassifier(featuresCol="features", \
labelCol="label_port", \
maxDepth=20, \
maxBins=32, \
minInstancesPerNode=100, \
impurity="entropy", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.85, minInfoGain = 0.001, \
numTrees=800, \
seed=1234)
classifier_dx = RandomForestClassifier(featuresCol="features", \
labelCol="label_bajas", \
maxDepth=18, \
maxBins=32, \
minInstancesPerNode=90, \
impurity="entropy", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.85, minInfoGain = 0.001, \
numTrees=800, \
seed=1234)
pipeline_port = Pipeline(stages=[assembler, classifier_port])
pipeline_dx = Pipeline(stages=[assembler, classifier_dx])
getScore = udf(lambda prob: float(prob[1]), DoubleType())
model_dx = pipeline_dx.fit(trdf_dx)
calibmodel_dx = getCalibrationFunction(spark, model_dx, valdf, 'label_bajas', 10)
    # Port-out model and its calibration
    model_port = pipeline_port.fit(trdf_port)
calibmodel_port = getCalibrationFunction(spark, model_port, valdf, 'label_port', 10)
feat_importance_port = getOrderedRelevantFeats(model_port, featCols, 'f', 'rf')
feat_importance_dx = getOrderedRelevantFeats(model_dx, featCols, 'f', 'rf')
port_imp, imp = zip(*feat_importance_port)
dx_imp, imp_dx = zip(*feat_importance_dx)
n = 200
list_f = port_imp[:n] + dx_imp[:n]
featCols_ensemb = list(dict.fromkeys(list_f)) + ['calib_model_score_portas', 'calib_model_score_bajas']
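    # The dict.fromkeys trick above de-duplicates the union of both top-n feature
    # lists (insertion order is only guaranteed for dicts on Python 3.7+)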
##################
### EVALUATION ###
##################
    # Evaluation of the disconnections (bajas) model: train and test AUC
tr_preds_df_dx = model_dx.transform(trdf_dx).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_df_dx = calibmodel_dx[0].transform(tr_preds_df_dx)
trPredictionAndLabels_dx = tr_calib_preds_df_dx.select(['calib_model_score', 'label_bajas']).rdd.map(lambda r: (r['calib_model_score'], r['label_bajas']))
trmetrics_dx = BinaryClassificationMetrics(trPredictionAndLabels_dx)
tt_preds_df_dx = model_dx.transform(ttdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tt_calib_preds_df_dx = calibmodel_dx[0].transform(tt_preds_df_dx)
ttPredictionAndLabels_dx = tt_calib_preds_df_dx.select(['calib_model_score', 'label_bajas']).rdd.map(lambda r: (r['calib_model_score'], r['label_bajas']))
ttmetrics_dx = BinaryClassificationMetrics(ttPredictionAndLabels_dx)
print('Bajas:')
print(" Area under ROC(tr) = " + str(trmetrics_dx.areaUnderROC))
print(" Area under ROC(tt) = " + str(ttmetrics_dx.areaUnderROC))
print(" ")
    # Evaluation of the port-out (portas) model: train and test AUC
tr_preds_df_port = model_port.transform(trdf_port).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tr_calib_preds_df_port = calibmodel_port[0].transform(tr_preds_df_port)
trPredictionAndLabels_port = tr_calib_preds_df_port.select(['calib_model_score', 'label_port']).rdd.map(lambda r: (r['calib_model_score'], r['label_port']))
trmetrics_port = BinaryClassificationMetrics(trPredictionAndLabels_port)
tt_preds_df_port = model_port.transform(ttdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
tt_calib_preds_df_port = calibmodel_port[0].transform(tt_preds_df_port)
ttPredictionAndLabels_port = tt_calib_preds_df_port.select(['calib_model_score', 'label_port']).rdd.map(lambda r: (r['calib_model_score'], r['label_port']))
ttmetrics_port = BinaryClassificationMetrics(ttPredictionAndLabels_port)
print('Portas:')
print(" Area under ROC(tr) = " + str(trmetrics_port.areaUnderROC))
print(" Area under ROC(tt) = " + str(ttmetrics_port.areaUnderROC))
print(" ")
tt_calib_preds_df_port_ = tt_calib_preds_df_port.withColumnRenamed('calib_model_score', 'calib_model_score_portas')
tt_calib_preds_df_dx_ = tt_calib_preds_df_dx.withColumnRenamed('calib_model_score', 'calib_model_score_bajas')
joined = tt_calib_preds_df_port_.select('num_cliente', 'label_port','calib_model_score_portas').join(tt_calib_preds_df_dx_.select('num_cliente', 'label_bajas', 'calib_model_score_bajas'), ['num_cliente'], 'inner')
joined = joined.withColumn('label', greatest('label_port','label_bajas'))
ensembled = joined.withColumn('max', greatest('calib_model_score_portas','calib_model_score_bajas')).withColumn('min', least('calib_model_score_portas','calib_model_score_bajas'))\
.withColumn('mean', (col('calib_model_score_portas')+col('calib_model_score_bajas'))/2)
pred_max = ensembled.select(['max', 'label']).rdd.map(lambda r: (r['max'], r['label']))
pred_max_metrics = BinaryClassificationMetrics(pred_max)
pred_min = ensembled.select(['min', 'label']).rdd.map(lambda r: (r['min'], r['label']))
pred_min_metrics = BinaryClassificationMetrics(pred_min)
pred_mean = ensembled.select(['mean', 'label']).rdd.map(lambda r: (r['mean'], r['label']))
pred_mean_metrics = BinaryClassificationMetrics(pred_mean)
print(" Area under ROC(tt) max = " + str(pred_max_metrics.areaUnderROC))
print(" Area under ROC(tt) min = " + str(pred_min_metrics.areaUnderROC))
print(" Area under ROC(tt) mean = " + str(pred_mean_metrics.areaUnderROC))
assembler_ensemb = VectorAssembler(inputCols=featCols_ensemb, outputCol="features")
classifier_ensemb = RandomForestClassifier(featuresCol="features", \
labelCol="label", \
maxDepth=20, \
maxBins=32, \
minInstancesPerNode=90, \
impurity="entropy", \
featureSubsetStrategy="sqrt", \
subsamplingRate=0.85, minInfoGain = 0.001, \
numTrees=800, \
seed=1234)
pipeline_ensemb = Pipeline(stages=[assembler_ensemb, classifier_ensemb])
ensembdf = ensembdf.withColumn('label', greatest('label_port','label_bajas'))
model_ensemb = model_dx.transform(ensembdf).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
model_ensemb_calib_bajas = calibmodel_dx[0].transform(model_ensemb).withColumnRenamed('calib_model_score','calib_model_score_bajas')
model_ensemb = model_port.transform(model_ensemb_calib_bajas.drop('features').drop(col('probability')).drop(col('prediction')).drop(col('rawPrediction')).drop(col('model_score'))).withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
model_ensemb_calib = calibmodel_port[0].transform(model_ensemb).withColumnRenamed('calib_model_score', 'calib_model_score_portas')
model_ensemb_fit = pipeline_ensemb.fit(model_ensemb_calib.drop('features').drop(col('probability')).drop(col('prediction')).drop(col('rawPrediction')).drop(col('model_score')))
ensemb_preds_tr = model_ensemb_fit.transform(model_ensemb_calib.drop('features').drop(col('probability')).drop(col('prediction')).drop(col('rawPrediction')).drop(col('model_score')))\
.withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
ensembler_PredAndLabs = ensemb_preds_tr.select(['model_score', 'label']).rdd.map(lambda r: (r['model_score'], r['label']))
trmetrics_ensembled = BinaryClassificationMetrics(ensembler_PredAndLabs)
print(" Area under ROC(tr-ensemb) = " + str(trmetrics_ensembled.areaUnderROC))
ensembler_df = tt_calib_preds_df_port_.select('num_cliente','calib_model_score_portas').join(tt_calib_preds_df_dx_.drop('features').drop(col('rawPrediction')).drop(col('probability')), ['num_cliente'], 'inner')
ensembler_df = ensembler_df.withColumn('label', greatest('label_port','label_bajas'))
ensemb_preds_tt = model_ensemb_fit.transform(ensembler_df.drop('features').drop(col('probability')).drop(col('prediction')).drop(col('rawPrediction')).drop(col('model_score')))\
.withColumn("model_score", getScore(col("probability")).cast(DoubleType()))
ensembler_PredAndLabs_tt = ensemb_preds_tt.select(['model_score', 'label']).rdd.map(lambda r: (r['model_score'], r['label']))
ttmetrics_ensembled = BinaryClassificationMetrics(ensembler_PredAndLabs_tt)
print(" Area under ROC(tt-ensemb) = " + str(ttmetrics_ensembled.areaUnderROC))
spark.stop() | [
"[email protected]"
] | |
fc28baeac41627dff3871aeae768c4e62954d2aa | b7b243902150a1aa5b774523ac01d7016de13477 | /cyc/DP/stock/123.py | 116e4f9d9415a2a4c8e82fe5322822c75151375a | [] | no_license | Veraph/LeetCode_Practice | 7e97a93464911a1f33b3133043d96c88cd54016a | eafadd711f6ec1b60d78442280f1c44b6296209d | refs/heads/master | 2023-03-23T11:49:19.046474 | 2021-03-18T02:22:50 | 2021-03-18T02:22:50 | 273,317,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | # 123.py -- Best time to buy and sell stock III
'''
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most two transactions.
Note: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).
Example 1:
Input: prices = [3,3,5,0,0,3,1,4]
Output: 6
Explanation: Buy on day 4 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
Then buy on day 7 (price = 1) and sell on day 8 (price = 4), profit = 4-1 = 3.
Example 2:
Input: prices = [1,2,3,4,5]
Output: 4
Explanation: Buy on day 1 (price = 1) and sell on day 5 (price = 5), profit = 5-1 = 4.
Note that you cannot buy on day 1, buy on day 2 and sell them later, as you are engaging multiple transactions at the same time. You must sell before buying again.
Example 3:
Input: prices = [7,6,4,3,1]
Output: 0
Explanation: In this case, no transaction is done, i.e. max profit = 0.
Example 4:
Input: prices = [1]
Output: 0
'''
def maxProfit(prices):
'''
    Four states:
    b1 and b2 are the effective costs we carry for the first and second buy;
    b2 goes negative once the profit of the first sale offsets the second
    purchase price. s1 and s2 are the best profits after selling the first
    and second stock.
'''
b1 = b2 = float('inf')
s1 = s2 = 0
for price in prices:
if b1 > price:
b1 = price
if s1 < price - b1:
s1 = price - b1
if b2 > price - s1:
b2 = price - s1
if s2 < price - b2:
s2 = price - b2
return s2
maxProfit([3,3,5,0,0,3,1,4]) | [
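# Sanity checks against the examples in the problem statement above
assert maxProfit([3, 3, 5, 0, 0, 3, 1, 4]) == 6
assert maxProfit([1, 2, 3, 4, 5]) == 4
assert maxProfit([7, 6, 4, 3, 1]) == 0
assert maxProfit([1]) == 0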
"[email protected]"
] | |
982036613e2e749e78f5d113fca143718d25414f | 3a1fea0fdd27baa6b63941f71b29eb04061678c6 | /src/ch08/rtda/heap/Method.py | eaa6ddab3eefc513c2349d4e34ad7e703e56d71d | [] | no_license | sumerzhang/JVMByPython | 56a7a896e43b7a5020559c0740ebe61d608a9f2a | 1554cf62f47a2c6eb10fe09c7216518416bb65bc | refs/heads/master | 2022-12-02T17:21:11.020486 | 2020-08-18T06:57:10 | 2020-08-18T06:57:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,295 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: Method.py
@time: 2019/9/16 16:55
@desc: Method information
"""
from ch08.classfile.MemberInfo import MemberInfo
from ch08.rtda.heap import AccessFlags
from ch08.rtda.heap.ClassMember import ClassMember
from ch08.rtda.heap.MethodDescriptorParser import MethodDescriptorParser
class Method(ClassMember):
def __init__(self):
super(Method, self).__init__()
# 操作数栈
self.max_stack = 0
# 局部变量表大小
self.max_locals = 0
# 存放方法字节码
self.code = []
self.arg_slot_count = 0
# 根据class文件中的方法信息创建Method表
@staticmethod
def new_methods(clazz, cfMethods):
methods = []
for cfMethod in cfMethods:
method = Method()
method.set_class(clazz)
method.copy_member_info(cfMethod)
method.copy_attributes(cfMethod)
method.calc_arg_slot_count()
methods.append(method)
return methods
    # Extract max_stack, max_locals and code from the method_info structure
def copy_attributes(self, cfMethod: MemberInfo):
code_attr = cfMethod.code_attribute
if code_attr is not None:
self.max_stack = code_attr.max_stack
self.max_locals = code_attr.max_locals
self.code = code_attr.code
# 计算参数在局部变量表中占用多少位置
def calc_arg_slot_count(self):
parsed_descriptor = MethodDescriptorParser.parse_method_descriptor(self.descriptor)
for _ in parsed_descriptor.parameter_types:
self.arg_slot_count += 1
if not self.is_static():
self.arg_slot_count += 1
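    # Note: per the JVM spec, long and double parameters occupy two local-variable
    # slots each; this simplified chapter version counts one slot per parameter.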
def is_synchronized(self):
return 0 != self.access_flags & AccessFlags.ACC_SYNCHRONIZED
def is_bridge(self):
return 0 != self.access_flags & AccessFlags.ACC_BRIDGE
def is_varargs(self):
return 0 != self.access_flags & AccessFlags.ACC_VARARGS
def is_native(self):
return 0 != self.access_flags & AccessFlags.ACC_NATIVE
def is_abstract(self):
return 0 != self.access_flags & AccessFlags.ACC_ABSTRACT
def is_strict(self):
return 0 != self.access_flags & AccessFlags.ACC_STRICT
| [
"[email protected]"
] | |
168f0c35ff34bedb374f39dccf96153f2d189166 | fef8f43025cff430d9aea080885173d9c22b3cb6 | /etalia/library/migrations/0011_auto_20170616_0411.py | 57504ddd33b4be67c994de2dbe976ccc55c6ca32 | [] | no_license | GemmaAA1/etalia-open | 30a083141330e227ac1de9855894bfb6e476e3cc | 260ce54d2da53c943d8b82fa9d40bb0c0df918a6 | refs/heads/master | 2023-03-28T03:33:13.771987 | 2017-10-30T00:55:27 | 2017-10-30T00:55:27 | 351,120,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library', '0010_journal_is_in_fixture'),
]
operations = [
migrations.AlterField(
model_name='paper',
name='date_fs',
field=models.DateField(db_index=True, null=True, blank=True),
),
]
| [
"[email protected]"
] | |
c22f3e4a7b31155d6afa2f033d9ea480cfd488d3 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /hjZTbJNzKiSxTtbik_22.py | 5f6c69157466d046cc2ece0ea8c5c597bfbd928b | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py |
def sort_by_string(lst, txt):
    # First letters of the candidate words
    letters = [word[0] for word in lst]
    # Keep only ordering characters that actually start some word; filtering
    # into a new list avoids mutating a list while iterating over it
    sorts = [char for char in txt if char in letters]
    # Emit the words in the order their first letters appear in txt
    newlst = []
    for char in sorts:
        for word in lst:
            if word[0] == char:
                newlst.append(word)
    return newlst
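# For instance: sort_by_string(['banana', 'apple', 'cherry'], 'abc')
# returns ['apple', 'banana', 'cherry']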
| [
"[email protected]"
] | |
156756b04cbdf6811f0ff5436305d7c09339f87d | e70cb371f8642ac597f3a2266da3be205d971af5 | /X0406.py | ce95ba2dca74845f1048bbcc82f57b8c5550fd4e | [] | no_license | bgnori/X0406 | 0b5a2545832c6283c07cd065a21697cf9f52042a | ed0f7ee1f8112043a246a64c99bff8a427541b03 | refs/heads/master | 2021-01-20T05:07:38.994728 | 2015-10-06T12:28:37 | 2015-10-06T12:28:37 | 40,598,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,099 | py | #!/bin/python
#-*- coding=utf-8 -*-
import re
import json
DEBUG = False
x = re.compile("(?P<IsValuationAccountCode>\()?(?P<AccountCode>\d\d\d\d)\)?,(?P<rest>.+)")
start_end = re.compile("\[(?P<start>\d\d\d\d)-(?P<end>\d\d\d\d)\]")
class IDNode(object):
def __init__(self, code, title, isvaluation, start, end, note):
self.children = []
self.code = code
self.title = title
self.isvaluation = isvaluation
self.start = start
self.end = end
self.note = note
def add(self, node):
for c in self.children:
if c.start <= node.code and node.code <= c.end:
c.add(node)
return
self.children.append(node)
def visit(self, f, n=None):
if n is None:
n = 0
f(n, self)
for c in self.children:
c.visit(f, n+1)
def findByCode(self, code):
if self.code == code:
return self
for c in self.children:
if c.code == code:
return c
if c.start <= code and code <= c.end:
return c.findByCode(code)
return None
def findByTitle(self, title):
if self.title == title:
return self
for c in self.children:
found = c.findByTitle(title)
if found is not None:
return found
return None
def load(f):
tree = IDNode(code=0, title="勘定科目", isvaluation=False, start=1, end=9999, note=None)
for line in f:
m = x.match(line)
if m:
d = m.groupdict()
assert(d['AccountCode'] is not None)
start = None
end = None
isvaluation = d['IsValuationAccountCode'] is not None
code = int(d['AccountCode'])
note = None
for i, part in enumerate(d["rest"].split(",")):
if i == 0:
title = part
else:
m = start_end.match(part)
if m is not None:
d = m.groupdict()
start = int(d["start"])
end = int(d["end"])
else:
note = part
if DEBUG:
print code, start, end
if start is None:
m = code
r = 1000
while r > 0:
n, m = divmod(m, r)
if DEBUG:
print n, m
if n == 0:
start = code + 1
end = code + r*10 -1
break
r = r / 10
if DEBUG:
print code, start, end, "default"
tree.add(IDNode(code, title, isvaluation, start, end, note))
return tree
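# Expected input format, inferred from the regexes above (the account titles
# here are made-up examples):
#   1000,Assets,[1000-1999]   -> explicit range of child account codes
#   1100,Cash                 -> range inferred from the code: 1101-1199
#   (1500),Allowance          -> a leading "(" marks a valuation account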
if __name__ == "__main__":
import sys
tree = load(sys.stdin.readlines())
def foo(n, node):
print ' '*n, node.code, node.title, node.isvaluation, node.note
tree.visit(foo)
| [
"[email protected]"
] | |
f7ee387f7c79dc4fbb42c1d6b123cb829d3698e5 | 5509d3b5bbcc393684f7d2fc7fc11bb12ed1911a | /env/lib/python2.7/site-packages/pyramid_debugtoolbar-2.4.2-py2.7.egg/pyramid_debugtoolbar/panels/traceback.py | d80e51455eb29e63e6e7c926c6b4ed65eda11e19 | [] | no_license | jundong/CRManager | 99fd6c0eda084354d9237e11d07ef82124c22e1e | 4306bf4d2b29b19d4b3092aab152192f7d623a19 | refs/heads/master | 2021-01-21T04:47:26.125045 | 2016-07-29T15:07:04 | 2016-07-29T15:07:04 | 50,995,792 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,168 | py | import re
from pyramid_debugtoolbar.tbtools import Traceback
from pyramid_debugtoolbar.panels import DebugPanel
from pyramid_debugtoolbar.utils import escape
from pyramid_debugtoolbar.utils import STATIC_PATH
from pyramid_debugtoolbar.utils import ROOT_ROUTE_NAME
from pyramid_debugtoolbar.utils import EXC_ROUTE_NAME
_ = lambda x: x
class TracebackPanel(DebugPanel):
name = 'traceback'
template = 'pyramid_debugtoolbar.panels:templates/traceback.dbtmako'
title = _('Traceback')
nav_title = title
def __init__(self, request):
self.request = request
self.exc_history = request.exc_history
@property
def has_content(self):
if hasattr(self.request, 'pdbt_tb'):
return True
else:
return False
def process_response(self, response):
if self.has_content:
traceback = self.request.pdbt_tb
exc = escape(traceback.exception)
summary = Traceback.render_summary(traceback, include_title=False, request=self.request)
token = self.request.registry.pdtb_token
url = '' # self.request.route_url(EXC_ROUTE_NAME, _query=qs)
evalex = self.exc_history.eval_exc
self.data = {
'evalex': evalex and 'true' or 'false',
'console': 'false',
'lodgeit_url': None,
'title': exc,
'exception': exc,
'exception_type': escape(traceback.exception_type),
'summary': summary,
'plaintext': traceback.plaintext,
'plaintext_cs': re.sub('-{2,}', '-', traceback.plaintext),
'traceback_id': traceback.id,
'token': token,
'url': url,
}
def render_content(self, request):
return super(TracebackPanel, self).render_content(request)
def render_vars(self, request):
return {
'static_path': request.static_url(STATIC_PATH),
'root_path': request.route_url(ROOT_ROUTE_NAME)
}
| [
"[email protected]"
] | |
9431a9423d7fad2d5a4e7c1636dac7a36b374906 | 34530f74092ac04334d3d18879f3c59c3354f4f8 | /0x08-python-more_classes/7-rectangle.py | cb0c06f693a581ec33482c8da5d28feb78a75f5c | [] | no_license | MarySerna/holbertonschool-higher_level_programming | 9f37df91d7da703a31c461ca07703947ed090322 | f7ed79a660690d412b7a8298ac9c658962d07c7a | refs/heads/master | 2021-01-08T23:53:29.528920 | 2020-05-15T04:15:25 | 2020-05-15T04:15:25 | 242,180,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,343 | py | #!/usr/bin/python3
"""Module to define a rectangle
Args:
width (int): width of a rectangle
height (int): height of a rectangle
"""
class Rectangle:
"""Rectangle class
"""
number_of_instances = 0
print_symbol = '#'
def __init__(self, width=0, height=0):
"""Initializes Rectangle class
"""
self.width = width
self.height = height
Rectangle.number_of_instances += 1
"""Private instance attribute: width"""
@property
def width(self):
"""Width getter
"""
return self.__width
@width.setter
def width(self, value):
"""Width setter
"""
if not isinstance(value, int):
raise TypeError('width must be an integer')
if value < 0:
raise ValueError('width must be >= 0')
self.__width = value
"""Private instance attribute: height"""
@property
def height(self):
"""Height getter
"""
return self.__height
@height.setter
def height(self, value):
"""Height setter
"""
if not isinstance(value, int):
raise TypeError('height must be an integer')
if value < 0:
raise ValueError('height must be >= 0')
self.__height = value
def area(self):
"""Area of a rectangle
"""
return self.__width * self.__height
def perimeter(self):
"""Perimeter of a rectangle
"""
if self.width == 0 or self.height == 0:
return 0
return ((2 * self.width) + (2 * self.height))
def __str__(self):
"""Prints the rectangle with the character #
"""
rect = ""
if self.width == 0 or self.__height == 0:
return ""
for i in range(self.__height):
for j in range(self.__width):
rect += str(self.print_symbol)
if i < self.__height - 1:
rect += '\n'
return rect
def __repr__(self):
"""Prints representation of the rectangle
"""
return "Rectangle({}, {})".format(self.__width, self.__height)
def __del__(self):
"""Prints a message when an instance of Rectangle is deleted
"""
print('Bye rectangle...')
Rectangle.number_of_instances -= 1
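# Example usage (illustrative):
#     r = Rectangle(2, 3)
#     r.area()       # -> 6
#     r.perimeter()  # -> 10
#     print(r)       # prints three rows of "##" drawn with print_symbol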
| [
"[email protected]"
] | |
ab18146ae14eef84aa4d85f43c6db2d2694961c5 | 846a7668ac964632bdb6db639ab381be11c13b77 | /android/test/vts/runners/host/config_parser.py | ad1d845226dd3bcc151dc327374f772e5307f020 | [] | no_license | BPI-SINOVOIP/BPI-A64-Android8 | f2900965e96fd6f2a28ced68af668a858b15ebe1 | 744c72c133b9bf5d2e9efe0ab33e01e6e51d5743 | refs/heads/master | 2023-05-21T08:02:23.364495 | 2020-07-15T11:27:51 | 2020-07-15T11:27:51 | 143,945,191 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,589 | py | #
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from builtins import str
import copy
import signal
import sys
import traceback
from vts.runners.host import keys
from vts.runners.host import errors
from vts.runners.host import signals
from vts.runners.host import utils
_DEFAULT_CONFIG_TEMPLATE = {
"test_bed": {
"AndroidDevice": "*",
},
"log_path": "/tmp/logs",
"test_paths": ["./"],
"enable_web": False,
}
def GetDefaultConfig(test_name):
"""Returns a default config data structure (when no config file is given)."""
result = copy.deepcopy(_DEFAULT_CONFIG_TEMPLATE)
result[keys.ConfigKeys.KEY_TESTBED][
keys.ConfigKeys.KEY_TESTBED_NAME] = test_name
return result
def load_test_config_file(test_config_path,
tb_filters=None,
baseline_config=None):
"""Processes the test configuration file provided by user.
Loads the configuration file into a json object, unpacks each testbed
config into its own json object, and validate the configuration in the
process.
Args:
test_config_path: Path to the test configuration file.
tb_filters: A list of strings, each is a test bed name. If None, all
test beds are picked up. Otherwise only test bed names
specified will be picked up.
baseline_config: dict, the baseline config to use (used iff
test_config_path does not have device info).
Returns:
A list of test configuration json objects to be passed to TestRunner.
"""
try:
configs = utils.load_config(test_config_path)
if keys.ConfigKeys.KEY_TESTBED not in configs and baseline_config:
configs.update(baseline_config)
if tb_filters:
tbs = []
for tb in configs[keys.ConfigKeys.KEY_TESTBED]:
if tb[keys.ConfigKeys.KEY_TESTBED_NAME] in tb_filters:
tbs.append(tb)
if len(tbs) != len(tb_filters):
print("Expect to find %d test bed configs, found %d." %
(len(tb_filters), len(tbs)))
print("Check if you have the correct test bed names.")
return None
configs[keys.ConfigKeys.KEY_TESTBED] = tbs
_validate_test_config(configs)
_validate_testbed_configs(configs[keys.ConfigKeys.KEY_TESTBED])
k_log_path = keys.ConfigKeys.KEY_LOG_PATH
configs[k_log_path] = utils.abs_path(configs[k_log_path])
tps = configs[keys.ConfigKeys.KEY_TEST_PATHS]
except errors.USERError as e:
print("Something is wrong in the test configurations.")
print(str(e))
return None
except Exception as e:
print("Error loading test config {}".format(test_config_path))
print(traceback.format_exc())
return None
# Unpack testbeds into separate json objects.
beds = configs.pop(keys.ConfigKeys.KEY_TESTBED)
config_jsons = []
for original_bed_config in beds:
new_test_config = dict(configs)
new_test_config[keys.ConfigKeys.KEY_TESTBED] = original_bed_config
# Keys in each test bed config will be copied to a level up to be
# picked up for user_params. If the key already exists in the upper
# level, the local one defined in test bed config overwrites the
# general one.
new_test_config.update(original_bed_config)
config_jsons.append(new_test_config)
return config_jsons
def parse_test_list(test_list):
"""Parse user provided test list into internal format for test_runner.
Args:
test_list: A list of test classes/cases.
Returns:
A list of tuples, each has a test class name and a list of test case
names.
"""
result = []
for elem in test_list:
result.append(_parse_one_test_specifier(elem))
return result
def _validate_test_config(test_config):
"""Validates the raw configuration loaded from the config file.
Making sure all the required keys exist.
Args:
test_config: A dict that is the config to validate.
Raises:
errors.USERError is raised if any required key is missing from the
config.
"""
for k in keys.ConfigKeys.RESERVED_KEYS:
if k not in test_config:
raise errors.USERError(("Required key {} missing in test "
"config.").format(k))
def _parse_one_test_specifier(item):
"""Parse one test specifier from command line input.
This also verifies that the test class name and test case names follow
ACTS's naming conventions. A test class name has to end with "Test"; a test
    case name has to start with "test_".
Args:
item: A string that specifies a test class or test cases in one test
class to run.
Returns:
A tuple of a string and a list of strings. The string is the test class
name, the list of strings is a list of test case names. The list can be
None.
"""
tokens = item.split(':')
if len(tokens) > 2:
raise errors.USERError("Syntax error in test specifier %s" % item)
if len(tokens) == 1:
# This should be considered a test class name
test_cls_name = tokens[0]
_validate_test_class_name(test_cls_name)
return (test_cls_name, None)
elif len(tokens) == 2:
# This should be considered a test class name followed by
# a list of test case names.
test_cls_name, test_case_names = tokens
clean_names = []
_validate_test_class_name(test_cls_name)
for elem in test_case_names.split(','):
test_case_name = elem.strip()
if not test_case_name.startswith("test_"):
raise errors.USERError(
("Requested test case '%s' in test class "
"'%s' does not follow the test case "
"naming convention test_*.") % (test_case_name,
test_cls_name))
clean_names.append(test_case_name)
return (test_cls_name, clean_names)
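# For example (hypothetical names): "FooTest" parses to ("FooTest", None), and
# "FooTest:test_a,test_b" parses to ("FooTest", ["test_a", "test_b"]).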
def _parse_test_file(fpath):
"""Parses a test file that contains test specifiers.
Args:
fpath: A string that is the path to the test file to parse.
Returns:
A list of strings, each is a test specifier.
"""
try:
with open(fpath, 'r') as f:
tf = []
for line in f:
line = line.strip()
if not line:
continue
if len(tf) and (tf[-1].endswith(':') or tf[-1].endswith(',')):
tf[-1] += line
else:
tf.append(line)
return tf
except:
print("Error loading test file.")
raise
def _validate_test_class_name(test_cls_name):
"""Checks if a string follows the test class name convention.
Args:
test_cls_name: A string that should be a test class name.
Raises:
errors.USERError is raised if the input does not follow test class
naming convention.
"""
if not test_cls_name.endswith("Test"):
raise errors.USERError(
("Requested test class '%s' does not follow the test "
"class naming convention *Test.") % test_cls_name)
def _validate_testbed_configs(testbed_configs):
"""Validates the testbed configurations.
Args:
testbed_configs: A list of testbed configuration json objects.
Raises:
If any part of the configuration is invalid, errors.USERError is raised.
"""
seen_names = set()
    # Cross-check testbed configs: every test bed name must be valid and
    # unique across the run.
    for config in testbed_configs:
name = config[keys.ConfigKeys.KEY_TESTBED_NAME]
_validate_testbed_name(name)
# Test bed names should be unique.
if name in seen_names:
raise errors.USERError("Duplicate testbed name {} found.".format(
name))
seen_names.add(name)
def _validate_testbed_name(name):
"""Validates the name of a test bed.
    Since test bed names are used as part of the test run id, they need to
    meet certain requirements.
Args:
name: The test bed's name specified in config file.
Raises:
        If the name does not meet the requirements, errors.USERError is raised.
"""
if not name:
raise errors.USERError("Test bed names can't be empty.")
    # `basestring` only exists in Python 2; checking `str` covers Python 3.
    if not isinstance(name, str):
        raise errors.USERError("Test bed names have to be string. Found: %s" %
                               type(name))
for l in name:
if l not in utils.valid_filename_chars:
raise errors.USERError(
"Char '%s' is not allowed in test bed names." % l)
| [
"[email protected]"
] | |
cb5b7eebb2a8dfadaccca19077b3b99065b2e65a | c823e437ffd46aa3b1465819686ee50fd1932214 | /src/transformers/models/blip/modeling_blip.py | f00c9f9cabbbc915d057c3bd5c21749f1c19c197 | [
"Apache-2.0"
] | permissive | nateraw/transformers | f03258d62c4773732514e443d98f1684d3467bfd | 7fd902d3351b81775112cd6b526bc32cf9ba856d | refs/heads/main | 2023-03-19T00:31:55.123718 | 2023-01-20T22:16:42 | 2023-01-20T22:16:42 | 564,090,117 | 5 | 0 | Apache-2.0 | 2022-11-10T01:00:04 | 2022-11-10T01:00:03 | null | UTF-8 | Python | false | false | 61,768 | py | # coding=utf-8
# Copyright 2022 The Salesforce Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch BLIP model."""
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn.functional import normalize
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from .configuration_blip import BlipConfig, BlipTextConfig, BlipVisionConfig
from .modeling_blip_text import BlipTextLMHeadModel, BlipTextModel
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "Salesforce/blip-vqa-base"
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "Salesforce/blip-vqa-base",
    "Salesforce/blip-vqa-capfilt-large",
    "Salesforce/blip-image-captioning-base",
    "Salesforce/blip-image-captioning-large",
    "Salesforce/blip-itm-base-coco",
    "Salesforce/blip-itm-large-coco",
    "Salesforce/blip-itm-base-flickr",
    "Salesforce/blip-itm-large-flickr",
    # See all BLIP models at https://huggingface.co/models?filter=blip
]
# Copied from transformers.models.clip.modeling_clip.contrastive_loss
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
# Copied from transformers.models.clip.modeling_clip.clip_loss with clip->blip
def blip_loss(similarity: torch.Tensor) -> torch.Tensor:
caption_loss = contrastive_loss(similarity)
image_loss = contrastive_loss(similarity.t())
return (caption_loss + image_loss) / 2.0
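# Worked sketch (illustrative, not part of the original file): for a square
# similarity matrix the matched pairs sit on the diagonal, so both directions
# reduce to cross-entropy against targets arange(batch_size).
#
#   sim = torch.randn(4, 4)  # text-to-image similarity logits
#   loss = blip_loss(sim)    # mean of caption->image and image->caption CE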
@dataclass
class BlipForConditionalGenerationModelOutput(ModelOutput):
"""
Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
last hidden states. This class also adds the loss term from the text decoder.
Args:
loss (`torch.FloatTensor`, *optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
            Language modeling loss from the text decoder.
decoder_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
Prediction scores of the language modeling head of the text decoder model.
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*):
The image embeddings obtained after applying the Vision Transformer model to the input image.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[Tuple[torch.FloatTensor]] = None
decoder_logits: Optional[Tuple[torch.FloatTensor]] = None
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BlipTextVisionModelOutput(ModelOutput):
"""
Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
last hidden states. This class also adds the loss term from the text decoder.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss from the text decoder.
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BlipImageTextMatchingModelOutput(ModelOutput):
"""
Adapted from the base class for vision model's outputs that also contains image embeddings of the pooling of the
last hidden states. This class also adds the loss term from the text decoder as well as the image-text similarity
scores.
Args:
itm_score (`torch.FloatTensor`):
The image-text similarity scores.
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss from the text decoder.
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
vision_pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*):
            Last layer hidden-state of the vision-only branch of the model.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
question_embeds (`torch.FloatTensor`):
The question embeddings obtained by the text projection layer.
"""
itm_score: Optional[torch.FloatTensor] = None
loss: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
vision_pooler_output: Optional[torch.FloatTensor] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
question_embeds: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BlipOutput(ModelOutput):
"""
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
        text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`BlipTextModel`].
        image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The image embeddings obtained by applying the projection layer to the pooled output of [`BlipVisionModel`].
text_model_output(`BaseModelOutputWithPooling`):
The output of the [`BlipTextModel`].
vision_model_output(`BaseModelOutputWithPooling`):
The output of the [`BlipVisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
logits_per_image: torch.FloatTensor = None
logits_per_text: torch.FloatTensor = None
text_embeds: torch.FloatTensor = None
image_embeds: torch.FloatTensor = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPooling = None
def to_tuple(self) -> Tuple[Any]:
return tuple(
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
class BlipVisionEmbeddings(nn.Module):
def __init__(self, config: BlipVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(
torch.randn(1, 1, self.embed_dim),
)
self.patch_embedding = nn.Conv2d(
in_channels=3, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Parameter(torch.randn(1, self.num_positions, self.embed_dim))
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
batch_size = pixel_values.shape[0]
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid]
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1).to(target_dtype)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
embeddings = embeddings + self.position_embedding[:, : embeddings.size(1), :].to(target_dtype)
return embeddings
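# Shape sketch (illustrative; 384/16 are hypothetical config values): with
# image_size=384 and patch_size=16 the conv yields (384 // 16) ** 2 = 576
# patch tokens; prepending the class token gives 577 positions, matching
# self.position_embedding of shape (1, 577, embed_dim).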
# Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->Blip
class BlipTextEmbeddings(nn.Module):
def __init__(self, config: BlipTextConfig):
super().__init__()
embed_dim = config.hidden_size
self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
) -> torch.Tensor:
seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if inputs_embeds is None:
inputs_embeds = self.token_embedding(input_ids)
position_embeddings = self.position_embedding(position_ids)
embeddings = inputs_embeds + position_embeddings
return embeddings
class BlipAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = nn.Dropout(config.attention_dropout)
self.qkv = nn.Linear(self.embed_dim, 3 * self.embed_dim)
self.projection = nn.Linear(self.embed_dim, self.embed_dim)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, embed_dim = hidden_states.size()
        # Project to q/k/v once; the original duplicated this forward pass.
        mixed_qkv = (
            self.qkv(hidden_states)
            .reshape(bsz, tgt_len, 3, self.num_heads, embed_dim // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
query_states, key_states, value_states = (
mixed_qkv[0],
mixed_qkv[1],
mixed_qkv[2],
)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2))
attention_scores = attention_scores * self.scale
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_states).permute(0, 2, 1, 3)
new_context_layer_shape = context_layer.size()[:-2] + (self.embed_dim,)
context_layer = context_layer.reshape(new_context_layer_shape)
output = self.projection(context_layer)
outputs = (output, attention_probs) if output_attentions else (output, None)
return outputs
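# Shape walk-through (illustrative): for hidden_states of shape (B, T, C),
# self.qkv yields (B, T, 3C), reshaped and permuted to (3, B, num_heads, T,
# head_dim). attention_scores is then (B, num_heads, T, T), and the context
# is merged back to (B, T, C) before the final output projection.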
# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Blip
class BlipMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
class BlipEncoderLayer(nn.Module):
def __init__(self, config: BlipConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = BlipAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim)
self.mlp = BlipMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> Tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
head_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = hidden_states + residual
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
class BlipPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BlipConfig
base_model_prefix = "blip"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
factor = self.config.initializer_range
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Embedding) or isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=factor)
if hasattr(module, "bias") and module.bias is not None:
module.bias.data.zero_()
if isinstance(module, BlipVisionEmbeddings):
if hasattr(self.config, "vision_config"):
factor = self.config.vision_config.initializer_range
nn.init.trunc_normal_(
module.position_embedding,
mean=0.0,
std=factor,
)
nn.init.trunc_normal_(
module.class_embedding,
mean=0.0,
std=factor,
)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, BlipEncoder):
module.gradient_checkpointing = value
BLIP_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`BlipConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
BLIP_TEXT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`BlipProcessor`]. See [`BlipProcessor.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
BLIP_VISION_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
BLIP_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using [`BlipProcessor`]. See [`BlipProcessor.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`BlipImageProcessor`]. See [`BlipImageProcessor.__call__`] for details.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
class BlipEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`BlipEncoderLayer`].
Args:
config (`BlipConfig`):
The corresponding vision configuration for the `BlipEncoder`.
"""
def __init__(self, config: BlipConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([BlipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutput]:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
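# Note (illustrative): the create_custom_forward closure above bakes the
# constant `output_attentions` flag into the layer call, so checkpoint only
# has to handle the tensor inputs that need gradients, roughly:
#
#   fn = create_custom_forward(layer)  # fn(h, mask) == layer(h, mask, output_attentions)
#   out = torch.utils.checkpoint.checkpoint(fn, hidden_states, attention_mask)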
class BlipVisionModel(BlipPreTrainedModel):
main_input_name = "pixel_values"
config_class = BlipVisionConfig
def __init__(self, config: BlipVisionConfig):
super().__init__(config)
self.config = config
embed_dim = config.hidden_size
self.embeddings = BlipVisionEmbeddings(config)
self.encoder = BlipEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim)
self.post_init()
@add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=BlipVisionConfig)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BaseModelOutputWithPooling]:
r"""
Returns:
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.post_layernorm(last_hidden_state)
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
def get_input_embeddings(self):
return self.embeddings
@add_start_docstrings(BLIP_START_DOCSTRING)
class BlipModel(BlipPreTrainedModel):
config_class = BlipConfig
def __init__(self, config: BlipConfig):
super().__init__(config)
if not isinstance(config.text_config, BlipTextConfig):
raise ValueError(
"config.text_config is expected to be of type BlipTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, BlipVisionConfig):
raise ValueError(
"config.vision_config is expected to be of type BlipVisionConfig but is of type"
f" {type(config.vision_config)}."
)
text_config = config.text_config
vision_config = config.vision_config
self.projection_dim = config.projection_dim
self.text_embed_dim = text_config.hidden_size
self.vision_embed_dim = vision_config.hidden_size
self.text_model = BlipTextModel(text_config)
self.vision_model = BlipVisionModel(vision_config)
self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BLIP_TEXT_INPUTS_DOCSTRING)
def get_text_features(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
) -> torch.FloatTensor:
r"""
Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
applying the projection layer to the pooled output of [`BlipTextModel`].
Examples:
```python
>>> from transformers import BlipProcessor, BlipModel
>>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
>>> processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> text_features = model.get_text_features(**inputs)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
return_dict=return_dict,
)
pooled_output = text_outputs[1]
text_features = self.text_projection(pooled_output)
return text_features
@add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
def get_image_features(
self,
pixel_values: Optional[torch.FloatTensor] = None,
return_dict: Optional[bool] = None,
) -> torch.FloatTensor:
r"""
Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
applying the projection layer to the pooled output of [`BlipVisionModel`].
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import BlipProcessor, BlipModel
>>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
>>> processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> image_features = model.get_image_features(**inputs)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
return_dict=return_dict,
)
pooled_output = vision_outputs[1] # pooled_output
image_features = self.visual_projection(pooled_output)
return image_features
@add_start_docstrings_to_model_forward(BLIP_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BlipOutput, config_class=BlipConfig)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
return_loss: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BlipOutput]:
r"""
Returns:
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import BlipProcessor, BlipModel
>>> model = BlipModel.from_pretrained("Salesforce/blip-image-captioning-base")
>>> processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(
... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
... )
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
# Use BLIP model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
image_embeds = vision_outputs[1]
image_embeds = self.visual_projection(image_embeds)
text_embeds = text_outputs[1]
text_embeds = self.text_projection(text_embeds)
# normalized features
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
logits_per_image = logits_per_text.t()
loss = None
if return_loss:
loss = blip_loss(logits_per_text)
if not return_dict:
output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
return ((loss,) + output) if loss is not None else output
return BlipOutput(
loss=loss,
logits_per_image=logits_per_image,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
image_embeds=image_embeds,
text_model_output=text_outputs,
vision_model_output=vision_outputs,
)
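# Worked sketch (illustrative): with N images and M texts, logits_per_text is
# (M, N) and logits_per_image its transpose. Both embedding sets are
# L2-normalized above, so the matmul is cosine similarity, sharpened by the
# learned temperature:
#
#   sims = text_embeds @ image_embeds.t()            # values in [-1, 1]
#   logits_per_text = sims * model.logit_scale.exp()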
@add_start_docstrings(
"""
    BLIP Model for image captioning. The model consists of a vision encoder and a text decoder. One can optionally pass
    `input_ids` to the model, which serve as a text prompt, to make the text decoder continue the prompt. Otherwise,
    the decoder will start generating the caption from the [BOS] (beginning-of-sequence) token only.
""",
BLIP_START_DOCSTRING,
)
class BlipForConditionalGeneration(BlipPreTrainedModel):
config_class = BlipConfig
_keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
main_input_name = "pixel_values"
def __init__(self, config: BlipConfig):
super().__init__(config)
self.vision_model = BlipVisionModel(config.vision_config)
self.text_decoder = BlipTextLMHeadModel(config.text_config)
self.decoder_input_ids = config.text_config.bos_token_id
self.decoder_pad_token_id = config.text_config.pad_token_id
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
@add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BlipForConditionalGenerationModelOutput, config_class=BlipVisionConfig)
def forward(
self,
pixel_values: torch.FloatTensor,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BlipForConditionalGenerationModelOutput]:
r"""
Returns:
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import BlipProcessor, BlipForConditionalGeneration
>>> processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
>>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
```"""
batch_size = pixel_values.shape[0]
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
image_embeds = vision_outputs[0]
if input_ids is None:
            # One [BOS] token per image: shape (batch_size, 1), not (1, batch_size).
            input_ids = torch.LongTensor([[self.decoder_input_ids]] * batch_size).to(image_embeds.device)
if labels is None:
labels = input_ids.masked_fill(input_ids == self.decoder_pad_token_id, -100)
outputs = self.text_decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=image_embeds,
labels=labels,
return_dict=return_dict,
reduction="mean",
)
if not return_dict:
outputs = (outputs[0], outputs[1], image_embeds, vision_outputs[0]) + vision_outputs[2:]
return tuple(output for output in outputs if output is not None)
return BlipForConditionalGenerationModelOutput(
loss=outputs.loss,
decoder_logits=outputs.logits,
image_embeds=image_embeds,
last_hidden_state=vision_outputs.last_hidden_state,
hidden_states=vision_outputs.hidden_states,
attentions=vision_outputs.attentions,
)
@torch.no_grad()
def generate(
self,
pixel_values: torch.FloatTensor,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
**generate_kwargs
) -> torch.LongTensor:
r"""
Overrides *generate* function to be able to use the model as a conditional generator
Parameters:
            pixel_values (*torch.FloatTensor* of shape *(batch_size, image_width, image_height)*):
Input image to be processed
input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
The sequence used as a prompt for the generation.
attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import BlipProcessor, BlipForConditionalGeneration
>>> model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
>>> processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model.generate(**inputs)
>>> print(processor.decode(outputs[0], skip_special_tokens=True))
two cats are laying on a couch
```
"""
batch_size = pixel_values.shape[0]
vision_outputs = self.vision_model(
pixel_values=pixel_values,
)
image_embeds = vision_outputs[0]
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device)
if isinstance(input_ids, list):
input_ids = torch.LongTensor(input_ids)
elif input_ids is None:
input_ids = (
torch.LongTensor([[self.decoder_input_ids, self.config.text_config.eos_token_id]])
.repeat(batch_size, 1)
.to(image_embeds.device)
)
input_ids[:, 0] = self.config.text_config.bos_token_id
attention_mask = attention_mask[:, :-1] if attention_mask is not None else None
outputs = self.text_decoder.generate(
input_ids=input_ids[:, :-1],
eos_token_id=self.config.text_config.sep_token_id,
pad_token_id=self.config.text_config.pad_token_id,
attention_mask=attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_attention_mask,
**generate_kwargs,
)
return outputs
@add_start_docstrings(
"""
BLIP Model for visual question answering. The model consists of a vision encoder, a text encoder as well as a text
decoder. The vision encoder will encode the input image, the text encoder will encode the input question together
with the encoding of the image, and the text decoder will output the answer to the question.
""",
BLIP_START_DOCSTRING,
)
class BlipForQuestionAnswering(BlipPreTrainedModel):
config_class = BlipConfig
_keys_to_ignore_on_load_missing = [r"text_decoder.cls.predictions.decoder.bias"]
def __init__(self, config: BlipConfig):
super().__init__(config)
self.vision_model = BlipVisionModel(config.vision_config)
self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False)
self.text_decoder = BlipTextLMHeadModel(config.text_config)
self.decoder_pad_token_id = config.text_config.pad_token_id
self.decoder_start_token_id = config.text_config.bos_token_id
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
# Adapted from transformers.models.t5.modeling_t5.T5PreTrainedModel._shift_right
def _shift_right(self, input_ids):
pad_token_id = self.decoder_pad_token_id
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = self.decoder_start_token_id
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
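    # Worked example (illustrative; the token ids are hypothetical): with
    # decoder_start_token_id=30522 and pad_token_id=0,
    #
    #   labels  = [[7592, 2088, 102]]
    #   shifted = [[30522, 7592, 2088]]
    #
    # i.e. the decoder input is the start token plus the labels shifted one
    # step right, with any -100 positions replaced by the pad token.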
@add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig)
def forward(
self,
input_ids: torch.LongTensor,
pixel_values: torch.FloatTensor,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BlipTextVisionModelOutput]:
r"""
Returns:
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import BlipProcessor, BlipForQuestionAnswering
>>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
>>> processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "How many cats are in the picture?"
>>> inputs = processor(images=image, text=text, return_tensors="pt")
>>> outputs = model(**inputs)
```"""
        if labels is None and decoder_input_ids is None:
            raise ValueError(
                "Either `decoder_input_ids` or `labels` should be passed when calling `forward` with"
                " `BlipForQuestionAnswering`. If you are training the model, make sure that `labels` is passed; if you"
                " are using the model for inference, make sure that `decoder_input_ids` is passed."
            )
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
image_embeds = vision_outputs[0]
        image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device)
question_embeds = self.text_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_attention_mask,
return_dict=return_dict,
)
question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
if labels is not None and decoder_input_ids is None:
# get decoder inputs from shifting lm labels to the right - this is used in training mode
decoder_input_ids = self._shift_right(labels)
# replace possible -100 values in labels by `pad_token_id`
labels = labels.masked_fill(labels == self.decoder_pad_token_id, -100)
answer_output = self.text_decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=question_embeds,
encoder_attention_mask=attention_mask,
labels=labels,
return_dict=return_dict,
reduction="mean",
)
if labels is not None:
decoder_loss = answer_output.loss.mean() if return_dict else answer_output[0].mean()
else:
decoder_loss = None
if not return_dict:
outputs = (decoder_loss, image_embeds, vision_outputs[0]) + vision_outputs[2:]
return tuple(output for output in outputs if output is not None)
return BlipTextVisionModelOutput(
loss=decoder_loss,
image_embeds=image_embeds,
last_hidden_state=vision_outputs.last_hidden_state,
hidden_states=vision_outputs.hidden_states,
attentions=vision_outputs.attentions,
)
@torch.no_grad()
def generate(
self,
input_ids: torch.LongTensor,
pixel_values: torch.FloatTensor,
attention_mask: Optional[torch.LongTensor] = None,
**generate_kwargs
) -> torch.LongTensor:
r"""
Overrides *generate* function to be able to use the model as a conditional generator
Parameters:
input_ids (*torch.LongTensor* of shape *(batch_size, sequence_length)*):
The sequence used as a prompt for the generation.
            pixel_values (*torch.FloatTensor* of shape *(batch_size, image_width, image_height)*):
Input image to be processed
attention_mask (*torch.LongTensor* of shape *(batch_size, sequence_length)*, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`. `1` for
tokens that are NOT MASKED, `0` for MASKED tokens.
**generate_kwargs:
Additional arguments passed to the *generate* function of the decoder
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import BlipProcessor, BlipForQuestionAnswering
>>> model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")
>>> processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "How many cats are in the picture?"
>>> inputs = processor(images=image, text=text, return_tensors="pt")
>>> outputs = model.generate(**inputs)
>>> print(processor.decode(outputs[0], skip_special_tokens=True))
2
```
"""
vision_outputs = self.vision_model(
pixel_values=pixel_values,
)
image_embeds = vision_outputs[0]
image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device)
if isinstance(input_ids, list):
input_ids = torch.LongTensor(input_ids)
question_outputs = self.text_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_attention_mask,
return_dict=False,
)
question_embeds = question_outputs[0]
question_attention_mask = torch.ones(question_embeds.size()[:-1], dtype=torch.long).to(question_embeds.device)
bos_ids = torch.full(
(question_embeds.size(0), 1), fill_value=self.decoder_start_token_id, device=question_embeds.device
)
outputs = self.text_decoder.generate(
input_ids=bos_ids,
eos_token_id=self.config.text_config.sep_token_id,
pad_token_id=self.config.text_config.pad_token_id,
encoder_hidden_states=question_embeds,
encoder_attention_mask=question_attention_mask,
**generate_kwargs,
)
return outputs
@add_start_docstrings(
"""
BLIP Model with a vision and text projector, and a classification head on top. The model is used in the context of
image-text retrieval. Given an image and a text, the model returns the probability of the text being relevant to
the image.
""",
BLIP_START_DOCSTRING,
)
class BlipForImageTextRetrieval(BlipPreTrainedModel):
config_class = BlipConfig
def __init__(self, config: BlipConfig):
super().__init__(config)
self.vision_model = BlipVisionModel(config.vision_config)
self.text_encoder = BlipTextModel(config.text_config, add_pooling_layer=False)
# vision projection layer
self.vision_proj = nn.Linear(config.vision_config.hidden_size, config.image_text_hidden_size)
# text projection layer
self.text_proj = nn.Linear(config.text_config.hidden_size, config.image_text_hidden_size)
# image text matching head
self.itm_head = nn.Linear(config.text_config.hidden_size, 2)
self.decoder_pad_token_id = (
config.text_config.pad_token_id
if not hasattr(config, "decoder_pad_token_id")
else config.decoder_pad_token_id
)
self.decoder_start_token_id = (
config.text_config.bos_token_id
if not hasattr(config, "decoder_start_token_id")
else config.decoder_start_token_id
)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.patch_embedding
@add_start_docstrings_to_model_forward(BLIP_VISION_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=BlipTextVisionModelOutput, config_class=BlipVisionConfig)
def forward(
self,
input_ids: torch.LongTensor,
pixel_values: torch.FloatTensor,
use_itm_head: Optional[bool] = True,
attention_mask: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, BlipTextVisionModelOutput]:
r"""
Returns:
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import BlipProcessor, BlipForImageTextRetrieval
        >>> model = BlipForImageTextRetrieval.from_pretrained("Salesforce/blip-itm-base-coco")
        >>> processor = BlipProcessor.from_pretrained("Salesforce/blip-itm-base-coco")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "an image of a cat"
>>> inputs = processor(images=image, text=text, return_tensors="pt")
>>> outputs = model(**inputs)
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
image_embeds = vision_outputs[0]
        image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(image_embeds.device)
if use_itm_head:
question_embeds = self.text_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=image_embeds,
encoder_attention_mask=image_atts,
return_dict=return_dict,
)
question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
output = self.itm_head(question_embeds[:, 0, :])
else:
question_embeds = self.text_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
return_dict=return_dict,
)
question_embeds = question_embeds[0] if not return_dict else question_embeds.last_hidden_state
image_feat = normalize(self.vision_proj(image_embeds[:, 0, :]), dim=-1)
text_feat = normalize(self.text_proj(question_embeds[:, 0, :]), dim=-1)
output = image_feat @ text_feat.t()
if not return_dict:
outputs = (output, vision_outputs[0]) + vision_outputs[2:] + (question_embeds,)
return tuple(output for output in outputs if output is not None)
return BlipImageTextMatchingModelOutput(
itm_score=output,
last_hidden_state=vision_outputs.last_hidden_state,
hidden_states=vision_outputs.hidden_states,
attentions=vision_outputs.attentions,
question_embeds=question_embeds,
)
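# Usage sketch for the two scoring branches (assumed, not from the original
# file): `inputs` comes from a BlipProcessor as in the docstring example.
#
#   itm_logits = model(**inputs).itm_score                      # 2-way match head
#   cosine     = model(**inputs, use_itm_head=False).itm_score  # raw cosine similarity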
| [
"[email protected]"
] | |
52d9e68a9e9779fd139fc1b6351e313f9867021a | 92436a50cc26c8c8a216ba6d4a62e36069614234 | /classy_vision/hooks/model_complexity_hook.py | 2d950e229a73ec3d167f234b2c1f3d1cac33c6ba | [
"MIT"
] | permissive | hahaxun/ClassyVision | 9341f4e6849c858094592052f3df111c13d1a91d | b3f714ef94275b3e9753ab3f3c8256cb852b96fc | refs/heads/master | 2021-08-17T07:42:34.402613 | 2021-03-08T08:50:01 | 2021-03-08T08:50:01 | 245,940,574 | 1 | 0 | MIT | 2021-03-08T08:50:01 | 2020-03-09T04:02:59 | Python | UTF-8 | Python | false | false | 3,368 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from classy_vision.generic.profiler import (
ClassyProfilerNotImplementedError,
compute_activations,
compute_flops,
count_params,
)
from classy_vision.hooks import register_hook
from classy_vision.hooks.classy_hook import ClassyHook
@register_hook("model_complexity")
class ModelComplexityHook(ClassyHook):
"""
    Logs the number of parameters and the forward-pass FLOPs and activations of the model.
"""
on_phase_start = ClassyHook._noop
on_step = ClassyHook._noop
on_phase_end = ClassyHook._noop
on_end = ClassyHook._noop
def __init__(self) -> None:
super().__init__()
self.num_flops = None
self.num_activations = None
self.num_parameters = None
def on_start(self, task) -> None:
"""Measure number of parameters, FLOPs and activations."""
self.num_flops = 0
self.num_activations = 0
self.num_parameters = 0
try:
self.num_parameters = count_params(task.base_model)
logging.info("Number of parameters in model: %d" % self.num_parameters)
try:
self.num_flops = compute_flops(
task.base_model,
input_shape=task.base_model.input_shape,
input_key=task.base_model.input_key
if hasattr(task.base_model, "input_key")
else None,
)
if self.num_flops is None:
logging.info("FLOPs for forward pass: skipped.")
self.num_flops = 0
else:
logging.info(
"FLOPs for forward pass: %d MFLOPs"
% (float(self.num_flops) / 1e6)
)
except ClassyProfilerNotImplementedError as e:
logging.warning(f"Could not compute FLOPs for model forward pass: {e}")
try:
self.num_activations = compute_activations(
task.base_model,
input_shape=task.base_model.input_shape,
input_key=task.base_model.input_key
if hasattr(task.base_model, "input_key")
else None,
)
logging.info(f"Number of activations in model: {self.num_activations}")
except ClassyProfilerNotImplementedError as e:
logging.warning(
f"Could not compute activations for model forward pass: {e}"
)
except Exception:
logging.info("Skipping complexity calculation: Unexpected error")
logging.debug("Error trace for complexity calculation:", exc_info=True)
def get_summary(self):
return {
"FLOPS(M)": float(self.num_flops) / 1e6
if self.num_flops is not None
else 0,
"num_activations(M)": float(self.num_activations) / 1e6
if self.num_activations is not None
else 0,
"num_parameters(M)": float(self.num_parameters) / 1e6
if self.num_parameters is not None
else 0,
}
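# Usage sketch (hedged; the task API is not shown in this file): the names
# below assume a ClassyVision ClassificationTask-style object whose
# `set_hooks` method attaches hooks, which is how hooks are normally wired in.
#   hook = ModelComplexityHook()
#   task.set_hooks([hook])        # on_start() then runs the measurements
#   print(hook.get_summary())     # FLOPS(M) / num_activations(M) / num_parameters(M)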
| [
"[email protected]"
] | |
896cd37b75d6a90732e0f7cf6f2f1caaa6e5e557 | 2bc74414e71a280cc50085ec2e5a6499d22ae5e6 | /src/python/probdist/_DirichletDist.py | 2cb917cbd5d1ee752feb79300c7a8baecf4fe306 | [
"MIT"
] | permissive | plewis/phycas | 610c989d49dce741fc2d2ad048a9d7587eabeb74 | 9f5a4d9b2342dab907d14a46eb91f92ad80a5605 | refs/heads/master | 2020-12-25T16:48:31.870762 | 2017-07-15T14:07:37 | 2017-07-15T14:07:37 | 21,300,616 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 20,943 | py | from _PyDistributionBase import PyDistributionBase
from _ProbDistExt import *
class Dirichlet(DirichletDistBase, PyDistributionBase):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Represents the multivariate Dirichlet probability distribution.
*** Not finished documenting this class yet ***
"""
def __init__(self, c):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Specify the parameters of the Dirichlet object as a tuple. e.g.,
>>> from phycas.probdist import *
>>> d = Dirichlet((1,1,1,1))
>>> print d.getMean()
(0.25, 0.25, 0.25, 0.25)
"""
DirichletDistBase.__init__(self, c)
def clone(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Creates a copy of this Dirichlet distribution.
"""
return DirichletDistBase.clone(self)
def isDiscrete(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Always returns False because the Dirichlet distribution is continuous.
>>> from phycas.probdist import *
>>> d = Dirichlet((1,1,1,1))
>>> print d.isDiscrete()
False
"""
return DirichletDistBase.isDiscrete(self)
def getDistName(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Returns the string 'Dirichlet'
>>> from phycas.probdist import *
>>> d = Dirichlet((1,1,1,1))
>>> print d.getDistName()
Dirichlet
"""
return DirichletDistBase.getDistName(self)
def __str__(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Returns a string that could be used to initialize another Dirichlet
object identical to this one. e.g.,
>>> from phycas.probdist import *
>>> d = Dirichlet((1,2,3,4))
>>> print d.__str__()
Dirichlet((1.00000, 2.00000, 3.00000, 4.00000))
"""
return DirichletDistBase.__str__(self)
def __repr__(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Returns a string that could be used to initialize another Dirichlet
object identical to this one. e.g.,
>>> from phycas.probdist import *
>>> d = Dirichlet((1,2,3,4))
>>> print d.__repr__()
Dirichlet((1.00000, 2.00000, 3.00000, 4.00000))
>>> print d
Dirichlet((1.00000, 2.00000, 3.00000, 4.00000))
"""
return DirichletDistBase.__repr__(self)
def setLot(self, lot):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Substitutes a different random number generator to use when drawing
samples. e.g.,
>>> g = Lot()
>>> g.setSeed(1357)
>>> d = Dirichlet((1.0, 1.0, 1.0, 1.0))
>>> d.setLot(g)
>>> for x in d.sample():
... print "%.12f" % x
0.006552381150
0.421429842993
0.270456715211
0.301561060645
>>> for x in d.sample():
... print "%.12f" % x
0.602547509254
0.024508953948
0.328473170470
0.044470366328
>>> g.setSeed(1357)
>>> for x in d.sample():
... print "%.12f" % x
0.006552381150
0.421429842993
0.270456715211
0.301561060645
In this example, only one random number generator (g) was involved;
however, one could pass g to several different probability distribu-
tions, thus ensuring that the entire sequence of random numbers could
be recreated by keeping track of only one seed value. If setLot is not
used, each distribution object maintains its own random number
generator that is initialized using the system clock at the time the
object is created, making it difficult to replicate results.
"""
return DirichletDistBase.setLot(self, lot)
def setSeed(self, seed):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Initializes the random number generator of this distribution object
using the supplied seed. Note that if you have called setLot before
this point, calling setSeed is pointless because you have already
replaced the random number generator for which you are setting the
seed! If you have already called setLot, you probably want to call
        the setSeed function of that Lot object.
>>> from phycas.probdist import *
>>> d = Dirichlet((1,2,3,4))
>>> d.setSeed(135)
>>> for x in d.sample():
... print "%.12f" % x
0.000104630137
0.270690528796
0.037251633232
0.691953207834
>>> for x in d.sample():
... print "%.12f" % x
0.234069762243
0.170795104732
0.191374394925
0.403760738099
>>> d.setSeed(135)
>>> for x in d.sample():
... print "%.12f" % x
0.000104630137
0.270690528796
0.037251633232
0.691953207834
"""
return DirichletDistBase.setSeed(self, seed)
def resetLot(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Resets the random number generator to point to the local Lot object.
Because the local Lot object is used by default, this function need
only be called if setLot has previously been called to specify an
external random number generator.
"""
return DirichletDistBase.resetLot(self)
def getMean(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Returns the mean of the distribution. This is the theoretical mean
(i.e., it will not change if sample is called to generate samples
from this distribution). Because this is a multivariate distribution,
the object returned is a tuple.
>>> from phycas.probdist import *
>>> d = Dirichlet((1,2,3,4))
>>> print d.getMean()
(0.10000000000000001, 0.20000000000000001, 0.29999999999999999, 0.40000000000000002)
"""
return DirichletDistBase.getMean(self)
def getVar(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Returns the variance of the distribution. This is the theoretical
variance (i.e., it will not change if sample is called to generate
samples from this distribution). Because this is a multivariate
distribution, the object returned is a tuple.
>>> from phycas.probdist import *
>>> d = Dirichlet((1,2,3,4))
>>> print d.getVar()
(0.0081818181818181825, 0.014545454545454545, 0.019090909090909092, 0.02181818181818182)
"""
return DirichletDistBase.getVar(self)
def getStdDev(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Returns the standard deviation of the distribution. This is the
theoretical standard deviation (i.e., it will not change if sample is
called to generate samples from this distribution). Because this is
a multivariate distribution, the object returned is a tuple.
>>> from phycas.probdist import *
>>> d = Dirichlet((1,2,3,4))
>>> print d.getStdDev()
(0.090453403373329092, 0.12060453783110545, 0.13816985594155148, 0.14770978917519928)
"""
return DirichletDistBase.getStdDev(self)
def sample(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Draws a single sampled value from the Dirichlet distribution specified
by this Dirichlet object. Python list comprehensions can be used
to store many simulated samples for use in subsequent calculations.
>>> from phycas.probdist import *
>>> d = Dirichlet((1,2,3,4))
>>> d.setSeed(97531)
>>> for x in d.sample():
... print "%.12f" % x
0.120887631014
0.013728524332
0.512400278022
0.352983566632
"""
return DirichletDistBase.sample(self)
def approxCDF(self, x, n = 10000):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Approximates the cumulative distribution function evaluated at the
        supplied point x. The precision of the approximation is controlled
        by n. The approximation is done using a brute force approach:
        n samples are drawn from this Dirichlet distribution, and the
        proportion of those samples that are inside the region defined by x
        is returned as the approximated CDF. The supplied point x should be
        a tuple whose length equals the number of parameters of the
        Dirichlet distribution (an assertion checks this). In the following
        example,
the result returned from approxCDF for a Dirichlet((1,1))
distribution is compared to the exact result for the equivalent
univariate Beta(1,1) distribution (the setSeed call is needed
to ensure that the approximated CDF will be the same every time this
example is run):
>>> from phycas.probdist import *
>>> d = Dirichlet((1,1))
>>> d.setSeed(1357)
>>> print d.approxCDF(d.getMean())
0.5059
>>> b = Beta(1,1)
>>> print b.getCDF(b.getMean())
0.5
"""
assert n > 0, 'n must be greater than zero in Dirichlet.approxCDF function'
nparams = DirichletDistBase.getNParams(self)
assert nparams == len(x), 'Vector supplied to approxCDF has length %d but length expected was %d' % (len(x), nparams)
return DirichletDistBase.approxCDF(self, x, n)
def getLnPDF(self, x):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Evaluates the probability density function at the supplied value x.
Returns the natural logarithm of the density at x. e.g.,
>>> from phycas.probdist import *
>>> d = Dirichlet((1,2,3,4))
>>> print d.getLnPDF((0.4,0.3,0.2,0.1))
-1.01368307788
For example, the 4-parameter Dirichlet density is
Gamma(a+b+c+d) p^(a-1) q^(b-1) r^(c-1) (1-p-q-r)^(d-1)
------------------------------------------------------
Gamma(a) Gamma(b) Gamma(c) Gamma(d)
where a = 1, b = 2, c = 3, d = 4, p = 0.4, q = 0.3, r = 0.2 and
Gamma is the Gamma function, not the Gamma probability distribution.
Note that the argument x is a tuple, which in this example would be
x = (p, q, r, 1-p-q-r) = (0.4, 0.3, 0.2, 0.1)
The natural logarithm of the relative density is thus
(a-1) log(p) + (b-1) log(q) + (c-1) log(r) + (d-1) log(1-p-q-r)
- log(Gamma(a)) - log(Gamma(b)) - log(Gamma(c)) - log(Gamma(d))
+ log(Gamma(a+b+c+d))
For the example given, this equals
(1-1) log(0.4) + (2-1) log(0.3) + (3-1) log(0.2) + (4-1) log(0.1)
- log(Gamma(1)) - log(Gamma(2)) - log(Gamma(3)) - log(Gamma(4))
+ log(Gamma(1+2+3+4))
= 0.0 + log(0.3) + 2 log(0.2) + 3 log(0.1)
- 0.0 - 0.0 - log(2!) - log(3!)
+ log(9!)
= -1.01368307788
"""
nparams = DirichletDistBase.getNParams(self)
assert nparams == len(x), 'Vector supplied to getLnPDF has length %d but length expected was %d' % (len(x), nparams)
return DirichletDistBase.getLnPDF(self, x)
def getRelativeLnPDF(self, x):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Evaluates the relative probability density function at the supplied
value x. Returns the natural logarithm of the relative density at x.
Use this function if speed is important but normalization is not, say
in MCMC calculations. Use getLnPDF instead if you need to have a
correctly normalized density value (i.e. from a density function that
integrates to 1.0)
>>> from phycas.probdist import *
>>> d = Dirichlet((1,2,3,4))
>>> print d.getRelativeLnPDF((0.4,0.3,0.2,0.1))
-11.3306039082
For example, the 4-parameter Dirichlet density is
Gamma(a+b+c+d) p^(a-1) q^(b-1) r^(c-1) (1-p-q-r)^(d-1)
------------------------------------------------------
Gamma(a) Gamma(b) Gamma(c) Gamma(d)
where a = 1, b = 2, c = 3, d = 4, p = 0.4, q = 0.3, r = 0.2 and
Gamma is the Gamma function, not the Gamma probability distribution.
The relative density requires only the four terms containing p, q,
and r in the numerator, so the natural logarithm of the relative
density is
(a-1) log(p) + (b-1) log(q) + (c-1) log(r) + (d-1) log(1-p-q-r)
For the example given, this equals
(1-1) log(0.4) + (2-1) log(0.3) + (3-1) log(0.2) + (4-1) log(0.1)
= log(0.3) + 2 log(0.2) + 3 log(0.1)
= -11.3306039082
"""
nparams = DirichletDistBase.getNParams(self)
assert nparams == len(x), 'Vector supplied to getRelativeLnPDF has length %d but length expected was %d' % (len(x), nparams)
return DirichletDistBase.getRelativeLnPDF(self, x)
# Uncomment this version if numarray is re-introduced
#def setMeanAndVariance(self, mean, variance):
# #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
# """
# Sets the (multivariate) mean and variance of this distribution.
# Note: mean and variance are numarray array objects rather than simple
# tuples. This was an exercise to see if numarray could be used for this
# purpose, but tuples would be better. Note that the variances are
# given as a one-dimensional array, leaving out the covariances (which
# are not needed to fully specify the Dirichlet distribution).
# For example:
#
# >>> from phycas.probdist import *
# >>> from numarray import array
# >>> d = Dirichlet((1,1,1))
# >>> m = array([1./9., 3./9., 5./9.])
# >>> print m
# [ 0.11111111 0.33333333 0.55555556]
# >>> v = array([8./810.,18./810.,20./810.])
# >>> print v
# [ 0.00987654 0.02222222 0.02469136]
# >>> d.setMeanAndVariance(m,v)
# >>> d.getMean()
# (0.1111111111111111, 0.33333333333333331, 0.55555555555555558)
# >>> d.getVar()
# (0.009876543209876543, 0.022222222222222223, 0.024691358024691357)
#
# """
# nparams = DirichletDistBase.getNParams(self)
# assert len(mean.shape) == 1, 'Mean vector supplied to Dirichlet.setMeanAndVariance should be a single-dimensional array'
# assert len(variance.shape) == 1, 'Variance vector supplied to Dirichlet.setMeanAndVariance should be a single-dimensional array'
# assert nparams == len(mean), 'Mean vector supplied to setMeanAndVariance should have %d elements, but %d were found' % (nparams, len(mean))
# assert nparams == len(variance), 'Variance vector supplied to setMeanAndVariance should have %d elements, but %d were found' % (nparams, len(variance))
# return DirichletDistBase.setMeanAndVariance(self, mean, variance)
# Comment out this version if numarray is re-introduced
def setMeanAndVariance(self, mean, variance):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Sets the (multivariate) mean and variance of this distribution.
Note that the variances are given as a one-dimensional tuple, leaving
out the covariances (which are not needed to fully specify the
Dirichlet distribution). For example:
>>> from phycas.probdist import *
>>> d = Dirichlet((1,1,1))
>>> m = (1./9., 3./9., 5./9.)
>>> print m
(0.1111111111111111, 0.33333333333333331, 0.55555555555555558)
>>> v = (8./810.,18./810.,20./810.)
>>> print v
(0.009876543209876543, 0.022222222222222223, 0.024691358024691357)
>>> d.setMeanAndVariance(m,v)
>>> d.getMean()
(0.1111111111111111, 0.33333333333333331, 0.55555555555555558)
>>> d.getVar()
(0.009876543209876543, 0.022222222222222223, 0.024691358024691357)
"""
nparams = DirichletDistBase.getNParams(self)
assert nparams == len(mean), 'Mean vector supplied to setMeanAndVariance should have %d elements, but %d were found' % (nparams, len(mean))
assert nparams == len(variance), 'Variance vector supplied to setMeanAndVariance should have %d elements, but %d were found' % (nparams, len(variance))
return DirichletDistBase.setMeanAndVariance(self, mean, variance)
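    # Method-of-moments sketch (standard Dirichlet identities, not specific to
    # this wrapper): with c = sum_i c_i, mean_i = c_i/c and
    # var_i = mean_i*(1 - mean_i)/(c + 1), so setMeanAndVariance can recover
    # c = mean_i*(1 - mean_i)/var_i - 1 from any coordinate, then c_i = mean_i*c.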
# Uncomment this version if numarray is re-introduced
#def getVarCovarMatrix(self):
# #---+----|----+----|----+----|----+----|----+----|----+----|----+----|
# """
# Returns the variance-covariance matrix of this distribution. The
# matrix is returned in the form of a numarray object. Letting c be the
    #     sum of the n Dirichlet parameters, c_i be the ith parameter, and
# denom be c*c*(c + 1), then
#
# Var_i = c_i*(c - c_i)/denom
# Cov_ij = -c_i*c_j/denom
#
# In this example,
#
# n = 3
# c_1 = 1
# c_2 = 1
# c_3 = 1
# c = 3
# denom = 3*3*4 = 36
# Var_i = 1*2/36 = 0.05555556
# Cov_ij = -1*1/36 = -0.02777778
#
# >>> from phycas.probdist import *
# >>> from numpy.numarray import array
# >>> d = Dirichlet((1,1,1))
# >>> print d.getVarCovarMatrix()
# [[ 0.05555556 -0.02777778 -0.02777778]
# [-0.02777778 0.05555556 -0.02777778]
# [-0.02777778 -0.02777778 0.05555556]]
#
# """
# return DirichletDistBase.getVarCovarMatrix(self)
# Comment out this version if numarray is re-introduced
def printSquareMatrix(self, matrix):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Utility function used to interpret a vector as a square matrix and
print it out.
"""
total_len = len(matrix)
import math
row_len = int(math.sqrt(total_len))
assert row_len*row_len == total_len, 'Attempting to print a matrix that is not square'
k = 0
for i in range(row_len):
for j in range(row_len):
print '% .8f ' % matrix[k],
k += 1
print
# Comment out this version if numarray is re-introduced
def getVarCovarMatrix(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Returns the variance-covariance matrix of this distribution. The
matrix is returned in the form of a numarray object. Letting c be the
        sum of the n Dirichlet parameters, c_i be the ith parameter, and
denom be c*c*(c + 1), then
Var_i = c_i*(c - c_i)/denom
Cov_ij = -c_i*c_j/denom
In this example,
n = 3
c_1 = 1
c_2 = 1
c_3 = 1
c = 3
denom = 3*3*4 = 36
Var_i = 1*2/36 = 0.05555556
Cov_ij = -1*1/36 = -0.02777778
>>> from phycas.probdist import *
>>> d = Dirichlet((1,1,1))
>>> d.printSquareMatrix(d.getVarCovarMatrix())
0.05555556 -0.02777778 -0.02777778
-0.02777778 0.05555556 -0.02777778
-0.02777778 -0.02777778 0.05555556
"""
return DirichletDistBase.getVarCovarMatrix(self)
def getNParams(self):
#---+----|----+----|----+----|----+----|----+----|----+----|----+----|
"""
Returns the number of parameters in the Dirichlet distribution.
For example:
>>> from phycas.probdist import *
>>> d1 = Dirichlet((1,2,3,4))
>>> print d1.getNParams()
4
>>> d2 = Dirichlet((1,1))
>>> print d2.getNParams()
2
"""
return DirichletDistBase.getNParams(self)
| [
"[email protected]"
] | |
d66e5bf50843298b9445b71d3ec2cca177e78de5 | 329b48089c64ebefe78d52f1c71c73bdadadd4b4 | /keras2/keras64_1_Hyperparameter.py | ad68911f67a30528708f7b0d723608067bb8b426 | [] | no_license | variablejun/keras__R | 7f854570952ed97c48715047015786d873e512cb | 9faf4814b46cda1ac0ddbf2a2f8236fa0394f144 | refs/heads/main | 2023-07-13T19:32:25.950500 | 2021-08-22T18:26:52 | 2021-08-22T18:26:52 | 398,870,548 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,324 | py | import numpy as np
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential,Model
from tensorflow.keras.layers import Dense,Dropout,Input,Conv2D
(x_train, y_train),(x_test,y_test)= mnist.load_data()
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
x_train = x_train.reshape(60000,28*28).astype('float32')/255
x_test= x_test.reshape(10000,28*28).astype('float32')/255
def build_model(drop=0.5,optimizer='adam'):
inputs= Input(shape=(28*28), name='Input')
x = Dense(512, activation='relu',name='hidden1')(inputs)
x = Dropout(drop)(x)
x = Dense(256, activation='relu',name='hidden2')(x)
x = Dropout(drop)(x)
x = Dense(128, activation='relu',name='hidden3')(x)
x = Dropout(drop)(x)
outputs = Dense(10,activation='softmax',name='outputs')(x)
model = Model(inputs=inputs,outputs =outputs )
model.compile(optimizer=optimizer,metrics=['acc'],loss='categorical_crossentropy')
return model
def create_hyperparameter():
batches = [1000,2000,3000,4000,5000]
optimizers = ['rmsprop','adam','adadelta']
dropout = [0.5,0.6,0.7]
return {'batch_size':batches, 'optimizer': optimizers, 'drop':dropout}
hyperparameters = create_hyperparameter()
print(hyperparameters)
#{'batch_size': [1000, 2000, 3000, 4000, 5000], 'optimizer': ['rmsprop', 'adam', 'adadelta'], 'drop': [0.5, 0.6, 0.7]}
#model2 = build_model()
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier  # lets a Keras model run inside scikit-learn by wrapping it as a scikit-learn-style estimator
model2 = KerasClassifier(build_fn=build_model,verbose=1)
from sklearn.model_selection import GridSearchCV,RandomizedSearchCV
from xgboost import XGBClassifier
model = RandomizedSearchCV(model2, hyperparameters,cv=5)
model.fit(x_train,y_train,verbose=1,epochs=3, validation_split=0.2)
print(model.best_estimator_)
print(model.best_params_)
print(model.best_score_)
acc = model.score(x_test,y_test)
print(acc)
'''
<tensorflow.python.keras.wrappers.scikit_learn.KerasClassifier object at 0x000001BCCE273100>
{'optimizer': 'rmsprop', 'drop': 0.5, 'batch_size': 1000}
0.9427833318710327
10/10 [==============================] - 0s 3ms/step - loss: 0.1547 - acc: 0.9530
0.953000009059906
''' | [
"[email protected]"
] | |
fffa9fc3b815accf4276f2bb4c6e09c6bc58c609 | dcefbb67cfdc837a5b1016ea674ead66263f0af2 | /algorithm/BOJ_9498.py | 290b709344b970e624e35aaaf96ba697a6f8a63d | [] | no_license | SeungYeopB/weekend-study | 0a5d5bdbb00a7d81f2ec7c6c5b2fc7b96d92c296 | 02651855bb91e26784611bbed34a01023f4ef307 | refs/heads/master | 2023-06-23T15:52:54.475077 | 2021-07-23T07:57:16 | 2021-07-23T07:57:16 | 382,514,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | N = int(input())
if 90<=N:
print("A")
elif 80<=N:
print("B")
elif 70<=N:
print("C")
elif 60<=N:
print("D")
else:
print("F") | [
"="
] | = |
f40b984eb61b3ef75296fcd0a7d260bb6141d45e | 8fdcd12cfb91b2245da8b3c65fb937b1d72dd4c5 | /tissuelab/omero/gateway_ome500_ice351/omero_ext/xmlrunner/main.py | b901b82051114a73824341dc847547251109890c | [] | no_license | VirtualPlants/tissuelab | 569a334deab0b73acc8b43f313efc3f4c4e552fd | 8c064a34b91127806848f4992d1e4767574863cf | refs/heads/master | 2021-01-11T01:32:19.830778 | 2017-05-04T09:42:53 | 2017-05-04T09:42:53 | 70,694,783 | 2 | 1 | null | 2017-01-05T14:21:50 | 2016-10-12T11:49:10 | Python | UTF-8 | Python | false | false | 1,628 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
OME Testing Methods
"""
import logging
import unittest
from omero_ext import xmlrunner
class OmeTestLoader(object):
def __init__(self, args):
self.__args = args
def loadTestsFromModule(self, *args):
if hasattr(self, "already_called"):
raise Exception("Already called")
load = unittest.defaultTestLoader.loadTestsFromName
suite = unittest.TestSuite()
for arg in self.__args:
suite.addTest(load(arg))
self.already_called = True
return suite
def ome_test_main(args):
logging.basicConfig(level=logging.WARN)
unittest.main(
testRunner=xmlrunner.XMLTestRunner(verbose=True, output='target/reports'),
testLoader = OmeTestLoader(args))
| [
"[email protected]"
] | |
43e84b41c4b4b76c12b087f5df8190eb9572fce2 | 19a2378a7fc2aef762b0e3a70669208818feeaa9 | /tests/models/deberta_v2/test_modeling_tf_deberta_v2.py | 8b9bcc15ea2f4c75e72da91cb71317cafda0bb5c | [
"Apache-2.0"
] | permissive | pytorch-tpu/transformers | 494ee005c6d156161171f2a8e60f25603189408f | 6112b1c6442aaf7affd2b0676a1cd4eee30c45cf | refs/heads/master | 2023-09-03T19:34:30.326852 | 2023-07-19T20:57:40 | 2023-07-19T20:57:40 | 220,075,881 | 7 | 2 | Apache-2.0 | 2023-09-14T17:58:25 | 2019-11-06T19:40:45 | Python | UTF-8 | Python | false | false | 11,162 | py | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaV2ForMaskedLM,
TFDebertaV2ForQuestionAnswering,
TFDebertaV2ForSequenceClassification,
TFDebertaV2ForTokenClassification,
TFDebertaV2Model,
)
class TFDebertaV2ModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
relative_attention=False,
position_biased_input=True,
pos_att_type="None",
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_labels = num_labels
self.num_choices = num_choices
self.relative_attention = relative_attention
self.position_biased_input = position_biased_input
self.pos_att_type = pos_att_type
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
config = DebertaV2Config(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
relative_attention=self.relative_attention,
position_biased_input=self.position_biased_input,
initializer_range=self.initializer_range,
return_dict=True,
)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def create_and_check_model(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDebertaV2Model(config=config)
inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
inputs = [input_ids, input_mask]
result = model(inputs)
result = model(input_ids)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDebertaV2ForMaskedLM(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFDebertaV2ForSequenceClassification(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
config.num_labels = self.num_labels
model = TFDebertaV2ForTokenClassification(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
model = TFDebertaV2ForQuestionAnswering(config=config)
inputs = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
result = model(inputs)
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFDebertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
TFDebertaV2Model,
TFDebertaV2ForMaskedLM,
TFDebertaV2ForQuestionAnswering,
TFDebertaV2ForSequenceClassification,
TFDebertaV2ForTokenClassification,
)
if is_tf_available()
else ()
)
pipeline_model_mapping = (
{
"feature-extraction": TFDebertaV2Model,
"fill-mask": TFDebertaV2ForMaskedLM,
"question-answering": TFDebertaV2ForQuestionAnswering,
"text-classification": TFDebertaV2ForSequenceClassification,
"token-classification": TFDebertaV2ForTokenClassification,
"zero-shot": TFDebertaV2ForSequenceClassification,
}
if is_tf_available()
else {}
)
test_head_masking = False
test_onnx = False
def setUp(self):
self.model_tester = TFDebertaV2ModelTester(self)
self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def test_model_from_pretrained(self):
model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
self.assertIsNotNone(model)
@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason="Model not available yet")
def test_inference_masked_lm(self):
pass
@slow
def test_inference_no_head(self):
model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
output = model(input_ids, attention_mask=attention_mask)[0]
expected_slice = tf.constant(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
)
tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| [
"[email protected]"
] | |
6c59afc01f8f79d247c8828e95c7474ffcd0ed59 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/elastic/azure-mgmt-elastic/azure/mgmt/elastic/_microsoft_elastic.py | 1b77371ed5a3ff00b9189073d035aff87c44842c | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 5,959 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from ._configuration import MicrosoftElasticConfiguration
from .operations import Operations
from .operations import MonitorsOperations
from .operations import MonitoredResourcesOperations
from .operations import DeploymentInfoOperations
from .operations import TagRulesOperations
from .operations import VMHostOperations
from .operations import VMIngestionOperations
from .operations import VMCollectionOperations
from . import models
class MicrosoftElastic(object):
"""MicrosoftElastic.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.elastic.operations.Operations
:ivar monitors: MonitorsOperations operations
:vartype monitors: azure.mgmt.elastic.operations.MonitorsOperations
:ivar monitored_resources: MonitoredResourcesOperations operations
:vartype monitored_resources: azure.mgmt.elastic.operations.MonitoredResourcesOperations
:ivar deployment_info: DeploymentInfoOperations operations
:vartype deployment_info: azure.mgmt.elastic.operations.DeploymentInfoOperations
:ivar tag_rules: TagRulesOperations operations
:vartype tag_rules: azure.mgmt.elastic.operations.TagRulesOperations
:ivar vm_host: VMHostOperations operations
:vartype vm_host: azure.mgmt.elastic.operations.VMHostOperations
:ivar vm_ingestion: VMIngestionOperations operations
:vartype vm_ingestion: azure.mgmt.elastic.operations.VMIngestionOperations
:ivar vm_collection: VMCollectionOperations operations
:vartype vm_collection: azure.mgmt.elastic.operations.VMCollectionOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Azure subscription ID. This is a GUID-formatted string (e.g. 00000000-0000-0000-0000-000000000000).
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://management.azure.com'
self._config = MicrosoftElasticConfiguration(credential, subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._serialize.client_side_validation = False
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.monitors = MonitorsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.monitored_resources = MonitoredResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.deployment_info = DeploymentInfoOperations(
self._client, self._config, self._serialize, self._deserialize)
self.tag_rules = TagRulesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vm_host = VMHostOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vm_ingestion = VMIngestionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.vm_collection = VMCollectionOperations(
self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.core.pipeline.transport.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to True.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.pipeline.transport.HttpResponse
"""
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
stream = kwargs.pop("stream", True)
pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs)
return pipeline_response.http_response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> MicrosoftElastic
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| [
"[email protected]"
] | |
9b7bbcece100ed41298687ceaf110a854d4c4f80 | 7fd1406b7e94d4b82a158ce5be87b5ae821e16b6 | /pro2_4.py | cbe3b853a1fd6e8842058c68d0e80ca7dfa7022e | [] | no_license | THABUULAGANATHAN/guvi-programs | c1c4d314c7ce43d6c3996fdac85616248c69e4fd | fb004f6916776ca9fbe07b8d507f9725cc55248f | refs/heads/master | 2022-01-15T09:08:32.904234 | 2019-07-19T06:45:04 | 2019-07-19T06:45:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | pi,qi=map(int,input().split())
l=list(map(int,input().split()))
for i in range(qi):
r,s=map(int,input().split())
t1 = l[r-1:s]
u1 = t1[0]
for i in range(1,len(t1)):
u1 = u1 ^ t1[i]
print(u1)
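# Note: the loop above costs O(s - r) XOR operations per query. A prefix-XOR
# table answers each query in O(1); minimal sketch over the same list l:
#   pre = [0]
#   for v in l:
#       pre.append(pre[-1] ^ v)
#   # XOR of l[r-1:s] is then pre[s] ^ pre[r-1]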
| [
"[email protected]"
] | |
5ca662cfd326bbfc872365527afa925f6d62a32a | 003dd45d19b5a6fd4a04deeefa63756462dbe09d | /pymoo/core/decomposition.py | c44fc0098edfa8eabc65e6dfd473ec6d63021804 | [
"Apache-2.0"
] | permissive | Flytortoise/pymoo | 51d32793e843977bd8fda0226bb6add1c356e21d | c6426a721d95c932ae6dbb610e09b6c1b0e13594 | refs/heads/master | 2023-09-03T20:54:13.284192 | 2021-11-09T13:23:15 | 2021-11-09T13:23:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,451 | py | import numpy as np
from pymoo.util.misc import at_least_2d_array, to_1d_array_if_possible
class Decomposition:
def __init__(self, eps=0.0, _type="auto", **kwargs) -> None:
super().__init__()
self.eps = eps
self._type = _type
self.ideal_point, self.utopian_point, self.nadir_point = None, None, None
def do(self,
F,
weights,
_type="auto",
ideal_point=None,
utopian_point=None,
nadir_point=None,
**kwargs):
_F, _weights = to_1d_array_if_possible(F), to_1d_array_if_possible(weights)
if _type == "auto":
if _F.ndim == 1 and _weights.ndim > 1:
_type = "one_to_many"
elif _F.ndim > 1 and _weights.ndim == 1:
_type = "many_to_one"
elif _F.ndim == 2 and _weights.ndim == 2 and _F.shape[0] == _weights.shape[0]:
_type = "one_to_one"
else:
_type = "many_to_many"
# make both at least 2d arrays
F, weights = at_least_2d_array(F), at_least_2d_array(weights)
# get the number of points and weights
n_points, n_weights = F.shape[0], weights.shape[0]
self.ideal_point = ideal_point
if self.ideal_point is None:
self.ideal_point = np.zeros(F.shape[1])
self.utopian_point = utopian_point
if self.utopian_point is None:
self.utopian_point = self.ideal_point - self.eps
# set the nadir point by default to value or default
self.nadir_point = nadir_point
if self.nadir_point is None:
self.nadir_point = self.utopian_point + np.ones(F.shape[1])
if _type == "one_to_one":
D = self._do(F, weights=weights, **kwargs).flatten()
elif _type == "one_to_many":
F = np.repeat(F, n_weights, axis=0)
D = self._do(F, weights=weights, **kwargs).flatten()
elif _type == "many_to_one":
weights = np.repeat(weights, n_points, axis=0)
D = self._do(F, weights=weights, **kwargs).flatten()
elif _type == "many_to_many":
F = np.repeat(F, n_weights, axis=0)
weights = np.tile(weights, (n_points, 1))
D = self._do(F, weights=weights, **kwargs).reshape(n_points, n_weights)
else:
raise Exception("Unknown type for decomposition: %s" % _type)
return D
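# Minimal concrete-subclass sketch (hypothetical names here; pymoo's shipped
# decompositions such as a weighted sum implement this same interface): only
# `_do` must be provided, while `do` resolves the F-vs-weights broadcasting.
#   class WeightedSum(Decomposition):
#       def _do(self, F, weights, **kwargs):
#           return np.sum(F * weights, axis=1)
#   # D = WeightedSum().do(F, weights)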
| [
"[email protected]"
] | |
7451c76f2aaa67e39cfca280e39ce44c0c871877 | 27381f38b713258645855716593a9e309c43337e | /mechanics/hallway_plate_assembly.py | 0138693b254035f06a4cdb76b2cd0e5774ac69b4 | [
"Apache-2.0"
] | permissive | iorodeo/hallway_arenas | 828859f66a0f6966fde24c95fc32db7d1c4fea51 | 02d87d7890aea4f09fc244792aecaf6cb24357b2 | refs/heads/master | 2022-05-22T06:01:38.534312 | 2020-04-21T19:50:58 | 2020-04-21T19:50:58 | 256,603,143 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,399 | py | """
Creates assembly of top and bottom plates for hallway arena
"""
from py2scad import *
from assembly import Assembly
from hallway_top_plate import Hallway_Top_Plate
from hallway_bottom_plate import Hallway_Bottom_Plate
class Hallway_Plate_Assembly(Assembly):
def make(self):
# Create components
top_plate = Hallway_Top_Plate(**self.params.hallway_top_plate)
bottom_plate = Hallway_Bottom_Plate(**self.params.hallway_bottom_plate)
explode_z = self.params.explode_z
# Translate into position
bottom_z_shift = 0.5*self.params.hallway_bottom_plate['thickness']
bottom_plate.translate(v=(0,0,bottom_z_shift))
top_z_shift = 2*bottom_z_shift + 0.5*self.params.hallway_top_plate['thickness'] + explode_z
top_plate.translate(v=(0,0,top_z_shift))
# Add color
bottom_plate.color(rgba=self.params.hallway_bottom_plate['color'])
top_plate.color(rgba=self.params.hallway_top_plate['color'])
self.parts = {
'top_plate' : top_plate,
'bottom_plate' : bottom_plate,
}
# -----------------------------------------------------------------------------
if __name__ == '__main__':
import params
assem = Hallway_Plate_Assembly(params=params)
prog = SCAD_Prog()
prog.fn = 50
prog.add(assem)
prog.write('hallway_plate_assembly.scad')
| [
"[email protected]"
] | |
159fa0c4b22dc80f495e6b5625adba8f9412461f | 0eb599c3bbfa6e5b31516913b88cc9db3a1311ce | /ABC_6q/abc167a.py | 83b76e504878f698de040171eee29074e8a32964 | [] | no_license | Linus-MK/AtCoder | 5b84dc88c2d2773d0f97ed18265d303290da7879 | a587e89a9e0c2ab4d36b09176bcc95e901e14326 | refs/heads/master | 2022-11-25T05:37:12.148722 | 2022-11-17T16:04:10 | 2022-11-17T16:04:10 | 169,840,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | x = input()
y = input()
ans = 'Yes'
for i in range(len(x)):
if x[i] != y[i]:
ans = 'No'
print(ans)
| [
"[email protected]"
] | |
dda989c5ddf4601eb9af755446131b6ba4d3e885 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/audio/ESPnet2_for_PyTorch/espnet2/enh/separator/conformer_separator.py | dbc1251d99d8976e54f34dfd9c5ec546f8c6cdef | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,643 | py | from collections import OrderedDict
from distutils.version import LooseVersion
from typing import List
from typing import Tuple
from typing import Union
import torch
from torch_complex.tensor import ComplexTensor
from espnet.nets.pytorch_backend.conformer.encoder import (
Encoder as ConformerEncoder, # noqa: H301
)
from espnet.nets.pytorch_backend.nets_utils import make_non_pad_mask
from espnet2.enh.layers.complex_utils import is_complex
from espnet2.enh.separator.abs_separator import AbsSeparator
is_torch_1_9_plus = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
class ConformerSeparator(AbsSeparator):
def __init__(
self,
input_dim: int,
num_spk: int = 2,
adim: int = 384,
aheads: int = 4,
layers: int = 6,
linear_units: int = 1536,
positionwise_layer_type: str = "linear",
positionwise_conv_kernel_size: int = 1,
normalize_before: bool = False,
concat_after: bool = False,
dropout_rate: float = 0.1,
input_layer: str = "linear",
positional_dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.1,
nonlinear: str = "relu",
conformer_pos_enc_layer_type: str = "rel_pos",
conformer_self_attn_layer_type: str = "rel_selfattn",
conformer_activation_type: str = "swish",
use_macaron_style_in_conformer: bool = True,
use_cnn_in_conformer: bool = True,
conformer_enc_kernel_size: int = 7,
padding_idx: int = -1,
):
"""Conformer separator.
Args:
input_dim: input feature dimension
num_spk: number of speakers
adim (int): Dimension of attention.
aheads (int): The number of heads of multi head attention.
linear_units (int): The number of units of position-wise feed forward.
layers (int): The number of transformer blocks.
dropout_rate (float): Dropout rate.
input_layer (Union[str, torch.nn.Module]): Input layer type.
attention_dropout_rate (float): Dropout rate in attention.
positional_dropout_rate (float): Dropout rate after adding
positional encoding.
normalize_before (bool): Whether to use layer_norm before the first block.
concat_after (bool): Whether to concat attention layer's input and output.
if True, additional linear will be applied.
i.e. x -> x + linear(concat(x, att(x)))
if False, no additional linear will be applied. i.e. x -> x + att(x)
conformer_pos_enc_layer_type(str): Encoder positional encoding layer type.
conformer_self_attn_layer_type (str): Encoder attention layer type.
conformer_activation_type(str): Encoder activation function type.
positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
positionwise_conv_kernel_size (int): Kernel size of
positionwise conv1d layer.
use_macaron_style_in_conformer (bool): Whether to use macaron style for
positionwise layer.
use_cnn_in_conformer (bool): Whether to use convolution module.
            conformer_enc_kernel_size(int): Kernel size of convolution module.
            padding_idx (int): Padding idx for input_layer=embed.
            nonlinear: the nonlinear function for mask estimation,
                       selected from 'relu', 'tanh' or 'sigmoid'
"""
super().__init__()
self._num_spk = num_spk
self.conformer = ConformerEncoder(
idim=input_dim,
attention_dim=adim,
attention_heads=aheads,
linear_units=linear_units,
num_blocks=layers,
dropout_rate=dropout_rate,
positional_dropout_rate=positional_dropout_rate,
attention_dropout_rate=attention_dropout_rate,
input_layer=input_layer,
normalize_before=normalize_before,
concat_after=concat_after,
positionwise_layer_type=positionwise_layer_type,
positionwise_conv_kernel_size=positionwise_conv_kernel_size,
macaron_style=use_macaron_style_in_conformer,
pos_enc_layer_type=conformer_pos_enc_layer_type,
selfattention_layer_type=conformer_self_attn_layer_type,
activation_type=conformer_activation_type,
use_cnn_module=use_cnn_in_conformer,
cnn_module_kernel=conformer_enc_kernel_size,
padding_idx=padding_idx,
)
self.linear = torch.nn.ModuleList(
[torch.nn.Linear(adim, input_dim) for _ in range(self.num_spk)]
)
if nonlinear not in ("sigmoid", "relu", "tanh"):
raise ValueError("Not supporting nonlinear={}".format(nonlinear))
self.nonlinear = {
"sigmoid": torch.nn.Sigmoid(),
"relu": torch.nn.ReLU(),
"tanh": torch.nn.Tanh(),
}[nonlinear]
def forward(
self, input: Union[torch.Tensor, ComplexTensor], ilens: torch.Tensor
) -> Tuple[List[Union[torch.Tensor, ComplexTensor]], torch.Tensor, OrderedDict]:
"""Forward.
Args:
input (torch.Tensor or ComplexTensor): Encoded feature [B, T, N]
ilens (torch.Tensor): input lengths [Batch]
Returns:
masked (List[Union(torch.Tensor, ComplexTensor)]): [(B, T, N), ...]
ilens (torch.Tensor): (B,)
others predicted data, e.g. masks: OrderedDict[
'mask_spk1': torch.Tensor(Batch, Frames, Freq),
'mask_spk2': torch.Tensor(Batch, Frames, Freq),
...
'mask_spkn': torch.Tensor(Batch, Frames, Freq),
]
"""
# if complex spectrum,
if is_complex(input):
feature = abs(input)
else:
feature = input
# prepare pad_mask for transformer
pad_mask = make_non_pad_mask(ilens).unsqueeze(1).to(feature.device)
x, ilens = self.conformer(feature, pad_mask)
masks = []
for linear in self.linear:
y = linear(x)
y = self.nonlinear(y)
masks.append(y)
masked = [input * m for m in masks]
others = OrderedDict(
zip(["mask_spk{}".format(i + 1) for i in range(len(masks))], masks)
)
return masked, ilens, others
@property
def num_spk(self):
return self._num_spk
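# Shape sketch (toy sizes assumed, not taken from this file): with batch B=4,
# T=100 frames, feature dim N=input_dim and num_spk=2, forward() returns
#   masked: [ (4, 100, N), (4, 100, N) ]   # input * mask, one per speaker
#   others: OrderedDict with 'mask_spk1'/'mask_spk2', each of shape (4, 100, N)
# (the middle return value carries the length/padding information through).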
| [
"[email protected]"
] | |
7301d83c595597b93916cb9dd6928a33c2a858e2 | 338dbd8788b019ab88f3c525cddc792dae45036b | /lib/python3.6/site-packages/statsmodels/sandbox/descstats.py | 1b90db63cf35925066b9ad968d0d41c5ed48f642 | [] | permissive | KshitizSharmaV/Quant_Platform_Python | 9b8b8557f13a0dde2a17de0e3352de6fa9b67ce3 | d784aa0604d8de5ba5ca0c3a171e3556c0cd6b39 | refs/heads/master | 2022-12-10T11:37:19.212916 | 2019-07-09T20:05:39 | 2019-07-09T20:05:39 | 196,073,658 | 1 | 2 | BSD-3-Clause | 2022-11-27T18:30:16 | 2019-07-09T19:48:26 | Python | UTF-8 | Python | false | false | 6,472 | py | '''
Glue for returning descriptive statistics.
'''
import numpy as np
from scipy import stats
import os
from statsmodels.stats.descriptivestats import sign_test
#############################################
#
#============================================
# Univariate Descriptive Statistics
#============================================
#
def descstats(data, cols=None, axis=0):
'''
Prints descriptive statistics for one or multiple variables.
Parameters
----------
    data : array_like
        The data to summarize.
    cols : list, optional
        A list of the column numbers or field names (for a recarray) of the
        variables to describe. Default is all columns.
    axis : 1 or 0
        Axis order of the data. Default is 0 for column-ordered data.
    Examples
    --------
    >>> descstats(data.exog, cols=['x_1','x_2','x_3'])
'''
x = np.array(data) # or rather, the data we're interested in
if cols is None:
# if isinstance(x, np.recarray):
# cols = np.array(len(x.dtype.names))
if not isinstance(x, np.recarray) and x.ndim == 1:
x = x[:,None]
if x.shape[1] == 1:
desc = '''
---------------------------------------------
Univariate Descriptive Statistics
---------------------------------------------
Var. Name %(name)12s
----------
Obs. %(nobs)22i Range %(range)22s
Sum of Wts. %(sum)22s Coeff. of Variation %(coeffvar)22.4g
Mode %(mode)22.4g Skewness %(skewness)22.4g
Repeats %(nmode)22i Kurtosis %(kurtosis)22.4g
Mean %(mean)22.4g Uncorrected SS %(uss)22.4g
Median %(median)22.4g Corrected SS %(ss)22.4g
Variance %(variance)22.4g Sum Observations %(sobs)22.4g
Std. Dev. %(stddev)22.4g
''' % {'name': cols, 'sum': 'N/A', 'nobs': len(x), 'mode': \
stats.mode(x)[0][0], 'nmode': stats.mode(x)[1][0], \
'mean': x.mean(), 'median': np.median(x), 'range': \
'('+str(x.min())+', '+str(x.max())+')', 'variance': \
x.var(), 'stddev': x.std(), 'coeffvar': \
stats.variation(x), 'skewness': stats.skew(x), \
'kurtosis': stats.kurtosis(x), 'uss': np.sum(x**2, axis=0),\
'ss': np.sum((x-x.mean())**2, axis=0), 'sobs': np.sum(x)}
desc+= '''
Percentiles
-------------
1 %% %12.4g
5 %% %12.4g
10 %% %12.4g
25 %% %12.4g
50 %% %12.4g
75 %% %12.4g
90 %% %12.4g
95 %% %12.4g
99 %% %12.4g
''' % tuple([stats.scoreatpercentile(x,per) for per in (1,5,10,25,
50,75,90,95,99)])
t,p_t=stats.ttest_1samp(x,0)
M,p_M=sign_test(x)
S,p_S=stats.wilcoxon(np.squeeze(x))
desc+= '''
Tests of Location (H0: Mu0=0)
-----------------------------
Test Statistic Two-tailed probability
-----------------+-----------------------------------------
Student's t | t %7.5f Pr > |t| <%.4f
Sign | M %8.2f Pr >= |M| <%.4f
Signed Rank | S %8.2f Pr >= |S| <%.4f
''' % (t,p_t,M,p_M,S,p_S)
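        # The three tests above all address H0: the location of x is 0, via
        # Student's t (mean), the sign test M (median), and the Wilcoxon
        # signed-rank S; the reported probabilities are two-tailed.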
# Should this be part of a 'descstats'
# in any event these should be split up, so that they can be called
# individually and only returned together if someone calls summary
# or something of the sort
elif x.shape[1] > 1:
desc ='''
Var. Name | Obs. Mean Std. Dev. Range
------------+--------------------------------------------------------'''+\
os.linesep
# for recarrays with columns passed as names
# if isinstance(cols[0],str):
# for var in cols:
# desc += "%(name)15s %(obs)9i %(mean)12.4g %(stddev)12.4g \
#%(range)20s" % {'name': var, 'obs': len(x[var]), 'mean': x[var].mean(),
# 'stddev': x[var].std(), 'range': '('+str(x[var].min())+', '\
# +str(x[var].max())+')'+os.linesep}
# else:
for var in range(x.shape[1]):
xv = x[:, var]
kwargs = {
'name': var,
'obs': len(xv),
'mean': xv.mean(),
'stddev': xv.std(),
'range': '('+str(xv.min())+', '+str(xv.max())+')'+os.linesep
}
desc += ("%(name)15s %(obs)9i %(mean)12.4g %(stddev)12.4g "
"%(range)20s" % kwargs)
else:
raise ValueError("data not understood")
return desc
#if __name__=='__main__':
# test descstats
# import os
# loc='http://eagle1.american.edu/~js2796a/data/handguns_data.csv'
# relpath=(load_dataset(loc))
# dta=np.recfromcsv(relpath)
# descstats(dta,['stpop'])
# raw_input('Hit enter for multivariate test')
# descstats(dta,['stpop','avginc','vio'])
# with plain arrays
# import string2dummy as s2d
# dts=s2d.string2dummy(dta)
# ndts=np.vstack(dts[col] for col in dts.dtype.names)
# observations in columns and data in rows
# is easier for the call to stats
# what to make of
# ndts=np.column_stack(dts[col] for col in dts.dtype.names)
# ntda=ntds.swapaxis(1,0)
# ntda is ntds returns false?
# or now we just have detailed information about the different strings
# would this approach ever be inappropriate for a string typed variable
# other than dates?
# descstats(ndts, [1])
# raw_input("Enter to try second part")
# descstats(ndts, [1,20,3])
if __name__ == '__main__':
import statsmodels.api as sm
data = sm.datasets.longley.load(as_pandas=False)
data.exog = sm.add_constant(data.exog, prepend=False)
sum1 = descstats(data.exog)
sum1a = descstats(data.exog[:,:1])
# loc='http://eagle1.american.edu/~js2796a/data/handguns_data.csv'
# dta=np.recfromcsv(loc)
# summary2 = descstats(dta,['stpop'])
# summary3 = descstats(dta,['stpop','avginc','vio'])
#TODO: needs a by argument
# summary4 = descstats(dta) this fails
# this is a bug
# p = dta[['stpop']]
# p.view(dtype = np.float, type = np.ndarray)
# this works
# p.view(dtype = np.int, type = np.ndarray)
### This is *really* slow ###
if os.path.isfile('./Econ724_PS_I_Data.csv'):
data2 = np.recfromcsv('./Econ724_PS_I_Data.csv')
sum2 = descstats(data2.ahe)
sum3 = descstats(np.column_stack((data2.ahe,data2.yrseduc)))
sum4 = descstats(np.column_stack(([data2[_] for \
_ in data2.dtype.names])))
| [
"[email protected]"
] | |
5b5f8c93f58803b4d562bdbf95e832b26f8843f0 | 01dd174a3a7d26226564711e32711f137513663f | /pyscf/grad/rhf.py | 41bce18e52e4832a089e9edeac6cdc000356147c | [
"Apache-2.0"
] | permissive | cherishyli/pyscf | 00cb09c873edc8890be8501414678cdfa54b177e | 468a4bfc4ce067eb7dab6f9289d71122b219609e | refs/heads/master | 2020-04-18T11:40:00.398066 | 2019-01-24T23:07:36 | 2019-01-24T23:07:36 | 167,508,739 | 1 | 0 | Apache-2.0 | 2019-01-25T08:00:12 | 2019-01-25T08:00:12 | null | UTF-8 | Python | false | false | 12,214 | py | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Non-relativistic Hartree-Fock analytical nuclear gradients
'''
import time
import numpy
from pyscf import gto
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import _vhf
def grad_elec(mf_grad, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
mf = mf_grad.base
mol = mf_grad.mol
if mo_energy is None: mo_energy = mf.mo_energy
if mo_occ is None: mo_occ = mf.mo_occ
if mo_coeff is None: mo_coeff = mf.mo_coeff
log = logger.Logger(mf_grad.stdout, mf_grad.verbose)
hcore_deriv = mf_grad.hcore_generator(mol)
s1 = mf_grad.get_ovlp(mol)
dm0 = mf.make_rdm1(mo_coeff, mo_occ)
t0 = (time.clock(), time.time())
log.debug('Computing Gradients of NR-HF Coulomb repulsion')
vhf = mf_grad.get_veff(mol, dm0)
log.timer('gradients of 2e part', *t0)
dme0 = mf_grad.make_rdm1e(mo_energy, mo_coeff, mo_occ)
if atmlst is None:
atmlst = range(mol.natm)
aoslices = mol.aoslice_by_atom()
de = numpy.zeros((len(atmlst),3))
for k, ia in enumerate(atmlst):
        p0, p1 = aoslices[ia, 2:]
h1ao = hcore_deriv(ia)
de[k] += numpy.einsum('xij,ij->x', h1ao, dm0)
# nabla was applied on bra in vhf, *2 for the contributions of nabla|ket>
de[k] += numpy.einsum('xij,ij->x', vhf[:,p0:p1], dm0[p0:p1]) * 2
de[k] -= numpy.einsum('xij,ij->x', s1[:,p0:p1], dme0[p0:p1]) * 2
de[k] += mf_grad.extra_force(ia, locals())
if log.verbose >= logger.DEBUG:
log.debug('gradients of electronic part')
_write(log, mol, de, atmlst)
return de
def _write(dev, mol, de, atmlst):
if atmlst is None:
atmlst = range(mol.natm)
dev.stdout.write(' x y z\n')
for k, ia in enumerate(atmlst):
dev.stdout.write('%d %s %15.10f %15.10f %15.10f\n' %
(ia, mol.atom_symbol(ia), de[k,0], de[k,1], de[k,2]))
def grad_nuc(mol, atmlst=None):
gs = numpy.zeros((mol.natm,3))
for j in range(mol.natm):
q2 = mol.atom_charge(j)
r2 = mol.atom_coord(j)
for i in range(mol.natm):
if i != j:
q1 = mol.atom_charge(i)
r1 = mol.atom_coord(i)
r = numpy.sqrt(numpy.dot(r1-r2,r1-r2))
gs[j] -= q1 * q2 * (r2-r1) / r**3
if atmlst is not None:
gs = gs[atmlst]
return gs
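# A minimal sketch (the helper below is illustrative, not part of pyscf) of
# how grad_nuc can be spot-checked against a central finite difference of the
# classical nuclear repulsion energy E_nuc = sum_{i<j} q_i q_j / |R_i - R_j|:
def _check_grad_nuc_fd(mol, atom_id=0, axis=0, step=1e-5):
    def e_nuc(coords):
        e = 0.0
        for i in range(mol.natm):
            for j in range(i):
                r = numpy.linalg.norm(coords[i] - coords[j])
                e += mol.atom_charge(i) * mol.atom_charge(j) / r
        return e
    coords = mol.atom_coords()
    plus = coords.copy()
    plus[atom_id,axis] += step
    minus = coords.copy()
    minus[atom_id,axis] -= step
    fd = (e_nuc(plus) - e_nuc(minus)) / (2 * step)
    # the two numbers returned should agree closely for a sane geometry
    return fd, grad_nuc(mol)[atom_id,axis]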
def get_hcore(mol):
'''Part of the nuclear gradients of core Hamiltonian'''
h = mol.intor('int1e_ipkin', comp=3)
    if mol._pseudo:
        raise NotImplementedError('Nuclear gradients for GTH PP')
    else:
        h += mol.intor('int1e_ipnuc', comp=3)
if mol.has_ecp():
h += mol.intor('ECPscalar_ipnuc', comp=3)
return -h
def hcore_generator(mf, mol=None):
if mol is None: mol = mf.mol
with_x2c = getattr(mf.base, 'with_x2c', None)
if with_x2c:
hcore_deriv = with_x2c.hcore_deriv_generator(deriv=1)
else:
with_ecp = mol.has_ecp()
if with_ecp:
ecp_atoms = set(mol._ecpbas[:,gto.ATOM_OF])
else:
ecp_atoms = ()
aoslices = mol.aoslice_by_atom()
h1 = mf.get_hcore(mol)
def hcore_deriv(atm_id):
shl0, shl1, p0, p1 = aoslices[atm_id]
with mol.with_rinv_as_nucleus(atm_id):
vrinv = mol.intor('int1e_iprinv', comp=3) # <\nabla|1/r|>
vrinv *= -mol.atom_charge(atm_id)
if with_ecp and atm_id in ecp_atoms:
vrinv += mol.intor('ECPscalar_iprinv', comp=3)
vrinv[:,p0:p1] += h1[:,p0:p1]
return vrinv + vrinv.transpose(0,2,1)
return hcore_deriv
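# Note on the closure above: for a given atom id it returns the full
# 3-component nuclear derivative of hcore as a (3, nao, nao) array; the -Z/r
# contribution of that nucleus is isolated with with_rinv_as_nucleus, and the
# symmetrization vrinv + vrinv.transpose(0,2,1) restores the ket-side
# derivative from the bra-side integrals.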
def get_ovlp(mol):
return -mol.intor('int1e_ipovlp', comp=3)
def get_jk(mol, dm):
'''J = ((-nabla i) j| kl) D_lk
K = ((-nabla i) j| kl) D_jk
'''
intor = mol._add_suffix('int2e_ip1')
vj, vk = _vhf.direct_mapdm(intor, # (nabla i,j|k,l)
's2kl', # ip1_sph has k>=l,
('lk->s1ij', 'jk->s1il'),
dm, 3, # xyz, 3 components
mol._atm, mol._bas, mol._env)
return -vj, -vk
def get_veff(mf_grad, mol, dm):
'''NR Hartree-Fock Coulomb repulsion'''
vj, vk = mf_grad.get_jk(mol, dm)
return vj - vk * .5
def make_rdm1e(mo_energy, mo_coeff, mo_occ):
'''Energy weighted density matrix'''
mo0 = mo_coeff[:,mo_occ>0]
mo0e = mo0 * (mo_energy[mo_occ>0] * mo_occ[mo_occ>0])
return numpy.dot(mo0e, mo0.T.conj())
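# In matrix notation make_rdm1e builds the energy-weighted density commonly
# written W = C_occ diag(n_occ * e_occ) C_occ^H, i.e.
# W_uv = sum_i n_i e_i C_ui C_vi*; grad_elec contracts it with the overlap
# derivative s1 to form the overlap-derivative (Pulay-type) term.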
def as_scanner(mf_grad):
'''Generating a nuclear gradients scanner/solver (for geometry optimizer).
The returned solver is a function. This function requires one argument
"mol" as input and returns energy and first order nuclear derivatives.
The solver will automatically use the results of last calculation as the
initial guess of the new calculation. All parameters assigned in the
nuc-grad object and SCF object (DIIS, conv_tol, max_memory etc) are
automatically applied in the solver.
Note scanner has side effects. It may change many underlying objects
(_scf, with_df, with_x2c, ...) during calculation.
Examples::
>>> from pyscf import gto, scf, grad
>>> mol = gto.M(atom='H 0 0 0; F 0 0 1')
>>> hf_scanner = scf.RHF(mol).apply(grad.RHF).as_scanner()
>>> e_tot, grad = hf_scanner(gto.M(atom='H 0 0 0; F 0 0 1.1'))
>>> e_tot, grad = hf_scanner(gto.M(atom='H 0 0 0; F 0 0 1.5'))
'''
if isinstance(mf_grad, lib.GradScanner):
return mf_grad
logger.info(mf_grad, 'Create scanner for %s', mf_grad.__class__)
class SCF_GradScanner(mf_grad.__class__, lib.GradScanner):
def __init__(self, g):
lib.GradScanner.__init__(self, g)
def __call__(self, mol_or_geom, **kwargs):
if isinstance(mol_or_geom, gto.Mole):
mol = mol_or_geom
else:
mol = self.mol.set_geom_(mol_or_geom, inplace=False)
mf_scanner = self.base
e_tot = mf_scanner(mol)
self.mol = mol
de = self.kernel(**kwargs)
return e_tot, de
return SCF_GradScanner(mf_grad)
class Gradients(lib.StreamObject):
'''Non-relativistic restricted Hartree-Fock gradients'''
def __init__(self, scf_method):
self.verbose = scf_method.verbose
self.stdout = scf_method.stdout
self.mol = scf_method.mol
self.base = scf_method
self.max_memory = self.mol.max_memory
self.atmlst = None
self.de = None
self._keys = set(self.__dict__.keys())
def dump_flags(self):
log = logger.Logger(self.stdout, self.verbose)
log.info('\n')
if not self.base.converged:
log.warn('Ground state SCF not converged')
log.info('******** %s for %s ********',
self.__class__, self.base.__class__)
log.info('max_memory %d MB (current use %d MB)',
self.max_memory, lib.current_memory()[0])
return self
def get_hcore(self, mol=None):
if mol is None: mol = self.mol
return get_hcore(mol)
hcore_generator = hcore_generator
def get_ovlp(self, mol=None):
if mol is None: mol = self.mol
return get_ovlp(mol)
@lib.with_doc(get_jk.__doc__)
def get_jk(self, mol=None, dm=None, hermi=0):
if mol is None: mol = self.mol
if dm is None: dm = self.base.make_rdm1()
cpu0 = (time.clock(), time.time())
#TODO: direct_scf opt
vj, vk = get_jk(mol, dm)
logger.timer(self, 'vj and vk', *cpu0)
return vj, vk
def get_j(self, mol=None, dm=None, hermi=0):
if mol is None: mol = self.mol
if dm is None: dm = self.base.make_rdm1()
intor = mol._add_suffix('int2e_ip1')
return -_vhf.direct_mapdm(intor, 's2kl', 'lk->s1ij', dm, 3,
mol._atm, mol._bas, mol._env)
def get_k(self, mol=None, dm=None, hermi=0):
if mol is None: mol = self.mol
if dm is None: dm = self.base.make_rdm1()
intor = mol._add_suffix('int2e_ip1')
return -_vhf.direct_mapdm(intor, 's2kl', 'jk->s1il', dm, 3,
mol._atm, mol._bas, mol._env)
def get_veff(self, mol=None, dm=None):
if mol is None: mol = self.mol
if dm is None: dm = self.base.make_rdm1()
return get_veff(self, mol, dm)
def make_rdm1e(self, mo_energy=None, mo_coeff=None, mo_occ=None):
if mo_energy is None: mo_energy = self.base.mo_energy
if mo_coeff is None: mo_coeff = self.base.mo_coeff
if mo_occ is None: mo_occ = self.base.mo_occ
return make_rdm1e(mo_energy, mo_coeff, mo_occ)
def extra_force(self, atom_id, envs):
'''Hook for extra contributions in analytical gradients.
Contributions like the response of auxiliary basis in density fitting
method, the grid response in DFT numerical integration can be put in
this function.
'''
return 0
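    # A hedged illustration (hypothetical subclass, not part of pyscf) of how
    # this hook is meant to be used, e.g. adding a uniform-field force on each
    # nucleus:
    #
    #     class GradientsWithField(Gradients):
    #         def extra_force(self, atom_id, envs):
    #             field = numpy.array([0.0, 0.0, 1e-4])  # assumed E-field, a.u.
    #             return -self.mol.atom_charge(atom_id) * field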
grad_elec = grad_elec
def grad_nuc(self, mol=None, atmlst=None):
if mol is None: mol = self.mol
return grad_nuc(mol, atmlst)
def grad(self, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
return self.kernel(mo_energy, mo_coeff, mo_occ, atmlst)
def kernel(self, mo_energy=None, mo_coeff=None, mo_occ=None, atmlst=None):
cput0 = (time.clock(), time.time())
if mo_energy is None: mo_energy = self.base.mo_energy
if mo_coeff is None: mo_coeff = self.base.mo_coeff
if mo_occ is None: mo_occ = self.base.mo_occ
if atmlst is None:
atmlst = self.atmlst
else:
self.atmlst = atmlst
if self.verbose >= logger.WARN:
self.check_sanity()
if self.verbose >= logger.INFO:
self.dump_flags()
de = self.grad_elec(mo_energy, mo_coeff, mo_occ, atmlst)
self.de = de + self.grad_nuc(atmlst=atmlst)
logger.timer(self, 'SCF gradients', *cput0)
self._finalize()
return self.de
def _finalize(self):
if self.verbose >= logger.NOTE:
logger.note(self, '--------------- %s gradients ---------------',
self.base.__class__.__name__)
_write(self, self.mol, self.de, self.atmlst)
logger.note(self, '----------------------------------------------')
as_scanner = as_scanner
Grad = Gradients
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
mol.verbose = 0
mol.atom = [['He', (0.,0.,0.)], ]
mol.basis = {'He': 'ccpvdz'}
mol.build()
method = scf.RHF(mol)
method.scf()
g = Gradients(method)
print(g.grad())
h2o = gto.Mole()
h2o.verbose = 0
h2o.atom = [
['O' , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ]
h2o.basis = {'H': '631g',
'O': '631g',}
h2o.build()
rhf = scf.RHF(h2o)
rhf.conv_tol = 1e-14
e0 = rhf.scf()
g = Gradients(rhf)
print(g.grad())
#[[ 0 0 -2.41134256e-02]
# [ 0 4.39690522e-03 1.20567128e-02]
# [ 0 -4.39690522e-03 1.20567128e-02]]
rhf = scf.RHF(h2o).x2c()
rhf.conv_tol = 1e-14
e0 = rhf.scf()
g = Gradients(rhf)
print(g.grad())
#[[ 0 0 -2.40286232e-02]
# [ 0 4.27908498e-03 1.20143116e-02]
# [ 0 -4.27908498e-03 1.20143116e-02]]
| [
"[email protected]"
] | |
67804ab2e7258dc505b5a127e72d28636629181a | 28ef7c65a5cb1291916c768a0c2468a91770bc12 | /configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/res50_jhmdb_sub3_256x256.py | cdef4c576e262405a3b53d78b059c38bcc6b2148 | [
"Apache-2.0"
] | permissive | bit-scientist/mmpose | 57464aae1ca87faf5a4669991ae1ea4347e41900 | 9671a12caf63ae5d15a9bebc66a9a2e7a3ce617e | refs/heads/master | 2023-08-03T17:18:27.413286 | 2021-09-29T03:48:37 | 2021-09-29T03:48:37 | 411,549,076 | 0 | 0 | Apache-2.0 | 2021-09-29T06:01:27 | 2021-09-29T06:01:26 | null | UTF-8 | Python | false | false | 3,976 | py | _base_ = ['../../../../_base_/datasets/jhmdb.py']
log_level = 'INFO'
load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metric=['PCK', 'tPCK'], save_best='Mean PCK')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 15])
total_epochs = 20
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=15,
dataset_joints=15,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
],
inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
# model settings
model = dict(
type='TopDown',
pretrained=None,
backbone=dict(type='ResNet', depth=50),
keypoint_head=dict(
type='TopdownHeatmapSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11))
data_cfg = dict(
image_size=[256, 256],
heatmap_size=[64, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=True,
det_bbox_thr=0.0,
bbox_file='',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=30,
scale_factor=0.25),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=[
'img',
],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox', 'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/jhmdb'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
val_dataloader=dict(samples_per_gpu=32),
test_dataloader=dict(samples_per_gpu=32),
train=dict(
type='TopDownJhmdbDataset',
ann_file=f'{data_root}/annotations/Sub3_train.json',
img_prefix=f'{data_root}/',
data_cfg=data_cfg,
pipeline=train_pipeline,
dataset_info={{_base_.dataset_info}}),
val=dict(
type='TopDownJhmdbDataset',
ann_file=f'{data_root}/annotations/Sub3_test.json',
img_prefix=f'{data_root}/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
test=dict(
type='TopDownJhmdbDataset',
ann_file=f'{data_root}/annotations/Sub3_test.json',
img_prefix=f'{data_root}/',
data_cfg=data_cfg,
pipeline=val_pipeline,
dataset_info={{_base_.dataset_info}}),
)
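# Usage sketch (assumes a standard mmpose checkout; the command below is not
# part of this config file):
#     python tools/train.py \
#         configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/jhmdb/res50_jhmdb_sub3_256x256.py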
| [
"[email protected]"
] | |
b2239d07d16a98cc0da947a715c77ca38064eb64 | dcae1caa1816ba8ab8016e125027dd09b9a69720 | /tasks/copy_task.py | 8d95337c5390fec60fce1636b397fef0f820170f | [] | no_license | mahi97/XMANN | 5832bc0b02c7ee5998eaf8b4ed558f916e0d5d36 | bd6da642a5afc35582476b417e862f57817ed63c | refs/heads/master | 2023-06-15T19:10:55.331234 | 2021-07-15T11:35:40 | 2021-07-15T11:35:40 | 316,759,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,671 | py | """Copy Task NTM model."""
from attr import attrs, attrib, Factory
import random
import numpy as np
import torch
from torch import nn
from torch import optim
from model import Model
from model import ModelParams
class CopyTask(object):
def __init__(self):
self.model = CopyTaskModel
self.param = CopyTaskParams
def data_loader(num_batches, batch_size, seq_width, min_len, max_len, is_cuda=False):
"""Generator of random sequences for the copy task.
Creates random batches of "bits" sequences.
All the sequences within each batch have the same length.
    The length is drawn uniformly from [`min_len`, `max_len`].
    :param is_cuda: Generate the data in GPU memory.
:param num_batches: Total number of batches to generate.
:param seq_width: The width of each item in the sequence.
:param batch_size: Batch size.
:param min_len: Sequence minimum length.
:param max_len: Sequence maximum length.
    NOTE: The input width is `seq_width + 1`; the additional channel
    contains the delimiter.
"""
for batch_num in range(num_batches):
        # All sequences within this batch share the same length
seq_len = random.randint(min_len, max_len)
seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))
seq = torch.from_numpy(seq)
# The input includes an additional channel used for the delimiter
inp = torch.zeros(seq_len + 1, batch_size, seq_width + 1)
inp[:seq_len, :, :seq_width] = seq
inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel
outp = seq.clone()
if is_cuda:
inp = inp.cuda()
outp = outp.cuda()
yield batch_num + 1, inp.float(), outp.float()
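# A minimal usage sketch (illustrative; the shapes follow directly from the
# generator above):
#
#     loader = data_loader(num_batches=2, batch_size=4, seq_width=8,
#                          min_len=1, max_len=20)
#     for batch_num, x, y in loader:
#         assert x.shape[0] == y.shape[0] + 1  # input has an extra delimiter row
#         assert x.shape[2] == y.shape[2] + 1  # ... and an extra control channel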
@attrs
class CopyTaskParams(object):
name = attrib(default="copy-task")
memory = attrib(default='static')
memory_init = attrib(default='random')
controller = attrib(default='LSTM')
data_path = attrib(default='NTM')
controller_size = attrib(default=100, converter=int)
controller_layers = attrib(default=1, converter=int)
num_read_heads = attrib(default=1, converter=int)
num_write_heads = attrib(default=1, converter=int)
sequence_width = attrib(default=8, converter=int)
sequence_min_len = attrib(default=1, converter=int)
sequence_max_len = attrib(default=20, converter=int)
memory_n = attrib(default=128, converter=int)
memory_m = attrib(default=20, converter=int)
num_batches = attrib(default=20000, converter=int)
batch_size = attrib(default=1, converter=int)
rmsprop_lr = attrib(default=1e-4, converter=float)
rmsprop_momentum = attrib(default=0.9, converter=float)
rmsprop_alpha = attrib(default=0.95, converter=float)
is_cuda = attrib(default=False, converter=bool)
@attrs
class CopyTaskModel(object):
params = attrib(default=Factory(CopyTaskParams))
net = attrib()
data_loader = attrib()
criterion = attrib()
optimizer = attrib()
@net.default
def default_net(self):
# We have 1 additional input for the delimiter which is passed on a
# separate "control" channel
model_params = ModelParams(
memory=self.params.memory,
controller=self.params.controller,
data_path=self.params.data_path,
num_inputs=self.params.sequence_width + 1,
num_outputs=self.params.sequence_width,
num_hidden=self.params.controller_layers,
num_layers=self.params.controller_layers,
controller_size=self.params.controller_size,
num_read_heads=self.params.num_read_heads,
num_write_heads=self.params.num_write_heads,
memory_size=self.params.memory_n,
word_size=self.params.memory_m,
memory_init=self.params.memory_init,
batch_size=self.params.batch_size,
is_cuda=self.params.is_cuda
)
net = Model(model_params)
if self.params.is_cuda:
net = net.cuda()
return net
@data_loader.default
def default_dataloader(self):
return data_loader(self.params.num_batches, self.params.batch_size,
self.params.sequence_width,
self.params.sequence_min_len, self.params.sequence_max_len)
@criterion.default
def default_criterion(self):
return nn.BCELoss()
@optimizer.default
def default_optimizer(self):
return optim.RMSprop(self.net.parameters(),
momentum=self.params.rmsprop_momentum,
alpha=self.params.rmsprop_alpha,
lr=self.params.rmsprop_lr)
| [
"[email protected]"
] | |
4a9b892072ba58f5757ea70f8734c086671564e2 | db4c0f86904157c9ba40b495ca6506cd96450821 | /algorithms/python/104_maxinum_deepth_of_binary_tree.py | 4fa1f5be1929bd3e5494335478218e090b039496 | [] | no_license | ppd0705/leetcode | c26dfdd077985607354fc8dbac93a5ef3daf8e62 | 543e2ce47ea454d355762e6291a65a1cc6f7af71 | refs/heads/master | 2022-08-29T22:50:02.308073 | 2022-08-09T01:28:39 | 2022-08-09T01:28:39 | 221,321,139 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def maxDepth(self, root: TreeNode) -> int:
def helper(node):
if node is None:
return 0
return 1 + max(helper(node.left), helper(node.right))
return helper(root)
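# Quick illustration (not part of the LeetCode harness): the tree
#       3
#      / \
#     9  20
#        / \
#       15  7
# has depth 3:
#
#     root = TreeNode(3)
#     root.left = TreeNode(9)
#     root.right = TreeNode(20)
#     root.right.left = TreeNode(15)
#     root.right.right = TreeNode(7)
#     assert Solution().maxDepth(root) == 3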
| [
"[email protected]"
] | |
57de64b03d3c4f7ab214b32f22252f72c6390376 | 94d5ef47d3244950a0308c754e0aa55dca6f2a0e | /migrations/versions/5a9e6291a59c_added_scopus_id_field.py | a912de3bd6a52767365e01577cee59169158dc04 | [] | no_license | MUMT-IT/mis2018 | 9cbc7191cdc1bcd7e0c2de1e0586d8bd7b26002e | 69fabc0b16abfeba44173caa93d4f63fa79033fd | refs/heads/master | 2023-08-31T16:00:51.717449 | 2023-08-31T11:30:13 | 2023-08-31T11:30:13 | 115,810,883 | 5 | 5 | null | 2023-09-14T10:08:35 | 2017-12-30T17:06:00 | HTML | UTF-8 | Python | false | false | 867 | py | """added scopus ID field
Revision ID: 5a9e6291a59c
Revises: 42f544489b96
Create Date: 2019-03-25 07:05:06.087909
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5a9e6291a59c'
down_revision = '42f544489b96'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('research_pub', sa.Column('scopus_id', sa.String(length=128), nullable=True))
op.create_index(op.f('ix_research_pub_scopus_id'), 'research_pub', ['scopus_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_research_pub_scopus_id'), table_name='research_pub')
op.drop_column('research_pub', 'scopus_id')
# ### end Alembic commands ###
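# For context: this revision is applied with the standard Alembic CLI, e.g.
# `alembic upgrade head`, and reverted with `alembic downgrade -1`.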
| [
"[email protected]"
] | |
8cce7e85e1266c30a9ed503ccc6006ffbf2c94d5 | b1ea00015ad8196f78f0a7296ceb55dd5fa68820 | /Design/SnakeGame.py | cf21b44b0c5ce706a5ce5cbebd25421eebe4cc53 | [] | no_license | YusiZhang/leetcode-python | d1fa7c1b76cb13caaa800fe1d20c7bbd5550d871 | 26e2a812d86b4c09b2917d983df76d3ece69b074 | refs/heads/master | 2020-05-29T16:08:52.277158 | 2016-10-11T06:50:44 | 2016-10-14T06:36:22 | 58,106,795 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,995 | py | import collections
class SnakeGame(object):
    def __init__(self, width, height, food):
"""
Initialize your data structure here.
@param width - screen width
@param height - screen height
@param food - A list of food positions
E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1], the second is at [1,0].
:type width: int
:type height: int
:type food: List[List[int]]
"""
self.snake = collections.deque([[0,0]]) # snake head is at the front
self.width = width
self.height = height
self.food = collections.deque(food)
self.direct = {'U': [-1, 0], 'L': [0, -1], 'R': [0, 1], 'D': [1, 0]}
def move(self, direction):
"""
Moves the snake.
@param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
@return The game's score after the move. Return -1 if game over.
Game over when snake crosses the screen boundary or bites its body.
:type direction: str
:rtype: int
"""
newHead = [self.snake[0][0]+self.direct[direction][0], self.snake[0][1]+self.direct[direction][1]]
# notice that the newHead can be equal to self.snake[-1]
if (newHead[0] < 0 or newHead[0] >= self.height) or (newHead[1] < 0 or newHead[1] >= self.width) \
or (newHead in self.snake and newHead != self.snake[-1]): return -1
if self.food and self.food[0] == newHead: # eat food
self.snake.appendleft(newHead) # just make the food be part of snake
self.food.popleft() # delete the food that's already eaten
else: # not eating food: append head and delete tail
self.snake.appendleft(newHead)
self.snake.pop()
return len(self.snake)-1
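# Worked example (mirrors the classic problem statement): on a 3x2 board with
# food at [1,2] then [0,1]:
#
#     game = SnakeGame(3, 2, [[1, 2], [0, 1]])
#     game.move('R')  # -> 0
#     game.move('D')  # -> 0
#     game.move('R')  # -> 1  (eats the food at [1, 2])
#     game.move('U')  # -> 1
#     game.move('L')  # -> 2  (eats the food at [0, 1])
#     game.move('U')  # -> -1 (crosses the upper boundary: game over)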
# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction) | [
"[email protected]"
] | |
87f3ec6b5cd4aadb962208b899f021f77a46846a | 00cf2491d97f079dadee6b05990e9a506983f3b2 | /datastore/model.py | e3643f844a6427676d3ac675d26e92a0011c481c | [] | no_license | simonemmott/DataStore | af16cdb91f73835203e77108e731acd129e15f96 | d7ccc2e8540b8cd47bb80318b62b813da7b76357 | refs/heads/master | 2020-06-25T02:23:23.221408 | 2019-07-27T22:15:41 | 2019-07-27T22:15:41 | 199,169,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | from json_model import JsonModel
import importlib
def import_class(name):
mod_path = '.'.join(name.split('.')[:-1])
cls_name = name.split('.')[-1]
mod = importlib.import_module(mod_path)
if hasattr(mod, cls_name):
attr = getattr(mod, cls_name)
if isinstance(attr, type):
return attr
raise ValueError('{name} is not a class'.format(name=name))
raise ValueError('The module {mod} does not define {name}'.format(mod=mod_path, name=cls_name))
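# Illustrative behaviour (the dotted paths are examples only):
#     import_class('collections.OrderedDict')  # -> <class 'collections.OrderedDict'>
#     import_class('collections.namedtuple')   # -> ValueError: ... is not a class
#     import_class('collections.NoSuchThing')  # -> ValueError: ... does not define ...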
class MetaType(JsonModel):
name = JsonModel.field(str)
ref_type = JsonModel.field(str)
class Meta():
required_fields = ['name']
def __init__(self, *args, **kw):
super(MetaType, self).__init__(*args, **kw)
if not self.ref_type:
self.ref_type = self.name.split('.')[-1]
self.type = import_class(self.name)
@staticmethod
def from_class(cls):
        return MetaType(name=cls.__module__ + '.' + cls.__name__)
| [
"[email protected]"
] | |
927e9f9ea8862b1450ddf8c6f8814db817921683 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_051/ch43_2020_08_17_19_52_57_356234.py | f295feb2e8b443b435c9674f12d6e46d6abb9ea6 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | lista=['janeiro', 'fevereiro', 'março', 'abril', 'maio', 'junho', 'julho', 'agosto', 'setembro', 'outubro', 'novembro', 'dezembro']
print(lista[int(input('month number: '))-1]) | [
"[email protected]"
] | |
9cc1f699589a7ce3fd4896716330dd97386159c6 | c450204fda11a5d3733c463e31e4c10105420534 | /ans_comparer_gui.py | 66ea93f18d0abb7444b3ffe4cf88a1608c1a1ea8 | [] | no_license | Hilary02/IpynbComparer | 6a25386702ed7de5fdea0ae3281b836970645cce | 418919562b9eeefbbcc8d694aeab88356ba15f73 | refs/heads/master | 2022-11-04T00:56:44.659890 | 2020-06-15T07:09:56 | 2020-06-15T07:55:20 | 272,431,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,209 | py | import os
import tkinter as tk
import tkinter.filedialog
import json
from make_dict import *
left_data = None
right_data = None
debug = False
now_select = ""
def log(s):
logarea.insert("end", f"{s}\n")
def make_model_data():
log("模範解答を選択してください")
file_path = tk.filedialog.askopenfilename(
filetypes=[("模範解答", "*.ipynb")], initialdir="./")
model_dict = ProblemFileReader.makedict(file_path)
if not model_dict:
log("模範解答の処理に失敗しました")
else:
with open("./modelanswer.json", mode="w", encoding="utf-8") as f:
json.dump(model_dict, f, indent=4, ensure_ascii=False)
log("modelanswer.jsonを保存しました")
def file_select_f1():
global left_data
log("左に表示するデータを選択")
file_path = tk.filedialog.askopenfilename(
filetypes=[("Jupyter", "*.ipynb"), ("Json", "*.json")], initialdir="./")
kadai_dict = ProblemFileReader.makedict(file_path)
if kadai_dict:
file_name = file_path.split("/")[-1]
f1la1["text"] = f"ファイル名:{file_name}"
left_data = kadai_dict
log("読み込み成功")
selector_reset()
compare()
else:
log("読み込み失敗")
def file_select_f2():
global right_data
log("右に表示するデータを選択")
file_path = tk.filedialog.askopenfilename(
filetypes=[("Jupyter", "*.ipynb"), ("Json", "*.json")], initialdir="./")
kadai_dict = ProblemFileReader.makedict(file_path)
if kadai_dict:
file_name = file_path.split("/")[-1]
f2la1["text"] = f"ファイル名:{file_name}"
right_data = kadai_dict
log("読み込み成功")
compare()
else:
log("読み込み失敗")
def model_update():
global now_select
with open("./modelanswer.json", mode="r", encoding="utf-8") as f:
tmp_model = json.load(f)
tmp_model[now_select]["input"] = f1tx1.get("1.0", "end-1c")
tmp_model[now_select]["output"] = f1tx2.get("1.0", "end-1c")
left_data[now_select]["input"] = f1tx1.get("1.0", "end-1c")
left_data[now_select]["output"] = f1tx2.get("1.0", "end-1c")
with open("./modelanswer.json", mode="w", encoding="utf-8") as f:
json.dump(tmp_model, f, indent=4, ensure_ascii=False)
log("modelanswer.jsonを左のデータで更新しました")
def selector_reset():
for i in range(selector.size()):
selector.delete(tk.END)
for k in left_data.keys():
selector.insert(tk.END, k)
def kadai_selected(event):
if len(selector.curselection()) == 0:
return
i = selector.curselection()
if not left_data:
log("左側のデータが未選択")
return
f1tx1.delete("1.0", "end")
f1tx1.insert("end", left_data[selector.get(i)]["input"])
f1tx2.delete("1.0", "end")
f1tx2.insert("end", left_data[selector.get(i)]["output"])
if not right_data:
log("右側のデータが未選択")
return
global now_select
now_select = selector.get(i) # 保存
f2tx1.delete("1.0", "end")
f2tx1.insert("end", right_data[selector.get(i)]["input"])
f2tx2.delete("1.0", "end")
f2tx2.insert("end", right_data[selector.get(i)]["output"])
def strip_margin(s):
"""
    Return the string with spaces and quotes stripped from each line and blank lines removed.
"""
strip_str = ""
for l in s.split("\n"):
strip_line = l.strip(" '\"")
if strip_line:
strip_str += l.strip(" '\"") + "\n"
return strip_str
def loose_compare(str1, str2):
strip_str1 = strip_margin(str1)
strip_str2 = strip_margin(str2)
return strip_str1 == strip_str2
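# Example of the "loose" match (illustrative): surrounding spaces/quotes and
# blank lines are ignored, so these two cell outputs compare equal:
#
#     loose_compare("'hello'\n\nworld ", "hello\nworld")  # -> True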
def compare():
if not left_data or not right_data:
return False
keys = left_data.keys()
q_num = len(keys)
match_list = [False]*q_num
match_num = 0
score.delete("1.0", "end")
try:
for i, k in enumerate(keys):
if loose_compare(left_data[k]["output"], right_data[k]["output"]):
match_num += 1
match_list[i] = True
except Exception as e:
log("左右の形式が一致しません")
return False
score.insert("end", f"{match_num}/{q_num}")
colors = ("red", "green")
for i, b in enumerate(match_list):
selector.itemconfigure(i, foreground="white", background=colors[b])
return f"{match_num}/{q_num}"
# TODO: what should we do here?
if __name__ == "__main__":
root = tk.Tk()
root.title("nbcompare")
root.geometry("1200x600")
    # Left pane: problem display
    f1 = tk.Frame(root, relief=tk.GROOVE, bd=2)
    f1la1 = tk.Label(f1, text="File name")
    f1la1.grid(row=0, column=0, padx=2, pady=2, sticky=tk.N + tk.W)
    # Button
    f1bt1 = tkinter.Button(f1, text="Select file", command=file_select_f1)
    f1bt1.grid(row=0, column=1, padx=2, pady=2, sticky=tk.N + tk.E)
    f1la2 = tk.Label(f1, text="Code")
f1la2.grid(row=1, column=0, padx=2, pady=2, columnspan=2, sticky=tk.W)
f1tx1 = tk.Text(f1, padx=5, pady=5, width=60,
height=15, font=('Consolas', 11))
f1tx1.grid(row=2, column=0, padx=2, pady=2, columnspan=2)
f1la3 = tk.Label(f1, text="出力")
f1la3.grid(row=3, column=0, padx=2, pady=2, columnspan=2, sticky=tk.W)
f1tx2 = tk.Text(f1, padx=5, pady=5, width=50,
height=8, font=('Consolas', 12))
f1tx2.grid(row=4, column=0, padx=2, pady=2,
columnspan=2, sticky=tk.N + tk.W)
f1.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
    # Center pane: problem display
    f2 = tk.Frame(root, relief=tk.GROOVE, bd=2)
    f2la1 = tk.Label(f2, text="File name")
    f2la1.grid(row=0, column=0, padx=2, pady=2, sticky=tk.N + tk.W)
    # Button
    f2bt1 = tkinter.Button(f2, text="Select file", command=file_select_f2)
    f2bt1.grid(row=0, column=1, padx=2, pady=2, sticky=tk.N + tk.E)
    f2la2 = tk.Label(f2, text="Code")
f2la2.grid(row=1, column=0, padx=2, pady=2, columnspan=2, sticky=tk.W)
f2tx1 = tk.Text(f2, padx=5, pady=5, width=60,
height=15, font=('Consolas', 11))
f2tx1.grid(row=2, column=0, padx=2, pady=2, columnspan=2)
f2la3 = tk.Label(f2, text="出力")
f2la3.grid(row=3, column=0, padx=2, pady=2, columnspan=2, sticky=tk.W)
f2tx2 = tk.Text(f2, padx=5, pady=5, width=50,
height=8, font=('Consolas', 12))
f2tx2.grid(row=4, column=0, padx=2, pady=2,
columnspan=2, sticky=tk.N + tk.W)
f2.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
    # Right pane: information display
    f3 = tk.Frame(root, bd=2)
    f3la1 = tk.Label(f3, text="Problem list")
    f3la1.pack(side=tk.TOP)
    # Create the problem-selection list
    selector = tkinter.Listbox(f3, selectmode=tkinter.SINGLE)
    selector.insert(0, "No selection")
selector.bind('<<ListboxSelect>>', kadai_selected)
selector.pack(side=tk.TOP, fill=tk.X, expand=0)
f3la2 = tk.Label(f3, text="一致率")
f3la2.pack(side=tk.TOP)
score = tk.Text(f3, padx=5, pady=5, width=20,
height=1, font=('Consolas', 18))
score.pack(side=tk.TOP)
f3la3 = tk.Label(f3, text="ログ")
f3la3.pack(side=tk.TOP)
logarea = tk.Text(f3, padx=5, pady=5, width=30,
height=20, font=('Consolas', 9))
logarea.pack(side=tk.TOP)
f3bt1 = tkinter.Button(f3, text="左の内容でmodelを更新(仮)", command=model_update)
f3bt1.pack(side=tk.TOP, fill=tk.X, expand=0)
f3.pack(side=tk.LEFT, fill=tk.BOTH, expand=1)
    # First-run setup
    if not os.path.isfile("./modelanswer.json"):
        log("No model answer data found")
        make_model_data()
    # Automatic loading
    try:
        log("Loading model answer data")
with open("./modelanswer.json", mode="r", encoding="utf-8") as f:
left_data = json.load(f)
f1la1["text"] = "ファイル名:modelanswer.json"
selector_reset()
except Exception as e:
log("模範回答データが見つかりません")
file_select_f2()
root.mainloop()
| [
"[email protected]"
] | |
4da37331376912a600bead21c5b5170ef9a415e6 | f2f3023c77357ff1e7ba830587291414566aa112 | /Monstr/Core/BaseModule.py | 559843a1ff20be10610ac18e684adcb003c97076 | [
"Apache-2.0"
] | permissive | tier-one-monitoring/monstr | e1e38cc719a75abb39b7ad3a24bb527ef9ad920a | 88af719e2e5eb0fe3ace7238840f97c35ac7044c | refs/heads/master | 2020-07-18T02:42:59.019768 | 2019-05-06T13:42:52 | 2019-05-06T13:42:52 | 60,289,289 | 0 | 3 | Apache-2.0 | 2019-04-29T10:40:10 | 2016-06-02T18:51:15 | Python | UTF-8 | Python | false | false | 10,243 | py | from abc import ABCMeta, abstractmethod
import Monstr.Core.DB as DB
import Monstr.Core.Utils as Utils
import Monstr.Core.Config as Config
import Monstr.Core.Constants as Constants
# ,----------------------.
# |BaseModule            |
# |----------------------|
# |+string name |
# |+obj table_schemas |
# |----------------------|
# |+void Initialize() |
# |+obj PrepareRetrieve()|
# |+obj Retrieve() |
# |+obj InsertToDB() |
# |+obj Analyze() |
# |+obj React() |
# |+obj Run() |
# `----------------------'
class BaseModule():
__metaclass__ = ABCMeta
name = None
table_schemas = None
tables = None
status_table = None
status_list = []
journal = None
events_table = None
rest_links = {}
db_handler = None
status_schema = {'status': (DB.Column('id', DB.Integer, primary_key=True),
DB.Column('name', DB.String(64)),
DB.Column('status', DB.Integer),
DB.Column('time', DB.DateTime(True)),
DB.Column('description', DB.Text),)}
journal_schema = (DB.Column('id', DB.Integer, primary_key=True),
DB.Column('module', DB.String(64)),
DB.Column('time', DB.DateTime(True)),
DB.Column('result', DB.String(32)),
DB.Column('step', DB.String(32)),
DB.Column('description', DB.Text),)
events_schema = (DB.Column('id', DB.BigInteger, primary_key=True),
DB.Column('module', DB.String(64)),
DB.Column('state', DB.String(64)),
DB.Column('name', DB.String(64)),
DB.Column('type', DB.String(32)),
DB.Column('time', DB.DateTime(True)),
DB.Column('severity', DB.Integer),
DB.Column('description', DB.Text),)
responsibles = ['[email protected]']
# ==========================================================================
# Database functions
# ==========================================================================
def _db_incert_journal_row(self, row):
self.db_handler.insert(self.journal, row)
def _db_incert_event_row(self, row):
self.db_handler.insert(self.events_table, row)
def _db_get_status_table_repr(self):
return [x._asdict() for x in self.db_handler.get_session().query(self.status_table['status']).all()]
def _db_update_status(self, statuses):
conn = self.db_handler.get_engine().connect()
for status in statuses:
update = self.status_table['status'].update().values(status=status['status'], time=status['time'], description=status['description']).where(self.status_table['status'].c.name == status['name'])
conn.execute(update)
# ==========================================================================
# Common functions
# ==========================================================================
def _create_journal_row(self, result, step=None, description=None):
row = {'module': self.name,
'time': Utils.get_UTC_now(),
'result': result,
'step': step,
'description': description}
return row
def write_to_journal(self, result, step=None, description=None):
row = self._create_journal_row(result, step, description)
self._db_incert_journal_row(row)
def write_error_to_journal(self, result, step=None, error=None):
description = (type(error).__name__ + ': ' + error.message) if error is not None else None
self.write_to_journal(result, step, description)
# --------------------------------------------------------------------------
def create_event(self, name, state, event_type, time, severity, description):
event = {'module': self.name,
'name': name,
'state': state,
'type': event_type,
'time': time,
'severity': severity,
'description': description}
return event
def write_event(self, event):
self._db_incert_event_row(event)
# --------------------------------------------------------------------------
def _create_params(self, default_params, params):
result = {}
for key in default_params:
if key not in params:
result[key] = default_params[key]
else:
result[key] = type(default_params[key])(params[key])
return result
def get_status_from_status_code(self, code):
if code in Constants.STATUS:
return Constants.STATUS[code]
return 'Undefined' + str(code)
def get_last_status(self):
return self._db_get_status_table_repr()
def update_status(self, current_statuses):
previous_status = self._db_get_status_table_repr()
last_statuses = dict([(str(x['name']), {'name':str(x['name']), 'status':int(x['status']), 'time':str(x['time'])}) for x in previous_status])
update_list = []
event_list = []
for status in current_statuses:
last_status = last_statuses[status['name']]
if last_status['status'] != status['status']:
update_list.append(status)
# Generate event and write to DB
last_status_name = self.get_status_from_status_code(last_status['status'])
new_status_name = self.get_status_from_status_code(status['status'])
event_name = status['name'] + ':' + last_status_name + '->' + new_status_name
                event = self.create_event(event_name, status['name'], 'StatusChange', status['time'], status['status'], status['description'])
                event_list.append(event)
                self.write_event(event)
                # Send a message if necessary
email_conf = Config.get_section('Email')
threshold = int(email_conf['threshold'])
recipients = email_conf['recipients'].split(',')
if last_status['status'] >= threshold or status['status'] >= threshold:
subject = self.name + ':' + status['name'] + ' goes ' + self.get_status_from_status_code(status['status'])
message = """
For %s:%s the status change occured:
last status was %s, since %s
new status is %s, since %s
""" % (self.name, status['name'],
self.get_status_from_status_code(last_status['status']), str(last_status['time']),
self.get_status_from_status_code(status['status']), str(status['time']),
)
Utils.send_email(subject, message, recipients)
self._db_update_status(update_list)
return event_list
# --------------------------------------------------------------------------
def __init__(self):
self.db_handler = DB.DBHandler()
self.rest_links = {'getModuleStatus': self.GetModuleStatus}
self.journal = self.db_handler.getOrCreateTable('monstr_Journal', self.journal_schema)
self.events_table = self.db_handler.getOrCreateTable('monstr_Events', self.events_schema)
def Initialize(self):
        if self.name is None:
            raise ValueError("Module requires a name")
        if self.table_schemas is None:
            raise ValueError("Module requires a schemas list")
self.tables = self.db_handler.initialize(self.table_schemas, self.name)
self.status_table = self.db_handler.initialize(self.status_schema, self.name, self.status_list)
def PrepareRetrieve(self):
return {}
def Retrieve(self, params):
pass
def Analyze(self, data):
return []
def React(self, events):
pass
def InsertToDB(self, data):
for schema in data:
table = self.tables[schema]
self.db_handler.bulk_insert(table, data[schema])
return
def ExecuteCheck(self):
try:
self.Initialize()
except Exception as e:
self.write_error_to_journal('Fail', 'Initialize', e)
print e
return
try:
params = self.PrepareRetrieve()
except Exception as e:
self.write_error_to_journal('Fail', 'PrepareRetrieve', e)
print e
return
try:
data = self.Retrieve(params)
except Exception as e:
self.write_error_to_journal('Fail', 'Retrieve', e)
print e
return
try:
self.InsertToDB(data)
except Exception as e:
self.write_error_to_journal('Fail', 'InsertToDB', e)
print e
return
#try:
events = self.Analyze(data)
#except Exception as e:
# self.write_error_to_journal('Fail', 'Analyze', e)
# print 'Analyze error'
# print e
# return
try:
            self.React(events)
except Exception as e:
self.write_error_to_journal('Fail', 'React', e)
print e
return
self.write_to_journal('Success')
# ==========================================================================
# Web
# ==========================================================================
def GetModuleStatus(self, incoming_params):
response = {}
params = incoming_params
try:
result = self._db_get_status_table_repr()
response = {'data': result,
'applied_params': params,
'success': True}
except Exception as e:
response = {'data': {},
'incoming_params': incoming_params,
'success': False,
'error': type(e).__name__ + ': ' + e.message,
'description': 'Error inside BaseModule.GetModuleStatus'}
return response
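    # A minimal sketch (hypothetical module, not shipped with Monstr) of how a
    # concrete check subclasses BaseModule and rides the
    # Initialize -> PrepareRetrieve -> Retrieve -> InsertToDB -> Analyze -> React
    # pipeline driven by ExecuteCheck():
    #
    #     class PingModule(BaseModule):
    #         name = 'Ping'
    #         table_schemas = {'ping': (DB.Column('id', DB.Integer, primary_key=True),
    #                                   DB.Column('host', DB.String(64)),
    #                                   DB.Column('time', DB.DateTime(True)),)}
    #
    #         def Retrieve(self, params):
    #             return {'ping': [{'host': 'localhost', 'time': Utils.get_UTC_now()}]}
    #
    #     PingModule().ExecuteCheck()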
| [
"[email protected]"
] | |
9eba0b833a0ba139819af0b9aa282f36e595bdaf | 9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c | /baomoicrawl/venv/Lib/site-packages/scrapy/utils/job.py | 12a886c4752744d82c9c82f2144df6d642aa170c | [] | no_license | thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler | b0fdedee2942a12d9f64dfed93f43802dc5ab340 | 87c8c07433466bbc43a24ea089f75baeb467c356 | refs/heads/master | 2022-11-27T21:36:33.917491 | 2020-08-10T23:24:42 | 2020-08-10T23:24:42 | 286,583,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | import os
def job_dir(settings):
path = settings['JOBDIR']
if path and not os.path.exists(path):
os.makedirs(path)
return path
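# job_dir backs Scrapy's persistent-state support: when the JOBDIR setting is
# set (e.g. `scrapy crawl somespider -s JOBDIR=crawls/somespider-1`, as in the
# Scrapy docs), components get a directory they can checkpoint state into.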
| [
"[email protected]"
] | |
87acdc16f9e7ff0ad3da6aaea1d2590cdc5fdf75 | 99091fded6b655e27a7afd5a81693f9e86d064f6 | /offset/core/util.py | 6a1ec85bddc55449b942472a87079f5b6acedf5d | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | dotpot/offset | 68670ace4945c23d1193ef8a8f57679db4fd9038 | 51200d0ee3a1776ad55d7c3ce53a5237236759e2 | refs/heads/master | 2021-01-15T21:50:06.090937 | 2013-10-01T16:55:57 | 2013-10-01T18:26:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | # -*- coding: utf-8 -
#
# This file is part of offset. See the NOTICE for more information.
import fcntl
import os
import time
def nanotime(s=None):
""" convert seconds to nanoseconds. If s is None, current time is
returned """
if s is not None:
return s * 1000000000
return time.time() * 1000000000
def from_nanotime(n):
""" convert from nanotime to seconds """
return n / 1.0e9
def nanosleep(n):
time.sleep(from_nanotime(n))
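# Examples (illustrative):
#     nanotime(1.5)                 # -> 1.5e9
#     from_nanotime(nanotime(2.0))  # -> 2.0
#     nanosleep(5e8)                # sleeps for ~0.5 s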
| [
"[email protected]"
] | |
a9cbff56aea97cc7f40943e0040f157d74659b76 | 592e77ed163ce83abd940004f56b5ebed0e3f726 | /OS Re-install/HPE-CS500-OS-Re-Installation-2018.03-0/SLES/modules/upgradeMisc.py | 1114c3cb747d5acdeae302afe4422e3f4b66050e | [] | no_license | publiccoding/os_deploy | 84703e4850bf33d7583f76aa471c023e0a1c5603 | a31611060bfffb85cc5ba3e656df61da6f28f665 | refs/heads/master | 2021-04-03T07:51:26.286215 | 2018-08-23T06:35:31 | 2018-08-23T06:35:31 | 124,385,606 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98,797 | py | # Embedded file name: ./upgradeMisc.py
import math
import subprocess
import re
import os
import datetime
import time
import logging
import shutil
import glob
from ast import literal_eval
RED = '\x1b[31m'
GREEN = '\x1b[32m'
YELLOW = '\x1b[33m'
RESETCOLORS = '\x1b[0m'
def checkDiskspace(backupItemsList):
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Checking to ensure that there is enough disk space for the backup and the backup ISO image and that the overall restoration backup does not exceed 3GB.')
print GREEN + 'Checking to ensure that there is enough disk space for the backup and the backup ISO image and that the overall backup archive does not exceed 3GB.' + RESETCOLORS
command = 'df /'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
out = out.strip()
if result.returncode != 0:
logger.error("Unable to get the root file system's usage information.\n" + err + '\n' + out)
print RED + "Unable to get the root file system's usage information; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
try:
tmpVar = re.match('(.*\\s+){3}([0-9]+)\\s+', out).group(2)
except AttributeError as err:
logger.error("There was a match error when trying to match against '" + out + "'.\n" + str(err))
print RED + "There was a match error when trying to match against '" + out + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
availableDiskSpace = int(math.floor(float(tmpVar) / float(1048576)))
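    # For reference, the regex above is meant to pull the fourth
    # whitespace-separated field ("Available", in 1K blocks) out of df output
    # shaped like the following (illustrative values):
    #
    #     Filesystem     1K-blocks     Used Available Use% Mounted on
    #     /dev/sda2      103081248 24653932  78427316  24% /
    #
    # 78427316 / 1048576 then floors to whole gigabytes (74 here).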
if len(backupItemsList) == 3:
backupItems = ' '.join(backupItemsList[0]) + ' ' + ' '.join(backupItemsList[1]) + ' ' + ' '.join(backupItemsList[2])
restorationItems = ' '.join(backupItemsList[0]) + ' ' + ' '.join(backupItemsList[1])
else:
backupItems = ' '.join(backupItemsList[0]) + ' ' + ' '.join(backupItemsList[1])
restorationItems = ' '.join(backupItemsList[0])
backupList = [backupItems, restorationItems]
count = 0
for items in backupList:
command = 'du -BG -sc ' + items
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
if re.search('No such file or directory', err, re.MULTILINE | re.DOTALL) == None:
logger.error("Could not get the total disk space used by '" + items + "'.\n" + err + '\n' + out)
print RED + 'Could not get the total disk space used by the directories/files being backed up; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
if re.match('.*\\s+([0-9]+)G\\s+total', out, re.DOTALL | re.IGNORECASE | re.MULTILINE) != None:
try:
if count == 0:
totalUsed = int(re.match('.*\\s+([0-9]+)G\\s+total', out, re.DOTALL | re.IGNORECASE | re.MULTILINE).group(1)) * 2 + 0.2
else:
totalUsed = int(re.match('.*\\s+([0-9]+)G\\s+total', out, re.DOTALL | re.IGNORECASE | re.MULTILINE).group(1))
except AttributeError as err:
logger.error("There was a match error when trying to match against '" + out + "'.\n" + str(err))
print RED + "There was a match error when trying to match against '" + out + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
else:
logger.error("Could not get the total disk space used by '" + items + "'.\n" + out)
print RED + 'Could not get the total disk space used by the directories/files being backed up; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
if count == 0:
if availableDiskSpace - totalUsed < 3:
logger.error("There is not enough disk space to make a backup of '" + items + "'; available disk space '" + str(availableDiskSpace) + "' minus backup total '" + str(totalUsed) + "' used is less than 3GB.")
print RED + 'There is not enough disk space to make a backup of the directories/files being backed up; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
elif totalUsed > 3:
logger.error("The current size '" + str(totalUsed) + "'GB of the restoration backup of '" + items + "' exceeds 3GB.")
print RED + 'The current size of the restoration backup to be saved to the restoration ISO exceeds 3GB; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
count += 1
logger.info('Done checking to ensure that there is enough disk space for the backup and the backup ISO image and that the overall restoration backup does not exceed 3GB.')
return
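# Sizing rule implemented above, for reference: pass one (count == 0) doubles
# the du total and adds 0.2 GB to cover both the staged copies and the backup
# ISO, then requires at least 3 GB of headroom on /; pass two checks that the
# restoration subset itself stays under the 3 GB ISO budget.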
def createNetworkInformationFile(upgradeWorkingDir, osDist):
nicDataFileDir = upgradeWorkingDir + '/nicDataFile'
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Creating the NIC MAC address cross reference file that will be used for reference after the upgrade.')
print GREEN + 'Creating the NIC MAC address cross reference file that will be used for reference after the upgrade.' + RESETCOLORS
if not os.path.exists(nicDataFileDir):
try:
os.mkdir(nicDataFileDir)
except OSError as err:
logger.error("Unable to create the NIC MAC address cross reference data directory '" + nicDataFileDir + "'.\n" + str(err))
print RED + "Unable to create the NIC MAC address cross reference data directory '" + nicDataFileDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
command = 'ifconfig -a'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Unable to get NIC card information.\n' + err + '\n' + out)
print RED + 'Unable to get NIC card information; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
nicDataList = out.splitlines()
nicDict = {}
for data in nicDataList:
if 'HWaddr' in data and 'bond' not in data:
try:
nicList = re.match('\\s*([a-z0-9]+)\\s+.*HWaddr\\s+([a-z0-9:]+)', data, re.IGNORECASE).groups()
except AttributeError as err:
logger.error("There was a match error when trying to match against '" + data + "'.\n" + str(err))
print RED + "There was a match error when trying to match against '" + data + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
nicName = nicList[0]
nicMACAddress = nicList[1].lower()
nicDict[nicMACAddress] = nicName
logger.info('The NIC dictionary was determined to be: ' + str(nicDict) + '.')
procBondingDir = '/proc/net/bonding'
if os.path.isdir(procBondingDir):
command = 'ls ' + procBondingDir
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to get the network bond information from '" + procBondingDir + "'.\n" + err + '\n' + out)
print RED + "Unable to get the network bond information from '" + procBondingDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
activeBondsList = out.strip().split()
for bondName in activeBondsList:
command = 'cat ' + procBondingDir + '/' + bondName
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to get network bond information for '" + bondName + "' from proc.\n" + err + '\n' + out)
print RED + "Unable to get network bond information for '" + bondName + "' from proc; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
procBondingData = out.splitlines()
for data in procBondingData:
if 'Slave Interface' in data:
slaveInterface = re.match('.*:\\s+([a-z0-9]+)', data).group(1)
continue
if 'Permanent HW addr' in data:
macAddress = re.match('.*:\\s+([a-z0-9:]+)', data).group(1)
nicDict[macAddress] = slaveInterface
        logger.info('The updated NIC dictionary was determined to be: ' + str(nicDict) + '.')
        if osDist == 'RHEL':
            updateNICCfgFiles(nicDict)
    else:
        logger.info("It was determined that there were no active network bonds, since '" + procBondingDir + "' did not exist.")
try:
macAddressDataFile = nicDataFileDir + '/macAddressData.dat'
f = open(macAddressDataFile, 'w')
for macAddress in nicDict:
f.write(nicDict[macAddress] + '|' + macAddress + '\n')
except IOError as err:
logger.error("Could not write NIC card mac address information to '" + macAddressDataFile + "'.\n" + str(err))
print RED + "Could not write NIC card mac address information to '" + macAddressDataFile + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
f.close()
logger.info('Done creating the NIC MAC address cross reference file that will be used for reference after the upgrade.')
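# The cross-reference file written above is a pipe-delimited map with one NIC
# per line, e.g. (illustrative values):
#
#     eth0|9c:dc:71:56:8f:a0
#     eth1|9c:dc:71:56:8f:a4
#
# The post-upgrade checks read it back with dict(x.strip().split('|') ...).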
def updateNICCfgFiles(nicDict):
logger = logging.getLogger('coeOSUpgradeLogger')
for macAddress in nicDict:
nicCFGFile = '/etc/sysconfig/network-scripts/ifcfg-' + nicDict[macAddress]
if os.path.exists(nicCFGFile):
command = 'egrep "^\\s*HWADDR" ' + nicCFGFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
logger.info("The output of the command '" + command + "' used to get the NIC's MAC address variable 'HWADDR' from '" + nicCFGFile + "' was: " + out.strip() + '.')
if result.returncode != 0:
logger.info("Updating '" + nicCFGFile + "' with the NIC's MAC address, since it was not present.")
command = "echo 'HWADDR=" + macAddress + "' >> " + nicCFGFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Problems were encountered while updating '" + nicCFGFile + "' with the NIC's MAC address information.\n" + err + '\n' + out)
print RED + "Problems were encountered while updating '" + nicCFGFile + "' with the NIC's MAC address information; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
def checkRHELNetworkConfiguration(programParentDir, osDist, cursesThread):
errorMessage = ''
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Checking the network configuration.')
cursesThread.insertMessage(['informative', 'Checking the network configuration.'])
cursesThread.insertMessage(['informative', ''])
command = 'systemctl stop network'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while shutting down the network.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while shutting down the network; thus the network configuration was not confirmed.'
return errorMessage
time.sleep(15.0)
if os.path.isfile(programParentDir + '/nicDataFile/pci.ids'):
errorMessage = configureMellanox(programParentDir)
if len(errorMessage) != 0:
return errorMessage
command = 'modprobe -r tg3'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while unloading the tg3 driver.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while unloading the tg3 driver; thus the network configuration was not confirmed.'
return errorMessage
time.sleep(2.0)
command = 'modprobe tg3'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while reloading the tg3 driver.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while reloading the tg3 driver; thus the network configuration was not confirmed.'
return errorMessage
time.sleep(2.0)
command = 'ifconfig -a'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Unable to get NIC card information.\n' + err + '\n' + out)
errorMessage = 'Unable to get NIC card information; thus the network configuration was not confirmed.'
return errorMessage
nicDataList = out.splitlines()
nicDict = {}
count = 0
skip = False
for data in nicDataList:
if 'flags=' in data:
if 'lo:' in data or not 'bond' in data:
try:
nicName = re.match('\\s*([a-z0-9]+):', data, re.IGNORECASE).group(1)
                    count = 1
except AttributeError as err:
logger.error("There was a match error when trying to match against '" + data + "'.\n" + str(err))
errorMessage = "There was a match error when trying to match against '" + data + "'; thus the network configuration was not confirmed."
return errorMessage
elif 'flags=' in data and 'bond' in data:
skip = True
elif 'ether' in data and 'txqueuelen' in data and not skip:
try:
nicMACAddress = re.match('\\s*ether\\s+([a-z0-9:]+)', data, re.IGNORECASE).group(1)
count += 1
except AttributeError as err:
logger.error("There was a match error when trying to match against '" + data + "'.\n" + str(err))
errorMessage = "There was a match error when trying to match against '" + data + "'; thus the network configuration was not confirmed."
return errorMessage
elif 'ether' in data and 'txqueuelen' in data:
skip = False
else:
continue
        if count == 2:
            # store the pair once both the name and the MAC of this NIC are seen
            nicDict[nicMACAddress] = nicName
            count = 0
logger.info('The NIC dictionary was determined to be: ' + str(nicDict) + '.')
try:
macAddressDataFile = programParentDir + '/nicDataFile/macAddressData.dat'
with open(macAddressDataFile) as f:
macAddressData = f.readlines()
except IOError as err:
logger.error("Unable to get the MAC address list from '" + macAddressDataFile + "'.\n" + str(err))
errorMessage = "Unable to get the MAC address list from '" + macAddressDataFile + "'; thus the network configuration was not confirmed."
return errorMessage
macAddressDict = dict((x.strip().split('|') for x in macAddressData))
macAddressDict = dict(map(reversed, macAddressDict.items()))
logger.info('The MAC address dictionary (previous NIC mapping) was determined to be: ' + str(macAddressDict) + '.')
changedNicDict = {}
for macAddress in macAddressDict:
currentNicName = macAddressDict[macAddress]
try:
previousNicName = nicDict[macAddress]
except KeyError as err:
logger.error('The resource key (' + str(err) + ') was not present in the previous NIC dictionary.')
errorMessage = 'The resource key (' + str(err) + ') was not present in the previous NIC dictionary; thus the network configuration was not confirmed.'
return errorMessage
if currentNicName != previousNicName:
changedNicDict[previousNicName] = currentNicName
if len(changedNicDict) != 0:
errorMessage = updateNICNames(changedNicDict, osDist, cursesThread)
logger.info('Done checking the network configuration.')
return errorMessage
def checkSLESNetworkConfiguration(programParentDir, osDist, cursesThread, **kwargs):
errorMessage = ''
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Checking the network configuration.')
cursesThread.insertMessage(['informative', 'Checking the network configuration.'])
cursesThread.insertMessage(['informative', ''])
if 'osDistLevel' in kwargs:
osDistLevel = kwargs['osDistLevel']
else:
osDistLevel = ''
if osDistLevel == '11.4':
command = 'service network stop'
else:
command = 'systemctl stop network'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while shutting down the network.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while shutting down the network; thus the network configuration was not confirmed.'
return errorMessage
time.sleep(15.0)
if os.path.isfile(programParentDir + '/nicDataFile/pci.ids'):
errorMessage = configureMellanox(programParentDir)
if len(errorMessage) != 0:
return errorMessage
command = 'ifconfig -a'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Unable to get NIC card information.\n' + err + '\n' + out)
errorMessage = 'Unable to get NIC card information; thus the network configuration was not confirmed.'
return errorMessage
nicDataList = out.splitlines()
nicDict = {}
for data in nicDataList:
if 'HWaddr' in data:
try:
nicList = re.match('\\s*([a-z0-9]+)\\s+.*HWaddr\\s+([a-z0-9:]+)', data, re.IGNORECASE).groups()
except AttributeError as err:
logger.error("There was a match error when trying to match against '" + data + "'.\n" + str(err))
errorMessage = "There was a match error when trying to match against '" + data + "'; thus the network configuration was not confirmed."
return errorMessage
nicDict[nicList[1].lower()] = nicList[0]
logger.info('The NIC dictionary was determined to be: ' + str(nicDict) + '.')
try:
macAddressDataFile = programParentDir + '/nicDataFile/macAddressData.dat'
with open(macAddressDataFile) as f:
macAddressData = f.readlines()
except IOError as err:
logger.error("Unable to get the MAC address list from '" + macAddressDataFile + "'.\n" + str(err))
errorMessage = "Unable to get the MAC address list from '" + macAddressDataFile + "'; thus the network configuration was not confirmed."
return errorMessage
macAddressDict = dict((x.strip().split('|') for x in macAddressData))
macAddressDict = dict(map(reversed, macAddressDict.items()))
logger.info('The MAC address dictionary (previous NIC mapping) was determined to be: ' + str(macAddressDict) + '.')
changedNicDict = {}
    for macAddress in macAddressDict:
        previousNicName = macAddressDict[macAddress]
        try:
            currentNicName = nicDict[macAddress]
        except KeyError as err:
            logger.error('The resource key (' + str(err) + ') was not present in the current NIC dictionary.')
            errorMessage = 'The resource key (' + str(err) + ') was not present in the current NIC dictionary; thus the network configuration was not confirmed.'
            return errorMessage
        if currentNicName != previousNicName:
            changedNicDict[currentNicName] = previousNicName
if len(changedNicDict) != 0:
errorMessage = updateNICNames(changedNicDict, osDist, cursesThread)
logger.info('Done checking the network configuration.')
return errorMessage
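# Rename the NIC configuration (ifcfg-*) and route (ifroute-*/route-*) files in
# the distribution's network directory to match the NIC names of the running
# system. changedNicDict maps each current NIC name to the name that was
# recorded before the upgrade.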
def updateNICNames(changedNicDict, osDist, cursesThread):
errorMessage = ''
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Updating the network configuration, since the NIC names changed.')
cursesThread.insertMessage(['informative', 'Updating the network configuration, since the NIC names changed.'])
cursesThread.insertMessage(['informative', ''])
logger.info('The changed NIC dictionary was determined to be: ' + str(changedNicDict) + '.')
networkCfgFileList = []
if osDist == 'SLES':
networkDir = '/etc/sysconfig/network'
else:
networkDir = '/etc/sysconfig/network-scripts'
try:
os.chdir(networkDir)
except OSError as err:
logger.error("Unable to change into the network directory '" + networkDir + "'.\n" + str(err))
errorMessage = "Unable to change into the network directory '" + networkDir + "'; thus the network configuration was not confirmed."
return errorMessage
command = 'ls ifcfg-*'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while getting a listing of the NIC configuration files.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while getting a listing of the NIC configuration files; thus the network configuration was not confirmed.'
return errorMessage
nicCfgFileList = out.splitlines()
logger.info('The NIC configuration files were determined to be: ' + str(nicCfgFileList) + '.')
tmpNicNameDict = dict(((nic.strip().replace('ifcfg-', ''), nic.strip()) for nic in nicCfgFileList))
nicNameDict = {}
for key in tmpNicNameDict:
if '.' not in key and key != 'lo':
nicNameDict[key] = tmpNicNameDict[key]
networkCfgFileList.append(tmpNicNameDict[key])
logger.info('The NIC name dictionary was determined to be: ' + str(nicNameDict) + '.')
logger.info('The NIC configuration file list was determined to be: ' + str(networkCfgFileList) + '.')
command = ''
if osDist == 'SLES':
if glob.glob('ifroute-*'):
command = 'ls ifroute-*'
elif glob.glob('route-*'):
command = 'ls route-*'
routeNicNameDict = {}
if not command == '':
logger.info("The command used to get the list of NIC specific route configuration files was: '" + command + "'.")
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while getting a listing of the NIC specific route configuration files.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while getting a listing of the NIC specific route configuration files; thus the network configuration was not confirmed.'
return errorMessage
routeCfgFileList = out.splitlines()
logger.info('The route configuration file list was determined to be: ' + str(routeCfgFileList) + '.')
if osDist == 'SLES':
tmpRouteNicNameDict = dict(((route.strip().replace('ifroute-', ''), route.strip()) for route in routeCfgFileList))
else:
tmpRouteNicNameDict = dict(((route.strip().replace('route-', ''), route.strip()) for route in routeCfgFileList))
for key in tmpRouteNicNameDict:
if '.' not in key and key != 'lo':
routeNicNameDict[key] = tmpRouteNicNameDict[key]
networkCfgFileList.append(tmpRouteNicNameDict[key])
if len(routeNicNameDict) > 0:
logger.info('The route name dictionary was determined to be: ' + str(routeNicNameDict) + '.')
for nicName in changedNicDict:
previousNICName = changedNicDict[nicName]
command = "sed -i 's/" + previousNICName + '/' + nicName + "/g' " + ' '.join(networkCfgFileList)
logger.info("The command used to update the NIC configuration files with the new NIC name was: '" + command + "'.")
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Problems were encountered while updating the configuration files with the new NIC name '" + nicName + "'.\n" + err + '\n' + out)
errorMessage = "Problems were encountered while updating the configuration files with the new NIC name '" + nicName + "'; thus the network configuration was not confirmed."
return errorMessage
if previousNICName in nicNameDict:
command = 'mv ' + nicNameDict[previousNICName] + ' ifcfg-' + nicName
logger.info("The command used to move the NIC configuration file '" + nicNameDict[previousNICName] + "' to its new name was: '" + command + "'.")
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Problems were encountered while moving '" + nicNameDict[previousNICName] + "' to 'ifcfg-" + nicName + "'.\n" + err + '\n' + out)
errorMessage = "Problems were encountered while moving '" + nicNameDict[previousNICName] + "' to 'ifcfg-" + nicName + "'; thus the network configuration was not confirmed."
return errorMessage
networkCfgFileList.remove(nicNameDict[previousNICName])
if previousNICName in routeNicNameDict:
if osDist == 'SLES':
newRouteFileName = 'ifroute-' + nicName
else:
newRouteFileName = 'route-' + nicName
command = 'mv ' + routeNicNameDict[previousNICName] + ' ' + newRouteFileName
            logger.info("The command used to move the NIC route configuration file '" + routeNicNameDict[previousNICName] + "' to its new name was: '" + command + "'.")
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Problems were encountered while moving '" + routeNicNameDict[previousNICName] + "' to '" + newRouteFileName + "'.\n" + err + '\n' + out)
errorMessage = "Problems were encountered while moving '" + routeNicNameDict[previousNICName] + "' to '" + newRouteFileName + "'; thus the network configuration was not confirmed."
return errorMessage
networkCfgFileList.remove(routeNicNameDict[previousNICName])
logger.info('Done updating the network configuration, since the NIC names changed.')
return errorMessage
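# Restore the server's hostname from the file saved before the upgrade, first
# with 'hostnamectl set-hostname' and, if that fails, by writing /etc/hostname
# directly.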
def setHostname(programParentDir, cursesThread):
errorMessage = ''
hostnameFile = programParentDir + '/hostnameData/hostname'
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info("Setting the server's hostname.")
cursesThread.insertMessage(['informative', "Setting the server's hostname."])
cursesThread.insertMessage(['informative', ''])
    try:
        with open(hostnameFile, 'r') as f:
            hostname = f.readline().strip()
except IOError as err:
logger.error("Problems were encountered while reading the server's hostname from '" + hostnameFile + "'.\n" + str(err))
errorMessage = "Problems were encountered while reading the server's hostname from '" + hostnameFile + "'; thus the server's hostname was not set."
return errorMessage
command = 'hostnamectl set-hostname ' + hostname
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Problems were encountered while setting the server's hostname '" + hostname + "'.\n" + command + '\n' + err + '\n' + out)
errorMessage = "Problems were encountered while setting the server's hostname '" + hostname + "'; thus the server's hostname may not be set."
command = 'echo -n "' + hostname + '" > /etc/hostname'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Problems were encountered while setting the server's hostname '" + hostname + "'.\n" + command + '\n' + err + '\n' + out)
errorMessage = "Problems were encountered while setting the server's hostname '" + hostname + "'; thus the server's hostname may not be set."
logger.info("Done setting the server's hostname.")
return errorMessage
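# Ensure /etc/ntp.conf contains 'controlkey 1' whenever a 'keys' entry is
# present, either by rewriting an existing controlkey line or by inserting one
# after the keys line.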
def updateNTPConf(cursesThread):
errorMessage = ''
ntpConfigurationFile = '/etc/ntp.conf'
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info("Checking and updating ntp's controlkey setting if necessary.")
cursesThread.insertMessage(['informative', "Checking and updating ntp's controlkey setting if necessary."])
cursesThread.insertMessage(['informative', ''])
command = 'egrep "^\\s*keys" ' + ntpConfigurationFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
logger.info('The output of the command (' + command + ') used to get the keys resource from ' + ntpConfigurationFile + ' was: ' + out.strip() + '.')
if result.returncode == 0:
command = 'egrep "^\\s*controlkey\\s+[0-9]+\\s*.*" ' + ntpConfigurationFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
logger.info('The output of the command (' + command + ') used to get the controlkey resource from ' + ntpConfigurationFile + ' was: ' + out.strip() + '.')
if result.returncode == 0:
command = "sed -ri '0,/^\\s*controlkey\\s+[0-9]+\\s*.*/s//controlkey 1/' " + ntpConfigurationFile
else:
command = "sed -i 's/^\\s*keys.*/&\\ncontrolkey 1/' " + ntpConfigurationFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Problems were encountered while setting ntp's controlkey variable.\n" + err + '\n' + out)
errorMessage = "Problems were encountered while setting ntp's controlkey variable; thus the server's ntp server was not configured."
return errorMessage
logger.info("Done checking and updating ntp's controlkey setting if necessary.")
return errorMessage
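# Create the pre-upgrade backups: tar and gzip each backup set (OS restoration,
# OS archive and, when three sets are given, SAP restoration), record their
# md5sums, and wrap everything except the OS archive in an ISO image whose
# md5sum is recorded as well.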
def createBackup(backupList, upgradeWorkingDir, osDist):
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Creating the backup archive ISO image.')
print GREEN + 'Creating the backup archive ISO image.' + RESETCOLORS
archiveDir = upgradeWorkingDir + '/archiveImages'
if not os.path.isdir(archiveDir):
try:
os.mkdir(archiveDir)
except OSError as err:
logger.error("Unable to create the pre-upgrade archive directory '" + archiveDir + "'.\n" + str(err))
print RED + "Unable to create the pre-upgrade archive directory '" + archiveDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
else:
try:
archiveList = os.listdir(archiveDir)
for archive in archiveList:
os.remove(archiveDir + '/' + archive)
except OSError as err:
logger.error("Unable to remove old archives in '" + archiveDir + "'.\n" + str(err))
print RED + "Unable to remove old archives in '" + archiveDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
hostname = os.uname()[1]
dateTimestamp = datetime.datetime.now().strftime('%d%H%M%b%Y')
restorationBackupList = [archiveDir + '/' + hostname + '_OS_Restoration_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp + '.tar',
archiveDir + '/' + hostname + '_OS_Restoration_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp + '.tar.gz',
archiveDir + '/' + hostname + '_OS_Restoration_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp,
'OS restoration']
archiveBackupList = [archiveDir + '/' + hostname + '_OS_Archive_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp + '.tar',
archiveDir + '/' + hostname + '_OS_Archive_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp + '.tar.gz',
archiveDir + '/' + hostname + '_OS_Archive_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp,
'OS backup']
if len(backupList) == 3:
sapRestorationBackupList = [archiveDir + '/' + hostname + '_SAP_Restoration_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp + '.tar',
archiveDir + '/' + hostname + '_SAP_Restoration_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp + '.tar.gz',
archiveDir + '/' + hostname + '_SAP_Restoration_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp,
'SAP restoration']
count = 0
for backupData in backupList:
fileRemoved = False
if count == 0:
backupReferenceList = restorationBackupList
elif count == 1:
backupReferenceList = archiveBackupList
else:
backupReferenceList = sapRestorationBackupList
backupData, fileRemoved = confirmBackupDataList(backupData)
if fileRemoved:
print YELLOW + 'File(s) were removed from the backup, since they were not present; review the log file for additional information.' + RESETCOLORS
        command = 'tar -cWf ' + backupReferenceList[0] + ' -C / ' + ' '.join(backupData)
logger.info("The command used to create the '" + backupReferenceList[3] + "' tar archive was: " + command + '.')
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('A problem was encountered while creating the pre-upgrade ' + backupReferenceList[3] + " backup '" + backupReferenceList[0] + "' archive.\n" + err + '\n' + out)
print RED + 'A problem was encountered while creating the pre-upgrade ' + backupReferenceList[3] + " backup '" + backupReferenceList[0] + "' archive; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
command = 'gzip ' + backupReferenceList[0]
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('A problem was encountered while compressing the pre-upgrade ' + backupReferenceList[3] + " backup '" + backupReferenceList[0] + "' archive.\n" + err + '\n' + out)
print RED + 'A problem was encountered while compressing the pre-upgrade ' + backupReferenceList[3] + " backup '" + backupReferenceList[0] + "' archive; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
count += 1
count = 0
for i in range(len(backupList)):
if count == 0:
backupReferenceList = restorationBackupList
elif count == 1:
backupReferenceList = archiveBackupList
else:
backupReferenceList = sapRestorationBackupList
backupMd5sumFile = backupReferenceList[2] + '.md5sum'
        command = 'md5sum ' + backupReferenceList[1] + ' > ' + backupMd5sumFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to get the md5sum of the backup archive '" + backupReferenceList[1] + "'.\n" + err + '\n' + out)
print RED + "Unable to get the md5sum of the backup archive '" + backupReferenceList[1] + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
count += 1
backupISO = upgradeWorkingDir + '/' + hostname + '_Archive_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp + '.iso'
backupISOMd5sumFile = upgradeWorkingDir + '/' + hostname + '_Archive_Backup_For_' + osDist + '_Upgrade_' + dateTimestamp + '.md5sum'
command = 'genisoimage -R -m *_OS_Archive_Backup_For*.* -o ' + backupISO + ' ' + upgradeWorkingDir
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("A problem was encountered while creating the pre-upgrade backup '" + backupISO + "' ISO image.\n" + err + '\n' + out)
print RED + "A problem was encountered while creating the pre-upgrade backup '" + backupISO + "' ISO image; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
command = 'md5sum ' + backupISO + ' > ' + backupISOMd5sumFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("A problem was encountered while getting the md5sum of the pre-upgrade backup '" + backupISO + "' ISO image.\n" + err + '\n' + out)
print RED + "A problem was encountered while getting the md5sum of the pre-upgrade backup '" + backupISO + "' ISO image; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
logger.info('Done creating the backup archive ISO image.')
return (backupISO, archiveBackupList[1])
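# Prune entries from a backup set that are not present on the system; returns
# the pruned list and a flag indicating whether anything was removed.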
def confirmBackupDataList(backupData):
updatedBackupData = []
fileRemoved = False
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Checking the backup data list to ensure the selected files/directories are present and if not removing them from the list.')
for file in backupData:
if glob.glob(file):
updatedBackupData.append(file)
else:
logger.info('The following was removed from the backup list, since it was not present: ' + file + '.')
fileRemoved = True
logger.info('Done checking the backup data list to ensure the selected files/directories are present and if not removing them from the list.')
return (updatedBackupData, fileRemoved)
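# Install the add-on RPMs on SLES by registering the RPM directory as a
# temporary plaindir zypper repository, installing everything from it, and then
# removing the repository again.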
def installSLESAddOnSoftware(programParentDir, cursesThread):
errorMessage = ''
addOnSoftwareInstalled = True
addOnSoftwareDir = programParentDir + '/addOnSoftwareRPMS'
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Installing the additional software RPMs needed for the upgrade.')
cursesThread.insertMessage(['informative', 'Installing the additional software RPMs needed for the upgrade.'])
cursesThread.insertMessage(['informative', ''])
command = 'zypper ar -G -t plaindir ' + addOnSoftwareDir + ' addOnSoftwareRPMS'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while installing the additional software RPMs.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while installing the additional software RPMs; the RPMs will need to be installed manually before proceeding.'
addOnSoftwareInstalled = False
return (errorMessage, addOnSoftwareInstalled)
command = 'zypper in -y addOnSoftwareRPMS:*'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while installing the additional software RPMs.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while installing the additional software RPMs; the RPMs will need to be installed manually before proceeding.'
addOnSoftwareInstalled = False
return (errorMessage, addOnSoftwareInstalled)
command = 'zypper rr addOnSoftwareRPMS'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while removing the additional software RPMs repository.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while removing the additional software RPMs repository; the repository will need to be removed manually.'
logger.info('Done installing the additional software RPMs needed for the upgrade.')
return (errorMessage, addOnSoftwareInstalled)
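# Install the add-on RPMs on RHEL: copy them to a temporary directory, turn it
# into a yum repository with createrepo, install everything from that local
# repository, and then remove the repository again.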
def installRHELAddOnSoftware(programParentDir, cursesThread):
errorMessage = ''
addOnSoftwareInstalled = True
dateTimestamp = datetime.datetime.now().strftime('%d%H%M%b%Y')
repoDir = '/tmp/addOnSoftware_' + dateTimestamp
addOnSoftware = programParentDir + '/addOnSoftwareRPMS'
baseURL = 'baseurl=file://' + repoDir
repositoryTemplate = programParentDir + '/repositoryTemplate/local.repo'
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Installing the additional software RPMs needed for the upgrade.')
cursesThread.insertMessage(['informative', 'Installing the additional software RPMs needed for the upgrade.'])
cursesThread.insertMessage(['informative', ''])
try:
shutil.copytree(addOnSoftware, repoDir)
except OSError as err:
logger.error("Problems were encountered while copying the additional software RPMs to '" + repoDir + "'.\n" + str(err))
errorMessage = 'Problems were encountered while copying the additional software RPMs to the repository; the RPMs will need to be installed manually before proceeding.'
addOnSoftwareInstalled = False
return (errorMessage, addOnSoftwareInstalled)
command = 'createrepo ' + repoDir
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while creating the additional software RPMs repository.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while creating the additional software RPMs repository; the RPMs will need to be installed manually before proceeding.'
addOnSoftwareInstalled = False
return (errorMessage, addOnSoftwareInstalled)
try:
shutil.copy2(repositoryTemplate, '/etc/yum.repos.d')
except IOError as err:
logger.error('Problems were encountered while copying the additional software RPMs repository template into place.\n' + str(err))
errorMessage = 'Problems were encountered while copying the additional software RPMs repository template into place; the RPMs will need to be installed manually before proceeding.'
addOnSoftwareInstalled = False
return (errorMessage, addOnSoftwareInstalled)
command = "sed -ri 's|^\\s*baseurl=file://.*|" + baseURL + "|' /etc/yum.repos.d/local.repo"
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while updating the additional software RPMs repository template.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while updating the additional software RPMs repository; the RPMs will need to be installed manually before proceeding.'
addOnSoftwareInstalled = False
return (errorMessage, addOnSoftwareInstalled)
command = 'yum --disablerepo="*" --enablerepo="local" -y install \\*'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while installing the additional software RPMs.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while installing the additional software RPMs; the RPMs will need to be installed manually before proceeding.'
addOnSoftwareInstalled = False
return (errorMessage, addOnSoftwareInstalled)
try:
os.remove('/etc/yum.repos.d/local.repo')
shutil.rmtree(repoDir)
except OSError as err:
logger.error('Problems were encountered while removing the additional software RPMs repository.\n' + str(err))
errorMessage = 'Problems were encountered while removing the additional software RPMs repository; the RPMs repository will need to be removed manually.'
logger.info('Done installing the additional software RPMs needed for the upgrade.')
return (errorMessage, addOnSoftwareInstalled)
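# Configure every snapper configuration to keep only daily snapshots for the
# last seven days; on SLES 11.4 the configuration files are edited with sed,
# while later releases use 'snapper set-config'.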
def configureSnapper(osDistLevel, cursesThread):
errorMessage = ''
snapperResourceList = ['NUMBER_CLEANUP="yes"',
'NUMBER_LIMIT="7"',
'TIMELINE_LIMIT_HOURLY="0"',
'TIMELINE_LIMIT_DAILY="7"',
'TIMELINE_LIMIT_WEEKLY="0"',
'TIMELINE_LIMIT_MONTHLY="0"',
'TIMELINE_LIMIT_YEARLY="0"']
configList = []
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Configuring snapper to keep snapshots for the last 7 days.')
cursesThread.insertMessage(['informative', 'Configuring snapper to keep snapshots for the last 7 days.'])
cursesThread.insertMessage(['informative', ''])
command = 'snapper list-configs'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Problems were encountered while getting the list of snapper configurations.\n' + err + '\n' + out)
errorMessage = 'Problems were encountered while getting the list of snapper configurations.'
return errorMessage
configDataList = out.splitlines()
for line in configDataList:
line = line.strip()
if len(line) == 0 or re.match('^-', line) or re.match('^\\s+$', line) or re.match('^Config', line, re.IGNORECASE):
continue
else:
configList.append(re.sub('\\s+', '', line).split('|')[0])
for config in configList:
for resource in snapperResourceList:
            if resource == 'TIMELINE_LIMIT_WEEKLY="0"' and osDistLevel == '11.4':
continue
if osDistLevel == '11.4':
command = "sed -i 's/^\\s*" + resource.split('=')[0] + '=.*$/' + resource + "/g' /etc/snapper/configs/" + config
else:
command = 'snapper -c ' + config + ' set-config ' + resource
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Problems were encountered while setting the resource '" + resource + "' for the snapper configuration '" + config + "'.\n" + err + '\n' + out)
errorMessage = 'Problems were encountered while setting the snapper configuration resources.'
return errorMessage
time.sleep(1.0)
logger.info('Done configuring snapper to keep snapshots for the last 7 days.')
return errorMessage
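# Extract the OS restoration archive over the root file system; on failure the
# archive error file is updated so repeated failed attempts can be tracked.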
def extractOSRestorationArchive(osRestorationArchive, osRestorationArchiveErrorFile, cursesThread):
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Extracting the OS restoration archive image.')
command = 'tar -zxf ' + osRestorationArchive + ' -C /'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
logger.info("The command used to extract the OS restoration archive was: '" + command + "'.")
if result.returncode != 0:
logger.error("There was a problem extracting the OS restoration archive '" + osRestorationArchive + "'.\n" + err + '\n' + out)
        updateOSRestorationArchiveErrorFile(osRestorationArchiveErrorFile, cursesThread)
        displayErrorMessage("There was a problem extracting the OS restoration archive '" + osRestorationArchive + "'; fix the problem and try again.", cursesThread)
logger.info('Done extracting the OS restoration archive image.')
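# Locate the OS restoration archive for the given distribution and verify its
# integrity by comparing its md5sum with the one recorded in the matching
# md5sum file; returns the archive's path.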
def checkOSRestorationArchive(programParentDir, osRestorationArchiveErrorFile, osDist, cursesThread):
osRestorationArchiveFile = ''
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Checking the OS restoration archive to make sure it is not corrupt.')
osArchiveFileRegex = re.compile('.*_OS_Restoration_Backup_For_' + osDist + '_Upgrade_[0-9]{6}[A-Za-z]{3}[0-9]{4}.tar.gz')
archiveImageDir = programParentDir + '/archiveImages'
command = 'ls ' + archiveImageDir
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to get a listing of the files in '" + archiveImageDir + "'.\n" + err + '\n' + out)
        updateOSRestorationArchiveErrorFile(osRestorationArchiveErrorFile, cursesThread)
displayErrorMessage("Unable to get a listing of the files in '" + archiveImageDir + "'; fix the problem and try again.", cursesThread)
fileList = out.splitlines()
osRestorationArchiveFound = False
for file in fileList:
if re.match(osArchiveFileRegex, file):
md5sumFile = re.sub('tar.gz', 'md5sum', file)
osRestorationArchiveFile = archiveImageDir + '/' + file
osRestorationArchiveMd5sumFile = archiveImageDir + '/' + md5sumFile
osRestorationArchiveFound = True
break
if not osRestorationArchiveFound:
logger.error("The OS restoration archive '" + archiveImageDir + '/' + osArchiveFileRegex.pattern + "' could not be found.")
        updateOSRestorationArchiveErrorFile(osRestorationArchiveErrorFile, cursesThread)
displayErrorMessage("The OS restoration archive '" + archiveImageDir + '/' + osArchiveFileRegex.pattern + "' could not be found; fix the problem and try again.", cursesThread)
if not os.path.isfile(osRestorationArchiveMd5sumFile):
logger.error("The OS restoration archive's md5sum file '" + osRestorationArchiveMd5sumFile + "' is missing.")
        updateOSRestorationArchiveErrorFile(osRestorationArchiveErrorFile, cursesThread)
displayErrorMessage("The OS restoration archive's md5sum file '" + osRestorationArchiveMd5sumFile + "' is missing; fix the problem and try again.", cursesThread)
command = 'md5sum ' + osRestorationArchiveFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to determine the md5sum of the OS restoration archive '" + osRestorationArchiveFile + "'.\n" + err + '\n' + out)
        updateOSRestorationArchiveErrorFile(osRestorationArchiveErrorFile, cursesThread)
displayErrorMessage("Unable to determine the md5sum of the OS restoration archive '" + osRestorationArchiveFile + "'; fix the problem and try again.", cursesThread)
try:
        osRestorationArchiveMd5sum = re.match('([0-9a-f]*)\\s+', out).group(1)
except AttributeError as err:
logger.error('There was a match error when trying to match against ' + out + '.\n' + str(err))
        updateOSRestorationArchiveErrorFile(osRestorationArchiveErrorFile, cursesThread)
displayErrorMessage("There was a match error when matching against '" + out + "'; fix the problem and try again.", cursesThread)
try:
with open(osRestorationArchiveMd5sumFile) as f:
for line in f:
line = line.strip()
if file in line:
                    originalOSRestorationArchiveMd5sum = re.match('([0-9a-f]*)\\s+', line).group(1)
except IOError as err:
logger.error("Unable to get the md5sum of the OS restoration archive from '" + osRestorationArchiveMd5sumFile + "'.\n" + str(err))
        updateOSRestorationArchiveErrorFile(osRestorationArchiveErrorFile, cursesThread)
displayErrorMessage("Unable to get the md5sum of the OS restoration archive from '" + osRestorationArchiveMd5sumFile + "'; fix the problem and try again.", cursesThread)
except AttributeError as err:
logger.error("There was a match error when trying to match against '" + line + "'.\n" + str(err))
        updateOSRestorationArchiveErrorFile(osRestorationArchiveErrorFile, cursesThread)
displayErrorMessage("There was a match error when matching against '" + line + "'; fix the problem and try again.", cursesThread)
if osRestorationArchiveMd5sum != originalOSRestorationArchiveMd5sum:
logger.error("The OS restoration archive '" + osRestorationArchiveFile + "' is corrupt; its md5sum does not match its md5sum in '" + osRestorationArchiveMd5sumFile + "'.")
        updateOSRestorationArchiveErrorFile(osRestorationArchiveErrorFile, cursesThread)
displayErrorMessage("The OS restoration archive '" + osRestorationArchiveFile + "' is corrupt; fix the problem and try again.", cursesThread)
logger.info('Done checking the OS restoration archive to make sure it is not corrupt.')
return osRestorationArchiveFile
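# Record a failed restoration attempt in the archive error file; the file's
# size is used to distinguish the first, second, and third failed attempts.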
def updateOSRestorationArchiveErrorFile(osRestorationArchiveErrorFile, cursesThread):
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Updating the OS restoration archive error file with an archive failure attempt.')
try:
f = open(osRestorationArchiveErrorFile, 'a')
if os.stat(osRestorationArchiveErrorFile).st_size == 0:
f.write('First Attempt Failed\n')
elif os.stat(osRestorationArchiveErrorFile).st_size < 25:
f.write('Second Attempt Failed\n')
elif os.stat(osRestorationArchiveErrorFile).st_size < 45:
f.write('Third Attempt Failed\n')
except IOError as err:
logger.error('Could not write to the OS restoration archive error file ' + osRestorationArchiveErrorFile + '.\n' + str(err))
displayErrorMessage("Could not write to the OS restoration archive error file '" + osRestorationArchiveErrorFile + "'; fix the problem and try again.", cursesThread)
f.close()
    logger.info('Done updating the OS restoration archive error file with an archive failure attempt.')
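# Display an error message on the curses screen, wait for the user to
# acknowledge it, and then exit.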
def displayErrorMessage(message, cursesThread):
cursesThread.insertMessage(['error', message])
cursesThread.insertMessage(['informative', ''])
cursesThread.getUserInput(['error', 'Press enter to exit and try again.'])
cursesThread.insertMessage(['informative', ''])
while not cursesThread.isUserInputReady():
time.sleep(0.1)
exit(1)
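# Stage the add-on RPMs for the target OS version in the pre-upgrade working
# directory so they can be installed after the upgrade; on RHEL the yum
# repository template is staged as well.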
def stageAddOnRPMS(programParentDir, upgradeWorkingDir, osDist, osUpgradeVersion):
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Copying the additional RPMs needed for the post-upgrade to the pre-upgrade working directory.')
addOnSoftwareRPMDir = programParentDir + '/addOnSoftwareRPMS/' + osDist + '/' + osUpgradeVersion
addOnSoftwareRPMSDir = upgradeWorkingDir + '/addOnSoftwareRPMS'
try:
if os.listdir(addOnSoftwareRPMDir):
try:
os.mkdir(addOnSoftwareRPMSDir)
except OSError as err:
logger.error("Unable to create the additional RPMs directory '" + addOnSoftwareRPMSDir + "'.\n" + str(err))
print RED + "Unable to create the additional RPMs directory '" + addOnSoftwareRPMSDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
command = 'cp ' + addOnSoftwareRPMDir + '/* ' + addOnSoftwareRPMSDir
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
logger.info("The command used to copy the additional RPMs into place for the post upgrade installation was: '" + command + "'.")
if result.returncode != 0:
logger.error("Unable to copy the additional RPMs from '" + addOnSoftwareRPMDir + "' to '" + addOnSoftwareRPMSDir + "'.\n" + err + '\n' + out)
print RED + "Unable to copy the additional RPMs from '" + addOnSoftwareRPMDir + "' to '" + addOnSoftwareRPMSDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
if osDist == 'RHEL':
copyRepositoryTemplate(programParentDir, upgradeWorkingDir)
else:
logger.info('There were no additional RPMs present for the post-upgrade.')
except OSError as err:
logger.error("Unable to get a listing of the additional RPMs needed for the post-upgrade, if any, from '" + addOnSoftwareRPMDir + "'.\n" + str(err))
print RED + 'Unable to get a listing of the additional RPMs needed for the post-upgrade, if any; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
logger.info('Done copying the additional RPMs needed for the post-upgrade to the pre-upgrade working directory.')
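# Ensure /etc/multipath.conf blacklists HPE (HP) controllers; an existing
# blacklist section is parsed to see whether the vendor/product device entry is
# already present, otherwise the blacklist stanza is appended.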
def updateMultipathConf(cursesThread):
errorMessage = ''
started = False
vendorMatched = False
productMatched = False
blackListPresent = False
blacklist = '\'\n\n#Added so that HPE controllers are ignored by multipath.\nblacklist {\n\tdevnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"\n\tdevnode "^hd[a-z][[0-9]*]"\n\n\tdevice {\n\t\tvendor "HP"\n\t\tproduct "LOGICAL VOLUME.*"\n\t}\n}\''
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Updating /etc/multipath.conf to blacklist HPE controllers.')
cursesThread.insertMessage(['informative', 'Updating /etc/multipath.conf to blacklist HPE controllers.'])
cursesThread.insertMessage(['informative', ''])
command = "grep -zPo '^\\s*blacklist\\s+(\\{([^{}]++|(?1))*\\})' /etc/multipath.conf"
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
command = 'echo -e ' + blacklist + ' >> /etc/multipath.conf'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Failed to update /etc/multipath.conf to blacklist HPE controllers.\n' + err + '\n' + out)
logger.info('The command used to update /etc/multipath.conf to blacklist HPE controllers was: \n' + command)
errorMessage = 'Failed to update /etc/multipath.conf to blacklist HPE controllers; thus /etc/multipath.conf will have to be updated manually.'
else:
        currentBlacklist = out.splitlines()
        for line in currentBlacklist:
if re.match('\\s*device\\s*{\\s*$', line):
if started:
started = False
continue
else:
started = True
continue
elif started:
if re.match('\\s*vendor\\s+"HP"\\s*$', line):
if vendorMatched:
started = False
continue
else:
vendorMatched = True
continue
if re.match('\\s*product\\s+"LOGICAL VOLUME\\.\\*"\\s*$', line):
if productMatched:
started = False
continue
else:
productMatched = True
continue
if re.match('\\s*}\\s*$', line) and vendorMatched and productMatched:
blackListPresent = True
break
elif re.match('\\s*}\\s*$', line) and (vendorMatched or productMatched):
break
elif vendorMatched and productMatched and not re.match('\\s*$|\\s*#', line):
break
else:
continue
else:
continue
if blackListPresent:
logger.info('The blacklist for /etc/multipath.conf was not updated, since it is already up to date.')
else:
command = 'echo -e ' + blacklist + ' >> /etc/multipath.conf'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Failed to update /etc/multipath.conf to blacklist HPE controllers.\n' + err + '\n' + out)
logger.info('The command used to update /etc/multipath.conf to blacklist HPE controllers was: \n' + command)
errorMessage = 'Failed to update /etc/multipath.conf to blacklist HPE controllers; thus /etc/multipath.conf will have to be updated manually.'
logger.info('Done updating /etc/multipath.conf to blacklist HPE controllers.')
return errorMessage
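# Prepare the autoyast control file (autoinst.xml) and its image for a CS500 or
# CS900 (Superdome) system: determine the OS LUN device, substitute it into the
# template, apply the SLES-version- and server-model-specific package and tuning
# changes, and copy the result into the mounted autoinst image.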
def updateInstallCtrlFile(programParentDir, upgradeWorkingDir, osUpgradeVersion, **kwargs):
dateTimestamp = datetime.datetime.now().strftime('%d%H%M%b%Y')
cs500ScaleOut = False
serverModel = 'Superdome'
autoinstImageFile = programParentDir + '/installCtrlFiles/autoinst.img'
autoinstImgMountPoint = '/tmp/autoinstImg_' + dateTimestamp
installCtrlFileDir = upgradeWorkingDir + '/installCtrlFile'
ctrlFile = installCtrlFileDir + '/autoinst.xml'
lunID = ''
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Updating the install control file with the partition ID that will have the OS installed on it.')
if 'cs500ScaleOut' in kwargs:
cs500ScaleOut = True
if 'serverModel' in kwargs:
serverModel = kwargs['serverModel']
installCtrlFileTemplate = programParentDir + '/installCtrlFiles/cs500-autoinst.xml'
ctrlFileImg = installCtrlFileDir + '/cs500_autoinst.img'
else:
installCtrlFileTemplate = programParentDir + '/installCtrlFiles/cs900-autoinst.xml'
ctrlFileImg = installCtrlFileDir + '/cs900_autoinst.img'
if not os.path.isdir(installCtrlFileDir):
try:
os.mkdir(installCtrlFileDir)
shutil.copy2(installCtrlFileTemplate, ctrlFile)
shutil.copy2(autoinstImageFile, ctrlFileImg)
except OSError as err:
logger.error("Unable to create install control file directory '" + installCtrlFileDir + "'.\n" + str(err))
print RED + "Unable to create the install control file directory '" + installCtrlFileDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
except IOError as err:
logger.error("Unable to copy the install control files to '" + ctrlFile + "'.\n" + str(err))
print RED + "Unable to copy the install control files to '" + ctrlFile + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
if not os.path.isdir(autoinstImgMountPoint):
try:
os.mkdir(autoinstImgMountPoint)
except OSError as err:
logger.error("Unable to create mount point '" + autoinstImgMountPoint + "'.\n" + str(err))
print RED + "Unable to create mount point '" + autoinstImgMountPoint + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
command = 'mount -o loop -o rw ' + ctrlFileImg + ' ' + autoinstImgMountPoint
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to mount '" + ctrlFileImg + "' on '" + autoinstImgMountPoint + "'.\n" + err + '\n' + out)
print RED + "Unable to mount '" + ctrlFileImg + "' on '" + autoinstImgMountPoint + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
if serverModel == 'Superdome' or cs500ScaleOut:
command = 'df /boot/efi'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Unable to get the partition information for the root file system.\n' + err + '\n' + out)
print RED + 'Unable to get the partition information for the root file system; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
out = out.strip()
logger.info('df shows that the /boot/efi partition is mounted on:\n' + out)
if '/dev' not in out:
            logger.error('Unable to identify the OS LUN device.')
print RED + 'Unable to identify the OS LUN device; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
try:
if '360002ac0' in out:
lunID = re.match('.*(360002ac0+[0-9a-f]+)[_-]part', out, re.DOTALL | re.MULTILINE).group(1)
else:
                device = re.match('.*(/dev/[0-9a-zA-Z/_-]*)\\s+', out, re.DOTALL | re.MULTILINE).group(1)
command = 'find -L /dev -samefile ' + device
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
if not 'already visited the directory' in err:
if not 'No such' in err:
logger.error("Failed to get the files linked to '" + device + "'.\n" + err + '\n' + out)
print RED + 'Unable to identify the OS LUN ID; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
else:
logger.warn('find complained while searching for files linked to ' + device + ': \n' + err + '\n' + out)
fileList = out.splitlines()
for file in fileList:
if '360002ac0' in file:
try:
lunID = re.match('.*(360002ac0+[0-9a-f]+)', file).group(1)
except AttributeError as err:
logger.error("There was a match error when trying to match against '" + file + "'.\n" + str(err))
print RED + 'There was a match error when trying to get the OS LUN ID; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
break
                if lunID == '':
                    logger.error("Unable to identify the OS LUN ID using the device's (" + device + ') linked file list: ' + str(fileList))
                    print RED + 'Unable to identify the OS LUN ID; fix the problem and try again; exiting program execution.' + RESETCOLORS
                    exit(1)
except AttributeError as err:
logger.error("There was a match error when trying to match against '" + out + "'.\n" + str(err))
print RED + 'There was a match error when trying to get the OS LUN ID; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
logger.info("The OS LUN ID was determined to be '" + lunID + "'.")
osLUNDevice = '/dev/mapper/' + lunID
command = "sed -ri 's,%%bootDisk%%," + osLUNDevice + ",g' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to update '" + ctrlFile + "' with the OS LUN device '" + osLUNDevice + "'.\n" + err + '\n' + out)
print RED + 'Unable to update the install control file with the OS LUN device; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
if osUpgradeVersion == '12.1':
slesSP1PackageDict = {'sapconf': 'saptune',
'rear116': 'rear118a'}
for package in slesSP1PackageDict:
command = "sed -i 's,<package>" + slesSP1PackageDict[package] + '</package>,<package>' + package + "</package>,' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to update '" + ctrlFile + "' with the HPE SAP HANA specific package (" + package + ') for SLES for SAP 12 SP1.\n' + err + '\n' + out)
print RED + 'Unable to update the install control file with the HPE SAP HANA specific package for SLES for SAP 12 SP1; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
time.sleep(1.0)
if serverModel == 'DL580' and osUpgradeVersion == '12.1':
hpeTune = "mkdir -p /mnt/usr/lib/tuned/sap-hpe-hana\\ncat<<EOF > /mnt/usr/lib/tuned/sap-hpe-hana/tuned.conf\\n######################################################\\n# START: tuned profile sap-hpe-hana settings\\n######################################################\\n#\\n# tuned configuration for HPE SAP HANA.\\n# Inherited from SuSE/SAP 'sap-hana' profile\\n#\\n# Allows HPE SAP HANA specific tunings\\n#\\n[main]\\ninclude = sap-hana\\n"
command = "sed -i '\\|<START HPE TUNE>|,\\|<END HPE TUNE>|c\\" + hpeTune + "' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to update '" + ctrlFile + "' with the HPE SAP HANA specific tuning for SLES for SAP 12 SP1.\n" + err + '\n' + out)
print RED + 'Unable to update the install control file with the HPE SAP HANA specific tuning for SLES for SAP 12 SP1; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
elif serverModel == 'DL580':
command = "sed -i '/<START HPE TUNE>\\|<END HPE TUNE>/d' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to remove the tuning boundaries from '" + ctrlFile + "'.\n" + err + '\n' + out)
print RED + 'Unable to remove the tuning boundaries from the install control file; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
if 'cs500ScaleOut' in kwargs:
command = 'sed -i \'s@<start_multipath config:type="boolean">false</start_multipath>@<start_multipath config:type="boolean">true</start_multipath>@\' ' + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to update '" + ctrlFile + "' with the removal of the pre-scripts section.\n" + err + '\n' + out)
print RED + 'Unable to update the install control file with the removal of the pre-scripts section; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
command = 'sed -i \'\\|^[ \\t]*<pre-scripts config:type="list">|,\\|^[ \\t]*</pre-scripts>|d\' ' + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to update '" + ctrlFile + "' with the removal of the pre-scripts section.\n" + err + '\n' + out)
print RED + 'Unable to update the install control file with the removal of the pre-scripts section; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
askContents = ' <ask-list config:type="list">\\n <ask>\\n <title>OS LUN: ' + osLUNDevice + '</title> \\n <question>Choose option</question> \\n <selection config:type="list"> \\n <entry> \\n <label>Reboot Server</label> \\n <value>reboot</value> \\n </entry> \\n <entry> \\n <label>Halt Server</label> \\n <value>halt</value> \\n </entry> \\n <entry> \\n <label>Continue to overwrite the LUN device.</label> \\n <value>continue</value> \\n </entry> \\n </selection> \\n <stage>initial</stage> \\n <script> \\n <environment config:type="boolean">true</environment> \\n <feedback config:type="boolean">true</feedback> \\n <debug config:type="boolean">false</debug> \\n <rerun_on_error config:type="boolean">true</rerun_on_error> \\n <source> \\n<![CDATA[ \\n#!/bin/bash \\n \\ncase "$VAL" in \\n "reboot") \\n echo b > /proc/sysrq-trigger \\n exit 0 \\n ;; \\n "halt") \\n echo o > /proc/sysrq-trigger \\n exit 0 \\n ;; \\n "continue") \\n exit 0 \\n ;; \\nesac \\n]]> \\n </source> \\n </script> \\n </ask> \\n </ask-list>'
command = 'sed -i \'\\|<ask-list config:type="list">|,\\|</ask-list>|c\\' + askContents + "' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to update '" + ctrlFile + "' with the current ask-list section.\n" + err + '\n' + out)
print RED + 'Unable to update the install control file with the current ask-list section; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
try:
shutil.copy2(ctrlFile, autoinstImgMountPoint)
except IOError as err:
logger.error("Unable to copy the install control file '" + ctrlFile + "' to '" + autoinstImgMountPoint + "'.\n" + str(err))
print RED + "Unable to copy the install control file '" + ctrlFile + "' to '" + autoinstImgMountPoint + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
command = 'umount ' + autoinstImgMountPoint
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.warn("Unable to unmount '" + autoinstImgMountPoint + "'.\n" + err + '\n' + out)
        print YELLOW + "Unable to unmount '" + autoinstImgMountPoint + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
logger.info('Done updating the install control file with the partition ID that will have the OS installed on it.')
return ctrlFileImg
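# Prepare the autoyast control file for a Serviceguard quorum server or NFS
# node, applying the controller-model, package, and service changes required
# for the selected SLES version.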
def updateSGInstallCtrlFile(programParentDir, upgradeWorkingDir, osUpgradeVersion, serverModel):
dateTimestamp = datetime.datetime.now().strftime('%d%H%M%b%Y')
autoinstImageFile = programParentDir + '/installCtrlFiles/autoinst.img'
autoinstImgMountPoint = '/tmp/autoinstImg_' + dateTimestamp
installCtrlFileDir = upgradeWorkingDir + '/installCtrlFile'
ctrlFile = installCtrlFileDir + '/autoinst.xml'
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Updating the install control file for the Serviceguard node.')
if serverModel == 'ProLiant DL320e Gen8 v2' or serverModel == 'ProLiant DL360 Gen9':
installCtrlFileTemplate = programParentDir + '/installCtrlFiles/sgQS-autoinst.xml'
ctrlFileImg = installCtrlFileDir + '/sgQS_autoinst.img'
else:
installCtrlFileTemplate = programParentDir + '/installCtrlFiles/sgNFS-autoinst.xml'
ctrlFileImg = installCtrlFileDir + '/sgNFS_autoinst.img'
if not os.path.isdir(installCtrlFileDir):
try:
os.mkdir(installCtrlFileDir)
shutil.copy2(installCtrlFileTemplate, ctrlFile)
shutil.copy2(autoinstImageFile, ctrlFileImg)
except OSError as err:
logger.error("Unable to create install control file directory '" + installCtrlFileDir + "'.\n" + str(err))
print RED + "Unable to create the install control file directory '" + installCtrlFileDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
except IOError as err:
logger.error("Unable to copy the install control files to '" + ctrlFile + "'.\n" + str(err))
print RED + "Unable to copy the install control files to '" + ctrlFile + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
if not os.path.isdir(autoinstImgMountPoint):
try:
os.mkdir(autoinstImgMountPoint)
except OSError as err:
logger.error("Unable to create mount point '" + autoinstImgMountPoint + "'.\n" + str(err))
print RED + "Unable to create mount point '" + autoinstImgMountPoint + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
command = 'mount -o loop -o rw ' + ctrlFileImg + ' ' + autoinstImgMountPoint
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to mount '" + ctrlFileImg + "' on '" + autoinstImgMountPoint + "'.\n" + err + '\n' + out)
print RED + "Unable to mount '" + ctrlFileImg + "' on '" + autoinstImgMountPoint + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
if serverModel == 'ProLiant DL360 Gen9':
command = "sed -i 's/B120i/P440ar/g' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to update '" + ctrlFile + "' with the DL360's controller model (P440ar).\n" + err + '\n' + out)
print RED + "Unable to update the install control file with the DL360's controller model (P440ar); fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
if osUpgradeVersion != '12.2':
if osUpgradeVersion == '11.4':
slesPackageDict = {'sapconf': 'saptune',
'rear': 'rear118a'}
else:
slesPackageDict = {'sapconf': 'saptune',
'rear116': 'rear118a'}
for package in slesPackageDict:
command = "sed -i 's,<package>" + slesPackageDict[package] + '</package>,<package>' + package + "</package>,' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to update '" + ctrlFile + "' with the HPE SAP HANA specific package (" + package + ') for SLES for SAP 12 SP1.\n' + err + '\n' + out)
print RED + 'Unable to update the install control file with the HPE SAP HANA specific package for SLES for SAP 12 SP1; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
time.sleep(1.0)
if osUpgradeVersion == '11.4':
removalPackageList = ['perl-Bootloader-YAML',
'git-core',
'perl-Error',
'tuned',
'open-lldp',
'uuidd',
'libcgroup-tools',
'libts-1_0-0',
'lcms2',
'libgif6',
'java-1_7_0-openjdk',
'java-1_7_0-openjdk-headless',
'python-libxml2']
sles11Services = ' <runlevel>\\n <default>3</default>\\n <services config:type="list">\\n <service>\\n <service_name>ntp</service_name>\\n <service_start>3</service_start>\\n </service>\\n </services>\\n </runlevel>'
for package in removalPackageList:
command = "sed -i '/<package>" + package + "<\\/package>/d' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to remove the packages from the control file '" + ctrlFile + "' that are not present for SLES 11.4.\n" + err + '\n' + out + '\n' + str(removalPackageList))
print RED + 'Unable to remove the packages from the control file that are not present for SLES 11.4; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
time.sleep(0.1)
command = "sed -i 's@<loader_type>grub2</loader_type>@<loader_type>grub</loader_type>@' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to change the loader type in the control file '" + ctrlFile + "' from grub2 to grub.\n" + err + '\n' + out)
print RED + 'Unable to change the loader type in the control file from grub2 to grub; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
command = "sed -i '\\|<services-manager>|,\\|</services-manager>|c\\" + sles11Services + "' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Unable to change the "services-manager" section in the control file \'' + ctrlFile + "'.\n" + err + '\n' + out)
print RED + 'Unable to change the "services-manager" section in the control file; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
command = "sed -i 's@<package>quota-nfs</package>@&\\n <package>net-snmp</package>\\n <package>biosdevname</package>@' " + ctrlFile
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Unable to add the net-snmp package to the control file '" + ctrlFile + "'.\n" + err + '\n' + out)
print RED + 'Unable to add the net-snmp package to the control file; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
try:
shutil.copy2(ctrlFile, autoinstImgMountPoint)
except IOError as err:
logger.error("Unable to copy the install control file '" + ctrlFile + "' to '" + autoinstImgMountPoint + "'.\n" + str(err))
print RED + "Unable to copy the install control file '" + ctrlFile + "' to '" + autoinstImgMountPoint + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
command = 'umount ' + autoinstImgMountPoint
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.warn("Unable to unmount '" + autoinstImgMountPoint + "'.\n" + err + '\n' + out)
print YELLOW + "Unable to unmount '" + autoinstImgMountPoint + "'; ." + RESETCOLORS
exit(1)
    logger.info('Done updating the install control file for the Serviceguard node.')
return ctrlFileImg
def updateKdump(cursesThread):
errorMessage = ''
kdumpServiceFile = '/usr/lib/systemd/system/kdump.service'
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Updating kdump.service to address bug 1008170.')
cursesThread.insertMessage(['informative', 'Updating kdump.service to address bug 1008170.'])
cursesThread.insertMessage(['informative', ''])
commandList = ["sed -ri '0,/^\\s*Wants=kdump-rebuild-initrd.service/s//Requires=kdump-rebuild-initrd.service/' /usr/lib/systemd/system/kdump.service", "sed -ri '0,/^\\s*After=local-fs.target/s//After=local-fs.target kdump-rebuild-initrd.service/' /usr/lib/systemd/system/kdump.service"]
logger.info("The list of commands used to update '" + kdumpServiceFile + "' is: " + str(commandList) + '.')
for command in commandList:
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Problems were encountered while updating'" + kdumpServiceFile + "'.\n" + err + '\n' + out)
errorMessage = "Problems were encountered while updating '" + kdumpServiceFile + "'; thus the file will need to be updated manually."
return errorMessage
logger.info('Done updating kdump.service to address bug 1008170.')
return errorMessage
def copyRepositoryTemplate(programParentDir, upgradeWorkingDir):
repositoryTemplate = programParentDir + '/repositoryTemplate/local.repo'
repoDir = upgradeWorkingDir + '/repositoryTemplate'
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Copying the yum repository template needed for the post-upgrade to the pre-upgrade working directory.')
try:
if not os.path.isdir(repoDir):
os.mkdir(repoDir)
shutil.copy(repositoryTemplate, repoDir)
except OSError as err:
logger.error("Unable to create the yum repository directory '" + repoDir + "'.\n" + str(err))
print RED + "Unable to create the yum repository directory '" + repoDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
except IOError as err:
logger.error("Unable to copy the yum repository template from '" + repositoryTemplate + "' to '" + repoDir + "'.\n" + str(err))
print RED + "Unable to copy the yum repository template from '" + repositoryTemplate + "' to '" + repoDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
logger.info('Done copying the yum repository template needed for the post-upgrade to the pre-upgrade working directory.')
def configureMellanox(programParentDir):
errorMessage = ''
pciIdsFile = programParentDir + '/nicDataFile/pci.ids'
mellanoxDriverRPM = programParentDir + '/mellanoxDriver/*.rpm'
mellanoxBusList = []
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Configuring the Mellanox cards by installing the Mellanox driver and updating connectx.conf.')
command = 'lspci -i ' + pciIdsFile + ' -mvv'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Failed to get the lspci output used to get the Compute Node's Mellanox NIC bus list.\n" + err + '\n' + out)
return "Failed to get the lspci output used to get the Compute Node's Mellanox NIC bus list; thus the network configuration was not confirmed."
out = re.sub('\n{2,}', '####', out)
deviceList = out.split('####')
for device in deviceList:
if ('Ethernet controller' in device or 'Network controller' in device) and 'Mellanox' in device:
try:
bus = re.match('\\s*[a-zA-Z]+:\\s+([0-9a-f]{2}:[0-9a-f]{2}\\.[0-9])', device, re.MULTILINE | re.DOTALL).group(1)
logger.info('The bus information for device:\n' + device[0:100] + '\nwas determined to be: ' + bus + '.\n')
mellanoxBusList.append(bus)
except AttributeError as err:
logger.error('An AttributeError was encountered while getting the Mellanox nic bus information: ' + str(err) + '\n' + device[0:200])
return 'An AttributeError was encountered while getting the Mellanox nic bus information; thus the network configuration was not confirmed.'
command = 'rpm -Uvh ' + mellanoxDriverRPM
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Failed to install the Mellanox driver.\n' + err + '\n' + out)
return 'Failed to install the Mellanox driver; thus the network configuration was not confirmed.'
for bus in mellanoxBusList:
command = 'connectx_port_config -d ' + bus + ' -c eth,eth'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Failed to update Mellanox bus '" + bus + "' from infininband to ethernet.\n" + err + '\n' + out)
return 'Failed to update Mellanox from infiniband to ethernet; thus the network configuration was not confirmed.'
logger.info('Done configuring the Mellanox cards by installing the Mellanox driver and updating connectx.conf.')
return errorMessage
def checkForMellanox(programParentDir, upgradeWorkingDir, osDist, osDistLevel):
pciIdsFile = programParentDir + '/mellanoxFiles/pci.ids'
mellanoxPresent = False
if osDist == 'SLES':
mellanoxDriverRPMS = glob.glob(programParentDir + '/mellanoxFiles/SLES/' + osDistLevel + '/*.rpm')
else:
mellanoxDriverRPMS = glob.glob(programParentDir + '/mellanoxFiles/RHEL/' + osDistLevel + '/*.rpm')
mellanoxDriverDir = upgradeWorkingDir + '/mellanoxDriver'
nicDataFileDir = upgradeWorkingDir + '/nicDataFile'
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info('Checking to see if Mellanox NIC cards are present.')
print GREEN + 'Checking to see if Mellanox NIC cards are present.' + RESETCOLORS
command = 'lspci -i ' + pciIdsFile + ' -mvv'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error('Failed to get the lspci output which is used to check if Mellanox NIC cards are present.\n' + err + '\n' + out)
print RED + 'Failed to get the lspci output which is used to check if Mellanox NIC cards are present; fix the problem and try again; exiting program execution.' + RESETCOLORS
exit(1)
out = re.sub('\n{2,}', '####', out)
deviceList = out.split('####')
for device in deviceList:
if ('Ethernet controller' in device or 'Network controller' in device) and 'Mellanox' in device:
mellanoxPresent = True
break
if mellanoxPresent:
try:
os.mkdir(mellanoxDriverDir)
for file in mellanoxDriverRPMS:
shutil.copy2(file, mellanoxDriverDir)
time.sleep(1.0)
except OSError as err:
logger.error("Unable to create the Mellanox driver directory '" + mellanoxDriverDir + "'.\n" + str(err))
print RED + "Unable to create the Mellanox driver directory '" + mellanoxDriverDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
except IOError as err:
logger.error("Unable to copy the Mellanox driver to '" + mellanoxDriverDir + "'.\n" + str(err))
print RED + "Unable to copy the Mellanox driver to '" + mellanoxDriverDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
try:
shutil.copy2(pciIdsFile, nicDataFileDir)
except IOError as err:
logger.error("Unable to copy the pci.ids file to '" + nicDataFileDir + "'.\n" + str(err))
print RED + "Unable to copy the pci.ids file to '" + nicDataFileDir + "'; fix the problem and try again; exiting program execution." + RESETCOLORS
exit(1)
logger.info('Done checking to see if Mellanox NIC cards are present.')
def getOSUpgradeVersion(supportedOSVersions):
logger = logging.getLogger('coeOSUpgradeLogger')
while True:
if len(supportedOSVersions) > 1:
count = 0
choiceNumberDict = {}
prompt = 'Select the OS version that the server is being upgraded to ('
for version in supportedOSVersions:
osVersion = supportedOSVersions[count]
if osVersion[5:] == '11.4':
print str(count + 1) + '. ' + osVersion[:4] + ' ' + osVersion[5:] + ' (Serviceguard Quorum Servers Only)'
else:
print str(count + 1) + '. ' + osVersion[:4] + ' ' + osVersion[5:]
count += 1
if count == 1:
prompt = prompt + str(count)
else:
prompt = prompt + ',' + str(count)
choiceNumberDict[str(count)] = None
response = raw_input(prompt + ') [1]: ')
response = response.strip()
if response == '':
response = '1'
elif response not in choiceNumberDict:
print 'An invalid selection was made; please try again.\n'
continue
osVersion = supportedOSVersions[int(response) - 1]
else:
osVersion = supportedOSVersions[0]
while True:
response = raw_input('The server is going to be upgraded to ' + osVersion[:4] + ' ' + osVersion[5:] + '; is this correct [y|n|q]: ')
response = response.strip().lower()
            if response != 'y' and response != 'n' and response != 'q':
                print 'An invalid entry was provided; please try again.\n'
                continue
else:
if response == 'y':
validOS = True
elif response == 'n':
validOS = False
else:
logger.info('Upgrade was cancelled while selecting the OS to upgrade to.')
exit(0)
break
if validOS:
osUpgradeVersion = osVersion[5:]
logger.info('The choice to upgrade to ' + osUpgradeVersion + ' was made.')
break
else:
continue
return osUpgradeVersion
def setTimezone(programParentDir, cursesThread):
errorMessage = ''
linkFileExists = False
timezoneFile = programParentDir + '/timezoneData/timezoneLinks'
logger = logging.getLogger('coeOSUpgradeLogger')
logger.info("Setting the server's time zone.")
cursesThread.insertMessage(['informative', "Setting the server's time zone."])
cursesThread.insertMessage(['informative', ''])
try:
with open(timezoneFile) as f:
timezoneLinkData = f.readlines()
except IOError as err:
logger.error("Unable to get the time zone link list from '" + timezoneFile + "'.\n" + str(err))
errorMessage = "Unable to get the time zone link list; thus the server's time zone was not set."
return errorMessage
try:
os.remove('/etc/localtime')
except OSError as err:
        logger.warn('The following error was encountered while removing /etc/localtime: ' + str(err))
for link in timezoneLinkData:
link = link.strip()
if os.path.isfile(link):
linkFileExists = True
command = 'ln ' + link + ' /etc/localtime'
result = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = result.communicate()
if result.returncode != 0:
logger.error("Problems were encountered while linking /etc/localtime to '" + link + "'.\n" + err + '\n' + out)
errorMessage = "Problems were encountered while linking /etc/localtime; thus the server's time zone was not set."
return errorMessage
break
if not linkFileExists:
logger.error('There was not a link file present for /etc/localtime to link against; the selection of link files was: ' + str(timezoneLinkData))
errorMessage = "There was not a link file present for /etc/localtime to link against; thus the server's time zone was not set."
logger.info("Done setting the server's time zone.")
return errorMessage | [
"[email protected]"
] | |
f68d8f0d5878ccd2ea18009cd682be3667f78cec | 7ba05e73515c14fb8d2f3d056b51102131171a11 | /exercise_funktions/perfect_number.py | ceb4a03e0b152d53bd91c0d0ee007e347cd9bfd5 | [] | no_license | gyurel/SoftUni-Basics-and-Fundamentals | bd6d5fa8c9d0cc51f241393afd418633a66c65dc | 184fc5dfab2fdd410aa8593f4c562fd56211c727 | refs/heads/main | 2023-07-05T11:16:58.966841 | 2021-08-31T19:25:40 | 2021-08-31T19:25:40 | 401,485,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | def perfect_number(number):
    # collect the proper divisors of the number
    list_of_devisors = [devisor for devisor in range(1, number) if number % devisor == 0]
    # a perfect number equals the sum of its proper divisors
    print("We have a perfect number!" if sum(list_of_devisors) == number else "It's not so perfect.")
number = int(input())
perfect_number(number)
| [
"[email protected]"
] | |
95da0414284b1f8b0e0e098a72c08e474d19c39a | c3e4afc3070fb611b38e2e9e6a1ae3e57f10fb34 | /dbom/rsync_demo.py | 4b9feb71aacb0e69db812a81c2cd2f16ff378c68 | [] | no_license | eat1124/TSDBOM | 6744a11fa3245012805d3e10321e385be0623c7d | 1332c9591c7813ee99e0fc597cd58ff1a7b798bd | refs/heads/master | 2022-12-01T16:39:10.800234 | 2019-09-03T01:33:45 | 2019-09-03T01:33:45 | 174,254,774 | 0 | 0 | null | 2022-11-22T02:55:53 | 2019-03-07T02:14:15 | JavaScript | UTF-8 | Python | false | false | 18,130 | py | """
Rsync自动化备份
"""
import paramiko
import re
class RsyncBackup(object):
"""
    Rsync backup:
        install Rsync on a single host
        install Rsync on a group of hosts
    configure multiple Rsync modules
    add the virtual user
    start the service
    configure the password files
    enable start on boot
    server side / client side
    command options used when running Rsync
    arguments used when running Rsync: server-side file path, virtual user name, IP address, module name
"""
def __init__(self, server):
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.msg = ''
self.server = server
try:
self.client.connect(hostname=server['hostname'], username=server['username'], password=server['password'], timeout=2)
except:
            self.msg = 'Remote connection failed.'
        else:
            self.msg = 'Remote connection succeeded.'
self.verify_shell_cmd = ';if [ $? -eq 0 ]; then' + '\n' + \
' echo "cmd_succeed"' + '\n' + \
'else' + '\n' + \
' echo "cmd_failed"' + '\n' + \
'fi'
# self.sudo_permission = 'echo {0}|sudo sh -c '.format(server['password']) if server['username'] != 'root' else ''
# sudo sh -c 'echo "This is testPage." >/usr/local/nginx/html/index.html'
def close_connection(self):
self.client.close()
def run_shell_cmd(self, shell_cmd, get_pty=True):
result = 1
info = ''
        # the executed command (and password handling) differs for root and non-root (sudo) accounts
        print("Executing command: echo '{0}'|sudo -S sh -c '{1}'".format(self.server["password"], shell_cmd) if self.server["username"] != "root" else shell_cmd)
# root/普通用户
stdin, stdout, stderr = self.client.exec_command("echo '{0}'|sudo -S sh -c '{1} {2}'".format(self.server["password"], shell_cmd, self.verify_shell_cmd) if self.server["username"] != "root" else shell_cmd + self.verify_shell_cmd, get_pty=get_pty)
stdout_init = ''
stderr_init = ''
if not stderr.readlines():
for num, data in enumerate(stdout.readlines()):
stdout_init += data
if 'cmd_succeed' in stdout_init:
stdout_init = stdout_init.replace('cmd_succeed', '')
result = 1
else:
stdout_init = stdout_init.replace('cmd_failed', '')
result = 0
info = stdout_init if self.server["username"] == "root" else stdout_init.split(":", maxsplit=1)[1]
else:
result = 0
for data in stderr.readlines():
stderr_init += data
info = stderr_init
return result, info
def check_file_path_existed(self, file_path):
"""
        Check whether the given file path exists (directory, file, or missing).
:param file_path:
:return:
"""
        check_file_cmd = 'if [ -d {0} ]; then'.format(file_path) + '\n' + \
' echo "doc"' + '\n' + \
                         'elif [ -f {0} ]; then'.format(file_path) + '\n' + \
' echo "file"' + '\n' + \
'else' + '\n' + \
' echo "not existed"' + '\n' + \
'fi'
result, info = self.run_shell_cmd(check_file_cmd)
if result == "doc":
result = 1
elif result == "file":
result = 2
else:
result = 0
return result, info
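    # Illustrative generated snippet for a hypothetical file_path=/tmp/x:
    #   if [ -d /tmp/x ]; then
    #       echo "doc"
    #   elif [ -f /tmp/x ]; then
    #       echo "file"
    #   else
    #       echo "not existed"
    #   fi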
def install_rsync_by_yum(self):
result, info = self.run_shell_cmd('yum install rsync -y')
return result, info
def check_ever_existed(self):
"""
查看rsync是否已经安装
:return:
"""
result, info = self.run_shell_cmd('rsync --help')
return result, info
def set_rsync_server_config(self, model_list, client_ip):
"""
        secrets file defaults to /etc/rsync.password
        auth users defaults to rsync_backup
        :param model_list: [{"origin_path": "", "dest_path": "", "model_name": ""}]
:return:
"""
result = 1
info = ""
        # set the server-side password
        server_passwd_ret, server_passwd_info = self.set_server_password()
        if server_passwd_ret == 0:
            result = 0
            info = "Failed to configure the server-side password: {0}".format(server_passwd_info)
        else:
            # configure the virtual rsync user
            rsync_virtual_result, rsync_virtual_info = self.set_rsync_virtual_auth()
            if rsync_virtual_result == 0:
                result = 0
                info = "Failed to set up the virtual user rsync: {0}".format(rsync_virtual_info)
else:
base_config = "uid = rsync" + '\n' + \
'gid = rsync' + '\n' + \
'use chroot = no' + '\n' + \
'max connections = 200' + '\n' + \
'timeout = 300' + '\n' + \
'pid file = /var/run/rsyncd.pid' + '\n' + \
'lock file = /var/run/rsyncd.lock ' + '\n' + \
'log file = /var/log/rsyncd.log' + '\n' + \
'fake super = yes'
                # add each path to the config file and set permissions on the backup directory
                for temp_model in model_list:
                    origin_path = temp_model['origin_path']
                    mode_auth_ret, mode_auth_info = self.run_shell_cmd('chown -R rsync.rsync {0}'.format(origin_path))
                    if mode_auth_ret == 0:
                        return mode_auth_ret, "Failed to set permissions on the source backup path: {0}".format(mode_auth_info)
base_config += '\n' + \
'[{0}]'.format(temp_model['model_name']) + '\n' + \
'path = {0}'.format(origin_path if origin_path.endswith("/") else "{0}/".format(origin_path)) + '\n' + \
'ignore errors' + '\n' + \
'read only = false' + '\n' + \
'list = false' + '\n' + \
'hosts allow = {0}/24'.format(client_ip) + '\n' + \
'auth users = rsync_backup' + '\n' + \
'secrets file = /etc/rsync.password'
rsync_config_result, rsync_config_info = self.run_shell_cmd("""echo "{0}" > /etc/rsyncd.conf""".format(base_config))
if rsync_config_result == 0:
result = 0
info = "Rsync配置文件写入失败:{0}".format(rsync_config_info)
else:
result = 1
info = "Rsync配置成功。"
                # open the firewall port
                # port_result, port_info = self.open_873_port()
                # if port_result == 0:
                #     result = 0
                #     info = "Failed to open port 873: {0}".format(port_info)
                # else:
                #     # start rsync
                #     start_rysnc_result, start_rsync_info = self.start_rsync()
                #     if start_rysnc_result == 0:
                #         result = 0
                #         info = "Failed to start rsync: {0}".format(start_rsync_info)
return result, info
def set_rsync_virtual_auth(self):
result, info = self.run_shell_cmd('cat /etc/passwd')
if result == 1 and 'rsync:' not in info:
result, info = self.run_shell_cmd('useradd rsync -s /sbin/nologin -M')
return result, info
def set_client_password(self):
"""
        Set the client-side rsync password.
:return:
"""
server_passwd_result, server_passwd_info = self.run_shell_cmd('echo "{0}" > /etc/rsync_server.password'.format('password'))
if server_passwd_result == 1:
chmod_result, chmod_info = self.run_shell_cmd('chmod 600 /etc/rsync_server.password')
if chmod_result == 1:
result = 1
info = "客户端Rsync密码设置成功。"
else:
result = 0
info = "客户端Rsync密码权限设置失败:{0}".format(chmod_info)
else:
result = 0
info = "客户端Rsync密码设置失败:{0}".format(server_passwd_info)
return result, info
def set_server_password(self):
"""
        Set the server-side rsync password ("user:password" pair).
:return:
"""
server_passwd_result, server_passwd_info = self.run_shell_cmd('echo "{0}" > /etc/rsync.password'.format('rsync_backup:password'))
if server_passwd_result == 1:
chmod_result, chmod_info = self.run_shell_cmd('chmod 600 /etc/rsync.password')
if chmod_result == 1:
result = 1
info = "服务器Rsync密码设置成功。"
else:
result = 0
info = "服务器Rsync密码权限设置失败:{0}".format(chmod_info)
else:
result = 0
info = "服务器Rsync密码设置失败:{0}".format(server_passwd_info)
return result, info
def cat_rsync_log(self):
shell_cmd = 'cat /var/log/rsyncd.log'
result = 1
info = ''
print("本次执行命令: echo '{0}'|sudo -S sh -c '{1}'".format(self.server["password"], shell_cmd) if self.server["username"] != "root" else shell_cmd)
stdin, stdout, stderr = self.client.exec_command("echo '{0}'|sudo -S sh -c '{1}' {2}".format(self.server["password"], shell_cmd, self.verify_shell_cmd) if self.server["username"] != "root" else shell_cmd, get_pty=True)
stdout_init = ''
stderr_init = ''
if not stderr.readlines():
pre_task = ""
num = 0
for data in stdout.readlines()[::-1]:
com = re.compile('\[\d+\]')
task_list = com.findall(data)
if task_list:
task_id = task_list[0][1:-1]
if num > 0 and pre_task != task_id:
break
if task_id == pre_task or num == 0:
stdout_init += data
pre_task = task_id
num += 1
info = stdout_init
else:
result = 0
for data in stderr.readlines():
stderr_init += data
info = stderr_init
return result, info
def start_rsync(self):
"""
        Start rsync in daemon mode; no pseudo-terminal is needed, so get_pty=False.
:return:
"""
result, info = self.run_shell_cmd('ps -ef|grep rsync|grep -v grep')
if "rsync" in info:
result = 0
            info = 'rsync is already running.'
else:
result, info = self.run_shell_cmd('rsync --daemon', get_pty=False)
return result, info
def stop_rsync(self):
result, info = self.run_shell_cmd('ps -ef|grep rsync|grep -v grep')
if "rsync" not in info:
result = 0
            info = 'rsync is not running.'
else:
result, info = self.run_shell_cmd('pkill rsync')
return result, info
def open_873_port(self):
"""
与centos版本相关
:return:
"""
# 查看centos版本
result, info = 1, ""
centOS_version_result, info = self.run_shell_cmd("cat /etc/redhat-release")
if " 7." in info:
set_port_result, set_port_info = self.run_shell_cmd('firewall-cmd --zone=public --add-port=873/tcp --permanent') # centos7
if set_port_result == 1:
restart_firewalld_result, restart_firewalld_info = self.run_shell_cmd('systemctl restart firewalld.service')
if restart_firewalld_result == 1:
result = 1
info = "防火墙设置成功。"
else:
result = 0
info = "防火墙设置失败:{0}".format(restart_firewalld_info)
else:
result = 0
info = "端口设置失败:{0}".format(set_port_info)
else:
set_port_result, set_port_info = self.run_shell_cmd('/sbin/iptables -I INPUT -p tcp --dport 873 -j ACCEPT') # centos6
if set_port_result == 1:
save_port_result, save_port_info = self.run_shell_cmd('/etc/init.d/iptables save')
if save_port_result == 1:
restart_iptables_result, restart_iptables_info = self.run_shell_cmd('service iptables restart')
if restart_iptables_result == 1:
result = 1
info = "防火墙设置成功。"
else:
result = 0
info = "防火墙重启失败:{0}".format(restart_iptables_info)
else:
result = 0
info = "端口设置保存失败:{0}".format(save_port_info)
else:
result = 0
info = "端口设置失败:{0}".format(set_port_info)
return result, info
def restart_rsync(self):
result, info = 1, ""
check_rsync_result, check_rsync_info = self.run_shell_cmd('ps -ef|grep rsync|grep -v grep')
if "rsync" in check_rsync_info:
pkill_rsync_result, pkill_rsync_info = self.run_shell_cmd('pkill rsync', get_pty=False)
if pkill_rsync_result == 1:
run_rsync_result, run_rsync_info = self.run_shell_cmd('rsync --daemon', get_pty=False)
if run_rsync_result == 1:
result = 1
info = "rsync启动成功。"
else:
result = 0
info = run_rsync_info
else:
run_rsync_result, run_rsync_info = self.start_rsync()
if run_rsync_result == 1:
result = 1
info = "rsync启动成功。"
else:
result = 0
info = run_rsync_info
else:
run_rsync_result, run_rsync_info = self.start_rsync()
if run_rsync_result == 1:
result = 1
info = "rsync启动成功。"
else:
result = 0
info = run_rsync_info
return result, info
def rsync_push(self, local_dir, dest_server, model_name, delete=False):
"""
        Push a backup with rsync -avz.
        :param local_dir: local directory to push
        :param dest_server: target server address
        :param model_name: rsync module name
        :param delete: mirror mode (--delete); the default is an incremental sync
:return:
"""
        # exception handling
if delete:
result, info = self.run_shell_cmd(r'rsync -avz {0} rsync_backup@{1}::{2}/ --password-file=/etc/rsync_server.password --delete'.format(local_dir, dest_server, model_name))
else:
result, info = self.run_shell_cmd(r'rsync -avz {0} rsync_backup@{1}::{2}/ --password-file=/etc/rsync_server.password'.format(local_dir, dest_server, model_name))
return result, info
def rsync_pull(self, local_dir, dest_server, model_name, delete=False):
"""
        Pull a backup with rsync -avz.
        :param local_dir: local directory to pull into
        :param dest_server: source server address
        :param model_name: rsync module name
        :param delete: mirror mode (--delete); the default is an incremental sync
:return:
"""
        # exception handling
if delete:
result, info = self.run_shell_cmd(r'rsync -avz rsync_backup@{1}::{2}/ {0} --password-file=/etc/rsync_server.password --delete'.format(local_dir, dest_server, model_name))
else:
result, info = self.run_shell_cmd(r'rsync -avz rsync_backup@{1}::{2}/ {0} --password-file=/etc/rsync_server.password'.format(local_dir, dest_server, model_name))
return result, info
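    # Illustrative composed command for a pull with delete=True (hypothetical host/module):
    #   rsync -avz [email protected]::test/ /home/rsync_demo/clientcenter/ \
    #       --password-file=/etc/rsync_server.password --delete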
if __name__ == '__main__':
server = {
'hostname': '192.168.85.124',
'username': 'root',
'password': 'password'
}
# server = {
# 'hostname': '192.168.85.123',
# 'username': 'rsync_demo',
# 'password': 'password'
# }
rsync_backup = RsyncBackup(server)
# result, info = rsync_backup.set_rsync_virtual_auth()
# print(rsync_backup.msg)
# result, info = rsync_backup.start_rsync()
# result, info = rsync_backup.run_shell_cmd("ls /")
# result, info = rsync_backup.stop_rsync()
# result, info = rsync_backup.run_shell_cmd('ls')
# result, info = rsync_backup.install_rsync_by_yum()
# result, info = rsync_backup.check_ever_existed()
# result, info = rsync_backup.cat_rsync_log()
# result, info = rsync_backup.set_client_password()
# result, info = rsync_backup.rsync_pull(r'/home/rsync_demo/clientcenter/', '192.168.85.124', 'test', delete=True)
result, info = rsync_backup.rsync_push(r'/home/rsync_demo/clientcenter/', '192.168.85.124', 'test', delete=True)
# result, info = rsync_backup.tail_rsync_log()
# model_list: [{"origin_path": "", ""dest_path": ""}]
# result, info = rsync_backup.set_server_password()
# result, info = rsync_backup.set_rsync_server_config([{"origin_path": "/home/rsync_demo/datacenter/", "dest_path": "", "model_name": "test"}], "192.168.85.123")
rsync_backup.close_connection()
# sudo sh -c 'echo "This is testPage." >/usr/local/nginx/html/index.html'
    # runs a whole string as one complete shell command
    # sudo grants only part of root's privileges
print(result, info)
    # Planned changes:
    # 1. when a database save path is given, warn that single-file copy is not supported
    # 2. files directly under the root directory cannot be offered for selection
    # 3. add a pull-based transfer mode
    # 4. configure only the server side
| [
"[email protected]"
] | |
2486172e67711750aa62070ee6fd2a6827dbc2e8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | backup/user_117/ch34_2019_08_28_17_09_17_697744.py | 9ca0d80832e96d242b07897a248b9972577d03eb | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | d = float(input("deposito inicial: "))
j = float(input("taxa de juros: "))
i = 0
while i < 23:
    # assumes compound growth, balance = d * (1 + j) ** i (the original '+' mixed units)
    t = d * (1 + j) ** i
    print(f"{t:.2f}")
    i += 1  # advance the period so the loop terminates
"[email protected]"
] | |
11e50fe8f7ccfaa25b3780c5fdcc0732b9de61fc | cbc4782342ad277b9f8cda805a57854ba3468edb | /SVDBias/SVDBias-pe.py | 5f4991f3cb6d026b4af15a99017a37e13c296add | [] | no_license | qingkongmengnuan/BayesianRS | f7f1cdc7ca6336e3d18e98e441b65aa767846005 | e440f6bb26bdc9485d2ae15826c0900b7457b92d | refs/heads/master | 2022-11-30T20:00:20.305454 | 2020-08-11T08:25:11 | 2020-08-11T08:25:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,537 | py | # coding:utf-8
'''
@author: Jason.F
@date: 2019.07.12
@function: Implementation: SVDBias
Dataset: Pinterest-20
Evaluation: hit ratio, ndcg
Squared loss function with explicit rating.
'''
import pandas as pd
import numpy as np
import math
from collections import defaultdict
import heapq
import random
#1.Loading the dataset (Pinterest-20)
def load_rating_file_as_list(filename):
ratingList = []
with open(filename, "r") as f:
line = f.readline()
while line != None and line != "":
arr = line.split("\t")
user, item = int(arr[0]), int(arr[1])
ratingList.append([user, item])
line = f.readline()
return ratingList
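# Assumed file layout (our reading of these loaders, not stated in the script):
# each line of a .rating file is tab-separated, e.g. "userID<TAB>itemID<TAB>rating[<TAB>timestamp]".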
def load_negative_file_as_list(filename):
negativeList = []
with open(filename, "r") as f:
line = f.readline()
while line != None and line != "":
arr = line.split("\t")
negatives = []
for x in arr[1: ]:
negatives.append(int(x))
negativeList.append(negatives)
line = f.readline()
return negativeList
def load_rating_file_as_matrix(filename):
    # Read a .rating file and return a dense user-item rating matrix (numpy array).
#The first line of .rating file is: num_users\t num_items
# Get number of users and items
num_users, num_items = 0, 0
with open(filename, "r") as f:
line = f.readline()
while line != None and line != "":
arr = line.split("\t")
u, i = int(arr[0]), int(arr[1])
num_users = max(num_users, u)
num_items = max(num_items, i)
line = f.readline()
# Construct matrix
#mat = sp.dok_matrix((num_users+1, num_items+1), dtype=np.float32)
mat = np.zeros((num_users+1, num_items+1))
with open(filename, "r") as f:
line = f.readline()
while line != None and line != "":
arr = line.split("\t")
user, item, rating = int(arr[0]), int(arr[1]), float(arr[2])
#if (rating > 0.0): mat[user, item] = 1.0
mat[user, item] = rating
line = f.readline()
return mat
trainMatrix = load_rating_file_as_matrix("./data/pinterest-20.train.rating")
testRatings = load_rating_file_as_list("./data/pinterest-20.test.rating")
testNegatives = load_negative_file_as_list("./data/pinterest-20.test.negative")
print('Dataset Statistics: Interaction = %d, User = %d, Item = %d, Sparsity = %.4f' % \
(len(trainMatrix[np.where(trainMatrix!= 0)]),trainMatrix.shape[0],trainMatrix.shape[1],\
len(trainMatrix[np.where(trainMatrix!= 0)])/(trainMatrix.shape[0]*trainMatrix.shape[1]) ))
#2. SVDBias class
class SVDBias():
def __init__(self, R, num_ng=4):
"""
Perform matrix factorization to predict empty entries in a matrix.
Arguments
- R (ndarray) : user-item rating matrix
- num_ng (int) : number of negative items
"""
self.R = R
self.num_users, self.num_items = R.shape
self.num_ng = num_ng
# Create a list of training samples
pos_samples = [
(i, j, self.R[i, j])
for i in range(self.num_users)
for j in range(self.num_items)
if self.R[i, j] > 0
]
'''
#smapling the negative items
for x in self.samples:
u = x[0]
for t in range(self.num_ng):
j = np.random.randint(self.num_items)
#while (u, j) in self.R:
while self.R[u, j] > 0:
j = np.random.randint(self.num_items)
self.samples.append([u, j, 0])
'''
        # sampling the negative items
neg_samples = random.sample([
(i, j, self.R[i, j])
for i in range(self.num_users)
for j in range(self.num_items)
if self.R[i, j] == 0
], len(pos_samples)*num_ng)
self.samples = pos_samples + neg_samples
def train(self, K, alpha=0.001, beta=0.01, epochs=20):
'''
- alpha (float) : learning rate
- beta (float) : regularization parameter
- K (int) : number of latent dimensions
-epochs(int) : number of iterations
'''
self.K = K
self.alpha = alpha
self.beta = beta
self.epochs = epochs
# Initialize user and item latent feature matrice
self.P = np.random.normal(scale=1./self.K, size=(self.num_users, self.K))
self.Q = np.random.normal(scale=1./self.K, size=(self.num_items, self.K))
# Initialize the biases
self.b_u = np.zeros(self.num_users)
self.b_i = np.zeros(self.num_items)
self.b = np.mean(self.R[np.where(self.R != 0)])
# Perform stochastic gradient descent for number of iterations
training_process = []
for i in range(self.epochs):
np.random.shuffle(self.samples)
self.sgd()
#if (i+1) % 10 == 0:
# mse = self.mse()
# print("Iteration: %d ; error = %.4f" % (i+1, mse))
return self.full_matrix()
def mse(self):
"""
A function to compute the total mean square error
"""
xs, ys = self.R.nonzero()
predicted = self.full_matrix()
error = 0
for x, y in zip(xs, ys):
error += pow(self.R[x, y] - predicted[x, y], 2)
return np.sqrt(error)
def sgd(self):
"""
        Perform stochastic gradient descent
"""
for i, j, r in self.samples:
# Computer prediction and error
prediction = self.get_rating(i, j)
e = (r - prediction)
# Update biases
self.b_u[i] += self.alpha * (e - self.beta * self.b_u[i])
self.b_i[j] += self.alpha * (e - self.beta * self.b_i[j])
# Create copy of row of P since we need to update it but use older values for update on Q
P_i = self.P[i, :][:]
# Update user and item latent feature matrices
self.P[i, :] += self.alpha * (e * self.Q[j, :] - self.beta * self.P[i,:])
self.Q[j, :] += self.alpha * (e * P_i - self.beta * self.Q[j,:])
def get_rating(self, i, j):
"""
Get the predicted rating of user i and item j
"""
prediction = self.b + self.b_u[i] + self.b_i[j] + self.P[i, :].dot(self.Q[j, :].T)
return prediction
def full_matrix(self):
"""
        Compute the full matrix using the resulting biases, P and Q
"""
return self.b + self.b_u[:,np.newaxis] + self.b_i[np.newaxis:,] + self.P.dot(self.Q.T)
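# Worked illustration (hypothetical numbers): with global mean b = 3.5, user bias
# b_u = 0.2, item bias b_i = -0.1 and latent dot product p_u . q_i = 0.4, the
# biased-SVD prediction is r_hat(u, i) = 3.5 + 0.2 - 0.1 + 0.4 = 4.0.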
#3. Training and Evaluating
def getHitRatio(ranklist, gtItem):
for item in ranklist:
if item == gtItem:
return 1
return 0
def getNDCG(ranklist, gtItem):
for i in range(len(ranklist)):
item = ranklist[i]
if item == gtItem:
return math.log(2) / math.log(i+2)
return 0
print ("%3s%20s%20s" % ('K','HR@10', 'NDCG@10'))
mdl = SVDBias(R=trainMatrix, num_ng=4)
for K in [8,16,32,64]:  # K: number of latent factors
nR = mdl.train(K=K, alpha=0.001, beta=0.01, epochs=20)
hits = []
ndcgs = []
for u, i in testRatings:
scorelist= [ [ni,nR[u,ni]] for ni in testNegatives[u]]
scorelist.append([i,nR[u,i]])
map_item_score = {}
    for item, rate in scorelist:  # turn the score list into a dict
map_item_score[item] = rate
ranklist = heapq.nlargest(10, map_item_score, key=map_item_score.get)#default Topn=10
hr = getHitRatio(ranklist, i)
hits.append(hr)
ndcg = getNDCG(ranklist, i)
ndcgs.append(ndcg)
hitratio,ndcg = np.array(hits).mean(), np.array(ndcgs).mean()
print ("%3d%20.6f%20.6f" % (K, hitratio, ndcg))
'''
nohup python -u SVDBias-pe.py > svdbias-pe.log &
Dataset Statistics: Interaction = 1408394, User = 55187, Item = 9916, Sparsity = 0.0026
K HR@10 NDCG@10
8 0.269919 0.138828
16 0.274449 0.140884
32 0.274286 0.141035
64 0.274576 0.141021
''' | [
"[email protected]"
] | |
abcc57f99068124670c61612d93d80115daf1130 | 0b88201be895a25c8c321481615b4965f529d6da | /CDTB_Seg/model/SEG_BOT/model.py | 64b039c9c3e6f5e3a9a391a5db78055dd22219a1 | [
"BSD-2-Clause",
"MIT"
] | permissive | NLP-Discourse-SoochowU/segmenter2020 | 1e8335da56b26f52ed48eb462047b9fe9b1e10df | fd71b353c59bcb82ec2cd0bebf943040756faa63 | refs/heads/master | 2023-01-13T23:14:37.078780 | 2020-11-24T05:07:26 | 2020-11-24T05:07:26 | 283,890,012 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,509 | py | # -*- coding: utf-8 -*-
"""
@Author: Lyzhang
@Date:
@Description:
"""
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from config import *
from model.SEG_BOT.pointer import Pointer
class Segment_Model(nn.Module):
def __init__(self, word_emb):
super(Segment_Model, self).__init__()
# random
self.word_emb = nn.Embedding(word_emb.shape[0], WORDEMB_SIZE)
# nre_pre = np.array([arr[0:3] for arr in pretrained])
self.word_emb.weight.data.copy_(torch.from_numpy(word_emb))
self.word_emb.weight.requires_grad = True if EMBED_LEARN else False
self.pos_emb = nn.Embedding(POS_TAG_NUM, POS_TAG_SIZE)
self.pos_emb.weight.requires_grad = True
if RNN_TYPE == "LSTM":
self.sent_encode = nn.LSTM(WORDEMB_SIZE + POS_TAG_SIZE, HIDDEN_SIZE // 2, num_layers=RNN_LAYER,
dropout=DROPOUT, bidirectional=True, batch_first=True)
else:
self.sent_encode = nn.GRU(WORDEMB_SIZE + POS_TAG_SIZE, HIDDEN_SIZE // 2, num_layers=RNN_LAYER,
dropout=DROPOUT, bidirectional=True, batch_first=True)
# encoder + decoder
self.encoder = nn.GRU(HIDDEN_SIZE, HIDDEN_SIZE // 2, bidirectional=True, batch_first=True)
self.decoder = nn.GRU(HIDDEN_SIZE, HIDDEN_SIZE, batch_first=True)
# self.decoder = nn.GRU(WORDEMB_SIZE + POS_TAG_SIZE, HIDDEN_SIZE, batch_first=True)
# self.residual_drop = nn.Dropout(RESIDUAL_DROPOUT)
self.context_dense = nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE)
self.pointer = Pointer(HIDDEN_SIZE, HIDDEN_SIZE, 1, HIDDEN_SIZE)
self.nnDropout = nn.Dropout(ENC_DEC_DROPOUT)
def enc_decode_(self, rnn_inputs, gcn_hidden, decode_mask, decode_indices):
""" gcn_hidden: (batch, seq_len, hidden)
Bi_affine attention
        decode_indices: (batch, boundary num + 1, num_in(1 or 2)); the start
        position right after each boundary, plus the initial position 0
"""
gcn_hidden = self.nnDropout(gcn_hidden)
# encoder
e_out, hidden = self.encoder(gcn_hidden)
# e_out, hidden = gcn_hidden, Var(torch.zeros(2, 1, HIDDEN_SIZE // 2)).cuda(CUDA_ID)
e_out = self.nnDropout(e_out)
# e_out = gcn_hidden + self.residual_drop(e_out)
# decode
        # use the hidden states from both ends of the sequence (the context hidden) as the decoder's initial state. (batch, hidden)
init_states = hidden.transpose(0, 1).view(1, -1)
        # use the GCN-encoded vectors of the boundary words as the decoder inputs, d_inputs = (batch, num_boundary, hidden)
decode_indices = decode_indices.unsqueeze(0).unsqueeze(0)
d_inputs = gcn_hidden[torch.arange(BATCH_SIZE), decode_indices].squeeze(0)
# d_inputs = e_out[torch.arange(BATCH_SIZE), decode_indices].squeeze(0)
# d_inputs = rnn_inputs[torch.arange(BATCH_SIZE), decode_indices].squeeze(0)
d_inputs = self.nnDropout(d_inputs)
        # Does a sequence with no boundary mean there is no decoder input? No: the first decoder input is
        # always the first element of the sequence, and with no boundary the decoder simply predicts the end.
d_out, _ = self.decoder(d_inputs, init_states.unsqueeze(0)) # d_out = {batch, num_boundary, hidden}
d_out = self.nnDropout(d_out)
# Bi_affine attention between the decoder outputs and the hidden states of the encoder side
        # e_out = (batch, encoder_len, hidden), d_out = (batch, decoder_len, hidden). Prior work computes the
        # probabilities over every encoder position and then keeps only the allowed region for each step. The
        # alternative is to fix the span for each decoding step first and compute bi-affine attention only
        # against the encoder states inside it. The two schemes are equivalent in principle, so the prior
        # scheme is adopted for the attention here.
# bi_affine: [batch, length_decoder, length_encoder, num_labels]
attn = self.pointer(e_out, d_out).squeeze(-1)
# input(attn.size())
decode_mask = decode_mask.unsqueeze(0).float()
mask_pad_ = (1 - decode_mask) * SMOO_VAL
# input(mask_pad_.size())
masked_attn = attn.mul(decode_mask) + mask_pad_
# scoring
boundary_predict = masked_attn.log_softmax(dim=2)
# (batch, len_decoder, len_encoder) masked
return boundary_predict
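    # Shape walk-through with hypothetical sizes: for seq_len = 20 and 4 decoding steps,
    # e_out is (1, 20, H) and d_out is (1, 4, H), so the pointer scores are (1, 4, 20);
    # the decode mask then restricts each step to positions after the previous boundary.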
@staticmethod
def select_boundary(bi_affine_attn, state_idx, seq_len):
""" attn: [batch, length_decoder, length_encoder, num_labels]
(1, 1, n, 1)
state_idx: tmp_area start idx
"""
decode_mask = [0 for _ in range(state_idx)]
decode_mask = decode_mask + [1 for _ in range(state_idx, seq_len)]
decode_mask = torch.Tensor(decode_mask).float().cuda(CUDA_ID)
# decode_mask = decode_mask.unsqueeze(0).float()
mask_pad_ = (1 - decode_mask) * SMOO_VAL
masked_bi_affine_attn = bi_affine_attn.mul(decode_mask) + mask_pad_ # make it small enough
# scoring
boundary_predict = torch.argmax(masked_bi_affine_attn.log_softmax(dim=-1)).unsqueeze(0)
state_idx = (boundary_predict + 1).item()
return boundary_predict, state_idx
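    # Example with hypothetical values: for seq_len = 10 and state_idx = 4, positions 0-3
    # are pushed down by SMOO_VAL, the argmax runs over positions 4-9, and the next
    # decoding step starts right after the predicted boundary.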
def gen_loss(self, inputs=None, target=None):
""" 进行特征抽取和子句分割
在编码解码模式下,有的句子可能不存在边界点,这时候分两种实验设置进行分析:
1 以学习结束点为一种情景。
2 排除结束点
"""
word_ids, pos_ids, graph, decode_indices, decode_mask, masks = inputs
self.sent_encode.flatten_parameters()
word_emb = self.word_emb(word_ids)
pos_emb = self.pos_emb(pos_ids)
rnn_inputs = torch.cat([word_emb, pos_emb], dim=-1) # (batch_size, padding_length, embedding_size)
if masks is not None:
lengths = masks.sum(-1)
rnn_inputs = rnn_inputs * masks.unsqueeze(-1).float()
rnn_inputs_packed = pack_padded_sequence(rnn_inputs, lengths, batch_first=True)
rnn_outputs_packed, _ = self.sent_encode(rnn_inputs_packed)
rnn_outputs, _ = pad_packed_sequence(rnn_outputs_packed, batch_first=True)
else:
rnn_outputs, _ = self.sent_encode(rnn_inputs)
gcn_outputs = rnn_outputs
tag_score = self.enc_decode_(rnn_inputs, gcn_outputs, decode_mask, decode_indices)
score_ = tag_score.squeeze(0)
# print(score_.size())
# input(target.size())
loss_ = func.nll_loss(score_, target)
return loss_
def forward(self, inputs=None):
""" 解码方式特殊,编码方式一致,解码需要对每个序列的每个边界点处理出来,做顺序解码。
注意,这款解码器包含了每句话的结束点作为标签之一,同样我们在target中也设置了这个边界进行学习。
"""
word_ids, pos_ids, graph, _, _, masks = inputs
word_emb = self.word_emb(word_ids)
seq_len = word_emb.size()[1]
pos_emb = self.pos_emb(pos_ids)
rnn_inputs = torch.cat([word_emb, pos_emb], dim=-1)
lengths = masks.sum(-1)
rnn_inputs = rnn_inputs * masks.unsqueeze(-1).float()
rnn_inputs_packed = pack_padded_sequence(rnn_inputs, lengths, batch_first=True)
rnn_outputs_packed, _ = self.sent_encode(rnn_inputs_packed)
rnn_outputs, _ = pad_packed_sequence(rnn_outputs_packed, batch_first=True)
# (batch, seq_len, hidden)
gcn_outputs = rnn_outputs
# print("gvn_output: ", gcn_outputs.size())
        # encoder-decoder structure
e_out, hidden = self.encoder(gcn_outputs)
# e_out, hidden = gcn_outputs, Var(torch.zeros(2, 1, HIDDEN_SIZE // 2)).cuda(CUDA_ID)
# e_out = gcn_outputs + self.residual_drop(e_out) # (batch, seq_len, hidden)
state = hidden.transpose(0, 1).view(1, 1, -1) # (batch, xxx, hidden)
        # d_input_ = gcn_hidden[0, 0, :]  # initialize the decoder input (batch, seq0, hidden)
start_idx, d_end, d_outputs = 0, False, None
        # loop decoding; should the decoder above be changed to a GRUCell?
while not d_end:
d_input = gcn_outputs[:, start_idx, :].unsqueeze(1) # (batch, seq_len, hidden_size) (1, 1, Hidden)
# d_input = e_out[:, start_idx, :].unsqueeze(1) # (batch, seq_len, hidden_size) (1, 1, Hidden)
# d_input = rnn_inputs[:, start_idx, :].unsqueeze(1) # (batch, seq_len, hidden_size) (1, 1, Hidden)
d_out, state = self.decoder(d_input, state) # (batch, 1, hidden_size) (1, 1, h)
            # bi-affine attention: use the decoder output to find the best position within the allowed range of encoder states
# bi_affine: [batch, length_decoder, length_encoder, num_labels]
bi_affine_attn = self.pointer(e_out, d_out).squeeze(-1)
boundary_idx, start_idx = self.select_boundary(bi_affine_attn, start_idx, seq_len)
d_outputs = boundary_idx if d_outputs is None else torch.cat((d_outputs, boundary_idx), dim=0)
if start_idx == seq_len:
d_end = True
return d_outputs
| [
"[email protected]"
] | |
9c7c5503d6820c8d892b7ba12c79c4c53e2b1abc | 91f4078045a57eaaafe0b172909d7041e829941c | /arjuna-samples/arjex/test/pkg/app_class/check_02_app_model.py | 66bdc8398d1a691c081dc86f4420c25017d30f6c | [
"Apache-2.0"
] | permissive | amiablea2/arjuna | 0d06d1dfb34309f4b6f39b17298f7acb6c3c48c9 | af74e0882216881ceca0a10f26442165ffc43287 | refs/heads/master | 2023-08-21T20:04:30.416303 | 2021-10-27T06:41:40 | 2021-10-27T06:41:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | # This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna import *
from arjex.lib.app_class.wp_app_model import WordPress
@for_test
def wordpress(request):
# Setup
wordpress = WordPress()
wordpress.login()
yield wordpress
# Teadown
wordpress.logout()
@test
def check_with_wp_app_model(request, wordpress):
wordpress.tweak_role_value_in_settings("editor") | [
"[email protected]"
] | |
6a0133b60e39092bb9168e79b34e7f97ef908275 | ff738b3ec7e5c8c414f6d3c7d74310d8fab69368 | /Mock/Interview5/solution1.py | 63117bb3138185ef7bcaea918ed9a22c4e801a57 | [] | no_license | jw3329/leetcode-problem-solving | a0684ef13bd60e81bd54b91e1b54827aaac9bf16 | 0cc7ad64891a23e348c8214f806a2820ac8c9e0a | refs/heads/main | 2023-08-17T20:36:51.624415 | 2023-08-17T07:09:56 | 2023-08-17T07:09:56 | 170,944,191 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
n = len(nums)
for i in range(n):
for j in range(i+1,n):
if nums[i] + nums[j] == target:
return [i,j]
| [
"[email protected]"
] | |
de63bdffcb21ae66826ed72756766dc1638d7361 | 926b3c52070f6e309567c8598248fd5c57095be9 | /src/mmgeneration/configs/positional_encoding_in_gans/stylegan2_c2_ffhq_512_b3x8_1100k.py | b051c9f38b4cd25d7bc657feff88311e3c9f1f18 | [
"Apache-2.0"
] | permissive | fengbingchun/PyTorch_Test | 410f7cd2303707b0141d433fb9d144a961e1f4c8 | df5c2169f0b699bcd6e74adb4cb0e57f7dcd9348 | refs/heads/master | 2023-05-23T16:42:29.711338 | 2023-03-25T11:31:43 | 2023-03-25T11:31:43 | 167,339,907 | 15 | 4 | null | 2023-03-25T11:31:45 | 2019-01-24T09:24:59 | C++ | UTF-8 | Python | false | false | 1,260 | py | """Config for the `config-f` setting in StyleGAN2."""
_base_ = [
'../_base_/datasets/ffhq_flip.py',
'../_base_/models/stylegan/stylegan2_base.py',
'../_base_/default_runtime.py'
]
model = dict(generator=dict(out_size=512), discriminator=dict(in_size=512))
data = dict(
samples_per_gpu=3,
train=dict(dataset=dict(imgs_root='./data/ffhq/ffhq_imgs/ffhq_512')))
ema_half_life = 10. # G_smoothing_kimg
custom_hooks = [
dict(
type='VisualizeUnconditionalSamples',
output_dir='training_samples',
interval=5000),
dict(
type='ExponentialMovingAverageHook',
module_keys=('generator_ema', ),
interval=1,
interp_cfg=dict(momentum=0.5**(32. / (ema_half_life * 1000.))),
priority='VERY_HIGH')
]
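# Hedged note (our reading, not stated in the config): the 32 in the EMA exponent
# appears to be the reference minibatch size, so with ema_half_life = 10 kimg the
# per-iteration momentum is 0.5 ** (32 / 10000) ~= 0.99778.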
metrics = dict(
fid50k=dict(
type='FID',
num_images=50000,
inception_pkl='work_dirs/inception_pkl/ffhq-512-50k-rgb.pkl',
bgr2rgb=True),
pr10k3=dict(type='PR', num_images=10000, k=3))
checkpoint_config = dict(interval=10000, by_epoch=False, max_keep_ckpts=30)
lr_config = None
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
total_iters = 1100002
| [
"[email protected]"
] |