Dataset schema (⌀ = nullable column):

| column | type | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, ⌀ |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes, ⌀ |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, ⌀ |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3–10.2M |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
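Each row below carries these fields around a single source file. As a minimal, hedged sketch (the dataset path `org/code-dataset` is a hypothetical placeholder, not the real name), rows with this schema could be streamed with the Hugging Face `datasets` library:

```python
# Minimal sketch of streaming rows with the schema above.
# "org/code-dataset" is a hypothetical placeholder -- substitute the real path.
from datasets import load_dataset

ds = load_dataset("org/code-dataset", split="train", streaming=True)
for row in ds.take(3):  # IterableDataset.take yields only the first n rows
    print(row["repo_name"], row["path"], row["language"], row["length_bytes"])
```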
868da6ad7de2e92037f68b84e2de141d81e5376e
|
9d6e747ed7204555199ece2033decff978295a09
|
/Programmers/그리디/구명보트.py
|
e397a45f8b8c3318aef387de4ebb6add75f97628
|
[] |
no_license
|
leejaeyeong/Algorithm
|
5b47ed9aa241990945cbf2451afe7f084984ced5
|
72072d1e0c28e72075fc00db9239a4bd444b68b6
|
refs/heads/master
| 2021-08-08T10:57:07.345943 | 2021-07-11T15:01:59 | 2021-07-11T15:01:59 | 238,156,464 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,238 |
py
|
''' People stranded on a desert island are to be rescued by lifeboat.
The lifeboats are small: each can carry at most 2 people at a time, and each has a weight limit.
For example, if the people's weights are [70kg, 50kg, 80kg, 50kg] and the weight limit is 100kg,
the 2nd and 4th person can board together, but the 1st and 3rd person weigh 150kg combined,
which exceeds the boat's weight limit, so they cannot.
We want to rescue everyone while using as few lifeboats as possible.
Given the array people of weights and the weight limit limit as parameters,
write a solution function that returns the minimum number of lifeboats needed to rescue everyone.
'''
# Two-pointer approach using indices
def solution(people, limit):
    people.sort()
    escapeCnt, light, heavy = 0, 0, len(people) - 1
    while light < heavy:
        if people[light] + people[heavy] <= limit:
            light += 1
            heavy -= 1
            escapeCnt += 1
        else:
            heavy -= 1
    return len(people) - escapeCnt

print(solution([70, 50, 80, 50], 100))
print(solution([70, 80, 50], 100))
# The version below timed out on efficiency test 1: every `del` on a list reindexes the remaining elements, so each removal costs O(n).
''' def solution(people, limit):
people.sort()
cnt = 0
while len(people) > 1 :
if people[0] + people[-1] <= limit :
del people[0]
del people[-1]
cnt += 1
else :
del people[-1]
cnt += 1
return cnt + len(people) '''
# Version that handles removals with a deque
# deque's pop() and popleft() remove the last/first element in O(1), so no reindexing occurs (note: the method is popleft(), not leftpop()).
''' import collections
def solution(people, limit):
people = collections.deque(sorted(people))
cnt = 0
while len(people) > 1 :
if people[0] + people[-1] <= limit :
people.popleft()
people.pop()
cnt += 1
else :
people.pop()
cnt += 1
return cnt + len(people) '''
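To make the difference concrete, here is an ad-hoc micro-benchmark (names and sizes arbitrary; absolute timings machine-dependent) contrasting O(n) front removal from a list with O(1) `deque.popleft()`:

```python
# Front removals: list.pop(0) shifts every remaining element (O(n) per call),
# while collections.deque.popleft() is O(1) per call.
import timeit

n = 100_000
list_time = timeit.timeit("while xs: xs.pop(0)",
                          setup="xs = list(range(%d))" % n, number=1)
deque_time = timeit.timeit("while xs: xs.popleft()",
                           setup="import collections; "
                                 "xs = collections.deque(range(%d))" % n,
                           number=1)
print("list pop(0): %.3fs, deque popleft(): %.3fs" % (list_time, deque_time))
```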
|
[
"[email protected]"
] | |
6b13c2d1b3333eb858d2f7ff6f6803b8edbd5e52
|
f9e441608cbca1fd2a39de27cdc187cf676ef159
|
/matplotlib/example26.py
|
3a47fec94e67faf5639d189d893932d8b226684f
|
[] |
no_license
|
tisnik/jupyter-notebook-examples
|
cdded1ce63f6637c76c33adbfb25d9efc13b5fcf
|
66974b0590f8beef39ed9d364c9d2b1ee3bd2e63
|
refs/heads/master
| 2023-07-07T04:43:11.787115 | 2021-08-05T07:16:20 | 2021-08-05T07:16:20 | 257,216,516 | 7 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 830 |
py
|
# Jupyter Notebook
#
# Twenty-sixth demonstration example:
# - rendering a 3D graph of a function of the form z = f(x, y)
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
# fig.gca(projection='3d') was removed in Matplotlib 3.6; use add_subplot instead
ax = fig.add_subplot(projection='3d')
delta = 0.1
# range of the independent variable x
x = np.arange(-10.0, 10.0, delta)
# range of the independent variable y
y = np.arange(-10.0, 10.0, delta)
# create two arrays with the [x, y] coordinates
X, Y = np.meshgrid(x, y)
# distance from the point [0, 0]
R = np.sqrt(X*X + Y*Y)
# compute the function used when drawing the graph
Z = np.sin(R)/R
# render the 3D graph as a surface
ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.coolwarm,
                linewidth=0, antialiased=False)
# show the graph
plt.show()
|
[
"[email protected]"
] | |
4de9459362cd51256cf0acfb5269084fb1d69ad5
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-swr/huaweicloudsdkswr/v2/model/show_user_repository_auth_response.py
|
dbf5288a6fb5ffa67492b7a9cdb0a81de2d9138d
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 6,467 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowUserRepositoryAuthResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'int',
'name': 'str',
'self_auth': 'UserAuth',
'others_auths': 'list[UserAuth]'
}
attribute_map = {
'id': 'id',
'name': 'name',
'self_auth': 'self_auth',
'others_auths': 'others_auths'
}
def __init__(self, id=None, name=None, self_auth=None, others_auths=None):
"""ShowUserRepositoryAuthResponse
The model defined in huaweicloud sdk
:param id: id
:type id: int
        :param name: Organization name. Must start with a lowercase letter; may contain lowercase letters, digits, periods, underscores, and hyphens (at most two consecutive underscores, and a period, underscore, or hyphen must not directly follow another of these); must end with a lowercase letter or digit; 1-64 characters.
:type name: str
:param self_auth:
:type self_auth: :class:`huaweicloudsdkswr.v2.UserAuth`
        :param others_auths: Permissions of other users
:type others_auths: list[:class:`huaweicloudsdkswr.v2.UserAuth`]
"""
super(ShowUserRepositoryAuthResponse, self).__init__()
self._id = None
self._name = None
self._self_auth = None
self._others_auths = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if self_auth is not None:
self.self_auth = self_auth
if others_auths is not None:
self.others_auths = others_auths
@property
def id(self):
"""Gets the id of this ShowUserRepositoryAuthResponse.
id
:return: The id of this ShowUserRepositoryAuthResponse.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ShowUserRepositoryAuthResponse.
id
:param id: The id of this ShowUserRepositoryAuthResponse.
:type id: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this ShowUserRepositoryAuthResponse.
        Organization name. Must start with a lowercase letter; may contain lowercase letters, digits, periods, underscores, and hyphens (at most two consecutive underscores, and a period, underscore, or hyphen must not directly follow another of these); must end with a lowercase letter or digit; 1-64 characters.
:return: The name of this ShowUserRepositoryAuthResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ShowUserRepositoryAuthResponse.
        Organization name. Must start with a lowercase letter; may contain lowercase letters, digits, periods, underscores, and hyphens (at most two consecutive underscores, and a period, underscore, or hyphen must not directly follow another of these); must end with a lowercase letter or digit; 1-64 characters.
:param name: The name of this ShowUserRepositoryAuthResponse.
:type name: str
"""
self._name = name
@property
def self_auth(self):
"""Gets the self_auth of this ShowUserRepositoryAuthResponse.
:return: The self_auth of this ShowUserRepositoryAuthResponse.
:rtype: :class:`huaweicloudsdkswr.v2.UserAuth`
"""
return self._self_auth
@self_auth.setter
def self_auth(self, self_auth):
"""Sets the self_auth of this ShowUserRepositoryAuthResponse.
:param self_auth: The self_auth of this ShowUserRepositoryAuthResponse.
:type self_auth: :class:`huaweicloudsdkswr.v2.UserAuth`
"""
self._self_auth = self_auth
@property
def others_auths(self):
"""Gets the others_auths of this ShowUserRepositoryAuthResponse.
        Permissions of other users
:return: The others_auths of this ShowUserRepositoryAuthResponse.
:rtype: list[:class:`huaweicloudsdkswr.v2.UserAuth`]
"""
return self._others_auths
@others_auths.setter
def others_auths(self, others_auths):
"""Sets the others_auths of this ShowUserRepositoryAuthResponse.
        Permissions of other users
:param others_auths: The others_auths of this ShowUserRepositoryAuthResponse.
:type others_auths: list[:class:`huaweicloudsdkswr.v2.UserAuth`]
"""
self._others_auths = others_auths
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowUserRepositoryAuthResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
f769536752aa4961db43d52a48a9b2267ab748be
|
b796ada966f71ea8b26198f6956f847838ed2df3
|
/Django_advanced/advcbv/advcbv/urls.py
|
9b313393ff4431331ad1bf4affbb5bbee611846f
|
[] |
no_license
|
cdunn6754/Django_Stuff
|
786672b79f69bce8a12369369d6672362401b1d9
|
ae631e0318f50a160c572b3d137c5453df0271f7
|
refs/heads/master
| 2021-01-01T19:58:18.854531 | 2017-10-22T00:16:58 | 2017-10-22T00:16:58 | 98,733,253 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 913 |
py
|
"""advcbv URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from basic_app import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^$',views.IndexView.as_view()),
url(r'^basic_app/', include('basic_app.urls'), name='basic_app'),
]
|
[
"[email protected]"
] | |
1a1971c072b24595aa808de9a2af6fbceee0b46b
|
e03ffd4821bd278da3b0835cd8630b12958d4236
|
/sply/grammar.py
|
94b2ee96b6e7368047387bcdd4fca2b58a1f4c68
|
[] |
no_license
|
RussellLuo/sply
|
c659a33eabefe935db06ace846fe30e31bd841ba
|
d6f11f155803b874890428d173b45ee3f2b3fe76
|
refs/heads/master
| 2016-09-06T04:40:04.977841 | 2014-06-11T15:48:29 | 2014-06-11T15:48:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,903 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ### decorators ###
def token(regex):
def wrapper(func):
func.grammar = {
'type': 'token',
'definition': regex
}
return func
return wrapper
def production(rules):
def wrapper(func):
func.grammar = {
'type': 'production',
'definition': rules
}
return func
return wrapper
class Grammar(object):
"""Grammar-definition."""
keywords = ()
literals = ()
simple_tokens = ()
precedences = ()
def token_error_handler(self, t):
"""Error handler when parsing tokens.
Return value:
the number of characters should be skipped
to try parsing the next token.
0 -- no skip (terminate the current parsing process)
"""
print('Illegal characters "%s"' % t.value)
return 1
def production_error_handler(self, p):
if p:
print('Syntax error at %s' % p.value)
else:
print('Syntax error at EOF')
def get_token_names(self):
"""Get names of all tokens.
all_tokens = keywords + literals + simple_tokens + method_tokens
"""
token_names = (
list(self.keywords) +
list(self.literals) +
[name for name, _ in self.simple_tokens] +
[
method.__name__
for method, _ in self.get_grammar_methods('token')
]
)
return token_names
def get_grammar_methods(self, grammar_type):
methods = []
for attr_name in dir(self):
attr = getattr(self, attr_name)
grammar = getattr(attr, 'grammar', None)
if grammar and grammar['type'] == grammar_type:
methods.append((attr, grammar['definition']))
return methods
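As an illustration of how these decorators are meant to be consumed, here is a hypothetical subclass (not from the repository; the production rule string follows PLY-style conventions) whose tagged methods are collected by `get_grammar_methods` and surfaced in `get_token_names`:

```python
# Hypothetical usage sketch of the decorators and Grammar base class above.
class CalcGrammar(Grammar):
    simple_tokens = (('PLUS', r'\+'),)

    @token(r'\d+')
    def NUMBER(self, t):
        t.value = int(t.value)
        return t

    @production('expression : expression PLUS expression')
    def expression_plus(self, p):
        p[0] = p[1] + p[3]

print(CalcGrammar().get_token_names())  # ['PLUS', 'NUMBER']
```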
|
[
"[email protected]"
] | |
62c3025dd72cbd79bb67fc3b146f88adb5c53aa9
|
13830825b25ec01ec2874094a10f36b4b1336ac7
|
/tf/languages/python/triggers/run.py
|
1e16f634c9f07393965d850f7e8e98d94d0d240a
|
[] |
no_license
|
yurimalheiros/textflow
|
db62047b43c44d43c6efc67ad94f8118f984b076
|
c21ddf8aba58dc83d58a8db960d58d91ee2e5c74
|
refs/heads/master
| 2016-09-10T14:47:18.159229 | 2011-11-02T14:26:31 | 2011-11-02T14:26:31 | 1,927,215 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,227 |
py
|
# -*- coding: utf-8 -*-
#######################################################################
# Copyright © 2007-2009 Yuri Malheiros.
# Copyright © 2009 TextFlow Team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#######################################################################
"""
This module implements the trigger of "F5".
"""
import tf.app
from tf.languages.python.triggers.runfiles.terminal import RunTerminal
shortcut = unichr(65474) #F5
sticky = False
class Run(object):
def activate(self):
"""
Execute the current python file.
"""
self.document_manager = tf.app.document_manager
document = self.document_manager.get_active_document()
self.terminal = RunTerminal()
self.terminal.run("python", document.file_uri)
return True
|
[
"[email protected]"
] | |
9be3517010189943cd437a0ff0fd2307813d2d6d
|
f73c9010616f91b64042d9a39e42bd8e1a8ab364
|
/virtual/bin/django-admin
|
329dbca9ba5f0b38234df91dc94f692282115fa2
|
[
"MIT"
] |
permissive
|
mireille1999/Neighbourhood
|
f8d348c0f30c523d7a7a0c891f508be677e6e927
|
62bfe235a31521219acb3a093467e2ce5d21c976
|
refs/heads/main
| 2023-04-19T03:38:45.016484 | 2021-05-06T09:01:12 | 2021-05-06T09:01:12 | 363,867,762 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 309 |
#!/home/mireille/Desktop/Django/Neighbourhood/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"[email protected]"
] | ||
8d7b14dcccda8ae9f7b2682fb5da36c5f06b8731
|
0534c55fd45878ee1ef12d6a9a2903c51cc6cd56
|
/backend/task_profile/models.py
|
0876c0b99a2c9c7e831a67edea5d5986436f2d77
|
[] |
no_license
|
crowdbotics-apps/lcbo-go-19501
|
a7db5aa2265da23ab294de9c683951e54522ce22
|
96dd8ad7ce8c3dcb9526e2197a952d9b8028898f
|
refs/heads/master
| 2022-11-26T07:36:01.090189 | 2020-08-11T20:23:52 | 2020-08-11T20:23:52 | 286,839,867 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,877 |
py
|
from django.conf import settings
from django.db import models
class Notification(models.Model):
"Generated Model"
type = models.CharField(max_length=20,)
message = models.TextField()
user = models.ManyToManyField("users.User", related_name="notification_user",)
timestamp_created = models.DateTimeField(auto_now_add=True,)
class TaskerProfile(models.Model):
"Generated Model"
user = models.OneToOneField(
"users.User", on_delete=models.CASCADE, related_name="taskerprofile_user",
)
mobile_number = models.CharField(max_length=20,)
photo = models.URLField()
timestamp_created = models.DateTimeField(auto_now_add=True,)
last_updated = models.DateTimeField(auto_now=True,)
last_login = models.DateTimeField(null=True, blank=True,)
description = models.TextField(null=True, blank=True,)
city = models.CharField(null=True, blank=True, max_length=50,)
vehicle = models.CharField(null=True, blank=True, max_length=50,)
closing_message = models.TextField(null=True, blank=True,)
work_area_radius = models.FloatField(null=True, blank=True,)
class CustomerProfile(models.Model):
"Generated Model"
user = models.OneToOneField(
"users.User", on_delete=models.CASCADE, related_name="customerprofile_user",
)
mobile_number = models.CharField(max_length=20,)
photo = models.URLField()
timestamp_created = models.DateTimeField(auto_now_add=True,)
last_updated = models.DateTimeField(auto_now=True,)
last_login = models.DateTimeField(null=True, blank=True,)
class InviteCode(models.Model):
"Generated Model"
code = models.CharField(max_length=20,)
user = models.ForeignKey(
"users.User", on_delete=models.CASCADE, related_name="invitecode_user",
)
timestamp_created = models.DateTimeField(auto_now_add=True,)
# Create your models here.
|
[
"[email protected]"
] | |
bc7dfd6b2743ba065db5069510e641c5b4e23c65
|
a140fe192fd643ce556fa34bf2f84ddbdb97f091
|
/.history/class스타크래프트프로젝트전반전_20200709104854.py
|
1d7e1775de7a8f0280b1c3b4921af09ff2aa05b5
|
[] |
no_license
|
sangha0719/py-practice
|
826f13cb422ef43992a69f822b9f04c2cb6d4815
|
6d71ce64bf91cc3bccee81378577d84ba9d9c121
|
refs/heads/master
| 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,771 |
py
|
# Regular unit
class Unit:
    def __init__(self, name, hp, speed):
        self.name = name
        self.hp = hp
        self.speed = speed
        print("{0} unit has been created.".format(name))

    def move(self, location):
        print("[Ground unit moving]")
        print("{0} : moving toward {1}. [speed {2}]"
              .format(self.name, location, self.speed))

    def damaged(self, damage):
        print("{0} : took {1} damage.".format(self.name, damage))
        self.hp -= damage
        print("{0} : current HP is {1}.".format(self.name, self.hp))
        if self.hp <= 0:
            print("{0} : destroyed.".format(self.name))

# Attack unit
class AttackUnit(Unit):
    def __init__(self, name, hp, speed, damage):
        Unit.__init__(self, name, hp, speed)
        self.damage = damage

    def attack(self, location):  # every method defined in a class takes self as its first parameter
        print("{0} : attacking toward {1}. [attack power {2}]"
              .format(self.name, location, self.damage))

# Marine
class Marine(AttackUnit):
    def __init__(self):
        AttackUnit.__init__(self, "Marine", 40, 1, 5)

    # Stimpack: temporarily raises movement and attack speed at the cost of 10 HP
    def stimpack(self):
        if self.hp > 10:
            self.hp -= 10
            print("{0} : using a stimpack. (HP -10)".format(self.name))
        else:
            print("{0} : not enough HP to use a stimpack.".format(self.name))

# Tank
class Tank(AttackUnit):
    # Siege mode: anchors the tank to the ground to attack with much more power; it cannot move.
    seize_developed = False  # whether siege mode has been researched (shared class-level flag)

    def __init__(self):
        AttackUnit.__init__(self, "Tank", 150, 1, 35)
        self.seize_mode = False

    def set_seize_mode(self):
        if not Tank.seize_developed:
            return
        # not in siege mode -> enter siege mode
        if not self.seize_mode:
            print("{0} : switching to siege mode.".format(self.name))
            self.damage *= 2
            self.seize_mode = True
        # in siege mode -> leave siege mode
        else:
            print("{0} : leaving siege mode.".format(self.name))
            self.damage /= 2  # bug fix: original read `self.damaged /= 2`, dividing the damaged() method and raising TypeError
            self.seize_mode = False

# Mixin class that provides the ability to fly
class Flyable:
    def __init__(self, flying_speed):
        self.flying_speed = flying_speed

    def fly(self, name, location):
        print("{0} : flying toward {1}. [speed {2}]"
              .format(name, location, self.flying_speed))

# Airborne attack unit class
class FlyableAttackUnit(AttackUnit, Flyable):
    def __init__(self, name, hp, damage, flying_speed):
        AttackUnit.__init__(self, name, hp, 0, damage)  # ground speed 0
        Flyable.__init__(self, flying_speed)

    def move(self, location):
        print("[Air unit moving]")
        self.fly(self.name, location)

# Wraith
class Wraith(FlyableAttackUnit):
    def __init__(self):
        FlyableAttackUnit.__init__(self, "Wraith", 80, 20, 5)
        self.clocked = False  # cloaking mode (off)

    def clocking(self):
        if self.clocked:  # cloaking on -> turn it off
            print("{0} : cloaking mode disengaged.".format(self.name))
            self.clocked = False  # bug fix: was `self.clocked == False`, a comparison with no effect
        else:             # cloaking off -> turn it on
            print("{0} : cloaking mode engaged.".format(self.name))
            self.clocked = True   # bug fix: was `self.clocked == True`, a comparison with no effect
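A small hypothetical driver (not part of the original file) exercising the class hierarchy above, including the `AttackUnit`/`Flyable` multiple inheritance:

```python
marine = Marine()
marine.stimpack()            # HP 40 -> 30
marine.attack("1 o'clock")

Tank.seize_developed = True  # class-level flag shared by all tanks
tank = Tank()
tank.set_seize_mode()        # damage 35 -> 70

wraith = Wraith()
wraith.move("3 o'clock")     # uses the overridden move(): flies
wraith.clocking()            # toggles cloaking on
```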
|
[
"[email protected]"
] | |
1a6b70ae45e9fc29904a01cf685b64bf43a59743
|
0e9bd59e25d45adbc859cd187a5ebb00da4685ea
|
/tests/photometric_database/test_lightcurve_database.py
|
d3b21ca0bbdd55f26a9d21f5bb7727df9bc803b3
|
[
"Apache-2.0"
] |
permissive
|
REStickland/ramjet
|
cc836090d5afb868db4317bf9cb7416c26061c02
|
ad69e284d5c45b6bd5e3d34e861e5d7b106d4589
|
refs/heads/master
| 2021-03-10T14:08:42.759728 | 2020-03-02T17:44:34 | 2020-03-02T17:44:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,968 |
py
|
"""
Tests for the LightcurveDatabase class.
"""
from typing import Any
from unittest.mock import Mock, patch
import numpy as np
import tensorflow as tf
import pytest
import ramjet.photometric_database.lightcurve_database
from ramjet.photometric_database.lightcurve_database import LightcurveDatabase
class TestLightcurveDatabase:
@pytest.fixture
def database(self):
"""Fixture of an instance of the class under test."""
return LightcurveDatabase()
@pytest.fixture
def database_module(self) -> Any:
import ramjet.photometric_database.lightcurve_database as database_module
return database_module
@pytest.fixture
def module(self) -> Any:
"""Fixture of the module under test."""
import ramjet.photometric_database.lightcurve_database as lightcurve_database_module
return lightcurve_database_module
def test_extraction_of_chunk_and_remainder_from_array(self, database, module):
module.np.random.shuffle = Mock()
array_to_chunk = np.array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]])
expected_chunk = np.array([[3, 3], [4, 4]])
expected_remainder = np.array([[1, 1], [2, 2], [5, 5], [6, 6]])
chunk, remainder = database.extract_shuffled_chunk_and_remainder(array_to_chunk, chunk_ratio=1 / 3,
chunk_to_extract_index=1)
assert np.array_equal(chunk, expected_chunk)
assert np.array_equal(remainder, expected_remainder)
def test_creating_a_padded_window_dataset_for_a_zipped_example_and_label_dataset(self, database):
# noinspection PyMissingOrEmptyDocstring
def examples_generator():
for example in [[1, 1], [2, 2], [3, 3], [4, 4, 4], [5, 5, 5], [6, 6, 6]]:
yield example
# noinspection PyMissingOrEmptyDocstring
def labels_generator():
for label in [[-1, -1], [-2, -2], [-3, -3], [-4, -4, -4], [-5, -5, -5], [-6, -6, -6]]:
yield label
example_dataset = tf.data.Dataset.from_generator(examples_generator, output_types=tf.float32)
label_dataset = tf.data.Dataset.from_generator(labels_generator, output_types=tf.float32)
dataset = tf.data.Dataset.zip((example_dataset, label_dataset))
padded_window_dataset = database.padded_window_dataset_for_zipped_example_and_label_dataset(
dataset=dataset, batch_size=3, window_shift=2, padded_shapes=([None], [None]))
padded_window_iterator = iter(padded_window_dataset)
batch0 = next(padded_window_iterator)
assert np.array_equal(batch0[0].numpy(), [[1, 1], [2, 2], [3, 3]])
batch1 = next(padded_window_iterator)
assert np.array_equal(batch1[0].numpy(), [[3, 3, 0], [4, 4, 4], [5, 5, 5]])
@patch.object(ramjet.photometric_database.lightcurve_database.np.random, 'randint')
def test_lightcurve_padding_can_be_made_non_random_for_evaluation(self, mock_randint, database, database_module):
mock_randint.return_value = 3
lightcurve0 = database.make_uniform_length(np.array([10, 20, 30, 40, 50]), length=9, randomize=True)
assert np.array_equal(lightcurve0, [30, 40, 50, 10, 20, 30, 40, 50, 10])
lightcurve1 = database.make_uniform_length(np.array([10, 20, 30, 40, 50]), length=9, randomize=False)
assert np.array_equal(lightcurve1, [10, 20, 30, 40, 50, 10, 20, 30, 40])
# Should also work for lightcurves with more than just 1 value over time.
lightcurve2 = database.make_uniform_length(np.array([[10], [20], [30], [40], [50]]), length=9, randomize=True)
assert np.array_equal(lightcurve2, [[30], [40], [50], [10], [20], [30], [40], [50], [10]])
lightcurve3 = database.make_uniform_length(np.array([[10], [20], [30], [40], [50]]), length=9, randomize=False)
assert np.array_equal(lightcurve3, [[10], [20], [30], [40], [50], [10], [20], [30], [40]])
|
[
"[email protected]"
] | |
66fbc0fc76eb0b501882c183116d815f84b4740a
|
0529196c4d0f8ac25afa8d657413d4fc1e6dd241
|
/runnie0427/03613/3613.py2.py
|
a7d442d8f366f973d05a21dfa90f433f61769397
|
[] |
no_license
|
riyuna/boj
|
af9e1054737816ec64cbef5df4927c749808d04e
|
06420dd38d4ac8e7faa9e26172b30c9a3d4e7f91
|
refs/heads/master
| 2023-03-17T17:47:37.198570 | 2021-03-09T06:11:41 | 2021-03-09T06:11:41 | 345,656,935 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,370 |
py
|
[Saved content is a Baekjoon Online Judge "404 Not found" HTML error page (site navigation, footer, and tracking scripts) rather than Python source.]
|
[
"[email protected]"
] | |
22cd7d391cc9fbe273b5040f9df01d41521f8ad7
|
722ace259d9a92a7923a7ebd32608acbfddb073d
|
/Tag04/doz_a7.py
|
69786d28c2f05b2563f8c924bbd53b3c49e9c70d
|
[] |
no_license
|
anna-s-dotcom/python_final
|
19da2d0d2a420f4d54bb2a9760593b655230dcea
|
41275625c8905f55a7561cd50df51bbb7d4c39bd
|
refs/heads/master
| 2020-12-08T15:30:04.336376 | 2020-01-10T10:17:15 | 2020-01-10T10:17:15 | 233,017,742 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,197 |
py
|
# Exercise 7)
# Write a function that receives a number and a command.
# The command should be "lin" or "quad".
# Depending on the command, the matching function is evaluated:
# lin = 4*x + 5
# quad = 4*x**2 + 5*x + 6
# 1) Use an if/else construct.
def linquad(x, f='lin'):
    if f == 'lin':
        return 4 * x + 5
    elif f == 'quad':
        return 4 * x**2 + 5*x + 6
    else:
        print('Invalid input')

y = linquad(5, 'lin')
print(y)
y = linquad(10, 'quad')
print(y)

# 2) Use a dictionary.
def linquad_dic(x, f):
    d = {'lin': 4 * x + 5, 'quad': 4 * x**2 + 5*x + 6}
    return d[f]

y = linquad_dic(5, 'lin')
print(y)
y = linquad_dic(10, 'quad')
print(y)

# important: the values are computed at the moment d is created!
print()
x = 5
d = {'lin': 4 * x + 5, 'quad': 4 * x**2 + 5*x + 6}
print(d['lin'])
# re-assigning x afterwards has no effect on d
x = 10
print(d['lin'])

def f1():
    print('F1')

def f2(x):
    return 4 * x

# store a function as a dict value without parentheses ...
# - ... whenever the function should not be called right away
d2 = {'f_1': f1, 'f_2': f2}
d2['f_1']()
y = d2['f_2'](5)
print(y)
|
[
"[email protected]"
] | |
524077872b20c6c49333c3c0e864ffe3114cc7fb
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2423/60899/261347.py
|
b07973f6cbd23601c9ce7344d44ec4505e1dade2
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
numOftests = int(input())
for i in range(numOftests):
list2 = list(map(int, input().split()))
length = list2[0]
list0 = list(map(int,input().split()))
list1 = list(map(int, input().split()))
list0.sort()
list1.sort()
lengthOfzero = len(list0)
list0.extend(list1)
list0 = list(set(list0))
if len(list0) == lengthOfzero:
print("Yes")
else:
print("No")
|
[
"[email protected]"
] | |
d191aa6944595ac49641b74daa31bb3cd5b34f34
|
e5dad8e72f6c89011ae030f8076ac25c365f0b5f
|
/caret_scripts/smooth_medial_wall.py
|
3ba2096b84ab538f66083977dd8820563423cb95
|
[] |
no_license
|
djsperka/caret
|
f9a99dc5b88c4ab25edf8b1aa557fe51588c2652
|
153f8e334e0cbe37d14f78c52c935c074b796370
|
refs/heads/master
| 2023-07-15T19:34:16.565767 | 2020-03-07T00:29:29 | 2020-03-07T00:29:29 | 122,994,146 | 0 | 1 | null | 2018-02-26T16:06:03 | 2018-02-26T16:06:03 | null |
UTF-8
|
Python
| false | false | 4,412 |
py
|
#!/usr/bin/python
#
# Imports
#
import os
import sys
#
# Global Variables
#
progName = "/Users/john/caret5_osx/caret_source/caret_command/caret_command"
#progName = "caret_command"
areaColorFileName = "Geography.areacolor"
separateBorderProjectionFileName = "MedialWall_Separate.borderproj"
mergedBorderProjectionFileName = "MedialWall_Merged.borderproj"
borderColorFileName = "LANDMARK.bordercolor"
medialWallBorderName = "LANDMARK.MEDIAL.WALL"
fiducialCoordFileName = "Human.colin.Cerebral.R.FIDUCIAL.TLRC.711-2B.71723.coord"
smoothedFiducialCoordFileName = "Human.colin.Cerebral.R.FIDUCIAL_SMOOTHED.TLRC.711-2B.71723.coord"
inflatedCoordFileName = "Human.colin.Cerebral.R.INFLATED.71723.coord"
paintFileName = "MedialWall.paint"
paintName = "MEDIAL.WALL"
roiFileName = "medial_wall.roi"
surfaceShapeFileName = "Curvature.surface_shape"
topologyFileName = "Human.colin.Cerebral.R.CLOSED.71723.topo"
##-----------------------------------------------------------------------------
#
# Run a command
#
def runCommand(cmdList) :
cmd = " ".join(cmdList) # join cmdList into a string separated by blanks
print "\nExecuting: %s\n" % cmd
result = os.system(cmd)
if (result != 0) :
print "COMMAND FAILED: "
print " ", cmd
os._exit(-1)
##-----------------------------------------------------------------------------
#
# Main
#
#
# Merge the two medial wall borders into a single border
#
cmdList = (progName,
"-surface-border-merge",
separateBorderProjectionFileName,
mergedBorderProjectionFileName,
medialWallBorderName,
"LANDMARK.MedWall.DORSAL",
"LANDMARK.MedWall.VENTRAL",
"-delete-input-border-projections",
"-close-border")
runCommand(cmdList)
#
# Resample the border
#
cmdList = (progName,
"-surface-border-resample",
fiducialCoordFileName,
topologyFileName,
mergedBorderProjectionFileName,
mergedBorderProjectionFileName,
str(2.0),
"-all")
runCommand(cmdList)
#
# Create a region of interest that contains nodes within
# the medial wall border projection
#
cmdList = (progName,
"-surface-region-of-interest-selection",
inflatedCoordFileName,
topologyFileName,
roiFileName,
roiFileName,
"-border-projection",
mergedBorderProjectionFileName,
medialWallBorderName,
"M",
"3D",
           str(0),
"NORMAL")
runCommand(cmdList)
#
# Create the color for the medial wall paint
#
cmdList = (progName,
"-color-file-add-color",
areaColorFileName,
areaColorFileName,
paintName,
str(255),
str(0),
str(0))
runCommand(cmdList)
#
# Create the color for the medial wall border
#
cmdList = (progName,
"-color-file-add-color",
borderColorFileName,
borderColorFileName,
medialWallBorderName,
str(255),
str(0),
str(0))
runCommand(cmdList)
#
# Create a NEW paint file with one column named "Geography"
#
cmdList = (progName,
"-paint-file-create",
paintFileName,
str(1),
"-coordinate-file",
inflatedCoordFileName,
"-set-column-name 1 Geography")
runCommand(cmdList)
#
# Assign nodes in ROI to paint
#
cmdList = (progName,
"-paint-assign-to-nodes",
paintFileName,
paintFileName,
str(1),
paintName,
"-assign-from-roi-file",
roiFileName)
runCommand(cmdList)
#
# Smooth the medial wall
#
cmdList = (progName,
"-surface-smoothing",
fiducialCoordFileName,
smoothedFiducialCoordFileName,
topologyFileName,
str(1.0),
str(50),
str(-1),
"-roi-file ",
roiFileName)
runCommand(cmdList)
#
# Generate curvature
#
cmdList = (progName,
"-surface-curvature",
smoothedFiducialCoordFileName,
topologyFileName,
surfaceShapeFileName,
surfaceShapeFileName,
"-generate-mean-curvature",
"-mean-column-name \"Folding (Mean Curvature) MWS\"")
runCommand(cmdList)
|
[
"[email protected]"
] | |
c3ce493b5b7d71d6751e0d43317734bfe591ca9a
|
18d4e647425400d41c128c0a6f90e0654dcf0fef
|
/MAGNN/model/base_MAGNN.py
|
24429d3e8ade7f1fcc2f4d17ce76532d3899def9
|
[] |
no_license
|
ddatta-DAC/AD_v3
|
793537fca36c49acee8c63396966a1d6d43df407
|
75191fc77874fa1d09c570195036cb8b8b83de24
|
refs/heads/master
| 2022-09-16T18:57:58.982936 | 2020-05-29T15:34:04 | 2020-05-29T15:34:04 | 263,650,595 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,744 |
py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
from dgl.nn.pytorch import edge_softmax
class MAGNN_metapath_specific(nn.Module):
def __init__(self,
etypes,
out_dim,
num_heads,
rnn_type='gru',
r_vec=None,
attn_drop=0.5,
alpha=0.01,
use_minibatch=False,
attn_switch=False):
super(MAGNN_metapath_specific, self).__init__()
self.out_dim = out_dim
self.num_heads = num_heads
self.rnn_type = rnn_type
self.etypes = etypes
self.r_vec = r_vec
self.use_minibatch = use_minibatch
self.attn_switch = attn_switch
# rnn-like metapath instance aggregator
# consider multiple attention heads
if rnn_type == 'gru':
self.rnn = nn.GRU(out_dim, num_heads * out_dim)
elif rnn_type == 'lstm':
self.rnn = nn.LSTM(out_dim, num_heads * out_dim)
elif rnn_type == 'bi-gru':
self.rnn = nn.GRU(out_dim, num_heads * out_dim // 2, bidirectional=True)
elif rnn_type == 'bi-lstm':
self.rnn = nn.LSTM(out_dim, num_heads * out_dim // 2, bidirectional=True)
elif rnn_type == 'linear':
self.rnn = nn.Linear(out_dim, num_heads * out_dim)
elif rnn_type == 'max-pooling':
self.rnn = nn.Linear(out_dim, num_heads * out_dim)
elif rnn_type == 'neighbor-linear':
self.rnn = nn.Linear(out_dim, num_heads * out_dim)
# node-level attention
# attention considers the center node embedding or not
if self.attn_switch:
self.attn1 = nn.Linear(out_dim, num_heads, bias=False)
self.attn2 = nn.Parameter(torch.empty(size=(1, num_heads, out_dim)))
else:
self.attn = nn.Parameter(torch.empty(size=(1, num_heads, out_dim)))
self.leaky_relu = nn.LeakyReLU(alpha)
self.softmax = edge_softmax
if attn_drop:
self.attn_drop = nn.Dropout(attn_drop)
else:
self.attn_drop = lambda x: x
# weight initialization
if self.attn_switch:
nn.init.xavier_normal_(self.attn1.weight, gain=1.414)
nn.init.xavier_normal_(self.attn2.data, gain=1.414)
else:
nn.init.xavier_normal_(self.attn.data, gain=1.414)
def edge_softmax(self, g):
attention = self.softmax(g, g.edata.pop('a'))
# Dropout attention scores and save them
g.edata['a_drop'] = self.attn_drop(attention)
def message_passing(self, edges):
ft = edges.data['eft'] * edges.data['a_drop']
return {'ft': ft}
def forward(self, inputs):
# features: num_all_nodes x out_dim
if self.use_minibatch:
g, features, type_mask, edge_metapath_indices, target_idx = inputs
else:
g, features, type_mask, edge_metapath_indices = inputs
# Embedding layer
# use torch.nn.functional.embedding or torch.embedding here
# do not use torch.nn.embedding
# edata: E x Seq x out_dim
edata = F.embedding(edge_metapath_indices, features)
# apply rnn to metapath-based feature sequence
if self.rnn_type == 'gru':
_, hidden = self.rnn(edata.permute(1, 0, 2))
elif self.rnn_type == 'lstm':
_, (hidden, _) = self.rnn(edata.permute(1, 0, 2))
elif self.rnn_type == 'bi-gru':
_, hidden = self.rnn(edata.permute(1, 0, 2))
hidden = hidden.permute(1, 0, 2).reshape(-1, self.out_dim, self.num_heads).permute(0, 2, 1).reshape(
-1, self.num_heads * self.out_dim).unsqueeze(dim=0)
elif self.rnn_type == 'bi-lstm':
_, (hidden, _) = self.rnn(edata.permute(1, 0, 2))
hidden = hidden.permute(1, 0, 2).reshape(-1, self.out_dim, self.num_heads).permute(0, 2, 1).reshape(
-1, self.num_heads * self.out_dim).unsqueeze(dim=0)
elif self.rnn_type == 'average':
hidden = torch.mean(edata, dim=1)
hidden = torch.cat([hidden] * self.num_heads, dim=1)
hidden = hidden.unsqueeze(dim=0)
elif self.rnn_type == 'linear':
hidden = self.rnn(torch.mean(edata, dim=1))
hidden = hidden.unsqueeze(dim=0)
elif self.rnn_type == 'max-pooling':
hidden, _ = torch.max(self.rnn(edata), dim=1)
hidden = hidden.unsqueeze(dim=0)
elif self.rnn_type == 'TransE0' or self.rnn_type == 'TransE1':
r_vec = self.r_vec
if self.rnn_type == 'TransE0':
r_vec = torch.stack((r_vec, -r_vec), dim=1)
r_vec = r_vec.reshape(self.r_vec.shape[0] * 2, self.r_vec.shape[1]) # etypes x out_dim
edata = F.normalize(edata, p=2, dim=2)
for i in range(edata.shape[1] - 1):
# consider None edge (symmetric relation)
temp_etypes = [etype for etype in self.etypes[i:] if etype is not None]
edata[:, i] = edata[:, i] + r_vec[temp_etypes].sum(dim=0)
hidden = torch.mean(edata, dim=1)
hidden = torch.cat([hidden] * self.num_heads, dim=1)
hidden = hidden.unsqueeze(dim=0)
elif self.rnn_type == 'RotatE0' or self.rnn_type == 'RotatE1':
r_vec = F.normalize(self.r_vec, p=2, dim=2)
if self.rnn_type == 'RotatE0':
r_vec = torch.stack((r_vec, r_vec), dim=1)
r_vec[:, 1, :, 1] = -r_vec[:, 1, :, 1]
r_vec = r_vec.reshape(self.r_vec.shape[0] * 2, self.r_vec.shape[1], 2) # etypes x out_dim/2 x 2
edata = edata.reshape(edata.shape[0], edata.shape[1], edata.shape[2] // 2, 2)
final_r_vec = torch.zeros([edata.shape[1], self.out_dim // 2, 2], device=edata.device)
final_r_vec[-1, :, 0] = 1
for i in range(final_r_vec.shape[0] - 2, -1, -1):
# consider None edge (symmetric relation)
if self.etypes[i] is not None:
final_r_vec[i, :, 0] = final_r_vec[i + 1, :, 0].clone() * r_vec[self.etypes[i], :, 0] -\
final_r_vec[i + 1, :, 1].clone() * r_vec[self.etypes[i], :, 1]
final_r_vec[i, :, 1] = final_r_vec[i + 1, :, 0].clone() * r_vec[self.etypes[i], :, 1] +\
final_r_vec[i + 1, :, 1].clone() * r_vec[self.etypes[i], :, 0]
else:
final_r_vec[i, :, 0] = final_r_vec[i + 1, :, 0].clone()
final_r_vec[i, :, 1] = final_r_vec[i + 1, :, 1].clone()
for i in range(edata.shape[1] - 1):
temp1 = edata[:, i, :, 0].clone() * final_r_vec[i, :, 0] -\
edata[:, i, :, 1].clone() * final_r_vec[i, :, 1]
temp2 = edata[:, i, :, 0].clone() * final_r_vec[i, :, 1] +\
edata[:, i, :, 1].clone() * final_r_vec[i, :, 0]
edata[:, i, :, 0] = temp1
edata[:, i, :, 1] = temp2
edata = edata.reshape(edata.shape[0], edata.shape[1], -1)
hidden = torch.mean(edata, dim=1)
hidden = torch.cat([hidden] * self.num_heads, dim=1)
hidden = hidden.unsqueeze(dim=0)
elif self.rnn_type == 'neighbor':
hidden = edata[:, 0]
hidden = torch.cat([hidden] * self.num_heads, dim=1)
hidden = hidden.unsqueeze(dim=0)
elif self.rnn_type == 'neighbor-linear':
hidden = self.rnn(edata[:, 0])
hidden = hidden.unsqueeze(dim=0)
eft = hidden.permute(1, 0, 2).view(-1, self.num_heads, self.out_dim) # E x num_heads x out_dim
if self.attn_switch:
center_node_feat = F.embedding(edge_metapath_indices[:, -1], features) # E x out_dim
a1 = self.attn1(center_node_feat) # E x num_heads
a2 = (eft * self.attn2).sum(dim=-1) # E x num_heads
a = (a1 + a2).unsqueeze(dim=-1) # E x num_heads x 1
else:
a = (eft * self.attn).sum(dim=-1).unsqueeze(dim=-1) # E x num_heads x 1
a = self.leaky_relu(a)
g.edata.update({'eft': eft, 'a': a})
# compute softmax normalized attention values
self.edge_softmax(g)
# compute the aggregated node features scaled by the dropped,
# unnormalized attention values.
g.update_all(
self.message_passing,
fn.sum('ft', 'ft')
)
ret = g.ndata['ft'] # E x num_heads x out_dim
if self.use_minibatch:
return ret[target_idx]
else:
return ret
class MAGNN_ctr_ntype_specific(nn.Module):
def __init__(self,
num_metapaths,
etypes_list,
out_dim,
num_heads,
attn_vec_dim,
rnn_type='gru',
r_vec=None,
attn_drop=0.5,
use_minibatch=False):
super(MAGNN_ctr_ntype_specific, self).__init__()
self.out_dim = out_dim
self.num_heads = num_heads
self.use_minibatch = use_minibatch
# metapath-specific layers
self.metapath_layers = nn.ModuleList()
for i in range(num_metapaths):
self.metapath_layers.append(MAGNN_metapath_specific(
etypes_list[i],
out_dim,
num_heads,
rnn_type,
r_vec,
attn_drop=attn_drop,
use_minibatch=use_minibatch)
)
# metapath-level attention
        # note that the actual input dimension must account for the number of heads,
        # since the outputs of multiple heads are concatenated together
self.fc1 = nn.Linear(out_dim * num_heads, attn_vec_dim, bias=True)
self.fc2 = nn.Linear(attn_vec_dim, 1, bias=False)
# weight initialization
nn.init.xavier_normal_(self.fc1.weight, gain=1.414)
nn.init.xavier_normal_(self.fc2.weight, gain=1.414)
def forward(self, inputs):
if self.use_minibatch:
g_list, features, type_mask, edge_metapath_indices_list, target_idx_list = inputs
# metapath-specific layers
metapath_outs = [F.elu(
metapath_layer(
(g, features, type_mask, edge_metapath_indices, target_idx)
).view(-1, self.num_heads * self.out_dim)
)
                         for g, edge_metapath_indices, target_idx, metapath_layer in zip(g_list, edge_metapath_indices_list, target_idx_list, self.metapath_layers)]
else:
g_list, features, type_mask, edge_metapath_indices_list = inputs
# metapath-specific layers
metapath_outs = [F.elu(metapath_layer((g, features, type_mask, edge_metapath_indices)).view(-1, self.num_heads * self.out_dim))
for g, edge_metapath_indices, metapath_layer in zip(g_list, edge_metapath_indices_list, self.metapath_layers)]
beta = []
for metapath_out in metapath_outs:
fc1 = torch.tanh(self.fc1(metapath_out))
fc1_mean = torch.mean(fc1, dim=0)
fc2 = self.fc2(fc1_mean)
beta.append(fc2)
beta = torch.cat(beta, dim=0)
beta = F.softmax(beta, dim=0)
beta = torch.unsqueeze(beta, dim=-1)
beta = torch.unsqueeze(beta, dim=-1)
metapath_outs = [torch.unsqueeze(metapath_out, dim=0) for metapath_out in metapath_outs]
metapath_outs = torch.cat(metapath_outs, dim=0)
h = torch.sum(beta * metapath_outs, dim=0)
return h
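In equation form, the metapath-level attention implemented by `fc1`, `fc2`, and the softmax above is, as a sketch (with $W_1, b_1$ the weights of `fc1`, $w_2$ the weight of `fc2`, and $h_v^{(p)}$ the concatenated multi-head output of metapath $p$ for node $v$):

$$
e_p = w_2^{\top}\!\left(\frac{1}{|V|}\sum_{v \in V} \tanh\!\big(W_1 h_v^{(p)} + b_1\big)\right),
\qquad
\beta_p = \frac{\exp(e_p)}{\sum_{p'} \exp(e_{p'})},
\qquad
h_v = \sum_{p} \beta_p\, h_v^{(p)}
$$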
|
[
"[email protected]"
] | |
2fda7128de47a50c6ff375d42206469d47952984
|
68a088346090ae4e929c208906b14181da0f92f6
|
/第一阶段/2. Python01/day03/exercise/01_str_rectangle.py
|
753207cbeb3641e48a534062ed628f0b1941a2de
|
[] |
no_license
|
LONG990122/PYTHON
|
d1530e734ae48416b5f989a4d97bd1d66d165b91
|
59a2a2a0b033c8ad0cb33d6126c252e9d574eff7
|
refs/heads/master
| 2020-07-07T09:38:03.501705 | 2019-09-23T16:28:31 | 2019-09-23T16:28:31 | 203,316,565 | 0 | 0 | null | 2019-10-23T15:02:33 | 2019-08-20T06:47:44 |
HTML
|
UTF-8
|
Python
| false | false | 401 |
py
|
# 01_str_rectangle.py
# Write a program that prints a rectangular frame 4 lines high.
# Read an integer giving the rectangle's width, then print the frame.
# For example:
# Enter width: 10
# Output:
# ##########
# # #
# # #
# ##########
w = int(input("Enter width: "))
line1 = '#' * w
line2 = '#' + ' ' * (w - 2) + '#'
print(line1)
print(line2)
print(line2)
print(line1)
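
# Hedged extension (not part of the original exercise): the same frame,
# parameterized over height as well as width.
def draw_frame(width, height=4):
    top = '#' * width
    middle = '#' + ' ' * (width - 2) + '#'
    print(top)
    for _ in range(height - 2):
        print(middle)
    print(top)

# draw_frame(10) reproduces the sample output above.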
|
[
"[email protected]"
] | |
f8edac2da5a69b605d4359c2f8f216e0976f96d9
|
e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d
|
/a10sdk/core/techsupport/techsupport_vcsdebug.py
|
5ab73d62848edc2f0279a82d14c841396240e154
|
[
"Apache-2.0"
] |
permissive
|
amwelch/a10sdk-python
|
4179565afdc76cdec3601c2715a79479b3225aef
|
3e6d88c65bd1a2bf63917d14be58d782e06814e6
|
refs/heads/master
| 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null |
UTF-8
|
Python
| false | false | 820 |
py
|
from a10sdk.common.A10BaseClass import A10BaseClass
class GeneratesAvcsDebugFile(A10BaseClass):
""" :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/techsupport/vcsdebug`.
Class Generates aVCS debug file supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "Generates aVCS debug file"
self.a10_url="/axapi/v3/techsupport/vcsdebug"
self.DeviceProxy = ""
for keys, value in kwargs.items():
            setattr(self, keys, value)
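
# Hedged usage sketch (assumes a DeviceProxy configured as described in
# common/device_proxy.py; the variable names below are illustrative):
#
#   vcsdebug = GeneratesAvcsDebugFile(DeviceProxy=device_proxy)
#   # CRUD calls inherited from A10BaseClass are issued against
#   # vcsdebug.a10_url, i.e. /axapi/v3/techsupport/vcsdebug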
|
[
"[email protected]"
] | |
556f9a4f94a174e5febc071241d4cba8792817cd
|
fcecbfe364306bd4da1ac316d38e5dc4fc7942a2
|
/model_util/test_diag.py
|
2ab0bbfe19f5ccae96a8533164a08bc8ef4c68f1
|
[] |
no_license
|
darothen/crm-tools
|
ce7cda50ca8d49c801848d60a83372a6f3eef273
|
095da56aba6911e472093a2ebae8a73503ff0855
|
refs/heads/master
| 2016-09-06T14:53:28.088780 | 2014-05-05T14:12:13 | 2014-05-05T14:12:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 575 |
py
|
from crm_io import read_diag, save_diag
#filename = "/Volumes/legion_home/models/crm71_2d/OUTPUT/DIAG"
filename = "/Volumes/legion_storage02/crm_testing/kshv_2d_largedomain/kshv_500ccn_100in/DIAG"
nz = 65
spmd = True
ts = 20
nt = read_diag(filename, nz, spmd)
all_time, all_tdiag = save_diag(filename, nz, nt, spmd)
print(all_time)
print(all_tdiag[ts, :, 36])
import matplotlib.pyplot as plt  # pylab's star import does not bind the name `plt`
import numpy as np
fig = plt.figure(1)
plt.clf()
d = np.ma.masked_invalid(all_tdiag[ts,:,36])
d = np.ma.masked_outside(d, -10, 10)
d = np.ma.filled(d, 0.)
print(d.shape)
plt.plot(d, range(nz), "o")
|
[
"[email protected]"
] | |
76a142e0bf1248e45f81f90da77866782710a3d6
|
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
|
/Terracortril_med_Polymyxin_B_eye_or_ear_ointment_SmPC.py
|
1981573d4c6dae71344cf9ec7d1423466396b794
|
[] |
no_license
|
urudaro/data-ue
|
2d840fdce8ba7e759b5551cb3ee277d046464fe0
|
176c57533b66754ee05a96a7429c3e610188e4aa
|
refs/heads/master
| 2021-01-22T12:02:16.931087 | 2013-07-16T14:05:41 | 2013-07-16T14:05:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 852 |
py
|
{'_data': [['Common',
[['Eye',
u'Sveda vid instillation. Vid l\xe5ngtidsbehandling hornhinneskada och intraokul\xe4r tryckstegring.']]],
['Uncommon', [['Eye', u'Aktivering av viruskeratit.']]],
['Rare', [['Eye', u'Korneaperforation, katarakt vid l\xe5ngtidsbehandling.']]],
['Unknown',
[['Infections', u'Infektion, bakteriell infektion, svampinfektion'],
['Immune system', u'\xd6verk\xe4nslighetsreaktioner'],
['Eye',
u'\xd6kat t\xe5rfl\xf6de, glaukom, synnervsp\xe5verkan, br\xe4nnande k\xe4nsla, k\xe4nsla av fr\xe4mmande partiklar, f\xf6rs\xe4mrad l\xe4kning'],
['Skin',
u'Kontakdermatit Allm\xe4nna symtom och/eller symtom vid adminstreringsst\xe4llet']]]],
'_pages': [2, 3],
u'_rank': 7,
u'_type': u'LSFU'}
|
[
"daro@daro-ThinkPad-X220.(none)"
] |
daro@daro-ThinkPad-X220.(none)
|
3bfbb99e6ddff6a51ff6bb6726c8df7a291a5619
|
30158cbe4ef57ec85df07da8e8f8085a5001996c
|
/frame_2D_alg/alternative versions/frame_blobs_ortho.py
|
5e55f32ec1a59c90132a010af70173b6bfe1edf4
|
[
"CC-BY-4.0",
"MIT"
] |
permissive
|
boris-kz/CogAlg
|
951c00e202db8537d106b9794b5309f22f3d25b9
|
a56a369524c9435bfb20225b71e87b6b28c9d1e3
|
refs/heads/master
| 2023-09-04T04:07:52.028903 | 2023-09-04T00:58:53 | 2023-09-04T00:58:53 | 90,488,789 | 110 | 54 |
MIT
| 2023-05-04T11:27:04 | 2017-05-06T20:17:18 |
Python
|
UTF-8
|
Python
| false | false | 13,568 |
py
|
from scipy import misc
from time import time
from collections import deque, namedtuple
import numpy as np
'''
frame_blobs() defines blobs: contiguous areas of positive or negative deviation of gradient. Gradient is estimated
as |dx| + |dy|, then selectively and more precisely as hypot(dx, dy), from cross-comparison among adjacent pixels.
Complemented by intra_blob (recursive search within blobs), it will be a 2D version of first-level core algorithm.
frame_blobs() performs several levels (Le) of encoding, incremental per scan line defined by vertical coordinate y.
value of y per Le line is shown relative to y of current input line, incremented by top-down scan of input image:
1Le, line y: comp_pixel (lateral and vertical comp) -> pixel + derivatives tuple: dert ) frame of derts: dert__
2Le, line y-1: form_P(dert2) -> 1D pattern P
3Le, line y-2: scan_P_(P, hP)-> hP, roots: down-connections, fork_: up-connections between Ps
4Le, line y-3: form_segment(hP, seg) -> seg: merge vertically-connected _Ps in non-forking blob segments
5Le, line y-4+ seg depth: form_blob(seg, blob): merge connected segments in fork_ incomplete blobs, recursively
All 2D functions (y_comp, scan_P_, form_segment, form_blob) input two lines: higher and lower, convert elements of
lower line into elements of new higher line, then displace elements of old higher line into higher function.
Higher-line elements include additional variables, derived while they were lower-line elements.
Processing is mostly sequential because blobs are irregular and very difficult to map to matrices.
prefix '_' denotes higher-line variable or pattern, vs. same-type lower-line variable or pattern,
postfix '_' denotes array name, vs. same-name elements of that array
'''
# ************ MAIN FUNCTIONS *******************************************************************************************
# -image_to_blobs()
# -comp_pixel()
# -form_P_()
# -scan_P_()
# -form_seg_()
# -form_blob()
# ***********************************************************************************************************************
def image_to_blobs(image): # root function, postfix '_' denotes array vs element, prefix '_' denotes higher- vs lower- line variable
frame = [[0, 0, 0, 0], [], image.shape] # params, blob_, shape
dert__ = comp_pixel(image) # vertically and horizontally bilateral comparison of adjacent pixels
seg_ = deque() # buffer of running segments
for y in range(1, height - 1): # first and last row are discarded
P_ = form_P_(dert__[y]) # horizontal clustering
P_ = scan_P_(P_, seg_, frame)
seg_ = form_seg_(y, P_, frame)
while seg_: form_blob(seg_.popleft(), frame) # frame ends, last-line segs are merged into their blobs
return frame # frame of 2D patterns
# ---------- image_to_blobs() end -----------------------------------------------------------------------------------
def comp_pixel(p__): # bilateral comparison between vertically and horizontally consecutive pixels within image
dert__ = np.empty(shape=(height, width, 4), dtype=int) # initialize dert__
dy__ = p__[2:, 1:-1] - p__[:-2, 1:-1] # vertical comp between rows, first and last column are discarded
dx__ = p__[1:-1, 2:] - p__[1:-1, :-2] # lateral comp between columns, first and last row are discarded
g__ = np.abs(dy__) + np.abs(dx__) - ave # deviation of gradient, initially approximated as |dy| + |dx|
dert__[:, :, 0] = p__
dert__[1:-1, 1:-1, 1] = g__
dert__[1:-1, 1:-1, 2] = dy__ # first row, last row, first column and last-column are discarded
dert__[1:-1, 1:-1, 3] = dx__
return dert__
# ---------- comp_pixel() end ---------------------------------------------------------------------------------------
def form_P_(dert_): # horizontally cluster and sum consecutive pixels and their derivatives into Ps
P_ = deque() # row of Ps
i, g, dy, dx = dert_[1] # first dert
x0, I, G, Dy, Dx, L = 1, i, g, dy, dx, 1 # P params
P_dert_ = [(i, g, dy, dx)] # dert buffer
_s = g > 0 # sign
for x, (i, g, dy, dx) in enumerate(dert_[2:-1], start=2):
s = g > 0
if s != _s: # P is terminated and new P is initialized
P_.append([_s, x0, I, G, Dy, Dx, L, P_dert_])
x0, I, G, Dy, Dx, L = x, 0, 0, 0, 0, 0
P_dert_ = []
# accumulate P params:
I += i
G += g
Dy += dy
Dx += dx
L += 1
P_dert_.append((i, g, dy, dx))
_s = s # prior sign
P_.append([_s, x0, I, G, Dy, Dx, L, P_dert_]) # last P in row
return P_
# ---------- form_P_() end ------------------------------------------------------------------------------------------
def scan_P_(P_, seg_, frame): # integrate x overlaps (forks) between same-sign Ps and _Ps into blob segments
new_P_ = deque()
if P_ and seg_: # if both are not empty
P = P_.popleft() # input-line Ps
seg = seg_.popleft() # higher-line segments,
_P = seg[2][-1] # last element of each segment is higher-line P
stop = False
fork_ = []
while not stop:
x0 = P[1] # first x in P
xn = x0 + P[-2] # first x in next P
_x0 = _P[1] # first x in _P
_xn = _x0 +_P[-2] # first x in next _P
if P[0] == _P[0] and _x0 < xn and x0 < _xn: # test for sign match and x overlap
seg[3] += 1
fork_.append(seg) # P-connected segments are buffered into fork_
if xn < _xn: # _P overlaps next P in P_
new_P_.append((P, fork_))
fork_ = []
if P_:
P = P_.popleft() # load next P
else: # terminate loop
if seg[3] != 1: # if roots != 1: terminate seg
form_blob(seg, frame)
stop = True
else: # no next-P overlap
if seg[3] != 1: # if roots != 1: terminate seg
form_blob(seg, frame)
if seg_: # load next _P
seg = seg_.popleft()
_P = seg[2][-1]
else: # if no seg left: terminate loop
new_P_.append((P, fork_))
stop = True
while P_: # terminate Ps and segs that continue at line's end
new_P_.append((P_.popleft(), [])) # no fork
while seg_:
form_blob(seg_.popleft(), frame) # roots always == 0
return new_P_
# ---------- scan_P_() end ------------------------------------------------------------------------------------------
def form_seg_(y, P_, frame): # convert or merge every P into segment, merge blobs
new_seg_ = deque()
while P_:
P, fork_ = P_.popleft()
s, x0 = P[:2]
params = P[2:-1] # I, G, Dy, Dx, L, Ly
xn = x0 + params[-1] # next-P x0 = x0 + L
params.append(1) # add Ly
if not fork_: # new_seg is initialized with initialized blob
blob = [s, [0] * (len(params)), [], 1, [y, x0, xn]] # s, params, seg_, open_segments, box
new_seg = [y, params, [P], 0, fork_, blob] # y0, params, Py_, roots, fork_, blob
blob[2].append(new_seg)
else:
if len(fork_) == 1 and fork_[0][3] == 1: # P has one fork and that fork has one root
new_seg = fork_[0]
I, G, Dy, Dx, L, Ly = params
Is, Gs, Dys, Dxs, Ls, Lys = new_seg[1] # fork segment params, P is merged into segment:
new_seg[1] = [Is + I, Gs + G, Dys + Dy, Dxs + Dx, Ls + L, Lys + Ly]
new_seg[2].append(P) # Py_: vertical buffer of Ps
new_seg[3] = 0 # reset roots
blob = new_seg[-1]
else: # if > 1 forks, or 1 fork that has > 1 roots:
blob = fork_[0][5]
new_seg = [y, params, [P], 0, fork_, blob] # new_seg is initialized with fork blob
blob[2].append(new_seg) # segment is buffered into blob
if len(fork_) > 1: # merge blobs of all forks
if fork_[0][3] == 1: # if roots == 1: fork hasn't been terminated
form_blob(fork_[0], frame) # merge seg of 1st fork into its blob
for fork in fork_[1:len(fork_)]: # merge blobs of other forks into blob of 1st fork
if fork[3] == 1:
form_blob(fork, frame)
if not fork[5] is blob:
params, seg_, open_segs, box = fork[5][1:] # merged blob, omit sign
blob[1] = [par1 + par2 for par1, par2 in zip(blob[1], params)] # sum merging blobs
blob[3] += open_segs
blob[4][0] = min(blob[4][0], box[0]) # extend box y0
blob[4][1] = min(blob[4][1], box[1]) # extend box x0
blob[4][2] = max(blob[4][2], box[2]) # extend box xn
for seg in seg_:
if not seg is fork:
seg[5] = blob # blobs in other forks are references to blob in the first fork
blob[2].append(seg) # buffer of merged root segments
fork[5] = blob
blob[2].append(fork)
blob[3] -= 1 # open_segments -= 1: shared with merged blob
blob[4][1] = min(blob[4][1], x0) # extend box x0
blob[4][2] = max(blob[4][2], xn) # extend box xn
new_seg_.append(new_seg)
return new_seg_
# ---------- form_seg_() end --------------------------------------------------------------------------------------------
def form_blob(term_seg, frame): # terminated segment is merged into continued or initialized blob (all connected segments)
y0s, params, Py_, roots, fork_, blob = term_seg
blob[1] = [par1 + par2 for par1, par2 in zip(params, blob[1])]
blob[3] += roots - 1 # number of open segments
if not blob[3]: # if open_segments == 0: blob is terminated and packed in frame
s, [I, G, Dy, Dx, L, Ly], seg_, open_segs, (y0, x0, xn) = blob
yn = y0s + params[-1] # yn = y0 + Ly
map = np.zeros((yn - y0, xn - x0), dtype=bool) # local map of blob
for seg in seg_:
seg.pop() # remove references to blob
for y, P in enumerate(seg[2], start=seg[0]):
x0P = P[1]
LP = P[-2]
xnP = x0P + LP
map[y - y0, x0P - x0:xnP - x0] = True
frame[0][0] += I
frame[0][3] += G
frame[0][1] += Dy
frame[0][2] += Dx
frame[1].append(nt_blob(
Derts= [I, [[ (G, Dy, Dx, L, Ly, 1, 0, []) ]]], # Derts[0] = I, Dert[1] = single blob,
# rng=1 for comp_range, also layer index = derts[-(rng-1|2)][fa]:
# fa=0: sub_layer index: 0 g | 1 ga, none for hypot_g
# sub_blob_= [], nested to depth = Derts[index]
sign=s,
box= (y0, yn, x0, xn), # boundary box
map= map, # blob boolean map, to compute overlap
root_blob=[blob],
seg_=seg_,
))
# ---------- form_blob() end ----------------------------------------------------------------------------------------
# ************ PROGRAM BODY *********************************************************************************************
ave = 20
# Load inputs --------------------------------------------------------------------
image = misc.imread('./../images/raccoon_eye.jpg', flatten=True).astype(int)
height, width = image.shape
# Main ---------------------------------------------------------------------------
start_time = time()
nt_blob = namedtuple('blob', 'Derts sign box map root_blob seg_')
frame_of_blobs = image_to_blobs(image)
# from intra_blob_debug import intra_blob_hypot # not yet functional, comment-out to run
# frame_of_blobs = intra_blob(hypot_g, frame_of_blobs) # evaluate for deeper clustering inside each blob, recursively
# DEBUG --------------------------------------------------------------------------
from DEBUG import draw, map_blobs
# draw('./../debug/root_blobs', map_blobs(frame_of_blobs))
from intra_comp_ts import intra_comp, hypot_g
for i, blob in enumerate(frame_of_blobs[1]):
if blob.Derts[0][-2] > 500: # L > 20
intra_comp(blob, hypot_g, 0, 5)
draw('./../debug/hypot_g' + str(i), map_blobs(blob))
# intra_comp(blob, comp_range, 0, 5)
# draw('./../debug/comp_range' + str(i), map_blobs(blob))
# intra_comp(blob, comp_angle, 0, 25)
# draw('./../debug/comp_angle_' + str(i), map_blobs(blob))
# intra_comp(blob, comp_gradient, 0, 5)
# draw('./../debug/comp_gradient_' + str(i), map_blobs(blob))
# END DEBUG -----------------------------------------------------------------------
end_time = time() - start_time
print(end_time)
# ************ PROGRAM BODY END ******************************************************************************************
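
# Hedged note (not part of the original file): scipy.misc.imread was removed
# in SciPy >= 1.2. An equivalent grayscale load without scipy, under the
# assumption that Pillow is available:
#   from PIL import Image
#   image = np.array(Image.open('./../images/raccoon_eye.jpg').convert('L')).astype(int)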
|
[
"[email protected]"
] | |
970db4f9221a1a43b4d38341b08f46bb0370580c
|
25403dd331c6f273190461ed57c12c9f633853b9
|
/chiton/core/environment.py
|
1e0113b34cfc4ea1dddea7ec596c12a3ce322fb8
|
[] |
no_license
|
justinlocsei/chiton
|
d1a91a57ff90f16aa375d0f4c6a0b093a66d2d38
|
6ca38962d08a6ca154434a1f78235155710ffeec
|
refs/heads/master
| 2021-03-27T15:10:07.811691 | 2017-01-30T01:18:33 | 2017-01-30T01:18:33 | 48,400,233 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,738 |
py
|
import os.path
import re
from voluptuous import All, Length, Invalid, MultipleInvalid, Schema
from chiton.core.exceptions import ConfigurationError
# All known log levels
LOG_LEVELS = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
def use_config(user_data=None):
"""Load an external JSON configuration file.
The contents of the JSON file are deeply merged with the defaults, ensuring
that the returned value is always valid.
Args:
user_data (dict): The user customizations to apply to the base configuration
Returns:
dict: The merged configuration file
Raises:
chiton.core.exceptions.ConfigurationError: If the user settings are invalid
"""
    user_data = user_data or {}  # avoid a shared mutable default argument
    try:
        _validate_config(user_data)
except MultipleInvalid as e:
raise ConfigurationError('Invalid configuration: %s' % e)
config_data = _default_config()
config_data.update(user_data)
return config_data
def _default_config():
"""Define the default configuration data."""
return {
'allow_api_browsing': False,
'allowed_hosts': [],
'amazon_associates_aws_access_key_id': None,
'amazon_associates_aws_secret_access_key': None,
'amazon_associates_tracking_id': None,
'conn_max_age': 0,
'database': {},
'debug': False,
'default_email': None,
'encryption_key': None,
'environment': None,
'file_logging': False,
'log_file': None,
'log_level': 'INFO',
'media_root': None,
'media_url': '/media/',
'previous_encryption_key': None,
'public_api': False,
'redis': {},
'secret_key': None,
'sentry_dsn': None,
'server_email': None,
'shopstyle_uid': None,
'static_root': None,
'static_url': '/static/',
'track_errors': False,
'trusted_proxy_ips': ['127.0.0.1']
}
def _validate_config(config):
"""Validate configuration data, raising an error for invalid data."""
Schema({
'allow_api_browsing': bool,
'allowed_hosts': [str],
'amazon_associates_aws_access_key_id': All(str, Length(min=1)),
'amazon_associates_aws_secret_access_key': All(str, Length(min=1)),
'amazon_associates_tracking_id': All(str, Length(min=1), _AmazonAssociatesTrackingID()),
'conn_max_age': int,
'database': Schema({
'engine': All(str, Length(min=1)),
'host': All(str, Length(min=1)),
'name': All(str, Length(min=1)),
'password': All(str, Length(min=1)),
'port': All(int),
'user': All(str, Length(min=1))
}),
'debug': bool,
'default_email': All(str, Length(min=1)),
'encryption_key': All(str, Length(min=1)),
'environment': All(str, Length(min=1)),
'file_logging': bool,
'log_file': All(str, Length(min=1), _AbsolutePath()),
'log_level': All(str, Length(min=1), _LogLevel()),
'media_root': All(str, Length(min=1), _AbsolutePath()),
'media_url': All(str, Length(min=1), _MediaUrl()),
'public_api': bool,
'previous_encryption_key': All(str, Length(min=1)),
'redis': Schema({
'db': int,
'host': All(str, Length(min=1)),
'port': int
}),
'secret_key': All(str, Length(min=1)),
'sentry_dsn': All(str, Length(min=1)),
'server_email': All(str, Length(min=1)),
'shopstyle_uid': All(str, Length(min=1)),
'static_root': All(str, Length(min=1), _AbsolutePath()),
'static_url': All(str, Length(min=1), _MediaUrl()),
'track_errors': bool,
'trusted_proxy_ips': [str]
})(config)
def _AbsolutePath():
"""Ensure that a string is an absolute file path."""
def validator(value):
if not os.path.isabs(value):
raise Invalid('%s must be an absolute path' % value)
return validator
def _AmazonAssociatesTrackingID():
"""Ensure that a string is an Amazon Associates tracking ID."""
def validator(value):
if not re.search('-2\d$', value):
raise Invalid('%s must be an Amazon Associates tracking ID' % value)
return validator
def _LogLevel():
"""Ensure that a string is a known log level."""
def validator(value):
if value not in LOG_LEVELS:
raise Invalid('%s must be a log level (%s)' % (value, ', '.join(LOG_LEVELS)))
return validator
def _MediaUrl():
"""Ensure that a URL is a Django-style media URL ending in a slash."""
def validator(value):
if not value.endswith('/'):
raise Invalid('%s does not have a trailing slash' % value)
return validator
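
# Hedged usage sketch (not part of the module): overrides are validated,
# then merged over the defaults; a bad value raises ConfigurationError.
if __name__ == '__main__':
    config = use_config({'debug': True, 'log_level': 'DEBUG'})
    assert config['debug'] is True
    assert config['media_url'] == '/media/'  # untouched default survives
    try:
        use_config({'log_level': 'LOUD'})  # not in LOG_LEVELS
    except ConfigurationError:
        pass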
|
[
"[email protected]"
] | |
59f8c7d57f0528ed4dfb3503f979f7eb63155d5d
|
149660428ec7570b02b9e8b3d494dcd548e80005
|
/01-04_python基础/05_高级数据类型/hm_17_字符串的查找和替换.py
|
9801733fcb09d371234b006f6705420a8c32271f
|
[] |
no_license
|
kenzzuli/hm_15
|
603eb178e476f946eb57b1cdf0c85ba5d65e8d58
|
db8a6d13776e55aa4e05ff9f39e9c8e98d59d8ee
|
refs/heads/master
| 2023-08-07T01:57:01.993474 | 2021-09-23T15:49:19 | 2021-09-23T15:49:19 | 359,322,831 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 902 |
py
|
hello_str = "hello world"
# 1. Check whether the string starts with a given prefix
print(hello_str.startswith("hello"))  # case-sensitive
# 2. Check whether the string ends with a given suffix
print(hello_str.endswith("orld"))
# 3. Find a substring
# index() can also return the index of a substring within the larger string
print(hello_str.find("wo"))
print("*" * 100)
print(hello_str.rindex('o'))
print(hello_str.index("o"))
print(hello_str.rindex("r"))
print(hello_str.index("r"))
print("*" * 100)
# find() returns -1 if the substring is absent
# index() raises ValueError if the substring is absent
print(hello_str.find("abc"))
try:
    print(hello_str.index("abc"))
except ValueError as e:
    print("index() raised:", e)  # without this guard the script would stop here
# 4. Replace substrings
# replace() returns a new string; it never modifies the original string!
print(hello_str.replace("ello", "i"))
print(hello_str)
hi_str = "Hi Are you happy?"
print(hi_str.lower().startswith("h"))
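
# Hedged extra (not part of the original lesson): a helper that gives
# index() the find()-style -1 convention instead of an exception.
def safe_index(s, sub):
    try:
        return s.index(sub)
    except ValueError:
        return -1

print(safe_index(hello_str, "abc"))  # -1 instead of a ValueError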
|
[
"[email protected]"
] | |
36070783ac0da6e39d149ff6a9e6e406d58e1bb4
|
5e1bf5340b28288027d946f136fa0838ca2621dd
|
/RL_Udemy/optimistic_initial_values.py
|
05f34a93a3960471d89c25c00170bf64dd4f12cf
|
[] |
no_license
|
DavisDataScience/DataInterviewPrep
|
5964006933fa8dd1f9b684be94744f5c9505eadb
|
55921a8752e349b9cd08833148c38ebb9210d4ed
|
refs/heads/master
| 2020-12-30T11:02:49.782409 | 2018-04-20T07:11:11 | 2018-04-20T07:11:11 | 98,841,799 | 5 | 0 | null | 2017-11-08T09:54:28 | 2017-07-31T02:52:15 |
HTML
|
UTF-8
|
Python
| false | false | 1,662 |
py
|
# https://deeplearningcourses.com/c/artificial-intelligence-reinforcement-learning-in-python
# https://www.udemy.com/artificial-intelligence-reinforcement-learning-in-python
import numpy as np
import matplotlib.pyplot as plt
class Bandit:
def __init__(self, m, upper_limit):
self.m = m
        self.mean = upper_limit  # optimistic initial estimate of the arm's mean
self.N = 0
def pull(self):
return np.random.randn() + self.m
def update(self, x):
self.N += 1
# look at the formula for estimating_bandit_rewards
self.mean = (1 - 1.0/self.N)*self.mean + 1.0/self.N*x
def run_experiment(m1, m2, m3, eps, N):
bandits = [Bandit(m1,4), Bandit(m2,4), Bandit(m3,4)]
data = np.empty(N)
for i in range(N):
        # pure greedy selection: the optimistic initial means supply the
        # exploration here, so the eps argument goes unused in this variant
        j = np.argmax([b.mean for b in bandits])
x = bandits[j].pull()
bandits[j].update(x)
# for the plot
data[i] = x
cumulative_average = np.cumsum(data) / (np.arange(N) + 1)
# plot moving average ctr
plt.plot(cumulative_average)
plt.plot(np.ones(N)*m1)
plt.plot(np.ones(N)*m2)
plt.plot(np.ones(N)*m3)
plt.xscale('log')
plt.show()
for b in bandits:
print (b.mean)
return cumulative_average
if __name__ == '__main__':
c_1 = run_experiment(1.0, 2.0, 3.0, 0.1, 100000)
c_05 = run_experiment(1.0, 2.0, 3.0, 0.05, 100000)
c_01 = run_experiment(1.0, 2.0, 3.0, 0.01, 100000)
# log scale plot
plt.plot(c_1, label='eps = 0.1')
plt.plot(c_05, label='eps = 0.05')
plt.plot(c_01, label='eps = 0.01')
plt.legend()
plt.xscale('log')
plt.show()
# linear plot
plt.plot(c_1, label='eps = 0.1')
plt.plot(c_05, label='eps = 0.05')
plt.plot(c_01, label='eps = 0.01')
plt.legend()
plt.show()
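
# Hedged check (not part of the original script): with an optimistic start,
# a bandit's running mean decays from upper_limit toward its true mean.
def _demo_optimism(true_mean=1.0, upper_limit=10, pulls=20):
    b = Bandit(true_mean, upper_limit)
    for _ in range(pulls):
        b.update(b.pull())
    print(b.mean)  # already much closer to true_mean than to upper_limit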
|
[
"[email protected]"
] | |
a386ad32ce1908a2c38fe109aef2276beec8507d
|
d2c54233a96b0de3137d320a86de726f87f6d3b4
|
/cnn/struct/updateset_module.py
|
420fca72eebd6129c8038d6af319637cdeb3851b
|
[
"MIT"
] |
permissive
|
hslee1539/cnn
|
aa93e6c41e994b409b5ebcd6e1abfaef98cd0c60
|
816418af0a0057d777f41ac072c3a97fea7e2027
|
refs/heads/master
| 2020-05-01T13:30:25.781805 | 2019-09-10T08:47:24 | 2019-09-10T08:47:24 | 177,493,396 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,481 |
py
|
from import_lib import lib
from tensor.main_module import *
from ctypes import Structure, c_int, POINTER, c_float
# Tensor is POINTER(_Tensor)
class _UpdateSet(Structure):
_fields_ = [
('delta', Tensor),
('value', Tensor),
('momnt', Tensor)
]
lib.cnn_create_updateset.argtypes = (Tensor, Tensor)
lib.cnn_create_updateset.restype = POINTER(_UpdateSet)
lib.cnn_release_updateset_deep.argtypes = [POINTER(_UpdateSet)]
#lib.cnn_release_updateset.argtypes = [POINTER(_UpdateSet)]
def _create(delta, value):
return lib.cnn_create_updateset(delta, value)
def _release(self, deep = True):
if(deep):
lib.cnn_release_updateset_deep(self)
else:
lib.cnn_release_updateset(self)
    del self  # not verified
def _getDelta(self):
return self.contents.delta
def _getValue(self):
return self.contents.value
def _getMomnt(self):
return self.contents.momnt
def _setDelta(self, value):
self.contents.delta = value
def _setValue(self, value):
self.contents.value = value
def _setMomnt(self, value):
self.contents.momnt = value
UpdateSet = POINTER(_UpdateSet)
UpdateSet.__doc__ = "Pointer to the cnn_UpdateSet struct, extended with properties and methods."
UpdateSet.delta = property(_getDelta, _setDelta)
UpdateSet.value = property(_getValue, _setValue)
UpdateSet.momnt = property(_getMomnt, _setMomnt)
UpdateSet.create = staticmethod(_create)
UpdateSet.release = _release
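
# Hedged usage sketch (requires the compiled cnn shared library loaded by
# import_lib; the tensor names below are illustrative):
#
#   us = UpdateSet.create(delta_tensor, value_tensor)
#   us.momnt = momentum_tensor
#   us.release(deep=True)  # frees delta/value/momnt via the C side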
|
[
"[email protected]"
] | |
25a9c9a9109a8396fc521ce6af6ccc72553ddb0a
|
77717d0024c8597fec83600259ea5547abbc183a
|
/configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py
|
1c8c581bccb439fbd4d5f7a4f67777162361b1f8
|
[
"Apache-2.0"
] |
permissive
|
fengyouliang/wheat_detection
|
0a090ef5eda7f2c5463996f4795f9ce06dd04050
|
d056123426a1260c29b486cbb8e44a88a0a3c5bc
|
refs/heads/master
| 2022-11-17T15:09:29.113493 | 2020-07-18T13:47:34 | 2020-07-18T13:47:34 | 276,532,878 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,044 |
py
|
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
model = dict(
bbox_head=dict(
_delete_=True,
type='GARetinaHead',
num_classes=80,
in_channels=256,
stacked_convs=4,
feat_channels=256,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]),
anchor_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=0.04, loss_weight=1.0)))
# training and testing settings
train_cfg = dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.4,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(neg_iou_thr=0.5, min_pos_iou=0.0),
center_ratio=0.2,
ignore_ratio=0.5)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
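# Hedged note (not part of the original config): in mmdetection's config
# system, `_delete_=True` replaces the inherited dict wholesale instead of
# merging, so bbox_head and optimizer_config above ignore the corresponding
# values from retinanet_r50_fpn_1x_coco.py rather than extending them.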
|
[
"[email protected]"
] | |
1df75ea249e9b902876129b1f377f9baad11d651
|
0210dc6652a9abd37d8200b1338e0db177d2b199
|
/model/transforms/autoregressive/ar_linear.py
|
e6a998a0c34555211f6eb3e208946e72e391fc78
|
[
"MIT"
] |
permissive
|
dihjiang/argmax_flows
|
77fdb303eb6e508f07cd5b2b7b84855e37c4511b
|
4ffff4bd6f7b25e20292eff6bad2bf5a962e8d39
|
refs/heads/main
| 2023-05-14T07:03:24.527960 | 2021-06-01T12:10:18 | 2021-06-01T12:10:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,269 |
py
|
import torch
from survae.utils import sum_except_batch
from .ar import AutoregressiveBijection
class AdditiveAutoregressiveBijection(AutoregressiveBijection):
'''Additive autoregressive bijection.'''
def _num_params(self):
return 1
def _forward(self, x, params):
return x + params, x.new_zeros(x.shape[0])
def _element_inverse(self, z, element_params):
return z - element_params
class AffineAutoregressiveBijection(AutoregressiveBijection):
'''Affine autoregressive bijection.'''
def _num_params(self):
return 2
def _forward(self, x, params):
assert params.shape[-1] == self._num_params()
log_scale, shift = self._split_params(params)
scale = torch.exp(log_scale)
z = scale * x + shift
ldj = sum_except_batch(log_scale)
return z, ldj
def _element_inverse(self, z, element_params):
assert element_params.shape[-1] == self._num_params()
log_scale, shift = self._split_params(element_params)
scale = torch.exp(log_scale)
x = (z - shift) / scale
return x
def _split_params(self, params):
unconstrained_scale = params[..., 0]
shift = params[..., 1]
return unconstrained_scale, shift
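
# Hedged sanity check (not part of the module): a standalone mirror of the
# affine forward/inverse above; the two should round-trip exactly.
if __name__ == '__main__':
    params = torch.randn(4, 3, 2)  # last dim holds (log_scale, shift)
    x = torch.randn(4, 3)
    log_scale, shift = params[..., 0], params[..., 1]
    z = torch.exp(log_scale) * x + shift        # _forward
    x_rec = (z - shift) / torch.exp(log_scale)  # _element_inverse
    assert torch.allclose(x, x_rec, atol=1e-6)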
|
[
"[email protected]"
] | |
c707702f06a08c042d2db0549257878c0cee865b
|
367b17c673c274154fb778f536021c7b5d32d47a
|
/tools/v8_presubmit.py
|
ea241b4724522aae50e1bb0cf87728b829b2d6fe
|
[
"bzip2-1.0.6",
"BSD-3-Clause",
"SunPro"
] |
permissive
|
Drieger/v8
|
e52083873e67dbc122e3169451ef11bfe5c8cc93
|
93283bf04ae3fd96592b4090e90ac75130aa9d52
|
refs/heads/master
| 2020-03-26T22:19:17.520937 | 2019-01-15T17:11:25 | 2019-01-15T18:08:07 | 145,448,530 | 0 | 0 |
NOASSERTION
| 2018-10-05T11:58:51 | 2018-08-20T17:18:53 |
C++
|
UTF-8
|
Python
| false | false | 23,214 |
py
|
#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import hashlib
md5er = hashlib.md5
except ImportError, e:
import md5
md5er = md5.new
import json
import optparse
import os
from os.path import abspath, join, dirname, basename, exists
import pickle
import re
import sys
import subprocess
import multiprocessing
from subprocess import PIPE
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.local import utils
# Special LINT rules diverging from default and reason.
# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
# We now run our own header guard check in PRESUBMIT.py.
# build/include_what_you_use: Started giving false positives for variables
# named "string" and "map" assuming that you needed to include STL headers.
LINT_RULES = """
-build/header_guard
-build/include_what_you_use
-readability/fn_size
-readability/multiline_comment
-runtime/references
-whitespace/comments
""".split()
LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
FLAGS_LINE = re.compile("//\s*Flags:.*--([A-z0-9-])+_[A-z0-9].*\n")
ASSERT_OPTIMIZED_PATTERN = re.compile("assertOptimized")
FLAGS_ENABLE_OPT = re.compile("//\s*Flags:.*--opt[^-].*\n")
ASSERT_UNOPTIMIZED_PATTERN = re.compile("assertUnoptimized")
FLAGS_NO_ALWAYS_OPT = re.compile("//\s*Flags:.*--no-?always-opt.*\n")
TOOLS_PATH = dirname(abspath(__file__))
def CppLintWorker(command):
try:
process = subprocess.Popen(command, stderr=subprocess.PIPE)
process.wait()
out_lines = ""
error_count = -1
while True:
out_line = process.stderr.readline()
if out_line == '' and process.poll() != None:
if error_count == -1:
print "Failed to process %s" % command.pop()
return 1
break
m = LINT_OUTPUT_PATTERN.match(out_line)
if m:
out_lines += out_line
error_count += 1
sys.stdout.write(out_lines)
return error_count
except KeyboardInterrupt:
process.kill()
except:
print('Error running cpplint.py. Please make sure you have depot_tools' +
' in your $PATH. Lint check skipped.')
process.kill()
def TorqueLintWorker(command):
try:
process = subprocess.Popen(command, stderr=subprocess.PIPE)
process.wait()
out_lines = ""
error_count = 0
while True:
out_line = process.stderr.readline()
if out_line == '' and process.poll() != None:
break
out_lines += out_line
error_count += 1
sys.stdout.write(out_lines)
if error_count != 0:
sys.stdout.write("tip: use 'tools/torque/format-torque.py -i <filename>'\n");
return error_count
except KeyboardInterrupt:
process.kill()
except:
print('Error running format-torque.py')
process.kill()
class FileContentsCache(object):
def __init__(self, sums_file_name):
self.sums = {}
self.sums_file_name = sums_file_name
def Load(self):
try:
sums_file = None
try:
sums_file = open(self.sums_file_name, 'r')
self.sums = pickle.load(sums_file)
except:
# Cannot parse pickle for any reason. Not much we can do about it.
pass
finally:
if sums_file:
sums_file.close()
def Save(self):
try:
sums_file = open(self.sums_file_name, 'w')
pickle.dump(self.sums, sums_file)
except:
# Failed to write pickle. Try to clean-up behind us.
if sums_file:
sums_file.close()
try:
os.unlink(self.sums_file_name)
except:
pass
finally:
sums_file.close()
def FilterUnchangedFiles(self, files):
changed_or_new = []
for file in files:
try:
handle = open(file, "r")
file_sum = md5er(handle.read()).digest()
if not file in self.sums or self.sums[file] != file_sum:
changed_or_new.append(file)
self.sums[file] = file_sum
finally:
handle.close()
return changed_or_new
def RemoveFile(self, file):
if file in self.sums:
self.sums.pop(file)
class SourceFileProcessor(object):
"""
Utility class that can run through a directory structure, find all relevant
files and invoke a custom check on the files.
"""
def RunOnPath(self, path):
"""Runs processor on all files under the given path."""
all_files = []
for file in self.GetPathsToSearch():
all_files += self.FindFilesIn(join(path, file))
return self.ProcessFiles(all_files)
def RunOnFiles(self, files):
"""Runs processor only on affected files."""
# Helper for getting directory pieces.
dirs = lambda f: dirname(f).split(os.sep)
# Path offsets where to look (to be in sync with RunOnPath).
# Normalize '.' to check for it with str.startswith.
search_paths = [('' if p == '.' else p) for p in self.GetPathsToSearch()]
all_files = [
f.AbsoluteLocalPath()
for f in files
if (not self.IgnoreFile(f.LocalPath()) and
self.IsRelevant(f.LocalPath()) and
all(not self.IgnoreDir(d) for d in dirs(f.LocalPath())) and
any(map(f.LocalPath().startswith, search_paths)))
]
return self.ProcessFiles(all_files)
def IgnoreDir(self, name):
return (name.startswith('.') or
name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken',
'octane', 'sunspider', 'traces-arm64'))
def IgnoreFile(self, name):
return name.startswith('.')
def FindFilesIn(self, path):
result = []
for (root, dirs, files) in os.walk(path):
for ignored in [x for x in dirs if self.IgnoreDir(x)]:
dirs.remove(ignored)
for file in files:
if not self.IgnoreFile(file) and self.IsRelevant(file):
result.append(join(root, file))
return result
class CacheableSourceFileProcessor(SourceFileProcessor):
"""Utility class that allows caching ProcessFiles() method calls.
In order to use it, create a ProcessFilesWithoutCaching method that returns
the files requiring intervention after processing the source files.
"""
def __init__(self, use_cache, cache_file_path, file_type):
self.use_cache = use_cache
self.cache_file_path = cache_file_path
self.file_type = file_type
def GetProcessorWorker(self):
"""Expected to return the worker function to run the formatter."""
raise NotImplementedError
def GetProcessorScript(self):
"""Expected to return a tuple
(path to the format processor script, list of arguments)."""
raise NotImplementedError
def GetProcessorCommand(self):
format_processor, options = self.GetProcessorScript()
if not format_processor:
print('Could not find the formatter for % files' % self.file_type)
sys.exit(1)
command = [sys.executable, format_processor]
command.extend(options)
return command
def ProcessFiles(self, files):
if self.use_cache:
cache = FileContentsCache(self.cache_file_path)
cache.Load()
files = cache.FilterUnchangedFiles(files)
if len(files) == 0:
print 'No changes in %s files detected. Skipping check' % self.file_type
return True
files_requiring_changes = self.DetectFilesToChange(files)
print (
'Total %s files found that require formatting: %d' %
(self.file_type, len(files_requiring_changes)))
if self.use_cache:
for file in files_requiring_changes:
cache.RemoveFile(file)
cache.Save()
return files_requiring_changes == []
def DetectFilesToChange(self, files):
command = self.GetProcessorCommand()
worker = self.GetProcessorWorker()
commands = [command + [file] for file in files]
count = multiprocessing.cpu_count()
pool = multiprocessing.Pool(count)
try:
results = pool.map_async(worker, commands).get(timeout=240)
except KeyboardInterrupt:
print "\nCaught KeyboardInterrupt, terminating workers."
pool.terminate()
pool.join()
sys.exit(1)
unformatted_files = []
for index, errors in enumerate(results):
if errors > 0:
unformatted_files.append(files[index])
return unformatted_files
class CppLintProcessor(CacheableSourceFileProcessor):
"""
Lint files to check that they follow the google code style.
"""
def __init__(self, use_cache=True):
super(CppLintProcessor, self).__init__(
use_cache=use_cache, cache_file_path='.cpplint-cache', file_type='C/C++')
def IsRelevant(self, name):
return name.endswith('.cc') or name.endswith('.h')
def IgnoreDir(self, name):
return (super(CppLintProcessor, self).IgnoreDir(name)
or (name == 'third_party'))
IGNORE_LINT = ['export-template.h', 'flag-definitions.h']
def IgnoreFile(self, name):
return (super(CppLintProcessor, self).IgnoreFile(name)
or (name in CppLintProcessor.IGNORE_LINT))
def GetPathsToSearch(self):
dirs = ['include', 'samples', 'src']
test_dirs = ['cctest', 'common', 'fuzzer', 'inspector', 'unittests']
return dirs + [join('test', dir) for dir in test_dirs]
def GetProcessorWorker(self):
return CppLintWorker
def GetProcessorScript(self):
filters = ','.join([n for n in LINT_RULES])
arguments = ['--filter', filters]
for path in [TOOLS_PATH] + os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
cpplint = os.path.join(path, 'cpplint.py')
if os.path.isfile(cpplint):
return cpplint, arguments
return None, arguments
class TorqueLintProcessor(CacheableSourceFileProcessor):
"""
Check .tq files to verify they follow the Torque style guide.
"""
def __init__(self, use_cache=True):
super(TorqueLintProcessor, self).__init__(
use_cache=use_cache, cache_file_path='.torquelint-cache',
file_type='Torque')
def IsRelevant(self, name):
return name.endswith('.tq')
def GetPathsToSearch(self):
dirs = ['third-party', 'src']
test_dirs = ['torque']
return dirs + [join('test', dir) for dir in test_dirs]
def GetProcessorWorker(self):
return TorqueLintWorker
def GetProcessorScript(self):
torque_tools = os.path.join(TOOLS_PATH, "torque")
torque_path = os.path.join(torque_tools, "format-torque.py")
arguments = ['-l']
if os.path.isfile(torque_path):
return torque_path, arguments
return None, arguments
COPYRIGHT_HEADER_PATTERN = re.compile(
r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
class SourceProcessor(SourceFileProcessor):
"""
Check that all files include a copyright notice and no trailing whitespaces.
"""
RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', '.status', '.tq', '.g4']
def __init__(self):
self.runtime_function_call_pattern = self.CreateRuntimeFunctionCallMatcher()
def CreateRuntimeFunctionCallMatcher(self):
runtime_h_path = join(dirname(TOOLS_PATH), 'src/runtime/runtime.h')
pattern = re.compile(r'\s+F\(([^,]*),.*\)')
runtime_functions = []
with open(runtime_h_path) as f:
for line in f.readlines():
m = pattern.match(line)
if m:
runtime_functions.append(m.group(1))
if len(runtime_functions) < 250:
print ("Runtime functions list is suspiciously short. "
"Consider updating the presubmit script.")
sys.exit(1)
str = '(\%\s+(' + '|'.join(runtime_functions) + '))[\s\(]'
return re.compile(str)
# Overwriting the one in the parent class.
def FindFilesIn(self, path):
if os.path.exists(path+'/.git'):
output = subprocess.Popen('git ls-files --full-name',
stdout=PIPE, cwd=path, shell=True)
result = []
for file in output.stdout.read().split():
for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
if self.IgnoreDir(dir_part):
break
else:
if (self.IsRelevant(file) and os.path.exists(file)
and not self.IgnoreFile(file)):
result.append(join(path, file))
if output.wait() == 0:
return result
return super(SourceProcessor, self).FindFilesIn(path)
def IsRelevant(self, name):
for ext in SourceProcessor.RELEVANT_EXTENSIONS:
if name.endswith(ext):
return True
return False
def GetPathsToSearch(self):
return ['.']
def IgnoreDir(self, name):
return (super(SourceProcessor, self).IgnoreDir(name) or
name in ('third_party', 'out', 'obj', 'DerivedSources'))
IGNORE_COPYRIGHTS = ['box2d.js',
'cpplint.py',
'copy.js',
'corrections.js',
'crypto.js',
'daemon.py',
'earley-boyer.js',
'fannkuch.js',
'fasta.js',
'injected-script.cc',
'injected-script.h',
'libraries.cc',
'libraries-empty.cc',
'lua_binarytrees.js',
'meta-123.js',
'memops.js',
'poppler.js',
'primes.js',
'raytrace.js',
'regexp-pcre.js',
'resources-123.js',
'sqlite.js',
'sqlite-change-heap.js',
'sqlite-pointer-masking.js',
'sqlite-safe-heap.js',
'v8-debugger-script.h',
'v8-inspector-impl.cc',
'v8-inspector-impl.h',
'v8-runtime-agent-impl.cc',
'v8-runtime-agent-impl.h',
'gnuplot-4.6.3-emscripten.js',
'zlib.js']
IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
IGNORE_COPYRIGHTS_DIRECTORY = "test/test262/local-tests"
def EndOfDeclaration(self, line):
return line == "}" or line == "};"
def StartOfDeclaration(self, line):
return line.find("//") == 0 or \
line.find("/*") == 0 or \
line.find(") {") != -1
def ProcessContents(self, name, contents):
result = True
base = basename(name)
if not base in SourceProcessor.IGNORE_TABS:
if '\t' in contents:
print "%s contains tabs" % name
result = False
if not base in SourceProcessor.IGNORE_COPYRIGHTS and \
not SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORY in name:
if not COPYRIGHT_HEADER_PATTERN.search(contents):
print "%s is missing a correct copyright header." % name
result = False
if ' \n' in contents or contents.endswith(' '):
line = 0
lines = []
parts = contents.split(' \n')
if not contents.endswith(' '):
parts.pop()
for part in parts:
line += part.count('\n') + 1
lines.append(str(line))
linenumbers = ', '.join(lines)
if len(lines) > 1:
print "%s has trailing whitespaces in lines %s." % (name, linenumbers)
else:
print "%s has trailing whitespaces in line %s." % (name, linenumbers)
result = False
if not contents.endswith('\n') or contents.endswith('\n\n'):
print "%s does not end with a single new line." % name
result = False
# Sanitize flags for fuzzer.
if ".js" in name and ("mjsunit" in name or "debugger" in name):
match = FLAGS_LINE.search(contents)
if match:
print "%s Flags should use '-' (not '_')" % name
result = False
if not "mjsunit/mjsunit.js" in name:
if ASSERT_OPTIMIZED_PATTERN.search(contents) and \
not FLAGS_ENABLE_OPT.search(contents):
print "%s Flag --opt should be set if " \
"assertOptimized() is used" % name
result = False
if ASSERT_UNOPTIMIZED_PATTERN.search(contents) and \
not FLAGS_NO_ALWAYS_OPT.search(contents):
print "%s Flag --no-always-opt should be set if " \
"assertUnoptimized() is used" % name
result = False
match = self.runtime_function_call_pattern.search(contents)
if match:
print "%s has unexpected spaces in a runtime call '%s'" % (name, match.group(1))
result = False
return result
def ProcessFiles(self, files):
success = True
violations = 0
for file in files:
try:
handle = open(file)
contents = handle.read()
if not self.ProcessContents(file, contents):
success = False
violations += 1
finally:
handle.close()
print "Total violating files: %s" % violations
return success
def _CheckStatusFileForDuplicateKeys(filepath):
comma_space_bracket = re.compile(", *]")
lines = []
with open(filepath) as f:
for line in f.readlines():
# Skip all-comment lines.
if line.lstrip().startswith("#"): continue
# Strip away comments at the end of the line.
comment_start = line.find("#")
if comment_start != -1:
line = line[:comment_start]
line = line.strip()
# Strip away trailing commas within the line.
line = comma_space_bracket.sub("]", line)
if len(line) > 0:
lines.append(line)
# Strip away trailing commas at line ends. Ugh.
for i in range(len(lines) - 1):
if (lines[i].endswith(",") and len(lines[i + 1]) > 0 and
lines[i + 1][0] in ("}", "]")):
lines[i] = lines[i][:-1]
contents = "\n".join(lines)
# JSON wants double-quotes.
contents = contents.replace("'", '"')
# Fill in keywords (like PASS, SKIP).
for key in statusfile.KEYWORDS:
contents = re.sub(r"\b%s\b" % key, "\"%s\"" % key, contents)
status = {"success": True}
def check_pairs(pairs):
keys = {}
for key, value in pairs:
if key in keys:
print("%s: Error: duplicate key %s" % (filepath, key))
status["success"] = False
keys[key] = True
json.loads(contents, object_pairs_hook=check_pairs)
return status["success"]
class StatusFilesProcessor(SourceFileProcessor):
"""Checks status files for incorrect syntax and duplicate keys."""
def IsRelevant(self, name):
# Several changes to files under the test directories could impact status
# files.
return True
def GetPathsToSearch(self):
return ['test', 'tools/testrunner']
def ProcessFiles(self, files):
success = True
for status_file_path in sorted(self._GetStatusFiles(files)):
success &= statusfile.PresubmitCheck(status_file_path)
success &= _CheckStatusFileForDuplicateKeys(status_file_path)
return success
def _GetStatusFiles(self, files):
test_path = join(dirname(TOOLS_PATH), 'test')
testrunner_path = join(TOOLS_PATH, 'testrunner')
status_files = set()
for file_path in files:
if file_path.startswith(testrunner_path):
for suitepath in os.listdir(test_path):
suitename = os.path.basename(suitepath)
status_file = os.path.join(
test_path, suitename, suitename + ".status")
if os.path.exists(status_file):
status_files.add(status_file)
return status_files
for file_path in files:
if file_path.startswith(test_path):
# Strip off absolute path prefix pointing to test suites.
pieces = file_path[len(test_path):].lstrip(os.sep).split(os.sep)
if pieces:
# Infer affected status file name. Only care for existing status
# files. Some directories under "test" don't have any.
if not os.path.isdir(join(test_path, pieces[0])):
continue
status_file = join(test_path, pieces[0], pieces[0] + ".status")
if not os.path.exists(status_file):
continue
status_files.add(status_file)
return status_files
def CheckDeps(workspace):
checkdeps_py = join(workspace, 'buildtools', 'checkdeps', 'checkdeps.py')
return subprocess.call([sys.executable, checkdeps_py, workspace]) == 0
def PyTests(workspace):
result = True
for script in [
join(workspace, 'tools', 'release', 'test_scripts.py'),
join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
]:
print 'Running ' + script
result &= subprocess.call(
[sys.executable, script], stdout=subprocess.PIPE) == 0
return result
def GetOptions():
result = optparse.OptionParser()
result.add_option('--no-lint', help="Do not run cpplint", default=False,
action="store_true")
result.add_option('--no-linter-cache', help="Do not cache linter results",
default=False, action="store_true")
return result
def Main():
workspace = abspath(join(dirname(sys.argv[0]), '..'))
parser = GetOptions()
(options, args) = parser.parse_args()
success = True
print "Running checkdeps..."
success &= CheckDeps(workspace)
use_linter_cache = not options.no_linter_cache
if not options.no_lint:
print "Running C++ lint check..."
success &= CppLintProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
print "Running Torque formatting check..."
success &= TorqueLintProcessor(use_cache=use_linter_cache).RunOnPath(
workspace)
print "Running copyright header, trailing whitespaces and " \
"two empty lines between declarations check..."
success &= SourceProcessor().RunOnPath(workspace)
print "Running status-files check..."
success &= StatusFilesProcessor().RunOnPath(workspace)
print "Running python tests..."
success &= PyTests(workspace)
if success:
return 0
else:
return 1
if __name__ == '__main__':
sys.exit(Main())
|
[
"[email protected]"
] | |
ec9449b8fc562621af4968040ac96cc6fe147d4b
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_67/87.py
|
c30dc2a35d202661326157975327a7f60af30452
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,790 |
py
|
#! /usr/bin/env python
# vim: set et ts=4 sw=4 ci cino=(0:
import sys
import os
import math
import binascii
def main():
f = open(sys.argv[1])
ntest = int(f.readline().strip())
for nt in xrange(ntest):
nrect = int(f.readline().strip())
rects = []
maxx = 0
maxy = 0
for r in xrange(nrect):
l = [ int(x) for x in f.readline().strip().split() ]
if len(l) != 4:
print "Error ", l
sys.exit(1)
xa = l[0]
ya = l[1]
xb = l[2]
yb = l[3]
if xa > xb:
xa,xb = xb,xa
if ya > yb:
ya,yb = yb,ya
if xb > maxx:
maxx= xb
if yb > maxy:
maxy = yb
rects.append( (xa, ya, xb, yb))
grid = [[ 0 for x in xrange(maxx)] for y in xrange(maxy+2) ]
for rec in rects:
xa, ya, xb, yb = rec
for x in xrange(xa-1, xb):
for y in xrange(ya, yb+1):
grid[y][x] = 1
# for xx in grid:
# print xx
found = True
t = 0
while found:
found = False
t += 1
sy = maxy
for sx in xrange(maxx-1, 0, -1):
p = 0
x = sx
y = sy
while True:
n = grid[y][x]
if p == 1 and n == 1:
grid[y+1][x] = 1
elif p == 0 and n == 0:
grid[y+1][x] = 0
if grid[y+1][x] == 1:
found = True
p = n
x += 1
y -= 1
if x >= maxx or y < 0:
break
sx = 0
for sy in xrange(maxy, -1, -1):
p = 0
x = sx
y = sy
while True:
n = grid[y][x]
if p == 1 and n == 1:
grid[y+1][x] = 1
elif p == 0 and n == 0:
grid[y+1][x] = 0
if grid[y+1][x] == 1:
found = True
p = n
x += 1
y -= 1
if x >= maxx or y < 0:
break
# found = doit( sx, sy, grid, grid, maxx )
# print "New"
# for xx in grid:
# print xx
print "Case #%d: %d" % (nt + 1, t)
if __name__ == "__main__":
main()
|
[
"[email protected]"
] | |
388b5093f3eea31cab989db5d839937c077d237f
|
df2cbe914f463ad050d7ed26194424afbe3a0a52
|
/addons/stock_account/tests/test_account_move.py
|
7b2607004c5d051787e4e778dcda9115697a8446
|
[
"Apache-2.0"
] |
permissive
|
SHIVJITH/Odoo_Machine_Test
|
019ed339e995be980606a2d87a63312ddc18e706
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
refs/heads/main
| 2023-07-16T16:23:14.300656 | 2021-08-29T11:48:36 | 2021-08-29T11:48:36 | 401,010,175 | 0 | 0 |
Apache-2.0
| 2021-08-29T10:13:58 | 2021-08-29T10:13:58 | null |
UTF-8
|
Python
| false | false | 5,741 |
py
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.addons.stock_account.tests.test_stockvaluation import _create_accounting_data
from odoo.tests.common import tagged, Form
@tagged("post_install", "-at_install")
class TestAccountMove(AccountTestInvoicingCommon):
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass(chart_template_ref=chart_template_ref)
(
cls.stock_input_account,
cls.stock_output_account,
cls.stock_valuation_account,
cls.expense_account,
cls.stock_journal,
) = _create_accounting_data(cls.env)
cls.product_A = cls.env["product.product"].create(
{
"name": "Product A",
"type": "product",
"default_code": "prda",
"categ_id": cls.env.ref("product.product_category_all").id,
"taxes_id": [(5, 0, 0)],
"supplier_taxes_id": [(5, 0, 0)],
"lst_price": 100.0,
"standard_price": 10.0,
"property_account_income_id": cls.company_data["default_account_revenue"].id,
"property_account_expense_id": cls.company_data["default_account_expense"].id,
}
)
cls.product_A.categ_id.write(
{
"property_stock_account_input_categ_id": cls.stock_input_account.id,
"property_stock_account_output_categ_id": cls.stock_output_account.id,
"property_stock_valuation_account_id": cls.stock_valuation_account.id,
"property_stock_journal": cls.stock_journal.id,
"property_valuation": "real_time",
"property_cost_method": "standard",
}
)
def test_standard_perpetual_01_mc_01(self):
rate = self.currency_data["rates"].sorted()[0].rate
move_form = Form(self.env["account.move"].with_context(default_move_type="out_invoice"))
move_form.partner_id = self.partner_a
move_form.currency_id = self.currency_data["currency"]
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = self.product_A
line_form.tax_ids.clear()
invoice = move_form.save()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 1)
invoice._post()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 4)
self.assertEqual(len(invoice.mapped("line_ids").filtered("is_anglo_saxon_line")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 2)
def test_fifo_perpetual_01_mc_01(self):
self.product_A.categ_id.property_cost_method = "fifo"
rate = self.currency_data["rates"].sorted()[0].rate
move_form = Form(self.env["account.move"].with_context(default_move_type="out_invoice"))
move_form.partner_id = self.partner_a
move_form.currency_id = self.currency_data["currency"]
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = self.product_A
line_form.tax_ids.clear()
invoice = move_form.save()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 1)
invoice._post()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 4)
self.assertEqual(len(invoice.mapped("line_ids").filtered("is_anglo_saxon_line")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 2)
def test_average_perpetual_01_mc_01(self):
self.product_A.categ_id.property_cost_method = "average"
rate = self.currency_data["rates"].sorted()[0].rate
move_form = Form(self.env["account.move"].with_context(default_move_type="out_invoice"))
move_form.partner_id = self.partner_a
move_form.currency_id = self.currency_data["currency"]
with move_form.invoice_line_ids.new() as line_form:
line_form.product_id = self.product_A
line_form.tax_ids.clear()
invoice = move_form.save()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 1)
invoice._post()
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_total)
self.assertAlmostEqual(self.product_A.lst_price * rate, invoice.amount_residual)
self.assertEqual(len(invoice.mapped("line_ids")), 4)
self.assertEqual(len(invoice.mapped("line_ids").filtered("is_anglo_saxon_line")), 2)
self.assertEqual(len(invoice.mapped("line_ids.currency_id")), 2)
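
    # Hedged refactor sketch (not part of the original file): the three
    # tests above differ only in the cost method, so they could delegate
    # to one helper, e.g.:
    #
    #   def _assert_invoice_mc(self, cost_method):
    #       self.product_A.categ_id.property_cost_method = cost_method
    #       ...  # body identical to the assertions above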
|
[
"[email protected]"
] | |
4abeb645d33ce6f4cf795c8c9e123787d71aec95
|
b427fc41375fd95d6da5c7168a823f1b286bb250
|
/imageAugmentation/noise.py
|
dcf11f01bcfd107942d802d3774aa805d2e5191e
|
[] |
no_license
|
heixialeeLeon/DeepFundation
|
40b9cc14ed639d1b9cd34dad32c9b497c9e23927
|
f42560cc25e447f812bdfabd24d426bd9b49bb94
|
refs/heads/master
| 2020-04-18T06:11:05.222490 | 2019-08-02T01:09:21 | 2019-08-02T01:09:21 | 167,309,579 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 743 |
py
|
import numpy as np
from numpy import random
import torch
from torchvision import transforms
import cv2
from PIL import Image, ImageFilter
import skimage.util  # random_noise lives in skimage.util; import the submodule explicitly
def RandomNoise(img, noise):
img = img.astype(np.float32)
img = img / 255.0
mode = [
lambda x : skimage.util.random_noise(x, 'gaussian', mean=0, var=noise),
lambda x : skimage.util.random_noise(x, 'speckle', mean=0, var=noise),
lambda x : skimage.util.random_noise(x, 's&p', amount= noise)
]
img = (random.choice(mode)(img)*255).astype(np.uint8)
return img
if __name__ == "__main__":
img = cv2.imread("../test/1.jpg")
cv2.imshow("raw", img)
img_blur = RandomNoise(img, 0.01)
cv2.imshow("cv2", img_blur)
cv2.waitKey(-1)
|
[
"[email protected]"
] | |
2e2f683e57de6ef5a99d838c90b3c50d17ab40b9
|
d2c4934325f5ddd567963e7bd2bdc0673f92bc40
|
/tests/artificial/transf_None/trend_PolyTrend/cycle_7/ar_12/test_artificial_1024_None_PolyTrend_7_12_0.py
|
c929841014abe7e46526d945e3cdc89788a4419e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jmabry/pyaf
|
797acdd585842474ff4ae1d9db5606877252d9b8
|
afbc15a851a2445a7824bf255af612dc429265af
|
refs/heads/master
| 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 |
BSD-3-Clause
| 2018-12-17T22:08:12 | 2018-06-12T17:15:43 |
Python
|
UTF-8
|
Python
| false | false | 265 |
py
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "None", sigma = 0.0, exog_count = 0, ar_order = 12);
|
[
"[email protected]"
] | |
611d72e952623f37ad15946469334fd8729f472b
|
25310086303e892b29754ad3287ede45ee189f4c
|
/torcms/script/script_sitemap.py
|
bf194a786e2fea0b7465ee171ec65f7baa38fa68
|
[
"MIT"
] |
permissive
|
CW0606/TorCMS
|
41d65431a439bba0360305291f34fb4ed1b32b9c
|
23ddc4d2c27bda890ef2a7998770888857dc6a61
|
refs/heads/master
| 2021-05-15T13:42:40.041532 | 2017-10-17T00:40:21 | 2017-10-17T00:40:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,206 |
py
|
# -*- coding: utf-8 -*-
'''
Generating sitemap.
'''
import os
from torcms.model.post_model import MPost
from torcms.model.wiki_model import MWiki
from config import router_post, SITE_CFG
def gen_post_map(file_name, ext_url=''):
'''
Generate the urls for posts.
:return: None
'''
with open(file_name, 'a') as fout:
for kind_key in router_post:
recent_posts = MPost.query_all(kind=kind_key, limit=1000000)
for recent_post in recent_posts:
url = os.path.join(SITE_CFG['site_url'],
router_post[recent_post.kind],
ext_url,
recent_post.uid)
fout.write('{url}\n'.format(url=url))
def gen_wiki_map(file_name, ext_url=''):
'''
Generate the urls for wiki.
:return: None
'''
# wiki
wiki_recs = MWiki.query_all(limit=10000, kind='1')
with open(file_name, 'a') as fileout:
for rec in wiki_recs:
            url = os.path.join(SITE_CFG['site_url'],
                               'wiki' + ('/_edit' if len(ext_url) else ''),
                               rec.title)
fileout.write('{url}\n'.format(url=url))
## page.
page_recs = MWiki.query_all(limit=10000, kind='2')
with open(file_name, 'a') as fileout:
for rec in page_recs:
            url = os.path.join(SITE_CFG['site_url'],
                               'page' + ('/_edit' if len(ext_url) else ''),
                               rec.uid)
fileout.write('{url}\n'.format(url=url))
def run_sitemap(_):
'''
Generate the sitemap file.
:param args: args
:return: None
'''
site_map_file = 'xx_sitemap.txt'
if os.path.exists(site_map_file):
os.remove(site_map_file)
gen_wiki_map(site_map_file)
gen_post_map(site_map_file)
def run_editmap(_):
'''
Generate the urls file for editing.
:param args: args
:return: None
'''
edit_map_file = 'xx_editmap.txt'
if os.path.exists(edit_map_file):
os.remove(edit_map_file)
gen_wiki_map(edit_map_file, ext_url='_edit')
gen_post_map(edit_map_file, ext_url='_edit')
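
# Example output (a sketch; assumes SITE_CFG['site_url'] = 'https://example.com'
# and a router_post entry mapping some kind to 'post'):
#   xx_sitemap.txt  -> https://example.com/wiki/SomeTitle
#                      https://example.com/post/<uid>
#   xx_editmap.txt  -> https://example.com/wiki/_edit/SomeTitle
#                      https://example.com/post/_edit/<uid>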
|
[
"[email protected]"
] | |
5c822d163aa8bd895275596a4e7a02de83a3ff35
|
b7f3edb5b7c62174bed808079c3b21fb9ea51d52
|
/third_party/blink/web_tests/external/wpt/cookies/resources/dropSecure.py
|
9820295697d22e258b885da239db9fd828b64d3c
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause"
] |
permissive
|
otcshare/chromium-src
|
26a7372773b53b236784c51677c566dc0ad839e4
|
64bee65c921db7e78e25d08f1e98da2668b57be5
|
refs/heads/webml
| 2023-03-21T03:20:15.377034 | 2020-11-16T01:40:14 | 2020-11-16T01:40:14 | 209,262,645 | 18 | 21 |
BSD-3-Clause
| 2023-03-23T06:20:07 | 2019-09-18T08:52:07 | null |
UTF-8
|
Python
| false | false | 483 |
py
|
from helpers import makeDropCookie, setNoCacheAndCORSHeaders
def main(request, response):
"""Respond to `/cookie/drop/secure` by dropping the two cookie set by
`setSecureTestCookies()`"""
headers = setNoCacheAndCORSHeaders(request, response)
# Expire the cookies, and return a JSON-encoded success code.
headers.append(makeDropCookie(b"alone_secure", False))
headers.append(makeDropCookie(b"alone_insecure", False))
return headers, b'{"success": true}'
|
[
"[email protected]"
] | |
9888ad9a7521b31a4a5620fd551865e5aaf87a6e
|
1ca90c49a56420ce876c0418146d27c853cf850f
|
/tests/test_registryv2imagesource.py
|
1a5e9182ebd935ecdb62b54d3d2f6d8ce8f19191
|
[
"Apache-2.0"
] |
permissive
|
ChinnoDog/docker-sign-verify
|
f159f9b86ce5da8bd62f9c9da840cfae8a4f5f14
|
1fc4ad6c54d076d9515ddd1f690b53e73f8e4bc4
|
refs/heads/master
| 2023-02-16T12:16:09.510077 | 2020-07-28T19:58:29 | 2020-07-28T19:58:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,868 |
py
|
#!/usr/bin/env python
# pylint: disable=protected-access,redefined-outer-name
"""RegistryV2ImageSource tests."""
import logging
from copy import deepcopy
from datetime import datetime
from pathlib import Path
from typing import Dict
import aiofiles
import pytest
from docker_registry_client_async import FormattedSHA256
from docker_sign_verify import (
ImageConfig,
RegistryV2ImageSource,
RegistryV2Manifest,
SignatureTypes,
)
from docker_sign_verify.aiotempfile import open as aiotempfile
from docker_sign_verify.imagesource import (
ImageSourceSignImage,
ImageSourceVerifyImageIntegrity,
)
from .localregistry import (
docker_client,
known_good_image_local,
known_good_image_remote,
pytest_registry,
) # Needed for pytest.fixtures
from .stubs import FakeSigner
from .testutils import hash_file
pytestmark = [pytest.mark.asyncio]
LOGGER = logging.getLogger(__name__)
@pytest.fixture
async def registry_v2_image_source() -> RegistryV2ImageSource:
"""Provides a RegistryV2ImageSource instance."""
# Do not use caching; get a new instance for each test
# Implicitly tests __aenter__(), __aexit__(), and close()
async with RegistryV2ImageSource() as registry_v2_image_source:
yield registry_v2_image_source
def test___init__(registry_v2_image_source: RegistryV2ImageSource):
"""Test that the image source can be instantiated."""
assert registry_v2_image_source
@pytest.mark.online
async def test_get_image_config(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
**kwargs,
):
"""Test image configuration retrieval."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
config = await registry_v2_image_source.get_image_config(
known_good_image_local["image_name"], **kwargs
)
assert isinstance(config, ImageConfig)
@pytest.mark.online
async def test_get_image_layer_to_disk(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
**kwargs,
):
"""Test layer retrieval to disk."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
manifest = await registry_v2_image_source.get_manifest(
known_good_image_local["image_name"], **kwargs
)
config_digest = manifest.get_config_digest()
LOGGER.debug("Retrieving blob: %s/%s ...", config_digest, config_digest)
async with aiotempfile(mode="w+b") as file:
result = await registry_v2_image_source.get_image_layer_to_disk(
known_good_image_local["image_name"], config_digest, file, **kwargs
)
LOGGER.debug("Verifying digest of written file ...")
assert await hash_file(file.name) == config_digest
assert result["digest"] == config_digest
@pytest.mark.online
async def test_get_manifest(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
**kwargs,
):
"""Test manifest retrieval."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
LOGGER.debug(
"Retrieving manifest for: %s ...", known_good_image_local["image_name"]
)
manifest = await registry_v2_image_source.get_manifest(
known_good_image_local["image_name"], **kwargs
)
assert isinstance(manifest, RegistryV2Manifest)
assert (
manifest.get_digest() == known_good_image_local["image_name"].resolve_digest()
)
@pytest.mark.online
async def test_layer_exists(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
**kwargs,
):
"""Test layer existence."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
LOGGER.debug(
"Retrieving manifest for: %s ...", known_good_image_local["image_name"]
)
manifest = await registry_v2_image_source.get_manifest(
known_good_image_local["image_name"], **kwargs
)
layer = manifest.get_layers()[-1]
assert await registry_v2_image_source.layer_exists(
known_good_image_local["image_name"], layer, **kwargs
)
assert not await registry_v2_image_source.layer_exists(
known_good_image_local["image_name"], FormattedSHA256("0" * 64), **kwargs
)
# TODO async def test_put_image
@pytest.mark.online_modification
async def test_put_image_config(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
**kwargs,
):
"""Test image configuration assignment."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
LOGGER.debug(
"Retrieving image configuration for: %s ...",
known_good_image_local["image_name"],
)
image_config = await registry_v2_image_source.get_image_config(
known_good_image_local["image_name"], **kwargs
)
# Modify the configuration
json = image_config.get_json()
labels = image_config._get_labels(json)
labels["foo"] = datetime.now().strftime("%d%m%Y%H%M%S")
image_config._set_json(json)
LOGGER.debug(
"Storing modified image configuration: %s ...", image_config.get_digest()
)
response = await registry_v2_image_source.put_image_config(
known_good_image_local["image_name"], image_config, **kwargs
)
# Note: If NoneType, digest may already exist
assert response["digest"] == image_config.get_digest()
@pytest.mark.online_modification
async def test_put_image(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
**kwargs,
):
"""Test image layer assignment."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
image_name = known_good_image_local["image_name"]
LOGGER.debug("Retrieving image: %s ...", image_name)
response = await registry_v2_image_source.verify_image_integrity(
image_name, **kwargs
)
image_name.tag += "_copy"
LOGGER.debug("Storing image: %s ...", image_name)
response = await registry_v2_image_source.put_image(
registry_v2_image_source,
image_name,
response["manifest"],
response["image_config"],
response["compressed_layer_files"],
**kwargs,
)
@pytest.mark.online_modification
async def test_put_image_layer(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
**kwargs,
):
"""Test image layer assignment."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
LOGGER.debug(
"Retrieving image configuration for: %s ...",
known_good_image_local["image_name"],
)
image_config = await registry_v2_image_source.get_image_config(
known_good_image_local["image_name"], **kwargs
)
# Modify the configuration
json = image_config.get_json()
labels = image_config._get_labels(json)
labels["foo"] = datetime.now().strftime("%d%m%Y%H%M%S")
image_config._set_json(json)
LOGGER.debug(
"Storing modified image configuration: %s ...", image_config.get_digest()
)
response = await registry_v2_image_source.put_image_layer(
known_good_image_local["image_name"], image_config.get_bytes(), **kwargs
)
assert response["digest"] == image_config.get_digest()
@pytest.mark.online_modification
async def test_put_image_layer_from_disk(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
tmp_path: Path,
**kwargs,
):
"""Test image layer assignment from disk."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
LOGGER.debug(
"Retrieving image configuration for: %s ...",
known_good_image_local["image_name"],
)
image_config = await registry_v2_image_source.get_image_config(
known_good_image_local["image_name"], **kwargs
)
# Modify the configuration
json = image_config.get_json()
labels = image_config._get_labels(json)
labels["foo"] = datetime.now().strftime("%d%m%Y%H%M%S")
image_config._set_json(json)
LOGGER.debug("Writing modified configuration to disk ...")
path = tmp_path.joinpath("image_config.json")
async with aiofiles.open(path, mode="w+b") as file:
await file.write(image_config.get_bytes())
LOGGER.debug(
"Storing modified image configuration: %s ...", image_config.get_digest()
)
async with aiofiles.open(path, mode="r+b") as file:
response = await registry_v2_image_source.put_image_layer_from_disk(
known_good_image_local["image_name"], file, **kwargs
)
assert response["digest"] == image_config.get_digest()
@pytest.mark.online_modification
async def test_put_manifest(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
**kwargs,
):
"""Test manifest assignment."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
LOGGER.debug(
"Retrieving manifest for: %s ...", known_good_image_local["image_name"]
)
manifest = await registry_v2_image_source.get_manifest(
known_good_image_local["image_name"], **kwargs
)
assert isinstance(manifest, RegistryV2Manifest)
assert (
manifest.get_digest() == known_good_image_local["image_name"].resolve_digest()
)
LOGGER.debug("Storing manifest for: %s ...", known_good_image_local["image_name"])
response = await registry_v2_image_source.put_manifest(
manifest, known_good_image_local["image_name"], **kwargs
)
assert response["digest"] == known_good_image_local["image_name"].resolve_digest()
@pytest.mark.online_modification
async def test_sign_image_same_image_source(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
**kwargs,
):
"""Test image signing."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
dest_image_name = deepcopy(known_good_image_local["image_name"])
dest_image_name.digest = None
dest_image_name.tag = "{0}_signed".format(dest_image_name.tag)
def assertions(result: ImageSourceSignImage):
assert result
image_config = result["image_config"]
assert image_config
assert "FAKE SIGNATURE" in str(image_config)
signature_value = result["signature_value"]
assert signature_value
assert "FAKE SIGNATURE" in signature_value
verify_image_data = result["verify_image_data"]
assert verify_image_data
assert image_config == verify_image_data["image_config"]
manifest = verify_image_data["manifest"]
assert manifest
manifest_signed = result["manifest_signed"]
assert manifest_signed
assert manifest_signed.get_config_digest() == image_config.get_digest()
assert len(manifest_signed.get_layers()) == len(image_config.get_image_layers())
# 1. Single signature
assertions(
await registry_v2_image_source.sign_image(
FakeSigner(),
known_good_image_local["image_name"],
registry_v2_image_source,
dest_image_name,
SignatureTypes.SIGN,
**kwargs,
)
)
# TODO: Test signing image twice (with same key, with different keys ...)
# Can we do this here (using dockerhub), or do we need to do this in test_imageconfig.py???
# TODO: test_sign_image_different_image_source
@pytest.mark.online
async def test_verify_image_integrity(
registry_v2_image_source: RegistryV2ImageSource,
known_good_image_local: Dict,
**kwargs,
):
"""Test image integrity verification."""
if "protocol" in known_good_image_local:
kwargs["protocol"] = known_good_image_local["protocol"]
def assertions(result: ImageSourceVerifyImageIntegrity):
assert result
image_config = result["image_config"]
assert image_config
manifest = result["manifest"]
assert manifest
assert len(result["compressed_layer_files"]) == len(
result["uncompressed_layer_files"]
)
assert len(result["uncompressed_layer_files"]) == len(
result["uncompressed_layer_files"]
)
# 1. Unsigned
assertions(
await registry_v2_image_source.verify_image_integrity(
known_good_image_local["image_name"], **kwargs
)
)
# TODO: Test integrity on a signed image ...
# Can we do this here (using dockerhub), or do we need to do this in test_imageconfig.py???
|
[
"[email protected]"
] | |
ace3fa96b99c770cfc4abe0e6bbc691329863df0
|
3433314089e976a121e0a4ff7320d1214faabc8b
|
/autoarray/util/mapper_util.py
|
71ba884a15c17b9107ebb5ccd6c466f43e568f46
|
[
"MIT"
] |
permissive
|
Sketos/PyAutoArray
|
ab7a63543a35401560ee575c4a8ede7a2561d743
|
72dc7e8d1c38786915f82a7e7284239e5ce87624
|
refs/heads/master
| 2021-02-12T19:06:17.247806 | 2020-04-10T13:15:00 | 2020-04-10T13:15:00 | 244,619,959 | 0 | 0 |
MIT
| 2020-03-03T17:21:03 | 2020-03-03T11:35:40 |
Python
|
UTF-8
|
Python
| false | false | 7,865 |
py
|
import numpy as np
from autoarray import decorator_util
@decorator_util.jit()
def mapping_matrix_from_pixelization_1d_index_for_sub_mask_1d_index(
pixelization_1d_index_for_sub_mask_1d_index,
pixels,
total_mask_pixels,
mask_1d_index_for_sub_mask_1d_index,
sub_fraction,
):
"""Computes the util matrix, by iterating over the known mappings between the sub-grid and pixelization.
Parameters
-----------
pixelization_1d_index_for_sub_mask_1d_index : ndarray
The mappings between the observed grid's sub-pixels and pixelization's pixels.
pixels : int
The number of pixels in the pixelization.
total_mask_pixels : int
        The number of data pixels in the observed data and thus on the grid.
mask_1d_index_for_sub_mask_1d_index : ndarray
The mappings between the observed grid's sub-pixels and observed grid's pixels.
sub_fraction : float
        The fractional area each sub-pixel takes up in a pixel.
"""
mapping_matrix = np.zeros((total_mask_pixels, pixels))
for sub_mask_1d_index in range(mask_1d_index_for_sub_mask_1d_index.shape[0]):
mapping_matrix[
mask_1d_index_for_sub_mask_1d_index[sub_mask_1d_index],
pixelization_1d_index_for_sub_mask_1d_index[sub_mask_1d_index],
] += sub_fraction
return mapping_matrix
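
# A tiny worked example (sketch, not part of the original module): two image
# pixels, each split into two sub-pixels (sub_fraction=0.5), mapped onto a
# two-pixel pixelization.
#
#   mapping_matrix_from_pixelization_1d_index_for_sub_mask_1d_index(
#       pixelization_1d_index_for_sub_mask_1d_index=np.array([0, 0, 1, 0]),
#       pixels=2,
#       total_mask_pixels=2,
#       mask_1d_index_for_sub_mask_1d_index=np.array([0, 0, 1, 1]),
#       sub_fraction=0.5,
#   )
#   -> [[1.0, 0.0],
#       [0.5, 0.5]]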
@decorator_util.jit()
def pixelization_1d_index_for_voronoi_sub_mask_1d_index_from_grids_and_geometry(
grid,
nearest_pixelization_1d_index_for_mask_1d_index,
mask_1d_index_for_sub_mask_1d_index,
pixelization_grid,
pixel_neighbors,
pixel_neighbors_size,
):
""" Compute the mappings between a set of sub-grid pixels and pixelization pixels, using information on \
how the pixels hosting each sub-pixel map to their closest pixelization pixel on the image-plane pix-grid \
and the pixelization's pixel centres.
To determine the complete set of sub-pixel to pixelization pixel mappings, we must pair every sub-pixel to \
its nearest pixel. Using a full nearest neighbor search to do this is slow, thus the pixel neighbors (derived via \
the Voronoi grid) are used to localize each nearest neighbor search by using a graph search.
Parameters
----------
grid : Grid
        The grid of (y,x) arc-second coordinates at the centre of every unmasked pixel, which has been traced \
        to an irregular grid via lensing.
nearest_pixelization_1d_index_for_mask_1d_index : ndarray
A 1D array that maps every grid pixel to its nearest pix-grid pixel (as determined on the unlensed \
2D array).
pixelization_grid : (float, float)
The (y,x) centre of every Voronoi pixel in arc-seconds.
pixel_neighbors : ndarray
An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_size : ndarray
An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
Voronoi grid.
"""
pixelization_1d_index_for_sub_mask_1d_index = np.zeros((grid.shape[0]))
for sub_mask_1d_index in range(grid.shape[0]):
nearest_pixelization_1d_index = nearest_pixelization_1d_index_for_mask_1d_index[
mask_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]
]
while True:
nearest_pixelization_pixel_center = pixelization_grid[
nearest_pixelization_1d_index
]
sub_pixel_to_nearest_pixelization_distance = (
(grid[sub_mask_1d_index, 0] - nearest_pixelization_pixel_center[0]) ** 2
+ (grid[sub_mask_1d_index, 1] - nearest_pixelization_pixel_center[1])
** 2
)
closest_separation_from_pixelization_to_neighbor = 1.0e8
for neighbor_pixelization_1d_index in range(
pixel_neighbors_size[nearest_pixelization_1d_index]
):
neighbor = pixel_neighbors[
nearest_pixelization_1d_index, neighbor_pixelization_1d_index
]
separation_from_neighbor = (
(grid[sub_mask_1d_index, 0] - pixelization_grid[neighbor, 0]) ** 2
+ (grid[sub_mask_1d_index, 1] - pixelization_grid[neighbor, 1]) ** 2
)
if (
separation_from_neighbor
< closest_separation_from_pixelization_to_neighbor
):
closest_separation_from_pixelization_to_neighbor = (
separation_from_neighbor
)
closest_neighbor_pixelization_1d_index = (
neighbor_pixelization_1d_index
)
neighboring_pixelization_1d_index = pixel_neighbors[
nearest_pixelization_1d_index, closest_neighbor_pixelization_1d_index
]
sub_pixel_to_neighboring_pixelization_distance = (
closest_separation_from_pixelization_to_neighbor
)
if (
sub_pixel_to_nearest_pixelization_distance
<= sub_pixel_to_neighboring_pixelization_distance
):
pixelization_1d_index_for_sub_mask_1d_index[
sub_mask_1d_index
] = nearest_pixelization_1d_index
break
else:
nearest_pixelization_1d_index = neighboring_pixelization_1d_index
return pixelization_1d_index_for_sub_mask_1d_index
@decorator_util.jit()
def adaptive_pixel_signals_from_images(
pixels,
signal_scale,
pixelization_1d_index_for_sub_mask_1d_index,
mask_1d_index_for_sub_mask_1d_index,
hyper_image,
):
"""Compute the (hyper) signal in each pixel, where the signal is the sum of its datas_-pixel fluxes. \
These pixel-signals are used to compute the effective regularization weight of each pixel.
    The pixel signals are scaled in the following ways:
    1) Divided by the number of data-pixels in the pixel, to ensure all pixels have the same \
    'relative' signal (i.e. a pixel with 10 data-pixels doesn't have x2 the signal of one with 5).
    2) Divided by the maximum pixel-signal, so that all signals vary between 0 and 1. This ensures that the \
    regularization weights are defined identically for any data unit or signal-to-noise ratio.
3) Raised to the power of the hyper_galaxy-parameter *signal_scale*, so the method can control the relative \
    contribution of regularization in different regions of the pixelization.
Parameters
-----------
pixels : int
The total number of pixels in the pixelization the regularization scheme is applied to.
signal_scale : float
A factor which controls how rapidly the smoothness of regularization varies from high signal regions to \
low signal regions.
regular_to_pix : ndarray
        A 1D array mapping every pixel on the grid to a pixel on the pixelization.
hyper_image : ndarray
        The image of the galaxy which is used to compute the weighted pixel signals.
"""
pixel_signals = np.zeros((pixels,))
pixel_sizes = np.zeros((pixels,))
for sub_mask_1d_index in range(len(pixelization_1d_index_for_sub_mask_1d_index)):
mask_1d_index = mask_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]
pixel_signals[
pixelization_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]
] += hyper_image[mask_1d_index]
pixel_sizes[pixelization_1d_index_for_sub_mask_1d_index[sub_mask_1d_index]] += 1
pixel_sizes[pixel_sizes == 0] = 1
pixel_signals /= pixel_sizes
pixel_signals /= np.max(pixel_signals)
return pixel_signals ** signal_scale
|
[
"[email protected]"
] | |
e2f2035ebe825909c200b197f392cd98c62b4dd1
|
93ad28897161a30027a2ac1d10c7235f6d66b2f8
|
/docker_registry_client_async/formattedsha256.py
|
c788ae5c66bd8f8ada4732201c212cd1574d8e51
|
[
"Apache-2.0"
] |
permissive
|
GitHK/docker-registry-client-async
|
0ce117374eb094c3705510d72c081d24c316bada
|
384b1b7f7abcda55258028d930b45054ab03f6c4
|
refs/heads/main
| 2023-05-11T10:50:55.432955 | 2021-03-16T18:57:25 | 2021-03-16T18:57:25 | 371,687,021 | 0 | 0 |
Apache-2.0
| 2021-05-28T12:03:12 | 2021-05-28T12:03:11 | null |
UTF-8
|
Python
| false | false | 1,292 |
py
|
#!/usr/bin/env python
"""Utility classes."""
import hashlib
class FormattedSHA256(str):
"""A algorithm prefixed SHA256 hash value."""
def __new__(cls, sha256: str):
if sha256:
sha256 = sha256.replace("sha256:", "")
if not sha256 or len(sha256) != 64:
raise ValueError(sha256)
obj = super().__new__(cls, f"sha256:{sha256}")
obj.sha256 = sha256
return obj
@staticmethod
def parse(digest: str) -> "FormattedSHA256":
"""
Initializes a FormattedSHA256 from a given SHA256 digest value.
Args:
            digest: A SHA256 digest value in the form sha256:<digest value>.
Returns:
The newly initialized object.
"""
if not digest or not digest.startswith("sha256:") or len(digest) != 71:
raise ValueError(digest)
return FormattedSHA256(digest[7:])
@staticmethod
def calculate(data: bytes) -> "FormattedSHA256":
"""
Calculates the digest value for given data.
Args:
data: The data for which to calculate the digest value.
Returns:
The FormattedSHA256 containing the corresponding digest value.
"""
return FormattedSHA256(hashlib.sha256(data).hexdigest())
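
# A minimal usage sketch (not part of the original module):
#
#   digest = FormattedSHA256.calculate(b"hello")
#   str(digest)    -> 'sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824'
#   digest.sha256  -> the bare 64-character hex value
#   FormattedSHA256.parse(str(digest)) == digest  -> True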
|
[
"[email protected]"
] | |
1ec4029f94f10baeb145b9970801a547ff3b09f6
|
de9cb0ae2f80832c0573cba63330983ed23fa0d0
|
/dict/insert_word.py
|
fcd1770448fd942442c4bf8c2d079c3408bd233f
|
[] |
no_license
|
brooot/Aid_Files
|
3fa25ccf3ac23a8ca69fdefb3ab1089534ff9d81
|
11cdbc5037cddf042b857558902fdc04513335be
|
refs/heads/master
| 2023-03-27T15:29:24.534347 | 2021-03-25T13:02:52 | 2021-03-25T13:02:52 | 200,647,728 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 867 |
py
|
import pymysql
import re
f = open('dict.txt')
db = pymysql.connect(host='localhost', user='root',
                     password='xzl1122', database='dict')
cur = db.cursor()
sql = 'insert into words (word,interpret) \
VALUES (%s,%s)'
a = 1
for line in f:
    # Get the matched (word, meaning) tuple
tup = re.findall(r'(\w+)\s+(.*)', line)[0]
    '''[0] is needed because findall returns a list of tuples made up of the
    matched subgroups, e.g. [(group1, group2), (), ..., ()].
    When the pattern contains subgroups, findall returns only what the
    subgroups matched; with two or more subgroups, each match is returned
    as a tuple, which becomes one element of the list.'''
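    # For example (illustrative): re.findall(r'(\w+)\s+(.*)', 'apple n. a fruit')
    # returns [('apple', 'n. a fruit')], so [0] picks out the (word, meaning) tuple.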
if a:
print(tup)
a = 0
try:
pass
# cur.execute(sql, tup)
# db.commit()
except Exception:
db.rollback()
f.close()
cur.close()
db.close()
|
[
"[email protected]"
] | |
8947cf626fb6b113322a37e8f0468da4d664e36b
|
ea393959886a5cd13da4539d634f2ca0bbcd06a2
|
/tensorflow/cs224d_tf_tutorial_example.py
|
c6b21c628c02abe4a105916a220217b28b672dce
|
[] |
no_license
|
zhangchizju2012/LeetCode
|
f605f35b82f16282559af71e4e61ec2629a90ebc
|
0c4c38849309124121b03cc0b4bf39071b5d1c8c
|
refs/heads/master
| 2020-04-05T12:12:14.810639 | 2018-08-09T10:24:52 | 2018-08-09T10:24:52 | 81,021,830 | 7 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,732 |
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 28 16:58:40 2018
@author: zhangchi
"""
#import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
#import seaborn
# Define input data
X_data = np.arange(100, step=.1)
y_data = 20 * X_data + 10 #X_data + 20 * np.sin(X_data/10)
# Plot input data
#plt.scatter(X_data, y_data)
# Define data size and batch size
n_samples = 1000
batch_size = 100
# Tensorflow is finicky about shapes, so resize
X_data = np.reshape(X_data, (n_samples,1))
y_data = np.reshape(y_data, (n_samples,1))
# Define placeholders for input
X = tf.placeholder(tf.float32, shape=(batch_size, 1))
y = tf.placeholder(tf.float32, shape=(batch_size, 1))
with tf.variable_scope("linear-regression"):
W = tf.get_variable("weights", (1, 1),
initializer=tf.random_normal_initializer())
b = tf.get_variable("bias", (1,),
initializer=tf.constant_initializer(0.0))
y_pred = tf.matmul(X, W) + b
loss = tf.reduce_sum((y - y_pred)**2/n_samples)
# Sample code to run gradient descent on random minibatches:
# Define optimizer operation
opt_operation = tf.train.AdamOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
# Initialize Variables in graph
sess.run(tf.initialize_all_variables())
    # Gradient descent loop for 10000 steps
for _ in range(10000):
# Select random minibatch
indices = np.random.choice(n_samples, batch_size)
X_batch, y_batch = X_data[indices], y_data[indices]
# Do gradient descent step
_, loss_val, W_, b_ = sess.run([opt_operation, loss, W, b], feed_dict={X: X_batch, y: y_batch})
#print loss_val
print(str(loss_val)+ " "+ str(W_)+" "+str(b_))
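
# Sanity check (a note, not from the original): since y_data = 20 * X_data + 10
# exactly, the fitted parameters should converge to W ~ 20 and b ~ 10, with the
# loss approaching zero.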
|
[
"[email protected]"
] | |
bebafa256688cc5b2033779112d3ba4ee6592a67
|
993ef8924418866f932396a58e3ad0c2a940ddd3
|
/Production/python/PrivateSamples/EMJ_UL17_mMed-1000_mDark-10_kappa-0p2_aligned-down_cff.py
|
1e55bb177e0248446530a17ca3b58394c483f328
|
[] |
no_license
|
TreeMaker/TreeMaker
|
48d81f6c95a17828dbb599d29c15137cd6ef009a
|
15dd7fe9e9e6f97d9e52614c900c27d200a6c45f
|
refs/heads/Run2_UL
| 2023-07-07T15:04:56.672709 | 2023-07-03T16:43:17 | 2023-07-03T16:43:17 | 29,192,343 | 16 | 92 | null | 2023-07-03T16:43:28 | 2015-01-13T13:59:30 |
Python
|
UTF-8
|
Python
| false | false | 1,961 |
py
|
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1000_mDark-10_kappa-0p2_aligned-down_n-500_part-1.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1000_mDark-10_kappa-0p2_aligned-down_n-500_part-10.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1000_mDark-10_kappa-0p2_aligned-down_n-500_part-2.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1000_mDark-10_kappa-0p2_aligned-down_n-500_part-3.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1000_mDark-10_kappa-0p2_aligned-down_n-500_part-4.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1000_mDark-10_kappa-0p2_aligned-down_n-500_part-5.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1000_mDark-10_kappa-0p2_aligned-down_n-500_part-6.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1000_mDark-10_kappa-0p2_aligned-down_n-500_part-7.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1000_mDark-10_kappa-0p2_aligned-down_n-500_part-8.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL17/step4_MINIAODv2_mMed-1000_mDark-10_kappa-0p2_aligned-down_n-500_part-9.root',
] )
|
[
"[email protected]"
] | |
cefc94e4b906f9be3a6fb61c800913cfaf3f1f37
|
ad4299d34d75f4cf346c08d1971fff0555923b7a
|
/nodeum_sdk/models/by_metadata_value_buckets.py
|
7f663cd885871b1cc0cbcc91a8a3dca52273855a
|
[
"MIT"
] |
permissive
|
nodeum-io/nodeum-sdk-python
|
16ab0b4e6dcff05c4a06829d263d27f3b66a41be
|
205536491bff507dea7be44af46202c17e7121d9
|
refs/heads/master
| 2021-07-13T19:38:24.863671 | 2020-09-01T09:56:30 | 2020-09-01T09:56:30 | 201,425,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,247 |
py
|
# coding: utf-8
"""
Nodeum API
The Nodeum API makes it easy to tap into the digital data mesh that runs across your organisation. Make requests to our API endpoints and we’ll give you everything you need to interconnect your business workflows with your storage. All production API requests are made to: http://nodeumhostname/api/ The current production version of the API is v1. **REST** The Nodeum API is a RESTful API. This means that the API is designed to allow you to get, create, update, & delete objects with the HTTP verbs GET, POST, PUT, PATCH, & DELETE. **JSON** The Nodeum API speaks exclusively in JSON. This means that you should always set the Content-Type header to application/json to ensure that your requests are properly accepted and processed by the API. **Authentication** All API calls require user-password authentication. **Cross-Origin Resource Sharing** The Nodeum API supports CORS for communicating from Javascript for these endpoints. You will need to specify an Origin URI when creating your application to allow for CORS to be whitelisted for your domain. **Pagination** Some endpoints such as File Listing return a potentially lengthy array of objects. In order to keep the response sizes manageable the API will take advantage of pagination. Pagination is a mechanism for returning a subset of the results for a request and allowing for subsequent requests to “page” through the rest of the results until the end is reached. Paginated endpoints follow a standard interface that accepts two query parameters, limit and offset, and return a payload that follows a standard form. These parameters names and their behavior are borrowed from SQL LIMIT and OFFSET keywords. **Versioning** The Nodeum API is constantly being worked on to add features, make improvements, and fix bugs. This means that you should expect changes to be introduced and documented. However, there are some changes or additions that are considered backwards-compatible and your applications should be flexible enough to handle them. These include: - Adding new endpoints to the API - Adding new attributes to the response of an existing endpoint - Changing the order of attributes of responses (JSON by definition is an object of unordered key/value pairs) **Filter parameters** When browsing a list of items, multiple filter parameters may be applied. Some operators can be added to the value as a prefix: - `=` value is equal. Default operator, may be omitted - `!=` value is different - `>` greater than - `>=` greater than or equal - `<` lower than - `>=` lower than or equal - `><` included in list, items should be separated by `|` - `!><` not included in list, items should be separated by `|` - `~` pattern matching, may include `%` (any characters) and `_` (one character) - `!~` pattern not matching, may include `%` (any characters) and `_` (one character) # noqa: E501
The version of the OpenAPI document: 2.1.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from nodeum_sdk.configuration import Configuration
class ByMetadataValueBuckets(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'buckets': 'list[ByMetadataBucket]'
}
attribute_map = {
'buckets': 'buckets'
}
def __init__(self, buckets=None, local_vars_configuration=None): # noqa: E501
"""ByMetadataValueBuckets - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._buckets = None
self.discriminator = None
if buckets is not None:
self.buckets = buckets
@property
def buckets(self):
"""Gets the buckets of this ByMetadataValueBuckets. # noqa: E501
:return: The buckets of this ByMetadataValueBuckets. # noqa: E501
:rtype: list[ByMetadataBucket]
"""
return self._buckets
@buckets.setter
def buckets(self, buckets):
"""Sets the buckets of this ByMetadataValueBuckets.
:param buckets: The buckets of this ByMetadataValueBuckets. # noqa: E501
:type: list[ByMetadataBucket]
"""
self._buckets = buckets
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ByMetadataValueBuckets):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ByMetadataValueBuckets):
return True
return self.to_dict() != other.to_dict()
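
# A minimal usage sketch (not part of the generated module):
#
#   model = ByMetadataValueBuckets(buckets=[])
#   model.to_dict()  -> {'buckets': []}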
|
[
"[email protected]"
] | |
b7ce35e05be0a2aeebc41796f49d3ef7d711954c
|
24a38619bfb7458e9124c4eddb86e67f46a7cdcd
|
/weo/dates.py
|
1b39ad9c71ad183e5cf6f79f06437c4734f4f8e2
|
[] |
no_license
|
FinTrek/weo-reader
|
8a976b08bb8d4b606ea0930507bf57529d9d094d
|
8a9f6a51e19ca38caeaa35d2c814dc73a7b1388d
|
refs/heads/master
| 2023-04-07T09:24:33.690258 | 2021-04-15T08:12:49 | 2021-04-15T08:12:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,177 |
py
|
import os
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import List, Optional, Tuple, Union
import requests
__all__ = [
"download",
"all_releases",
"make_url_countries",
"make_url_commodities",
"Date",
]
def cur_year() -> int:
return datetime.today().year
def cur_month() -> int:
return datetime.today().month
class Release(Enum):
Spring = 1
Fall = 2
@dataclass
class Date:
year: int
    release: int  # 1 = Spring, 2 = Fall (see the Release enum)
def __gt__(self, x):
return not (self <= x)
def __lt__(self, x):
return (self.year, self.release) < (x.year, x.release)
def __le__(self, x):
return (self < x) | (self == x)
def succ(d: Date) -> Date:
year, rel = d.year, d.release
if d.release == 2:
year += 1
rel = 1
else:
rel = 2
return Date(year, rel)
def first() -> Date:
return Date(2007, 2)
def current() -> Date:
y = cur_year()
m = cur_month()
if m >= 10:
return Date(y, 2)
elif 4 <= m < 10:
return Date(y, 1)
else:
return Date(y - 1, 2)
def month(d: Date) -> int:
if d == Date(2011, 2):
return 9
elif d.release == 2:
return 10
else:
return 4
def month_str(d: Date) -> str:
return {4: "Apr", 9: "Sep", 10: "Oct"}[month(d)]
def name(d: Date) -> str:
return f"{d.year}-{month_str(d)} WEO dataset"
def period_str(d: Date) -> str:
return str(d.release).zfill(2)
base_url = "https://www.imf.org/-/media/Files/Publications/WEO/WEO-Database"
def filename(year, month, prefix):
return f"WEO{month}{year}{prefix}.xls"
def url_after_2020(base_url, year, month, period_marker, prefix):
fn = filename(year, month, prefix)
return f"{base_url}/{year}/{period_marker}/{fn}"
def url_before_2020(base_url, year, month, period_marker, prefix):
fn = filename(year, month, prefix)
return f"{base_url}/{year}/{fn}"
def make_url(d: Date, prefix: str, base_url: str = base_url):
year = d.year
month = month_str(d)
period_marker = period_str(d)
args = base_url, year, month, period_marker, prefix
if d == Date(2021, 1):
return "https://www.imf.org/-/media/Files/Publications/WEO/WEO-Database/2021/WEOApr2021all.ashx"
if d >= Date(2020, 2):
return url_after_2020(*args)
else:
return url_before_2020(*args)
def make_url_countries(d: Date):
return make_url(d, prefix="all")
def make_url_commodities(d: Date):
return make_url(d, prefix="alla")
def yield_dates():
d = first()
last = current()
while d <= last:
yield d
d = succ(d)
def all_releases() -> List[Tuple[int, int]]:
"""Provide all (year, release) pairs to use in bulk download."""
return [(d.year, d.release) for d in yield_dates()]
def is_future(d: Date):
return d > current()
def is_ancient(d: Date):
return d < first()
class DateError(ValueError):
pass
def validate(d: Date):
if is_ancient(d):
raise DateError(f"Cannot work with date earlier than October 2007, got {d}")
if is_future(d):
raise DateError(f"The date is in the future: {d}")
def get_season(tag: Union[int, str]) -> int:
if isinstance(tag, str):
tag = tag.lower()[:3]
if tag in [1, 2]:
return tag
elif tag in ["apr", 4]:
return 1
elif tag in ["oct", "sep", 9, 10]:
return 2
else:
raise DateError(tag)
def default_filename(d: Date):
return f"weo_{d.year}_{d.release}.csv"
def get_date(year: int, release: Union[int, str]):
release = get_season(release)
d = Date(year, release)
validate(d)
return d
def locate(d: Date, filename: Optional[str] = None, directory: Optional[str] = None):
if filename is None:
filename = default_filename(d)
if directory is None:
path = filename
else:
path = os.path.join(directory, filename)
return os.path.normpath(path)
def curl(path: str, url: str):
r = requests.get(url, stream=True)
iterable = r.iter_content(chunk_size=1024)
with open(path, "wb") as f:
for chunk in iterable:
if chunk: # filter out keep-alive new chunks
f.write(chunk)
print(path, size_str(path))
return path
def accept(
year: int,
release: Union[int, str],
filename: Optional[str] = None,
directory: Optional[str] = None,
):
d = get_date(year, release)
path = locate(d, filename, directory)
url = make_url_countries(d)
return d, path, url
def download(
year: int,
release: Union[int, str],
filename: Optional[str] = None,
directory: str = ".",
fetch=curl,
):
"""Download dataset from IMF WEO website by release.
from weo import download
download(2020, 'Oct', 'weo.csv')
Equivalent to:
curl -o weo.csv https://www.imf.org/-/media/Files/Publications/WEO/WEO-Database/2020/02/WEOOct2020all.xls
To download all releases (folder 'weo_data' must exist):
from weo import all_releases
for (year, release) in all_releases():
download(year, release, directory='weo_data')
Parameters
----------
year : int
Year of WEO release.
release : int or str
For spring WEO release use 1 or 'Apr'
For fall WEO release use 2, 'Oct' or (in 2011) - 'Sep'.
filename : str
Filename where to save file.
directory:
Directory where to write file.
fetch: callable, optional
Used for testing.
Returns
-------
path, url
"""
d = get_date(year, release)
path = locate(d, filename, directory)
url = make_url_countries(d)
if os.path.exists(path):
print("Already downloaded", name(d), "at", path)
else:
fetch(path, url)
print("Downloaded", name(d))
return path, url
def mb(bytes: int):
"""Express bytes in Mb"""
x = bytes / (2 ** (10 * 2))
return round(x, 1)
def size(path: str) -> int:
return Path(path).stat().st_size
def size_str(path: str) -> str:
x = mb(size(path))
return f"{x}Mb"
|
[
"[email protected]"
] | |
daa5e171eace2bc81c2aa7d6a425aaead522bd20
|
6b551bec528a1d6544201d3c6d86835e885343b5
|
/deep_privacy/config/__init__.py
|
e890ce1d31413b02d716c5a49fc2d643da63d24f
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
hukkelas/DeepPrivacy
|
9471c8e9389828aa09330905081205b061161d81
|
5ee3f1b0608f03ac54d5694b6421f6132cb63f0e
|
refs/heads/master
| 2023-08-16T00:41:02.366235 | 2023-03-28T06:23:34 | 2023-03-28T06:23:34 | 206,106,232 | 1,288 | 194 |
MIT
| 2021-08-18T08:21:33 | 2019-09-03T15:08:27 |
Python
|
UTF-8
|
Python
| false | false | 188 |
py
|
import argparse
from .base import Config
def default_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument("config_path")
return parser
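
# Usage sketch (illustrative): parsing a config path from the command line.
#   args = default_parser().parse_args(["configs/example.py"])
#   args.config_path  -> "configs/example.py"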
|
[
"[email protected]"
] | |
437e08dbe2b55f9fa56c1e729dcc134bed63ab13
|
58f38f1d69d4bfc650ad18e0045c36ae29c9d84a
|
/Django基础部分代码/chapter11/authenticate_demo/front/models.py
|
fb099528d5cfdb0ff7e815a2b0d07bce8ee2011e
|
[] |
no_license
|
zjf201811/DjangoWebProject
|
0670c61b89387901089bf67cf2423d9341f69913
|
fab15784fb326ba4517951e180418ea54de03afe
|
refs/heads/master
| 2020-04-18T12:03:08.798484 | 2019-05-06T03:59:46 | 2019-05-06T03:59:46 | 167,522,193 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,094 |
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser,BaseUserManager
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth.base_user import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth import get_user_model
# If a model is a proxy model,
# no new fields can be added to it.
# User.objects.all()
# Person.objects.all()
# The two queries above are equivalent.
# class Person(User):
# telephone = models.CharField(max_length=11)
# class Meta:
# proxy = True
#
# @classmethod
# def get_blacklist(cls):
# return cls.objects.filter(is_active=False)
# class UserExtension(models.Model):
# user = models.OneToOneField(User,on_delete=models.CASCADE,related_name='extension')
# telephone = models.CharField(max_length=11)
# school = models.CharField(max_length=100)
#
# @receiver(post_save,sender=User)
# def handler_user_extension(sender,instance,created,**kwargs):
# if created:
# UserExtension.objects.create(user=instance)
# else:
# instance.extension.save()
class UserManager(BaseUserManager):
def _create_user(self,telephone,username,password,**kwargs):
if not telephone:
            raise ValueError('A telephone number is required!')
if not password:
            raise ValueError('A password is required!')
user = self.model(telephone=telephone,username=username,**kwargs)
user.set_password(password)
user.save()
return user
def create_user(self,telephone,username,password,**kwargs):
kwargs['is_superuser'] = False
return self._create_user(telephone=telephone,username=username,password=password,**kwargs)
def create_superuser(self,telephone,username,password,**kwargs):
kwargs['is_superuser'] = True
return self._create_user(telephone=telephone,username=username,password=password,**kwargs)
# class User(AbstractUser):
# telephone = models.CharField(max_length=11,unique=True)
# school = models.CharField(max_length=100)
#
# USERNAME_FIELD = 'telephone'
#
# objects = UserManager()
class User(AbstractBaseUser,PermissionsMixin):
telephone = models.CharField(max_length=11,unique=True)
email = models.CharField(max_length=100,unique=True)
username = models.CharField(max_length=100)
is_active = models.BooleanField(default=True)
USERNAME_FIELD = 'telephone'
REQUIRED_FIELDS = []
objects = UserManager()
def get_full_name(self):
return self.username
def get_short_name(self):
return self.username
# Remember:
# if you customize the User model,
# it must be created before the first run of the migrate command.
class Article(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
author = models.ForeignKey(get_user_model(),on_delete=models.CASCADE)
class Meta:
permissions = [
            ('view_article', 'Permission to view articles!')
]
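
# A usage sketch (not from the original file): creating users through the
# custom manager defined above (USERNAME_FIELD is 'telephone').
#   User.objects.create_user(telephone='13800000000', username='demo', password='secret')
#   User.objects.create_superuser(telephone='13900000000', username='admin', password='secret')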
|
[
"[email protected]"
] | |
b32ecefa6149e3e45d3aa903f5f8953541a5d1af
|
a9ac3c537fc778b34cb32d4528e2d1190e65e19e
|
/shabanipy/version.py
|
964651219a1dc54208efb4ad35ddff70d2eb6d1e
|
[
"MIT"
] |
permissive
|
wms269/shabanipy
|
9f770cfdf113ca8e8af69cd793be2f8bf9b0141a
|
1e751631e031c528e18d5e0d8ff4fa1457f4107e
|
refs/heads/master
| 2022-09-23T15:43:43.875608 | 2020-04-09T17:49:24 | 2020-04-09T17:49:24 | 265,638,022 | 1 | 0 |
MIT
| 2020-05-20T17:25:40 | 2020-05-20T17:25:39 | null |
UTF-8
|
Python
| false | false | 1,508 |
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2018 by ShabaniPy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the MIT license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""The version information for this release of ShabaniPy.
"""
from collections import namedtuple
# The major release number. Differences in the major number indicate
# possibly large differences in API.
MAJOR = 0
# The minor release number. Differences in the minor number indicate
# possibly small differences in the API, but these changes will come
# backwards compatibility support when possible. Minor releases are
# typically used for large feature additions.
MINOR = 1
# The micro release number. The micro release number is incremented
# for bug fix releases and small feature additions.
MICRO = 2
# The status indicates if this is a development or pre-release version
STATUS = ''
#: A namedtuple of the version info for the current release.
version_info = namedtuple('version_info', 'major minor micro status')
version_info = version_info(MAJOR, MINOR, MICRO, STATUS)
# Remove everything but the 'version_info' from this module.
del namedtuple, MAJOR, MINOR, MICRO, STATUS
__version__ = ('{0}.{1}.{2}'.format(*version_info) if not version_info.status
else '{0}.{1}.{2}.{3}'.format(*version_info))
|
[
"[email protected]"
] | |
93225b0a14654568b4b5d579a6201445d80399e2
|
cd5746f8cc7aee1f20606a65b4fae0d5e8ee78dc
|
/Python Books/scikit-learn-Cookbook/code/chapter1/swiss_roll.py
|
c73c474a387fc3b5967f5bbab6fa6a4afc785319
|
[] |
no_license
|
theGreenJedi/Path
|
df24fca355590efef0c6cb5c52e7216c6b5d2464
|
b5ed2805dbb046480929e49e550bfd8af5bb4d6f
|
refs/heads/master
| 2023-07-27T14:23:37.694546 | 2021-07-16T01:38:55 | 2021-07-16T01:38:55 | 87,686,563 | 8 | 2 | null | 2023-07-11T22:49:03 | 2017-04-09T05:57:30 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 429 |
py
|
import sklearn.datasets as d
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import numpy as np
swiss_roll = d.make_swiss_roll()
f = plt.figure(figsize=(8, 4))
ax = f.add_subplot(111, projection='3d')
ax.set_title("A swiss roll with Y flattened.")
colors = np.array(['r', 'g', 'b'])
X = swiss_roll[0]
ax.scatter(X[:, 0], np.zeros_like(X[:, 1]), X[:, 2], alpha=0.75)
f.savefig("swiss_roll.png")
|
[
"[email protected]"
] | |
88d76acedf188298f224f8ab1e5713387b2cc660
|
8dde6f201657946ad0cfeacab41831f681e6bc6f
|
/digit_sum.py
|
0325ac29e4631c9bb8dd93c9c6043d1056304b3f
|
[] |
no_license
|
peraktong/LEETCODE_Jason
|
c5d4a524ba69b1b089f18ce4a53dc8f50ccbb88c
|
06961cc468211b9692cd7a889ee38d1cd4e1d11e
|
refs/heads/master
| 2022-04-12T11:34:38.738731 | 2020-04-07T21:17:04 | 2020-04-07T21:17:04 | 219,398,022 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 977 |
py
|
import collections
def getCounter(i):
    """Return a Counter mapping each digit sum to how many integers
    in [0, i] have that digit sum (a digit-DP over decimal positions)."""
# if i < 10:
# return collections.Counter(range(i+1))
threshold = 10
counters = [collections.Counter({0: 1})]
while i >= threshold:
cur_counter = counters[-1]
next_counter = collections.Counter()
for num in cur_counter:
for leading in range(10):
next_counter[num + leading] += cur_counter[num]
counters.append(next_counter)
threshold *= 10
threshold //= 10
res = collections.Counter()
leading = 0
i += 1
while i:
max_digit = i // threshold
cur = counters.pop()
for num in cur:
for digit in range(max_digit):
res[leading + digit + num] += cur[num]
leading += max_digit
i %= threshold
threshold //= 10
return res
def waysToChooseSum(i, j):
    """Over the integers in [i, j], return the highest frequency of any
    digit sum together with how many digit sums attain that frequency."""
c = getCounter(j) - getCounter(i - 1)
s = max(c.values())
return s, list(c.values()).count(s)
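
# A quick sanity check (illustrative, not from the original):
#   getCounter(10)          -> digit-sum counts over 0..10; e.g. sum 1 occurs twice (1 and 10)
#   waysToChooseSum(1, 10)  -> (2, 1): the top frequency is 2, attained by one digit sum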
|
[
"[email protected]"
] | |
838aebd46b9d1c892d43774d08246182238c65fb
|
02682d8426a7738740d13b695f93223ddc1054a3
|
/enabler/apps.py
|
ad8f1b1563d8c11f05ecbb547b2481dda0ad1cad
|
[] |
no_license
|
math2001/cmt_manager
|
f8898d16d8e1e0f90651bd8e006b82dd98288a89
|
d6a690b3885e9db2ff1921695ec7b2f31747a39f
|
refs/heads/master
| 2023-01-31T03:20:52.023640 | 2020-12-12T03:03:29 | 2020-12-12T03:03:29 | 320,718,292 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 89 |
py
|
from django.apps import AppConfig
class EnablerConfig(AppConfig):
name = "enabler"
|
[
"[email protected]"
] | |
19ac95fe5874ccfc89b7bb5c896250cdb88931c4
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2020_10_01_preview/models/_models_py3.py
|
a7053c060d6e53b3f37a65edb20344800cc44851
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080 | 2023-06-08T02:53:04 | 2023-06-08T02:53:04 | 222,384,897 | 1 | 0 |
MIT
| 2023-09-08T08:38:48 | 2019-11-18T07:09:24 |
Python
|
UTF-8
|
Python
| false | false | 159,358 |
py
|
# coding=utf-8
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, List, Optional, TYPE_CHECKING, Union
from ... import _serialization
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from .. import models as _models
class ApprovalSettings(_serialization.Model):
"""The approval settings.
:ivar is_approval_required: Determine whether approval is required or not.
:vartype is_approval_required: bool
:ivar is_approval_required_for_extension: Determine whether approval is required for assignment
extension.
:vartype is_approval_required_for_extension: bool
    :ivar is_requestor_justification_required: Determine whether requestor justification is required.
:vartype is_requestor_justification_required: bool
:ivar approval_mode: The type of rule. Known values are: "SingleStage", "Serial", "Parallel",
and "NoApproval".
:vartype approval_mode: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.ApprovalMode
:ivar approval_stages: The approval stages of the request.
:vartype approval_stages:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.ApprovalStage]
"""
_attribute_map = {
"is_approval_required": {"key": "isApprovalRequired", "type": "bool"},
"is_approval_required_for_extension": {"key": "isApprovalRequiredForExtension", "type": "bool"},
"is_requestor_justification_required": {"key": "isRequestorJustificationRequired", "type": "bool"},
"approval_mode": {"key": "approvalMode", "type": "str"},
"approval_stages": {"key": "approvalStages", "type": "[ApprovalStage]"},
}
def __init__(
self,
*,
is_approval_required: Optional[bool] = None,
is_approval_required_for_extension: Optional[bool] = None,
is_requestor_justification_required: Optional[bool] = None,
approval_mode: Optional[Union[str, "_models.ApprovalMode"]] = None,
approval_stages: Optional[List["_models.ApprovalStage"]] = None,
**kwargs: Any
) -> None:
"""
:keyword is_approval_required: Determine whether approval is required or not.
:paramtype is_approval_required: bool
:keyword is_approval_required_for_extension: Determine whether approval is required for
assignment extension.
:paramtype is_approval_required_for_extension: bool
        :keyword is_requestor_justification_required: Determine whether requestor justification is
         required.
:paramtype is_requestor_justification_required: bool
:keyword approval_mode: The type of rule. Known values are: "SingleStage", "Serial",
"Parallel", and "NoApproval".
:paramtype approval_mode: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.ApprovalMode
:keyword approval_stages: The approval stages of the request.
:paramtype approval_stages:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.ApprovalStage]
"""
super().__init__(**kwargs)
self.is_approval_required = is_approval_required
self.is_approval_required_for_extension = is_approval_required_for_extension
self.is_requestor_justification_required = is_requestor_justification_required
self.approval_mode = approval_mode
self.approval_stages = approval_stages
class ApprovalStage(_serialization.Model):
"""The approval stage.
:ivar approval_stage_time_out_in_days: The time in days when approval request would be timed
out.
:vartype approval_stage_time_out_in_days: int
    :ivar is_approver_justification_required: Determine whether the approver needs to provide
     justification for the decision.
:vartype is_approver_justification_required: bool
:ivar escalation_time_in_minutes: The time in minutes when the approval request would be
escalated if the primary approver does not approves.
:vartype escalation_time_in_minutes: int
:ivar primary_approvers: The primary approver of the request.
:vartype primary_approvers: list[~azure.mgmt.authorization.v2020_10_01_preview.models.UserSet]
:ivar is_escalation_enabled: The value determine whether escalation feature is enabled.
:vartype is_escalation_enabled: bool
:ivar escalation_approvers: The escalation approver of the request.
:vartype escalation_approvers:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.UserSet]
"""
_attribute_map = {
"approval_stage_time_out_in_days": {"key": "approvalStageTimeOutInDays", "type": "int"},
"is_approver_justification_required": {"key": "isApproverJustificationRequired", "type": "bool"},
"escalation_time_in_minutes": {"key": "escalationTimeInMinutes", "type": "int"},
"primary_approvers": {"key": "primaryApprovers", "type": "[UserSet]"},
"is_escalation_enabled": {"key": "isEscalationEnabled", "type": "bool"},
"escalation_approvers": {"key": "escalationApprovers", "type": "[UserSet]"},
}
def __init__(
self,
*,
approval_stage_time_out_in_days: Optional[int] = None,
is_approver_justification_required: Optional[bool] = None,
escalation_time_in_minutes: Optional[int] = None,
primary_approvers: Optional[List["_models.UserSet"]] = None,
is_escalation_enabled: Optional[bool] = None,
escalation_approvers: Optional[List["_models.UserSet"]] = None,
**kwargs: Any
) -> None:
"""
        :keyword approval_stage_time_out_in_days: The time in days when the approval request would
         be timed out.
        :paramtype approval_stage_time_out_in_days: int
        :keyword is_approver_justification_required: Determines whether the approver needs to
         provide justification for their decision.
        :paramtype is_approver_justification_required: bool
        :keyword escalation_time_in_minutes: The time in minutes when the approval request would be
         escalated if the primary approver does not approve.
        :paramtype escalation_time_in_minutes: int
        :keyword primary_approvers: The primary approver of the request.
        :paramtype primary_approvers:
         list[~azure.mgmt.authorization.v2020_10_01_preview.models.UserSet]
        :keyword is_escalation_enabled: Determines whether the escalation feature is enabled.
:paramtype is_escalation_enabled: bool
:keyword escalation_approvers: The escalation approver of the request.
:paramtype escalation_approvers:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.UserSet]
"""
super().__init__(**kwargs)
self.approval_stage_time_out_in_days = approval_stage_time_out_in_days
self.is_approver_justification_required = is_approver_justification_required
self.escalation_time_in_minutes = escalation_time_in_minutes
self.primary_approvers = primary_approvers
self.is_escalation_enabled = is_escalation_enabled
self.escalation_approvers = escalation_approvers
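

# Illustrative usage sketch (not part of the generated surface): shows how an
# ApprovalStage composes into ApprovalSettings for a single-stage flow. All
# field values below are hypothetical.
def _example_single_stage_approval_settings() -> ApprovalSettings:
    """Build ApprovalSettings with one approval stage (illustrative only)."""
    stage = ApprovalStage(
        approval_stage_time_out_in_days=1,  # request times out after a day
        is_approver_justification_required=True,  # approver must justify the decision
        escalation_time_in_minutes=60,  # escalate after an hour without a decision
        is_escalation_enabled=False,  # no escalation approvers configured here
    )
    return ApprovalSettings(
        is_approval_required=True,
        approval_mode="SingleStage",  # one of the known ApprovalMode values
        approval_stages=[stage],
    )
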
class CloudErrorBody(_serialization.Model):
"""An error response from the service.
:ivar code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:vartype code: str
:ivar message: A message describing the error, intended to be suitable for display in a user
interface.
:vartype message: str
"""
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
}
def __init__(self, *, code: Optional[str] = None, message: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword code: An identifier for the error. Codes are invariant and are intended to be consumed
programmatically.
:paramtype code: str
:keyword message: A message describing the error, intended to be suitable for display in a user
interface.
:paramtype message: str
"""
super().__init__(**kwargs)
self.code = code
self.message = message
class EligibleChildResource(_serialization.Model):
"""Eligible child resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The resource scope Id.
:vartype id: str
:ivar name: The resource name.
:vartype name: str
:ivar type: The resource type.
:vartype type: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class EligibleChildResourcesListResult(_serialization.Model):
"""Eligible child resources list operation result.
:ivar value: Eligible child resource list.
:vartype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.EligibleChildResource]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[EligibleChildResource]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.EligibleChildResource"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: Eligible child resource list.
:paramtype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.EligibleChildResource]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
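

# Illustrative sketch (not part of the generated surface): the *ListResult
# models page through results via ``next_link``. ``fetch_page`` is a
# hypothetical stand-in for whatever retrieves and deserializes one page
# from a continuation URL.
def _example_iterate_eligible_children(
    first_page: EligibleChildResourcesListResult,
    fetch_page,  # hypothetical: Callable[[str], EligibleChildResourcesListResult]
) -> List[EligibleChildResource]:
    """Walk every page of an eligible child resources listing."""
    resources: List[EligibleChildResource] = []
    page: Optional[EligibleChildResourcesListResult] = first_page
    while page is not None:
        resources.extend(page.value or [])
        # Follow the continuation link until the service stops returning one.
        page = fetch_page(page.next_link) if page.next_link else None
    return resources
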
class ErrorAdditionalInfo(_serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: JSON
"""
_validation = {
"type": {"readonly": True},
"info": {"readonly": True},
}
_attribute_map = {
"type": {"key": "type", "type": "str"},
"info": {"key": "info", "type": "object"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(_serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~azure.mgmt.authorization.v2020_10_01_preview.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.ErrorAdditionalInfo]
"""
_validation = {
"code": {"readonly": True},
"message": {"readonly": True},
"target": {"readonly": True},
"details": {"readonly": True},
"additional_info": {"readonly": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
"target": {"key": "target", "type": "str"},
"details": {"key": "details", "type": "[ErrorDetail]"},
"additional_info": {"key": "additionalInfo", "type": "[ErrorAdditionalInfo]"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(_serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed
operations. (This also follows the OData error response format.).
:ivar error: The error object.
:vartype error: ~azure.mgmt.authorization.v2020_10_01_preview.models.ErrorDetail
"""
_attribute_map = {
"error": {"key": "error", "type": "ErrorDetail"},
}
def __init__(self, *, error: Optional["_models.ErrorDetail"] = None, **kwargs: Any) -> None:
"""
:keyword error: The error object.
:paramtype error: ~azure.mgmt.authorization.v2020_10_01_preview.models.ErrorDetail
"""
super().__init__(**kwargs)
self.error = error
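

# Illustrative sketch (not part of the generated surface): ErrorDetail is
# recursive through its ``details`` list, so a small walk helps when logging
# failed operations. This assumes the vendored _serialization.Model keeps
# msrest's ``from_dict`` classmethod; the payload is the standard ARM error body.
def _example_collect_error_codes(payload: dict) -> List[str]:
    """Deserialize an ARM error payload and collect every nested error code."""
    # e.g. payload == {"error": {"code": "...", "message": "...", "details": [...]}}
    response = ErrorResponse.from_dict(payload)

    def walk(detail: Optional[ErrorDetail]) -> List[str]:
        if detail is None:
            return []
        codes = [detail.code] if detail.code else []
        for child in detail.details or []:
            codes.extend(walk(child))
        return codes

    return walk(response.error)
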
class ExpandedProperties(_serialization.Model):
"""ExpandedProperties.
:ivar scope: Details of the resource scope.
:vartype scope: ~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedPropertiesScope
:ivar role_definition: Details of role definition.
:vartype role_definition:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedPropertiesRoleDefinition
:ivar principal: Details of the principal.
:vartype principal:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedPropertiesPrincipal
"""
_attribute_map = {
"scope": {"key": "scope", "type": "ExpandedPropertiesScope"},
"role_definition": {"key": "roleDefinition", "type": "ExpandedPropertiesRoleDefinition"},
"principal": {"key": "principal", "type": "ExpandedPropertiesPrincipal"},
}
def __init__(
self,
*,
scope: Optional["_models.ExpandedPropertiesScope"] = None,
role_definition: Optional["_models.ExpandedPropertiesRoleDefinition"] = None,
principal: Optional["_models.ExpandedPropertiesPrincipal"] = None,
**kwargs: Any
) -> None:
"""
:keyword scope: Details of the resource scope.
:paramtype scope: ~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedPropertiesScope
:keyword role_definition: Details of role definition.
:paramtype role_definition:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedPropertiesRoleDefinition
:keyword principal: Details of the principal.
:paramtype principal:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedPropertiesPrincipal
"""
super().__init__(**kwargs)
self.scope = scope
self.role_definition = role_definition
self.principal = principal
class ExpandedPropertiesPrincipal(_serialization.Model):
"""Details of the principal.
:ivar id: Id of the principal.
:vartype id: str
:ivar display_name: Display name of the principal.
:vartype display_name: str
:ivar email: Email id of the principal.
:vartype email: str
:ivar type: Type of the principal.
:vartype type: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"email": {"key": "email", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
display_name: Optional[str] = None,
email: Optional[str] = None,
type: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword id: Id of the principal.
:paramtype id: str
:keyword display_name: Display name of the principal.
:paramtype display_name: str
:keyword email: Email id of the principal.
:paramtype email: str
:keyword type: Type of the principal.
:paramtype type: str
"""
super().__init__(**kwargs)
self.id = id
self.display_name = display_name
self.email = email
self.type = type
class ExpandedPropertiesRoleDefinition(_serialization.Model):
"""Details of role definition.
:ivar id: Id of the role definition.
:vartype id: str
:ivar display_name: Display name of the role definition.
:vartype display_name: str
:ivar type: Type of the role definition.
:vartype type: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
display_name: Optional[str] = None,
type: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword id: Id of the role definition.
:paramtype id: str
:keyword display_name: Display name of the role definition.
:paramtype display_name: str
:keyword type: Type of the role definition.
:paramtype type: str
"""
super().__init__(**kwargs)
self.id = id
self.display_name = display_name
self.type = type
class ExpandedPropertiesScope(_serialization.Model):
"""Details of the resource scope.
:ivar id: Scope id of the resource.
:vartype id: str
:ivar display_name: Display name of the resource.
:vartype display_name: str
:ivar type: Type of the resource.
:vartype type: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
display_name: Optional[str] = None,
type: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword id: Scope id of the resource.
:paramtype id: str
:keyword display_name: Display name of the resource.
:paramtype display_name: str
:keyword type: Type of the resource.
:paramtype type: str
"""
super().__init__(**kwargs)
self.id = id
self.display_name = display_name
self.type = type
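

# Illustrative sketch (not part of the generated surface): ExpandedProperties
# bundles the three detail models above. The service normally populates it, but
# composing it by hand shows how the pieces relate; all IDs are hypothetical.
def _example_expanded_properties() -> ExpandedProperties:
    """Compose scope, role definition and principal details (illustrative only)."""
    return ExpandedProperties(
        scope=ExpandedPropertiesScope(
            id="/subscriptions/00000000-0000-0000-0000-000000000000",
            display_name="Example Subscription",
            type="subscription",
        ),
        role_definition=ExpandedPropertiesRoleDefinition(
            id="/providers/Microsoft.Authorization/roleDefinitions/11111111-1111-1111-1111-111111111111",
            display_name="Reader",
            type="BuiltInRole",
        ),
        principal=ExpandedPropertiesPrincipal(
            id="22222222-2222-2222-2222-222222222222",
            display_name="Example User",
            email="[email protected]",
            type="User",
        ),
    )
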
class Permission(_serialization.Model):
"""Role definition permissions.
:ivar actions: Allowed actions.
:vartype actions: list[str]
:ivar not_actions: Denied actions.
:vartype not_actions: list[str]
:ivar data_actions: Allowed Data actions.
:vartype data_actions: list[str]
:ivar not_data_actions: Denied Data actions.
:vartype not_data_actions: list[str]
"""
_attribute_map = {
"actions": {"key": "actions", "type": "[str]"},
"not_actions": {"key": "notActions", "type": "[str]"},
"data_actions": {"key": "dataActions", "type": "[str]"},
"not_data_actions": {"key": "notDataActions", "type": "[str]"},
}
def __init__(
self,
*,
actions: Optional[List[str]] = None,
not_actions: Optional[List[str]] = None,
data_actions: Optional[List[str]] = None,
not_data_actions: Optional[List[str]] = None,
**kwargs: Any
) -> None:
"""
:keyword actions: Allowed actions.
:paramtype actions: list[str]
:keyword not_actions: Denied actions.
:paramtype not_actions: list[str]
:keyword data_actions: Allowed Data actions.
:paramtype data_actions: list[str]
:keyword not_data_actions: Denied Data actions.
:paramtype not_data_actions: list[str]
"""
super().__init__(**kwargs)
self.actions = actions
self.not_actions = not_actions
self.data_actions = data_actions
self.not_data_actions = not_data_actions
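

# Illustrative sketch (not part of the generated surface): Permission models an
# allow list (``actions``/``data_actions``) narrowed by a deny list
# (``not_actions``/``not_data_actions``). The action strings below are
# hypothetical but follow the usual ARM "provider/resourceType/operation" form.
def _example_storage_permission() -> Permission:
    """Grant all storage control-plane actions except listing account keys."""
    return Permission(
        actions=["Microsoft.Storage/*"],
        not_actions=["Microsoft.Storage/storageAccounts/listKeys/action"],
        data_actions=["Microsoft.Storage/storageAccounts/blobServices/containers/blobs/read"],
        not_data_actions=[],
    )
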
class PolicyAssignmentProperties(_serialization.Model):
"""PolicyAssignmentProperties.
:ivar scope: Details of the resource scope.
:vartype scope:
~azure.mgmt.authorization.v2020_10_01_preview.models.PolicyAssignmentPropertiesScope
:ivar role_definition: Details of role definition.
:vartype role_definition:
~azure.mgmt.authorization.v2020_10_01_preview.models.PolicyAssignmentPropertiesRoleDefinition
:ivar policy: Details of the policy.
:vartype policy:
~azure.mgmt.authorization.v2020_10_01_preview.models.PolicyAssignmentPropertiesPolicy
"""
_attribute_map = {
"scope": {"key": "scope", "type": "PolicyAssignmentPropertiesScope"},
"role_definition": {"key": "roleDefinition", "type": "PolicyAssignmentPropertiesRoleDefinition"},
"policy": {"key": "policy", "type": "PolicyAssignmentPropertiesPolicy"},
}
def __init__(
self,
*,
scope: Optional["_models.PolicyAssignmentPropertiesScope"] = None,
role_definition: Optional["_models.PolicyAssignmentPropertiesRoleDefinition"] = None,
policy: Optional["_models.PolicyAssignmentPropertiesPolicy"] = None,
**kwargs: Any
) -> None:
"""
:keyword scope: Details of the resource scope.
:paramtype scope:
~azure.mgmt.authorization.v2020_10_01_preview.models.PolicyAssignmentPropertiesScope
:keyword role_definition: Details of role definition.
:paramtype role_definition:
~azure.mgmt.authorization.v2020_10_01_preview.models.PolicyAssignmentPropertiesRoleDefinition
:keyword policy: Details of the policy.
:paramtype policy:
~azure.mgmt.authorization.v2020_10_01_preview.models.PolicyAssignmentPropertiesPolicy
"""
super().__init__(**kwargs)
self.scope = scope
self.role_definition = role_definition
self.policy = policy
class PolicyAssignmentPropertiesPolicy(_serialization.Model):
"""Details of the policy.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Id of the policy.
:vartype id: str
    :ivar last_modified_by: The principal that last modified it.
:vartype last_modified_by: ~azure.mgmt.authorization.v2020_10_01_preview.models.Principal
:ivar last_modified_date_time: The last modified date time.
:vartype last_modified_date_time: ~datetime.datetime
"""
_validation = {
"last_modified_by": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"last_modified_by": {"key": "lastModifiedBy", "type": "Principal"},
"last_modified_date_time": {"key": "lastModifiedDateTime", "type": "iso-8601"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
last_modified_date_time: Optional[datetime.datetime] = None,
**kwargs: Any
) -> None:
"""
:keyword id: Id of the policy.
:paramtype id: str
:keyword last_modified_date_time: The last modified date time.
:paramtype last_modified_date_time: ~datetime.datetime
"""
super().__init__(**kwargs)
self.id = id
self.last_modified_by = None
self.last_modified_date_time = last_modified_date_time
class PolicyAssignmentPropertiesRoleDefinition(_serialization.Model):
"""Details of role definition.
:ivar id: Id of the role definition.
:vartype id: str
:ivar display_name: Display name of the role definition.
:vartype display_name: str
:ivar type: Type of the role definition.
:vartype type: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
display_name: Optional[str] = None,
type: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword id: Id of the role definition.
:paramtype id: str
:keyword display_name: Display name of the role definition.
:paramtype display_name: str
:keyword type: Type of the role definition.
:paramtype type: str
"""
super().__init__(**kwargs)
self.id = id
self.display_name = display_name
self.type = type
class PolicyAssignmentPropertiesScope(_serialization.Model):
"""Details of the resource scope.
:ivar id: Scope id of the resource.
:vartype id: str
:ivar display_name: Display name of the resource.
:vartype display_name: str
:ivar type: Type of the resource.
:vartype type: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
display_name: Optional[str] = None,
type: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword id: Scope id of the resource.
:paramtype id: str
:keyword display_name: Display name of the resource.
:paramtype display_name: str
:keyword type: Type of the resource.
:paramtype type: str
"""
super().__init__(**kwargs)
self.id = id
self.display_name = display_name
self.type = type
class PolicyProperties(_serialization.Model):
"""PolicyProperties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar scope: Details of the resource scope.
:vartype scope: ~azure.mgmt.authorization.v2020_10_01_preview.models.PolicyPropertiesScope
"""
_validation = {
"scope": {"readonly": True},
}
_attribute_map = {
"scope": {"key": "scope", "type": "PolicyPropertiesScope"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.scope = None
class PolicyPropertiesScope(_serialization.Model):
"""Details of the resource scope.
:ivar id: Scope id of the resource.
:vartype id: str
:ivar display_name: Display name of the resource.
:vartype display_name: str
:ivar type: Type of the resource.
:vartype type: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"type": {"key": "type", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
display_name: Optional[str] = None,
type: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword id: Scope id of the resource.
:paramtype id: str
:keyword display_name: Display name of the resource.
:paramtype display_name: str
:keyword type: Type of the resource.
:paramtype type: str
"""
super().__init__(**kwargs)
self.id = id
self.display_name = display_name
self.type = type
class Principal(_serialization.Model):
"""The name of the entity last modified it.
:ivar id: The id of the principal made changes.
:vartype id: str
:ivar display_name: The name of the principal made changes.
:vartype display_name: str
:ivar type: Type of principal such as user , group etc.
:vartype type: str
:ivar email: Email of principal.
:vartype email: str
"""
_attribute_map = {
"id": {"key": "id", "type": "str"},
"display_name": {"key": "displayName", "type": "str"},
"type": {"key": "type", "type": "str"},
"email": {"key": "email", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
display_name: Optional[str] = None,
type: Optional[str] = None,
email: Optional[str] = None,
**kwargs: Any
) -> None:
"""
        :keyword id: The id of the principal that made changes.
        :paramtype id: str
        :keyword display_name: The name of the principal that made changes.
        :paramtype display_name: str
        :keyword type: Type of the principal, such as user, group, etc.
        :paramtype type: str
        :keyword email: Email of the principal.
:paramtype email: str
"""
super().__init__(**kwargs)
self.id = id
self.display_name = display_name
self.type = type
self.email = email
class RoleAssignment(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Role Assignments.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role assignment ID.
:vartype id: str
:ivar name: The role assignment name.
:vartype name: str
:ivar type: The role assignment type.
:vartype type: str
:ivar scope: The role assignment scope.
:vartype scope: str
:ivar role_definition_id: The role definition ID.
:vartype role_definition_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
:ivar principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:vartype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:ivar description: Description of role assignment.
:vartype description: str
:ivar condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:vartype condition: str
:ivar condition_version: Version of the condition. Currently accepted value is '2.0'.
:vartype condition_version: str
:ivar created_on: Time it was created.
:vartype created_on: ~datetime.datetime
:ivar updated_on: Time it was updated.
:vartype updated_on: ~datetime.datetime
:ivar created_by: Id of the user who created the assignment.
:vartype created_by: str
:ivar updated_by: Id of the user who updated the assignment.
:vartype updated_by: str
:ivar delegated_managed_identity_resource_id: Id of the delegated managed identity resource.
:vartype delegated_managed_identity_resource_id: str
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"scope": {"readonly": True},
"created_on": {"readonly": True},
"updated_on": {"readonly": True},
"created_by": {"readonly": True},
"updated_by": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"role_definition_id": {"key": "properties.roleDefinitionId", "type": "str"},
"principal_id": {"key": "properties.principalId", "type": "str"},
"principal_type": {"key": "properties.principalType", "type": "str"},
"description": {"key": "properties.description", "type": "str"},
"condition": {"key": "properties.condition", "type": "str"},
"condition_version": {"key": "properties.conditionVersion", "type": "str"},
"created_on": {"key": "properties.createdOn", "type": "iso-8601"},
"updated_on": {"key": "properties.updatedOn", "type": "iso-8601"},
"created_by": {"key": "properties.createdBy", "type": "str"},
"updated_by": {"key": "properties.updatedBy", "type": "str"},
"delegated_managed_identity_resource_id": {
"key": "properties.delegatedManagedIdentityResourceId",
"type": "str",
},
}
def __init__(
self,
*,
role_definition_id: Optional[str] = None,
principal_id: Optional[str] = None,
principal_type: Optional[Union[str, "_models.PrincipalType"]] = None,
description: Optional[str] = None,
condition: Optional[str] = None,
condition_version: Optional[str] = None,
delegated_managed_identity_resource_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword role_definition_id: The role definition ID.
:paramtype role_definition_id: str
:keyword principal_id: The principal ID.
:paramtype principal_id: str
:keyword principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:paramtype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:keyword description: Description of role assignment.
:paramtype description: str
:keyword condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:paramtype condition: str
:keyword condition_version: Version of the condition. Currently accepted value is '2.0'.
:paramtype condition_version: str
:keyword delegated_managed_identity_resource_id: Id of the delegated managed identity resource.
:paramtype delegated_managed_identity_resource_id: str
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = None
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.principal_type = principal_type
self.description = description
self.condition = condition
self.condition_version = condition_version
self.created_on = None
self.updated_on = None
self.created_by = None
self.updated_by = None
self.delegated_managed_identity_resource_id = delegated_managed_identity_resource_id
class RoleAssignmentCreateParameters(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Role assignment create parameters.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar scope: The role assignment scope.
:vartype scope: str
:ivar role_definition_id: The role definition ID. Required.
:vartype role_definition_id: str
:ivar principal_id: The principal ID. Required.
:vartype principal_id: str
:ivar principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:vartype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:ivar description: Description of role assignment.
:vartype description: str
:ivar condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:vartype condition: str
:ivar condition_version: Version of the condition. Currently accepted value is '2.0'.
:vartype condition_version: str
:ivar created_on: Time it was created.
:vartype created_on: ~datetime.datetime
:ivar updated_on: Time it was updated.
:vartype updated_on: ~datetime.datetime
:ivar created_by: Id of the user who created the assignment.
:vartype created_by: str
:ivar updated_by: Id of the user who updated the assignment.
:vartype updated_by: str
:ivar delegated_managed_identity_resource_id: Id of the delegated managed identity resource.
:vartype delegated_managed_identity_resource_id: str
"""
_validation = {
"scope": {"readonly": True},
"role_definition_id": {"required": True},
"principal_id": {"required": True},
"created_on": {"readonly": True},
"updated_on": {"readonly": True},
"created_by": {"readonly": True},
"updated_by": {"readonly": True},
}
_attribute_map = {
"scope": {"key": "properties.scope", "type": "str"},
"role_definition_id": {"key": "properties.roleDefinitionId", "type": "str"},
"principal_id": {"key": "properties.principalId", "type": "str"},
"principal_type": {"key": "properties.principalType", "type": "str"},
"description": {"key": "properties.description", "type": "str"},
"condition": {"key": "properties.condition", "type": "str"},
"condition_version": {"key": "properties.conditionVersion", "type": "str"},
"created_on": {"key": "properties.createdOn", "type": "iso-8601"},
"updated_on": {"key": "properties.updatedOn", "type": "iso-8601"},
"created_by": {"key": "properties.createdBy", "type": "str"},
"updated_by": {"key": "properties.updatedBy", "type": "str"},
"delegated_managed_identity_resource_id": {
"key": "properties.delegatedManagedIdentityResourceId",
"type": "str",
},
}
def __init__(
self,
*,
role_definition_id: str,
principal_id: str,
principal_type: Optional[Union[str, "_models.PrincipalType"]] = None,
description: Optional[str] = None,
condition: Optional[str] = None,
condition_version: Optional[str] = None,
delegated_managed_identity_resource_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword role_definition_id: The role definition ID. Required.
:paramtype role_definition_id: str
:keyword principal_id: The principal ID. Required.
:paramtype principal_id: str
:keyword principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:paramtype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:keyword description: Description of role assignment.
:paramtype description: str
:keyword condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:paramtype condition: str
:keyword condition_version: Version of the condition. Currently accepted value is '2.0'.
:paramtype condition_version: str
:keyword delegated_managed_identity_resource_id: Id of the delegated managed identity resource.
:paramtype delegated_managed_identity_resource_id: str
"""
super().__init__(**kwargs)
self.scope = None
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.principal_type = principal_type
self.description = description
self.condition = condition
self.condition_version = condition_version
self.created_on = None
self.updated_on = None
self.created_by = None
self.updated_by = None
self.delegated_managed_identity_resource_id = delegated_managed_identity_resource_id
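

# Illustrative sketch (not part of the generated surface): the "properties.*"
# keys in _attribute_map mean the flat Python attributes serialize into a nested
# "properties" JSON object. Assumes the vendored _serialization.Model keeps
# msrest's ``serialize`` method; all GUIDs below are hypothetical.
def _example_role_assignment_body() -> dict:
    """Serialize RoleAssignmentCreateParameters into its wire format."""
    params = RoleAssignmentCreateParameters(
        role_definition_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/providers/Microsoft.Authorization/roleDefinitions"
            "/11111111-1111-1111-1111-111111111111"
        ),
        principal_id="22222222-2222-2222-2222-222222222222",
        principal_type="ServicePrincipal",
        description="Illustrative assignment",
    )
    # Produces {"properties": {"roleDefinitionId": ..., "principalId": ...,
    # "principalType": ..., "description": ...}}; read-only fields are skipped.
    return params.serialize()
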
class RoleAssignmentFilter(_serialization.Model):
"""Role Assignments filter.
:ivar principal_id: Returns role assignment of the specific principal.
:vartype principal_id: str
"""
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
}
def __init__(self, *, principal_id: Optional[str] = None, **kwargs: Any) -> None:
"""
:keyword principal_id: Returns role assignment of the specific principal.
:paramtype principal_id: str
"""
super().__init__(**kwargs)
self.principal_id = principal_id
class RoleAssignmentListResult(_serialization.Model):
"""Role assignment list operation result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: Role assignment list.
:vartype value: list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignment]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_validation = {
"next_link": {"readonly": True},
}
_attribute_map = {
"value": {"key": "value", "type": "[RoleAssignment]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(self, *, value: Optional[List["_models.RoleAssignment"]] = None, **kwargs: Any) -> None:
"""
:keyword value: Role assignment list.
:paramtype value: list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignment]
"""
super().__init__(**kwargs)
self.value = value
self.next_link = None
class RoleAssignmentSchedule(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Role Assignment schedule.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role assignment schedule Id.
:vartype id: str
:ivar name: The role assignment schedule name.
:vartype name: str
:ivar type: The role assignment schedule type.
:vartype type: str
:ivar scope: The role assignment schedule scope.
:vartype scope: str
:ivar role_definition_id: The role definition ID.
:vartype role_definition_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
:ivar principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:vartype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:ivar role_assignment_schedule_request_id: The id of roleAssignmentScheduleRequest used to
create this roleAssignmentSchedule.
:vartype role_assignment_schedule_request_id: str
    :ivar linked_role_eligibility_schedule_id: The id of the roleEligibilitySchedule used to
     activate this roleAssignmentSchedule.
:vartype linked_role_eligibility_schedule_id: str
:ivar assignment_type: Assignment type of the role assignment schedule. Known values are:
"Activated" and "Assigned".
:vartype assignment_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.AssignmentType
:ivar member_type: Membership type of the role assignment schedule. Known values are:
"Inherited", "Direct", and "Group".
:vartype member_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.MemberType
:ivar status: The status of the role assignment schedule. Known values are: "Accepted",
"PendingEvaluation", "Granted", "Denied", "PendingProvisioning", "Provisioned",
"PendingRevocation", "Revoked", "Canceled", "Failed", "PendingApprovalProvisioning",
"PendingApproval", "FailedAsResourceIsLocked", "PendingAdminDecision", "AdminApproved",
"AdminDenied", "TimedOut", "ProvisioningStarted", "Invalid", "PendingScheduleCreation",
"ScheduleCreated", and "PendingExternalProvisioning".
:vartype status: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Status
    :ivar start_date_time: Start DateTime of the role assignment schedule.
    :vartype start_date_time: ~datetime.datetime
    :ivar end_date_time: End DateTime of the role assignment schedule.
    :vartype end_date_time: ~datetime.datetime
:ivar condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:vartype condition: str
:ivar condition_version: Version of the condition. Currently accepted value is '2.0'.
:vartype condition_version: str
:ivar created_on: DateTime when role assignment schedule was created.
:vartype created_on: ~datetime.datetime
:ivar updated_on: DateTime when role assignment schedule was modified.
:vartype updated_on: ~datetime.datetime
:ivar expanded_properties: Additional properties of principal, scope and role definition.
:vartype expanded_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"role_definition_id": {"key": "properties.roleDefinitionId", "type": "str"},
"principal_id": {"key": "properties.principalId", "type": "str"},
"principal_type": {"key": "properties.principalType", "type": "str"},
"role_assignment_schedule_request_id": {"key": "properties.roleAssignmentScheduleRequestId", "type": "str"},
"linked_role_eligibility_schedule_id": {"key": "properties.linkedRoleEligibilityScheduleId", "type": "str"},
"assignment_type": {"key": "properties.assignmentType", "type": "str"},
"member_type": {"key": "properties.memberType", "type": "str"},
"status": {"key": "properties.status", "type": "str"},
"start_date_time": {"key": "properties.startDateTime", "type": "iso-8601"},
"end_date_time": {"key": "properties.endDateTime", "type": "iso-8601"},
"condition": {"key": "properties.condition", "type": "str"},
"condition_version": {"key": "properties.conditionVersion", "type": "str"},
"created_on": {"key": "properties.createdOn", "type": "iso-8601"},
"updated_on": {"key": "properties.updatedOn", "type": "iso-8601"},
"expanded_properties": {"key": "properties.expandedProperties", "type": "ExpandedProperties"},
}
def __init__(
self,
*,
scope: Optional[str] = None,
role_definition_id: Optional[str] = None,
principal_id: Optional[str] = None,
principal_type: Optional[Union[str, "_models.PrincipalType"]] = None,
role_assignment_schedule_request_id: Optional[str] = None,
linked_role_eligibility_schedule_id: Optional[str] = None,
assignment_type: Optional[Union[str, "_models.AssignmentType"]] = None,
member_type: Optional[Union[str, "_models.MemberType"]] = None,
status: Optional[Union[str, "_models.Status"]] = None,
start_date_time: Optional[datetime.datetime] = None,
end_date_time: Optional[datetime.datetime] = None,
condition: Optional[str] = None,
condition_version: Optional[str] = None,
created_on: Optional[datetime.datetime] = None,
updated_on: Optional[datetime.datetime] = None,
expanded_properties: Optional["_models.ExpandedProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword scope: The role assignment schedule scope.
:paramtype scope: str
:keyword role_definition_id: The role definition ID.
:paramtype role_definition_id: str
:keyword principal_id: The principal ID.
:paramtype principal_id: str
:keyword principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:paramtype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:keyword role_assignment_schedule_request_id: The id of roleAssignmentScheduleRequest used to
create this roleAssignmentSchedule.
:paramtype role_assignment_schedule_request_id: str
        :keyword linked_role_eligibility_schedule_id: The id of the roleEligibilitySchedule used to
         activate this roleAssignmentSchedule.
:paramtype linked_role_eligibility_schedule_id: str
:keyword assignment_type: Assignment type of the role assignment schedule. Known values are:
"Activated" and "Assigned".
:paramtype assignment_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.AssignmentType
:keyword member_type: Membership type of the role assignment schedule. Known values are:
"Inherited", "Direct", and "Group".
:paramtype member_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.MemberType
:keyword status: The status of the role assignment schedule. Known values are: "Accepted",
"PendingEvaluation", "Granted", "Denied", "PendingProvisioning", "Provisioned",
"PendingRevocation", "Revoked", "Canceled", "Failed", "PendingApprovalProvisioning",
"PendingApproval", "FailedAsResourceIsLocked", "PendingAdminDecision", "AdminApproved",
"AdminDenied", "TimedOut", "ProvisioningStarted", "Invalid", "PendingScheduleCreation",
"ScheduleCreated", and "PendingExternalProvisioning".
:paramtype status: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Status
        :keyword start_date_time: Start DateTime of the role assignment schedule.
        :paramtype start_date_time: ~datetime.datetime
        :keyword end_date_time: End DateTime of the role assignment schedule.
        :paramtype end_date_time: ~datetime.datetime
:keyword condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:paramtype condition: str
:keyword condition_version: Version of the condition. Currently accepted value is '2.0'.
:paramtype condition_version: str
:keyword created_on: DateTime when role assignment schedule was created.
:paramtype created_on: ~datetime.datetime
:keyword updated_on: DateTime when role assignment schedule was modified.
:paramtype updated_on: ~datetime.datetime
:keyword expanded_properties: Additional properties of principal, scope and role definition.
:paramtype expanded_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedProperties
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = scope
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.principal_type = principal_type
self.role_assignment_schedule_request_id = role_assignment_schedule_request_id
self.linked_role_eligibility_schedule_id = linked_role_eligibility_schedule_id
self.assignment_type = assignment_type
self.member_type = member_type
self.status = status
self.start_date_time = start_date_time
self.end_date_time = end_date_time
self.condition = condition
self.condition_version = condition_version
self.created_on = created_on
self.updated_on = updated_on
self.expanded_properties = expanded_properties
class RoleAssignmentScheduleFilter(_serialization.Model):
"""Role assignment schedule filter.
:ivar principal_id: Returns role assignment schedule of the specific principal.
:vartype principal_id: str
:ivar role_definition_id: Returns role assignment schedule of the specific role definition.
:vartype role_definition_id: str
:ivar status: Returns role assignment schedule instances of the specific status.
:vartype status: str
"""
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"role_definition_id": {"key": "roleDefinitionId", "type": "str"},
"status": {"key": "status", "type": "str"},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
role_definition_id: Optional[str] = None,
status: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword principal_id: Returns role assignment schedule of the specific principal.
:paramtype principal_id: str
:keyword role_definition_id: Returns role assignment schedule of the specific role definition.
:paramtype role_definition_id: str
:keyword status: Returns role assignment schedule instances of the specific status.
:paramtype status: str
"""
super().__init__(**kwargs)
self.principal_id = principal_id
self.role_definition_id = role_definition_id
self.status = status
class RoleAssignmentScheduleInstance(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Information about current or upcoming role assignment schedule instance.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role assignment schedule instance ID.
:vartype id: str
:ivar name: The role assignment schedule instance name.
:vartype name: str
:ivar type: The role assignment schedule instance type.
:vartype type: str
:ivar scope: The role assignment schedule scope.
:vartype scope: str
:ivar role_definition_id: The role definition ID.
:vartype role_definition_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
:ivar principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:vartype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:ivar role_assignment_schedule_id: Id of the master role assignment schedule.
:vartype role_assignment_schedule_id: str
:ivar origin_role_assignment_id: Role Assignment Id in external system.
:vartype origin_role_assignment_id: str
:ivar status: The status of the role assignment schedule instance. Known values are:
"Accepted", "PendingEvaluation", "Granted", "Denied", "PendingProvisioning", "Provisioned",
"PendingRevocation", "Revoked", "Canceled", "Failed", "PendingApprovalProvisioning",
"PendingApproval", "FailedAsResourceIsLocked", "PendingAdminDecision", "AdminApproved",
"AdminDenied", "TimedOut", "ProvisioningStarted", "Invalid", "PendingScheduleCreation",
"ScheduleCreated", and "PendingExternalProvisioning".
:vartype status: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Status
:ivar start_date_time: The startDateTime of the role assignment schedule instance.
:vartype start_date_time: ~datetime.datetime
:ivar end_date_time: The endDateTime of the role assignment schedule instance.
:vartype end_date_time: ~datetime.datetime
    :ivar linked_role_eligibility_schedule_id: The roleEligibilityScheduleId used to activate this
     assignment.
:vartype linked_role_eligibility_schedule_id: str
:ivar linked_role_eligibility_schedule_instance_id: roleEligibilityScheduleInstanceId linked to
this roleAssignmentScheduleInstance.
:vartype linked_role_eligibility_schedule_instance_id: str
:ivar assignment_type: Assignment type of the role assignment schedule. Known values are:
"Activated" and "Assigned".
:vartype assignment_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.AssignmentType
:ivar member_type: Membership type of the role assignment schedule. Known values are:
"Inherited", "Direct", and "Group".
:vartype member_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.MemberType
:ivar condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:vartype condition: str
:ivar condition_version: Version of the condition. Currently accepted value is '2.0'.
:vartype condition_version: str
:ivar created_on: DateTime when role assignment schedule was created.
:vartype created_on: ~datetime.datetime
:ivar expanded_properties: Additional properties of principal, scope and role definition.
:vartype expanded_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"role_definition_id": {"key": "properties.roleDefinitionId", "type": "str"},
"principal_id": {"key": "properties.principalId", "type": "str"},
"principal_type": {"key": "properties.principalType", "type": "str"},
"role_assignment_schedule_id": {"key": "properties.roleAssignmentScheduleId", "type": "str"},
"origin_role_assignment_id": {"key": "properties.originRoleAssignmentId", "type": "str"},
"status": {"key": "properties.status", "type": "str"},
"start_date_time": {"key": "properties.startDateTime", "type": "iso-8601"},
"end_date_time": {"key": "properties.endDateTime", "type": "iso-8601"},
"linked_role_eligibility_schedule_id": {"key": "properties.linkedRoleEligibilityScheduleId", "type": "str"},
"linked_role_eligibility_schedule_instance_id": {
"key": "properties.linkedRoleEligibilityScheduleInstanceId",
"type": "str",
},
"assignment_type": {"key": "properties.assignmentType", "type": "str"},
"member_type": {"key": "properties.memberType", "type": "str"},
"condition": {"key": "properties.condition", "type": "str"},
"condition_version": {"key": "properties.conditionVersion", "type": "str"},
"created_on": {"key": "properties.createdOn", "type": "iso-8601"},
"expanded_properties": {"key": "properties.expandedProperties", "type": "ExpandedProperties"},
}
def __init__(
self,
*,
scope: Optional[str] = None,
role_definition_id: Optional[str] = None,
principal_id: Optional[str] = None,
principal_type: Optional[Union[str, "_models.PrincipalType"]] = None,
role_assignment_schedule_id: Optional[str] = None,
origin_role_assignment_id: Optional[str] = None,
status: Optional[Union[str, "_models.Status"]] = None,
start_date_time: Optional[datetime.datetime] = None,
end_date_time: Optional[datetime.datetime] = None,
linked_role_eligibility_schedule_id: Optional[str] = None,
linked_role_eligibility_schedule_instance_id: Optional[str] = None,
assignment_type: Optional[Union[str, "_models.AssignmentType"]] = None,
member_type: Optional[Union[str, "_models.MemberType"]] = None,
condition: Optional[str] = None,
condition_version: Optional[str] = None,
created_on: Optional[datetime.datetime] = None,
expanded_properties: Optional["_models.ExpandedProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword scope: The role assignment schedule scope.
:paramtype scope: str
:keyword role_definition_id: The role definition ID.
:paramtype role_definition_id: str
:keyword principal_id: The principal ID.
:paramtype principal_id: str
:keyword principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:paramtype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:keyword role_assignment_schedule_id: Id of the master role assignment schedule.
:paramtype role_assignment_schedule_id: str
:keyword origin_role_assignment_id: Role Assignment Id in external system.
:paramtype origin_role_assignment_id: str
:keyword status: The status of the role assignment schedule instance. Known values are:
"Accepted", "PendingEvaluation", "Granted", "Denied", "PendingProvisioning", "Provisioned",
"PendingRevocation", "Revoked", "Canceled", "Failed", "PendingApprovalProvisioning",
"PendingApproval", "FailedAsResourceIsLocked", "PendingAdminDecision", "AdminApproved",
"AdminDenied", "TimedOut", "ProvisioningStarted", "Invalid", "PendingScheduleCreation",
"ScheduleCreated", and "PendingExternalProvisioning".
:paramtype status: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Status
:keyword start_date_time: The startDateTime of the role assignment schedule instance.
:paramtype start_date_time: ~datetime.datetime
:keyword end_date_time: The endDateTime of the role assignment schedule instance.
:paramtype end_date_time: ~datetime.datetime
        :keyword linked_role_eligibility_schedule_id: The roleEligibilityScheduleId used to
         activate this assignment.
:paramtype linked_role_eligibility_schedule_id: str
:keyword linked_role_eligibility_schedule_instance_id: roleEligibilityScheduleInstanceId linked
to this roleAssignmentScheduleInstance.
:paramtype linked_role_eligibility_schedule_instance_id: str
:keyword assignment_type: Assignment type of the role assignment schedule. Known values are:
"Activated" and "Assigned".
:paramtype assignment_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.AssignmentType
:keyword member_type: Membership type of the role assignment schedule. Known values are:
"Inherited", "Direct", and "Group".
:paramtype member_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.MemberType
:keyword condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:paramtype condition: str
:keyword condition_version: Version of the condition. Currently accepted value is '2.0'.
:paramtype condition_version: str
:keyword created_on: DateTime when role assignment schedule was created.
:paramtype created_on: ~datetime.datetime
:keyword expanded_properties: Additional properties of principal, scope and role definition.
:paramtype expanded_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedProperties
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = scope
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.principal_type = principal_type
self.role_assignment_schedule_id = role_assignment_schedule_id
self.origin_role_assignment_id = origin_role_assignment_id
self.status = status
self.start_date_time = start_date_time
self.end_date_time = end_date_time
self.linked_role_eligibility_schedule_id = linked_role_eligibility_schedule_id
self.linked_role_eligibility_schedule_instance_id = linked_role_eligibility_schedule_instance_id
self.assignment_type = assignment_type
self.member_type = member_type
self.condition = condition
self.condition_version = condition_version
self.created_on = created_on
self.expanded_properties = expanded_properties
class RoleAssignmentScheduleInstanceFilter(_serialization.Model):
"""Role assignment schedule instance filter.
:ivar principal_id: Returns role assignment schedule instances of the specific principal.
:vartype principal_id: str
:ivar role_definition_id: Returns role assignment schedule instances of the specific role
definition.
:vartype role_definition_id: str
:ivar status: Returns role assignment schedule instances of the specific status.
:vartype status: str
:ivar role_assignment_schedule_id: Returns role assignment schedule instances belonging to a
specific role assignment schedule.
:vartype role_assignment_schedule_id: str
"""
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"role_definition_id": {"key": "roleDefinitionId", "type": "str"},
"status": {"key": "status", "type": "str"},
"role_assignment_schedule_id": {"key": "roleAssignmentScheduleId", "type": "str"},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
role_definition_id: Optional[str] = None,
status: Optional[str] = None,
role_assignment_schedule_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword principal_id: Returns role assignment schedule instances of the specific principal.
:paramtype principal_id: str
:keyword role_definition_id: Returns role assignment schedule instances of the specific role
definition.
:paramtype role_definition_id: str
:keyword status: Returns role assignment schedule instances of the specific status.
:paramtype status: str
:keyword role_assignment_schedule_id: Returns role assignment schedule instances belonging to a
specific role assignment schedule.
:paramtype role_assignment_schedule_id: str
"""
super().__init__(**kwargs)
self.principal_id = principal_id
self.role_definition_id = role_definition_id
self.status = status
self.role_assignment_schedule_id = role_assignment_schedule_id
class RoleAssignmentScheduleInstanceListResult(_serialization.Model):
"""Role assignment schedule instance list operation result.
:ivar value: Role assignment schedule instance list.
:vartype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentScheduleInstance]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[RoleAssignmentScheduleInstance]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.RoleAssignmentScheduleInstance"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: Role assignment schedule instance list.
:paramtype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentScheduleInstance]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleAssignmentScheduleListResult(_serialization.Model):
"""Role assignment schedule list operation result.
:ivar value: Role assignment schedule list.
:vartype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentSchedule]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[RoleAssignmentSchedule]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.RoleAssignmentSchedule"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: Role assignment schedule list.
:paramtype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentSchedule]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleAssignmentScheduleRequest(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Role Assignment schedule request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role assignment schedule request ID.
:vartype id: str
:ivar name: The role assignment schedule request name.
:vartype name: str
:ivar type: The role assignment schedule request type.
:vartype type: str
:ivar scope: The role assignment schedule request scope.
:vartype scope: str
:ivar role_definition_id: The role definition ID.
:vartype role_definition_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
:ivar principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:vartype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
    :ivar request_type: The type of the role assignment schedule request, e.g. SelfActivate or
     AdminAssign. Known values are: "AdminAssign", "AdminRemove", "AdminUpdate", "AdminExtend",
"AdminRenew", "SelfActivate", "SelfDeactivate", "SelfExtend", and "SelfRenew".
:vartype request_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.RequestType
:ivar status: The status of the role assignment schedule request. Known values are: "Accepted",
"PendingEvaluation", "Granted", "Denied", "PendingProvisioning", "Provisioned",
"PendingRevocation", "Revoked", "Canceled", "Failed", "PendingApprovalProvisioning",
"PendingApproval", "FailedAsResourceIsLocked", "PendingAdminDecision", "AdminApproved",
"AdminDenied", "TimedOut", "ProvisioningStarted", "Invalid", "PendingScheduleCreation",
"ScheduleCreated", and "PendingExternalProvisioning".
:vartype status: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Status
:ivar approval_id: The approvalId of the role assignment schedule request.
:vartype approval_id: str
:ivar target_role_assignment_schedule_id: The resultant role assignment schedule id or the role
assignment schedule id being updated.
:vartype target_role_assignment_schedule_id: str
:ivar target_role_assignment_schedule_instance_id: The role assignment schedule instance id
being updated.
:vartype target_role_assignment_schedule_instance_id: str
:ivar schedule_info: Schedule info of the role assignment schedule.
:vartype schedule_info:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentScheduleRequestPropertiesScheduleInfo
:ivar linked_role_eligibility_schedule_id: The linked role eligibility schedule id - to
activate an eligibility.
:vartype linked_role_eligibility_schedule_id: str
:ivar justification: Justification for the role assignment.
:vartype justification: str
:ivar ticket_info: Ticket Info of the role assignment.
:vartype ticket_info:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentScheduleRequestPropertiesTicketInfo
:ivar condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:vartype condition: str
:ivar condition_version: Version of the condition. Currently accepted value is '2.0'.
:vartype condition_version: str
:ivar created_on: DateTime when role assignment schedule request was created.
:vartype created_on: ~datetime.datetime
:ivar requestor_id: Id of the user who created this request.
:vartype requestor_id: str
:ivar expanded_properties: Additional properties of principal, scope and role definition.
:vartype expanded_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"scope": {"readonly": True},
"principal_type": {"readonly": True},
"status": {"readonly": True},
"approval_id": {"readonly": True},
"created_on": {"readonly": True},
"requestor_id": {"readonly": True},
"expanded_properties": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"role_definition_id": {"key": "properties.roleDefinitionId", "type": "str"},
"principal_id": {"key": "properties.principalId", "type": "str"},
"principal_type": {"key": "properties.principalType", "type": "str"},
"request_type": {"key": "properties.requestType", "type": "str"},
"status": {"key": "properties.status", "type": "str"},
"approval_id": {"key": "properties.approvalId", "type": "str"},
"target_role_assignment_schedule_id": {"key": "properties.targetRoleAssignmentScheduleId", "type": "str"},
"target_role_assignment_schedule_instance_id": {
"key": "properties.targetRoleAssignmentScheduleInstanceId",
"type": "str",
},
"schedule_info": {
"key": "properties.scheduleInfo",
"type": "RoleAssignmentScheduleRequestPropertiesScheduleInfo",
},
"linked_role_eligibility_schedule_id": {"key": "properties.linkedRoleEligibilityScheduleId", "type": "str"},
"justification": {"key": "properties.justification", "type": "str"},
"ticket_info": {"key": "properties.ticketInfo", "type": "RoleAssignmentScheduleRequestPropertiesTicketInfo"},
"condition": {"key": "properties.condition", "type": "str"},
"condition_version": {"key": "properties.conditionVersion", "type": "str"},
"created_on": {"key": "properties.createdOn", "type": "iso-8601"},
"requestor_id": {"key": "properties.requestorId", "type": "str"},
"expanded_properties": {"key": "properties.expandedProperties", "type": "ExpandedProperties"},
}
def __init__(
self,
*,
role_definition_id: Optional[str] = None,
principal_id: Optional[str] = None,
request_type: Optional[Union[str, "_models.RequestType"]] = None,
target_role_assignment_schedule_id: Optional[str] = None,
target_role_assignment_schedule_instance_id: Optional[str] = None,
schedule_info: Optional["_models.RoleAssignmentScheduleRequestPropertiesScheduleInfo"] = None,
linked_role_eligibility_schedule_id: Optional[str] = None,
justification: Optional[str] = None,
ticket_info: Optional["_models.RoleAssignmentScheduleRequestPropertiesTicketInfo"] = None,
condition: Optional[str] = None,
condition_version: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword role_definition_id: The role definition ID.
:paramtype role_definition_id: str
:keyword principal_id: The principal ID.
:paramtype principal_id: str
:keyword request_type: The type of the role assignment schedule request. Eg: SelfActivate,
AdminAssign etc. Known values are: "AdminAssign", "AdminRemove", "AdminUpdate", "AdminExtend",
"AdminRenew", "SelfActivate", "SelfDeactivate", "SelfExtend", and "SelfRenew".
:paramtype request_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.RequestType
:keyword target_role_assignment_schedule_id: The resultant role assignment schedule id or the
role assignment schedule id being updated.
:paramtype target_role_assignment_schedule_id: str
:keyword target_role_assignment_schedule_instance_id: The role assignment schedule instance id
being updated.
:paramtype target_role_assignment_schedule_instance_id: str
:keyword schedule_info: Schedule info of the role assignment schedule.
:paramtype schedule_info:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentScheduleRequestPropertiesScheduleInfo
:keyword linked_role_eligibility_schedule_id: The linked role eligibility schedule id - to
activate an eligibility.
:paramtype linked_role_eligibility_schedule_id: str
:keyword justification: Justification for the role assignment.
:paramtype justification: str
:keyword ticket_info: Ticket Info of the role assignment.
:paramtype ticket_info:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentScheduleRequestPropertiesTicketInfo
:keyword condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:paramtype condition: str
:keyword condition_version: Version of the condition. Currently accepted value is '2.0'.
:paramtype condition_version: str
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = None
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.principal_type = None
self.request_type = request_type
self.status = None
self.approval_id = None
self.target_role_assignment_schedule_id = target_role_assignment_schedule_id
self.target_role_assignment_schedule_instance_id = target_role_assignment_schedule_instance_id
self.schedule_info = schedule_info
self.linked_role_eligibility_schedule_id = linked_role_eligibility_schedule_id
self.justification = justification
self.ticket_info = ticket_info
self.condition = condition
self.condition_version = condition_version
self.created_on = None
self.requestor_id = None
self.expanded_properties = None
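# Illustrative sketch with hypothetical ids: building the payload for a PIM
# self-activation. ``RoleAssignmentScheduleRequestPropertiesScheduleInfo`` and
# its expiration model are defined later in this module; the names resolve at
# call time, so the forward references are safe.
def _example_build_self_activate_request():
    return RoleAssignmentScheduleRequest(
        role_definition_id=(
            "/subscriptions/00000000-0000-0000-0000-000000000000"
            "/providers/Microsoft.Authorization/roleDefinitions/<role-guid>"
        ),
        principal_id="<principal-object-id>",
        request_type="SelfActivate",
        justification="Illustrative activation only",
        schedule_info=RoleAssignmentScheduleRequestPropertiesScheduleInfo(
            expiration=RoleAssignmentScheduleRequestPropertiesScheduleInfoExpiration(
                type="AfterDuration",
                duration="PT8H",  # ISO-8601 duration; assumes policy allows 8 hours
            )
        ),
    )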
class RoleAssignmentScheduleRequestFilter(_serialization.Model):
"""Role assignment schedule request filter.
:ivar principal_id: Returns role assignment requests of the specific principal.
:vartype principal_id: str
:ivar role_definition_id: Returns role assignment requests of the specific role definition.
:vartype role_definition_id: str
:ivar requestor_id: Returns role assignment requests created by specific principal.
:vartype requestor_id: str
:ivar status: Returns role assignment requests of specific status.
:vartype status: str
"""
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"role_definition_id": {"key": "roleDefinitionId", "type": "str"},
"requestor_id": {"key": "requestorId", "type": "str"},
"status": {"key": "status", "type": "str"},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
role_definition_id: Optional[str] = None,
requestor_id: Optional[str] = None,
status: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword principal_id: Returns role assignment requests of the specific principal.
:paramtype principal_id: str
:keyword role_definition_id: Returns role assignment requests of the specific role definition.
:paramtype role_definition_id: str
:keyword requestor_id: Returns role assignment requests created by specific principal.
:paramtype requestor_id: str
:keyword status: Returns role assignment requests of specific status.
:paramtype status: str
"""
super().__init__(**kwargs)
self.principal_id = principal_id
self.role_definition_id = role_definition_id
self.requestor_id = requestor_id
self.status = status
class RoleAssignmentScheduleRequestListResult(_serialization.Model):
"""Role assignment schedule request list operation result.
:ivar value: Role assignment schedule request list.
:vartype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentScheduleRequest]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[RoleAssignmentScheduleRequest]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.RoleAssignmentScheduleRequest"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: Role assignment schedule request list.
:paramtype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentScheduleRequest]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleAssignmentScheduleRequestPropertiesScheduleInfo(_serialization.Model):
"""Schedule info of the role assignment schedule.
:ivar start_date_time: Start DateTime of the role assignment schedule.
:vartype start_date_time: ~datetime.datetime
:ivar expiration: Expiration of the role assignment schedule.
:vartype expiration:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentScheduleRequestPropertiesScheduleInfoExpiration
"""
_attribute_map = {
"start_date_time": {"key": "startDateTime", "type": "iso-8601"},
"expiration": {"key": "expiration", "type": "RoleAssignmentScheduleRequestPropertiesScheduleInfoExpiration"},
}
def __init__(
self,
*,
start_date_time: Optional[datetime.datetime] = None,
expiration: Optional["_models.RoleAssignmentScheduleRequestPropertiesScheduleInfoExpiration"] = None,
**kwargs: Any
) -> None:
"""
:keyword start_date_time: Start DateTime of the role assignment schedule.
:paramtype start_date_time: ~datetime.datetime
:keyword expiration: Expiration of the role assignment schedule.
:paramtype expiration:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleAssignmentScheduleRequestPropertiesScheduleInfoExpiration
"""
super().__init__(**kwargs)
self.start_date_time = start_date_time
self.expiration = expiration
class RoleAssignmentScheduleRequestPropertiesScheduleInfoExpiration(_serialization.Model):
"""Expiration of the role assignment schedule.
:ivar type: Type of the role assignment schedule expiration. Known values are: "AfterDuration",
"AfterDateTime", and "NoExpiration".
:vartype type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Type
:ivar end_date_time: End DateTime of the role assignment schedule.
:vartype end_date_time: ~datetime.datetime
:ivar duration: Duration of the role assignment schedule in TimeSpan.
:vartype duration: str
"""
_attribute_map = {
"type": {"key": "type", "type": "str"},
"end_date_time": {"key": "endDateTime", "type": "iso-8601"},
"duration": {"key": "duration", "type": "str"},
}
def __init__(
self,
*,
type: Optional[Union[str, "_models.Type"]] = None,
end_date_time: Optional[datetime.datetime] = None,
duration: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword type: Type of the role assignment schedule expiration. Known values are:
"AfterDuration", "AfterDateTime", and "NoExpiration".
:paramtype type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Type
:keyword end_date_time: End DateTime of the role assignment schedule.
:paramtype end_date_time: ~datetime.datetime
:keyword duration: Duration of the role assignment schedule in TimeSpan.
:paramtype duration: str
"""
super().__init__(**kwargs)
self.type = type
self.end_date_time = end_date_time
self.duration = duration
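# Illustrative sketch of the three expiration shapes this model supports, as
# read from the docstring above (which fields the service honours for each
# ``type`` is an assumption, not validated here).
def _example_expiration_shapes():
    after_duration = RoleAssignmentScheduleRequestPropertiesScheduleInfoExpiration(
        type="AfterDuration", duration="P30D"
    )
    after_date_time = RoleAssignmentScheduleRequestPropertiesScheduleInfoExpiration(
        type="AfterDateTime",
        end_date_time=datetime.datetime(2030, 1, 1, tzinfo=datetime.timezone.utc),
    )
    no_expiration = RoleAssignmentScheduleRequestPropertiesScheduleInfoExpiration(
        type="NoExpiration"
    )
    return after_duration, after_date_time, no_expiration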
class RoleAssignmentScheduleRequestPropertiesTicketInfo(_serialization.Model):
"""Ticket Info of the role assignment.
:ivar ticket_number: Ticket number for the role assignment.
:vartype ticket_number: str
:ivar ticket_system: Ticket system name for the role assignment.
:vartype ticket_system: str
"""
_attribute_map = {
"ticket_number": {"key": "ticketNumber", "type": "str"},
"ticket_system": {"key": "ticketSystem", "type": "str"},
}
def __init__(
self, *, ticket_number: Optional[str] = None, ticket_system: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword ticket_number: Ticket number for the role assignment.
:paramtype ticket_number: str
:keyword ticket_system: Ticket system name for the role assignment.
:paramtype ticket_system: str
"""
super().__init__(**kwargs)
self.ticket_number = ticket_number
self.ticket_system = ticket_system
class RoleEligibilitySchedule(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Role eligibility schedule.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role eligibility schedule Id.
:vartype id: str
:ivar name: The role eligibility schedule name.
:vartype name: str
:ivar type: The role eligibility schedule type.
:vartype type: str
:ivar scope: The role eligibility schedule scope.
:vartype scope: str
:ivar role_definition_id: The role definition ID.
:vartype role_definition_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
:ivar principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:vartype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:ivar role_eligibility_schedule_request_id: The id of roleEligibilityScheduleRequest used to
    create this roleEligibilitySchedule.
:vartype role_eligibility_schedule_request_id: str
:ivar member_type: Membership type of the role eligibility schedule. Known values are:
"Inherited", "Direct", and "Group".
:vartype member_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.MemberType
:ivar status: The status of the role eligibility schedule. Known values are: "Accepted",
"PendingEvaluation", "Granted", "Denied", "PendingProvisioning", "Provisioned",
"PendingRevocation", "Revoked", "Canceled", "Failed", "PendingApprovalProvisioning",
"PendingApproval", "FailedAsResourceIsLocked", "PendingAdminDecision", "AdminApproved",
"AdminDenied", "TimedOut", "ProvisioningStarted", "Invalid", "PendingScheduleCreation",
"ScheduleCreated", and "PendingExternalProvisioning".
:vartype status: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Status
    :ivar start_date_time: Start DateTime of the role eligibility schedule.
:vartype start_date_time: ~datetime.datetime
    :ivar end_date_time: End DateTime of the role eligibility schedule.
:vartype end_date_time: ~datetime.datetime
:ivar condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:vartype condition: str
:ivar condition_version: Version of the condition. Currently accepted value is '2.0'.
:vartype condition_version: str
:ivar created_on: DateTime when role eligibility schedule was created.
:vartype created_on: ~datetime.datetime
:ivar updated_on: DateTime when role eligibility schedule was modified.
:vartype updated_on: ~datetime.datetime
:ivar expanded_properties: Additional properties of principal, scope and role definition.
:vartype expanded_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"role_definition_id": {"key": "properties.roleDefinitionId", "type": "str"},
"principal_id": {"key": "properties.principalId", "type": "str"},
"principal_type": {"key": "properties.principalType", "type": "str"},
"role_eligibility_schedule_request_id": {"key": "properties.roleEligibilityScheduleRequestId", "type": "str"},
"member_type": {"key": "properties.memberType", "type": "str"},
"status": {"key": "properties.status", "type": "str"},
"start_date_time": {"key": "properties.startDateTime", "type": "iso-8601"},
"end_date_time": {"key": "properties.endDateTime", "type": "iso-8601"},
"condition": {"key": "properties.condition", "type": "str"},
"condition_version": {"key": "properties.conditionVersion", "type": "str"},
"created_on": {"key": "properties.createdOn", "type": "iso-8601"},
"updated_on": {"key": "properties.updatedOn", "type": "iso-8601"},
"expanded_properties": {"key": "properties.expandedProperties", "type": "ExpandedProperties"},
}
def __init__(
self,
*,
scope: Optional[str] = None,
role_definition_id: Optional[str] = None,
principal_id: Optional[str] = None,
principal_type: Optional[Union[str, "_models.PrincipalType"]] = None,
role_eligibility_schedule_request_id: Optional[str] = None,
member_type: Optional[Union[str, "_models.MemberType"]] = None,
status: Optional[Union[str, "_models.Status"]] = None,
start_date_time: Optional[datetime.datetime] = None,
end_date_time: Optional[datetime.datetime] = None,
condition: Optional[str] = None,
condition_version: Optional[str] = None,
created_on: Optional[datetime.datetime] = None,
updated_on: Optional[datetime.datetime] = None,
expanded_properties: Optional["_models.ExpandedProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword scope: The role eligibility schedule scope.
:paramtype scope: str
:keyword role_definition_id: The role definition ID.
:paramtype role_definition_id: str
:keyword principal_id: The principal ID.
:paramtype principal_id: str
:keyword principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:paramtype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:keyword role_eligibility_schedule_request_id: The id of roleEligibilityScheduleRequest used to
        create this roleEligibilitySchedule.
:paramtype role_eligibility_schedule_request_id: str
:keyword member_type: Membership type of the role eligibility schedule. Known values are:
"Inherited", "Direct", and "Group".
:paramtype member_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.MemberType
:keyword status: The status of the role eligibility schedule. Known values are: "Accepted",
"PendingEvaluation", "Granted", "Denied", "PendingProvisioning", "Provisioned",
"PendingRevocation", "Revoked", "Canceled", "Failed", "PendingApprovalProvisioning",
"PendingApproval", "FailedAsResourceIsLocked", "PendingAdminDecision", "AdminApproved",
"AdminDenied", "TimedOut", "ProvisioningStarted", "Invalid", "PendingScheduleCreation",
"ScheduleCreated", and "PendingExternalProvisioning".
:paramtype status: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Status
        :keyword start_date_time: Start DateTime of the role eligibility schedule.
:paramtype start_date_time: ~datetime.datetime
        :keyword end_date_time: End DateTime of the role eligibility schedule.
:paramtype end_date_time: ~datetime.datetime
:keyword condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:paramtype condition: str
:keyword condition_version: Version of the condition. Currently accepted value is '2.0'.
:paramtype condition_version: str
:keyword created_on: DateTime when role eligibility schedule was created.
:paramtype created_on: ~datetime.datetime
:keyword updated_on: DateTime when role eligibility schedule was modified.
:paramtype updated_on: ~datetime.datetime
:keyword expanded_properties: Additional properties of principal, scope and role definition.
:paramtype expanded_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedProperties
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = scope
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.principal_type = principal_type
self.role_eligibility_schedule_request_id = role_eligibility_schedule_request_id
self.member_type = member_type
self.status = status
self.start_date_time = start_date_time
self.end_date_time = end_date_time
self.condition = condition
self.condition_version = condition_version
self.created_on = created_on
self.updated_on = updated_on
self.expanded_properties = expanded_properties
class RoleEligibilityScheduleFilter(_serialization.Model):
"""Role eligibility schedule filter.
:ivar principal_id: Returns role eligibility schedule of the specific principal.
:vartype principal_id: str
:ivar role_definition_id: Returns role eligibility schedule of the specific role definition.
:vartype role_definition_id: str
:ivar status: Returns role eligibility schedule of the specific status.
:vartype status: str
"""
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"role_definition_id": {"key": "roleDefinitionId", "type": "str"},
"status": {"key": "status", "type": "str"},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
role_definition_id: Optional[str] = None,
status: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword principal_id: Returns role eligibility schedule of the specific principal.
:paramtype principal_id: str
:keyword role_definition_id: Returns role eligibility schedule of the specific role definition.
:paramtype role_definition_id: str
:keyword status: Returns role eligibility schedule of the specific status.
:paramtype status: str
"""
super().__init__(**kwargs)
self.principal_id = principal_id
self.role_definition_id = role_definition_id
self.status = status
class RoleEligibilityScheduleInstance(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Information about current or upcoming role eligibility schedule instance.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role eligibility schedule instance ID.
:vartype id: str
:ivar name: The role eligibility schedule instance name.
:vartype name: str
:ivar type: The role eligibility schedule instance type.
:vartype type: str
:ivar scope: The role eligibility schedule scope.
:vartype scope: str
:ivar role_definition_id: The role definition ID.
:vartype role_definition_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
:ivar principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:vartype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:ivar role_eligibility_schedule_id: Id of the master role eligibility schedule.
:vartype role_eligibility_schedule_id: str
:ivar status: The status of the role eligibility schedule instance. Known values are:
"Accepted", "PendingEvaluation", "Granted", "Denied", "PendingProvisioning", "Provisioned",
"PendingRevocation", "Revoked", "Canceled", "Failed", "PendingApprovalProvisioning",
"PendingApproval", "FailedAsResourceIsLocked", "PendingAdminDecision", "AdminApproved",
"AdminDenied", "TimedOut", "ProvisioningStarted", "Invalid", "PendingScheduleCreation",
"ScheduleCreated", and "PendingExternalProvisioning".
:vartype status: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Status
:ivar start_date_time: The startDateTime of the role eligibility schedule instance.
:vartype start_date_time: ~datetime.datetime
:ivar end_date_time: The endDateTime of the role eligibility schedule instance.
:vartype end_date_time: ~datetime.datetime
:ivar member_type: Membership type of the role eligibility schedule. Known values are:
"Inherited", "Direct", and "Group".
:vartype member_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.MemberType
:ivar condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:vartype condition: str
:ivar condition_version: Version of the condition. Currently accepted value is '2.0'.
:vartype condition_version: str
:ivar created_on: DateTime when role eligibility schedule was created.
:vartype created_on: ~datetime.datetime
:ivar expanded_properties: Additional properties of principal, scope and role definition.
:vartype expanded_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"role_definition_id": {"key": "properties.roleDefinitionId", "type": "str"},
"principal_id": {"key": "properties.principalId", "type": "str"},
"principal_type": {"key": "properties.principalType", "type": "str"},
"role_eligibility_schedule_id": {"key": "properties.roleEligibilityScheduleId", "type": "str"},
"status": {"key": "properties.status", "type": "str"},
"start_date_time": {"key": "properties.startDateTime", "type": "iso-8601"},
"end_date_time": {"key": "properties.endDateTime", "type": "iso-8601"},
"member_type": {"key": "properties.memberType", "type": "str"},
"condition": {"key": "properties.condition", "type": "str"},
"condition_version": {"key": "properties.conditionVersion", "type": "str"},
"created_on": {"key": "properties.createdOn", "type": "iso-8601"},
"expanded_properties": {"key": "properties.expandedProperties", "type": "ExpandedProperties"},
}
def __init__(
self,
*,
scope: Optional[str] = None,
role_definition_id: Optional[str] = None,
principal_id: Optional[str] = None,
principal_type: Optional[Union[str, "_models.PrincipalType"]] = None,
role_eligibility_schedule_id: Optional[str] = None,
status: Optional[Union[str, "_models.Status"]] = None,
start_date_time: Optional[datetime.datetime] = None,
end_date_time: Optional[datetime.datetime] = None,
member_type: Optional[Union[str, "_models.MemberType"]] = None,
condition: Optional[str] = None,
condition_version: Optional[str] = None,
created_on: Optional[datetime.datetime] = None,
expanded_properties: Optional["_models.ExpandedProperties"] = None,
**kwargs: Any
) -> None:
"""
:keyword scope: The role eligibility schedule scope.
:paramtype scope: str
:keyword role_definition_id: The role definition ID.
:paramtype role_definition_id: str
:keyword principal_id: The principal ID.
:paramtype principal_id: str
:keyword principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:paramtype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
:keyword role_eligibility_schedule_id: Id of the master role eligibility schedule.
:paramtype role_eligibility_schedule_id: str
:keyword status: The status of the role eligibility schedule instance. Known values are:
"Accepted", "PendingEvaluation", "Granted", "Denied", "PendingProvisioning", "Provisioned",
"PendingRevocation", "Revoked", "Canceled", "Failed", "PendingApprovalProvisioning",
"PendingApproval", "FailedAsResourceIsLocked", "PendingAdminDecision", "AdminApproved",
"AdminDenied", "TimedOut", "ProvisioningStarted", "Invalid", "PendingScheduleCreation",
"ScheduleCreated", and "PendingExternalProvisioning".
:paramtype status: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Status
:keyword start_date_time: The startDateTime of the role eligibility schedule instance.
:paramtype start_date_time: ~datetime.datetime
:keyword end_date_time: The endDateTime of the role eligibility schedule instance.
:paramtype end_date_time: ~datetime.datetime
:keyword member_type: Membership type of the role eligibility schedule. Known values are:
"Inherited", "Direct", and "Group".
:paramtype member_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.MemberType
:keyword condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:paramtype condition: str
:keyword condition_version: Version of the condition. Currently accepted value is '2.0'.
:paramtype condition_version: str
:keyword created_on: DateTime when role eligibility schedule was created.
:paramtype created_on: ~datetime.datetime
:keyword expanded_properties: Additional properties of principal, scope and role definition.
:paramtype expanded_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedProperties
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = scope
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.principal_type = principal_type
self.role_eligibility_schedule_id = role_eligibility_schedule_id
self.status = status
self.start_date_time = start_date_time
self.end_date_time = end_date_time
self.member_type = member_type
self.condition = condition
self.condition_version = condition_version
self.created_on = created_on
self.expanded_properties = expanded_properties
class RoleEligibilityScheduleInstanceFilter(_serialization.Model):
"""Role eligibility schedule instance filter.
:ivar principal_id: Returns role eligibility schedule instances of the specific principal.
:vartype principal_id: str
:ivar role_definition_id: Returns role eligibility schedule instances of the specific role
definition.
:vartype role_definition_id: str
:ivar status: Returns role eligibility schedule instances of the specific status.
:vartype status: str
:ivar role_eligibility_schedule_id: Returns role eligibility schedule instances belonging to a
specific role eligibility schedule.
:vartype role_eligibility_schedule_id: str
"""
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"role_definition_id": {"key": "roleDefinitionId", "type": "str"},
"status": {"key": "status", "type": "str"},
"role_eligibility_schedule_id": {"key": "roleEligibilityScheduleId", "type": "str"},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
role_definition_id: Optional[str] = None,
status: Optional[str] = None,
role_eligibility_schedule_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword principal_id: Returns role eligibility schedule instances of the specific principal.
:paramtype principal_id: str
:keyword role_definition_id: Returns role eligibility schedule instances of the specific role
definition.
:paramtype role_definition_id: str
:keyword status: Returns role eligibility schedule instances of the specific status.
:paramtype status: str
:keyword role_eligibility_schedule_id: Returns role eligibility schedule instances belonging to
a specific role eligibility schedule.
:paramtype role_eligibility_schedule_id: str
"""
super().__init__(**kwargs)
self.principal_id = principal_id
self.role_definition_id = role_definition_id
self.status = status
self.role_eligibility_schedule_id = role_eligibility_schedule_id
class RoleEligibilityScheduleInstanceListResult(_serialization.Model):
"""Role eligibility schedule instance list operation result.
:ivar value: Role eligibility schedule instance list.
:vartype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilityScheduleInstance]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[RoleEligibilityScheduleInstance]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.RoleEligibilityScheduleInstance"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: Role eligibility schedule instance list.
:paramtype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilityScheduleInstance]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleEligibilityScheduleListResult(_serialization.Model):
"""role eligibility schedule list operation result.
:ivar value: role eligibility schedule list.
:vartype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilitySchedule]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[RoleEligibilitySchedule]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.RoleEligibilitySchedule"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
        :keyword value: Role eligibility schedule list.
:paramtype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilitySchedule]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleEligibilityScheduleRequest(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Role Eligibility schedule request.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role eligibility schedule request ID.
:vartype id: str
:ivar name: The role eligibility schedule request name.
:vartype name: str
:ivar type: The role eligibility schedule request type.
:vartype type: str
:ivar scope: The role eligibility schedule request scope.
:vartype scope: str
:ivar role_definition_id: The role definition ID.
:vartype role_definition_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
:ivar principal_type: The principal type of the assigned principal ID. Known values are:
"User", "Group", "ServicePrincipal", "ForeignGroup", and "Device".
:vartype principal_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.PrincipalType
    :ivar request_type: The type of the role eligibility schedule request. Eg: SelfActivate,
AdminAssign etc. Known values are: "AdminAssign", "AdminRemove", "AdminUpdate", "AdminExtend",
"AdminRenew", "SelfActivate", "SelfDeactivate", "SelfExtend", and "SelfRenew".
:vartype request_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.RequestType
:ivar status: The status of the role eligibility schedule request. Known values are:
"Accepted", "PendingEvaluation", "Granted", "Denied", "PendingProvisioning", "Provisioned",
"PendingRevocation", "Revoked", "Canceled", "Failed", "PendingApprovalProvisioning",
"PendingApproval", "FailedAsResourceIsLocked", "PendingAdminDecision", "AdminApproved",
"AdminDenied", "TimedOut", "ProvisioningStarted", "Invalid", "PendingScheduleCreation",
"ScheduleCreated", and "PendingExternalProvisioning".
:vartype status: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Status
:ivar approval_id: The approvalId of the role eligibility schedule request.
:vartype approval_id: str
:ivar schedule_info: Schedule info of the role eligibility schedule.
:vartype schedule_info:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilityScheduleRequestPropertiesScheduleInfo
:ivar target_role_eligibility_schedule_id: The resultant role eligibility schedule id or the
role eligibility schedule id being updated.
:vartype target_role_eligibility_schedule_id: str
:ivar target_role_eligibility_schedule_instance_id: The role eligibility schedule instance id
being updated.
:vartype target_role_eligibility_schedule_instance_id: str
:ivar justification: Justification for the role eligibility.
:vartype justification: str
:ivar ticket_info: Ticket Info of the role eligibility.
:vartype ticket_info:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilityScheduleRequestPropertiesTicketInfo
:ivar condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:vartype condition: str
:ivar condition_version: Version of the condition. Currently accepted value is '2.0'.
:vartype condition_version: str
:ivar created_on: DateTime when role eligibility schedule request was created.
:vartype created_on: ~datetime.datetime
:ivar requestor_id: Id of the user who created this request.
:vartype requestor_id: str
:ivar expanded_properties: Additional properties of principal, scope and role definition.
:vartype expanded_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.ExpandedProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"scope": {"readonly": True},
"principal_type": {"readonly": True},
"status": {"readonly": True},
"approval_id": {"readonly": True},
"created_on": {"readonly": True},
"requestor_id": {"readonly": True},
"expanded_properties": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"role_definition_id": {"key": "properties.roleDefinitionId", "type": "str"},
"principal_id": {"key": "properties.principalId", "type": "str"},
"principal_type": {"key": "properties.principalType", "type": "str"},
"request_type": {"key": "properties.requestType", "type": "str"},
"status": {"key": "properties.status", "type": "str"},
"approval_id": {"key": "properties.approvalId", "type": "str"},
"schedule_info": {
"key": "properties.scheduleInfo",
"type": "RoleEligibilityScheduleRequestPropertiesScheduleInfo",
},
"target_role_eligibility_schedule_id": {"key": "properties.targetRoleEligibilityScheduleId", "type": "str"},
"target_role_eligibility_schedule_instance_id": {
"key": "properties.targetRoleEligibilityScheduleInstanceId",
"type": "str",
},
"justification": {"key": "properties.justification", "type": "str"},
"ticket_info": {"key": "properties.ticketInfo", "type": "RoleEligibilityScheduleRequestPropertiesTicketInfo"},
"condition": {"key": "properties.condition", "type": "str"},
"condition_version": {"key": "properties.conditionVersion", "type": "str"},
"created_on": {"key": "properties.createdOn", "type": "iso-8601"},
"requestor_id": {"key": "properties.requestorId", "type": "str"},
"expanded_properties": {"key": "properties.expandedProperties", "type": "ExpandedProperties"},
}
def __init__(
self,
*,
role_definition_id: Optional[str] = None,
principal_id: Optional[str] = None,
request_type: Optional[Union[str, "_models.RequestType"]] = None,
schedule_info: Optional["_models.RoleEligibilityScheduleRequestPropertiesScheduleInfo"] = None,
target_role_eligibility_schedule_id: Optional[str] = None,
target_role_eligibility_schedule_instance_id: Optional[str] = None,
justification: Optional[str] = None,
ticket_info: Optional["_models.RoleEligibilityScheduleRequestPropertiesTicketInfo"] = None,
condition: Optional[str] = None,
condition_version: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword role_definition_id: The role definition ID.
:paramtype role_definition_id: str
:keyword principal_id: The principal ID.
:paramtype principal_id: str
        :keyword request_type: The type of the role eligibility schedule request. Eg: SelfActivate,
AdminAssign etc. Known values are: "AdminAssign", "AdminRemove", "AdminUpdate", "AdminExtend",
"AdminRenew", "SelfActivate", "SelfDeactivate", "SelfExtend", and "SelfRenew".
:paramtype request_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.RequestType
:keyword schedule_info: Schedule info of the role eligibility schedule.
:paramtype schedule_info:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilityScheduleRequestPropertiesScheduleInfo
:keyword target_role_eligibility_schedule_id: The resultant role eligibility schedule id or the
role eligibility schedule id being updated.
:paramtype target_role_eligibility_schedule_id: str
:keyword target_role_eligibility_schedule_instance_id: The role eligibility schedule instance
id being updated.
:paramtype target_role_eligibility_schedule_instance_id: str
:keyword justification: Justification for the role eligibility.
:paramtype justification: str
:keyword ticket_info: Ticket Info of the role eligibility.
:paramtype ticket_info:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilityScheduleRequestPropertiesTicketInfo
:keyword condition: The conditions on the role assignment. This limits the resources it can be
assigned to. e.g.:
@Resource[Microsoft.Storage/storageAccounts/blobServices/containers:ContainerName]
StringEqualsIgnoreCase 'foo_storage_container'.
:paramtype condition: str
:keyword condition_version: Version of the condition. Currently accepted value is '2.0'.
:paramtype condition_version: str
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = None
self.role_definition_id = role_definition_id
self.principal_id = principal_id
self.principal_type = None
self.request_type = request_type
self.status = None
self.approval_id = None
self.schedule_info = schedule_info
self.target_role_eligibility_schedule_id = target_role_eligibility_schedule_id
self.target_role_eligibility_schedule_instance_id = target_role_eligibility_schedule_instance_id
self.justification = justification
self.ticket_info = ticket_info
self.condition = condition
self.condition_version = condition_version
self.created_on = None
self.requestor_id = None
self.expanded_properties = None
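# Illustrative sketch with hypothetical ids: an AdminAssign request that makes
# a principal *eligible* for a role, in contrast to the active assignment
# built by a RoleAssignmentScheduleRequest. The schedule-info models used here
# are defined later in this module.
def _example_build_admin_eligibility_request():
    return RoleEligibilityScheduleRequest(
        role_definition_id="<role-definition-resource-id>",
        principal_id="<principal-object-id>",
        request_type="AdminAssign",
        justification="Illustrative eligibility grant",
        schedule_info=RoleEligibilityScheduleRequestPropertiesScheduleInfo(
            expiration=RoleEligibilityScheduleRequestPropertiesScheduleInfoExpiration(
                type="AfterDuration", duration="P365D"
            )
        ),
    )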
class RoleEligibilityScheduleRequestFilter(_serialization.Model):
"""Role eligibility schedule request filter.
:ivar principal_id: Returns role eligibility requests of the specific principal.
:vartype principal_id: str
:ivar role_definition_id: Returns role eligibility requests of the specific role definition.
:vartype role_definition_id: str
:ivar requestor_id: Returns role eligibility requests created by specific principal.
:vartype requestor_id: str
:ivar status: Returns role eligibility requests of specific status.
:vartype status: str
"""
_attribute_map = {
"principal_id": {"key": "principalId", "type": "str"},
"role_definition_id": {"key": "roleDefinitionId", "type": "str"},
"requestor_id": {"key": "requestorId", "type": "str"},
"status": {"key": "status", "type": "str"},
}
def __init__(
self,
*,
principal_id: Optional[str] = None,
role_definition_id: Optional[str] = None,
requestor_id: Optional[str] = None,
status: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword principal_id: Returns role eligibility requests of the specific principal.
:paramtype principal_id: str
:keyword role_definition_id: Returns role eligibility requests of the specific role definition.
:paramtype role_definition_id: str
:keyword requestor_id: Returns role eligibility requests created by specific principal.
:paramtype requestor_id: str
:keyword status: Returns role eligibility requests of specific status.
:paramtype status: str
"""
super().__init__(**kwargs)
self.principal_id = principal_id
self.role_definition_id = role_definition_id
self.requestor_id = requestor_id
self.status = status
class RoleEligibilityScheduleRequestListResult(_serialization.Model):
"""Role eligibility schedule request list operation result.
:ivar value: Role eligibility schedule request list.
:vartype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilityScheduleRequest]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[RoleEligibilityScheduleRequest]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.RoleEligibilityScheduleRequest"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: Role eligibility schedule request list.
:paramtype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilityScheduleRequest]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleEligibilityScheduleRequestPropertiesScheduleInfo(_serialization.Model):
"""Schedule info of the role eligibility schedule.
:ivar start_date_time: Start DateTime of the role eligibility schedule.
:vartype start_date_time: ~datetime.datetime
:ivar expiration: Expiration of the role eligibility schedule.
:vartype expiration:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilityScheduleRequestPropertiesScheduleInfoExpiration
"""
_attribute_map = {
"start_date_time": {"key": "startDateTime", "type": "iso-8601"},
"expiration": {"key": "expiration", "type": "RoleEligibilityScheduleRequestPropertiesScheduleInfoExpiration"},
}
def __init__(
self,
*,
start_date_time: Optional[datetime.datetime] = None,
expiration: Optional["_models.RoleEligibilityScheduleRequestPropertiesScheduleInfoExpiration"] = None,
**kwargs: Any
) -> None:
"""
:keyword start_date_time: Start DateTime of the role eligibility schedule.
:paramtype start_date_time: ~datetime.datetime
:keyword expiration: Expiration of the role eligibility schedule.
:paramtype expiration:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleEligibilityScheduleRequestPropertiesScheduleInfoExpiration
"""
super().__init__(**kwargs)
self.start_date_time = start_date_time
self.expiration = expiration
class RoleEligibilityScheduleRequestPropertiesScheduleInfoExpiration(_serialization.Model):
"""Expiration of the role eligibility schedule.
:ivar type: Type of the role eligibility schedule expiration. Known values are:
"AfterDuration", "AfterDateTime", and "NoExpiration".
:vartype type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Type
:ivar end_date_time: End DateTime of the role eligibility schedule.
:vartype end_date_time: ~datetime.datetime
:ivar duration: Duration of the role eligibility schedule in TimeSpan.
:vartype duration: str
"""
_attribute_map = {
"type": {"key": "type", "type": "str"},
"end_date_time": {"key": "endDateTime", "type": "iso-8601"},
"duration": {"key": "duration", "type": "str"},
}
def __init__(
self,
*,
type: Optional[Union[str, "_models.Type"]] = None,
end_date_time: Optional[datetime.datetime] = None,
duration: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword type: Type of the role eligibility schedule expiration. Known values are:
"AfterDuration", "AfterDateTime", and "NoExpiration".
:paramtype type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.Type
:keyword end_date_time: End DateTime of the role eligibility schedule.
:paramtype end_date_time: ~datetime.datetime
:keyword duration: Duration of the role eligibility schedule in TimeSpan.
:paramtype duration: str
"""
super().__init__(**kwargs)
self.type = type
self.end_date_time = end_date_time
self.duration = duration
class RoleEligibilityScheduleRequestPropertiesTicketInfo(_serialization.Model):
"""Ticket Info of the role eligibility.
:ivar ticket_number: Ticket number for the role eligibility.
:vartype ticket_number: str
:ivar ticket_system: Ticket system name for the role eligibility.
:vartype ticket_system: str
"""
_attribute_map = {
"ticket_number": {"key": "ticketNumber", "type": "str"},
"ticket_system": {"key": "ticketSystem", "type": "str"},
}
def __init__(
self, *, ticket_number: Optional[str] = None, ticket_system: Optional[str] = None, **kwargs: Any
) -> None:
"""
:keyword ticket_number: Ticket number for the role eligibility.
:paramtype ticket_number: str
:keyword ticket_system: Ticket system name for the role eligibility.
:paramtype ticket_system: str
"""
super().__init__(**kwargs)
self.ticket_number = ticket_number
self.ticket_system = ticket_system
class RoleManagementPolicy(_serialization.Model): # pylint: disable=too-many-instance-attributes
"""Role management policy.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role management policy Id.
:vartype id: str
:ivar name: The role management policy name.
:vartype name: str
:ivar type: The role management policy type.
:vartype type: str
:ivar scope: The role management policy scope.
:vartype scope: str
:ivar display_name: The role management policy display name.
:vartype display_name: str
:ivar description: The role management policy description.
:vartype description: str
    :ivar is_organization_default: Whether the role management policy is the default policy.
:vartype is_organization_default: bool
    :ivar last_modified_by: The name of the entity that last modified it.
:vartype last_modified_by: ~azure.mgmt.authorization.v2020_10_01_preview.models.Principal
:ivar last_modified_date_time: The last modified date time.
:vartype last_modified_date_time: ~datetime.datetime
    :ivar rules: The rules applied to the policy.
:vartype rules:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRule]
    :ivar effective_rules: The readonly computed rules applied to the policy.
:vartype effective_rules:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRule]
:ivar policy_properties: Additional properties of scope.
:vartype policy_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.PolicyProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"last_modified_by": {"readonly": True},
"last_modified_date_time": {"readonly": True},
"effective_rules": {"readonly": True},
"policy_properties": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"display_name": {"key": "properties.displayName", "type": "str"},
"description": {"key": "properties.description", "type": "str"},
"is_organization_default": {"key": "properties.isOrganizationDefault", "type": "bool"},
"last_modified_by": {"key": "properties.lastModifiedBy", "type": "Principal"},
"last_modified_date_time": {"key": "properties.lastModifiedDateTime", "type": "iso-8601"},
"rules": {"key": "properties.rules", "type": "[RoleManagementPolicyRule]"},
"effective_rules": {"key": "properties.effectiveRules", "type": "[RoleManagementPolicyRule]"},
"policy_properties": {"key": "properties.policyProperties", "type": "PolicyProperties"},
}
def __init__(
self,
*,
scope: Optional[str] = None,
display_name: Optional[str] = None,
description: Optional[str] = None,
is_organization_default: Optional[bool] = None,
rules: Optional[List["_models.RoleManagementPolicyRule"]] = None,
**kwargs: Any
) -> None:
"""
:keyword scope: The role management policy scope.
:paramtype scope: str
:keyword display_name: The role management policy display name.
:paramtype display_name: str
:keyword description: The role management policy description.
:paramtype description: str
        :keyword is_organization_default: Whether the role management policy is the default policy.
:paramtype is_organization_default: bool
        :keyword rules: The rules applied to the policy.
:paramtype rules:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRule]
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = scope
self.display_name = display_name
self.description = description
self.is_organization_default = is_organization_default
self.last_modified_by = None
self.last_modified_date_time = None
self.rules = rules
self.effective_rules = None
self.policy_properties = None
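# Illustrative sketch: a policy is updated by sending it back with a modified
# ``rules`` list. The rule instance below is deliberately minimal and the rule
# id is an assumed well-known PIM rule name; real updates carry a populated
# setting (see RoleManagementPolicyApprovalRule below).
def _example_policy_update_payload():
    return RoleManagementPolicy(
        display_name="Illustrative policy",
        rules=[
            RoleManagementPolicyApprovalRule(
                id="Approval_EndUser_Assignment",  # assumed well-known rule id
            )
        ],
    )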
class RoleManagementPolicyRule(_serialization.Model):
"""The role management policy rule.
You probably want to use the sub-classes and not this class directly. Known sub-classes are:
RoleManagementPolicyApprovalRule, RoleManagementPolicyAuthenticationContextRule,
RoleManagementPolicyEnablementRule, RoleManagementPolicyExpirationRule,
RoleManagementPolicyNotificationRule
All required parameters must be populated in order to send to Azure.
:ivar id: The id of the rule.
:vartype id: str
:ivar rule_type: The type of rule. Required. Known values are:
"RoleManagementPolicyApprovalRule", "RoleManagementPolicyAuthenticationContextRule",
"RoleManagementPolicyEnablementRule", "RoleManagementPolicyExpirationRule", and
"RoleManagementPolicyNotificationRule".
:vartype rule_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleType
:ivar target: The target of the current rule.
:vartype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
"""
_validation = {
"rule_type": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"rule_type": {"key": "ruleType", "type": "str"},
"target": {"key": "target", "type": "RoleManagementPolicyRuleTarget"},
}
_subtype_map = {
"rule_type": {
"RoleManagementPolicyApprovalRule": "RoleManagementPolicyApprovalRule",
"RoleManagementPolicyAuthenticationContextRule": "RoleManagementPolicyAuthenticationContextRule",
"RoleManagementPolicyEnablementRule": "RoleManagementPolicyEnablementRule",
"RoleManagementPolicyExpirationRule": "RoleManagementPolicyExpirationRule",
"RoleManagementPolicyNotificationRule": "RoleManagementPolicyNotificationRule",
}
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
target: Optional["_models.RoleManagementPolicyRuleTarget"] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The id of the rule.
:paramtype id: str
:keyword target: The target of the current rule.
:paramtype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
"""
super().__init__(**kwargs)
self.id = id
self.rule_type: Optional[str] = None
self.target = target
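# Illustrative note on ``_subtype_map`` above: during deserialization the base
# class inspects the wire-level ``ruleType`` discriminator and instantiates
# the matching subclass, so callers always receive a concrete rule type. A
# minimal sketch of that dispatch (the real logic lives in ``_serialization``):
def _example_resolve_rule_class_name(rule_type: str) -> str:
    subtype_map = RoleManagementPolicyRule._subtype_map["rule_type"]
    return subtype_map.get(rule_type, "RoleManagementPolicyRule")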
class RoleManagementPolicyApprovalRule(RoleManagementPolicyRule):
"""The role management policy rule.
All required parameters must be populated in order to send to Azure.
:ivar id: The id of the rule.
:vartype id: str
:ivar rule_type: The type of rule. Required. Known values are:
"RoleManagementPolicyApprovalRule", "RoleManagementPolicyAuthenticationContextRule",
"RoleManagementPolicyEnablementRule", "RoleManagementPolicyExpirationRule", and
"RoleManagementPolicyNotificationRule".
:vartype rule_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleType
:ivar target: The target of the current rule.
:vartype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
:ivar setting: The approval setting.
:vartype setting: ~azure.mgmt.authorization.v2020_10_01_preview.models.ApprovalSettings
"""
_validation = {
"rule_type": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"rule_type": {"key": "ruleType", "type": "str"},
"target": {"key": "target", "type": "RoleManagementPolicyRuleTarget"},
"setting": {"key": "setting", "type": "ApprovalSettings"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
target: Optional["_models.RoleManagementPolicyRuleTarget"] = None,
setting: Optional["_models.ApprovalSettings"] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The id of the rule.
:paramtype id: str
:keyword target: The target of the current rule.
:paramtype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
:keyword setting: The approval setting.
:paramtype setting: ~azure.mgmt.authorization.v2020_10_01_preview.models.ApprovalSettings
"""
super().__init__(id=id, target=target, **kwargs)
self.rule_type: str = "RoleManagementPolicyApprovalRule"
self.setting = setting
class RoleManagementPolicyAssignment(_serialization.Model):
"""Role management policy.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role management policy Id.
:vartype id: str
:ivar name: The role management policy name.
:vartype name: str
:ivar type: The role management policy type.
:vartype type: str
:ivar scope: The role management policy scope.
:vartype scope: str
    :ivar role_definition_id: The role definition id of the role management policy assignment.
    :vartype role_definition_id: str
    :ivar policy_id: The policy id of the role management policy assignment.
    :vartype policy_id: str
:ivar policy_assignment_properties: Additional properties of scope, role definition and policy.
:vartype policy_assignment_properties:
~azure.mgmt.authorization.v2020_10_01_preview.models.PolicyAssignmentProperties
"""
_validation = {
"id": {"readonly": True},
"name": {"readonly": True},
"type": {"readonly": True},
"policy_assignment_properties": {"readonly": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"name": {"key": "name", "type": "str"},
"type": {"key": "type", "type": "str"},
"scope": {"key": "properties.scope", "type": "str"},
"role_definition_id": {"key": "properties.roleDefinitionId", "type": "str"},
"policy_id": {"key": "properties.policyId", "type": "str"},
"policy_assignment_properties": {
"key": "properties.policyAssignmentProperties",
"type": "PolicyAssignmentProperties",
},
}
def __init__(
self,
*,
scope: Optional[str] = None,
role_definition_id: Optional[str] = None,
policy_id: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword scope: The role management policy scope.
:paramtype scope: str
        :keyword role_definition_id: The role definition id of the role management policy assignment.
        :paramtype role_definition_id: str
        :keyword policy_id: The policy id of the role management policy assignment.
        :paramtype policy_id: str
"""
super().__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = scope
self.role_definition_id = role_definition_id
self.policy_id = policy_id
self.policy_assignment_properties = None
class RoleManagementPolicyAssignmentListResult(_serialization.Model):
"""Role management policy assignment list operation result.
:ivar value: Role management policy assignment list.
:vartype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyAssignment]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[RoleManagementPolicyAssignment]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.RoleManagementPolicyAssignment"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: Role management policy assignment list.
:paramtype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyAssignment]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleManagementPolicyAuthenticationContextRule(RoleManagementPolicyRule):
"""The role management policy rule.
All required parameters must be populated in order to send to Azure.
:ivar id: The id of the rule.
:vartype id: str
:ivar rule_type: The type of rule. Required. Known values are:
"RoleManagementPolicyApprovalRule", "RoleManagementPolicyAuthenticationContextRule",
"RoleManagementPolicyEnablementRule", "RoleManagementPolicyExpirationRule", and
"RoleManagementPolicyNotificationRule".
:vartype rule_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleType
:ivar target: The target of the current rule.
:vartype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
:ivar is_enabled: The value indicating if rule is enabled.
:vartype is_enabled: bool
:ivar claim_value: The claim value.
:vartype claim_value: str
"""
_validation = {
"rule_type": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"rule_type": {"key": "ruleType", "type": "str"},
"target": {"key": "target", "type": "RoleManagementPolicyRuleTarget"},
"is_enabled": {"key": "isEnabled", "type": "bool"},
"claim_value": {"key": "claimValue", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
target: Optional["_models.RoleManagementPolicyRuleTarget"] = None,
is_enabled: Optional[bool] = None,
claim_value: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The id of the rule.
:paramtype id: str
:keyword target: The target of the current rule.
:paramtype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
:keyword is_enabled: The value indicating if rule is enabled.
:paramtype is_enabled: bool
:keyword claim_value: The claim value.
:paramtype claim_value: str
"""
super().__init__(id=id, target=target, **kwargs)
self.rule_type: str = "RoleManagementPolicyAuthenticationContextRule"
self.is_enabled = is_enabled
self.claim_value = claim_value
class RoleManagementPolicyEnablementRule(RoleManagementPolicyRule):
"""The role management policy rule.
All required parameters must be populated in order to send to Azure.
:ivar id: The id of the rule.
:vartype id: str
:ivar rule_type: The type of rule. Required. Known values are:
"RoleManagementPolicyApprovalRule", "RoleManagementPolicyAuthenticationContextRule",
"RoleManagementPolicyEnablementRule", "RoleManagementPolicyExpirationRule", and
"RoleManagementPolicyNotificationRule".
:vartype rule_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleType
:ivar target: The target of the current rule.
:vartype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
:ivar enabled_rules: The list of enabled rules.
:vartype enabled_rules: list[str or
~azure.mgmt.authorization.v2020_10_01_preview.models.EnablementRules]
"""
_validation = {
"rule_type": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"rule_type": {"key": "ruleType", "type": "str"},
"target": {"key": "target", "type": "RoleManagementPolicyRuleTarget"},
"enabled_rules": {"key": "enabledRules", "type": "[str]"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
target: Optional["_models.RoleManagementPolicyRuleTarget"] = None,
enabled_rules: Optional[List[Union[str, "_models.EnablementRules"]]] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The id of the rule.
:paramtype id: str
:keyword target: The target of the current rule.
:paramtype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
:keyword enabled_rules: The list of enabled rules.
:paramtype enabled_rules: list[str or
~azure.mgmt.authorization.v2020_10_01_preview.models.EnablementRules]
"""
super().__init__(id=id, target=target, **kwargs)
self.rule_type: str = "RoleManagementPolicyEnablementRule"
self.enabled_rules = enabled_rules
class RoleManagementPolicyExpirationRule(RoleManagementPolicyRule):
"""The role management policy rule.
All required parameters must be populated in order to send to Azure.
:ivar id: The id of the rule.
:vartype id: str
:ivar rule_type: The type of rule. Required. Known values are:
"RoleManagementPolicyApprovalRule", "RoleManagementPolicyAuthenticationContextRule",
"RoleManagementPolicyEnablementRule", "RoleManagementPolicyExpirationRule", and
"RoleManagementPolicyNotificationRule".
:vartype rule_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleType
:ivar target: The target of the current rule.
:vartype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
:ivar is_expiration_required: The value indicating whether expiration is required.
:vartype is_expiration_required: bool
:ivar maximum_duration: The maximum duration of expiration in timespan.
:vartype maximum_duration: str
"""
_validation = {
"rule_type": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"rule_type": {"key": "ruleType", "type": "str"},
"target": {"key": "target", "type": "RoleManagementPolicyRuleTarget"},
"is_expiration_required": {"key": "isExpirationRequired", "type": "bool"},
"maximum_duration": {"key": "maximumDuration", "type": "str"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
target: Optional["_models.RoleManagementPolicyRuleTarget"] = None,
is_expiration_required: Optional[bool] = None,
maximum_duration: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The id of the rule.
:paramtype id: str
:keyword target: The target of the current rule.
:paramtype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
:keyword is_expiration_required: The value indicating whether expiration is required.
:paramtype is_expiration_required: bool
:keyword maximum_duration: The maximum duration of expiration in timespan.
:paramtype maximum_duration: str
"""
super().__init__(id=id, target=target, **kwargs)
self.rule_type: str = "RoleManagementPolicyExpirationRule"
self.is_expiration_required = is_expiration_required
self.maximum_duration = maximum_duration
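# --- Illustrative sketch, not an official SDK sample -----------------------
# One way these polymorphic rule models can be constructed; the id and
# target values below are hypothetical placeholders, not values from this
# file. "P90D" is an ISO 8601 duration (90 days).
def _example_expiration_rule() -> "RoleManagementPolicyExpirationRule":
    target = RoleManagementPolicyRuleTarget(caller="Admin", level="Assignment")
    return RoleManagementPolicyExpirationRule(
        id="Expiration_Admin_Assignment",  # hypothetical rule id
        target=target,
        is_expiration_required=True,
        maximum_duration="P90D",
    )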
class RoleManagementPolicyListResult(_serialization.Model):
"""Role management policy list operation result.
:ivar value: Role management policy list.
:vartype value: list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicy]
:ivar next_link: The URL to use for getting the next set of results.
:vartype next_link: str
"""
_attribute_map = {
"value": {"key": "value", "type": "[RoleManagementPolicy]"},
"next_link": {"key": "nextLink", "type": "str"},
}
def __init__(
self,
*,
value: Optional[List["_models.RoleManagementPolicy"]] = None,
next_link: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword value: Role management policy list.
:paramtype value:
list[~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicy]
:keyword next_link: The URL to use for getting the next set of results.
:paramtype next_link: str
"""
super().__init__(**kwargs)
self.value = value
self.next_link = next_link
class RoleManagementPolicyNotificationRule(RoleManagementPolicyRule):
"""The role management policy rule.
All required parameters must be populated in order to send to Azure.
:ivar id: The id of the rule.
:vartype id: str
:ivar rule_type: The type of rule. Required. Known values are:
"RoleManagementPolicyApprovalRule", "RoleManagementPolicyAuthenticationContextRule",
"RoleManagementPolicyEnablementRule", "RoleManagementPolicyExpirationRule", and
"RoleManagementPolicyNotificationRule".
:vartype rule_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleType
:ivar target: The target of the current rule.
:vartype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
:ivar notification_type: The type of notification. "Email"
:vartype notification_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.NotificationDeliveryMechanism
:ivar notification_level: The notification level. Known values are: "None", "Critical", and
"All".
:vartype notification_level: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.NotificationLevel
:ivar recipient_type: The recipient type. Known values are: "Requestor", "Approver", and
"Admin".
:vartype recipient_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.RecipientType
    :ivar notification_recipients: The list of notification recipients.
    :vartype notification_recipients: list[str]
    :ivar is_default_recipients_enabled: Its value determines whether the notification needs to be
    sent to the recipient type specified in the policy rule.
    :vartype is_default_recipients_enabled: bool
"""
_validation = {
"rule_type": {"required": True},
}
_attribute_map = {
"id": {"key": "id", "type": "str"},
"rule_type": {"key": "ruleType", "type": "str"},
"target": {"key": "target", "type": "RoleManagementPolicyRuleTarget"},
"notification_type": {"key": "notificationType", "type": "str"},
"notification_level": {"key": "notificationLevel", "type": "str"},
"recipient_type": {"key": "recipientType", "type": "str"},
"notification_recipients": {"key": "notificationRecipients", "type": "[str]"},
"is_default_recipients_enabled": {"key": "isDefaultRecipientsEnabled", "type": "bool"},
}
def __init__(
self,
*,
id: Optional[str] = None, # pylint: disable=redefined-builtin
target: Optional["_models.RoleManagementPolicyRuleTarget"] = None,
notification_type: Optional[Union[str, "_models.NotificationDeliveryMechanism"]] = None,
notification_level: Optional[Union[str, "_models.NotificationLevel"]] = None,
recipient_type: Optional[Union[str, "_models.RecipientType"]] = None,
notification_recipients: Optional[List[str]] = None,
is_default_recipients_enabled: Optional[bool] = None,
**kwargs: Any
) -> None:
"""
:keyword id: The id of the rule.
:paramtype id: str
:keyword target: The target of the current rule.
:paramtype target:
~azure.mgmt.authorization.v2020_10_01_preview.models.RoleManagementPolicyRuleTarget
:keyword notification_type: The type of notification. "Email"
:paramtype notification_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.NotificationDeliveryMechanism
:keyword notification_level: The notification level. Known values are: "None", "Critical", and
"All".
:paramtype notification_level: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.NotificationLevel
:keyword recipient_type: The recipient type. Known values are: "Requestor", "Approver", and
"Admin".
:paramtype recipient_type: str or
~azure.mgmt.authorization.v2020_10_01_preview.models.RecipientType
        :keyword notification_recipients: The list of notification recipients.
        :paramtype notification_recipients: list[str]
        :keyword is_default_recipients_enabled: Its value determines whether the notification needs
        to be sent to the recipient type specified in the policy rule.
        :paramtype is_default_recipients_enabled: bool
"""
super().__init__(id=id, target=target, **kwargs)
self.rule_type: str = "RoleManagementPolicyNotificationRule"
self.notification_type = notification_type
self.notification_level = notification_level
self.recipient_type = recipient_type
self.notification_recipients = notification_recipients
self.is_default_recipients_enabled = is_default_recipients_enabled
class RoleManagementPolicyRuleTarget(_serialization.Model):
"""The role management policy rule target.
:ivar caller: The caller of the setting.
:vartype caller: str
:ivar operations: The type of operation.
:vartype operations: list[str]
:ivar level: The assignment level to which it is applied.
:vartype level: str
:ivar target_objects: The list of target objects.
:vartype target_objects: list[str]
:ivar inheritable_settings: The list of inheritable settings.
:vartype inheritable_settings: list[str]
:ivar enforced_settings: The list of enforced settings.
:vartype enforced_settings: list[str]
"""
_attribute_map = {
"caller": {"key": "caller", "type": "str"},
"operations": {"key": "operations", "type": "[str]"},
"level": {"key": "level", "type": "str"},
"target_objects": {"key": "targetObjects", "type": "[str]"},
"inheritable_settings": {"key": "inheritableSettings", "type": "[str]"},
"enforced_settings": {"key": "enforcedSettings", "type": "[str]"},
}
def __init__(
self,
*,
caller: Optional[str] = None,
operations: Optional[List[str]] = None,
level: Optional[str] = None,
target_objects: Optional[List[str]] = None,
inheritable_settings: Optional[List[str]] = None,
enforced_settings: Optional[List[str]] = None,
**kwargs: Any
) -> None:
"""
:keyword caller: The caller of the setting.
:paramtype caller: str
:keyword operations: The type of operation.
:paramtype operations: list[str]
:keyword level: The assignment level to which it is applied.
:paramtype level: str
:keyword target_objects: The list of target objects.
:paramtype target_objects: list[str]
:keyword inheritable_settings: The list of inheritable settings.
:paramtype inheritable_settings: list[str]
:keyword enforced_settings: The list of enforced settings.
:paramtype enforced_settings: list[str]
"""
super().__init__(**kwargs)
self.caller = caller
self.operations = operations
self.level = level
self.target_objects = target_objects
self.inheritable_settings = inheritable_settings
self.enforced_settings = enforced_settings
class UserSet(_serialization.Model):
"""The detail of a user.
:ivar user_type: The type of user. Known values are: "User" and "Group".
:vartype user_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.UserType
:ivar is_backup: The value indicating whether the user is a backup fallback approver.
:vartype is_backup: bool
:ivar id: The object id of the user.
:vartype id: str
:ivar description: The description of the user.
:vartype description: str
"""
_attribute_map = {
"user_type": {"key": "userType", "type": "str"},
"is_backup": {"key": "isBackup", "type": "bool"},
"id": {"key": "id", "type": "str"},
"description": {"key": "description", "type": "str"},
}
def __init__(
self,
*,
user_type: Optional[Union[str, "_models.UserType"]] = None,
is_backup: Optional[bool] = None,
id: Optional[str] = None, # pylint: disable=redefined-builtin
description: Optional[str] = None,
**kwargs: Any
) -> None:
"""
:keyword user_type: The type of user. Known values are: "User" and "Group".
:paramtype user_type: str or ~azure.mgmt.authorization.v2020_10_01_preview.models.UserType
:keyword is_backup: The value indicating whether the user is a backup fallback approver.
:paramtype is_backup: bool
:keyword id: The object id of the user.
:paramtype id: str
:keyword description: The description of the user.
:paramtype description: str
"""
super().__init__(**kwargs)
self.user_type = user_type
self.is_backup = is_backup
self.id = id
self.description = description
class ValidationResponse(_serialization.Model):
"""Validation response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar is_valid: Whether or not validation succeeded.
:vartype is_valid: bool
:ivar error_info: Failed validation result details.
:vartype error_info:
~azure.mgmt.authorization.v2020_10_01_preview.models.ValidationResponseErrorInfo
"""
_validation = {
"is_valid": {"readonly": True},
}
_attribute_map = {
"is_valid": {"key": "isValid", "type": "bool"},
"error_info": {"key": "errorInfo", "type": "ValidationResponseErrorInfo"},
}
def __init__(self, *, error_info: Optional["_models.ValidationResponseErrorInfo"] = None, **kwargs: Any) -> None:
"""
:keyword error_info: Failed validation result details.
:paramtype error_info:
~azure.mgmt.authorization.v2020_10_01_preview.models.ValidationResponseErrorInfo
"""
super().__init__(**kwargs)
self.is_valid = None
self.error_info = error_info
class ValidationResponseErrorInfo(_serialization.Model):
"""Failed validation result details.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: Error code indicating why validation failed.
:vartype code: str
:ivar message: Message indicating why validation failed.
:vartype message: str
"""
_validation = {
"code": {"readonly": True},
"message": {"readonly": True},
}
_attribute_map = {
"code": {"key": "code", "type": "str"},
"message": {"key": "message", "type": "str"},
}
def __init__(self, **kwargs: Any) -> None:
""" """
super().__init__(**kwargs)
self.code = None
self.message = None
|
[
"[email protected]"
] | |
bf85f2d5bc9202b07db534959a5bffdd241ea76a
|
192874fd96861ceb1864a71bf6f13932cc017d63
|
/hue/desktop/core/ext-py/openpyxl-1.6.1/openpyxl/writer/styles.py
|
c31a5a905027ab106aeed9608cef392eb720e9c8
|
[
"MIT",
"PSF-2.0",
"Apache-2.0"
] |
permissive
|
OpenPOWER-BigData/HDP-hue
|
1de3efc0ac773f1e7b1acd03675f11b65c6f477d
|
23719febdaae26c916bdc9d0712645987ae7e0e4
|
refs/heads/master
| 2021-01-17T17:19:31.157051 | 2016-07-18T19:44:10 | 2016-07-18T19:44:10 | 63,631,863 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,535 |
py
|
# file openpyxl/writer/styles.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Write the shared style table."""
# package imports
from openpyxl.shared.xmltools import Element, SubElement
from openpyxl.shared.xmltools import get_document_content
from openpyxl import style
class StyleWriter(object):
def __init__(self, workbook):
self._style_list = self._get_style_list(workbook)
self._root = Element('styleSheet',
{'xmlns':'http://schemas.openxmlformats.org/spreadsheetml/2006/main'})
    def _get_style_list(self, workbook):
        crc = {}
        for worksheet in workbook.worksheets:
            # 'st' avoids shadowing the imported openpyxl 'style' module
            unique_styles = dict((id(st), st) for st in worksheet._styles.values()).values()
            for st in unique_styles:
                crc[hash(st)] = st
        self.style_table = dict([(st, i + 1) \
            for i, st in enumerate(crc.values())])
        sorted_styles = sorted(self.style_table.items(), \
            key=lambda pair: pair[1])
        return [s[0] for s in sorted_styles]
def get_style_by_hash(self):
return dict([(hash(style), id) \
for style, id in self.style_table.items()])
def write_table(self):
number_format_table = self._write_number_formats()
fonts_table = self._write_fonts()
fills_table = self._write_fills()
borders_table = self._write_borders()
self._write_cell_style_xfs()
self._write_cell_xfs(number_format_table, fonts_table, fills_table, borders_table)
self._write_cell_style()
self._write_dxfs()
self._write_table_styles()
return get_document_content(xml_node=self._root)
def _write_fonts(self):
""" add fonts part to root
return {font.crc => index}
"""
fonts = SubElement(self._root, 'fonts')
# default
font_node = SubElement(fonts, 'font')
SubElement(font_node, 'sz', {'val':'11'})
SubElement(font_node, 'color', {'theme':'1'})
SubElement(font_node, 'name', {'val':'Calibri'})
SubElement(font_node, 'family', {'val':'2'})
SubElement(font_node, 'scheme', {'val':'minor'})
# others
table = {}
index = 1
for st in self._style_list:
if hash(st.font) != hash(style.DEFAULTS.font) and hash(st.font) not in table:
table[hash(st.font)] = str(index)
font_node = SubElement(fonts, 'font')
SubElement(font_node, 'sz', {'val':str(st.font.size)})
if str(st.font.color.index).split(':')[0] == 'theme': # strip prefix theme if marked as such
if str(st.font.color.index).split(':')[2]:
SubElement(font_node, 'color', {'theme':str(st.font.color.index).split(':')[1],
'tint':str(st.font.color.index).split(':')[2]})
else:
SubElement(font_node, 'color', {'theme':str(st.font.color.index).split(':')[1]})
else:
SubElement(font_node, 'color', {'rgb':str(st.font.color.index)})
SubElement(font_node, 'name', {'val':st.font.name})
SubElement(font_node, 'family', {'val':'2'})
# Don't write the 'scheme' element because it appears to prevent
# the font name from being applied in Excel.
#SubElement(font_node, 'scheme', {'val':'minor'})
if st.font.bold:
SubElement(font_node, 'b')
if st.font.italic:
SubElement(font_node, 'i')
if st.font.underline == 'single':
SubElement(font_node, 'u')
index += 1
fonts.attrib["count"] = str(index)
return table
def _write_fills(self):
fills = SubElement(self._root, 'fills', {'count':'2'})
fill = SubElement(fills, 'fill')
SubElement(fill, 'patternFill', {'patternType':'none'})
fill = SubElement(fills, 'fill')
SubElement(fill, 'patternFill', {'patternType':'gray125'})
table = {}
index = 2
for st in self._style_list:
if hash(st.fill) != hash(style.DEFAULTS.fill) and hash(st.fill) not in table:
table[hash(st.fill)] = str(index)
fill = SubElement(fills, 'fill')
if hash(st.fill.fill_type) != hash(style.DEFAULTS.fill.fill_type):
node = SubElement(fill, 'patternFill', {'patternType':st.fill.fill_type})
if hash(st.fill.start_color) != hash(style.DEFAULTS.fill.start_color):
if str(st.fill.start_color.index).split(':')[0] == 'theme': # strip prefix theme if marked as such
if str(st.fill.start_color.index).split(':')[2]:
SubElement(node, 'fgColor', {'theme':str(st.fill.start_color.index).split(':')[1],
'tint':str(st.fill.start_color.index).split(':')[2]})
else:
SubElement(node, 'fgColor', {'theme':str(st.fill.start_color.index).split(':')[1]})
else:
SubElement(node, 'fgColor', {'rgb':str(st.fill.start_color.index)})
if hash(st.fill.end_color) != hash(style.DEFAULTS.fill.end_color):
if str(st.fill.end_color.index).split(':')[0] == 'theme': # strip prefix theme if marked as such
if str(st.fill.end_color.index).split(':')[2]:
SubElement(node, 'bgColor', {'theme':str(st.fill.end_color.index).split(':')[1],
'tint':str(st.fill.end_color.index).split(':')[2]})
else:
SubElement(node, 'bgColor', {'theme':str(st.fill.end_color.index).split(':')[1]})
else:
SubElement(node, 'bgColor', {'rgb':str(st.fill.end_color.index)})
index += 1
fills.attrib["count"] = str(index)
return table
def _write_borders(self):
borders = SubElement(self._root, 'borders')
# default
border = SubElement(borders, 'border')
SubElement(border, 'left')
SubElement(border, 'right')
SubElement(border, 'top')
SubElement(border, 'bottom')
SubElement(border, 'diagonal')
# others
table = {}
index = 1
for st in self._style_list:
if hash(st.borders) != hash(style.DEFAULTS.borders) and hash(st.borders) not in table:
table[hash(st.borders)] = str(index)
border = SubElement(borders, 'border')
# caution: respect this order
for side in ('left', 'right', 'top', 'bottom', 'diagonal'):
obj = getattr(st.borders, side)
                if obj.border_style is None or obj.border_style == 'none':
                    node = SubElement(border, side)
else:
node = SubElement(border, side, {'style':obj.border_style})
if str(obj.color.index).split(':')[0] == 'theme': # strip prefix theme if marked as such
if str(obj.color.index).split(':')[2]:
SubElement(node, 'color', {'theme':str(obj.color.index).split(':')[1],
'tint':str(obj.color.index).split(':')[2]})
else:
SubElement(node, 'color', {'theme':str(obj.color.index).split(':')[1]})
else:
SubElement(node, 'color', {'rgb':str(obj.color.index)})
index += 1
borders.attrib["count"] = str(index)
return table
def _write_cell_style_xfs(self):
cell_style_xfs = SubElement(self._root, 'cellStyleXfs', {'count':'1'})
xf = SubElement(cell_style_xfs, 'xf',
{'numFmtId':"0", 'fontId':"0", 'fillId':"0", 'borderId':"0"})
def _write_cell_xfs(self, number_format_table, fonts_table, fills_table, borders_table):
""" write styles combinations based on ids found in tables """
# writing the cellXfs
cell_xfs = SubElement(self._root, 'cellXfs',
{'count':'%d' % (len(self._style_list) + 1)})
# default
def _get_default_vals():
return dict(numFmtId='0', fontId='0', fillId='0',
xfId='0', borderId='0')
SubElement(cell_xfs, 'xf', _get_default_vals())
for st in self._style_list:
vals = _get_default_vals()
if hash(st.font) != hash(style.DEFAULTS.font):
vals['fontId'] = fonts_table[hash(st.font)]
vals['applyFont'] = '1'
if hash(st.borders) != hash(style.DEFAULTS.borders):
vals['borderId'] = borders_table[hash(st.borders)]
vals['applyBorder'] = '1'
if hash(st.fill) != hash(style.DEFAULTS.fill):
vals['fillId'] = fills_table[hash(st.fill)]
vals['applyFill'] = '1'
if st.number_format != style.DEFAULTS.number_format:
vals['numFmtId'] = '%d' % number_format_table[st.number_format]
vals['applyNumberFormat'] = '1'
if hash(st.alignment) != hash(style.DEFAULTS.alignment):
vals['applyAlignment'] = '1'
node = SubElement(cell_xfs, 'xf', vals)
if hash(st.alignment) != hash(style.DEFAULTS.alignment):
alignments = {}
for align_attr in ['horizontal', 'vertical']:
if hash(getattr(st.alignment, align_attr)) != hash(getattr(style.DEFAULTS.alignment, align_attr)):
alignments[align_attr] = getattr(st.alignment, align_attr)
if hash(st.alignment.wrap_text) != hash(style.DEFAULTS.alignment.wrap_text):
alignments['wrapText'] = '1'
if st.alignment.text_rotation > 0:
alignments['textRotation'] = '%s' % st.alignment.text_rotation
elif st.alignment.text_rotation < 0:
alignments['textRotation'] = '%s' % (90 - st.alignment.text_rotation)
SubElement(node, 'alignment', alignments)
def _write_cell_style(self):
cell_styles = SubElement(self._root, 'cellStyles', {'count':'1'})
cell_style = SubElement(cell_styles, 'cellStyle',
{'name':"Normal", 'xfId':"0", 'builtinId':"0"})
def _write_dxfs(self):
dxfs = SubElement(self._root, 'dxfs', {'count':'0'})
def _write_table_styles(self):
table_styles = SubElement(self._root, 'tableStyles',
{'count':'0', 'defaultTableStyle':'TableStyleMedium9',
'defaultPivotStyle':'PivotStyleLight16'})
def _write_number_formats(self):
number_format_table = {}
number_format_list = []
exceptions_list = []
        num_fmt_id = 165  # start above any builtin number-format id
        num_fmt_offset = 0
        for st in self._style_list:
            if st.number_format not in number_format_list:
                number_format_list.append(st.number_format)
for number_format in number_format_list:
if number_format.is_builtin():
btin = number_format.builtin_format_id(number_format.format_code)
number_format_table[number_format] = btin
else:
number_format_table[number_format] = num_fmt_id + num_fmt_offset
num_fmt_offset += 1
exceptions_list.append(number_format)
num_fmts = SubElement(self._root, 'numFmts',
{'count':'%d' % len(exceptions_list)})
        for number_format in exceptions_list:
SubElement(num_fmts, 'numFmt',
{'numFmtId':'%d' % number_format_table[number_format],
'formatCode':'%s' % number_format.format_code})
return number_format_table
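# Illustrative usage sketch (assumes an openpyxl Workbook with some styled
# cells; the variable names are hypothetical):
#   from openpyxl import Workbook
#   wb = Workbook()
#   writer = StyleWriter(wb)
#   xml = writer.write_table()  # serialized content for xl/styles.xml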
|
[
"[email protected]"
] | |
99c7253a3a82a9b457f75291657f219af610974a
|
7baef4f7dad8e6538096d42c4e03e6d531fbf0bf
|
/day03/pymysql回顾.py
|
8024a2823e0fbd29a0872938bafc1ab8e1a1cd6e
|
[] |
no_license
|
q737645224/spyder
|
a16a2bdbdab12ca6b2546f81363d3571d1358a3e
|
bae94180ef6953a21a2491da378cce738312afc7
|
refs/heads/master
| 2020-04-07T20:03:30.966825 | 2018-11-23T04:29:34 | 2018-11-23T04:29:34 | 158,673,753 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 521 |
py
|
# Create a database named testspider
# Create a table t1 (id int)
# Insert one record id=1 into the table
import pymysql
# Create the database connection object
db = pymysql.connect("localhost","root","123456",
charset="utf8")
# Create a cursor object
cursor = db.cursor()
cursor.execute("create database if not exists testspider;")
cursor.execute("use testspider;")
cursor.execute("create table if not exists t1(id int);")
cursor.execute("insert into t1 values(1);")
db.commit()
cursor.close()
db.close()
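# Sketch of a follow-up query against the t1 table created above; the
# inserted value and reconnect are hypothetical illustrations:
#   db = pymysql.connect("localhost", "root", "123456", "testspider", charset="utf8")
#   cursor = db.cursor()
#   cursor.execute("insert into t1 values(%s);", (2,))
#   db.commit()
#   cursor.execute("select * from t1;")
#   print(cursor.fetchall())  # e.g. ((1,), (2,))
#   cursor.close()
#   db.close()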
|
[
"[email protected]"
] | |
c73811aca9f9efdecf49e96164b6a636150efd2a
|
4e02d5b0b1b0739553fd40bbbdfb0d02c9830350
|
/128_Longest_Consecutive_Sequence.py
|
c9b07f448a3b08cc5afae8e2d2900b8f23681a6f
|
[] |
no_license
|
bingli8802/leetcode
|
b039ab6af62f0c8992463393f561caafd21056e6
|
a509b383a42f54313970168d9faa11f088f18708
|
refs/heads/master
| 2023-03-29T03:11:45.801090 | 2021-03-23T22:55:16 | 2021-03-23T22:55:16 | 279,321,659 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 500 |
py
|
class Solution(object):
    # Deduplicate first, then sort
def longestConsecutive(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
nums = list(set(nums))
nums.sort()
n = len(nums)
dp = [1] * n
res = 1
# print nums
for i in range(1, n):
if nums[i] == nums[i-1] + 1:
dp[i] = dp[i-1] + 1
res = max(res, dp[i])
return res
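# Sketch usage:
#   Solution().longestConsecutive([100, 4, 200, 1, 3, 2])  # -> 4 (run 1,2,3,4)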
|
[
"[email protected]"
] | |
a94cec00255f5040df4c55fb1849dca6fed62f52
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/423/usersdata/310/90249/submittedfiles/mdc.py
|
a77c841ed2e89333d50d419a258627beefa4aba9
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 228 |
py
|
# -*- coding: utf-8 -*-
# mdc ("maximo divisor comum") = greatest common divisor of n1 and n2
n1 = int(input('Digite n1: '))
n2 = int(input('Digite n2: '))
mdc = 1
i = 1
while True:
    i += 1
    if i > n1 or i > n2:
        break
    if n1 % i == 0 and n2 % i == 0:
        mdc = i
print(mdc)
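# Sketch: with n1=12 and n2=18 the loop keeps the largest common divisor
# found (2, 3, then 6), so the program prints 6.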
|
[
"[email protected]"
] | |
f21d03a72329d8ac5a16de1eab795c0d58849ca1
|
ef11cb7a2ee550e4fb95be46cd4d67d6cc230787
|
/python/Top Interview Questions - Medium/Sorting and Searching/tests/test_peakelement.py
|
d127805ee2d84e333d8f1e8d744f4cc27a3513b7
|
[] |
no_license
|
Hilldrupca/LeetCode
|
44b32161743ba982ea5e3fe593ff8a27c96e9350
|
c6d600bc74afd14e00d4f0ffed40696192b229c3
|
refs/heads/master
| 2023-03-31T22:21:17.967663 | 2021-04-07T16:18:17 | 2021-04-07T16:18:17 | 288,544,724 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 699 |
py
|
import unittest, sys
sys.path.append('..')
from peakelement import Solution
class TestPeakElement(unittest.TestCase):
def setUp(self):
self.s = Solution()
def test_find_peak_element(self):
case_one = [1,2,3,1]
self.assertEqual(self.s.findPeakElement(case_one), 2)
case_two = [1,2,1,3,5,6,4]
self.assertIn(self.s.findPeakElement(case_two), [1,5])
case_three = [1,2]
self.assertEqual(self.s.findPeakElement(case_three), 1)
case_four = [2,1]
self.assertEqual(self.s.findPeakElement(case_four), 0)
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
304249226458b0590377088be01d33c50df7627a
|
4a5c67c87150955ce0164b4263458dbcb563bbda
|
/api_venv/bin/sqlformat
|
ce4f3d3fe4f575bd89d0ab0ad3ace1e2db24b0c2
|
[] |
no_license
|
madhav06/python-rest-api
|
24715a1b8f0f58ab5ba45d03e8f2fc9eb355a5f0
|
fb49b308bfa478ed53817e1d0a504099a1317e96
|
refs/heads/master
| 2023-02-07T05:30:07.347111 | 2020-12-19T09:02:16 | 2020-12-19T09:02:16 | 294,404,015 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 259 |
#!/Users/madhavnandan/python-rest-api/api_venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
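# Sketch usage of the sqlformat CLI this script exposes (the file name is
# hypothetical):
#   sqlformat --reindent --keywords upper query.sql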
|
[
"[email protected]"
] | ||
f4574ec50464cb4a665ca662d90ca13be44605c6
|
75f0580af1734b9edb9e06bfadfe48f45b057872
|
/2017/16/sol.py
|
835caece5131fdce96b033253e6823d0875f54c4
|
[] |
no_license
|
penteract/adventofcode
|
5bb317f8093f60c1d776d0983016a5288d059603
|
7b7344708ef1d58caa339a32a13f3390556b664c
|
refs/heads/master
| 2023-01-29T16:08:13.541190 | 2023-01-16T20:21:02 | 2023-01-16T20:21:02 | 160,901,373 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,444 |
py
|
from functools import *
from itertools import *
from collections import defaultdict
import sys
sys.setrecursionlimit(100000)
f = open("input")
l=[x for x in f]
d=defaultdict(int)
#r=list(map(int,l[0].split()))
#l=["s1,x3/4,pe/b"]
l = l[0].split(",")
p = [chr(x) for x in range(ord("a"),ord("p")+1)]
#p=list("abcde")
print("".join(p))
d={"".join(p):0}
aa=0
for i in range(2000):
for ins in l:
if ins[0]=="s":
n=int(ins[1:])
p=p[-n:]+p[:-n]
elif ins[0]=="x":
a,b=list(map(int,ins[1:].split("/")))
x=p[a]
p[a]=p[b]
p[b]=x
elif ins[0]=="p":
x,y = ins[1:].split("/")
a=p.index(x)
b=p.index(y)
x=p[a]
p[a]=p[b]
p[b]=x
else:
print(ins)
k=("".join(p))
if k in d:
print(k)
break
aa+=1
d[k]=aa
perm = [ord(x)-ord("a") for x in "dcmlhejnifpokgba"]
def ap(l,p):
return [l[i] for i in p]
def apply(prm,n):
if n==1: return prm
if n%2:
return ap(apply(prm,n-1),prm)
else:
return apply(ap(prm,prm),n//2)
def disp(s):
return ("".join(chr(n+ord("a")) for n in s))
disp(apply(perm,10**9))
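# Sketch: apply() is exponentiation by squaring over permutation
# composition, so 10**9 applications need only O(log n) calls to ap().
# Tiny check: apply([1, 2, 0], 3) == [0, 1, 2]  (a 3-cycle cubed is identity)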
#wrong:
#jlmenhdafcbkgoip doing x wrong (moving from the front)
#pmbdaelhgonkjcif still doing x wrong
#wrong pt2
#dcmljghfinpokeba (permutation nonsense)
#legnajicfkmdobph (adding 1 after break statement)
|
[
"[email protected]"
] | |
db49ab033224ca4e10e045059a3acc5df038ce33
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part002442.py
|
5629f88f997a1d7f910f1dc3a272cbf65af41175
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,955 |
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher58448(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.1.0_1', 1, 1, S(1)), Mul)
]),
1: (1, Multiset({}), [
(VariableWithCount('i2.4.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.4.1.0_1', 1, 1, S(1)), Mul)
]),
2: (2, Multiset({0: 1}), [
(VariableWithCount('i2.2.1.2.1.0', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher58448._instance is None:
CommutativeMatcher58448._instance = CommutativeMatcher58448()
return CommutativeMatcher58448._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 58447
if len(subjects) >= 1 and isinstance(subjects[0], Pow):
tmp1 = subjects.popleft()
subjects2 = deque(tmp1._args)
# State 73934
if len(subjects2) >= 1:
tmp3 = subjects2.popleft()
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.1.1', tmp3)
except ValueError:
pass
else:
pass
# State 73935
if len(subjects2) >= 1:
tmp5 = subjects2.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.2', tmp5)
except ValueError:
pass
else:
pass
# State 73936
if len(subjects2) == 0:
pass
# State 73937
if len(subjects) == 0:
pass
# 0: x**n
yield 0, subst2
subjects2.appendleft(tmp5)
subjects2.appendleft(tmp3)
subjects.appendleft(tmp1)
return
yield
from collections import deque
|
[
"[email protected]"
] | |
4404f045b07bb343172c518668f96bba1f6a6075
|
3a99586e226f0f23ace22fcc30af7b190e90dd08
|
/esp32/tools/get.py
|
51be563a24a667b244c4f3d8cf43666479955974
|
[] |
no_license
|
thewenneur/esp32-1
|
19a1095b0b619c0bb9cb9eaa86abd2a9d9678269
|
d683123af8cc39fe2dfc281ac44444023186960f
|
refs/heads/master
| 2021-01-20T08:36:48.001717 | 2017-08-29T16:39:49 | 2017-08-29T16:39:49 | 101,567,493 | 0 | 0 | null | 2017-08-29T16:44:49 | 2017-08-27T17:06:58 |
C
|
UTF-8
|
Python
| false | false | 5,065 |
py
|
#!/usr/bin/env python
# This script will download and extract required tools into the current directory.
# Tools list is obtained from package/package_esp8266com_index.template.json file.
# Written by Ivan Grokhotkov, 2015.
#
from __future__ import print_function
import os
import shutil
import errno
import os.path
import hashlib
import json
import platform
import sys
import tarfile
import zipfile
import re
import ssl  # used by the CYGWIN download path below
if sys.version_info[0] == 3:
from urllib.request import urlretrieve
else:
# Not Python 3 - today, it is most likely to be Python 2
from urllib import urlretrieve
if 'Windows' in platform.system():
import requests
current_dir = os.path.dirname(os.path.realpath(__file__))
dist_dir = current_dir + '/dist/'
def sha256sum(filename, blocksize=65536):
hash = hashlib.sha256()
with open(filename, "rb") as f:
for block in iter(lambda: f.read(blocksize), b""):
hash.update(block)
return hash.hexdigest()
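# Sketch (hypothetical digest variable): how the commented-out checksum
# verification in get_tool() below would use this helper.
#   if sha256sum(local_path) != expected_hash:
#       raise RuntimeError('checksum mismatch')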
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST or not os.path.isdir(path):
raise
def report_progress(count, blockSize, totalSize):
percent = int(count*blockSize*100/totalSize)
percent = min(100, percent)
sys.stdout.write("\r%d%%" % percent)
sys.stdout.flush()
def unpack(filename, destination):
dirname = ''
print('Extracting {0}'.format(os.path.basename(filename)))
sys.stdout.flush()
if filename.endswith('tar.gz'):
tfile = tarfile.open(filename, 'r:gz')
tfile.extractall(destination)
dirname= tfile.getnames()[0]
elif filename.endswith('zip'):
zfile = zipfile.ZipFile(filename)
zfile.extractall(destination)
dirname = zfile.namelist()[0]
else:
raise NotImplementedError('Unsupported archive type')
# a little trick to rename tool directories so they don't contain version number
rename_to = re.match(r'^([a-z][^\-]*\-*)+', dirname).group(0).strip('-')
if rename_to != dirname:
print('Renaming {0} to {1}'.format(dirname, rename_to))
if os.path.isdir(rename_to):
shutil.rmtree(rename_to)
shutil.move(dirname, rename_to)
def get_tool(tool):
sys_name = platform.system()
archive_name = tool['archiveFileName']
local_path = dist_dir + archive_name
url = tool['url']
#real_hash = tool['checksum'].split(':')[1]
if not os.path.isfile(local_path):
print('Downloading ' + archive_name);
sys.stdout.flush()
if 'CYGWIN_NT' in sys_name:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
urlretrieve(url, local_path, report_progress, context=ctx)
elif 'Windows' in sys_name:
r = requests.get(url)
f = open(local_path, 'wb')
f.write(r.content)
f.close()
else:
urlretrieve(url, local_path, report_progress)
sys.stdout.write("\rDone\n")
sys.stdout.flush()
else:
print('Tool {0} already downloaded'.format(archive_name))
sys.stdout.flush()
#local_hash = sha256sum(local_path)
#if local_hash != real_hash:
# print('Hash mismatch for {0}, delete the file and try again'.format(local_path))
# raise RuntimeError()
unpack(local_path, '.')
def load_tools_list(filename, platform):
tools_info = json.load(open(filename))['packages'][0]['tools']
tools_to_download = []
for t in tools_info:
tool_platform = [p for p in t['systems'] if p['host'] == platform]
if len(tool_platform) == 0:
continue
tools_to_download.append(tool_platform[0])
return tools_to_download
def identify_platform():
arduino_platform_names = {'Darwin' : {32 : 'i386-apple-darwin', 64 : 'x86_64-apple-darwin'},
'Linux' : {32 : 'i686-pc-linux-gnu', 64 : 'x86_64-pc-linux-gnu'},
'LinuxARM': {32 : 'arm-linux-gnueabihf', 64 : 'aarch64-linux-gnu'},
'Windows' : {32 : 'i686-mingw32', 64 : 'i686-mingw32'}}
bits = 32
if sys.maxsize > 2**32:
bits = 64
sys_name = platform.system()
sys_platform = platform.platform()
print('System: %s, Info: %s' % (sys_name, sys_platform))
if 'Linux' in sys_name and sys_platform.find('arm') > 0:
sys_name = 'LinuxARM'
if 'CYGWIN_NT' in sys_name:
sys_name = 'Windows'
return arduino_platform_names[sys_name][bits]
if __name__ == '__main__':
identified_platform = identify_platform()
print('Platform: {0}'.format(identified_platform))
tools_to_download = load_tools_list(current_dir + '/../package/package_esp32_index.template.json', identified_platform)
mkdir_p(dist_dir)
for tool in tools_to_download:
get_tool(tool)
print('Done')
|
[
"[email protected]"
] | |
583dd181b2b7cd621ebd9e4f11ad5c3aaa71c632
|
db69a3e20ec69bd8a08ed14ec6193a08e543965d
|
/mars/dataframe/window/expanding/tests/test_expanding.py
|
2b03e57dbb175028f762828d25976440b7f1e046
|
[
"BSD-3-Clause",
"CC0-1.0",
"ISC",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"MIT"
] |
permissive
|
Fernadoo/mars
|
dbf62cd6f3ff82e3c399f1c06c6da1681b462856
|
f8e93edeecbe184b018cd1d0d948b3a2ba74bee6
|
refs/heads/master
| 2023-08-12T12:48:33.726883 | 2021-09-29T14:29:18 | 2021-09-29T14:29:18 | 377,359,795 | 0 | 1 |
Apache-2.0
| 2021-09-29T14:29:19 | 2021-06-16T03:29:08 |
Python
|
UTF-8
|
Python
| false | false | 2,337 |
py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
from ..... import dataframe as md
from .....core import tile
def test_expanding():
df = pd.DataFrame(np.random.rand(4, 3), columns=list('abc'))
df2 = md.DataFrame(df)
with pytest.raises(NotImplementedError):
_ = df2.expanding(3, center=True)
with pytest.raises(NotImplementedError):
_ = df2.expanding(3, axis=1)
r = df2.expanding(3, center=False)
expected = df.expanding(3, center=False)
assert repr(r) == repr(expected)
assert 'b' in dir(r)
with pytest.raises(AttributeError):
_ = r.d
with pytest.raises(KeyError):
_ = r['d']
with pytest.raises(KeyError):
_ = r['a', 'd']
assert 'a' not in dir(r.a)
assert 'c' not in dir(r['a', 'b'])
def test_expanding_agg():
df = pd.DataFrame(np.random.rand(4, 3), columns=list('abc'))
df2 = md.DataFrame(df, chunk_size=3)
r = df2.expanding(3).agg('max')
expected = df.expanding(3).agg('max')
assert r.shape == df.shape
assert r.index_value is df2.index_value
pd.testing.assert_index_equal(r.columns_value.to_pandas(),
expected.columns)
pd.testing.assert_series_equal(r.dtypes, df2.dtypes)
r = tile(r)
for c in r.chunks:
assert c.shape == c.inputs[0].shape
assert c.index_value is c.inputs[0].index_value
pd.testing.assert_index_equal(c.columns_value.to_pandas(),
expected.columns)
pd.testing.assert_series_equal(c.dtypes, expected.dtypes)
aggs = ['sum', 'count', 'min', 'max', 'mean', 'var', 'std']
for a in aggs:
r = getattr(df2.expanding(3), a)()
assert r.op.func == [a]
|
[
"[email protected]"
] | |
58ed44fde6b4a7efde10c71105ac6a59cce696d1
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/HaxQfQTEpo7BFE5rz_22.py
|
ad90b5d4387874d64b88c56f046176ed2d7ab37e
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 636 |
py
|
def alternate_pos_neg(lst):
Length = len(lst)
if (0 in lst):
return False
if (Length < 2):
return True
Group_A = []
Group_B = []
Counter = 0
Length = len(lst)
while (Counter < Length):
Item = lst[Counter]
if (Counter % 2 == 0):
Group_A.append(Item)
Counter += 1
else:
Group_B.append(Item)
Counter += 1
Test_A1 = min(Group_A)
Test_A2 = max(Group_A)
Test_B1 = min(Group_B)
Test_B2 = max(Group_B)
if (Test_A1 > 0) and (Test_B2 < 0):
return True
elif (Test_B1 > 0) and (Test_A2 < 0):
return True
else:
return False
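# Sketch usage (inputs are illustrative):
#   alternate_pos_neg([1, -2, 3, -4])  # -> True  (signs strictly alternate)
#   alternate_pos_neg([1, 2, -3])      # -> False
#   alternate_pos_neg([0, 1])          # -> False (zero counts as neither sign)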
|
[
"[email protected]"
] | |
1f04965e75982f1b5afc12d0b634c0c48480c05a
|
356b5455a4fb86c49c800a6181323b7fabef2b08
|
/ppci/cli/java.py
|
fb06d54a7bd02da0b0ff2377aad9a91c0f00bb8b
|
[
"BSD-2-Clause"
] |
permissive
|
obround/ppci
|
be7d1ce7832513629ee1301e7b67c0ceda38d668
|
ba0840bc5f4ffd889f882a814fb26f88cd854379
|
refs/heads/master
| 2023-02-11T13:47:35.439871 | 2021-01-05T22:33:08 | 2021-01-05T22:33:08 | 327,131,704 | 0 | 0 |
BSD-2-Clause
| 2021-01-05T22:08:23 | 2021-01-05T22:08:23 | null |
UTF-8
|
Python
| false | false | 2,137 |
py
|
""" Java handling utility.
"""
import argparse
import sys
from .base import base_parser, march_parser, LogSetup, get_arch_from_args
from .compile_base import compile_parser, do_compile
from ..arch.jvm import read_class_file, class_to_ir, print_class_file
from ..arch.jvm import read_jar
from ..irutils import print_module
parser = argparse.ArgumentParser(description=__doc__, parents=[base_parser])
subparsers = parser.add_subparsers(
title="commands", description="possible commands", dest="command"
)
java_compile_parser = subparsers.add_parser(
"compile",
help="Compile a java class file ahead of time.",
parents=[compile_parser, march_parser],
)
java_compile_parser.add_argument(
"class_file",
metavar="java class file",
type=argparse.FileType("rb"),
help="class file to compile",
)
dis_parser = subparsers.add_parser(
"javap", help="Disassemble (javap) a java class."
)
dis_parser.add_argument(
"class_file",
metavar="java class file",
type=argparse.FileType("rb"),
help="class file to inspect",
)
jar_parser = subparsers.add_parser("jar", help="Explore jar file.")
jar_parser.add_argument(
"jarfile", metavar="java jar file", help="jar file to inspect"
)
def java(args=None):
""" Java command line utility. """
args = parser.parse_args(args)
with LogSetup(args) as log_setup:
if args.command == "compile":
march = get_arch_from_args(args)
class_file = read_class_file(args.class_file, verbose=True)
args.class_file.close()
ir_module = class_to_ir(class_file)
print_module(ir_module, verify=False)
ir_modules = [ir_module]
do_compile(ir_modules, march, log_setup.reporter, log_setup.args)
elif args.command == "javap":
class_file = read_class_file(args.class_file)
args.class_file.close()
print_class_file(class_file)
elif args.command == "jar":
read_jar(args.jarfile)
else: # pragma: no cover
parser.print_usage()
sys.exit(1)
if __name__ == "__main__":
java()
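# Sketch invocation via the module entry point above (the class file name is
# hypothetical):
#   python -m ppci.cli.java javap Hello.class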
|
[
"[email protected]"
] | |
a6a0f7b4beb4b4725dd0e18f9b8fc2ea3da17868
|
ecc5e006b8336a444678ac0db9ef63fffc1307d3
|
/backend/affinity_24197/wsgi.py
|
862ceeb3ae2ce5ecf19d749a9131ca9f323b9128
|
[] |
no_license
|
crowdbotics-apps/affinity-24197
|
f280bff6a9352c86e4df6306167cd322f1e798d4
|
699558a60e58f52c53b50650200b9c0ff27292ed
|
refs/heads/master
| 2023-02-27T06:40:05.898471 | 2021-02-01T20:55:17 | 2021-02-01T20:55:17 | 335,081,472 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 405 |
py
|
"""
WSGI config for affinity_24197 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'affinity_24197.settings')
application = get_wsgi_application()
|
[
"[email protected]"
] | |
487a87eccdc9fded9c01a8c11fc065a68e28b11e
|
87140007e96872d3611f0778eb0eebe5799616d7
|
/runs/src8-tgt4/par-bro-iter03000.cfg.py
|
9b417d6b0b220f0e5ea3402e7eca3723d83b49d6
|
[
"MIT"
] |
permissive
|
janpawellek/broeval
|
49499fa302abff916ffced201034d3b9394503cd
|
57e31aa6e354d0bba88103b44910483e8d982d00
|
refs/heads/master
| 2021-01-11T12:19:13.619220 | 2016-12-20T16:23:27 | 2016-12-20T16:23:27 | 76,468,134 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 825 |
py
|
# Write results to this file
OUTFILE = 'runs/src8-tgt4/par-bro-iter03000.result.csv'
# Source computers for the requests
SOURCE = ['10.0.0.11', '10.0.0.12', '10.0.0.13', '10.0.0.14', '10.0.0.31', '10.0.0.32', '10.0.0.33', '10.0.0.34']
# Should Bro be enabled on the source machines?
SOURCE_BRO = [True, True, True, True, True, True, True, True]
# Target machines for the requests (aka server)
TARGET = ['10.0.0.21', '10.0.0.22', '10.0.0.23', '10.0.0.24']
# Should Bro be enabled on the target machines?
TARGET_BRO = [True, True, True, True]
# Connection mode (par = parallel, seq = sequential)
MODE = 'par'
# Number of evaluation repetitions to run
EPOCHS = 100
# Number of iterations to be run in each evaluation repetition
ITER = 3000
# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
|
[
"[email protected]"
] | |
914826d139e292b9eaad569990e10a927b19d38f
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part000375.py
|
aa7700fe091caac398fee48064c6a60790bbfb21
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,308 |
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher54899(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.2.2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.2.2.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher54899._instance is None:
CommutativeMatcher54899._instance = CommutativeMatcher54899()
return CommutativeMatcher54899._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 54898
return
yield
from collections import deque
|
[
"[email protected]"
] | |
aae86dec0874ccba8a31b92ae911f0ffd56c20b4
|
e233f9bf52ad0f88416962edd957a3c866c19b78
|
/reagent/workflow/spark_utils.py
|
fbd43d68752885e10e28a6381fa830d1d76ccb3b
|
[
"BSD-3-Clause"
] |
permissive
|
dwtcourses/ReAgent
|
38c99dfe47adf1471620419f744cb4145f4f4151
|
b9b54d4f30ff65cf1c54dc0cf90c938b48c44f90
|
refs/heads/master
| 2022-04-26T15:35:46.109984 | 2020-04-29T05:38:14 | 2020-04-29T05:40:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,232 |
py
|
#!/usr/bin/env python3
import logging
import os
import pprint
from os.path import abspath, dirname, join
from typing import Dict, Optional
import reagent
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
logger = logging.getLogger(__name__)
# This is where Scala preprocessing (i.e TimelineOperator) is located
SPARK_JAR_FROM_ROOT_DIR = "preprocessing/target/rl-preprocessing-1.1.jar"
"""
SPARK_JAR is abspath to the above jar file.
Assume file structure
ReAgent/
preprocessing/...
reagent/...
"""
SPARK_JAR = join(dirname(reagent.__file__), os.pardir, SPARK_JAR_FROM_ROOT_DIR)
DEFAULT_SPARK_CONFIG = {
"spark.master": "local[*]",
"spark.app.name": "ReAgent",
"spark.sql.session.timeZone": "UTC",
"spark.sql.warehouse.dir": abspath("spark-warehouse"),
# Set shuffle partitions to a low number, e.g. <= cores * 2 to speed
# things up, otherwise the tests will use the default 200 partitions
# and it will take a lot more time to complete
"spark.sql.shuffle.partitions": "12",
"spark.sql.execution.arrow.enabled": "true",
# For accessing timeline operator
"spark.driver.extraClassPath": SPARK_JAR,
# Same effect as builder.enableHiveSupport() [useful for test framework]
"spark.sql.catalogImplementation": "hive",
}
def get_spark_session(config: Optional[Dict[str, str]] = DEFAULT_SPARK_CONFIG):
logger.info(f"Building with config: \n{pprint.pformat(config)}")
spark = SparkSession.builder.enableHiveSupport()
if config is not None:
for k, v in config.items():
spark = spark.config(k, v)
spark = spark.getOrCreate()
spark.sparkContext.setLogLevel("ERROR")
return spark
def get_table_url(table_name: str) -> str:
spark = get_spark_session()
url = (
spark.sql(f"DESCRIBE FORMATTED {table_name}")
.filter((col("col_name") == "Location"))
.select("data_type")
.toPandas()
.astype(str)["data_type"]
.values[0]
)
# unfortunately url is file:/... or hdfs:/... not file:///...
# so we need to insert '//'
assert url.count(":") == 1, f"{url} has more than one :"
schema, path = url.split(":")
return f"{schema}://{path}"
|
[
"[email protected]"
] | |
95da1c4d9aaf181baaa004c2b4e9535dcb762d7c
|
e983e429c87d27860b35e91e82109474aecefe7e
|
/env/bin/django-admin.py
|
45576ab1de002d7bddddaf05f80cc1126b22ae25
|
[
"MIT"
] |
permissive
|
wilbrone/InstaPhotos
|
6943312933a9d33fc5593f109756ccd7fec7118f
|
f4a0b7ccf04637702f5868f4ce9d88913863bc55
|
refs/heads/master
| 2022-12-11T05:45:58.010157 | 2020-01-20T08:33:44 | 2020-01-20T08:33:44 | 230,255,757 | 0 | 0 |
MIT
| 2022-12-08T03:27:12 | 2019-12-26T11:52:30 |
Python
|
UTF-8
|
Python
| false | false | 177 |
py
|
#!/home/aphya5/moringa-school-projects/Django/instagram/env/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"[email protected]"
] | |
4ea1512ea43feb40f0800c9b8e9a1a44af519500
|
fac37d77a8d00e3d13106bcd728d51a455dd16f2
|
/aspc.py
|
42705d3d34343e8607e14290df10a67f66313486
|
[] |
no_license
|
anu-bioinfo/rosalind-4
|
c6a628bba94f647cf4a34bdf505f1527af4346a9
|
3ddc659d44298f4dd4b5dde66d7833b4d27a2580
|
refs/heads/master
| 2020-03-25T13:47:39.521215 | 2014-09-14T02:30:54 | 2014-09-14T02:30:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 466 |
py
|
#!/usr/bin/env python
from __future__ import print_function, division
import os
from math import factorial
def sum_of_combinations(n, m):
    """Sum of binomial coefficients C(n, k) for k = m..n, modulo 1000000."""
    return sum(factorial(n) // (factorial(k) * factorial(n - k))
               for k in range(m, n + 1)) % 1000000
if __name__ == "__main__":
with open(os.path.join('data', 'rosalind_aspc.txt')) as dataset:
n, m = [int(r) for r in dataset.readline().rstrip().split()]
        print(sum_of_combinations(n, m))  # already an int: integer division above
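# Quick sanity check: since the sum of C(n, k) over all k equals 2**n,
# sum_of_combinations(4, 0) must return 16 (2**4, well below the modulus).
assert sum_of_combinations(4, 0) == 2 ** 4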
|
[
"[email protected]"
] | |
5e792aae5764fc35b1a8a29c85694f13c0e7eb99
|
53181572c4b22df4b569a9901bcd5347a3459499
|
/ceit_191116/py200307/function_1_def.py
|
7356a48cd8e325c795895d041c04ae7aae902bff
|
[] |
no_license
|
edu-athensoft/ceit4101python_student
|
80ef067b77421fce76d04f778d5c6de8b12f676c
|
33cfa438c062d45e8d246b853e93d3c14b92ff2d
|
refs/heads/master
| 2020-07-30T01:04:21.084384 | 2020-07-27T02:21:57 | 2020-07-27T02:21:57 | 210,027,310 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 327 |
py
|
"""
function definition, syntax
without parameters
def function_name(*parameters):
'''docstring'''
statement(s)
"""
# example 1 - my print a line
# function name: myprint
# parameters: ()
# define - user-defined
def myprint():
print("===")
print()
print("===")
# call a function
myprint()
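# follow-up sketch (not part of the original lesson): the same banner,
# parameterized so the caller chooses the text between the rules
def myprint2(message):
    print("===")
    print(message)
    print("===")

myprint2("hello")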
|
[
"[email protected]"
] | |
d259c44a3f848080f0ba94f0baa88e557cf06e5b
|
e8c93a67b7812c644d0485ddae665a638d278340
|
/captum/attr/_core/layer/layer_integrated_gradients.py
|
9d3840cc23968bc9e3bfc7fed8f55d4fcf49620a
|
[
"BSD-3-Clause"
] |
permissive
|
nanohanno/captum
|
b30ebadb6da683269bc9d9fb8253304d51049441
|
f1346fc32a1f191f2c9a9401428372f68a88aec1
|
refs/heads/master
| 2021-08-09T00:43:51.866663 | 2021-07-23T23:33:14 | 2021-07-23T23:34:48 | 226,319,799 | 1 | 3 |
BSD-3-Clause
| 2019-12-06T11:56:44 | 2019-12-06T11:56:43 | null |
UTF-8
|
Python
| false | false | 24,893 |
py
|
#!/usr/bin/env python3
import functools
import warnings
from typing import Any, Callable, List, Tuple, Union, overload
import torch
from torch import Tensor
from torch.nn.parallel.scatter_gather import scatter
from captum._utils.common import (
_extract_device,
_format_additional_forward_args,
_format_outputs,
)
from captum._utils.gradient import _forward_layer_eval, _run_forward
from captum._utils.typing import BaselineType, Literal, ModuleOrModuleList, TargetType
from captum.attr._core.integrated_gradients import IntegratedGradients
from captum.attr._utils.attribution import GradientAttribution, LayerAttribution
from captum.attr._utils.common import (
_format_input_baseline,
_tensorize_baseline,
_validate_input,
)
from captum.log import log_usage
class LayerIntegratedGradients(LayerAttribution, GradientAttribution):
r"""
Layer Integrated Gradients is a variant of Integrated Gradients that assigns
an importance score to layer inputs or outputs, depending on whether we
attribute to the former or to the latter one.
Integrated Gradients is an axiomatic model interpretability algorithm that
attributes / assigns an importance score to each input feature by approximating
the integral of gradients of the model's output with respect to the inputs
along the path (straight line) from given baselines / references to inputs.
Baselines can be provided as input arguments to attribute method.
To approximate the integral we can choose to use either a variant of
Riemann sum or Gauss-Legendre quadrature rule.
More details regarding the integrated gradients method can be found in the
original paper:
https://arxiv.org/abs/1703.01365
"""
def __init__(
self,
forward_func: Callable,
layer: ModuleOrModuleList,
device_ids: Union[None, List[int]] = None,
multiply_by_inputs: bool = True,
) -> None:
r"""
Args:
forward_func (callable): The forward function of the model or any
modification of it
layer (ModuleOrModuleList):
Layer or list of layers for which attributions are computed.
For each layer the output size of the attribute matches
this layer's input or output dimensions, depending on
whether we attribute to the inputs or outputs of the
layer, corresponding to the attribution of each neuron
in the input or output of this layer.
Please note that layers to attribute on cannot be
dependent on each other. That is, a subset of layers in
`layer` cannot produce the inputs for another layer.
For example, if your model is of a simple linked-list
based graph structure (think nn.Sequence), e.g. x -> l1
-> l2 -> l3 -> output. If you pass in any one of those
layers, you cannot pass in another due to the
dependence, e.g. if you pass in l2 you cannot pass in
l1 or l3.
device_ids (list(int)): Device ID list, necessary only if forward_func
applies a DataParallel model. This allows reconstruction of
intermediate outputs from batched results across devices.
If forward_func is given as the DataParallel model itself,
then it is not necessary to provide this argument.
multiply_by_inputs (bool, optional): Indicates whether to factor
model inputs' multiplier in the final attribution scores.
In the literature this is also known as local vs global
attribution. If inputs' multiplier isn't factored in,
then this type of attribution method is also called local
attribution. If it is, then that type of attribution
method is called global.
                    More details can be found here:
https://arxiv.org/abs/1711.06104
In case of layer integrated gradients, if `multiply_by_inputs`
is set to True, final sensitivity scores are being multiplied by
layer activations for inputs - layer activations for baselines.
"""
LayerAttribution.__init__(self, forward_func, layer, device_ids=device_ids)
GradientAttribution.__init__(self, forward_func)
self.ig = IntegratedGradients(forward_func, multiply_by_inputs)
if isinstance(layer, list) and len(layer) > 1:
            warnings.warn(
                "Multiple layers provided. Please ensure that each layer is "
                "**not** solely dependent on the outputs of "
                "another layer. Please refer to the documentation for more "
                "detail."
            )
@overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType,
target: TargetType,
additional_forward_args: Any,
n_steps: int,
method: str,
internal_batch_size: Union[None, int],
return_convergence_delta: Literal[False],
attribute_to_layer_input: bool,
) -> Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]]:
...
@overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType,
target: TargetType,
additional_forward_args: Any,
n_steps: int,
method: str,
internal_batch_size: Union[None, int],
return_convergence_delta: Literal[True],
attribute_to_layer_input: bool,
) -> Tuple[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Tensor,
]:
...
@overload
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
) -> Union[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Tuple[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Tensor,
],
]:
...
@log_usage()
def attribute(
self,
inputs: Union[Tensor, Tuple[Tensor, ...]],
baselines: BaselineType = None,
target: TargetType = None,
additional_forward_args: Any = None,
n_steps: int = 50,
method: str = "gausslegendre",
internal_batch_size: Union[None, int] = None,
return_convergence_delta: bool = False,
attribute_to_layer_input: bool = False,
) -> Union[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Tuple[
Union[Tensor, Tuple[Tensor, ...], List[Union[Tensor, Tuple[Tensor, ...]]]],
Tensor,
],
]:
r"""
This method attributes the output of the model with given target index
(in case it is provided, otherwise it assumes that output is a
scalar) to layer inputs or outputs of the model, depending on whether
`attribute_to_layer_input` is set to True or False, using the approach
described above.
In addition to that it also returns, if `return_convergence_delta` is
set to True, integral approximation delta based on the completeness
property of integrated gradients.
Args:
inputs (tensor or tuple of tensors): Input for which layer integrated
gradients are computed. If forward_func takes a single
tensor as input, a single input tensor should be provided.
If forward_func takes multiple tensors as input, a tuple
of the input tensors should be provided. It is assumed
that for all given input tensors, dimension 0 corresponds
to the number of examples, and if multiple input tensors
are provided, the examples must be aligned appropriately.
baselines (scalar, tensor, tuple of scalars or tensors, optional):
Baselines define the starting point from which integral
is computed and can be provided as:
- a single tensor, if inputs is a single tensor, with
exactly the same dimensions as inputs or the first
dimension is one and the remaining dimensions match
with inputs.
- a single scalar, if inputs is a single tensor, which will
be broadcasted for each input value in input tensor.
- a tuple of tensors or scalars, the baseline corresponding
to each tensor in the inputs' tuple can be:
- either a tensor with matching dimensions to
corresponding tensor in the inputs' tuple
or the first dimension is one and the remaining
dimensions match with the corresponding
input tensor.
- or a scalar, corresponding to a tensor in the
inputs' tuple. This scalar value is broadcasted
for corresponding input tensor.
In the cases when `baselines` is not provided, we internally
use zero scalar corresponding to each input tensor.
Default: None
target (int, tuple, tensor or list, optional): Output indices for
which gradients are computed (for classification cases,
this is usually the target class).
If the network returns a scalar value per example,
no target index is necessary.
For general 2D outputs, targets can be either:
- a single integer or a tensor containing a single
integer, which is applied to all input examples
- a list of integers or a 1D tensor, with length matching
the number of examples in inputs (dim 0). Each integer
is applied as the target for the corresponding example.
For outputs with > 2 dimensions, targets can be either:
- A single tuple, which contains #output_dims - 1
elements. This target index is applied to all examples.
- A list of tuples with length equal to the number of
examples in inputs (dim 0), and each tuple containing
#output_dims - 1 elements. Each tuple is applied as the
target for the corresponding example.
Default: None
additional_forward_args (any, optional): If the forward function
requires additional arguments other than the inputs for
which attributions should not be computed, this argument
can be provided. It must be either a single additional
argument of a Tensor or arbitrary (non-tuple) type or a
tuple containing multiple additional arguments including
tensors or any arbitrary python types. These arguments
are provided to forward_func in order following the
arguments in inputs.
For a tensor, the first dimension of the tensor must
correspond to the number of examples. It will be
repeated for each of `n_steps` along the integrated
path. For all other types, the given argument is used
for all forward evaluations.
Note that attributions are not computed with respect
to these arguments.
Default: None
n_steps (int, optional): The number of steps used by the approximation
method. Default: 50.
method (string, optional): Method for approximating the integral,
one of `riemann_right`, `riemann_left`, `riemann_middle`,
`riemann_trapezoid` or `gausslegendre`.
Default: `gausslegendre` if no method is provided.
internal_batch_size (int, optional): Divides total #steps * #examples
data points into chunks of size at most internal_batch_size,
which are computed (forward / backward passes)
sequentially. internal_batch_size must be at least equal to
#examples.
For DataParallel models, each batch is split among the
available devices, so evaluations on each available
device contain internal_batch_size / num_devices examples.
If internal_batch_size is None, then all evaluations are
processed in one batch.
Default: None
return_convergence_delta (bool, optional): Indicates whether to return
convergence delta or not. If `return_convergence_delta`
is set to True convergence delta will be returned in
a tuple following attributions.
Default: False
attribute_to_layer_input (bool, optional): Indicates whether to
compute the attribution with respect to the layer input
or output. If `attribute_to_layer_input` is set to True
then the attributions will be computed with respect to
layer input, otherwise it will be computed with respect
to layer output.
Note that currently it is assumed that either the input
                or the output of the internal layer, depending on whether we
attribute to the input or output, is a single tensor.
Support for multiple tensors will be added later.
Default: False
Returns:
**attributions** or 2-element tuple of **attributions**, **delta**:
- **attributions** (*tensor*, tuple of *tensors* or tuple of *tensors*):
Integrated gradients with respect to `layer`'s inputs or
outputs. Attributions will always be the same size and
dimensionality as the input or output of the given layer,
depending on whether we attribute to the inputs or outputs
of the layer which is decided by the input flag
`attribute_to_layer_input`.
For a single layer, attributions are returned in a tuple if
the layer inputs / outputs contain multiple tensors,
otherwise a single tensor is returned.
For multiple layers, attributions will always be
returned as a list. Each element in this list will be
equivalent to that of a single layer output, i.e. in the
case that one layer, in the given layers, inputs / outputs
multiple tensors: the corresponding output element will be
a tuple of tensors. The ordering of the outputs will be
the same order as the layers given in the constructor.
- **delta** (*tensor*, returned if return_convergence_delta=True):
The difference between the total approximated and true
integrated gradients. This is computed using the property
that the total sum of forward_func(inputs) -
forward_func(baselines) must equal the total sum of the
integrated gradient.
                The delta is calculated per example, meaning that the number of
                elements in the returned delta tensor is equal to the number
                of examples in inputs.
Examples::
>>> # ImageClassifier takes a single input tensor of images Nx3x32x32,
>>> # and returns an Nx10 tensor of class probabilities.
>>> # It contains an attribute conv1, which is an instance of nn.conv2d,
>>> # and the output of this layer has dimensions Nx12x32x32.
>>> net = ImageClassifier()
>>> lig = LayerIntegratedGradients(net, net.conv1)
>>> input = torch.randn(2, 3, 32, 32, requires_grad=True)
>>> # Computes layer integrated gradients for class 3.
>>> # attribution size matches layer output, Nx12x32x32
>>> attribution = lig.attribute(input, target=3)
"""
inps, baselines = _format_input_baseline(inputs, baselines)
_validate_input(inps, baselines, n_steps, method)
baselines = _tensorize_baseline(inps, baselines)
additional_forward_args = _format_additional_forward_args(
additional_forward_args
)
        # flatten one level of tuple/list nesting into a single flat tuple
        def flatten_tuple(tup):
return tuple(
sum((list(x) if isinstance(x, (tuple, list)) else [x] for x in tup), [])
)
if self.device_ids is None:
self.device_ids = getattr(self.forward_func, "device_ids", None)
inputs_layer = _forward_layer_eval(
self.forward_func,
inps,
self.layer,
device_ids=self.device_ids,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
)
# if we have one output
if not isinstance(self.layer, list):
inputs_layer = (inputs_layer,)
num_outputs = [1 if isinstance(x, Tensor) else len(x) for x in inputs_layer]
num_outputs_cumsum = torch.cumsum(
torch.IntTensor([0] + num_outputs), dim=0 # type: ignore
)
inputs_layer = flatten_tuple(inputs_layer)
baselines_layer = _forward_layer_eval(
self.forward_func,
baselines,
self.layer,
device_ids=self.device_ids,
additional_forward_args=additional_forward_args,
attribute_to_layer_input=attribute_to_layer_input,
)
baselines_layer = flatten_tuple(baselines_layer)
        # gradient_func is invoked by IntegratedGradients with the scaled layer activations
def gradient_func(
forward_fn: Callable,
inputs: Union[Tensor, Tuple[Tensor, ...]],
target_ind: TargetType = None,
additional_forward_args: Any = None,
) -> Tuple[Tensor, ...]:
if self.device_ids is None or len(self.device_ids) == 0:
scattered_inputs = (inputs,)
else:
# scatter method does not have a precise enough return type in its
# stub, so suppress the type warning.
scattered_inputs = scatter( # type:ignore
inputs, target_gpus=self.device_ids
)
scattered_inputs_dict = {
scattered_input[0].device: scattered_input
for scattered_input in scattered_inputs
}
with torch.autograd.set_grad_enabled(True):
def layer_forward_hook(
module, hook_inputs, hook_outputs=None, layer_idx=0
):
device = _extract_device(module, hook_inputs, hook_outputs)
is_layer_tuple = (
isinstance(hook_outputs, tuple)
# hook_outputs is None if attribute_to_layer_input == True
if hook_outputs is not None
else isinstance(hook_inputs, tuple)
)
if is_layer_tuple:
return scattered_inputs_dict[device][
num_outputs_cumsum[layer_idx] : num_outputs_cumsum[
layer_idx + 1
]
]
return scattered_inputs_dict[device][num_outputs_cumsum[layer_idx]]
hooks = []
try:
layers = self.layer
if not isinstance(layers, list):
layers = [self.layer]
for layer_idx, layer in enumerate(layers):
hook = None
# TODO:
# Allow multiple attribute_to_layer_input flags for
# each layer, i.e. attribute_to_layer_input[layer_idx]
if attribute_to_layer_input:
hook = layer.register_forward_pre_hook(
functools.partial(
layer_forward_hook, layer_idx=layer_idx
)
)
else:
hook = layer.register_forward_hook(
functools.partial(
layer_forward_hook, layer_idx=layer_idx
)
)
hooks.append(hook)
output = _run_forward(
self.forward_func, tuple(), target_ind, additional_forward_args
)
finally:
for hook in hooks:
if hook is not None:
hook.remove()
assert output[0].numel() == 1, (
"Target not provided when necessary, cannot"
" take gradient with respect to multiple outputs."
)
# torch.unbind(forward_out) is a list of scalar tensor tuples and
# contains batch_size * #steps elements
grads = torch.autograd.grad(torch.unbind(output), inputs)
return grads
self.ig.gradient_func = gradient_func
all_inputs = (
(inps + additional_forward_args)
if additional_forward_args is not None
else inps
)
attributions = self.ig.attribute.__wrapped__( # type: ignore
self.ig, # self
inputs_layer,
baselines=baselines_layer,
target=target,
additional_forward_args=all_inputs,
n_steps=n_steps,
method=method,
internal_batch_size=internal_batch_size,
return_convergence_delta=False,
)
# handle multiple outputs
output: List[Tuple[Tensor, ...]] = [
tuple(
attributions[
int(num_outputs_cumsum[i]) : int(num_outputs_cumsum[i + 1])
]
)
for i in range(len(num_outputs))
]
if return_convergence_delta:
start_point, end_point = baselines, inps
# computes approximation error based on the completeness axiom
delta = self.compute_convergence_delta(
attributions,
start_point,
end_point,
additional_forward_args=additional_forward_args,
target=target,
)
return _format_outputs(isinstance(self.layer, list), output), delta
return _format_outputs(isinstance(self.layer, list), output)
def has_convergence_delta(self) -> bool:
return True
@property
def multiplies_by_inputs(self):
return self.ig.multiplies_by_inputs
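# Minimal usage sketch; the toy Sequential model below is an assumption for
# illustration (any CNN works) and mirrors the Examples in the docstring.
if __name__ == "__main__":
    import torch.nn as nn
    net = nn.Sequential(nn.Conv2d(3, 12, 3, padding=1), nn.ReLU(),
                        nn.Flatten(), nn.Linear(12 * 32 * 32, 10))
    lig = LayerIntegratedGradients(net, net[0])
    inp = torch.randn(2, 3, 32, 32)
    attrs, delta = lig.attribute(inp, target=3, return_convergence_delta=True)
    # attrs matches the conv layer's output shape: (2, 12, 32, 32)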
|
[
"[email protected]"
] | |
d17f822579e1bc3a3eb2590493618026d10cf984
|
1a82a96e860d69d4f1ce862c1fa86919f356a7a8
|
/finalcif/gui/new_key_dialog.py
|
9ca94cbda85fd2c273bbcbb20e78edb0b3b9babc
|
[
"Beerware"
] |
permissive
|
dkratzert/FinalCif
|
f9d27aaf4a5da2620c000e75cf7a59c78646c2dc
|
f97ccec1e8be9ce4034784fa52bbc5257d9a9e7d
|
refs/heads/master
| 2023-08-31T10:24:08.258193 | 2023-08-31T10:22:20 | 2023-08-31T10:22:20 | 191,889,707 | 21 | 1 |
NOASSERTION
| 2023-05-21T15:07:51 | 2019-06-14T06:41:16 |
Python
|
UTF-8
|
Python
| false | false | 1,569 |
py
|
import re
from PyQt5 import QtWidgets
from PyQt5.QtCore import pyqtSignal
from finalcif.cif import all_cif_dicts
from finalcif.gui.new_key_dialog_ui import Ui_AddKeyWindow
class NewKey(QtWidgets.QMainWindow, Ui_AddKeyWindow):
new_key_added = pyqtSignal(str)
def __init__(self, parent=None):
super().__init__(parent=parent)
self.setupUi(self)
self.searchLineEdit.textChanged.connect(self.search)
self.addKeyPushButton.clicked.connect(self.add_keys)
self.cancelPushButton.clicked.connect(lambda: self.close())
self.keysListWidget.addItems(all_cif_dicts.cif_all_dict.keys())
for num in range(self.keysListWidget.count()):
item = self.keysListWidget.item(num)
helptext = all_cif_dicts.cif_all_dict.get(item.text())
item.setToolTip(helptext)
def add_keys(self):
for item in self.keysListWidget.selectedItems():
self.new_key_added.emit(item.text())
def search(self, searchtext: str):
self.keysListWidget.clear()
cif_keys = all_cif_dicts.cif_all_dict.keys()
if searchtext:
searchpattern = re.compile(f'.*{searchtext}.*', re.IGNORECASE)
searched = [x for x in cif_keys if searchpattern.match(x)]
self.keysListWidget.addItems(searched)
else:
self.keysListWidget.addItems(cif_keys)
if __name__ == '__main__':
import sys
from PyQt5.QtWidgets import QApplication
app = QApplication(sys.argv)
nk = NewKey()
nk.show()
sys.exit(app.exec_())
|
[
"[email protected]"
] | |
aef336f70ef99ce4f5996661124bb6ba969bbb5d
|
5118ee8e91da7e76949cbb8d401462d5f402723d
|
/python_work_fs01/2018/0329/test3.py
|
819908059479c589e80f0bf5257e3b48cb5e76da
|
[] |
no_license
|
nakanishi-akitaka/python_backup
|
ad87941396e84cacd8540d66033b560025c4de92
|
d11e5b9cf4dd57f51938d3638ff74c102380e624
|
refs/heads/master
| 2020-06-30T08:22:39.696697 | 2019-12-21T23:18:55 | 2019-12-21T23:18:55 | 200,776,513 | 0 | 8 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,751 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from pymatgen import Composition, Element
from numpy import zeros, mean
trainFile = open("bandgapDFT.csv","r").readlines()
# input: a pymatgen Composition object
# output: composition vector (atomic fractions indexed by atomic number)
def naiveVectorize(composition):
vector = zeros((MAX_Z))
for element in composition:
        # element is the atom; fraction is that element's share of the composition
fraction = composition.get_atomic_fraction(element)
vector[element.Z - 1] = fraction
return(vector)
materials = []
bandgaps = []
naiveFeatures = []
MAX_Z = 100
for line in trainFile:
split = str.split(line, ',')
material = Composition(split[0])
    materials.append(material) # chemical formula
    naiveFeatures.append(naiveVectorize(material)) # feature vector
    bandgaps.append(float(split[1])) # read in the band gap
baselineError = mean(abs(mean(bandgaps) - bandgaps))
print("Mean Absolute Error : " + str(round(baselineError, 3)) + " eV")
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn import neural_network
# scikit-learn regressors: linear, decision tree, random forest and MLP
lr = LinearRegression()
dtr = DecisionTreeRegressor()
rfr = RandomForestRegressor()
nn = neural_network.MLPRegressor(max_iter=1000)
estimators = {'LR ':lr,'DTR':dtr,'RFR':rfr,'NN ':nn}
# for k, v in estimators.items():
# print(k,v,type(k),type(v))
# run cross-validation
cv = ShuffleSplit(n_splits=10, test_size=0.1, random_state=0)
for k,v in estimators.items():
sc = cross_val_score( v, naiveFeatures, bandgaps, cv=cv, scoring='r2')
print("R2 by "+k+" with composition data: "+ str(round(abs(mean(sc)), 3)) + " ")
pf= [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
npf=4
npf+=1
for material in materials:
theseFeatures = []
feature = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]
for element in material:
feature[ 1].append(float(element.Z))
feature[ 2].append(material.get_atomic_fraction(element)*material.num_atoms)
feature[ 3].append(float(element.group))
feature[ 4].append(float(element.row))
feature[ 5].append(element.X)
feature[ 6].append(float(element.max_oxidation_state))
feature[ 7].append(float(element.min_oxidation_state))
feature[ 8].append(float(str(element.atomic_mass).split("a")[0]))
feature[ 9].append(float(element.mendeleev_no))
feature[10].append(float(str(element.melting_point).split("K")[0]))
feature[11].append(float(str(element.molar_volume).split("c")[0]))
feature[12].append(float(str(element.thermal_conductivity).split("W")[0]))
feature[13].append(element.is_noble_gas)
feature[14].append(element.is_transition_metal)
feature[15].append(element.is_rare_earth_metal)
feature[16].append(element.is_metalloid)
feature[17].append(element.is_alkali)
feature[18].append(element.is_alkaline)
feature[19].append(element.is_halogen)
feature[20].append(element.is_chalcogen)
feature[21].append(element.is_lanthanoid)
feature[22].append(element.is_actinoid)
for i in range(1,npf):
theseFeatures.extend(feature[i])
pf[i].append(theseFeatures[:])
for k,v in estimators.items():
for i in range(1,npf):
sc = cross_val_score( v, pf[i], bandgaps, cv=cv, scoring='r2')
print("R2 by "+k+" with physical ", i, " data: ave ", round(sc.mean(), 3)," std ", round(sc.std(), 3))
|
[
"[email protected]"
] | |
0463e4ee319c4bf4ebffe5cd815ab8f85b45adef
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/tools/nntool/nntool/importer/tflite2/handlers/backend/transpose.py
|
23234c75127cac797ca8c1adf4ffb04f15ab2673
|
[
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"Apache-2.0"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427 | 2023-08-10T09:04:44 | 2023-08-10T09:04:44 | 133,324,605 | 145 | 96 |
Apache-2.0
| 2023-08-27T19:03:52 | 2018-05-14T07:50:29 |
C
|
UTF-8
|
Python
| false | false | 2,705 |
py
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from nntool.graph.dim import Dim
from nntool.graph.types import ConstantInputNode, NNEdge, TransposeNode
from nntool.importer.common.constant_mixin import ConstantMixin
from nntool.importer.common.provisional_dim import ProvisionalDim
from nntool.importer.tflite2.common import LOG
from nntool.importer.tflite2.common.tflite_node import TFLiteNode
from ..backend_handler import BackendHandler
from ..handler import tflite_op
@tflite_op("TRANSPOSE")
class Transpose(BackendHandler, ConstantMixin):
@classmethod
def _common(cls, node: TFLiteNode, **kwargs):
G = kwargs['G']
opts = kwargs['opts']
all_nodes = kwargs['all_nodes']
inputs = [all_nodes[t] for t in node.input]
x = inputs[0]
x_shape = x[2].shape
        # map each original axis index to its index once None (unknown) dims are squeezed
        new_axes = {}
        for idx, dim in enumerate(x_shape):
            if dim is not None:
                new_axes[idx] = len(new_axes)
        ptranspose = cls._verify_constant(inputs[1])
        pout_shape = [x_shape[dim] for dim in ptranspose]
        # rewrite the permutation in terms of the squeezed axes only
        transpose = [new_axes[axis] for axis in ptranspose if x_shape[axis] is not None]
node.input[1].used = True
        if cls.is_constant(x):
            LOG.info("reducing %s to a constant", node.name)
            # fold the transpose into the constant value instead of emitting a node
            # (the original code transposed val a second time here, leaving the
            # value inconsistent with dims, which uses the once-transposed shape)
            val = np.transpose(cls.get_constant(x), ptranspose)
            params = ConstantInputNode(node.name, value=val,
                                       dims=Dim.unnamed(val.shape))
else:
params = TransposeNode(node.name, transpose=transpose)
if opts.get('load_quantization'):
G.quantization[params.name] = cls.load_tf_quantization([node.input[0]], node.output)
G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))
all_nodes[node.output[0]] = (params, 0, ProvisionalDim(pout_shape))
return params
@classmethod
def version_1(cls, node: TFLiteNode, **kwargs):
return cls._common(node, **kwargs)
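# Worked example of the axis remapping in _common (illustrative values only):
#   x_shape    = [None, 4, 8]   provisional input shape, batch dim unknown
#   ptranspose = [0, 2, 1]      permutation over the provisional axes
#   new_axes   = {1: 0, 2: 1}   the None axis is squeezed away
#   transpose  = [1, 0]         permutation actually handed to TransposeNode
#   pout_shape = [None, 8, 4]   provisional output shape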
|
[
"[email protected]"
] | |
aa65c39966c5c3996b0e4ccc5cd54969b654a3b4
|
b3d552675b36cb88a1388fcfc531e497ad7cbee9
|
/day8/form_validater/cms/models.py
|
bd100b3fda14c14a849c139d0223a34e3b01935a
|
[] |
no_license
|
gaohj/1902_django
|
3cea1f0935fd983f25c6fd832b103ac5165a2e30
|
822af7b42120c6edc699bf97c800887ff84f5621
|
refs/heads/master
| 2022-12-11T10:02:50.233398 | 2019-11-26T08:33:38 | 2019-11-26T08:33:38 | 209,241,390 | 2 | 0 | null | 2022-12-08T07:28:24 | 2019-09-18T07:05:48 |
Python
|
UTF-8
|
Python
| false | false | 179 |
py
|
from django.db import models
# Create your models here.
class User(models.Model):
username = models.CharField(max_length=100)
telephone = models.CharField(max_length=100)
|
[
"[email protected]"
] | |
582cd839c799c316ee8471223a0c1ea010ab379b
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/artificial/transf_Quantization/trend_LinearTrend/cycle_0/ar_12/test_artificial_1024_Quantization_LinearTrend_0_12_0.py
|
77be5894f2be385b71feb3c48605ac0b18920a78
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 |
BSD-3-Clause
| 2023-03-08T21:45:40 | 2016-10-13T09:30:30 |
Python
|
UTF-8
|
Python
| false | false | 270 |
py
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 0, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12);
|
[
"[email protected]"
] | |
bf494aea278afcbbd81af424e97a4bce171d18d2
|
d87d312ff0d8e200605d35932abdfa282c803110
|
/qswatplus/PIL/ImageDraw.py
|
6a70def3b20f79f40de5fd8c9ccac0fea86eb39c
|
[
"GPL-2.0-or-later",
"MIT"
] |
permissive
|
celray/swatplus-automatic-workflow
|
70848960f8ff6b109899ced145ee938d2adcfd19
|
ddb3de70708687ca3167ec4b72ac432426175f45
|
refs/heads/master
| 2022-03-16T17:23:15.815336 | 2022-03-09T19:08:35 | 2022-03-09T19:08:35 | 248,785,912 | 11 | 5 |
MIT
| 2020-10-16T11:11:56 | 2020-03-20T15:12:48 |
Python
|
UTF-8
|
Python
| false | false | 16,527 |
py
|
#
# The Python Imaging Library
# $Id$
#
# drawing interface operations
#
# History:
# 1996-04-13 fl Created (experimental)
# 1996-08-07 fl Filled polygons, ellipses.
# 1996-08-13 fl Added text support
# 1998-06-28 fl Handle I and F images
# 1998-12-29 fl Added arc; use arc primitive to draw ellipses
# 1999-01-10 fl Added shape stuff (experimental)
# 1999-02-06 fl Added bitmap support
# 1999-02-11 fl Changed all primitives to take options
# 1999-02-20 fl Fixed backwards compatibility
# 2000-10-12 fl Copy on write, when necessary
# 2001-02-18 fl Use default ink for bitmap/text also in fill mode
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing
# 2002-12-11 fl Refactored low-level drawing API (work in progress)
# 2004-08-26 fl Made Draw() a factory function, added getdraw() support
# 2004-09-04 fl Added width support to line primitive
# 2004-09-10 fl Added font mode handling
# 2006-06-19 fl Added font bearing support (getmask2)
#
# Copyright (c) 1997-2006 by Secret Labs AB
# Copyright (c) 1996-2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import math
import numbers
from . import Image, ImageColor
from ._util import isStringType
"""
A simple 2D drawing interface for PIL images.
<p>
Application code should use the <b>Draw</b> factory, instead of
directly.
"""
class ImageDraw(object):
def __init__(self, im, mode=None):
"""
Create a drawing instance.
:param im: The image to draw in.
:param mode: Optional mode to use for color values. For RGB
images, this argument can be RGB or RGBA (to blend the
drawing into the image). For all other modes, this argument
must be the same as the image mode. If omitted, the mode
defaults to the mode of the image.
"""
im.load()
if im.readonly:
im._copy() # make it writeable
blend = 0
if mode is None:
mode = im.mode
if mode != im.mode:
if mode == "RGBA" and im.mode == "RGB":
blend = 1
else:
raise ValueError("mode mismatch")
if mode == "P":
self.palette = im.palette
else:
self.palette = None
self.im = im.im
self.draw = Image.core.draw(self.im, blend)
self.mode = mode
if mode in ("I", "F"):
self.ink = self.draw.draw_ink(1, mode)
else:
self.ink = self.draw.draw_ink(-1, mode)
if mode in ("1", "P", "I", "F"):
# FIXME: fix Fill2 to properly support matte for I+F images
self.fontmode = "1"
else:
self.fontmode = "L" # aliasing is okay for other modes
self.fill = 0
self.font = None
def getfont(self):
"""
Get the current default font.
:returns: An image font."""
if not self.font:
# FIXME: should add a font repository
from . import ImageFont
self.font = ImageFont.load_default()
return self.font
def _getink(self, ink, fill=None):
if ink is None and fill is None:
if self.fill:
fill = self.ink
else:
ink = self.ink
else:
if ink is not None:
if isStringType(ink):
ink = ImageColor.getcolor(ink, self.mode)
if self.palette and not isinstance(ink, numbers.Number):
ink = self.palette.getcolor(ink)
ink = self.draw.draw_ink(ink, self.mode)
if fill is not None:
if isStringType(fill):
fill = ImageColor.getcolor(fill, self.mode)
if self.palette and not isinstance(fill, numbers.Number):
fill = self.palette.getcolor(fill)
fill = self.draw.draw_ink(fill, self.mode)
return ink, fill
def arc(self, xy, start, end, fill=None, width=0):
"""Draw an arc."""
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_arc(xy, start, end, ink, width)
def bitmap(self, xy, bitmap, fill=None):
"""Draw a bitmap."""
bitmap.load()
ink, fill = self._getink(fill)
if ink is None:
ink = fill
if ink is not None:
self.draw.draw_bitmap(xy, bitmap.im, ink)
def chord(self, xy, start, end, fill=None, outline=None, width=0):
"""Draw a chord."""
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_chord(xy, start, end, fill, 1)
if ink is not None and ink != fill:
self.draw.draw_chord(xy, start, end, ink, 0, width)
def ellipse(self, xy, fill=None, outline=None, width=0):
"""Draw an ellipse."""
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_ellipse(xy, fill, 1)
if ink is not None and ink != fill:
self.draw.draw_ellipse(xy, ink, 0, width)
def line(self, xy, fill=None, width=0, joint=None):
"""Draw a line, or a connected sequence of line segments."""
ink = self._getink(fill)[0]
if ink is not None:
self.draw.draw_lines(xy, ink, width)
if joint == "curve" and width > 4:
for i in range(1, len(xy)-1):
point = xy[i]
angles = [
math.degrees(math.atan2(
end[0] - start[0], start[1] - end[1]
)) % 360
for start, end in ((xy[i-1], point), (point, xy[i+1]))
]
if angles[0] == angles[1]:
# This is a straight line, so no joint is required
continue
def coord_at_angle(coord, angle):
x, y = coord
angle -= 90
distance = width/2 - 1
return tuple([
p +
(math.floor(p_d) if p_d > 0 else math.ceil(p_d))
for p, p_d in
((x, distance * math.cos(math.radians(angle))),
(y, distance * math.sin(math.radians(angle))))
])
flipped = ((angles[1] > angles[0] and
angles[1] - 180 > angles[0]) or
(angles[1] < angles[0] and
angles[1] + 180 > angles[0]))
coords = [
(point[0] - width/2 + 1, point[1] - width/2 + 1),
(point[0] + width/2 - 1, point[1] + width/2 - 1)
]
if flipped:
start, end = (angles[1] + 90, angles[0] + 90)
else:
start, end = (angles[0] - 90, angles[1] - 90)
self.pieslice(coords, start - 90, end - 90, fill)
if width > 8:
# Cover potential gaps between the line and the joint
if flipped:
gapCoords = [
coord_at_angle(point, angles[0]+90),
point,
coord_at_angle(point, angles[1]+90)
]
else:
gapCoords = [
coord_at_angle(point, angles[0]-90),
point,
coord_at_angle(point, angles[1]-90)
]
self.line(gapCoords, fill, width=3)
def shape(self, shape, fill=None, outline=None):
"""(Experimental) Draw a shape."""
shape.close()
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_outline(shape, fill, 1)
if ink is not None and ink != fill:
self.draw.draw_outline(shape, ink, 0)
def pieslice(self, xy, start, end, fill=None, outline=None, width=0):
"""Draw a pieslice."""
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_pieslice(xy, start, end, fill, 1)
if ink is not None and ink != fill:
self.draw.draw_pieslice(xy, start, end, ink, 0, width)
def point(self, xy, fill=None):
"""Draw one or more individual pixels."""
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_points(xy, ink)
def polygon(self, xy, fill=None, outline=None):
"""Draw a polygon."""
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_polygon(xy, fill, 1)
if ink is not None and ink != fill:
self.draw.draw_polygon(xy, ink, 0)
def rectangle(self, xy, fill=None, outline=None, width=0):
"""Draw a rectangle."""
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_rectangle(xy, fill, 1)
if ink is not None and ink != fill:
self.draw.draw_rectangle(xy, ink, 0, width)
def _multiline_check(self, text):
"""Draw text."""
split_character = "\n" if isinstance(text, str) else b"\n"
return split_character in text
def _multiline_split(self, text):
split_character = "\n" if isinstance(text, str) else b"\n"
return text.split(split_character)
def text(self, xy, text, fill=None, font=None, anchor=None,
*args, **kwargs):
if self._multiline_check(text):
return self.multiline_text(xy, text, fill, font, anchor,
*args, **kwargs)
ink, fill = self._getink(fill)
if font is None:
font = self.getfont()
if ink is None:
ink = fill
if ink is not None:
try:
mask, offset = font.getmask2(text, self.fontmode,
*args, **kwargs)
xy = xy[0] + offset[0], xy[1] + offset[1]
except AttributeError:
try:
mask = font.getmask(text, self.fontmode, *args, **kwargs)
except TypeError:
mask = font.getmask(text)
self.draw.draw_bitmap(xy, mask, ink)
def multiline_text(self, xy, text, fill=None, font=None, anchor=None,
spacing=4, align="left", direction=None, features=None):
widths = []
max_width = 0
lines = self._multiline_split(text)
line_spacing = self.textsize('A', font=font)[1] + spacing
for line in lines:
line_width, line_height = self.textsize(line, font)
widths.append(line_width)
max_width = max(max_width, line_width)
left, top = xy
for idx, line in enumerate(lines):
if align == "left":
pass # left = x
elif align == "center":
left += (max_width - widths[idx]) / 2.0
elif align == "right":
left += (max_width - widths[idx])
else:
raise ValueError('align must be "left", "center" or "right"')
self.text((left, top), line, fill, font, anchor,
direction=direction, features=features)
top += line_spacing
left = xy[0]
def textsize(self, text, font=None, spacing=4, direction=None,
features=None):
"""Get the size of a given string, in pixels."""
if self._multiline_check(text):
return self.multiline_textsize(text, font, spacing,
direction, features)
if font is None:
font = self.getfont()
return font.getsize(text, direction, features)
def multiline_textsize(self, text, font=None, spacing=4, direction=None,
features=None):
max_width = 0
lines = self._multiline_split(text)
line_spacing = self.textsize('A', font=font)[1] + spacing
for line in lines:
line_width, line_height = self.textsize(line, font, spacing,
direction, features)
max_width = max(max_width, line_width)
return max_width, len(lines)*line_spacing - spacing
def Draw(im, mode=None):
"""
A simple 2D drawing interface for PIL images.
:param im: The image to draw in.
:param mode: Optional mode to use for color values. For RGB
images, this argument can be RGB or RGBA (to blend the
drawing into the image). For all other modes, this argument
must be the same as the image mode. If omitted, the mode
defaults to the mode of the image.
"""
try:
return im.getdraw(mode)
except AttributeError:
return ImageDraw(im, mode)
# experimental access to the outline API
try:
Outline = Image.core.outline
except AttributeError:
Outline = None
def getdraw(im=None, hints=None):
"""
(Experimental) A more advanced 2D drawing interface for PIL images,
based on the WCK interface.
:param im: The image to draw in.
:param hints: An optional list of hints.
:returns: A (drawing context, drawing resource factory) tuple.
"""
# FIXME: this needs more work!
# FIXME: come up with a better 'hints' scheme.
handler = None
if not hints or "nicest" in hints:
try:
from . import _imagingagg as handler
except ImportError:
pass
if handler is None:
from . import ImageDraw2 as handler
if im:
im = handler.Draw(im)
return im, handler
def floodfill(image, xy, value, border=None, thresh=0):
"""
(experimental) Fills a bounded region with a given color.
:param image: Target image.
:param xy: Seed position (a 2-item coordinate tuple). See
:ref:`coordinate-system`.
:param value: Fill color.
:param border: Optional border value. If given, the region consists of
pixels with a color different from the border color. If not given,
the region consists of pixels having the same color as the seed
pixel.
:param thresh: Optional threshold value which specifies a maximum
tolerable difference of a pixel value from the 'background' in
order for it to be replaced. Useful for filling regions of non-
homogeneous, but similar, colors.
"""
# based on an implementation by Eric S. Raymond
# amended by yo1995 @20180806
pixel = image.load()
x, y = xy
try:
background = pixel[x, y]
if _color_diff(value, background) <= thresh:
return # seed point already has fill color
pixel[x, y] = value
except (ValueError, IndexError):
return # seed point outside image
edge = {(x, y)}
full_edge = set() # use a set to keep record of current and previous edge pixels to reduce memory consumption
while edge:
new_edge = set()
for (x, y) in edge: # 4 adjacent method
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
if (s, t) in full_edge:
continue # if already processed, skip
try:
p = pixel[s, t]
except (ValueError, IndexError):
pass
else:
full_edge.add((s, t))
if border is None:
fill = _color_diff(p, background) <= thresh
else:
fill = p != value and p != border
if fill:
pixel[s, t] = value
new_edge.add((s, t))
full_edge = edge # discard pixels processed
edge = new_edge
def _color_diff(color1, color2):
"""
Uses 1-norm distance to calculate difference between two values.
"""
if isinstance(color2, tuple):
return sum([abs(color1[i]-color2[i]) for i in range(0, len(color2))])
else:
return abs(color1-color2)
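# Minimal usage sketch of this module; the image size, coordinates and
# colors below are arbitrary illustration values.
if __name__ == "__main__":
    im = Image.new("RGB", (64, 64), "white")
    d = Draw(im)
    d.rectangle([8, 8, 56, 56], outline="black")   # hollow square
    d.line([8, 8, 56, 56], fill="red", width=2)    # diagonal through it
    floodfill(im, (32, 20), value=(0, 128, 255))   # fill the region above the diagonal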
|
[
"[email protected]"
] | |
353cc38bd9be00b7415a5b43c6474ad6cede0ac4
|
6e1549257568a0ca81b3fc5864e2e1fa65171b06
|
/salarydk/models/inline_response20086.py
|
5ba1f813cf50856872be7f174fd9b1ec2d9076ca
|
[] |
no_license
|
tdwizard/salarydk
|
19d3453de8fbdd886a0189dbf232f98de971e18a
|
dcf5040101b3e576f1068ea104148651e5c66511
|
refs/heads/master
| 2023-08-05T05:40:09.561288 | 2021-09-24T09:41:43 | 2021-09-24T09:41:43 | 409,910,180 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,924 |
py
|
# coding: utf-8
"""
Salary.dk API
    This is the public API for Salary.dk. # General Our API is a JSON-based, REST-like API. Our webapp uses the exact same API, so everything you can do in our webapp, you can do through our API. However, we are slowly opening up the API, so not all endpoints are documented here yet. Only the endpoints documented here are stable. If there is some functionality you would like to access through our API, please contact us. The API is located at https://api.salary.dk. All requests must use TLS. In order to use the API on behalf of other users than yourself, you need to register as an API client. You do this by sending an e-mail to [email protected] with the name and purpose of your client. API-keys for each account can be obtained once logged in to Salary, under the settings for the Company. All endpoints are documented to be able to return the 500 error code. We strive to not return this error code, so if you do encounter this error code, it might mean there is an error on our side. In this case, do not hesitate to contact us. # Versioning, upgrade and deprecation policy Our API might change over time. In order to ensure a stable API, we follow these rules when changing the API. New fields might be added at any time to any response or as non-required parameters to any input. When adding input fields, we ensure the default behaviour when not supplying the field is the same as the previous version. In these cases, the version of an endpoint is not increased, since it is backwards compatible. Since we might add new fields to responses, be sure to use a JSON parser in your implementation. This ensures that any extra fields added are ignored by your implementation. We might add entirely new endpoints at any time. If we need to change an existing endpoint without being able to make it backwards compatible, we will add a new version of the endpoint, and mark the old as deprecated but still functional. We will then contact any users of the deprecated endpoint and ensure an upgrade is performed. Once all consumers have moved to the new endpoint version, the old one will be removed. We will not at any point change the meaning of any existing field, nor will we remove any field or endpoint without following the above deprecated procedure. However, we might add new types to existing enums at any time. # Cross-Origin Resource Sharing This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/) - and that allows cross-domain communication from the browser. All responses have a wildcard same-origin which makes them completely public and accessible to everyone, including any code on any site, as long as the proper access token is passed. # Authentication All requests require an access token. There are two ways to obtain an access token: * Logging in as a user. (this endpoint is not yet publicly available). * Using an API-key: [endpoint](#operation/APIClientLogin) Using one of these methods, you will obtain an access token. In all subsequent requests, this access token should be passed in the Authorization header. The access token is valid for around one hour, after which a new token should be obtained. You do not need to dispose of access tokens once created. They have a limited lifetime, and Salary.dk will automatically expire old ones. For some endpoints, the authorizedUserQuery security definition is used. This allows for passing the access token as a query parameter where it is not possible to pass it as a header.
In particular, this is used for downloading files. <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from salarydk.configuration import Configuration
class InlineResponse20086(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'data': 'InlineResponse20086Data'
}
attribute_map = {
'data': 'data'
}
def __init__(self, data=None, local_vars_configuration=None): # noqa: E501
"""InlineResponse20086 - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._data = None
self.discriminator = None
if data is not None:
self.data = data
@property
def data(self):
"""Gets the data of this InlineResponse20086. # noqa: E501
:return: The data of this InlineResponse20086. # noqa: E501
:rtype: InlineResponse20086Data
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this InlineResponse20086.
:param data: The data of this InlineResponse20086. # noqa: E501
:type: InlineResponse20086Data
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, InlineResponse20086):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, InlineResponse20086):
return True
return self.to_dict() != other.to_dict()
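# Minimal round-trip sketch for this generated model; `data` is left as None
# since building an InlineResponse20086Data instance is out of scope here.
if __name__ == "__main__":
    resp = InlineResponse20086(data=None)
    print(resp.to_dict())                           # {'data': None}
    print(resp == InlineResponse20086(data=None))   # True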
|
[
"[email protected]"
] | |
598ac98ebf7a8341d74571338f3b6e48b6e28ed2
|
708074835900ae623239ce3c0d1e6f948b799fd0
|
/ftp-2/ftp_client/ftp_client.py
|
0656e0224631f01bd97e806116aa4fd738f037a1
|
[] |
no_license
|
hukeyy/learn_python
|
66688bcbaa43d79775030d2876979bbda08892ef
|
c71a37da88b089316536587ed47d32405bd987a3
|
refs/heads/master
| 2020-03-21T11:07:24.049328 | 2018-12-25T11:59:17 | 2018-12-25T11:59:17 | 138,490,613 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 874 |
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: hkey
import socket

class MyClient:
    def __init__(self, ip_port):
        self.client = socket.socket()
        self.ip_port = ip_port

    def connect(self):
        self.client.connect(self.ip_port)

    def start(self):
        self.connect()
        while True:
            print('register\nlogin')
            auth_type = input('>>>').strip()
            if auth_type == 'register' or auth_type == 'login':
                user = input('username: ')
                pwd = input('password: ')
                # send "type:user:password" and wait for the server's status code
                auth_info = '%s:%s:%s' % (auth_type, user, pwd)
                self.client.sendall(auth_info.encode())
                status_code = self.client.recv(1024)
                print(status_code.decode())
            else:
                print('\033[31;1mInvalid input, please try again.\033[0m')

if __name__ == '__main__':
    # server address is an assumption; the original file never instantiates the client
    MyClient(('127.0.0.1', 8000)).start()
|
[
"[email protected]"
] | |
4bcc563980a7cf202366282871a65a1d905a2c74
|
143e7c46515697d009bdb0bb4825db18942db002
|
/movies/forms.py
|
a59290839cee1ac462126f093ba2d4be41f147bc
|
[] |
no_license
|
yoonwoo123/project_08
|
e171aec3729b5d2686b3d10769c2bbd84a2b90ad
|
b978e86696b84c7e505263ad0fa776edb93d0a58
|
refs/heads/master
| 2020-09-01T14:18:29.392806 | 2019-04-04T05:15:16 | 2019-04-04T05:15:16 | 218,978,077 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 458 |
py
|
from django import forms
from .models import Movie
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
class MovieForm(forms.ModelForm):
class Meta:
model = Movie
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_method = 'POST'
self.helper.add_input(Submit('submit', 'Submit'))
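# Usage sketch -- this would normally live in views.py; the URL name and
# template path below are assumptions for illustration only.
from django.shortcuts import redirect, render

def create_movie(request):
    # bind POST data when present, otherwise render an empty form
    form = MovieForm(request.POST or None)
    if form.is_valid():
        form.save()
        return redirect('movies:index')
    return render(request, 'movies/create.html', {'form': form})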
|
[
"[email protected]"
] | |
ffdb634592c69b8a2d23ac8f12192049bb9f4287
|
d5dc79925d84486315fb0ed71a0403a27cb5d6af
|
/DDF_experiment/dark_ddf.py
|
09266d09ec5714e5e4ae827ebfc8ee579b124e60
|
[] |
no_license
|
lsst-sims/sims_featureScheduler_runs1.4
|
2e5b3380f29256c6bbc164787cb41d060456d1d3
|
87684c9c3156f3ebd18cc06c8512ee92ef1b16a0
|
refs/heads/master
| 2020-08-11T09:06:19.901882 | 2020-07-17T20:45:17 | 2020-07-17T20:45:17 | 214,535,040 | 1 | 2 | null | 2019-12-03T20:59:31 | 2019-10-11T21:57:32 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 24,296 |
py
|
import numpy as np
import matplotlib.pylab as plt
import healpy as hp
from lsst.sims.featureScheduler.modelObservatory import Model_observatory
from lsst.sims.featureScheduler.schedulers import Core_scheduler, simple_filter_sched
from lsst.sims.featureScheduler.utils import standard_goals, create_season_offset
import lsst.sims.featureScheduler.basis_functions as bf
from lsst.sims.featureScheduler.surveys import (Greedy_survey,
Blob_survey)
from lsst.sims.featureScheduler import sim_runner
import lsst.sims.featureScheduler.detailers as detailers
import sys
import subprocess
import os
import argparse
from lsst.sims.featureScheduler.surveys import Deep_drilling_survey
import lsst.sims.featureScheduler.basis_functions as basis_functions
def dd_bfs(RA, dec, survey_name, ha_limits, frac_total=0.0185/2., aggressive_frac=0.011/2.):
"""
    Convenience function to generate all the feasibility basis functions
"""
sun_alt_limit = -18.
time_needed = 62.
fractions = [0.00, aggressive_frac, frac_total]
bfs = []
bfs.append(basis_functions.Filter_loaded_basis_function(filternames=['r', 'g', 'i', 'z', 'y']))
bfs.append(basis_functions.Not_twilight_basis_function(sun_alt_limit=sun_alt_limit))
bfs.append(basis_functions.Time_to_twilight_basis_function(time_needed=time_needed))
bfs.append(basis_functions.Hour_Angle_limit_basis_function(RA=RA, ha_limits=ha_limits))
bfs.append(basis_functions.Moon_down_basis_function())
bfs.append(basis_functions.Fraction_of_obs_basis_function(frac_total=frac_total, survey_name=survey_name))
bfs.append(basis_functions.Look_ahead_ddf_basis_function(frac_total, aggressive_frac,
sun_alt_limit=sun_alt_limit, time_needed=time_needed,
RA=RA, survey_name=survey_name,
ha_limits=ha_limits))
bfs.append(basis_functions.Soft_delay_basis_function(fractions=fractions, delays=[0., 0.5, 1.5],
survey_name=survey_name))
return bfs
def dd_u_bfs(RA, dec, survey_name, ha_limits, frac_total=0.0019/2., aggressive_frac=0.0014/2.):
"""Convienence function to generate all the feasibility basis functions for u-band DDFs
"""
bfs = []
sun_alt_limit = -18.
time_needed = 6.
fractions = [0.00, aggressive_frac, frac_total]
bfs.append(basis_functions.Filter_loaded_basis_function(filternames='u'))
bfs.append(basis_functions.Not_twilight_basis_function(sun_alt_limit=sun_alt_limit))
bfs.append(basis_functions.Time_to_twilight_basis_function(time_needed=time_needed))
bfs.append(basis_functions.Hour_Angle_limit_basis_function(RA=RA, ha_limits=ha_limits))
bfs.append(basis_functions.Moon_down_basis_function())
bfs.append(basis_functions.Fraction_of_obs_basis_function(frac_total=frac_total, survey_name=survey_name))
bfs.append(basis_functions.Look_ahead_ddf_basis_function(frac_total, aggressive_frac,
sun_alt_limit=sun_alt_limit, time_needed=time_needed,
RA=RA, survey_name=survey_name,
ha_limits=ha_limits))
bfs.append(basis_functions.Soft_delay_basis_function(fractions=fractions, delays=[0., 0.2, 0.5],
survey_name=survey_name))
return bfs
def generate_dd_surveys(nside=None, nexp=2, detailers=None, reward_value=100):
"""Utility to return a list of standard deep drilling field surveys.
XXX-Someone double check that I got the coordinates right!
"""
surveys = []
# ELAIS S1
RA = 9.45
dec = -44.
survey_name = 'DD:ELAISS1'
ha_limits = ([0., 1.5], [21.5, 24.])
bfs = dd_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',
nvis=[20, 10, 20, 26, 20],
survey_name=survey_name, reward_value=reward_value,
nside=nside, nexp=nexp, detailers=detailers))
survey_name = 'DD:u,ELAISS1'
bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u',
nvis=[8], survey_name=survey_name, reward_value=reward_value, nside=nside,
nexp=nexp, detailers=detailers))
# XMM-LSS
survey_name = 'DD:XMM-LSS'
RA = 35.708333
dec = -4-45/60.
ha_limits = ([0., 1.5], [21.5, 24.])
bfs = dd_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',
nvis=[20, 10, 20, 26, 20], survey_name=survey_name, reward_value=reward_value,
nside=nside, nexp=nexp, detailers=detailers))
survey_name = 'DD:u,XMM-LSS'
bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u',
nvis=[8], survey_name=survey_name, reward_value=reward_value, nside=nside,
nexp=nexp, detailers=detailers))
# Extended Chandra Deep Field South
RA = 53.125
dec = -28.-6/60.
survey_name = 'DD:ECDFS'
ha_limits = [[0.5, 3.0], [20., 22.5]]
bfs = dd_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',
nvis=[20, 10, 20, 26, 20],
survey_name=survey_name, reward_value=reward_value, nside=nside,
nexp=nexp, detailers=detailers))
survey_name = 'DD:u,ECDFS'
bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u',
nvis=[8], survey_name=survey_name, reward_value=reward_value, nside=nside,
nexp=nexp, detailers=detailers))
# COSMOS
RA = 150.1
dec = 2.+10./60.+55/3600.
survey_name = 'DD:COSMOS'
ha_limits = ([0., 2.5], [21.5, 24.])
bfs = dd_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',
nvis=[20, 10, 20, 26, 20],
survey_name=survey_name, reward_value=reward_value, nside=nside,
nexp=nexp, detailers=detailers))
survey_name = 'DD:u,COSMOS'
bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u',
nvis=[8], survey_name=survey_name, reward_value=reward_value, nside=nside,
nexp=nexp, detailers=detailers))
# Euclid Fields
survey_name = 'DD:EDFS1'
RA = 58.97
dec = -49.28
ha_limits = ([0., 1.5], [21.5, 24.])
bfs = dd_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',
nvis=[5, 7, 19, 24, 5],
survey_name=survey_name, reward_value=reward_value, nside=nside,
nexp=nexp, detailers=detailers))
survey_name = 'DD:u,EDFS1'
bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u', nvis=[8],
survey_name=survey_name, reward_value=reward_value, nside=nside,
nexp=nexp, detailers=detailers))
survey_name = 'DD:EDFS2'
RA = 63.6
dec = -47.60
ha_limits = ([0., 1.5], [21.5, 24.])
bfs = dd_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='rgizy',
nvis=[5, 7, 19, 24, 5],
survey_name=survey_name, reward_value=reward_value, nside=nside,
nexp=nexp, detailers=detailers))
survey_name = 'DD:u,EDFS2'
bfs = dd_u_bfs(RA, dec, survey_name, ha_limits)
surveys.append(Deep_drilling_survey(bfs, RA, dec, sequence='u', nvis=[8],
survey_name=survey_name, reward_value=reward_value, nside=nside,
nexp=nexp, detailers=detailers))
return surveys
def gen_greedy_surveys(nside=32, nexp=1, exptime=30., filters=['r', 'i', 'z', 'y'],
camera_rot_limits=[-80., 80.],
shadow_minutes=60., max_alt=76., moon_distance=30., ignore_obs='DD',
m5_weight=3., footprint_weight=0.3, slewtime_weight=3.,
stayfilter_weight=3.):
"""
Make a quick set of greedy surveys
This is a convenience function to generate a list of survey objects that can be used with
lsst.sims.featureScheduler.schedulers.Core_scheduler.
To ensure we are robust against changes in the sims_featureScheduler codebase, all kwargs are
explicitly set.
Parameters
----------
nside : int (32)
The HEALpix nside to use
nexp : int (1)
The number of exposures to use in a visit.
exptime : float (30.)
The exposure time to use per visit (seconds)
filters : list of str (['r', 'i', 'z', 'y'])
Which filters to generate surveys for.
camera_rot_limits : list of float ([-80., 80.])
The limits to impose when rotationally dithering the camera (degrees).
shadow_minutes : float (60.)
Used to mask regions around zenith (minutes)
max_alt : float (76.)
The maximium altitude to use when masking zenith (degrees)
moon_distance : float (30.)
The mask radius to apply around the moon (degrees)
ignore_obs : str or list of str ('DD')
Ignore observations by surveys that include the given substring(s).
m5_weight : float (3.)
The weight for the 5-sigma depth difference basis function
footprint_weight : float (0.3)
The weight on the survey footprint basis function.
slewtime_weight : float (3.)
The weight on the slewtime basis function
stayfilter_weight : float (3.)
The weight on the basis function that tries to avoid filter changes.
"""
# Define the extra parameters that are used in the greedy survey. I
# think these are fairly set, so no need to promote to utility func kwargs
greed_survey_params = {'block_size': 1, 'smoothing_kernel': None,
'seed': 42, 'camera': 'LSST', 'dither': True,
'survey_name': 'greedy'}
footprints = standard_goals(nside=nside)
sum_footprints = 0
for key in footprints:
sum_footprints += np.sum(footprints[key])
surveys = []
detailer = detailers.Camera_rot_detailer(min_rot=np.min(camera_rot_limits), max_rot=np.max(camera_rot_limits))
for filtername in filters:
bfs = []
bfs.append((bf.M5_diff_basis_function(filtername=filtername, nside=nside), m5_weight))
bfs.append((bf.Footprint_basis_function(filtername=filtername,
footprint=footprints[filtername],
out_of_bounds_val=np.nan, nside=nside,
all_footprints_sum=sum_footprints), footprint_weight))
bfs.append((bf.Slewtime_basis_function(filtername=filtername, nside=nside), slewtime_weight))
bfs.append((bf.Strict_filter_basis_function(filtername=filtername), stayfilter_weight))
# Masks, give these 0 weight
bfs.append((bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=shadow_minutes,
max_alt=max_alt), 0))
bfs.append((bf.Moon_avoidance_basis_function(nside=nside, moon_distance=moon_distance), 0))
bfs.append((bf.Filter_loaded_basis_function(filternames=filtername), 0))
bfs.append((bf.Planet_mask_basis_function(nside=nside), 0))
weights = [val[1] for val in bfs]
basis_functions = [val[0] for val in bfs]
surveys.append(Greedy_survey(basis_functions, weights, exptime=exptime, filtername=filtername,
nside=nside, ignore_obs=ignore_obs, nexp=nexp,
detailers=[detailer], **greed_survey_params))
return surveys
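# A minimal usage sketch (argument values are illustrative; the __main__
# block at the bottom of this file shows the real wiring):
#     greedy = gen_greedy_surveys(nside=32, nexp=1, filters=['r', 'i'])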
def generate_blobs(nside, nexp=1, exptime=30., filter1s=['u', 'u', 'u', 'g', 'r', 'i', 'z', 'y'],
filter2s=['u', 'g', 'r', 'r', 'i', 'z', 'y', 'y'], pair_time=22.,
camera_rot_limits=[-80., 80.], n_obs_template=3,
season=300., season_start_hour=-4., season_end_hour=2.,
shadow_minutes=60., max_alt=76., moon_distance=30., ignore_obs='DD',
m5_weight=6., footprint_weight=0.6, slewtime_weight=3.,
stayfilter_weight=3., template_weight=12.):
"""
Generate surveys that take observations in blobs.
Parameters
----------
nside : int (32)
The HEALpix nside to use
nexp : int (1)
The number of exposures to use in a visit.
exptime : float (30.)
The exposure time to use per visit (seconds)
filter1s : list of str
The filternames for the first set
filter2s : list of str
The filter names for the second in the pair (None if unpaired)
pair_time : float (22)
The ideal time between pairs (minutes)
camera_rot_limits : list of float ([-80., 80.])
The limits to impose when rotationally dithering the camera (degrees).
n_obs_template : int (3)
The number of observations to take every season in each filter
season : float (300)
The length of season (i.e., how long before templates expire) (days)
season_start_hour : float (-4.)
For weighting how strongly a template image needs to be observed (hours)
season_end_hour : float (2.)
For weighting how strongly a template image needs to be observed (hours)
shadow_minutes : float (60.)
Used to mask regions around zenith (minutes)
max_alt : float (76.)
The maximium altitude to use when masking zenith (degrees)
moon_distance : float (30.)
The mask radius to apply around the moon (degrees)
ignore_obs : str or list of str ('DD')
Ignore observations by surveys that include the given substring(s).
m5_weight : float (6.)
The weight for the 5-sigma depth difference basis function
footprint_weight : float (0.6)
The weight on the survey footprint basis function.
slewtime_weight : float (3.)
The weight on the slewtime basis function
stayfilter_weight : float (3.)
The weight on the basis function that tries to avoid filter changes.
template_weight : float (12.)
The weight to place on getting image templates every season
"""
blob_survey_params = {'slew_approx': 7.5, 'filter_change_approx': 140.,
'read_approx': 2., 'min_pair_time': 15., 'search_radius': 30.,
'alt_max': 85., 'az_range': 90., 'flush_time': 30.,
'smoothing_kernel': None, 'nside': nside, 'seed': 42, 'dither': True,
'twilight_scale': True}
footprints = standard_goals(nside=nside)
sum_footprints = 0
for key in footprints:
sum_footprints += np.sum(footprints[key])
surveys = []
times_needed = [pair_time, pair_time*2]
for filtername, filtername2 in zip(filter1s, filter2s):
detailer_list = []
detailer_list.append(detailers.Camera_rot_detailer(min_rot=np.min(camera_rot_limits),
max_rot=np.max(camera_rot_limits)))
detailer_list.append(detailers.Close_alt_detailer())
# List to hold tuples of (basis_function_object, weight)
bfs = []
if filtername2 is not None:
bfs.append((bf.M5_diff_basis_function(filtername=filtername, nside=nside), m5_weight/2.))
bfs.append((bf.M5_diff_basis_function(filtername=filtername2, nside=nside), m5_weight/2.))
else:
bfs.append((bf.M5_diff_basis_function(filtername=filtername, nside=nside), m5_weight))
if filtername2 is not None:
bfs.append((bf.Footprint_basis_function(filtername=filtername,
footprint=footprints[filtername],
out_of_bounds_val=np.nan, nside=nside,
all_footprints_sum=sum_footprints), footprint_weight/2.))
bfs.append((bf.Footprint_basis_function(filtername=filtername2,
footprint=footprints[filtername2],
out_of_bounds_val=np.nan, nside=nside,
all_footprints_sum=sum_footprints), footprint_weight/2.))
else:
bfs.append((bf.Footprint_basis_function(filtername=filtername,
footprint=footprints[filtername],
out_of_bounds_val=np.nan, nside=nside,
all_footprints_sum=sum_footprints), footprint_weight))
bfs.append((bf.Slewtime_basis_function(filtername=filtername, nside=nside), slewtime_weight))
bfs.append((bf.Strict_filter_basis_function(filtername=filtername), stayfilter_weight))
if filtername2 is not None:
bfs.append((bf.N_obs_per_year_basis_function(filtername=filtername, nside=nside,
footprint=footprints[filtername],
n_obs=n_obs_template, season=season,
season_start_hour=season_start_hour,
season_end_hour=season_end_hour), template_weight/2.))
bfs.append((bf.N_obs_per_year_basis_function(filtername=filtername2, nside=nside,
footprint=footprints[filtername2],
n_obs=n_obs_template, season=season,
season_start_hour=season_start_hour,
season_end_hour=season_end_hour), template_weight/2.))
else:
bfs.append((bf.N_obs_per_year_basis_function(filtername=filtername, nside=nside,
footprint=footprints[filtername],
n_obs=n_obs_template, season=season,
season_start_hour=season_start_hour,
season_end_hour=season_end_hour), template_weight))
# Masks, give these 0 weight
bfs.append((bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=shadow_minutes, max_alt=max_alt,
penalty=np.nan, site='LSST'), 0.))
bfs.append((bf.Moon_avoidance_basis_function(nside=nside, moon_distance=moon_distance), 0.))
filternames = [fn for fn in [filtername, filtername2] if fn is not None]
bfs.append((bf.Filter_loaded_basis_function(filternames=filternames), 0))
if filtername2 is None:
time_needed = times_needed[0]
else:
time_needed = times_needed[1]
bfs.append((bf.Time_to_twilight_basis_function(time_needed=time_needed), 0.))
bfs.append((bf.Not_twilight_basis_function(), 0.))
bfs.append((bf.Planet_mask_basis_function(nside=nside), 0.))
# unpack the basis functions and weights
weights = [val[1] for val in bfs]
basis_functions = [val[0] for val in bfs]
if filtername2 is None:
survey_name = 'blob, %s' % filtername
else:
survey_name = 'blob, %s%s' % (filtername, filtername2)
if filtername2 is not None:
detailer_list.append(detailers.Take_as_pairs_detailer(filtername=filtername2))
surveys.append(Blob_survey(basis_functions, weights, filtername1=filtername, filtername2=filtername2,
exptime=exptime,
ideal_pair_time=pair_time,
survey_note=survey_name, ignore_obs=ignore_obs,
nexp=nexp, detailers=detailer_list, **blob_survey_params))
return surveys
def run_sched(surveys, survey_length=365.25, nside=32, fileroot='baseline_', verbose=False,
extra_info=None, illum_limit=15.):
years = np.round(survey_length/365.25)
scheduler = Core_scheduler(surveys, nside=nside)
n_visit_limit = None
filter_sched = simple_filter_sched(illum_limit=illum_limit)
observatory = Model_observatory(nside=nside)
observatory, scheduler, observations = sim_runner(observatory, scheduler,
survey_length=survey_length,
filename=fileroot+'%iyrs.db' % years,
delete_past=True, n_visit_limit=n_visit_limit,
verbose=verbose, extra_info=extra_info,
filter_scheduler=filter_sched)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--verbose", dest='verbose', action='store_true')
parser.set_defaults(verbose=False)
parser.add_argument("--survey_length", type=float, default=365.25*10)
parser.add_argument("--outDir", type=str, default="")
parser.add_argument("--maxDither", type=float, default=0.7, help="Dither size for DDFs (deg)")
parser.add_argument("--moon_illum_limit", type=float, default=15., help="illumination limit to remove u-band")
args = parser.parse_args()
survey_length = args.survey_length # Days
outDir = args.outDir
verbose = args.verbose
max_dither = args.maxDither
illum_limit = args.moon_illum_limit
nside = 32
per_night = True # Dither DDF per night
nexp = 1 # All observations
mixed_pairs = True # For the blob scheduler
camera_ddf_rot_limit = 75.
extra_info = {}
exec_command = ''
for arg in sys.argv:
exec_command += ' ' + arg
extra_info['exec command'] = exec_command
try:
extra_info['git hash'] = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
except subprocess.CalledProcessError:
extra_info['git hash'] = 'Not in git repo'
extra_info['file executed'] = os.path.realpath(__file__)
fileroot = 'darkddf_'
file_end = 'v1.4_'
# Set up the DDF surveys to dither
dither_detailer = detailers.Dither_detailer(per_night=per_night, max_dither=max_dither)
details = [detailers.Camera_rot_detailer(min_rot=-camera_ddf_rot_limit, max_rot=camera_ddf_rot_limit), dither_detailer]
ddfs = generate_dd_surveys(nside=nside, nexp=nexp, detailers=details)
greedy = gen_greedy_surveys(nside, nexp=nexp)
blobs = generate_blobs(nside, nexp=nexp)
surveys = [ddfs, blobs, greedy]
run_sched(surveys, survey_length=survey_length, verbose=verbose,
fileroot=os.path.join(outDir, fileroot+file_end), extra_info=extra_info,
nside=nside, illum_limit=illum_limit)
|
[
"[email protected]"
] | |
9d503b9b1c3674a28181a2570002021ac4478d1d
|
210ecd63113ce90c5f09bc2b09db3e80ff98117a
|
/AbletonLive9_RemoteScripts/Axiom_49_61_Classic/Axiom.py
|
0ab7f4b3308a768b95753f436555c09b9f96c849
|
[] |
no_license
|
ajasver/MidiScripts
|
86a765b8568657633305541c46ccc1fd1ea34501
|
f727a2e63c95a9c5e980a0738deb0049363ba536
|
refs/heads/master
| 2021-01-13T02:03:55.078132 | 2015-07-16T18:27:30 | 2015-07-16T18:27:30 | 38,516,112 | 4 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,330 |
py
|
#Embedded file name: /Users/versonator/Jenkins/live/Binary/Core_Release_64_static/midi-remote-scripts/Axiom_49_61_Classic/Axiom.py
from _Axiom.consts import *
from _Axiom.Transport import Transport
from _Axiom.Pads import Pads
from _Axiom.Encoders import Encoders
from SliderSection import SliderSection
import Live
import MidiRemoteScript
class Axiom:
""" A controller script for the M-Audio Axiom Keyboard/Controller series """
def __init__(self, c_instance):
self.__c_instance = c_instance
self.__current_track = self.song().view.selected_track
self.__current_device = self.__current_track.view.selected_device
self.song().add_visible_tracks_listener(self.__tracks_changed)
self.__transport_unit = Transport(self)
self.__encoder_unit = Encoders(self, True)
self.__slider_unit = SliderSection(self)
self.__pad_unit = Pads(self)
def application(self):
"""returns a reference to the application that we are running in
"""
return Live.Application.get_application()
def song(self):
"""returns a reference to the Live song instance that we do control
"""
return self.__c_instance.song()
def disconnect(self):
"""Live -> Script
Called right before we get disconnected from Live.
"""
self.song().remove_visible_tracks_listener(self.__tracks_changed)
self.__encoder_unit.disconnect()
def can_lock_to_devices(self):
return True
def suggest_input_port(self):
"""Live -> Script
Live can ask the script for an input port name to find a suitable one.
"""
return str('USB Axiom')
def suggest_output_port(self):
"""Live -> Script
Live can ask the script for an output port name to find a suitable one.
"""
return str('USB Axiom')
def suggest_map_mode(self, cc_no, channel):
"""Live -> Script
Live can ask the script for a suitable mapping mode for a given CC.
"""
suggested_map_mode = Live.MidiMap.MapMode.absolute
if cc_no in AXIOM_ENCODERS:
suggested_map_mode = Live.MidiMap.MapMode.relative_smooth_binary_offset
return suggested_map_mode
def show_message(self, message):
self.__c_instance.show_message(message)
def supports_pad_translation(self):
return True
def connect_script_instances(self, instanciated_scripts):
"""Called by the Application as soon as all scripts are initialized.
You can connect yourself to other running scripts here, as we do to
connect the extension modules (MackieControlXTs).
"""
pass
def request_rebuild_midi_map(self):
"""Script -> Live
When the internal MIDI controller has changed in a way that you need to rebuild
the MIDI mappings, request a rebuild by calling this function
This is processed as a request, to make sure it is not called too often,
because it is time-critical.
"""
self.__c_instance.request_rebuild_midi_map()
def send_midi(self, midi_event_bytes):
"""Script -> Live
Use this function to send MIDI events through Live to the _real_ MIDI devices
that this script is assigned to.
"""
self.__c_instance.send_midi(midi_event_bytes)
def refresh_state(self):
"""Live -> Script
Send out MIDI to completely update the attached MIDI controller.
Will be called when requested by the user, after for example having reconnected
the MIDI cables...
"""
pass
def build_midi_map(self, midi_map_handle):
"""Live -> Script
Build DeviceParameter Mappings, that are processed in Audio time, or
forward MIDI messages explicitly to our receive_midi_functions.
This means that if you are neither forwarding MIDI nor mapping parameters,
you will never receive any MIDI messages at all.
"""
script_handle = self.__c_instance.handle()
self.__transport_unit.build_midi_map(script_handle, midi_map_handle)
self.__encoder_unit.build_midi_map(script_handle, midi_map_handle)
self.__slider_unit.build_midi_map(script_handle, midi_map_handle)
self.__pad_unit.build_midi_map(script_handle, midi_map_handle)
self.__c_instance.set_pad_translation(PAD_TRANSLATION)
def update_display(self):
"""Live -> Script
Aka on_timer. Called every 100 ms and should be used to update display relevant
parts of the controller
"""
if self.__transport_unit:
self.__transport_unit.refresh_state()
def receive_midi(self, midi_bytes):
"""Live -> Script
MIDI messages are only received through this function, when explicitly
forwarded in 'build_midi_map'.
"""
if midi_bytes[0] & 240 == CC_STATUS:
channel = midi_bytes[0] & 15
cc_no = midi_bytes[1]
cc_value = midi_bytes[2]
if cc_no in AXIOM_TRANSPORT:
self.__transport_unit.receive_midi_cc(cc_no, cc_value)
elif cc_no in AXIOM_BUTTONS:
self.__slider_unit.receive_midi_cc(cc_no, cc_value, channel)
elif cc_no in AXIOM_ENCODERS:
self.__encoder_unit.receive_midi_cc(cc_no, cc_value, channel)
elif cc_no in AXIOM_PADS:
self.__pad_unit.receive_midi_cc(cc_no, cc_value, channel)
elif midi_bytes[0] == 240:
pass
def lock_to_device(self, device):
self.__encoder_unit.lock_to_device(device)
def unlock_from_device(self, device):
self.__encoder_unit.unlock_from_device(device)
def set_appointed_device(self, device):
self.__encoder_unit.set_appointed_device(device)
def __tracks_changed(self):
self.request_rebuild_midi_map()
def bank_changed(self, new_bank):
if self.__encoder_unit.set_bank(new_bank):
self.request_rebuild_midi_map()
def restore_bank(self, bank):
self.__encoder_unit.restore_bank(bank)
self.request_rebuild_midi_map()
def instance_identifier(self):
return self.__c_instance.instance_identifier()
|
[
"[email protected]"
] | |
5f1b314f8c6a6fa4da53ae06524ff29cf5a0f199
|
ecd9b7e440f002a00d3a8a8e62de208508fadb75
|
/flask/docker_flask_simple/app.py
|
cb6628b3c9dbfca90bb81b8a092b41c7c253c91b
|
[] |
no_license
|
enderst3/challenges
|
ce2248eabc36e64dbd84dffad3fea176108bc3f3
|
0b13195cc53d5fc25f7bedd3dad1c14051e216cc
|
refs/heads/master
| 2023-01-09T00:47:51.129159 | 2020-04-20T22:09:00 | 2020-04-20T22:09:00 | 77,492,197 | 0 | 1 | null | 2023-01-03T22:44:04 | 2016-12-28T01:08:14 | null |
UTF-8
|
Python
| false | false | 180 |
py
|
from flask import Flask
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Flask Dockerized'
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0')
|
[
"[email protected]"
] | |
e76a7e53420bb1d7554e59aa283fc3ad1b29a39a
|
268568ff2d483f39de78a5b29d941ce499cace33
|
/external-deps/python-language-server/pyls/_utils.py
|
b1a3bd96c8db094dcff95ae71fcefcf25cd50da1
|
[
"MIT",
"Python-2.0"
] |
permissive
|
MarkMoretto/spyder-master
|
61e7f8007144562978da9c6adecaa3022758c56f
|
5f8c64edc0bbd203a97607950b53a9fcec9d2f0b
|
refs/heads/master
| 2023-01-10T16:34:37.825886 | 2020-08-07T19:07:56 | 2020-08-07T19:07:56 | 285,901,914 | 2 | 1 |
MIT
| 2022-12-20T13:46:41 | 2020-08-07T19:03:37 |
Python
|
UTF-8
|
Python
| false | false | 6,670 |
py
|
# Copyright 2017 Palantir Technologies, Inc.
import functools
import inspect
import logging
import os
import sys
import threading
import jedi
PY2 = sys.version_info.major == 2
JEDI_VERSION = jedi.__version__
if PY2:
import pathlib2 as pathlib
else:
import pathlib
log = logging.getLogger(__name__)
def debounce(interval_s, keyed_by=None):
"""Debounce calls to this function until interval_s seconds have passed."""
def wrapper(func):
timers = {}
lock = threading.Lock()
@functools.wraps(func)
def debounced(*args, **kwargs):
call_args = inspect.getcallargs(func, *args, **kwargs)
key = call_args[keyed_by] if keyed_by else None
def run():
with lock:
del timers[key]
return func(*args, **kwargs)
with lock:
old_timer = timers.get(key)
if old_timer:
old_timer.cancel()
timer = threading.Timer(interval_s, run)
timers[key] = timer
timer.start()
return debounced
return wrapper
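# A minimal usage sketch (handler name and interval are illustrative):
#
#     @debounce(0.5, keyed_by='doc_uri')
#     def lint(doc_uri):
#         ...  # fires once per 0.5 s burst of calls, per document URI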
def find_parents(root, path, names):
"""Find files matching the given names relative to the given path.
Args:
path (str): The file path to start searching up from.
names (List[str]): The file/directory names to look for.
root (str): The directory at which to stop recursing upwards.
Note:
The path MUST be within the root.
"""
if not root:
return []
if not os.path.commonprefix((root, path)):
log.warning("Path %s not in %s", path, root)
return []
# Split the relative by directory, generate all the parent directories, then check each of them.
# This avoids running a loop that has different base-cases for unix/windows
# e.g. /a/b and /a/b/c/d/e.py -> ['/a/b', 'c', 'd']
dirs = [root] + os.path.relpath(os.path.dirname(path), root).split(os.path.sep)
# Search each of /a/b/c, /a/b, /a
while dirs:
search_dir = os.path.join(*dirs)
existing = list(filter(os.path.exists, [os.path.join(search_dir, n) for n in names]))
if existing:
return existing
dirs.pop()
# Otherwise nothing
return []
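# Example with illustrative paths:
#     find_parents('/ws', '/ws/src/pkg/mod.py', ['setup.cfg'])
# checks /ws/src/pkg, then /ws/src, then /ws, returning the first matches
# that exist on disk.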
def match_uri_to_workspace(uri, workspaces):
if uri is None:
return None
max_len, chosen_workspace = -1, None
path = pathlib.Path(uri).parts
for workspace in workspaces:
try:
workspace_parts = pathlib.Path(workspace).parts
except TypeError:
# This can happen in Python2 if 'workspace' is a subclass of string
workspace_parts = pathlib.Path(unicode(workspace)).parts
if len(workspace_parts) > len(path):
continue
match_len = 0
for workspace_part, path_part in zip(workspace_parts, path):
if workspace_part == path_part:
match_len += 1
if match_len > 0:
if match_len > max_len:
max_len = match_len
chosen_workspace = workspace
return chosen_workspace
def list_to_string(value):
return ",".join(value) if isinstance(value, list) else value
def merge_dicts(dict_a, dict_b):
"""Recursively merge dictionary b into dictionary a.
If override_nones is True, then
"""
def _merge_dicts_(a, b):
for key in set(a.keys()).union(b.keys()):
if key in a and key in b:
if isinstance(a[key], dict) and isinstance(b[key], dict):
yield (key, dict(_merge_dicts_(a[key], b[key])))
elif b[key] is not None:
yield (key, b[key])
else:
yield (key, a[key])
elif key in a:
yield (key, a[key])
elif b[key] is not None:
yield (key, b[key])
return dict(_merge_dicts_(dict_a, dict_b))
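# Example of the merge semantics (a None in b does not override a):
#     merge_dicts({'x': 1, 'y': {'a': 1}}, {'x': None, 'y': {'b': 2}})
#     -> {'x': 1, 'y': {'a': 1, 'b': 2}}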
def format_docstring(contents):
"""Python doc strings come in a number of formats, but LSP wants markdown.
Until we can find a fast enough way of discovering and parsing each format,
we can do a little better by at least preserving indentation.
"""
contents = contents.replace('\t', u'\u00A0' * 4)
contents = contents.replace(' ', u'\u00A0' * 2)
return contents
def clip_column(column, lines, line_number):
"""
Normalise the position as per the LSP that accepts character positions > line length
https://microsoft.github.io/language-server-protocol/specification#position
"""
max_column = len(lines[line_number].rstrip('\r\n')) if len(lines) > line_number else 0
return min(column, max_column)
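# Example: clip_column(10, ['ab\n'], 0) -> 2, since the line holds only two
# characters once the trailing newline is stripped.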
def position_to_jedi_linecolumn(document, position):
"""
Convert the LSP format 'line', 'character' to Jedi's 'line', 'column'
https://microsoft.github.io/language-server-protocol/specification#position
"""
code_position = {}
if position:
code_position = {'line': position['line'] + 1,
'column': clip_column(position['character'],
document.lines,
position['line'])}
return code_position
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32
PROCESS_QUERY_INFORMATION = 0x1000
def is_process_alive(pid):
"""Check whether the process with the given pid is still alive.
Running `os.kill()` on Windows always exits the process, so it can't be used to check for an alive process.
see: https://docs.python.org/3/library/os.html?highlight=os%20kill#os.kill
Hence ctypes is used to check for the process directly via windows API avoiding any other 3rd-party dependency.
Args:
pid (int): process ID
Returns:
bool: False if the process is not alive or we don't have permission to check, True otherwise.
"""
process = kernel32.OpenProcess(PROCESS_QUERY_INFORMATION, 0, pid)
if process != 0:
kernel32.CloseHandle(process)
return True
return False
else:
import errno
def is_process_alive(pid):
"""Check whether the process with the given pid is still alive.
Args:
pid (int): process ID
Returns:
bool: False if the process is not alive or we don't have permission to check, True otherwise.
"""
if pid < 0:
return False
try:
os.kill(pid, 0)
except OSError as e:
return e.errno == errno.EPERM
else:
return True
|
[
"[email protected]"
] | |
205911f940c06e8ae528b745e89315ebdc625763
|
3546dd5dbcffc8509440c820faa7cf28080c5df7
|
/python35/Lib/site-packages/scipy/spatial/_procrustes.py
|
e94af255f13055b789b6289a7e0f6872ad2a0b60
|
[
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"LGPL-2.1-only"
] |
permissive
|
Matchoc/python_env
|
55ad609c8270cc6148eda22d37f36709d73b3652
|
859d84d1717a265a4085ad29706b12c19c62d36f
|
refs/heads/master
| 2022-02-13T11:05:51.825544 | 2020-06-05T02:42:08 | 2020-06-05T02:42:08 | 75,793,921 | 0 | 1 |
Apache-2.0
| 2018-12-14T07:30:28 | 2016-12-07T03:06:13 |
Python
|
UTF-8
|
Python
| false | false | 4,498 |
py
|
"""
This module provides functions to perform full Procrustes analysis.
This code was originally written by Justin Kucynski and ported over from
scikit-bio by Yoshiki Vazquez-Baeza.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy.linalg import orthogonal_procrustes
__all__ = ['procrustes']
def procrustes(data1, data2):
r"""Procrustes analysis, a similarity test for two data sets.
Each input matrix is a set of points or vectors (the rows of the matrix).
The dimension of the space is the number of columns of each matrix. Given
two identically sized matrices, procrustes standardizes both such that:
- :math:`tr(AA^{T}) = 1`.
- Both sets of points are centered around the origin.
Procrustes ([1]_, [2]_) then applies the optimal transform to the second
matrix (including scaling/dilation, rotations, and reflections) to minimize
:math:`M^{2}=\sum(data1-data2)^{2}`, or the sum of the squares of the
pointwise differences between the two input datasets.
This function was not designed to handle datasets with different numbers of
datapoints (rows). If two data sets have different dimensionality
(different number of columns), simply add columns of zeros to the smaller of
the two.
Parameters
----------
data1 : array_like
Matrix, n rows represent points in k (columns) space `data1` is the
reference data, after it is standardised, the data from `data2` will be
transformed to fit the pattern in `data1` (must have >1 unique points).
data2 : array_like
n rows of data in k space to be fit to `data1`. Must be the same
shape ``(numrows, numcols)`` as data1 (must have >1 unique points).
Returns
-------
mtx1 : array_like
A standardized version of `data1`.
mtx2 : array_like
The orientation of `data2` that best fits `data1`. Centered, but not
necessarily :math:`tr(AA^{T}) = 1`.
disparity : float
:math:`M^{2}` as defined above.
Raises
------
ValueError
If the input arrays are not two-dimensional.
If the shape of the input arrays is different.
If the input arrays have zero columns or zero rows.
See Also
--------
scipy.linalg.orthogonal_procrustes
Notes
-----
- The disparity should not depend on the order of the input matrices, but
the output matrices will, as only the first output matrix is guaranteed
to be scaled such that :math:`tr(AA^{T}) = 1`.
- Duplicate data points are generally fine; duplicating a data point will
increase its effect on the procrustes fit.
- The disparity scales as the number of points per input matrix.
References
----------
.. [1] Krzanowski, W. J. (2000). "Principles of Multivariate analysis".
.. [2] Gower, J. C. (1975). "Generalized procrustes analysis".
Examples
--------
>>> from scipy.spatial import procrustes
The matrix ``b`` is a rotated, shifted, scaled and mirrored version of
``a`` here:
>>> a = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], 'd')
>>> b = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], 'd')
>>> mtx1, mtx2, disparity = procrustes(a, b)
>>> round(disparity)
0.0
"""
mtx1 = np.array(data1, dtype=np.double, copy=True)
mtx2 = np.array(data2, dtype=np.double, copy=True)
if mtx1.ndim != 2 or mtx2.ndim != 2:
raise ValueError("Input matrices must be two-dimensional")
if mtx1.shape != mtx2.shape:
raise ValueError("Input matrices must be of same shape")
if mtx1.size == 0:
raise ValueError("Input matrices must be >0 rows and >0 cols")
# translate all the data to the origin
mtx1 -= np.mean(mtx1, 0)
mtx2 -= np.mean(mtx2, 0)
norm1 = np.linalg.norm(mtx1)
norm2 = np.linalg.norm(mtx2)
if norm1 == 0 or norm2 == 0:
raise ValueError("Input matrices must contain >1 unique points")
# change scaling of data (in rows) such that trace(mtx*mtx') = 1
mtx1 /= norm1
mtx2 /= norm2
# transform mtx2 to minimize disparity
R, s = orthogonal_procrustes(mtx1, mtx2)
mtx2 = np.dot(mtx2, R.T) * s
# measure the dissimilarity between the two datasets
disparity = np.sum(np.square(mtx1 - mtx2))
return mtx1, mtx2, disparity
|
[
"[email protected]"
] | |
99a4eea6eda5b5fb118f8699d09cd04ae35731c3
|
2f898bb332097d11f321186207e94f6d156587f3
|
/audio/generar_lista.py
|
19b54a1a93b7ab07278abcc704016addeba3b756
|
[
"MIT"
] |
permissive
|
miltonsarria/teaching
|
ad2d07e9cfbfcf272c4b2fbef47321eae765a605
|
7a2b4e6c74d9f11562dfe34722e607ca081c1681
|
refs/heads/master
| 2022-01-05T05:58:13.163155 | 2019-05-02T20:45:46 | 2019-05-02T20:45:46 | 102,375,690 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 372 |
py
|
import os
data_dir='/home/sarria/python/audio/audio'
######################
# walk data_dir and write the full path of every .wav file to lista.txt
archivo=open('lista.txt','w')
for dirpath, dirnames, filenames in os.walk(data_dir):
for name in filenames:
if os.path.splitext(name)[1] == '.wav':
fullname = os.path.join(dirpath,name)
archivo.write(fullname+'\n')
archivo.close()
|
[
"[email protected]"
] | |
cdf97c1a0c5c56f2ee41575f2427b61649c0da97
|
043ca446cbee59c1926de7473869ef34748e5b2b
|
/_2016/patreon.py
|
e381afcf994ca2b241b69e2c1b1d7faa5516bb21
|
[
"MIT"
] |
permissive
|
soubam/videos
|
785d04907d6955456797f989893b3f811c7f721e
|
04a00e521808e2b733903bd1c91435a29ba2c678
|
refs/heads/master
| 2023-08-28T15:38:08.284733 | 2021-11-11T23:22:35 | 2021-11-11T23:22:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 20,927 |
py
|
from manim_imports_ext import *
class SideGigToFullTime(Scene):
def construct(self):
morty = Mortimer()
morty.next_to(ORIGIN, DOWN)
self.add(morty)
self.side_project(morty)
self.income(morty)
self.full_time(morty)
def side_project(self, morty):
rect = PictureInPictureFrame()
rect.next_to(morty, UP+LEFT)
side_project = TexText("Side project")
side_project.next_to(rect, UP)
dollar_sign = Tex("\\$")
cross = VGroup(*[
Line(vect, -vect, color = RED)
for vect in (UP+RIGHT, UP+LEFT)
])
cross.set_height(dollar_sign.get_height())
no_money = VGroup(dollar_sign, cross)
no_money.next_to(rect, DOWN)
self.play(
morty.change_mode, "raise_right_hand",
morty.look_at, rect
)
self.play(
Write(side_project),
ShowCreation(rect)
)
self.wait()
self.play(Blink(morty))
self.wait()
self.play(Write(dollar_sign))
self.play(ShowCreation(cross))
self.screen_title = side_project
self.cross = cross
def income(self, morty):
dollar_signs = VGroup(*[
Tex("\\$")
for x in range(10)
])
dollar_signs.arrange(RIGHT, buff = LARGE_BUFF)
dollar_signs.set_color(BLACK)
dollar_signs.next_to(morty.eyes, RIGHT, buff = 2*LARGE_BUFF)
self.play(
morty.change_mode, "happy",
morty.look_at, dollar_signs,
dollar_signs.shift, LEFT,
dollar_signs.set_color, GREEN
)
for x in range(5):
last_sign = dollar_signs[0]
dollar_signs.remove(last_sign)
self.play(
FadeOut(last_sign),
dollar_signs.shift, LEFT
)
random.shuffle(dollar_signs.submobjects)
self.play(
ApplyMethod(
dollar_signs.shift,
(FRAME_Y_RADIUS+1)*DOWN,
lag_ratio = 0.5
),
morty.change_mode, "guilty",
morty.look, DOWN+RIGHT
)
self.play(Blink(morty))
def full_time(self, morty):
new_title = TexText("Full time")
new_title.move_to(self.screen_title)
q_mark = Tex("?")
q_mark.next_to(self.cross)
q_mark.set_color(GREEN)
self.play(morty.look_at, q_mark)
self.play(Transform(self.screen_title, new_title))
self.play(
Transform(self.cross, q_mark),
morty.change_mode, "confused"
)
self.play(Blink(morty))
self.wait()
self.play(
morty.change_mode, "happy",
morty.look, UP+RIGHT
)
self.play(Blink(morty))
self.wait()
class TakesTime(Scene):
def construct(self):
rect = PictureInPictureFrame(height = 4)
rect.to_edge(RIGHT, buff = LARGE_BUFF)
clock = Clock()
clock.hour_hand.set_color(BLUE_C)
clock.minute_hand.set_color(BLUE_D)
clock.next_to(rect, LEFT, buff = LARGE_BUFF)
self.add(rect)
self.play(ShowCreation(clock))
for x in range(3):
self.play(ClockPassesTime(clock))
class GrowingToDoList(Scene):
def construct(self):
morty = Mortimer()
morty.flip()
morty.next_to(ORIGIN, DOWN+LEFT)
title = TexText("3blue1brown to-do list")
title.next_to(ORIGIN, RIGHT)
title.to_edge(UP)
underline = Line(title.get_left(), title.get_right())
underline.next_to(title, DOWN)
lines = VGroup(*list(map(TexText, [
"That one on topology",
"Something with quaternions",
"Solving puzzles with binary counting",
"Tatoos on math",
"Laplace stuffs",
"The role of memorization in math",
"Strangeness of the axiom of choice",
"Tensors",
"Different view of $e^{\\pi i}$",
"Quadratic reciprocity",
"Fourier stuffs",
"$1+2+3+\\cdots = -\\frac{1}{12}$",
"Understanding entropy",
])))
lines.scale(0.65)
lines.arrange(DOWN, buff = MED_SMALL_BUFF, aligned_edge = LEFT)
lines.set_color_by_gradient(BLUE_C, YELLOW)
lines.next_to(title, DOWN, buff = LARGE_BUFF/2.)
lines.to_edge(RIGHT)
self.play(
Write(title),
morty.look_at, title
)
self.play(
Write(lines[0]),
morty.change_mode, "erm",
run_time = 1
)
for line in lines[1:3]:
self.play(
Write(line),
morty.look_at, line,
run_time = 1
)
self.play(
morty.change_mode, "pleading",
morty.look_at, lines,
Write(
VGroup(*lines[3:]),
)
)
class TwoTypesOfVideos(Scene):
def construct(self):
morty = Mortimer().shift(2*DOWN)
stand_alone = TexText("Standalone videos")
stand_alone.shift(FRAME_X_RADIUS*LEFT/2)
stand_alone.to_edge(UP)
series = TexText("Series")
series.shift(FRAME_X_RADIUS*RIGHT/2)
series.to_edge(UP)
box = Rectangle(width = 16, height = 9, color = WHITE)
box.set_height(3)
box.next_to(stand_alone, DOWN)
series_list = VGroup(*[
TexText("Essence of %s"%s)
for s in [
"linear algebra",
"calculus",
"probability",
"real analysis",
"complex analysis",
"ODEs",
]
])
series_list.arrange(DOWN, aligned_edge = LEFT, buff = MED_SMALL_BUFF)
series_list.set_width(FRAME_X_RADIUS-2)
series_list.next_to(series, DOWN, buff = MED_SMALL_BUFF)
series_list.to_edge(RIGHT)
fridays = TexText("Every other friday")
when_done = TexText("When series is done")
for words, vect in (fridays, LEFT), (when_done, RIGHT):
words.set_color(YELLOW)
words.next_to(
morty, vect,
buff = MED_SMALL_BUFF,
aligned_edge = UP
)
unless = TexText("""
Unless you're
a patron \\dots
""")
unless.next_to(when_done, DOWN, buff = MED_SMALL_BUFF)
self.add(morty)
self.play(Blink(morty))
self.play(
morty.change_mode, "raise_right_hand",
morty.look_at, stand_alone,
Write(stand_alone, run_time = 2),
)
self.play(
morty.change_mode, "raise_left_hand",
morty.look_at, series,
Write(series, run_time = 2),
)
self.play(Blink(morty))
self.wait()
self.play(
morty.change_mode, "raise_right_hand",
morty.look_at, box,
ShowCreation(box)
)
for x in range(3):
self.wait(2)
self.play(Blink(morty))
self.play(
morty.change_mode, "raise_left_hand",
morty.look_at, series
)
for i, words in enumerate(series_list):
self.play(Write(words), run_time = 1)
self.play(Blink(morty))
self.wait()
self.play(series_list[1].set_color, BLUE)
self.wait(2)
self.play(Blink(morty))
self.wait()
pairs = [
(fridays, "speaking"),
(when_done, "wave_2") ,
(unless, "surprised"),
]
for words, mode in pairs:
self.play(
Write(words),
morty.change_mode, mode,
morty.look_at, words
)
self.wait()
class ClassWatching(TeacherStudentsScene):
def construct(self):
rect = PictureInPictureFrame(height = 4)
rect.next_to(self.get_teacher(), UP, buff = LARGE_BUFF/2.)
rect.to_edge(RIGHT)
self.add(rect)
for pi in self.get_students():
pi.look_at(rect)
self.random_blink(5)
self.change_student_modes(
"raise_left_hand",
"raise_right_hand",
"sassy",
)
self.play(self.get_teacher().change_mode, "pondering")
self.random_blink(3)
class RandolphWatching(Scene):
def construct(self):
randy = Randolph()
randy.shift(2*LEFT)
randy.look(RIGHT)
self.add(randy)
self.wait()
self.play(Blink(randy))
self.wait()
self.play(
randy.change_mode, "pondering",
randy.look, RIGHT
)
self.play(Blink(randy))
self.wait()
class RandolphWatchingWithLaptop(Scene):
pass
class GrowRonaksSierpinski(Scene):
CONFIG = {
"colors" : [BLUE, YELLOW, BLUE_C, BLUE_E],
"dot_radius" : 0.08,
"n_layers" : 64,
}
def construct(self):
sierp = self.get_ronaks_sierpinski(self.n_layers)
dots = self.get_dots(self.n_layers)
self.triangle = VGroup(sierp, dots)
self.triangle.scale(1.5)
self.triangle.shift(3*UP)
sierp_layers = sierp.submobjects
dot_layers = dots.submobjects
last_dot_layer = dot_layers[0]
self.play(ShowCreation(last_dot_layer))
run_time = 1
for n, sierp_layer, dot_layer in zip(it.count(1), sierp_layers, dot_layers[1:]):
self.play(
ShowCreation(sierp_layer, lag_ratio=1),
Animation(last_dot_layer),
run_time = run_time
)
self.play(ShowCreation(
dot_layer,
run_time = run_time,
lag_ratio=1,
))
# if n == 2:
# dot = dot_layer[1]
# words = TexText("Stop growth at pink")
# words.next_to(dot, DOWN, 2)
# arrow = Arrow(words, dot)
# self.play(
# Write(words),
# ShowCreation(arrow)
# )
# self.wait()
# self.play(*map(FadeOut, [words, arrow]))
log2 = np.log2(n)
if n > 2 and log2-np.round(log2) == 0 and n < self.n_layers:
self.wait()
self.rescale()
run_time /= 1.3
last_dot_layer = dot_layer
def rescale(self):
shown_mobs = VGroup(*self.get_mobjects())
shown_mobs_copy = shown_mobs.copy()
self.remove(shown_mobs)
self.add(shown_mobs_copy)
top = shown_mobs.get_top()
self.triangle.scale(0.5)
self.triangle.move_to(top, aligned_edge = UP)
self.play(Transform(shown_mobs_copy, shown_mobs))
self.remove(shown_mobs_copy)
self.add(shown_mobs)
def get_pascal_point(self, n, k):
return n*rotate_vector(RIGHT, -2*np.pi/3) + k*RIGHT
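# Geometry note: (n, k) indexes Pascal's triangle; row n steps along a
# direction rotated -120 degrees from RIGHT and column k steps right, so
# drawing only the odd binomial entries traces the Sierpinski pattern.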
def get_lines_at_layer(self, n):
lines = VGroup()
for k in range(n+1):
if choose(n, k)%2 == 1:
p1 = self.get_pascal_point(n, k)
p2 = self.get_pascal_point(n+1, k)
p3 = self.get_pascal_point(n+1, k+1)
lines.add(Line(p1, p2), Line(p1, p3))
return lines
def get_dot_layer(self, n):
dots = VGroup()
for k in range(n+1):
p = self.get_pascal_point(n, k)
dot = Dot(p, radius = self.dot_radius)
if choose(n, k)%2 == 0:
if choose(n-1, k)%2 == 0:
continue
dot.set_color(PINK)
else:
dot.set_color(WHITE)
dots.add(dot)
return dots
def get_ronaks_sierpinski(self, n_layers):
ronaks_sierpinski = VGroup()
for n in range(n_layers):
ronaks_sierpinski.add(self.get_lines_at_layer(n))
ronaks_sierpinski.set_color_by_gradient(*self.colors)
ronaks_sierpinski.set_stroke(width = 0)##TODO
return ronaks_sierpinski
def get_dots(self, n_layers):
dots = VGroup()
for n in range(n_layers+1):
dots.add(self.get_dot_layer(n))
return dots
class PatreonLogo(Scene):
def construct(self):
words1 = TexText(
"Support future\\\\",
"3blue1brown videos"
)
words2 = TexText(
"Early access to\\\\",
"``Essence of'' series"
)
for words in words1, words2:
words.scale(2)
words.to_edge(DOWN)
self.play(Write(words1))
self.wait(2)
self.play(Transform(words1, words2))
self.wait(2)
class PatreonLogin(Scene):
pass
class PythagoreanTransformation(Scene):
def construct(self):
tri1 = VGroup(
Line(ORIGIN, 2*RIGHT, color = BLUE),
Line(2*RIGHT, 3*UP, color = YELLOW),
Line(3*UP, ORIGIN, color = MAROON_B),
)
tri1.shift(2.5*(DOWN+LEFT))
tri2, tri3, tri4 = copies = [
tri1.copy().rotate(-i*np.pi/2)
for i in range(1, 4)
]
a = Tex("a").next_to(tri1[0], DOWN, buff = MED_SMALL_BUFF)
b = Tex("b").next_to(tri1[2], LEFT, buff = MED_SMALL_BUFF)
c = Tex("c").next_to(tri1[1].get_center(), UP+RIGHT)
c_square = Polygon(*[
tri[1].get_end()
for tri in [tri1] + copies
])
c_square.set_stroke(width = 0)
c_square.set_fill(color = YELLOW, opacity = 0.5)
c_square_tex = Tex("c^2")
big_square = Polygon(*[
tri[0].get_start()
for tri in [tri1] + copies
])
big_square.set_color(WHITE)
a_square = Square(side_length = 2)
a_square.shift(1.5*(LEFT+UP))
a_square.set_stroke(width = 0)
a_square.set_fill(color = BLUE, opacity = 0.5)
a_square_tex = Tex("a^2")
a_square_tex.move_to(a_square)
b_square = Square(side_length = 3)
b_square.move_to(
a_square.get_corner(DOWN+RIGHT),
aligned_edge = UP+LEFT
)
b_square.set_stroke(width = 0)
b_square.set_fill(color = MAROON_B, opacity = 0.5)
b_square_tex = Tex("b^2")
b_square_tex.move_to(b_square)
self.play(ShowCreation(tri1, run_time = 2))
self.play(*list(map(Write, [a, b, c])))
self.wait()
self.play(
FadeIn(c_square),
Animation(c)
)
self.play(Transform(c, c_square_tex))
self.wait(2)
mover = tri1.copy()
for copy in copies:
self.play(Transform(
mover, copy,
path_arc = -np.pi/2
))
self.add(copy)
self.remove(mover)
self.add(big_square, *[tri1]+copies)
self.wait(2)
self.play(*list(map(FadeOut, [a, b, c, c_square])))
self.play(
tri3.shift,
tri1.get_corner(UP+LEFT) -\
tri3.get_corner(UP+LEFT)
)
self.play(tri2.shift, 2*RIGHT)
self.play(tri4.shift, 3*UP)
self.wait()
self.play(FadeIn(a_square))
self.play(FadeIn(b_square))
self.play(Write(a_square_tex))
self.play(Write(b_square_tex))
self.wait(2)
class KindWordsOnEoLA(TeacherStudentsScene):
def construct(self):
rect = Rectangle(width = 16, height = 9, color = WHITE)
rect.set_height(4)
title = TexText("Essence of linear algebra")
title.to_edge(UP)
rect.next_to(title, DOWN)
self.play(
Write(title),
ShowCreation(rect),
*[
ApplyMethod(pi.look_at, rect)
for pi in self.get_pi_creatures()
],
run_time = 2
)
self.random_blink()
self.change_student_modes(*["hooray"]*3)
self.random_blink()
self.play(self.get_teacher().change_mode, "happy")
self.random_blink()
class MakeALotOfPiCreaturesHappy(Scene):
def construct(self):
width = 7
height = 4
pis = VGroup(*[
VGroup(*[
Randolph()
for x in range(7)
]).arrange(RIGHT, buff = MED_LARGE_BUFF)
for x in range(4)
]).arrange(DOWN, buff = MED_LARGE_BUFF)
pi_list = list(it.chain(*[
layer.submobjects
for layer in pis.submobjects
]))
random.shuffle(pi_list)
colors = color_gradient([BLUE_D, GREY_BROWN], len(pi_list))
for pi, color in zip(pi_list, colors):
pi.set_color(color)
pis = VGroup(*pi_list)
pis.set_height(6)
self.add(pis)
pis.generate_target()
self.wait()
for pi, color in zip(pis.target, colors):
pi.change_mode("hooray")
# pi.scale(1)
pi.set_color(color)
self.play(
MoveToTarget(
pis,
run_time = 2,
lag_ratio = 0.5,
)
)
for x in range(10):
pi = random.choice(pi_list)
self.play(Blink(pi))
class IntegrationByParts(Scene):
def construct(self):
rect = Rectangle(width = 5, height = 3)
# f = lambda t : 4*np.sin(t*np.pi/2)
f = lambda t : 4*t
g = lambda t : 3*smooth(t)
curve = ParametricCurve(lambda t : f(t)*RIGHT + g(t)*DOWN)
curve.set_color(YELLOW)
curve.center()
rect = Rectangle()
rect.replace(curve, stretch = True)
regions = []
for vect, color in (UP+RIGHT, BLUE), (DOWN+LEFT, GREEN):
region = curve.copy()
region.add_line_to(rect.get_corner(vect))
region.set_stroke(width = 0)
region.set_fill(color = color, opacity = 0.5)
regions.append(region)
upper_right, lower_left = regions
v_lines, h_lines = VGroup(), VGroup()
for alpha in np.linspace(0, 1, 30):
point = curve.point_from_proportion(alpha)
top_point = curve.get_points()[0][1]*UP + point[0]*RIGHT
left_point = curve.get_points()[0][0]*RIGHT + point[1]*UP
v_lines.add(Line(top_point, point))
h_lines.add(Line(left_point, point))
v_lines.set_color(BLUE_E)
h_lines.set_color(GREEN_E)
equation = Tex(
"\\int_0^1 g\\,df",
"+\\int_0^1 f\\,dg",
"= \\big(fg \\big)_0^1"
)
equation.to_edge(UP)
equation.set_color_by_tex(
"\\int_0^1 g\\,df",
upper_right.get_color()
)
equation.set_color_by_tex(
"+\\int_0^1 f\\,dg",
lower_left.get_color()
)
left_brace = Brace(rect, LEFT)
down_brace = Brace(rect, DOWN)
g_T = left_brace.get_text("$g(t)\\big|_0^1$")
f_T = down_brace.get_text("$f(t)\\big|_0^1$")
self.draw_curve(curve)
self.play(ShowCreation(rect))
self.play(*list(map(Write, [down_brace, left_brace, f_T, g_T])))
self.wait()
self.play(FadeIn(upper_right))
self.play(
ShowCreation(
v_lines,
run_time = 2
),
Animation(curve),
Animation(rect)
)
self.play(Write(equation[0]))
self.wait()
self.play(FadeIn(lower_left))
self.play(
ShowCreation(
h_lines,
run_time = 2
),
Animation(curve),
Animation(rect)
)
self.play(Write(equation[1]))
self.wait()
self.play(Write(equation[2]))
self.wait()
def draw_curve(self, curve):
lp, lnum, comma, rnum, rp = coords = Tex(
"\\big(f(", "t", "), g(", "t", ")\\big)"
)
coords.set_color_by_tex("0.00", BLACK)
dot = Dot(radius = 0.1)
dot.move_to(curve.get_points()[0])
coords.next_to(dot, UP+RIGHT)
self.play(
ShowCreation(curve),
UpdateFromFunc(
dot,
lambda d : d.move_to(curve.get_points()[-1])
),
MaintainPositionRelativeTo(coords, dot),
run_time = 5,
rate_func=linear
)
self.wait()
self.play(*list(map(FadeOut, [coords, dot])))
class EndScreen(TeacherStudentsScene):
def construct(self):
self.teacher_says(
"""
See you every
other friday!
""",
target_mode = "hooray"
)
self.change_student_modes(*["happy"]*3)
self.random_blink()
|
[
"[email protected]"
] | |
6e2543b569d01c6fb691b474f00c0e8cba92b412
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/22_专题/字典序列删除/No.205 マージして辞書順最小.py
|
eaa9b544e0e0d282d781269d4968c7757f805bb3
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null |
UTF-8
|
Python
| false | false | 586 |
py
|
# https://yukicoder.me/problems/no/205
from heapq import heapify, heappop, heappush
from typing import List
def minLexMerge(words: List[str]) -> str:
"""字典序最小的合并字符串"""
pq = [w + chr(200) for w in words]
heapify(pq)
res = []
while pq:
min_ = heappop(pq)
res.append(min_[0])
min_ = min_[1:]
if len(min_) >= 2:
heappush(pq, min_)
return "".join(res)
if __name__ == "__main__":
N = int(input())
words = [input() for _ in range(N)]
print(minLexMerge(words))
|
[
"[email protected]"
] | |
04354364843fb4756d535de602e0fdfbd92a9c56
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_Class592.py
|
0d982aed897b3624ea008b4b08f71e31f6e89d1f
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,619 |
py
|
# qubit number=4
# total number=16
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.cx(input_qubit[0],input_qubit[1]) # number=13
prog.x(input_qubit[1]) # number=14
prog.cx(input_qubit[0],input_qubit[1]) # number=15
prog.cx(input_qubit[0],input_qubit[1]) # number=10
prog.x(input_qubit[1]) # number=11
prog.cx(input_qubit[0],input_qubit[1]) # number=12
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
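# Grid-search the analytic expectation landscape F1 over (gamma, beta) and
# keep the maximizing pair (specific to this 4-node graph).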
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =3962
writefile = open("../data/startQiskit_Class592.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = BasicAer.get_backend('statevector_simulator')
circuit1 = transpile(prog, FakeYorktown())
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
d8cf6188053ba908317208d886dcc8fd7f1b6f87
|
62bbfb6c50bba16304202aea96d1de4990f95e04
|
/dependencies/pulumi_aws/ebs/get_volume.py
|
76cdd9bcbc32565818b181ebf5c6ff8e2aa9a736
|
[] |
no_license
|
adriell/lambda-autoservico-storagegateway
|
b40b8717c8de076e61bbd422461c7d624a0d2273
|
f6e3dea61b004b73943a5438c658d3f019f106f7
|
refs/heads/main
| 2023-03-16T14:41:16.821675 | 2021-03-11T03:30:33 | 2021-03-11T03:30:33 | 345,865,704 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,905 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = [
'GetVolumeResult',
'AwaitableGetVolumeResult',
'get_volume',
]
@pulumi.output_type
class GetVolumeResult:
"""
A collection of values returned by getVolume.
"""
def __init__(__self__, arn=None, availability_zone=None, encrypted=None, filters=None, id=None, iops=None, kms_key_id=None, most_recent=None, multi_attach_enabled=None, outpost_arn=None, size=None, snapshot_id=None, tags=None, throughput=None, volume_id=None, volume_type=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if availability_zone and not isinstance(availability_zone, str):
raise TypeError("Expected argument 'availability_zone' to be a str")
pulumi.set(__self__, "availability_zone", availability_zone)
if encrypted and not isinstance(encrypted, bool):
raise TypeError("Expected argument 'encrypted' to be a bool")
pulumi.set(__self__, "encrypted", encrypted)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if iops and not isinstance(iops, int):
raise TypeError("Expected argument 'iops' to be a int")
pulumi.set(__self__, "iops", iops)
if kms_key_id and not isinstance(kms_key_id, str):
raise TypeError("Expected argument 'kms_key_id' to be a str")
pulumi.set(__self__, "kms_key_id", kms_key_id)
if most_recent and not isinstance(most_recent, bool):
raise TypeError("Expected argument 'most_recent' to be a bool")
pulumi.set(__self__, "most_recent", most_recent)
if multi_attach_enabled and not isinstance(multi_attach_enabled, bool):
raise TypeError("Expected argument 'multi_attach_enabled' to be a bool")
pulumi.set(__self__, "multi_attach_enabled", multi_attach_enabled)
if outpost_arn and not isinstance(outpost_arn, str):
raise TypeError("Expected argument 'outpost_arn' to be a str")
pulumi.set(__self__, "outpost_arn", outpost_arn)
if size and not isinstance(size, int):
raise TypeError("Expected argument 'size' to be a int")
pulumi.set(__self__, "size", size)
if snapshot_id and not isinstance(snapshot_id, str):
raise TypeError("Expected argument 'snapshot_id' to be a str")
pulumi.set(__self__, "snapshot_id", snapshot_id)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if throughput and not isinstance(throughput, int):
raise TypeError("Expected argument 'throughput' to be a int")
pulumi.set(__self__, "throughput", throughput)
if volume_id and not isinstance(volume_id, str):
raise TypeError("Expected argument 'volume_id' to be a str")
pulumi.set(__self__, "volume_id", volume_id)
if volume_type and not isinstance(volume_type, str):
raise TypeError("Expected argument 'volume_type' to be a str")
pulumi.set(__self__, "volume_type", volume_type)
@property
@pulumi.getter
def arn(self) -> str:
"""
The volume ARN (e.g. arn:aws:ec2:us-east-1:0123456789012:volume/vol-59fcb34e).
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="availabilityZone")
def availability_zone(self) -> str:
"""
The AZ where the EBS volume exists.
"""
return pulumi.get(self, "availability_zone")
@property
@pulumi.getter
def encrypted(self) -> bool:
"""
Whether the disk is encrypted.
"""
return pulumi.get(self, "encrypted")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetVolumeFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def iops(self) -> int:
"""
The amount of IOPS for the disk.
"""
return pulumi.get(self, "iops")
@property
@pulumi.getter(name="kmsKeyId")
def kms_key_id(self) -> str:
"""
The ARN for the KMS encryption key.
"""
return pulumi.get(self, "kms_key_id")
@property
@pulumi.getter(name="mostRecent")
def most_recent(self) -> Optional[bool]:
return pulumi.get(self, "most_recent")
@property
@pulumi.getter(name="multiAttachEnabled")
def multi_attach_enabled(self) -> bool:
"""
(Optional) Specifies whether Amazon EBS Multi-Attach is enabled.
"""
return pulumi.get(self, "multi_attach_enabled")
@property
@pulumi.getter(name="outpostArn")
def outpost_arn(self) -> str:
"""
The Amazon Resource Name (ARN) of the Outpost.
"""
return pulumi.get(self, "outpost_arn")
@property
@pulumi.getter
def size(self) -> int:
"""
The size of the drive in GiBs.
"""
return pulumi.get(self, "size")
@property
@pulumi.getter(name="snapshotId")
def snapshot_id(self) -> str:
"""
The snapshot_id the EBS volume is based off.
"""
return pulumi.get(self, "snapshot_id")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A map of tags for the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def throughput(self) -> int:
"""
The throughput that the volume supports, in MiB/s.
"""
return pulumi.get(self, "throughput")
@property
@pulumi.getter(name="volumeId")
def volume_id(self) -> str:
"""
The volume ID (e.g. vol-59fcb34e).
"""
return pulumi.get(self, "volume_id")
@property
@pulumi.getter(name="volumeType")
def volume_type(self) -> str:
"""
The type of EBS volume.
"""
return pulumi.get(self, "volume_type")
class AwaitableGetVolumeResult(GetVolumeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVolumeResult(
arn=self.arn,
availability_zone=self.availability_zone,
encrypted=self.encrypted,
filters=self.filters,
id=self.id,
iops=self.iops,
kms_key_id=self.kms_key_id,
most_recent=self.most_recent,
multi_attach_enabled=self.multi_attach_enabled,
outpost_arn=self.outpost_arn,
size=self.size,
snapshot_id=self.snapshot_id,
tags=self.tags,
throughput=self.throughput,
volume_id=self.volume_id,
volume_type=self.volume_type)
def get_volume(filters: Optional[Sequence[pulumi.InputType['GetVolumeFilterArgs']]] = None,
most_recent: Optional[bool] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVolumeResult:
"""
Use this data source to get information about an EBS volume for use in other
resources.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
ebs_volume = aws.ebs.get_volume(filters=[
aws.ebs.GetVolumeFilterArgs(
name="volume-type",
values=["gp2"],
),
aws.ebs.GetVolumeFilterArgs(
name="tag:Name",
values=["Example"],
),
],
most_recent=True)
```
:param Sequence[pulumi.InputType['GetVolumeFilterArgs']] filters: One or more name/value pairs to filter off of. There are
several valid keys, for a full reference, check out
[describe-volumes in the AWS CLI reference][1].
:param bool most_recent: If more than one result is returned, use the most
recent Volume.
:param Mapping[str, str] tags: A map of tags for the resource.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['mostRecent'] = most_recent
__args__['tags'] = tags
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:ebs/getVolume:getVolume', __args__, opts=opts, typ=GetVolumeResult).value
return AwaitableGetVolumeResult(
arn=__ret__.arn,
availability_zone=__ret__.availability_zone,
encrypted=__ret__.encrypted,
filters=__ret__.filters,
id=__ret__.id,
iops=__ret__.iops,
kms_key_id=__ret__.kms_key_id,
most_recent=__ret__.most_recent,
multi_attach_enabled=__ret__.multi_attach_enabled,
outpost_arn=__ret__.outpost_arn,
size=__ret__.size,
snapshot_id=__ret__.snapshot_id,
tags=__ret__.tags,
throughput=__ret__.throughput,
volume_id=__ret__.volume_id,
volume_type=__ret__.volume_type)
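# Usage sketch (illustrative, not part of the generated module): the values on
# GetVolumeResult are plain attributes, so a program can export them directly.
#
#     import pulumi
#     import pulumi_aws as aws
#
#     vol = aws.ebs.get_volume(most_recent=True)  # add GetVolumeFilterArgs as needed
#     pulumi.export("volume_id", vol.volume_id)
#     pulumi.export("size_gib", vol.size)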
|
[
"[email protected]"
] | |
0db7239c4f03d3370f18ba124dbfcb684296c2b2
|
962bc309985d39c115948dc788027dd860491ec8
|
/src/bioservices/tcga.py
|
67281041e4d623bedabba038cd7b7b6e13f0a6f3
|
[] |
no_license
|
kp14/bioservices
|
6d03808ca5d14c84b708a819c8e4ad3ba3cb8931
|
a0e7f0f7aee3c8afc41ebebeb70d04dd02155e4f
|
refs/heads/master
| 2020-12-25T20:43:15.568664 | 2015-11-04T15:46:04 | 2015-11-04T15:46:04 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,865 |
py
|
# -*- python -*-
#
# This file is part of bioservices software
#
# Copyright (c) 2013-2014 - EBI-EMBL
#
# File author(s):
# Thomas Cokelaer <[email protected]>
#
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# website: https://github.com/cokelaer/bioservices
# documentation: http://packages.python.org/bioservices
#
##############################################################################
# $Id$
"""Interface to the quickGO interface
.. topic:: What is quickGO
:URL: http://www.ebi.ac.uk/QuickGO/
:Service: http://www.ebi.ac.uk/QuickGO/WebServices.html
.. highlights::
"QuickGO is a fast web-based browser for Gene Ontology terms and
annotations, which is provided by the UniProt-GOA project at the EBI. "
-- from QuickGO home page, Dec 2012
"""
from __future__ import print_function
from bioservices.services import REST
__all__ = ["TCGA"]
class TCGA(REST):
"""Interface to the `TCGA`_ service
DRAFT in progress
https://wiki.nci.nih.gov/display/TCGA/TCGA+Annotations+Web+Service+User%27s+Guide
"""
def __init__(self, verbose=False, cache=False):
""".. rubric:: Constructor
:param bool verbose: print informative messages.
"""
        super(TCGA, self).__init__(url="http://tcga-data.nci.nih.gov",
                                   name="TCGA", verbose=verbose, cache=cache)
    def search_annotations(self, item=None, annotationId=None):
        """Search TCGA annotations by item and/or annotation identifier.
        """
        params = {'item': item, 'annotationId': annotationId}
        res = self.http_get("annotations/resources/searchannotations/json",
            frmt="json", params=params)
        return res
def view_annotations(self):
raise NotImplementedError
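# Usage sketch (network access to the TCGA service required; the annotation id
# below is an illustrative placeholder, not a known-valid value):
#
#     from bioservices.tcga import TCGA
#     t = TCGA()
#     res = t.search_annotations(annotationId=443)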
|
[
"[email protected]"
] | |
78e539bc4df5bd03fd0482b41756fceec1fe2904
|
0aedbdf98c00ff0c74a6fa759d4991563b957cde
|
/TwitterStream.py
|
5061256bb6e4ebb9d7cc5fee8bd48a203c55c4fe
|
[] |
no_license
|
seethakris/ASPP2018
|
b953b0cc6684f9a9c667e85b6a37de73a2f0f41a
|
44aca0d6f3d3a1dac3bd9348477d23adc7b53f43
|
refs/heads/master
| 2021-09-04T11:07:51.423377 | 2018-01-18T04:24:54 | 2018-01-18T04:24:54 | 117,678,504 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,805 |
py
|
# Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
from time import sleep
from collections import deque # list-like container with fast appends and pops on either end
# Variables that contains the user credentials to access Twitter API
access_token = "YOURKEYHERE"
access_token_secret = "YOURKEYHERE"
consumer_key = "YOURKEYHERE"
consumer_secret = "YOURKEYHERE"
# This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
"""
Class with functions to stream tweets
"""
def __init__(self, api=None, maxlength=int(1e5)):
super(StdOutListener, self).__init__()
self.queue = deque(maxlen=maxlength)
def on_status(self, status):
if status.lang.find('en') < 0:
return
if status.coordinates is not None and status.entities.get('hashtags') != []:
self.queue.append(status)
def on_error(self, status_code):
print('Error:', status_code)
return False
def gettweets(maxlength=int(1e5), wait_time=0.001):
"""
Tweets are streamed and stored in a queue. The queue is popped from the left during function call
:param maxlength: maximum length of the queue
wait_time: time to wait for a new tweet
"""
    listener = StdOutListener(maxlength=maxlength)  # honour the caller-supplied queue size
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, listener)
    # `async` became a reserved word in Python 3.7; tweepy >= 3.6 renamed this
    # keyword argument to `is_async`.
    stream.filter(locations=[-180, -90, 180, 90], is_async=True)  # listens to tweets from all over the world
while True:
if len(listener.queue) > 0:
yield listener.queue.popleft()
else:
sleep(wait_time)
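# Minimal usage sketch (assumes the credential placeholders above are filled
# in): pull a few geotagged English tweets off the queue and print hashtags.
if __name__ == '__main__':
    for i, tweet in enumerate(gettweets()):
        print([tag['text'] for tag in tweet.entities.get('hashtags', [])])
        if i >= 4:
            break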
|
[
"[email protected]"
] | |
5481f6f58c583a5b277d9510ddf791c355253b36
|
4510bbf54e2ca619c3a863f5ca03df6584585402
|
/tfx/components/__init__.py
|
c41bd948dd4197767f194a8a52a9a827be681b74
|
[
"Apache-2.0"
] |
permissive
|
Mdlglobal-atlassian-net/tfx
|
e55f38336d1989ac970b5069c7128097ed86b422
|
37cbbb95c65e1a891045dd13232a7f2a293a7b70
|
refs/heads/master
| 2022-10-02T07:44:41.180873 | 2020-06-01T18:49:15 | 2020-06-01T18:49:53 | 268,607,840 | 0 | 1 |
Apache-2.0
| 2020-06-01T19:01:51 | 2020-06-01T19:01:50 | null |
UTF-8
|
Python
| false | false | 2,189 |
py
|
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Subpackage for TFX components."""
import tensorflow as tf
# Lets component users directly use tfx.components.[...] as aliases.
from tfx.components.bulk_inferrer.component import BulkInferrer
from tfx.components.common_nodes.importer_node import ImporterNode
from tfx.components.common_nodes.resolver_node import ResolverNode
from tfx.components.evaluator.component import Evaluator
from tfx.components.example_gen.big_query_example_gen.component import BigQueryExampleGen
from tfx.components.example_gen.component import FileBasedExampleGen
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.components.example_gen.import_example_gen.component import ImportExampleGen
from tfx.components.example_validator.component import ExampleValidator
from tfx.components.infra_validator.component import InfraValidator
from tfx.components.model_validator.component import ModelValidator
from tfx.components.pusher.component import Pusher
from tfx.components.schema_gen.component import SchemaGen
from tfx.components.statistics_gen.component import StatisticsGen
from tfx.components.trainer.component import Trainer
from tfx.components.transform.component import Transform
# Prevents double logging: TFX and TF use `tf.logging` while Beam uses standard
# logging, and each logging module adds its own handler. The following setting
# stops tf.logging from propagating up to the parent logging handlers. This is
# a global behavior (perhaps thread hostile) which affects all code that uses
# the component library.
tf.get_logger().propagate = False
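# Usage sketch: the aliases above let callers import components from the
# package root instead of the full module path, e.g.
#
#     from tfx.components import CsvExampleGen, StatisticsGen, Trainer
#
# rather than:
#
#     from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen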
|
[
"[email protected]"
] | |
8bb262aeb2c7180ba30a8500f8b6363764b4e797
|
8890925319a25dc3df29f53d0d8125d347680f68
|
/looker_client_31/looker_sdk/user.py
|
211ffb1e3e321c24c8a99f3587927f6b7965c9ad
|
[
"MIT"
] |
permissive
|
ContrastingSounds/looker_sdk_31
|
f5d300ae54aee1cc5a2621b36b49541db24ed248
|
f973434049fff1b605b10086ab8b84f2f62e3489
|
refs/heads/master
| 2020-03-19T20:31:24.785373 | 2018-06-11T09:41:36 | 2018-06-11T09:41:36 | 136,802,021 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 31,338 |
py
|
# coding: utf-8
"""
Experimental Looker API 3.1 Preview
This API 3.1 is in active development. Breaking changes are likely to occur to some API functions in future Looker releases until API 3.1 is officially launched and upgraded to beta status. If you have time and interest to experiment with new or modified services exposed in this embryonic API 3.1, we welcome your participation and feedback! For large development efforts or critical line-of-business projects, we strongly recommend you stick with the API 3.0 while API 3.1 is under construction. # noqa: E501
OpenAPI spec version: 3.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from looker_client_31.looker_sdk.access_filter import AccessFilter # noqa: F401,E501
from looker_client_31.looker_sdk.credentials_api import CredentialsApi # noqa: F401,E501
from looker_client_31.looker_sdk.credentials_api3 import CredentialsApi3 # noqa: F401,E501
from looker_client_31.looker_sdk.credentials_email import CredentialsEmail # noqa: F401,E501
from looker_client_31.looker_sdk.credentials_embed import CredentialsEmbed # noqa: F401,E501
from looker_client_31.looker_sdk.credentials_google import CredentialsGoogle # noqa: F401,E501
from looker_client_31.looker_sdk.credentials_ldap import CredentialsLDAP # noqa: F401,E501
from looker_client_31.looker_sdk.credentials_looker_openid import CredentialsLookerOpenid # noqa: F401,E501
from looker_client_31.looker_sdk.credentials_oidc import CredentialsOIDC # noqa: F401,E501
from looker_client_31.looker_sdk.credentials_saml import CredentialsSaml # noqa: F401,E501
from looker_client_31.looker_sdk.credentials_totp import CredentialsTotp # noqa: F401,E501
from looker_client_31.looker_sdk.session import Session # noqa: F401,E501
class User(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'first_name': 'str',
'last_name': 'str',
'display_name': 'str',
'email': 'str',
'is_disabled': 'bool',
'avatar_url': 'str',
'home_space_id': 'str',
'personal_space_id': 'int',
'embed_group_space_id': 'int',
'access_filters': 'list[AccessFilter]',
'credentials_email': 'CredentialsEmail',
'credentials_totp': 'CredentialsTotp',
'credentials_ldap': 'CredentialsLDAP',
'credentials_google': 'CredentialsGoogle',
'credentials_saml': 'CredentialsSaml',
'credentials_oidc': 'CredentialsOIDC',
'credentials_api': 'CredentialsApi',
'credentials_api3': 'list[CredentialsApi3]',
'credentials_embed': 'list[CredentialsEmbed]',
'credentials_looker_openid': 'CredentialsLookerOpenid',
'sessions': 'list[Session]',
'role_ids': 'list[int]',
'group_ids': 'list[int]',
'presumed_looker_employee': 'bool',
'verified_looker_employee': 'bool',
'looker_versions': 'list[str]',
'ui_state': 'dict(str, str)',
'locale': 'str',
'url': 'str',
'can': 'dict(str, bool)'
}
attribute_map = {
'id': 'id',
'first_name': 'first_name',
'last_name': 'last_name',
'display_name': 'display_name',
'email': 'email',
'is_disabled': 'is_disabled',
'avatar_url': 'avatar_url',
'home_space_id': 'home_space_id',
'personal_space_id': 'personal_space_id',
'embed_group_space_id': 'embed_group_space_id',
'access_filters': 'access_filters',
'credentials_email': 'credentials_email',
'credentials_totp': 'credentials_totp',
'credentials_ldap': 'credentials_ldap',
'credentials_google': 'credentials_google',
'credentials_saml': 'credentials_saml',
'credentials_oidc': 'credentials_oidc',
'credentials_api': 'credentials_api',
'credentials_api3': 'credentials_api3',
'credentials_embed': 'credentials_embed',
'credentials_looker_openid': 'credentials_looker_openid',
'sessions': 'sessions',
'role_ids': 'role_ids',
'group_ids': 'group_ids',
'presumed_looker_employee': 'presumed_looker_employee',
'verified_looker_employee': 'verified_looker_employee',
'looker_versions': 'looker_versions',
'ui_state': 'ui_state',
'locale': 'locale',
'url': 'url',
'can': 'can'
}
def __init__(self, id=None, first_name=None, last_name=None, display_name=None, email=None, is_disabled=None, avatar_url=None, home_space_id=None, personal_space_id=None, embed_group_space_id=None, access_filters=None, credentials_email=None, credentials_totp=None, credentials_ldap=None, credentials_google=None, credentials_saml=None, credentials_oidc=None, credentials_api=None, credentials_api3=None, credentials_embed=None, credentials_looker_openid=None, sessions=None, role_ids=None, group_ids=None, presumed_looker_employee=None, verified_looker_employee=None, looker_versions=None, ui_state=None, locale=None, url=None, can=None): # noqa: E501
"""User - a model defined in Swagger""" # noqa: E501
self._id = None
self._first_name = None
self._last_name = None
self._display_name = None
self._email = None
self._is_disabled = None
self._avatar_url = None
self._home_space_id = None
self._personal_space_id = None
self._embed_group_space_id = None
self._access_filters = None
self._credentials_email = None
self._credentials_totp = None
self._credentials_ldap = None
self._credentials_google = None
self._credentials_saml = None
self._credentials_oidc = None
self._credentials_api = None
self._credentials_api3 = None
self._credentials_embed = None
self._credentials_looker_openid = None
self._sessions = None
self._role_ids = None
self._group_ids = None
self._presumed_looker_employee = None
self._verified_looker_employee = None
self._looker_versions = None
self._ui_state = None
self._locale = None
self._url = None
self._can = None
self.discriminator = None
if id is not None:
self.id = id
if first_name is not None:
self.first_name = first_name
if last_name is not None:
self.last_name = last_name
if display_name is not None:
self.display_name = display_name
if email is not None:
self.email = email
if is_disabled is not None:
self.is_disabled = is_disabled
if avatar_url is not None:
self.avatar_url = avatar_url
if home_space_id is not None:
self.home_space_id = home_space_id
if personal_space_id is not None:
self.personal_space_id = personal_space_id
if embed_group_space_id is not None:
self.embed_group_space_id = embed_group_space_id
if access_filters is not None:
self.access_filters = access_filters
if credentials_email is not None:
self.credentials_email = credentials_email
if credentials_totp is not None:
self.credentials_totp = credentials_totp
if credentials_ldap is not None:
self.credentials_ldap = credentials_ldap
if credentials_google is not None:
self.credentials_google = credentials_google
if credentials_saml is not None:
self.credentials_saml = credentials_saml
if credentials_oidc is not None:
self.credentials_oidc = credentials_oidc
if credentials_api is not None:
self.credentials_api = credentials_api
if credentials_api3 is not None:
self.credentials_api3 = credentials_api3
if credentials_embed is not None:
self.credentials_embed = credentials_embed
if credentials_looker_openid is not None:
self.credentials_looker_openid = credentials_looker_openid
if sessions is not None:
self.sessions = sessions
if role_ids is not None:
self.role_ids = role_ids
if group_ids is not None:
self.group_ids = group_ids
if presumed_looker_employee is not None:
self.presumed_looker_employee = presumed_looker_employee
if verified_looker_employee is not None:
self.verified_looker_employee = verified_looker_employee
if looker_versions is not None:
self.looker_versions = looker_versions
if ui_state is not None:
self.ui_state = ui_state
if locale is not None:
self.locale = locale
if url is not None:
self.url = url
if can is not None:
self.can = can
@property
def id(self):
"""Gets the id of this User. # noqa: E501
Unique Id # noqa: E501
:return: The id of this User. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this User.
Unique Id # noqa: E501
:param id: The id of this User. # noqa: E501
:type: int
"""
self._id = id
@property
def first_name(self):
"""Gets the first_name of this User. # noqa: E501
First name # noqa: E501
:return: The first_name of this User. # noqa: E501
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""Sets the first_name of this User.
First name # noqa: E501
:param first_name: The first_name of this User. # noqa: E501
:type: str
"""
self._first_name = first_name
@property
def last_name(self):
"""Gets the last_name of this User. # noqa: E501
Last name # noqa: E501
:return: The last_name of this User. # noqa: E501
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""Sets the last_name of this User.
Last name # noqa: E501
:param last_name: The last_name of this User. # noqa: E501
:type: str
"""
self._last_name = last_name
@property
def display_name(self):
"""Gets the display_name of this User. # noqa: E501
Full name for display (available only if both first_name and last_name are set) # noqa: E501
:return: The display_name of this User. # noqa: E501
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this User.
Full name for display (available only if both first_name and last_name are set) # noqa: E501
:param display_name: The display_name of this User. # noqa: E501
:type: str
"""
self._display_name = display_name
@property
def email(self):
"""Gets the email of this User. # noqa: E501
EMail address # noqa: E501
:return: The email of this User. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this User.
EMail address # noqa: E501
:param email: The email of this User. # noqa: E501
:type: str
"""
self._email = email
@property
def is_disabled(self):
"""Gets the is_disabled of this User. # noqa: E501
Account has been disabled # noqa: E501
:return: The is_disabled of this User. # noqa: E501
:rtype: bool
"""
return self._is_disabled
@is_disabled.setter
def is_disabled(self, is_disabled):
"""Sets the is_disabled of this User.
Account has been disabled # noqa: E501
:param is_disabled: The is_disabled of this User. # noqa: E501
:type: bool
"""
self._is_disabled = is_disabled
@property
def avatar_url(self):
"""Gets the avatar_url of this User. # noqa: E501
URL for the avatar image (may be generic) # noqa: E501
:return: The avatar_url of this User. # noqa: E501
:rtype: str
"""
return self._avatar_url
@avatar_url.setter
def avatar_url(self, avatar_url):
"""Sets the avatar_url of this User.
URL for the avatar image (may be generic) # noqa: E501
:param avatar_url: The avatar_url of this User. # noqa: E501
:type: str
"""
self._avatar_url = avatar_url
@property
def home_space_id(self):
"""Gets the home_space_id of this User. # noqa: E501
ID string for user's home space # noqa: E501
:return: The home_space_id of this User. # noqa: E501
:rtype: str
"""
return self._home_space_id
@home_space_id.setter
def home_space_id(self, home_space_id):
"""Sets the home_space_id of this User.
ID string for user's home space # noqa: E501
:param home_space_id: The home_space_id of this User. # noqa: E501
:type: str
"""
self._home_space_id = home_space_id
@property
def personal_space_id(self):
"""Gets the personal_space_id of this User. # noqa: E501
ID of user's personal space # noqa: E501
:return: The personal_space_id of this User. # noqa: E501
:rtype: int
"""
return self._personal_space_id
@personal_space_id.setter
def personal_space_id(self, personal_space_id):
"""Sets the personal_space_id of this User.
ID of user's personal space # noqa: E501
:param personal_space_id: The personal_space_id of this User. # noqa: E501
:type: int
"""
self._personal_space_id = personal_space_id
@property
def embed_group_space_id(self):
"""Gets the embed_group_space_id of this User. # noqa: E501
(Embed only) ID of user's group space based on the external_group_id optionally specified during embed user login # noqa: E501
:return: The embed_group_space_id of this User. # noqa: E501
:rtype: int
"""
return self._embed_group_space_id
@embed_group_space_id.setter
def embed_group_space_id(self, embed_group_space_id):
"""Sets the embed_group_space_id of this User.
(Embed only) ID of user's group space based on the external_group_id optionally specified during embed user login # noqa: E501
:param embed_group_space_id: The embed_group_space_id of this User. # noqa: E501
:type: int
"""
self._embed_group_space_id = embed_group_space_id
@property
def access_filters(self):
"""Gets the access_filters of this User. # noqa: E501
Model access filters. # noqa: E501
:return: The access_filters of this User. # noqa: E501
:rtype: list[AccessFilter]
"""
return self._access_filters
@access_filters.setter
def access_filters(self, access_filters):
"""Sets the access_filters of this User.
Model access filters. # noqa: E501
:param access_filters: The access_filters of this User. # noqa: E501
:type: list[AccessFilter]
"""
self._access_filters = access_filters
@property
def credentials_email(self):
"""Gets the credentials_email of this User. # noqa: E501
Email/Password login credentials # noqa: E501
:return: The credentials_email of this User. # noqa: E501
:rtype: CredentialsEmail
"""
return self._credentials_email
@credentials_email.setter
def credentials_email(self, credentials_email):
"""Sets the credentials_email of this User.
Email/Password login credentials # noqa: E501
:param credentials_email: The credentials_email of this User. # noqa: E501
:type: CredentialsEmail
"""
self._credentials_email = credentials_email
@property
def credentials_totp(self):
"""Gets the credentials_totp of this User. # noqa: E501
Two-factor credentials # noqa: E501
:return: The credentials_totp of this User. # noqa: E501
:rtype: CredentialsTotp
"""
return self._credentials_totp
@credentials_totp.setter
def credentials_totp(self, credentials_totp):
"""Sets the credentials_totp of this User.
Two-factor credentials # noqa: E501
:param credentials_totp: The credentials_totp of this User. # noqa: E501
:type: CredentialsTotp
"""
self._credentials_totp = credentials_totp
@property
def credentials_ldap(self):
"""Gets the credentials_ldap of this User. # noqa: E501
LDAP credentials # noqa: E501
:return: The credentials_ldap of this User. # noqa: E501
:rtype: CredentialsLDAP
"""
return self._credentials_ldap
@credentials_ldap.setter
def credentials_ldap(self, credentials_ldap):
"""Sets the credentials_ldap of this User.
LDAP credentials # noqa: E501
:param credentials_ldap: The credentials_ldap of this User. # noqa: E501
:type: CredentialsLDAP
"""
self._credentials_ldap = credentials_ldap
@property
def credentials_google(self):
"""Gets the credentials_google of this User. # noqa: E501
Google auth credentials # noqa: E501
:return: The credentials_google of this User. # noqa: E501
:rtype: CredentialsGoogle
"""
return self._credentials_google
@credentials_google.setter
def credentials_google(self, credentials_google):
"""Sets the credentials_google of this User.
Google auth credentials # noqa: E501
:param credentials_google: The credentials_google of this User. # noqa: E501
:type: CredentialsGoogle
"""
self._credentials_google = credentials_google
@property
def credentials_saml(self):
"""Gets the credentials_saml of this User. # noqa: E501
Saml auth credentials # noqa: E501
:return: The credentials_saml of this User. # noqa: E501
:rtype: CredentialsSaml
"""
return self._credentials_saml
@credentials_saml.setter
def credentials_saml(self, credentials_saml):
"""Sets the credentials_saml of this User.
Saml auth credentials # noqa: E501
:param credentials_saml: The credentials_saml of this User. # noqa: E501
:type: CredentialsSaml
"""
self._credentials_saml = credentials_saml
@property
def credentials_oidc(self):
"""Gets the credentials_oidc of this User. # noqa: E501
OpenID Connect auth credentials # noqa: E501
:return: The credentials_oidc of this User. # noqa: E501
:rtype: CredentialsOIDC
"""
return self._credentials_oidc
@credentials_oidc.setter
def credentials_oidc(self, credentials_oidc):
"""Sets the credentials_oidc of this User.
OpenID Connect auth credentials # noqa: E501
:param credentials_oidc: The credentials_oidc of this User. # noqa: E501
:type: CredentialsOIDC
"""
self._credentials_oidc = credentials_oidc
@property
def credentials_api(self):
"""Gets the credentials_api of this User. # noqa: E501
API user credentials. NO LONGER SUPPORTED. # noqa: E501
:return: The credentials_api of this User. # noqa: E501
:rtype: CredentialsApi
"""
return self._credentials_api
@credentials_api.setter
def credentials_api(self, credentials_api):
"""Sets the credentials_api of this User.
API user credentials. NO LONGER SUPPORTED. # noqa: E501
:param credentials_api: The credentials_api of this User. # noqa: E501
:type: CredentialsApi
"""
self._credentials_api = credentials_api
@property
def credentials_api3(self):
"""Gets the credentials_api3 of this User. # noqa: E501
API 3 credentials # noqa: E501
:return: The credentials_api3 of this User. # noqa: E501
:rtype: list[CredentialsApi3]
"""
return self._credentials_api3
@credentials_api3.setter
def credentials_api3(self, credentials_api3):
"""Sets the credentials_api3 of this User.
API 3 credentials # noqa: E501
:param credentials_api3: The credentials_api3 of this User. # noqa: E501
:type: list[CredentialsApi3]
"""
self._credentials_api3 = credentials_api3
@property
def credentials_embed(self):
"""Gets the credentials_embed of this User. # noqa: E501
Embed credentials # noqa: E501
:return: The credentials_embed of this User. # noqa: E501
:rtype: list[CredentialsEmbed]
"""
return self._credentials_embed
@credentials_embed.setter
def credentials_embed(self, credentials_embed):
"""Sets the credentials_embed of this User.
Embed credentials # noqa: E501
:param credentials_embed: The credentials_embed of this User. # noqa: E501
:type: list[CredentialsEmbed]
"""
self._credentials_embed = credentials_embed
@property
def credentials_looker_openid(self):
"""Gets the credentials_looker_openid of this User. # noqa: E501
LookerOpenID credentials. Used for login by Looker Analysts # noqa: E501
:return: The credentials_looker_openid of this User. # noqa: E501
:rtype: CredentialsLookerOpenid
"""
return self._credentials_looker_openid
@credentials_looker_openid.setter
def credentials_looker_openid(self, credentials_looker_openid):
"""Sets the credentials_looker_openid of this User.
LookerOpenID credentials. Used for login by Looker Analysts # noqa: E501
:param credentials_looker_openid: The credentials_looker_openid of this User. # noqa: E501
:type: CredentialsLookerOpenid
"""
self._credentials_looker_openid = credentials_looker_openid
@property
def sessions(self):
"""Gets the sessions of this User. # noqa: E501
Active sessions # noqa: E501
:return: The sessions of this User. # noqa: E501
:rtype: list[Session]
"""
return self._sessions
@sessions.setter
def sessions(self, sessions):
"""Sets the sessions of this User.
Active sessions # noqa: E501
:param sessions: The sessions of this User. # noqa: E501
:type: list[Session]
"""
self._sessions = sessions
@property
def role_ids(self):
"""Gets the role_ids of this User. # noqa: E501
Array of ids of the roles for this user # noqa: E501
:return: The role_ids of this User. # noqa: E501
:rtype: list[int]
"""
return self._role_ids
@role_ids.setter
def role_ids(self, role_ids):
"""Sets the role_ids of this User.
Array of ids of the roles for this user # noqa: E501
:param role_ids: The role_ids of this User. # noqa: E501
:type: list[int]
"""
self._role_ids = role_ids
@property
def group_ids(self):
"""Gets the group_ids of this User. # noqa: E501
Array of ids of the groups for this user # noqa: E501
:return: The group_ids of this User. # noqa: E501
:rtype: list[int]
"""
return self._group_ids
@group_ids.setter
def group_ids(self, group_ids):
"""Sets the group_ids of this User.
Array of ids of the groups for this user # noqa: E501
:param group_ids: The group_ids of this User. # noqa: E501
:type: list[int]
"""
self._group_ids = group_ids
@property
def presumed_looker_employee(self):
"""Gets the presumed_looker_employee of this User. # noqa: E501
User is identified as an employee of Looker # noqa: E501
:return: The presumed_looker_employee of this User. # noqa: E501
:rtype: bool
"""
return self._presumed_looker_employee
@presumed_looker_employee.setter
def presumed_looker_employee(self, presumed_looker_employee):
"""Sets the presumed_looker_employee of this User.
User is identified as an employee of Looker # noqa: E501
:param presumed_looker_employee: The presumed_looker_employee of this User. # noqa: E501
:type: bool
"""
self._presumed_looker_employee = presumed_looker_employee
@property
def verified_looker_employee(self):
"""Gets the verified_looker_employee of this User. # noqa: E501
User is identified as an employee of Looker who has been verified via Looker corporate authentication # noqa: E501
:return: The verified_looker_employee of this User. # noqa: E501
:rtype: bool
"""
return self._verified_looker_employee
@verified_looker_employee.setter
def verified_looker_employee(self, verified_looker_employee):
"""Sets the verified_looker_employee of this User.
User is identified as an employee of Looker who has been verified via Looker corporate authentication # noqa: E501
:param verified_looker_employee: The verified_looker_employee of this User. # noqa: E501
:type: bool
"""
self._verified_looker_employee = verified_looker_employee
@property
def looker_versions(self):
"""Gets the looker_versions of this User. # noqa: E501
Array of strings representing the Looker versions that this user has used (this only goes back as far as '3.54.0') # noqa: E501
:return: The looker_versions of this User. # noqa: E501
:rtype: list[str]
"""
return self._looker_versions
@looker_versions.setter
def looker_versions(self, looker_versions):
"""Sets the looker_versions of this User.
Array of strings representing the Looker versions that this user has used (this only goes back as far as '3.54.0') # noqa: E501
:param looker_versions: The looker_versions of this User. # noqa: E501
:type: list[str]
"""
self._looker_versions = looker_versions
@property
def ui_state(self):
"""Gets the ui_state of this User. # noqa: E501
Per user dictionary of undocumented state information owned by the Looker UI. # noqa: E501
:return: The ui_state of this User. # noqa: E501
:rtype: dict(str, str)
"""
return self._ui_state
@ui_state.setter
def ui_state(self, ui_state):
"""Sets the ui_state of this User.
Per user dictionary of undocumented state information owned by the Looker UI. # noqa: E501
:param ui_state: The ui_state of this User. # noqa: E501
:type: dict(str, str)
"""
self._ui_state = ui_state
@property
def locale(self):
"""Gets the locale of this User. # noqa: E501
User's preferred locale. User locale takes precedence over Looker's system-wide default locale. Locale determines language of display strings and date and numeric formatting in API responses. Locale string must be a 2 letter language code or a combination of language code and region code: 'en' or 'en-US', for example. # noqa: E501
:return: The locale of this User. # noqa: E501
:rtype: str
"""
return self._locale
@locale.setter
def locale(self, locale):
"""Sets the locale of this User.
User's preferred locale. User locale takes precedence over Looker's system-wide default locale. Locale determines language of display strings and date and numeric formatting in API responses. Locale string must be a 2 letter language code or a combination of language code and region code: 'en' or 'en-US', for example. # noqa: E501
:param locale: The locale of this User. # noqa: E501
:type: str
"""
self._locale = locale
@property
def url(self):
"""Gets the url of this User. # noqa: E501
Link to get this item # noqa: E501
:return: The url of this User. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this User.
Link to get this item # noqa: E501
:param url: The url of this User. # noqa: E501
:type: str
"""
self._url = url
@property
def can(self):
"""Gets the can of this User. # noqa: E501
Operations the current user is able to perform on this object # noqa: E501
:return: The can of this User. # noqa: E501
:rtype: dict(str, bool)
"""
return self._can
@can.setter
def can(self, can):
"""Sets the can of this User.
Operations the current user is able to perform on this object # noqa: E501
:param can: The can of this User. # noqa: E501
:type: dict(str, bool)
"""
self._can = can
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, User):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
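# Usage sketch: the generated model is a plain data holder, e.g.
#
#     user = User(id=1, first_name="Ada", last_name="Lovelace")  # illustrative values
#     assert user.to_dict()["id"] == 1
#     print(user)  # __repr__ delegates to to_str(), i.e. pprint of to_dict()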
|
[
"[email protected]"
] | |
fbfcabf6cfb11d516401f5429f766accc46becaf
|
7a09af404f29389504742a3d5f1727bfbe562750
|
/TrekBot2_WS/build/tf2_tools/catkin_generated/pkg.installspace.context.pc.py
|
f8537ec21dba2e692b461229e9f52497428867f2
|
[
"MIT"
] |
permissive
|
Rafcin/TrekBot
|
4baa2ed93b90920b36adba0b72384ac320d2de01
|
d3dc63e6c16a040b16170f143556ef358018b7da
|
refs/heads/master
| 2020-03-30T02:15:35.361254 | 2018-12-14T03:30:25 | 2018-12-14T03:30:25 | 150,622,252 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 402 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "tf2;tf2_msgs;tf2_ros".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "tf2_tools"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot2_WS/install"
PROJECT_VERSION = "0.6.3"
|
[
"[email protected]"
] | |
34104a73ede55a038ffc76a358918c958c9d3b49
|
b36c05bfa929cac8f956feebba1e7cb2fc550387
|
/d5/d5p2.py
|
ef18fab1691f09b791969af3089469d440ed7028
|
[] |
no_license
|
jabadia/advent-of-code-2017
|
ddc5dd2e141e0620ec7d017d0345cc3807debfbf
|
9e595bf7d074073dde0f85353fe060e6bf147969
|
refs/heads/master
| 2021-09-01T20:15:39.211708 | 2017-12-25T22:33:12 | 2017-12-25T22:33:12 | 115,634,618 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,315 |
py
|
TEST_INPUT = """
0
3
0
1
-3
"""
INPUT = """
0
1
0
0
1
-3
0
0
2
-2
-6
-3
2
-5
-6
-3
-3
0
-8
-12
1
-9
-12
-9
0
-7
-17
-6
-18
-7
-6
-21
-28
-14
-23
-14
-17
-5
-35
-17
-26
-14
1
-27
-19
-40
-32
-44
2
-14
-15
-12
-35
0
-49
-12
-7
-46
-47
-32
-33
-47
-7
-62
-20
-35
-4
-35
-8
-3
-61
-38
-63
-27
-33
-57
-48
-66
-68
-11
-61
-50
-34
-31
-36
-79
-49
-71
1
-34
-65
-61
-91
-12
-21
-82
-85
-51
-89
0
-83
-53
-44
-7
1
-19
-39
-27
-94
-36
-31
-35
-97
-45
-90
-15
-106
-30
-79
-18
-25
-105
-30
-63
-109
-32
-91
-96
-87
-121
-116
-103
-71
-1
-113
-10
-47
-109
-107
-38
-66
-26
-8
-38
-31
-129
-42
-91
-89
-107
-125
-75
-118
-81
-45
-111
-27
-63
-106
-110
-64
-63
-80
-44
-33
-130
-55
-90
-144
-15
-132
-122
-155
-122
-94
-159
-5
-89
-6
-97
-129
-159
-15
-44
-156
-124
-113
-154
-95
-96
-29
-121
-30
-73
-118
-57
-76
-141
-138
-108
-185
-56
-136
-161
-138
-192
2
-126
-12
-39
-60
-125
-149
-193
-146
-116
-101
-16
-207
-122
-92
-204
-42
-112
-28
-93
-96
-57
-136
-19
-36
-107
-170
-19
-20
-96
-229
-59
-172
-58
-89
-31
-57
-223
-37
-189
-43
-135
-90
-150
-22
-152
-243
-37
-231
-112
-57
-168
-30
-77
-162
-181
-176
-202
-138
-206
-183
-190
-257
-181
-47
-23
-248
-114
-98
-77
-143
-168
-166
-30
-155
-237
-51
-113
-243
-41
-142
-231
-139
-20
-190
-262
-142
-238
-200
-270
-113
-35
-296
-146
-205
-129
-198
-68
-139
-56
-196
-133
-16
-229
-258
-91
-63
-249
-274
-156
-273
-182
-166
-115
-154
-296
-115
-89
-120
-201
-44
-287
-8
1
-260
-297
-282
-114
-323
-326
-166
-241
-109
-21
-236
-280
-19
-80
-77
-271
-292
-340
-300
-206
-308
-99
-156
-277
-245
-132
-56
-172
-53
-271
-32
-5
-235
-329
-1
-150
-247
-268
-133
-341
-221
-2
-43
-229
-190
-337
-40
-71
-72
-149
-25
-253
-44
-113
-164
-370
-284
-235
-9
-234
-291
1
-152
-302
-393
-47
-289
-75
-140
-349
-140
-353
-298
-27
-292
-380
-55
-62
-208
-221
-41
-316
-411
-367
-220
-248
-59
-177
-372
-55
-241
-240
-140
-315
-297
-42
-118
-141
-70
-183
-153
-30
-63
-306
-110
-8
-356
-80
-314
-323
-41
-176
-165
-41
-230
-132
-222
-2
-404
-38
-130
2
-16
-141
-136
-336
-245
-6
-348
-172
-267
-208
-291
-285
-67
-219
-216
-136
-325
-27
-382
-242
-50
-284
-149
-454
-336
-346
-293
-402
-76
-324
-219
-336
-24
-446
-123
-185
-196
-295
-173
-400
-137
-414
-14
-104
-62
-252
-17
-398
-490
-440
-89
-347
-101
-142
-228
-301
-396
-320
-52
-508
-122
-436
-311
-344
-240
-434
-220
-197
-31
-295
-44
-452
-269
-430
-373
-409
-438
-365
-13
-241
-418
-20
-24
-141
-1
-148
-307
-63
-423
-254
-8
-438
-326
-19
-135
-109
-394
2
-398
-273
-158
-453
-346
-86
-431
-536
-549
-379
-483
-85
-476
-483
-104
-87
-462
-249
-540
-164
-360
-100
-238
-45
-390
-59
-156
-248
-257
-150
-164
-160
-545
-520
-364
-384
-237
-456
-28
-366
-147
0
-303
-583
-420
-370
-299
-154
-380
-188
-491
-258
-598
-429
-349
-333
-569
-4
-556
-421
-182
-441
-407
-542
-364
-370
-384
1
-529
-45
-319
-395
-279
-160
-575
-193
-25
-565
-548
-445
-266
-304
-361
-348
-303
-159
-39
-75
-437
-608
-622
-556
-108
-343
-283
-68
-632
-393
-68
-140
-126
-531
-87
-519
-334
-56
-70
-275
-247
-370
-439
-118
-497
-630
-594
-612
-541
-161
-646
-397
-100
-284
-313
0
-59
-200
-601
-663
-529
-676
-610
-7
-228
-50
-494
-382
-250
-306
-274
-163
-110
-375
-124
-237
-98
-645
-692
-495
-593
-647
-178
-531
-336
-697
-646
-671
-633
-542
-461
-200
-658
-525
-389
-643
-258
-329
-656
-400
-692
-557
-506
-594
-67
-623
-113
-459
-211
-713
-115
-602
-131
-181
-30
-227
-53
-719
-631
-641
-434
-552
-716
-368
-19
-439
-443
-552
-85
-79
-449
-254
-620
-474
-121
-210
-285
-608
-456
-513
-496
-13
-418
-399
-437
-258
-15
-623
-178
-336
-379
-721
-299
-729
-742
-64
-13
-438
-603
-666
-278
-767
-200
-686
-497
-256
-541
-491
-360
-615
-326
-682
-759
-524
-580
-323
-578
-793
-478
-107
-440
-657
-790
-605
-21
-163
-392
-560
-336
-430
-613
-182
-15
-782
-607
-281
-269
-25
-699
-89
-593
-280
-269
-438
-103
-359
-387
-157
-747
-619
-176
-772
-500
-735
-691
-797
-612
-573
-36
-617
-630
-357
-718
-210
-48
-185
-20
-556
-206
-722
-559
-416
-578
-745
-564
-273
-62
-300
-218
-711
-744
-805
-277
-522
-346
-280
-762
-438
-381
-379
-198
-737
-555
-466
-218
-511
-334
-353
-259
-225
-675
-350
-585
-647
-52
-395
-324
-106
-826
-279
-81
-396
-611
-312
-529
-291
-129
-594
-437
-188
-649
-820
-237
-673
-6
-387
-195
-503
-350
-83
-88
-626
-30
-313
-13
-633
-403
-319
-832
-185
-146
-839
-9
-557
-799
-841
-700
-465
-669
-769
-235
-849
-863
-819
-76
-912
-931
-909
-762
-607
-522
-64
-769
-377
-133
-414
-772
-206
-746
-730
-393
-901
-72
-33
-811
-372
-298
-835
-637
-302
-481
-958
-878
-867
-25
-260
-448
-21
-930
-903
-581
-547
-664
-843
-140
-337
-383
-513
-368
-221
-474
-169
-673
-728
-266
-862
-753
-815
-647
-106
-15
-728
-912
-147
-828
-6
-694
-434
-737
-335
-183
-732
-841
-364
-155
-116
-966
-822
-65
-22
-853
-208
-326
-826
-472
-491
-436
-771
-1009
-98
-401
-915
-275
-574
-313
-884
-648
-935
-94
-326
-553
-744
-723
-782
-719
-175
-868
-190
-153
-48
-218
-414
-721
-715
-995
-991
-575
-264
-70
-366
-381
-130
-409
-817
-258
-1028
-552
-878
-449
-138
-900
-45
-119
-677
-844
-869
-985
-1019
-60
-649
-915
-93
-1053
-121
-631
-156
-332
-193
"""
def solve(input):
code = [int(n.strip()) for n in input.strip().split('\n')]
pc = 0
count = 0
while 0 <= pc < len(code):
next_pc = pc + code[pc]
code[pc] += 1
count += 1
pc = next_pc
return count
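# Trace of TEST_INPUT (offsets 0 3 0 1 -3): pc visits 0 -> 0 -> 1 -> 4 -> 1 and
# then jumps to 5, outside the list; each visited offset is incremented after
# the jump, so the run takes five steps -- matching the asserted answer of 5.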
res = solve(TEST_INPUT)
assert (res == 5)
print(solve(INPUT))
|
[
"[email protected]"
] | |
90d1ba7d7f1065e6b4287dc4f9bdf753dd042573
|
de56d8ffb98bf4ef8336a89f7dea2ba4e4797134
|
/blog/migrations/0005_auto_20200619_0217.py
|
4f935d59ebd5bda644ffa14190f257db3d6f39a6
|
[] |
no_license
|
topdev38/agecny-cms
|
9bb5280646d498592738936599ceb13c87899428
|
ff9f76b06e2e41b82cc2ebf54fbd27a9a22994f3
|
refs/heads/master
| 2023-01-01T23:15:50.436794 | 2020-10-18T03:51:17 | 2020-10-18T03:51:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 840 |
py
|
# Generated by Django 3.0.5 on 2020-06-19 09:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0009_auto_20200619_0205'),
('blog', '0004_auto_20200618_0812'),
]
operations = [
migrations.AddField(
model_name='blogdetailpage',
name='navigation_bar',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.NavigationSnippet'),
),
migrations.AddField(
model_name='blogpage',
name='navigation_bar',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.NavigationSnippet'),
),
]
|
[
"[email protected]"
] | |
425f90eb8552b8eddac49d0942fe278eeb209643
|
85e078ee3ceda5091624233ca19ba42f78747499
|
/LeetCode/binary_tree_tilt.py
|
da123fed23b66a5d89eb908cb09913176514a842
|
[] |
no_license
|
papayetoo/StudyinPython
|
d5e6ec0cff0e97fcc4afc8d846e3658c06eb67c2
|
f686b6e08720ad4d7d57b41d24c63c4bfa64dd90
|
refs/heads/master
| 2021-07-22T04:05:38.993123 | 2021-02-03T14:12:26 | 2021-02-03T14:12:26 | 240,009,852 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 516 |
py
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
    def findTilt(self, root: TreeNode) -> int:
        # Tilt of a node = |sum(left subtree) - sum(right subtree)|; the
        # answer is the total tilt summed over every node in the tree.
        self.total_tilt = 0
        def postorder(node: TreeNode) -> int:
            if not node:
                return 0
            l = postorder(node.left)
            r = postorder(node.right)
            self.total_tilt += abs(l - r)
            return node.val + l + r
        postorder(root)
        return self.total_tilt
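# Quick check on the LeetCode example tree (root 1, children 2 and 3): both
# leaves have tilt 0 and the root has tilt |2 - 3| = 1, so findTilt returns 1.
#
#     root = TreeNode(1, TreeNode(2), TreeNode(3))
#     assert Solution().findTilt(root) == 1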
|
[
"[email protected]"
] | |
fc14e4aaa42ca1b1f9774ae8c9c96ace464ac8e0
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/train/xgboost/xgboost_predictor.py
|
1b319b93b299bc02a5b83a2f1cdcfa1e8fab6e8e
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405 | 2023-08-31T03:20:38 | 2023-08-31T03:20:38 | 71,932,349 | 29,482 | 5,669 |
Apache-2.0
| 2023-09-14T21:48:14 | 2016-10-25T19:38:30 |
Python
|
UTF-8
|
Python
| false | false | 5,718 |
py
|
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
import pandas as pd
import xgboost
from ray.air.constants import TENSOR_COLUMN_NAME
from ray.air.data_batch_type import DataBatchType
from ray.air.util.data_batch_conversion import _unwrap_ndarray_object_type_if_needed
from ray.train.predictor import Predictor
from ray.train.xgboost import XGBoostCheckpoint
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
from ray.data.preprocessor import Preprocessor
@PublicAPI(stability="beta")
class XGBoostPredictor(Predictor):
"""A predictor for XGBoost models.
Args:
model: The XGBoost booster to use for predictions.
preprocessor: A preprocessor used to transform data batches prior
to prediction.
"""
def __init__(
self, model: xgboost.Booster, preprocessor: Optional["Preprocessor"] = None
):
self.model = model
super().__init__(preprocessor)
def __repr__(self):
return (
f"{self.__class__.__name__}(model={self.model!r}, "
f"preprocessor={self._preprocessor!r})"
)
@classmethod
def from_checkpoint(cls, checkpoint: XGBoostCheckpoint) -> "XGBoostPredictor":
"""Instantiate the predictor from a Checkpoint.
This is a helper constructor that instantiates the predictor from a
framework-specific XGBoost checkpoint.
Args:
checkpoint: The checkpoint to load the model and preprocessor from.
"""
model = checkpoint.get_model()
preprocessor = checkpoint.get_preprocessor()
return cls(model=model, preprocessor=preprocessor)
def predict(
self,
data: DataBatchType,
feature_columns: Optional[Union[List[str], List[int]]] = None,
dmatrix_kwargs: Optional[Dict[str, Any]] = None,
**predict_kwargs,
) -> DataBatchType:
"""Run inference on data batch.
The data is converted into an XGBoost DMatrix before being inputted to
the model.
Args:
data: A batch of input data.
feature_columns: The names or indices of the columns in the
data to use as features to predict on. If None, then use
all columns in ``data``.
dmatrix_kwargs: Dict of keyword arguments passed to ``xgboost.DMatrix``.
**predict_kwargs: Keyword arguments passed to ``xgboost.Booster.predict``.
Examples:
.. testcode::
import numpy as np
import xgboost as xgb
from ray.train.xgboost import XGBoostPredictor
train_X = np.array([[1, 2], [3, 4]])
train_y = np.array([0, 1])
model = xgb.XGBClassifier().fit(train_X, train_y)
predictor = XGBoostPredictor(model=model.get_booster())
data = np.array([[1, 2], [3, 4]])
predictions = predictor.predict(data)
# Only use first and second column as the feature
data = np.array([[1, 2, 8], [3, 4, 9]])
predictions = predictor.predict(data, feature_columns=[0, 1])
.. testcode::
import pandas as pd
import xgboost as xgb
from ray.train.xgboost import XGBoostPredictor
train_X = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
train_y = pd.Series([0, 1])
model = xgb.XGBClassifier().fit(train_X, train_y)
predictor = XGBoostPredictor(model=model.get_booster())
# Pandas dataframe.
data = pd.DataFrame([[1, 2], [3, 4]], columns=["A", "B"])
predictions = predictor.predict(data)
# Only use first and second column as the feature
data = pd.DataFrame([[1, 2, 8], [3, 4, 9]], columns=["A", "B", "C"])
predictions = predictor.predict(data, feature_columns=["A", "B"])
Returns:
Prediction result.
"""
return Predictor.predict(
self,
data,
feature_columns=feature_columns,
dmatrix_kwargs=dmatrix_kwargs,
**predict_kwargs,
)
def _predict_pandas(
self,
data: "pd.DataFrame",
feature_columns: Optional[Union[List[str], List[int]]] = None,
dmatrix_kwargs: Optional[Dict[str, Any]] = None,
**predict_kwargs,
) -> "pd.DataFrame":
dmatrix_kwargs = dmatrix_kwargs or {}
feature_names = None
if TENSOR_COLUMN_NAME in data:
data = data[TENSOR_COLUMN_NAME].to_numpy()
data = _unwrap_ndarray_object_type_if_needed(data)
if feature_columns:
# In this case feature_columns is a list of integers
data = data[:, feature_columns]
elif feature_columns:
# feature_columns is a list of integers or strings
data = data[feature_columns].to_numpy()
# Only set the feature names if they are strings
if all(isinstance(fc, str) for fc in feature_columns):
feature_names = feature_columns
else:
feature_columns = data.columns.tolist()
data = data.to_numpy()
if all(isinstance(fc, str) for fc in feature_columns):
feature_names = feature_columns
if feature_names:
dmatrix_kwargs["feature_names"] = feature_names
matrix = xgboost.DMatrix(data, **dmatrix_kwargs)
df = pd.DataFrame(self.model.predict(matrix, **predict_kwargs))
df.columns = (
["predictions"]
if len(df.columns) == 1
else [f"predictions_{i}" for i in range(len(df.columns))]
)
return df
|
[
"[email protected]"
] | |
6e6667cf463f0e2376456bda7fa413e8cc8c9d58
|
45b8e141f762b95edec36ce40809ea4b89e3d287
|
/mahkalastore/home/views.py
|
23c7f82f9021101fb873e62508a747c6d244c294
|
[] |
no_license
|
nimanoori22/mys
|
73d7a0ad141e1c6208e776a15d079a2599c46a7f
|
0122586a4d69f80219ad25e42ef89f3052f5cb81
|
refs/heads/master
| 2022-11-28T22:24:44.947703 | 2020-08-13T14:52:19 | 2020-08-13T14:52:19 | 279,652,903 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,800 |
py
|
from django.shortcuts import render, HttpResponse, HttpResponsePermanentRedirect
from product.models import Product, Category, Images, Comment
from home.models import Setting, ContactForm, ContactMessage
from .forms import SearchForm
from django.contrib import messages
from django.http import HttpResponseRedirect
import json
# Create your views here.
def index(request):
page = "home"
category = Category.objects.all()
setting = Setting.objects.get(pk=1)
products_slider = Product.objects.all().order_by('id')[:4] #first 4 products
products_latest= Product.objects.all().order_by('-id')[:4] #last 4 products
products_picked = Product.objects.all().order_by('?')[:4] #randomly picked
context = {'setting': setting,
'page': page,
'products_slider': products_slider,
'products_latest': products_latest,
'products_picked': products_picked,
'category': category}
return render(request, 'index.html', context=context)
def aboutus(request):
setting = Setting.objects.get(pk=1)
context = {'setting': setting}
return render(request, 'about.html', context=context)
def contact(request):
if request.method == 'POST':
form = ContactForm(request.POST)
if form.is_valid():
data = ContactMessage()
data.name = form.cleaned_data['name']
data.email = form.cleaned_data['email']
data.subject = form.cleaned_data['subject']
data.message = form.cleaned_data['message']
data.ip = request.META.get('REMOTE_ADDR')
data.save()
            messages.success(request, 'Your message has been sent, thank you <3')
            return HttpResponseRedirect('/contact')  # redirect so a page refresh does not re-submit
setting = Setting.objects.get(pk=1)
    form = ContactForm()  # unbound form instance for the GET request
context = {'setting': setting, 'form': form,}
return render(request, 'contact.html', context=context)
def category_product(request, id, slug):
category = Category.objects.all()
products = Product.objects.filter(category_id=id)
context = {
'products': products,
'category': category,
}
return render(request, 'category_product.html', context=context)
def search(request):
if request.method == 'POST':
form = SearchForm(request.POST)
if form.is_valid():
query = form.cleaned_data['query']
catid = form.cleaned_data['catid']
if catid == 0:
products = Product.objects.filter(name__icontains=query)
else:
products = Product.objects.filter(name__icontains=query, category_id=catid)
category = Category.objects.all()
context = {
'products': products,
'query': query,
'category': category,
}
return render(request, 'search_products.html', context=context)
return HttpResponsePermanentRedirect('/')
def search_auto(request):
if request.is_ajax():
q = request.GET.get('term', '')
products = Product.objects.filter(name__icontains=q)
results = []
for rs in products:
product_json = {}
product_json = rs.name
results.append(product_json)
data = json.dumps(results)
else:
data = 'fail'
mimetype = 'application/json'
    return HttpResponse(data, content_type=mimetype)
def product_detail(request, id, slug):
category = Category.objects.all()
product = Product.objects.get(pk=id)
images = Images.objects.filter(product_id=id)
comments = Comment.objects.filter(product_id=id, status='True')
context = {
'product': product,
'category': category,
'mypics': images,
'comments': comments,
}
return render(request, 'product_detail.html', context=context)
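# A matching urls.py sketch -- the URL patterns below are assumptions inferred
# from the redirects and view signatures above, not taken from the project:
#
#     from django.urls import path
#     from home import views
#
#     urlpatterns = [
#         path('', views.index),
#         path('about', views.aboutus),
#         path('contact', views.contact),
#         path('category/<int:id>/<slug:slug>', views.category_product),
#         path('product/<int:id>/<slug:slug>', views.product_detail),
#         path('search', views.search),
#         path('search_auto', views.search_auto),
#     ]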
|
[
"[email protected]"
] | |
d9f06fb6f20f3f1270aaab0aff1acbd04c6ce096
|
d4c82eb9ae3037cf8742c3fc8c31cf4a80f5d21c
|
/examples/Python/examples/Tools/scripts/finddiv.py
|
7f483e293126378754d76f484901c26e6bae0d42
|
[] |
no_license
|
gzhu-team-509/programming-knowledge-base
|
68132b1a669f208dab94dcf2401ce39d89ebe53b
|
3f3d026927157b7fdf210da195cb912366975e75
|
refs/heads/master
| 2021-05-05T12:17:12.686569 | 2017-11-04T07:30:28 | 2017-11-04T07:30:28 | 104,754,220 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,517 |
py
|
#! /usr/bin/python2.6
"""finddiv - a grep-like tool that looks for division operators.
Usage: finddiv [-l] file_or_directory ...
For directory arguments, all files in the directory whose name ends in
.py are processed, and subdirectories are processed recursively.
This actually tokenizes the files to avoid false hits in comments or
strings literals.
By default, this prints all lines containing a / or /= operator, in
grep -n style. With the -l option specified, it prints the filename
of files that contain at least one / or /= operator.
"""
import os
import sys
import getopt
import tokenize
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "lh")
except getopt.error, msg:
usage(msg)
return 2
if not args:
usage("at least one file argument is required")
return 2
listnames = 0
for o, a in opts:
if o == "-h":
print __doc__
return
if o == "-l":
listnames = 1
exit = None
for filename in args:
x = process(filename, listnames)
exit = exit or x
return exit
def usage(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Usage: %s [-l] file ...\n" % sys.argv[0])
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
def process(filename, listnames):
if os.path.isdir(filename):
return processdir(filename, listnames)
try:
fp = open(filename)
except IOError, msg:
sys.stderr.write("Can't open: %s\n" % msg)
return 1
g = tokenize.generate_tokens(fp.readline)
lastrow = None
for type, token, (row, col), end, line in g:
if token in ("/", "/="):
if listnames:
print filename
break
if row != lastrow:
lastrow = row
print "%s:%d:%s" % (filename, row, line),
fp.close()
def processdir(dir, listnames):
try:
names = os.listdir(dir)
except os.error, msg:
sys.stderr.write("Can't list directory: %s\n" % dir)
return 1
files = []
for name in names:
fn = os.path.join(dir, name)
if os.path.normcase(fn).endswith(".py") or os.path.isdir(fn):
files.append(fn)
files.sort(lambda a, b: cmp(os.path.normcase(a), os.path.normcase(b)))
exit = None
for fn in files:
x = process(fn, listnames)
exit = exit or x
return exit
if __name__ == "__main__":
sys.exit(main())
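# Example invocations (Python 2), per the module docstring above:
#
#     $ python finddiv.py mymodule.py   # grep -n style lines containing / or /=
#     $ python finddiv.py -l Lib/       # just list matching files, recursively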
|
[
"[email protected]"
] | |
33925cd42a46e5db54b36a88e09837da3dec21cd
|
888d4ea7e0a61f8c4a2ae602c57799b367b5174f
|
/crab/old/datasets2016FSR.py
|
c31e801ea2719eabf82ba143a8d7b9b580973227
|
[] |
no_license
|
gmandorl/nanoAOD-tools
|
6a95debfb96bb86f55568942dc0b347873b38144
|
2cb041148f99cb41e03d19014657db8ddb5c16cd
|
refs/heads/master
| 2020-07-24T23:36:14.018605 | 2020-06-16T14:59:24 | 2020-06-16T14:59:24 | 208,084,777 | 0 | 1 | null | 2020-03-15T10:00:14 | 2019-09-12T15:34:19 |
Python
|
UTF-8
|
Python
| false | false | 13,936 |
py
|
data2016 = {
"SingleMuonRun2016B" : [
"/SingleMuon/arizzi-RunIIDataJuly18_nanoFSRV2_un2016B-17Jul2018_ver2-v1-57057fb2c24f603844e32924f34ddf42/USER",
],
"SingleMuonRun2016C" : [
"/SingleMuon/arizzi-RunIIDataJuly18_nanoFSRV2_Run2016C-17Jul2018-v1-57057fb2c24f603844e32924f34ddf42/USER",
],
"SingleMuonRun2016D" : [
"/SingleMuon/arizzi-RunIIDataJuly18_nanoFSRV2_Run2016D-17Jul2018-v1-57057fb2c24f603844e32924f34ddf42/USER",
],
"SingleMuonRun2016E" : [
"/SingleMuon/arizzi-RunIIDataJuly18_nanoFSRV2_Run2016E-17Jul2018-v1-57057fb2c24f603844e32924f34ddf42/USER",
],
"SingleMuonRun2016F" : [
"/SingleMuon/arizzi-RunIIDataJuly18_nanoFSRV2_Run2016F-17Jul2018-v1-57057fb2c24f603844e32924f34ddf42/USER",
],
"SingleMuonRun2016G" : [
"/SingleMuon/arizzi-RunIIDataJuly18_nanoFSRV2_Run2016G-17Jul2018-v1-57057fb2c24f603844e32924f34ddf42/USER",
],
"SingleMuonRun2016H" : [
"/SingleMuon/arizzi-RunIIDataJuly18_nanoFSRV2_Run2016H-17Jul2018-v1-57057fb2c24f603844e32924f34ddf42/USER"
],
}
mc2016 = {
#"vbfHmm_FSR_2016POWPY" : [
#"/VBF_HToMuMu_M125_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV1_un2_asymptotic_v3-v2-0ee09caf70c2c8f40e412449c31b45df/USER"
#],
"DY105_2016AMCPY" : [
"/DYJetsToLL_M-105To160_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext1-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"DY105_2016MGPY" : [
"/DYJetsToLL_M-105To160_TuneCP5_PSweights_13TeV-madgraphMLM-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"DY105VBF_2016AMCPY" : [
"/DYJetsToLL_M-105To160_VBFFilter_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext1-v1-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/DYJetsToLL_M-105To160_VBFFilter_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext2-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"DY105VBF_2016MGPY" : [
"/DYJetsToLL_M-105To160_VBFFilter_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"DY0J_2016AMCPY" : [
"/DYToLL_0J_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext1-v1-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/DYToLL_0J_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/DYToLL_0J_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-00000000000000000000000000000000/USER"
],
"DY1J_2016AMCPY" : [
"/DYToLL_1J_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext1-v1-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/DYToLL_1J_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"DY2J_2016AMCPY" : [
"/DYToLL_2J_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
#~ "DYTau_2016AMCPY" : [
#~ ""
#~ ],
#"DYM50_2016AMCPY" : [
#"/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7_ext2-v1/NANOAODSIM"
#],
"EWKZ105_2016MGHERWIG" : [
"/EWK_LLJJ_MLL_105-160_SM_5f_LO_TuneEEC5_13TeV-madgraph-herwigpp/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"EWKZ_2016MGHERWIG" : [
"/EWK_LLJJ_MLL-50_MJJ-120_13TeV-madgraph-herwigpp/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"EWKZ105_2016MGPY" : [
"/EWK_LLJJ_MLL_105-160_SM_5f_LO_TuneCUETP8M1_PSweights_13TeV-madgraph-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"EWKZ_2016MGPY" : [
"/EWK_LLJJ_MLL-50_MJJ-120_13TeV-madgraph-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
#"EWKZint_2016MGPY" : [
#"/LLJJ_INT_SM_5f_LO_13TeV_madgraph-pythia8_TuneCUETP8M1/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7_ext1-v1/NANOAODSIM"
#],
"STs_2016AMCPY" : [
"/ST_s-channel_4f_leptonDecays_13TeV-amcatnlo-pythia8_TuneCUETP8M1/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"STwtbar_2016POWPY" : [
"/ST_tW_antitop_5f_inclusiveDecays_13TeV-powheg-pythia8_TuneCUETP8M2T4/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"STwt_2016POWPY" : [
"/ST_tW_top_5f_inclusiveDecays_13TeV-powheg-pythia8_TuneCUETP8M2T4/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
#"STtbar_2016POW_MADSPIN_PY" : [
#"/ST_t-channel_antitop_4f_inclusiveDecays_13TeV-powhegV2-madspin-pythia8_TuneCUETP8M1/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7-v1/NANOAODSIM"
#],
"STt_2016POW_MADSPIN_PY" : [
"/ST_t-channel_antitop_4f_inclusiveDecays_13TeV-powhegV2-madspin-pythia8_TuneCUETP8M1/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"TT_2016POWPY" : [
"/TT_TuneCUETP8M2T4_13TeV-powheg-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"TTlep_2016POWPY" : [
"/TTTo2L2Nu_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
#"TTsemi_2016POWPY" : [
#"/TTToSemilepton_TuneCUETP8M2_ttHtranche3_13TeV-powheg-pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7-v1/NANOAODSIM"
#],
#"TTlep_2016MGPY" : [
#"/TTJets_DiLept_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7-v1/NANOAODSIM",
#"/TTJets_DiLept_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7_ext1-v1/NANOAODSIM"
#],
#"TThad_2016POWPY" : [
#"/TTToHadronic_TuneCP5_PSweights_13TeV-powheg-pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7-v1/NANOAODSIM"
#],
#"W2J_2016AMCPY" : [
#"/WToLNu_2J_13TeV-amcatnloFXFX-pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7_ext4-v1/NANOAODSIM",
#"/WToLNu_2J_13TeV-amcatnloFXFX-pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7_ext1-v1/NANOAODSIM"
#],
"W1J_2016AMCPY" : [
"/WToLNu_1J_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"W0J_2016AMCPY" : [
"/WToLNu_0J_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext1-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WWdps_2016MGPY" : [
"/WWTo2L2Nu_DoubleScattering_13TeV-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WWJJlnln_2016MGPY" : [
"/WWJJToLNuLNu_EWK_13TeV-madgraph-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
#"WLLJJln_2016MG_MADSPIN_PY" : [
#"/WLLJJ_WToLNu_EWK_TuneCUETP8M1_13TeV_madgraph-madspin-pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7-v1/NANOAODSIM"
#],
"WWJJlnlnNoT_2016MGPY" : [
"/WWJJToLNuLNu_EWK_noTop_13TeV-madgraph-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WW2l2n_2016POWPY" : [
"/WWTo2L2Nu_13TeV-powheg/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WWlnqq_2016AMC_MADSPIN_PY" : [
"/WWTo1L1Nu2Q_13TeV_amcatnloFXFX_madspin_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WZ1l1n2q_2016POWPY" : [
"/WZToLNu2Q_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WZ1l1n2q_2016AMCPY" : [
"/WZTo2Q2Nu_13TeV_amcatnloFXFX_madspin_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WZ1l3n_2016AMCPY" : [
"/WZTo1L3Nu_13TeV_amcatnloFXFX_madspin_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
#"WZ2l2q_2016AMC_MADSPIN_PY" : [
#"/WZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7-v1/NANOAODSIM"
#],
"WZ3l1n_2016POWPY" : [
"/WZTo3LNu_TuneCUETP8M1_13TeV-powheg-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext1-v1-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/WZTo3LNu_TuneCUETP8M1_13TeV-powheg-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WZ3l1n_2016AMCPY" : [
"/WZTo3LNu_TuneCUETP8M1_13TeV-amcatnloFXFX-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"ZZ2l2q_2016POWPY" : [
"/ZZTo2L2Q_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"ZZ2l2q_2016AMCPY" : [
"/ZZTo2L2Q_13TeV_amcatnloFXFX_madspin_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
#"ZZ2l2n_2016POWPY" : [
#"/ZZTo2L2Nu_13TeV_powheg_pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7-v1/NANOAODSIM",
#"/ZZTo2L2Nu_13TeV_powheg_pythia8_ext1/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7-v1/NANOAODSIM"
#],
"ZZ_2016AMCPY": [
"/ZZ_TuneCUETP8M1_13TeV-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WZ_2016AMCPY": [
"/WZ_TuneCUETP8M1_13TeV-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext1-v2-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/WZ_TuneCUETP8M1_13TeV-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WW_2016AMCPY": [
"/WW_TuneCUETP8M1_13TeV-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext1-v2-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/WW_TuneCUETP8M1_13TeV-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"ggHmm_2016AMCPY" : [
"/GluGluHToMuMu_M125_TuneCP5_PSweights_13TeV_amcatnloFXFX_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"ggHmm_2016POWPY" : [
"/GluGluHToMuMu_M-125_TuneCP5_PSweights_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/GluGlu_HToMuMu_M125_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext1-v2-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/GluGlu_HToMuMu_M125_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"vbfHmm_2016POWPY" : [
"/VBF_HToMuMu_M125_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_symptotic_v3_ext1-v2-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/VBF_HToMuMu_M125_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/VBFHToMuMu_M-125_TuneCP5_PSweights_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"vbfHmm_2016POWHERWIG" : [
"/VBFHToMuMu_M-125_TuneEEC5_13TeV-powheg-herwigpp/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"vbfHmm_2016AMCPY" : [
"/VBFHToMuMu_M125_TuneCP5_PSweights_13TeV_amcatnlo_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"vbfHmm_2016AMCHERWIG" : [
"/VBFHToMuMu_M-125_TuneEEC5_13TeV-amcatnlo-herwigpp/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"zHmm_2016POWPY" : [
"/ZH_HToMuMu_M125_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER",
"/ZH_HToMuMu_ZToAll_M125_TuneCP5_PSweights_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"ttHmm_2016POWPY" : [
"/ttHToMuMu_M125_TuneCP5_PSweights_13TeV-powheg-pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WplusHmm_2016POWPY" : [
"/WplusH_HToMuMu_WToAll_M125_TuneCP5_PSweights_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER" ,
"/WPlusH_HToMuMu_M125_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v2-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
"WminusHmm_2016POWPY" : [
"/WminusH_HToMuMu_WToAll_M125_TuneCP5_PSweights_13TeV_powheg_pythia8/arizzi-RunIISummer16MiniAODv3_nanoFSRV2_un2_asymptotic_v3-v1-cc4adcbd1151d12d342e6fce96f3a534/USER"
],
#"vbfHtautau_2016POWPY" : [
#"/VBFHToTauTau_M125_13TeV_powheg_pythia8/RunIISummer16NanoAODv5-PUMoriond17_Nano1June2019_102X_mcRun2_asymptotic_v7-v1/NANOAODSIM"
#],
}
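For illustration, a sketch of how dictionaries like these are commonly iterated when generating one crab task per sample — the request-name convention below is an assumption, not taken from this repository:

# Hypothetical iteration over the sample dictionaries defined above; the
# request-name format is an assumption, not part of the original config.
for label, datasets in list(data2016.items()) + list(mc2016.items()):
    for dataset in datasets:
        primary = dataset.split('/')[1]          # primary dataset name
        request_name = '%s_%s' % (label, primary[:60])
        print('would submit %s for %s' % (request_name, dataset))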
|
[
"[email protected]"
] | |
23094fc7981a2a9ec1933bf16e464be5b94360bb
|
9cd180fc7594eb018c41f0bf0b54548741fd33ba
|
/sdk/python/pulumi_azure_nextgen/network/v20170601/network_watcher.py
|
4acd5c948f6a86e7fb6e05cd645200b151fc7041
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
MisinformedDNA/pulumi-azure-nextgen
|
c71971359450d03f13a53645171f621e200fe82d
|
f0022686b655c2b0744a9f47915aadaa183eed3b
|
refs/heads/master
| 2022-12-17T22:27:37.916546 | 2020-09-28T16:03:59 | 2020-09-28T16:03:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,727 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['NetworkWatcher']
class NetworkWatcher(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
etag: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
network_watcher_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Network watcher in a resource group.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] network_watcher_name: The name of the network watcher.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['etag'] = etag
__props__['id'] = id
__props__['location'] = location
if network_watcher_name is None:
raise TypeError("Missing required property 'network_watcher_name'")
__props__['network_watcher_name'] = network_watcher_name
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['tags'] = tags
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/latest:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20160901:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20161201:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20170301:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20170801:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20170901:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20171001:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20171101:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180101:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180201:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180401:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180601:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180701:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20180801:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20181001:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20181101:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20181201:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190201:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190401:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190601:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190701:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190801:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20190901:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20191101:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20191201:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20200301:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20200401:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20200501:NetworkWatcher"), pulumi.Alias(type_="azure-nextgen:network/v20200601:NetworkWatcher")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(NetworkWatcher, __self__).__init__(
'azure-nextgen:network/v20170601:NetworkWatcher',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'NetworkWatcher':
"""
Get an existing NetworkWatcher resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return NetworkWatcher(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
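A minimal sketch of instantiating this resource from a Pulumi program — the resource, group, and region names are placeholders:

# Hypothetical Pulumi program using the generated resource; the resource,
# group, and region names below are placeholders.
import pulumi
from pulumi_azure_nextgen.network.v20170601 import NetworkWatcher

watcher = NetworkWatcher(
    'example-watcher',
    network_watcher_name='example-watcher',
    resource_group_name='example-rg',
    location='westeurope',
    tags={'env': 'dev'})

pulumi.export('watcher_name', watcher.name)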
|
[
"[email protected]"
] |