File: camera.py (repo: SmartPracticeschool/SPS-4035-Intelligent-Best-Safety-Max-Safety-Rating-Generator-for-Restaurant; Python; UTF-8; no license)

import cv2
import os

import boto3
import datetime
import requests

# Haar cascade for frontal face detection; the XML file must sit next to this script.
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt2.xml")
ds_factor = 0.6  # downscale factor applied before the frame is returned
count = 0  # running total of frames in which no mask label was detected


class VideoCamera(object):
    def __init__(self):
        self.video = cv2.VideoCapture(0)

    def __del__(self):
        self.video.release()

    def get_frame(self):
        global count
        success, image = self.video.read()
        is_success, im_buf_arr = cv2.imencode(".jpg", image)
        image1 = im_buf_arr.tobytes()
        # The original hardcoded (now invalid) AWS credentials here; reading them
        # from the environment keeps secrets out of source control.
        client = boto3.client(
            'rekognition',
            aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
            aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
            aws_session_token=os.environ.get("AWS_SESSION_TOKEN"),
            region_name='us-east-1')
        response = client.detect_custom_labels(
            ProjectVersionArn='arn:aws:rekognition:us-east-1:776969525845:project/Mask-Detection2/version/Mask-Detection2.2020-09-07T23.02.02/1599499928143',
            Image={'Bytes': image1})
        print(response['CustomLabels'])
        if not len(response['CustomLabels']):
            # No "mask" label detected in this frame: bump the counter and report it.
            count = count + 1
            date = str(datetime.datetime.now()).split(" ")[0]
            # The original URL had a stray leading space inside the string.
            url = "https://81ryisfwlc.execute-api.us-east-1.amazonaws.com/apiForMaskCount?date=" + date + "&count=" + str(count)
            resp = requests.get(url)
            with open("countfile.txt", "w") as f:
                f.write(str(count))
        image = cv2.resize(image, None, fx=ds_factor, fy=ds_factor, interpolation=cv2.INTER_AREA)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        face_rects = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in face_rects:
            # Only the first detected face is boxed before the loop exits.
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
            break
        ret, jpeg = cv2.imencode('.jpg', image)
        cv2.imshow('image', image)  # debug display of the annotated frame
        return jpeg.tobytes()
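The class above matches the widely used Flask MJPEG streaming pattern, where `get_frame` returns one encoded JPEG per call. A minimal consumer sketch, assuming Flask is installed and this file is importable as `camera` (the app, route, and `gen` helper are illustrative, not part of the repository dump):

```py
from flask import Flask, Response

from camera import VideoCamera

app = Flask(__name__)


def gen(camera):
    # Yield an endless multipart stream of JPEG frames.
    while True:
        frame = camera.get_frame()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.route('/video_feed')
def video_feed():
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


if __name__ == '__main__':
    app.run(debug=True)
```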
File: Python/Resource/game_state_machine.py (repo: abriggs914/Coding_Practice; Python; UTF-8; no license)
# Class outlining a Game-State-Machine (GSM).
# Version............1.4
# Date........2022-08-03
# Author....Avery Briggs
class GSM:
    def __init__(self, options, name=None, idx=None, max_cycles=None, allow_recycle=True):
        """Game State Machine. Simulates state switches for an object.
        Required: options - list of states.
        Optional: name - GSM name
                  idx - starting index for a state
                  max_cycles - maximum number of cycles allowed
                  allow_recycle - use this to allow for only a single cycle (think generators)"""
        if idx is None:
            idx = 0
        # note: the original tested "(0 < idx < len(options))" here, which rejected
        # every in-range index except 0; the check below is the intended one.
        if not isinstance(idx, int) or not (0 <= idx < len(options)):
            raise TypeError("Error param 'idx' needs to be an integer corresponding to a list index.")
        if not isinstance(options, list) and not isinstance(options, tuple):
            raise TypeError("Error param 'options' needs to be an ordered iterable object. (supported: list, tuple)")
        if len(options) == 0:
            raise ValueError("Error param 'options' needs to have at least 1 element.")
        if max_cycles == 0:
            raise ValueError("Error you can not create a GSM that does not have at least 1 cycle")
        self.name = name
        self.idx = idx
        self.options = options
        self.cycles = 0
        self.prev = self.calc_prev()
        self.max_cycles = -1
        if max_cycles is not None:
            if isinstance(max_cycles, bool) and max_cycles:
                # use this for 1 iteration
                self.max_cycles = 1
            elif isinstance(max_cycles, int):
                self.max_cycles = max_cycles
        self.callbacks = {}
        self.allow_recycle = allow_recycle

    def __iter__(self):
        """Generator of upcoming states. ONLY 1 CYCLE"""
        for op in self.queue():
            yield op

    def calc_prev(self, idx=None):
        """Grab the index immediately before the given index, defaults to current index."""
        idx = self.idx if idx is None else idx
        return (idx - 1) % len(self)

    def __next__(self):
        """Call this like a generator would. Simulates 'walking' states and checks against max_cycles."""
        a = (self.idx - 1) % len(self)
        b = (self.prev + 0) % len(self)
        if a != b:
            # if this is true, then the state index was altered illegally.
            raise ValueError("STOP!!" + "\n" + str(self) + "\n" + "The state index was altered illegally.")
        self.idx += 1
        if self.idx >= len(self):
            self.cycles += 1
            self.restart()
            if not self.can_recycle():
                raise StopIteration(f"Error max cycles have been reached for this GSM object. cycles={self.cycles}")
        new_state = self.state()
        self.callback(new_state)
        self.prev = self.calc_prev()  # call last to act as a check.
        return new_state

    def __len__(self):
        """Return length of states list"""
        return len(self.options)

    def queue(self):
        """List of states in pending order, beginning with the current."""
        rest = self.options[self.idx:]
        if self.can_recycle():
            rest += self.options[:self.idx]
        return rest

    def opposite(self, round_up=False):
        """Viewing options cyclically, return the state opposite to the current. Use round_up to handle odd length state lists."""
        off = 0 if not round_up else len(self) % 2
        return self.options[(self.idx + ((len(self) // 2) + off)) % len(self)]

    def state(self, idx=None):
        """Return the state at a given index. If none given, defaults to own index."""
        return self.options[self.idx] if idx is None else self.options[idx]

    def peek(self, n_ahead=1):
        """Peek ahead to the nth state. Default next state."""
        return self.state((self.idx + n_ahead) % len(self))

    def set_state(self, idx):
        """Jump to a state, given either a state value or a list index."""
        if idx in self.options:
            self.idx = self.options.index(idx)
            self.prev = self.calc_prev()
            return
        if isinstance(idx, int) and not isinstance(idx, bool):
            # booleans are matched by value above, so they are never treated
            # as indices here.
            if -1 < idx < len(self):
                self.idx = idx
                self.prev = self.calc_prev()
                return
        raise ValueError(f"Error param idx is not recognized as a state or an index. idx={idx}, type={type(idx)}")

    def add_state(self, state, idx=None):
        """Add a state. By default appended, but the position can be set with the idx param."""
        if idx is None:
            if isinstance(self.options, list):
                self.options.append(state)
            else:
                self.options = (*self.options, state)
        else:
            if isinstance(self.options, list):
                self.options.insert(idx, state)
            else:
                # the original forgot to unpack the tail slice, nesting a tuple.
                self.options = (*self.options[:idx], state, *self.options[idx:])
        self.prev = self.calc_prev()

    def remove_state(self, state):
        """Remove a state. Beware ValueError."""
        self.unbind_callback(state)
        if isinstance(self.options, list):
            self.options.remove(state)
        else:
            temp = list(self.options)
            temp.remove(state)
            self.options = tuple(temp)
        self.prev = self.calc_prev()

    def bind_callback(self, func, *args, state=None, all_states=False, **kwargs):
        """Bind a callback to a given state; with all_states=True, to every state."""
        state = state if state is not None else self.state()
        if state not in self.options:
            raise KeyError(f"Error unable to bind callback for state '{state}' as it is not a valid state of this GSM.")
        self.callbacks[state] = (func, args, kwargs)
        if all_states:
            for state_ in self.options:
                if state_ != state:
                    self.callbacks[state_] = (func, args, kwargs)

    def unbind_callback(self, state=None):
        """Unbind a callback for a given state, defaults to current state."""
        state = state if state is not None else self.state()
        if state not in self.options:
            raise KeyError(f"Error unable to unbind callback for state '{state}' as it is not a valid state of this GSM.")
        if state not in self.callbacks:
            print(f"No callbacks have been bound to state '{state}' yet.")
            return
        del self.callbacks[state]

    def callback(self, state=None):
        """Call the function associated with a given state, defaults to current state."""
        state = state if state is not None else self.state()
        if state in self.callbacks:
            func, args, kwargs = self.callbacks[state]
            func(*args, **kwargs)

    def restart(self):
        """Restart from idx=0, same cycle."""
        self.idx = 0

    def reset(self):
        """Reset from index=0 and cycle=0."""
        if not self.allow_recycle:
            raise StopIteration("Error this GSM is not allowed to recycle based on init param 'allow_recycle'.")
        self.restart()
        self.cycles = 0

    def can_recycle(self):
        """Can this GSM cycle again or will it raise a StopIteration."""
        return self.allow_recycle and (self.max_cycles < 0 or self.cycles < self.max_cycles - 1)

    def __repr__(self):
        a = f" name={self.name}," if self.name is not None else ""
        b = f", cycle_num/max_cycles={self.cycles} / {self.max_cycles}" if self.max_cycles >= 0 else ""
        r = (self.cycles * len(self)) + self.idx
        f = (self.max_cycles * len(self)) if len(self) != 0 and self.max_cycles != 0 else 1
        p = ("%.2f" % ((100 * r) / f)) + " %"
        c = f", #state_idx/ttl_states={r} / {f} = {p}" if b else ""
        return f"<GSM{a} state={self.state()}, options={self.queue()}{b}{c}>"


class BooleanGSM(GSM):
    # Binary switch
    def __init__(self, name=None, idx=None, max_cycles=None, t_first=True):
        super().__init__(options=[True, False] if t_first else [False, True], name=name, idx=idx, max_cycles=max_cycles)


class YesNoCancelGSM(GSM):
    # Triple state switch
    def __init__(self, name=None, idx=None, max_cycles=None):
        super().__init__(options=["Yes", "No", "Cancel"], name=name, idx=idx, max_cycles=max_cycles)


if __name__ == '__main__':
    def print_hello1():
        print("Hello World!")

    def print_hello2(arg1, arg2, arg3=4):
        print(f"Hello World! arg1={arg1} arg2={arg2} arg3={arg3}")

    gsma = GSM(options=list(range(100)), name="GSMA")
    gsm1 = GSM(options=list(range(100)), name="GSM1")
    gsm2 = BooleanGSM(name="GSM2")
    gsm3 = YesNoCancelGSM(name="GSM3")
    gsm4 = YesNoCancelGSM(max_cycles=True, name="GSM4")
    # add_state / set_state both return None; the loop below just shows they ran.
    to_print = [
        gsm2.add_state("There"),
        gsm2.set_state("There"),
    ]
    for i, test in enumerate(to_print):
        print(f"i: {i}, test=<{test}>")
File: virtual/lib/python3.6/site-packages/social/tests/backends/test_skyrock.py (repo: virginiah894/Instagram-clone; Python; UTF-8; MIT license)

import json
from social.p3 import urlencode
from social.tests.backends.oauth import OAuth1Test


class SkyrockOAuth1Test(OAuth1Test):
    backend_path = 'social.backends.skyrock.SkyrockOAuth'
    user_data_url = 'https://api.skyrock.com/v2/user/get.json'
    expected_username = 'foobar'
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
    })
    user_data_body = json.dumps({
        'locale': 'en_US',
        'city': '',
        'has_blog': False,
        'web_messager_enabled': True,
        'email': '[email protected]',
        'username': 'foobar',
        'firstname': 'Foo',
        'user_url': '',
        'address1': '',
        'address2': '',
        'has_profile': False,
        'allow_messages_from': 'everybody',
        'is_online': False,
        'postalcode': '',
        'lang': 'en',
        'id_user': 10101010,
        'name': 'Bar',
        'gender': 0,
        'avatar_url': 'http://www.skyrock.com/img/avatars/default-0.jpg',
        'nb_friends': 0,
        'country': 'US',
        'birth_date': '1980-06-10'
    })

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
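This class exercises the OAuth1 handshake against the canned HTTP bodies above. A minimal sketch for running just this module, assuming `OAuth1Test` ultimately derives from `unittest.TestCase` (which the base-class import suggests but this dump does not show):

```py
import unittest

# Load only the Skyrock backend tests and run them verbosely.
suite = unittest.defaultTestLoader.loadTestsFromName(
    'social.tests.backends.test_skyrock')
unittest.TextTestRunner(verbosity=2).run(suite)
```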
File: hata/discord/exceptions/error_codes.py (repo: HuyaneMatsu/hata; Python; UTF-8; permissive license)

__all__ = ()
__doc__ = """
The possible JSON error codes received from Discord HTTP API requests.
Import it as `ERROR_CODES`.
Examples
--------
Sending a direct message to a user.
```py
from hata import ERROR_CODES, DiscordException
async def try_send_private_message(client, user, content):
    private_channel = await client.channel_private_create(user)
    try:
        await client.message_create(private_channel, content)
    except DiscordException as err:
        if err.code != ERROR_CODES.cannot_message_user:
            raise
```
Error Codes
-----------
+-------------------------------------------------------------------+-----------+-----------+
| Respective name | Value | Notes |
+===================================================================+===========+===========+
| unknown_account | 10001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_application | 10002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_channel | 10003 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_guild | 10004 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_integration | 10005 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_invite | 10006 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_member | 10007 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_message | 10008 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_overwrite | 10009 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_provider | 10010 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_role | 10011 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_token | 10012 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_user | 10013 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_emoji | 10014 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_webhook | 10015 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_webhook_service | 10016 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_session | 10020 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_approval_form | 10023 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_ban | 10026 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_SKU | 10027 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_store_listing | 10028 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_entitlement | 10029 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_team | 10030 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_lobby | 10031 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_branch | 10032 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_store_directory_layout | 10033 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_redistributable | 10036 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_gift_code | 10038 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_team_member | 10040 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_stream | 10049 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_guild_boost_cooldown | 10050 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_guild_template | 10057 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_discovery_category | 10059 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_sticker | 10060 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_interaction | 10062 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_application_command | 10063 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_voice_state | 10065 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_application_command_permissions | 10066 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_stage | 10067 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_guild_member_verification_form | 10068 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_guild_welcome_screen | 10069 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_scheduled_event | 10070 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_scheduled_event_user | 10071 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_tag                                                       | 10087     | -         |
+-------------------------------------------------------------------+-----------+-----------+
| bots_not_allowed | 20001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| only_bots_allowed | 20002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| RPC_proxy_disallowed | 20003 | - |
+-------------------------------------------------------------------+-----------+-----------+
| explicit_content | 20009 | - |
+-------------------------------------------------------------------+-----------+-----------+
| account_scheduled_for_deletion | 20011 | - |
+-------------------------------------------------------------------+-----------+-----------+
| user_not_authorized_for_application | 20012 | - |
+-------------------------------------------------------------------+-----------+-----------+
| account_disabled | 20013 | - |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_slowmode | 20016 | - |
+-------------------------------------------------------------------+-----------+-----------+
| team_ownership_required | 20018 | - |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_announcement_message_edit | 20022 | - |
+-------------------------------------------------------------------+-----------+-----------+
| under_minimum_age | 20024 | - |
+-------------------------------------------------------------------+-----------+-----------+
| quarantined | 20026 | - |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_channel_write | 20028 | - |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_server_send | 20029 | - |
+-------------------------------------------------------------------+-----------+-----------+
| name_contains_disallowed_word | 20031 | - |
+-------------------------------------------------------------------+-----------+-----------+
| guild_subscription_level_too_low | 20035 | - |
+-------------------------------------------------------------------+-----------+-----------+
| vanity_url_required_for_published_guilds | 20040 | - |
+-------------------------------------------------------------------+-----------+-----------+
| vanity_url_employee_only_guild_disabled | 20044 | - |
+-------------------------------------------------------------------+-----------+-----------+
| vanity_url_requirements_not_met | 20045 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_guilds | 30001 | 100 |
+-------------------------------------------------------------------+-----------+-----------+
| max_friends                                                       | 30002     | 10000     |
+-------------------------------------------------------------------+-----------+-----------+
| max_pins | 30003 | 50 |
+-------------------------------------------------------------------+-----------+-----------+
| max_recipients | 30004 | 10 |
+-------------------------------------------------------------------+-----------+-----------+
| max_roles | 30005 | 250 |
+-------------------------------------------------------------------+-----------+-----------+
| max_used_usernames | 30006 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_webhooks | 30007 | 15 |
+-------------------------------------------------------------------+-----------+-----------+
| max_emojis | 30008 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_reactions | 30010 | 20 |
+-------------------------------------------------------------------+-----------+-----------+
| max_group_channels | 30011 | 10 |
+-------------------------------------------------------------------+-----------+-----------+
| max_channels | 30013 | 500 |
+-------------------------------------------------------------------+-----------+-----------+
| max_attachments | 30015 | 10 |
+-------------------------------------------------------------------+-----------+-----------+
| max_invites | 30016 | 1000 |
+-------------------------------------------------------------------+-----------+-----------+
| max_animated_emojis | 30018 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_guild_members | 30019 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_application_game_SKUs | 30021 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_teams | 30023 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_companies | 30025 | - |
+-------------------------------------------------------------------+-----------+-----------+
| not_enough_guild_members | 30029 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_guild_discovery_category | 30030 | 5 |
+-------------------------------------------------------------------+-----------+-----------+
| guild_has_template | 30031 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_application_commands | 30032 | 100 |
+-------------------------------------------------------------------+-----------+-----------+
| max_thread_participants | 30033 | 1000 |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_daily_application_command_creation | 30034 | 200 |
+-------------------------------------------------------------------+-----------+-----------+
| max_bans | 30035 | 2500~ |
+-------------------------------------------------------------------+-----------+-----------+
| max_ban_fetches | 30037 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_scheduled_events | 30038 | 100 |
+-------------------------------------------------------------------+-----------+-----------+
| max_stickers | 30039 | - |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_prune | 30040 | - |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_widget_update | 30042 | - |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_edit_to_message_older_than_one_hour | 30046 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_pinned_threads_in_forum_channel | 30047 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_forum_channel_tags | 30048 | - |
+-------------------------------------------------------------------+-----------+-----------+
| bitrate_too_high_for_channel_type | 30052 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_premium_emoji | 30056 | 25 |
+-------------------------------------------------------------------+-----------+-----------+
| max_webhooks_of_guilds | 30058 | 100 |
+-------------------------------------------------------------------+-----------+-----------+
| max_blocked_users | 30059 | - |
+-------------------------------------------------------------------+-----------+-----------+
| channels_too_large | 30061 | - |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_resource | 31002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unauthorized | 40001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| email_verification_required | 40002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_private_channel_opening | 40003 | - |
+-------------------------------------------------------------------+-----------+-----------+
| send_message_temporarily_disabled | 40004 | - |
+-------------------------------------------------------------------+-----------+-----------+
| request_too_large | 40005 | - |
+-------------------------------------------------------------------+-----------+-----------+
| feature_disabled | 40006 | - |
+-------------------------------------------------------------------+-----------+-----------+
| user_banned | 40007 | - |
+-------------------------------------------------------------------+-----------+-----------+
| connection_revoked | 40012 | - |
+-------------------------------------------------------------------+-----------+-----------+
| user_in_team | 40024 | - |
+-------------------------------------------------------------------+-----------+-----------+
| team_members_must_be_verified | 40026 | - |
+-------------------------------------------------------------------+-----------+-----------+
| team_invitation_accepted | 40027 | - |
+-------------------------------------------------------------------+-----------+-----------+
| delete_account_transfer_team_ownership | 40028 | - |
+-------------------------------------------------------------------+-----------+-----------+
| user_not_connected_to_voice | 40032 | - |
+-------------------------------------------------------------------+-----------+-----------+
| message_crossposted | 40033 | - |
+-------------------------------------------------------------------+-----------+-----------+
| user_identity_verification_processing | 40035 | - |
+-------------------------------------------------------------------+-----------+-----------+
| user_identity_verification_succeeded | 40036 | - |
+-------------------------------------------------------------------+-----------+-----------+
| application_name_used | 40041 | - |
+-------------------------------------------------------------------+-----------+-----------+
| interaction_failed_to_send | 40043 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_send_message_to_forum_channel | 40058 | - |
+-------------------------------------------------------------------+-----------+-----------+
| interaction_already_acknowledged | 40060 | - |
+-------------------------------------------------------------------+-----------+-----------+
| tag_name_not_unique | 40061 | - |
+-------------------------------------------------------------------+-----------+-----------+
| rate_limit_service_resource | 40062 | - |
+-------------------------------------------------------------------+-----------+-----------+
| no_tags_available_for_non_moderators | 40066 | - |
+-------------------------------------------------------------------+-----------+-----------+
| tag_required | 40067 | - |
+-------------------------------------------------------------------+-----------+-----------+
| user_quarantined | 40068 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invites_disabled | 40069 | - |
+-------------------------------------------------------------------+-----------+-----------+
| missing_access | 50001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_account_type | 50002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_action_for_private_channel | 50003 | - |
+-------------------------------------------------------------------+-----------+-----------+
| widget_disabled | 50004 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_edit_message_of_other_user | 50005 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_create_empty_message | 50006 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_message_user | 50007 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_send_message_to_non_text_channel | 50008 | - |
+-------------------------------------------------------------------+-----------+-----------+
| channel_verification_level_too_high | 50009 | - |
+-------------------------------------------------------------------+-----------+-----------+
| oauth2_application_has_no_bot | 50010 | - |
+-------------------------------------------------------------------+-----------+-----------+
| oauth2_application_limit_reached | 50011 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_oauth2_state | 50012 | - |
+-------------------------------------------------------------------+-----------+-----------+
| missing_permissions | 50013 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_token | 50014 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_note | 50015 | - |
+-------------------------------------------------------------------+-----------+-----------+
| bulk_delete_amount_out_of_range | 50016 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_MFA_level | 50017 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_password | 50018 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_pin_message_in_different_channel | 50019 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invite_code_invalid_or_taken | 50020 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_action_for_system_message | 50021 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_phone_number | 50022 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_client_id | 50023 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_action_for_this_channel_type | 50024 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_oauth2_access_token | 50025 | - |
+-------------------------------------------------------------------+-----------+-----------+
| missing_oauth2_scope | 50026 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_webhook_token | 50027 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_role | 50028 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_recipients | 50033 | - |
+-------------------------------------------------------------------+-----------+-----------+
| bulk_delete_message_too_old | 50034 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_form_body | 50035 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_add_user_to_guild_where_bot_is_not_in | 50036 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_activity_action | 50039 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_oauth2_redirect_url | 50040 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_API_version | 50041 | - |
+-------------------------------------------------------------------+-----------+-----------+
| asset_size_too_large | 50045 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_asset | 50046 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_payment_source | 50048 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_application_name | 50050 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_gift_redemption_owned | 50051 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_self_redeem_this_gift | 50054 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_guild | 50055 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_request_origin | 50067 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_message_type | 50068 | - |
+-------------------------------------------------------------------+-----------+-----------+
| payment_source_required_to_redeem_gift | 50070 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_edit_system_webhook                                        | 50073     | -         |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_delete_community_channel | 50074 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_edit_sticker_within_message | 50080 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_sticker_sent | 50081 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_action_for_archived_thread | 50083 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_thread_notification_setting | 50084 | - |
+-------------------------------------------------------------------+-----------+-----------+
| before_value_earlier_than_creation_time | 50085 | - |
+-------------------------------------------------------------------+-----------+-----------+
| community_and_rules_channel_cannot_be_changed_to_announcement | 50086 | - |
+-------------------------------------------------------------------+-----------+-----------+
| event_entity_type_different_from_the_entitys | 50091 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_country_code | 50095 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_friend_self | 50096 | - |
+-------------------------------------------------------------------+-----------+-----------+
| guild_monetization_required | 50097 | - |
+-------------------------------------------------------------------+-----------+-----------+
| not_enough_guild_boosters | 50101 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_user_settings_data | 50105 | - |
+-------------------------------------------------------------------+-----------+-----------+
| activity_launch_no_access | 50106 | - |
+-------------------------------------------------------------------+-----------+-----------+
| activity_launch_premium_tier | 50107 | - |
+-------------------------------------------------------------------+-----------+-----------+
| activity_launch_concurrent_activities | 50108 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_json | 50109 | - |
+-------------------------------------------------------------------+-----------+-----------+
| failed_to_resize_asset_below_max_size | 50138 | 262144 |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_mix_subscription_and_non_subscription_roles_for_an_emoji | 50144 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_convert_emoji_between_premium_and_non_premium | 50145 | - |
+-------------------------------------------------------------------+-----------+-----------+
| upload_file_not_found | 50146 | - |
+-------------------------------------------------------------------+-----------+-----------+
| activity_launch_afk_channel                                       | 50148     | -         |
+-------------------------------------------------------------------+-----------+-----------+
| feature_not_yet_rolled_out | 50155 | - |
+-------------------------------------------------------------------+-----------+-----------+
| voice_message_not_supports_additional_content | 50159 | - |
+-------------------------------------------------------------------+-----------+-----------+
| voice_message_must_have_one_audio_attachment | 50160 | - |
+-------------------------------------------------------------------+-----------+-----------+
| voice_message_must_have_supporting_metadata | 50161 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_edit_voice_message | 50162 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_delete_guild_subscription_integration | 50163 | - |
+-------------------------------------------------------------------+-----------+-----------+
| new_owner_ineligible_for_subscription | 50164 | - |
+-------------------------------------------------------------------+-----------+-----------+
| activity_launch_age_gated | 50165 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_send_voice_message_to_this_channel | 50173 | - |
+-------------------------------------------------------------------+-----------+-----------+
| MFA_enabled | 60001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| MFA_disabled | 60002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| MFA_required | 60003 | - |
+-------------------------------------------------------------------+-----------+-----------+
| MFA_unverified | 60004 | - |
+-------------------------------------------------------------------+-----------+-----------+
| MFA_invalid_secret | 60005 | - |
+-------------------------------------------------------------------+-----------+-----------+
| MFA_invalid_ticket | 60006 | - |
+-------------------------------------------------------------------+-----------+-----------+
| MFA_invalid_code | 60008 | - |
+-------------------------------------------------------------------+-----------+-----------+
| MFA_invalid_session | 60009 | - |
+-------------------------------------------------------------------+-----------+-----------+
| phone_number_unable_to_send | 70003 | - |
+-------------------------------------------------------------------+-----------+-----------+
| phone_verification_required | 70007 | - |
+-------------------------------------------------------------------+-----------+-----------+
| relationship_incoming_disabled | 80000 | - |
+-------------------------------------------------------------------+-----------+-----------+
| relationship_incoming_blocked | 80001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| relationship_invalid_target_bot | 80002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| relationship_invalid_target_self | 80003 | - |
+-------------------------------------------------------------------+-----------+-----------+
| relationship_invalid_discord_tag | 80004 | - |
+-------------------------------------------------------------------+-----------+-----------+
| relationship_already_friends | 80007 | - |
+-------------------------------------------------------------------+-----------+-----------+
| reaction_blocked | 90001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| user_cannot_burst_react | 90002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_billing_profile | 100001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_payment_source | 100002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| unknown_subscriptions | 100003 | - |
+-------------------------------------------------------------------+-----------+-----------+
| already_subscribed | 100004 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_plan | 100005 | - |
+-------------------------------------------------------------------+-----------+-----------+
| payment_source_required | 100006 | - |
+-------------------------------------------------------------------+-----------+-----------+
| already_cancelled | 100007 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_payment | 100008 | - |
+-------------------------------------------------------------------+-----------+-----------+
| already_refunded | 100009 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_billing_address | 100010 | - |
+-------------------------------------------------------------------+-----------+-----------+
| already_purchased | 100011 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_gift_redemption_subscription_managed | 100021 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_gift_redemption_subscription_incompatible | 100023 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_gift_redemption_invoice_open | 100024 | - |
+-------------------------------------------------------------------+-----------+-----------+
| negative_invoice_amount | 100027 | - |
+-------------------------------------------------------------------+-----------+-----------+
| authentication_required | 100029 | - |
+-------------------------------------------------------------------+-----------+-----------+
| subscription_renewal_in_progress | 100042 | - |
+-------------------------------------------------------------------+-----------+-----------+
| confirmation_required | 100047 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_currency_for_payment_source | 100051 | - |
+-------------------------------------------------------------------+-----------+-----------+
| ineligible_for_subscription | 100053 | - |
+-------------------------------------------------------------------+-----------+-----------+
| card_declined | 100054 | - |
+-------------------------------------------------------------------+-----------+-----------+
| purchase_token_authorization_required | 100056 | - |
+-------------------------------------------------------------------+-----------+-----------+
| billing_non_refundable_payment_source | 100060 | - |
+-------------------------------------------------------------------+-----------+-----------+
| application_not_yet_available | 110001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| listing_already_joined | 120000 | - |
+-------------------------------------------------------------------+-----------+-----------+
| listing_too_many_member | 120001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| listing_join_blocked | 120002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| resource_overloaded | 130000 | - |
+-------------------------------------------------------------------+-----------+-----------+
| stage_already_open | 150006 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_reply_without_read_message_history_permission | 160002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| message_has_thread | 160004 | - |
+-------------------------------------------------------------------+-----------+-----------+
| thread_locked | 160005 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_active_threads | 160006 | - |
+-------------------------------------------------------------------+-----------+-----------+
| max_active_announcement_threads | 160007 | - |
+-------------------------------------------------------------------+-----------+-----------+
| invalid_lottie_json | 170001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| sticker_maximum_dimensions_exceeded | 170005 | 320x320 |
+-------------------------------------------------------------------+-----------+-----------+
| sticker_frame_rate_out_of_expected_range | 170006 | ?-400 ms |
+-------------------------------------------------------------------+-----------+-----------+
| sticker_animation_duration_exceeds_five_second | 170007 | - |
+-------------------------------------------------------------------+-----------+-----------+
| poggermode_temporarily_disabled | 170008 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_update_finished_scheduled_event | 180000 | - |
+-------------------------------------------------------------------+-----------+-----------+
| exactly_one_guild_id_parameter_is_required | 180001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| failed_to_create_stage_needed_for_scheduled_event | 180002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| privacy_policy_required | 190001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| terms_of_service_required | 190002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| auto_moderation_message_blocked | 200000 | - |
+-------------------------------------------------------------------+-----------+-----------+
| auto_moderation_title_blocked | 200001 | - |
+-------------------------------------------------------------------+-----------+-----------+
| auto_moderation_invalid_regex | 200002 | - |
+-------------------------------------------------------------------+-----------+-----------+
| webhook_can_create_thread_only_in_forum_channel | 220003 | - |
+-------------------------------------------------------------------+-----------+-----------+
| harmful_link_message_blocked | 240000 | - |
+-------------------------------------------------------------------+-----------+-----------+
| clyde_consent_required | 310000 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_enable_onboarding_requirements_not_met | 350000 | - |
+-------------------------------------------------------------------+-----------+-----------+
| cannot_update_onboarding_requirements_not_met | 350001 | - |
+-------------------------------------------------------------------+-----------+-----------+
"""
unknown_account = 10001
unknown_application = 10002
unknown_channel = 10003
unknown_guild = 10004
unknown_integration = 10005
unknown_invite = 10006
unknown_member = 10007
unknown_message = 10008
unknown_overwrite = 10009
unknown_provider = 10010
unknown_role = 10011
unknown_token = 10012
unknown_user = 10013
unknown_emoji = 10014
unknown_webhook = 10015
unknown_webhook_service = 10016
unknown_session = 10020
unknown_approval_form = 10023
unknown_ban = 10026
unknown_SKU = 10027
unknown_store_listing = 10028
unknown_entitlement = 10029
unknown_team = 10030
unknown_lobby = 10031
unknown_branch = 10032
unknown_store_directory_layout = 10033
unknown_redistributable = 10036
unknown_gift_code = 10038
unknown_team_member = 10040
unknown_stream = 10049
unknown_guild_boost_cooldown = 10050
unknown_guild_template = 10057
unknown_discovery_category = 10059
unknown_sticker = 10060
unknown_interaction = 10062
unknown_application_command = 10063
unknown_voice_state = 10065
unknown_application_command_permissions = 10066
unknown_stage = 10067
unknown_guild_member_verification_form = 10068
unknown_guild_welcome_screen = 10069
unknown_scheduled_event = 10070
unknown_scheduled_event_user = 10071
unknown_tag = 10087
bots_not_allowed = 20001
only_bots_allowed = 20002
RPC_proxy_disallowed = 20003
explicit_content = 20009
account_scheduled_for_deletion = 20011
user_not_authorized_for_application = 20012
account_disabled = 20013
rate_limit_slowmode = 20016
team_ownership_required = 20018
rate_limit_announcement_message_edit = 20022
under_minimum_age = 20024
quarantined = 20026
rate_limit_channel_write = 20028
rate_limit_server_send = 20029
name_contains_disallowed_word = 20031
guild_subscription_level_too_low = 20035
vanity_url_required_for_published_guilds = 20040
vanity_url_employee_only_guild_disabled = 20044
vanity_url_requirements_not_met = 20045
max_guilds = 30001 # 100
max_friends = 30002 # 10000
max_pins = 30003 # 50
max_recipients = 30004 # 10
max_roles = 30005 # 250
max_used_usernames = 30006
max_webhooks = 30007 # 15
max_emojis = 30008
max_reactions = 30010 # 20
max_group_channels = 30011 # 10
max_channels = 30013 # 500
max_attachments = 30015 # 10
max_invites = 30016 # 1000
max_animated_emojis = 30018
max_guild_members = 30019
max_application_game_SKUs = 30021
max_teams = 30023
max_companies = 30025
not_enough_guild_members = 30029
max_guild_discovery_category = 30030 # 5
guild_has_template = 30031
max_application_commands = 30032
max_thread_participants = 30033
rate_limit_daily_application_command_creation = 30034
max_bans = 30035
max_ban_fetches = 30037
max_scheduled_events = 30038
max_stickers = 30039
rate_limit_prune = 30040
rate_limit_widget_update = 30042
rate_limit_edit_to_message_older_than_one_hour = 30046
max_pinned_threads_in_forum_channel = 30047
max_forum_channel_tags = 30048
bitrate_too_high_for_channel_type = 30052
max_premium_emoji = 30056
max_webhooks_of_guilds = 30058
max_blocked_users = 30059
channels_too_large = 30061
rate_limit_resource = 31002
unauthorized = 40001
email_verification_required = 40002
rate_limit_private_channel_opening = 40003
send_message_temporarily_disabled = 40004
request_too_large = 40005
feature_disabled = 40006
user_banned = 40007
connection_revoked = 40012
user_in_team = 40024
team_members_must_be_verified = 40026
team_invitation_accepted = 40027
delete_account_transfer_team_ownership = 40028
user_not_connected_to_voice = 40032
message_crossposted = 40033
user_identity_verification_processing = 40035
user_identity_verification_succeeded = 40036
application_name_used = 40041
interaction_failed_to_send = 40043
cannot_send_message_to_forum_channel = 40058
interaction_already_acknowledged = 40060
tag_name_not_unique = 40061
rate_limit_service_resource = 40062
no_tags_available_for_non_moderators = 40066
tag_required = 40067
user_quarantined = 40068
invites_disabled = 40069
missing_access = 50001
invalid_account_type = 50002
invalid_action_for_private_channel = 50003
widget_disabled = 50004
cannot_edit_message_of_other_user = 50005
cannot_create_empty_message = 50006
cannot_message_user = 50007
cannot_send_message_to_non_text_channel = 50008
channel_verification_level_too_high = 50009
oauth2_application_has_no_bot = 50010
oauth2_application_limit_reached = 50011
invalid_oauth2_state = 50012
missing_permissions = 50013
invalid_token = 50014
invalid_note = 50015
bulk_delete_amount_out_of_range = 50016
invalid_MFA_level = 50017
invalid_password = 50018
cannot_pin_message_in_different_channel = 50019
invite_code_invalid_or_taken = 50020
invalid_action_for_system_message = 50021
invalid_phone_number = 50022
invalid_client_id = 50023
invalid_action_for_this_channel_type = 50024
invalid_oauth2_access_token = 50025
missing_oauth2_scope = 50026
invalid_webhook_token = 50027
invalid_role = 50028
invalid_recipients = 50033
bulk_delete_message_too_old = 50034
invalid_form_body = 50035
cannot_add_user_to_guild_where_bot_is_not_in = 50036
invalid_activity_action = 50039
invalid_oauth2_redirect_url = 50040
invalid_API_version = 50041
asset_size_too_large = 50045
invalid_asset = 50046
invalid_payment_source = 50048
invalid_application_name = 50050
invalid_gift_redemption_owned = 50051
cannot_self_redeem_this_gift = 50054
invalid_guild = 50055
invalid_request_origin = 50067
invalid_message_type = 50068
payment_source_required_to_redeem_gift = 50070
cannot_edit_system_webhook = 50073
cannot_delete_community_channel = 50074
cannot_edit_sticker_within_message = 50080
invalid_sticker_sent = 50081
invalid_action_for_archived_thread = 50083
invalid_thread_notification_setting = 50084
before_value_earlier_than_creation_time = 50085
community_and_rules_channel_cannot_be_changed_to_announcement = 50086
event_entity_type_different_from_the_entitys = 50091
invalid_country_code = 50095
cannot_friend_self = 50096
guild_monetization_required = 50097
not_enough_guild_boosters = 50101
invalid_user_settings_data = 50105
activity_launch_no_access = 50106
activity_launch_premium_tier = 50107
activity_launch_concurrent_activities = 50108
invalid_json = 50109
failed_to_resize_asset_below_max_size = 50138
cannot_mix_subscription_and_non_subscription_roles_for_an_emoji = 50144
cannot_convert_emoji_between_premium_and_non_premium = 50145
upload_file_not_found = 50146
activity_launch_afk_channel = 50148
feature_not_yet_rolled_out = 50155
voice_message_not_supports_additional_content = 50159
voice_message_must_have_one_audio_attachment = 50160
voice_message_must_have_supporting_metadata = 50161
cannot_edit_voice_message = 50162
cannot_delete_guild_subscription_integration = 50163
new_owner_ineligible_for_subscription = 50164
activity_launch_age_gated = 50165
cannot_send_voice_message_to_this_channel = 50173
MFA_enabled = 60001
MFA_disabled = 60002
MFA_required = 60003
MFA_unverified = 60004
MFA_invalid_secret = 60005
MFA_invalid_ticket = 60006
MFA_invalid_code = 60008
MFA_invalid_session = 60009
phone_number_unable_to_send = 70003
phone_verification_required = 70007
relationship_incoming_disabled = 80000
relationship_incoming_blocked = 80001
relationship_invalid_target_bot = 80002
relationship_invalid_target_self = 80003
relationship_invalid_discord_tag = 80004
relationship_already_friends = 80007
reaction_blocked = 90001
user_cannot_burst_react = 90002
unknown_billing_profile = 100001
unknown_payment_source = 100002
unknown_subscriptions = 100003
already_subscribed = 100004
invalid_plan = 100005
payment_source_required = 100006
already_cancelled = 100007
invalid_payment = 100008
already_refunded = 100009
invalid_billing_address = 100010
already_purchased = 100011
invalid_gift_redemption_subscription_managed = 100021
invalid_gift_redemption_subscription_incompatible = 100023
invalid_gift_redemption_invoice_open = 100024
negative_invoice_amount = 100027
authentication_required = 100029
subscription_renewal_in_progress = 100042
confirmation_required = 100047
invalid_currency_for_payment_source = 100051
ineligible_for_subscription = 100053
card_declined = 100054
purchase_token_authorization_required = 100056
billing_non_refundable_payment_source = 100060
application_not_yet_available = 110001
listing_already_joined = 120000
listing_too_many_member = 120001
listing_join_blocked = 120002
resource_overloaded = 130000
stage_already_open = 150006
cannot_reply_without_read_message_history_permission = 160002
message_has_thread = 160004
thread_locked = 160005
max_active_threads = 160006
max_active_announcement_threads = 160007
invalid_lottie_json = 170001
sticker_maximum_dimensions_exceeded = 170005
sticker_frame_rate_out_of_expected_range = 170006
sticker_animation_duration_exceeds_five_second = 170007
poggermode_temporarily_disabled = 170008
cannot_update_finished_scheduled_event = 180000
exactly_one_guild_id_parameter_is_required = 180001
failed_to_create_stage_needed_for_scheduled_event = 180002
privacy_policy_required = 190001
terms_of_service_required = 190002
auto_moderation_message_blocked = 200000
auto_moderation_title_blocked = 200001
auto_moderation_invalid_regex = 200002
webhook_can_create_thread_only_in_forum_channel = 220003
harmful_link_message_blocked = 240000
clyde_consent_required = 310000
cannot_enable_onboarding_requirements_not_met = 350000
cannot_update_onboarding_requirements_not_met = 350001
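# Example: an HTTP error payload carrying {"code": 50013} corresponds to
# missing_permissions above.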
| [
"[email protected]"
] | |
d8fd4caa1881767fdbdb3243b826d95602368b79 | 246e9200a834261eebcf1aaa54da5080981a24ea | /project-euler/26-50/quadratic-primes.py | 687b00c3053fa2d1e6a26428025f06a178f6a92c | [] | no_license | kalsotra2001/practice | db435514b7b57ce549b96a8baf64fad8f579da18 | bbc8a458718ad875ce5b7caa0e56afe94ae6fa68 | refs/heads/master | 2021-12-15T20:48:21.186658 | 2017-09-07T23:01:56 | 2017-09-07T23:01:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | from math import sqrt
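# Project Euler 27: find the product of the coefficients a and b for which
# the quadratic n**2 + a*n + b yields the longest run of consecutive primes.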
def prime(n):
if n < 2:
return False
if n == 2:
return True
else:
for div in range(2, int(sqrt(n)) + 1):
if n % div == 0:
return False
return True
max_primes = 0
product = 0
for i in range(-999, 1001):
for j in range(-999, 1001):
n = 0
while True:
s = n ** 2 + i * n + j
if prime(s) == False:
break
if n > max_primes:
max_primes = n
product = i * j
n += 1
print product | [
"[email protected]"
] | |
c296bcf5d763803370519dbc7b0cfa134d9b4fc7 | fd3f0fdc6af4d0b0205a70b7706caccab2c46dc0 | /0x08-python-more_classes/1-rectangle.py | 89807a014a51f03ba7255d4d66673efba41e72ac | [] | no_license | Maynot2/holbertonschool-higher_level_programming | b41c0454a1d27fe34596fe4aacadf6fc8612cd23 | 230c3df96413cd22771d1c1b4c344961b4886a61 | refs/heads/main | 2023-05-04T05:43:19.457819 | 2021-05-12T14:51:56 | 2021-05-12T14:51:56 | 319,291,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | #!/usr/bin/python3
"""This module contains geometric shape classe(s)"""
class Rectangle:
"""Simulates a real world rectangle"""
def __init__(self, width=0, height=0):
"""Initialises a rectangle of a given width and height"""
self.width = width
self.height = height
@property
def width(self):
"""Retrieves the width"""
return self.__width
@width.setter
def width(self, size):
"""Sets the width"""
if not isinstance(size, int):
raise TypeError('width must be an integer')
if size < 0:
raise ValueError('width must be >= 0')
self.__width = size
@property
def height(self):
"""Retrieves the height"""
return self.__height
@height.setter
def height(self, size):
"""Sets the height"""
if not isinstance(size, int):
raise TypeError('height must be an integer')
if size < 0:
raise ValueError('height must be >= 0')
self.__height = size
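
# Minimal usage sketch (illustrative, not part of the original task file):
#
#     r = Rectangle(3, 4)
#     r.width = 5      # accepted by the validating setter
#     r.height = -1    # raises ValueError('height must be >= 0')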
| [
"[email protected]"
] | |
5c6620edec199d9f5d5a8418a074133844d17c7c | e34d69f33d9bf3d9de99343ba24ad78bc5197a93 | /scripts/cmp_lj_sync | f25d123129b22ec54a527691be2a502d7f0f1e29 | [] | no_license | cms-ttH/ttH-TauRoast | 8e8728a49d02d9e8d7dc119376a4aefb6e8fd77d | 3fe6529d7270dc091db00f95997ca6add8b95ac9 | refs/heads/master | 2021-01-24T06:13:06.485445 | 2017-10-11T14:04:05 | 2017-10-11T14:04:05 | 10,819,593 | 2 | 5 | null | 2016-09-15T07:19:20 | 2013-06-20T12:46:59 | Python | UTF-8 | Python | false | false | 656 | #!/usr/bin/env python
import sys
def read(fn):
evts = {}
with open(fn) as f:
for line in f:
if not line.startswith('1'):
continue
run, lumi, event, stub = line.split(',', 3)
evts[(run, lumi, event)] = stub
return evts
me = read(sys.argv[1])
kit = read(sys.argv[2])
mkeys = set(me.keys())
kkeys = set(kit.keys())
for k in mkeys - kkeys:
print "me", ",".join(list(k) + [me[k]]).strip()
for k in kkeys - mkeys:
print "kit", ",".join(list(k) + [kit[k]]).strip()
print len(mkeys - kkeys), "events unique in first file"
print len(kkeys - mkeys), "events unique in second file"
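
# Usage sketch (hypothetical file names):
#
#     cmp_lj_sync my_sync.csv kit_sync.csv
#
# Each file is expected to hold comma-separated run,lumi,event records;
# lines not starting with '1' are skipped.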
| [
"[email protected]"
] | ||
165c12e14da405727a906d3e419de0a1d7201897 | 37279a0ac59daab785f1d1583851eb3f5dad30e1 | /workflow/oer_analysis/oer_scaling/oer_scaling.py | 4e2df538b6004a0966af53cec1e2bb3ef03c7538 | [
"MIT"
] | permissive | flash-jaehyun/PROJ_IrOx_OER | 50269e34f428e4c54b34afe3e07aae77e6ff82fc | e0b3ef8e69deeb41d62059a92f466477238efbed | refs/heads/master | 2023-02-09T05:45:11.969160 | 2020-12-30T23:18:54 | 2020-12-30T23:18:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,594 | py | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.2
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# # Creating OER scaling plot from raw data, not my modules
# ---
# ### Import Modules
# + jupyter={"source_hidden": true}
import os
print(os.getcwd())
import sys
import time; ti = time.time()
import copy
import numpy as np
from sklearn.metrics import mean_squared_error
import plotly.graph_objs as go
from plotly.subplots import make_subplots
# #########################################################
from proj_data import layout_shared as layout_shared_main
from proj_data import scatter_shared_props as scatter_shared_props_main
from proj_data import stoich_color_dict
# #########################################################
from methods import get_df_features_targets
# #########################################################
from layout import layout
# -
from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
from tqdm.notebook import tqdm
verbose = True
show_plot = True
else:
from tqdm import tqdm
verbose = False
show_plot = False
# ### Read Data
df_features_targets = get_df_features_targets()
# + active=""
#
#
# +
df_features_targets = df_features_targets.dropna(subset=[
("targets", "g_o", ""),
("targets", "g_oh", ""),
])
# df_targets = df_features_targets["targets"].dropna()
df_targets = df_features_targets["targets"]
x_array = df_targets["g_oh"]
y_array = df_targets["g_o"]
color_array = df_features_targets["format"]["color"]["stoich"]
# +
# print(111 * "TEMP | ")
# print("")
# df_features_targets.columns.tolist()
# df_tmp = df_features_targets.loc[:,
# [
# ('format', 'color', 'stoich'),
# ('data', 'stoich', ''),
# ]
# ]
# for index_i, row_i in df_tmp.iterrows():
# tmp = 42
# color_i = row_i["format"]["color"]["stoich"]
# stoich_i = row_i["data"]["stoich"][""]
# # print("# ", stoich_i, " '", color_i, "'", sep="")
# if stoich_i == "AB2":
# if color_i == "#46cf44":
# tmp = 42
# # print("AB2 Good")
# else:
# print("AB2 Bad")
# if stoich_i == "AB3":
# if color_i == "#42e3e3":
# tmp = 42
# # print("AB3 Good")
# else:
# print("AB3 Bad")
# -
# ### Fitting data
x_poly = np.linspace(x_array.min() - 0.2, x_array.max() + 0.2, num=50)
# +
z_1 = np.polyfit(
x_array, y_array,
1,
)
p_1 = np.poly1d(z_1)
print(
"Polynomial Fit (1st order): ",
"\n",
[np.round(i, 3) for i in list(z_1)],
sep="")
rmse_i = mean_squared_error(
y_array,
[p_1(i) for i in x_array],
squared=False)
print(
"RMSE (1st order): ",
rmse_i,
sep="")
y_poly_1 = [p_1(i) for i in x_poly]
# +
z_2 = np.polyfit(
x_array, y_array,
2,
)
p_2 = np.poly1d(z_2)
print(
"Polynomial Fit (2nd order): ",
"\n",
[np.round(i, 3) for i in list(z_2)],
sep="")
rmse_i = mean_squared_error(
y_array,
[p_2(i) for i in x_array],
squared=False)
print(
"RMSE (2nd order): ",
rmse_i,
sep="")
y_poly_2 = [p_2(i) for i in x_poly]
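# np.poly1d returns a callable polynomial; e.g. p_2(1.0) evaluates the
# second-order fit of G_O (eV) at G_OH = 1.0 eV.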
# -
# ### Layout
# +
layout_shared = copy.deepcopy(layout_shared_main)
layout_master = layout_shared.update(
layout
)
layout_master["xaxis"]["range"] = [x_array.min() - 0.2, x_array.max() + 0.2]
layout_master["title"] = "*O vs *OH Scaling Plot (1st and 2nd order fits)"
# -
# ### Instantiate scatter plots
# +
trace_poly_1 = go.Scatter(
x=x_poly, y=y_poly_1,
mode="lines",
line_color="grey",
name="poly_fit (1st order)",
)
trace_poly_2 = go.Scatter(
x=x_poly, y=y_poly_2,
mode="lines",
line_color="black",
name="poly_fit (2nd order)",
)
# +
trace = go.Scatter(
x=x_array, y=y_array,
mode="markers",
# marker_color=color_i,
marker_color=color_array,
name="main",
)
scatter_shared_props = copy.deepcopy(scatter_shared_props_main)
trace = trace.update(
scatter_shared_props,
overwrite=False,
)
# -
# ### Instantiate figure
# +
fig = go.Figure(
data=[
trace_poly_1,
trace_poly_2,
trace,
],
layout=layout_master,
)
fig.write_json(
os.path.join(
os.environ["PROJ_irox_oer"],
"workflow/oer_analysis/oer_scaling",
"out_plot/oer_scaling__O_vs_OH_plot.json"))
# -
if show_plot:
fig.show()
# + active=""
# There seems to be some nonlinearities at weak bonding energies
# +
# assert False
# + active=""
#
#
#
#
#
#
#
#
#
#
#
# -
# ## Plotting Histogram
df_ab2 = df_features_targets[df_features_targets["data"]["stoich"] == "AB2"]
df_ab3 = df_features_targets[df_features_targets["data"]["stoich"] == "AB3"]
print(
# "\n",
"AB2 ΔG_O Mean: ",
df_ab2["targets"]["g_o"].mean(),
"\n",
"AB3 ΔG_O Mean: ",
df_ab3["targets"]["g_o"].mean(),
"\n",
"diff: ",
df_ab3["targets"]["g_o"].mean() - df_ab2["targets"]["g_o"].mean(),
"\n",
40 * "-",
"\n",
"AB2 ΔG_OH Mean: ",
df_ab2["targets"]["g_oh"].mean(),
"\n",
"AB3 ΔG_OH Mean: ",
df_ab3["targets"]["g_oh"].mean(),
"\n",
"diff: ",
df_ab3["targets"]["g_oh"].mean() - df_ab2["targets"]["g_oh"].mean(),
sep="")
# +
shared_layout_hist = go.Layout(
yaxis_title="N",
barmode="overlay",
)
shared_trace_hist = dict(
opacity=0.55,
nbinsx=15,
)
# -
# ### Trying to get the number of data in bins to set y-axis range (NOT WORKING SO FAR)
# +
# y_targets_list = [
# df_ab2.targets.g_oh,
# # df_ab3.targets.g_oh,
# # df_ab2.targets.g_o,
# # df_ab3.targets.g_o,
# ]
# max_num_data_list = []
# for y_target_i in y_targets_list:
# width = (y_target_i.max() - y_target_i.min()) / shared_trace_hist["nbinsx"]
# num_data_in_sliver_list = []
# for i in np.linspace(y_target_i.min(), y_target_i.max(), 200):
# i_upper = i + width / 2
# i_lower = i - width / 2
# print(i_upper, i_lower)
# y_in_sliver = y_target_i[
# (y_target_i < i_upper) & \
# (y_target_i > i_lower)
# ]
# num_data_in_sliver = y_in_sliver.shape[0]
# #print(num_data_in_sliver)
# num_data_in_sliver_list.append(num_data_in_sliver)
# max_num_data_in_sliver_i = np.max(num_data_in_sliver_list)
# print(max_num_data_in_sliver_i)
# print("")
# max_num_data_list.append(max_num_data_in_sliver_i)
# max_max_num_in_sliver = np.max(max_num_data_list)
# max_max_num_in_sliver
# # width =
# (y_target_i.max() - y_target_i.min()) / shared_trace_hist["nbinsx"]
# # y_targets_list[0]
# # y_in_sliver =
# y_target_i[
# (y_target_i < 0.6) & \
# (y_target_i > 0.4)
# ]
# -
# ### Instantiate *OH plots
# +
# %%capture
fig_oh = go.Figure()
fig_oh.add_trace(
go.Histogram(
x=df_ab2.targets.g_oh,
marker_color=stoich_color_dict["AB2"],
name="AB2",
).update(dict1=shared_trace_hist)
)
fig_oh.add_trace(
go.Histogram(
x=df_ab3.targets.g_oh,
marker_color=stoich_color_dict["AB3"],
name="AB3",
).update(dict1=shared_trace_hist)
)
# #########################################################
# Layout manipulation
layout_shared = copy.deepcopy(layout_shared_main)
layout_shared.update(
go.Layout(
# title="TEMP01",
xaxis=go.layout.XAxis(
title="ΔG<sub>*OH</sub>",
),
),
overwrite=False,
)
layout_shared.update(shared_layout_hist)
fig_oh.update_layout(dict1=layout_shared)
# -
# ### Instantiate *O plots
# +
# %%capture
fig_o = go.Figure()
fig_o.add_trace(
go.Histogram(
x=df_ab2.targets.g_o,
marker_color=stoich_color_dict["AB2"],
name="AB2",
).update(dict1=shared_trace_hist)
)
fig_o.add_trace(
go.Histogram(
x=df_ab3.targets.g_o,
marker_color=stoich_color_dict["AB3"],
name="AB3",
).update(dict1=shared_trace_hist)
)
# #########################################################
# Layout manipulation
layout_shared = copy.deepcopy(layout_shared_main)
layout_shared.update(
go.Layout(
# title="",
xaxis=go.layout.XAxis(
title="ΔG<sub>*O</sub>",
),
),
overwrite=False,
)
layout_shared.update(shared_layout_hist)
fig_o.update_layout(dict1=layout_shared)
# -
# ### Instantiate subplot
# +
# %%capture
fig = make_subplots(rows=1, cols=2)
for trace_i in fig_o.data:
fig.add_trace(
trace_i,
row=1, col=1,
)
for trace_i in fig_oh.data:
fig.add_trace(
trace_i,
row=1, col=2,
)
fig.update_layout(
height=600,
width=1000,
title_text="ΔG<sub>*O</sub> and ΔG<sub>*OH</sub> Histograms (eV)",
)
fig.update_layout(layout_shared_main)
fig.update_layout(shared_layout_hist)
fig.update_xaxes(
fig_o.layout["xaxis"],
row=1, col=1,
overwrite=False,
)
fig.update_xaxes(
fig_oh.layout["xaxis"],
row=1, col=2,
overwrite=False,
)
y_range_ub = 45
fig.update_yaxes(
fig_o.layout["yaxis"].update(
range=[0, y_range_ub],
),
row=1, col=1,
overwrite=False,
)
fig.update_yaxes(
fig_oh.layout["yaxis"].update(
range=[0, y_range_ub],
),
row=1, col=2,
overwrite=False,
)
# -
# ### Saving plot to json
fig.write_json(
os.path.join(
os.environ["PROJ_irox_oer"],
"workflow/oer_analysis/oer_scaling",
"out_plot/oer_scaling__O_OH_histogram.json"))
if show_plot:
fig.show()
# #########################################################
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("oer_scaling.ipynb")
print(20 * "# # ")
# #########################################################
# + active=""
#
#
#
# + jupyter={"source_hidden": true}
# stoich_color_dict["AB2"]
# # go.Histogram?
# + jupyter={"source_hidden": true}
# df_features_targets.head()
# df_features_targets.columns.tolist()
# + jupyter={"source_hidden": true}
# color_i
# + jupyter={"source_hidden": true}
# print(len(x_array))
# print(len(y_array))
# print(len(color_i))
# + jupyter={"source_hidden": true}
# df_targets.sort_values("g_oh")
| [
"[email protected]"
] | |
d0be707b6b95674e7a55339a7774568045b2a525 | 6a7058009587e78b5c758ff783410325ad7c2a4b | /educative/slidingWindow/non_repeat_substring.py | 2883b8e112151f20e144a97d63367dc9680d312d | [
"Apache-2.0"
] | permissive | stacykutyepov/python-cp-cheatsheet | 8b96b76403c501f5579befd07b3c4a4c69fe914e | a00a57e1b36433648d1cace331e15ff276cef189 | refs/heads/master | 2023-07-16T13:26:35.130763 | 2021-08-30T11:23:39 | 2021-08-30T11:23:39 | 401,442,535 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py | """
time: 13 min
errors: none!
"""
def non_repeat_substring(str):
    # Classic sliding window: track the most recent index of each character
    # and advance the window start past a repeat instead of resetting the
    # whole window, so inputs such as "dvdf" are measured correctly.
    max_len, window_start = 0, 0
    last_seen = {}
    for i, c in enumerate(str):
        if c in last_seen and last_seen[c] >= window_start:
            window_start = last_seen[c] + 1
        last_seen[c] = i
        max_len = max(max_len, i - window_start + 1)
    return max_len
def main():
print("Length of the longest substring: " + str(non_repeat_substring("aabccbb")))
print("Length of the longest substring: " + str(non_repeat_substring("abbbb")))
print("Length of the longest substring: " + str(non_repeat_substring("abccde")))
main() | [
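# With the sliding-window version above, main() reports lengths 3, 2 and 3.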
"[email protected]"
] | |
9610d71e683b7cf6ba117adf541c9de69f52aee6 | 7b5828edda7751700ca7002b40a214e39e5f48a8 | /EA/core/sims4/gsi/command_buffer.py | e1301ea4581704ced936304f7533eab0b6fbd36f | [] | no_license | daniela-venuta/Sims-4-Python-Script-Workspace | 54c33dac02f84daed66f46b7307f222fede0fa62 | f408b28fb34626b2e3b2953152343d591a328d66 | refs/heads/main | 2023-03-29T18:08:39.202803 | 2021-03-30T19:00:42 | 2021-03-30T19:00:42 | 353,111,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | import collections
try:
import threading
_threading_enabled = True
except ImportError:
import dummy_threading as threading
_threading_enabled = False
import sims4.commands
import sims4.log
import sims4.service_manager
logger = sims4.log.Logger('GSI')
_Command = collections.namedtuple('_Command', ('command_string', 'callback', 'output_override', 'zone_id', 'connection_id'))
def _execute_command(command):
real_output = sims4.commands.output
sims4.commands.output = command.output_override
result = False
try:
if command.zone_id is not None:
sims4.commands.execute(command.command_string, command.connection_id)
else:
sims4.commands.execute(command.command_string, None)
result = True
except Exception:
result = False
logger.exception('Error while executing game command for')
finally:
sims4.commands.output = real_output
command.callback(result)
if _threading_enabled:
class CommandBufferService(sims4.service_manager.Service):
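        # Threaded variant: commands queue under a lock and are drained on
        # the game tick, so GSI requests from other threads execute safely.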
def __init__(self):
self.pending_commands = None
self._lock = threading.Lock()
def start(self):
with self._lock:
self.pending_commands = []
def stop(self):
with self._lock:
self.pending_commands = None
def add_command(self, command_string, callback=None, output_override=None, zone_id=None, connection_id=None):
with self._lock:
if self.pending_commands is not None:
command = _Command(command_string, callback, output_override, zone_id, connection_id)
self.pending_commands.append(command)
def on_tick(self):
with self._lock:
if not self.pending_commands:
return
local_pending_commands = list(self.pending_commands)
del self.pending_commands[:]
for command in local_pending_commands:
_execute_command(command)
else:
class CommandBufferService(sims4.service_manager.Service):
def add_command(self, command_string, callback=None, output_override=None, zone_id=None, connection_id=None):
command = _Command(command_string, callback, output_override, zone_id, connection_id)
_execute_command(command)
def on_tick(self):
pass
| [
"[email protected]"
] | |
309cd04173b4d096eb7b590ed67fc399ef2c0877 | cf668ede675f5b5a49912e8ca2170b5d5dba85c3 | /FullDesign/LsRand_OnlyTau_4.py | 3d9e3b41a25a22b071485ba0d01de3c81c709d52 | [] | no_license | amemil/MasterThesisRaw | b6c97a671e740871be541539384192684f5f1966 | bb357481cc47ef3a2b241f4b1df85fd0a4ff1de0 | refs/heads/main | 2023-06-09T22:49:06.082380 | 2021-06-25T09:38:20 | 2021-06-25T09:38:20 | 327,104,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 12 19:29:28 2021
@author: emilam
"""
import sys, os
import numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
import UtilitiesMaster as ut
s1init1 = np.load('s1init_16.npy')
s2init1 = np.load('s2init_16.npy')
Winit1 = np.load('Winit_16.npy')
s1init2 = np.load('s1init_17.npy')
s2init2 = np.load('s2init_17.npy')
Winit2 = np.load('Winit_17.npy')
s1init3 = np.load('s1init_18.npy')
s2init3 = np.load('s2init_18.npy')
Winit3 = np.load('Winit_18.npy')
s1init4 = np.load('s1init_19.npy')
s2init4 = np.load('s2init_19.npy')
Winit4= np.load('Winit_19.npy')
s1init5 = np.load('s1init_20.npy')
s2init5 = np.load('s2init_20.npy')
Winit5 = np.load('Winit_20.npy')
indx = [16,17,18,19,20]
s1s = [s1init1,s1init2,s1init3,s1init4,s1init5]
s2s = [s2init1,s2init2,s2init3,s2init4,s2init5]
ws = [Winit1,Winit2,Winit3,Winit4,Winit5]
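# Run the online experiment design once per preloaded initial condition
# (spike trains s1/s2 and weight trajectory W), saving each result set.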
for i in range(5):
design = ut.ExperimentDesign(freqs_init=np.array([20,50,100,200]),maxtime=60,trialsize=5\
,Ap=0.005, tau=0.02, genstd=0.0001,b1=-3.1, b2=-3.1, w0=1.0,binsize = 1/500.0,reals = 20,longinit = 60\
,s1init = s1s[i],s2init = s2s[i],Winit = ws[i])
means,entrs,optms,W,posts = design.onlineDesign_wh_tau(nofreq =False,constant = False, random = True, optimised = False)
np.save('RandEstimatesTau_'+str(indx[i]),means)
np.save('RandEntropiesTau_'+str(indx[i]),entrs)
np.save('RandWTau_'+str(indx[i]),W)
np.save('RandPostsTau_'+str(indx[i]),posts)
np.save('RandFreqsTau_'+str(indx[i]),optms) | [
"[email protected]"
] | |
9eb21a225d99d72d993744068321b270fe85c8e0 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/mysql_forwarding.py | 1bd1a82ee4f076d2f13ebb0d6b9e7b5b2c2a94ed | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 8,910 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class MysqlForwarding:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
sensitive_list.append('password')
openapi_types = {
'address': 'NetAddress',
'db_name': 'str',
'username': 'str',
'password': 'str',
'enable_ssl': 'bool',
'table_name': 'str',
'column_mappings': 'list[ColumnMapping]'
}
attribute_map = {
'address': 'address',
'db_name': 'db_name',
'username': 'username',
'password': 'password',
'enable_ssl': 'enable_ssl',
'table_name': 'table_name',
'column_mappings': 'column_mappings'
}
def __init__(self, address=None, db_name=None, username=None, password=None, enable_ssl=None, table_name=None, column_mappings=None):
"""MysqlForwarding
The model defined in huaweicloud sdk
:param address:
:type address: :class:`huaweicloudsdkiotda.v5.NetAddress`
        :param db_name: **Parameter description**: Name of the database to connect to on the MYSQL server. **Value range**: at most 64 characters; only letters, digits, underscores (_), and hyphens (-) are allowed.
:type db_name: str
        :param username: **Parameter description**: Username for connecting to the MYSQL database
:type username: str
        :param password: **Parameter description**: Password for connecting to the MYSQL database
:type password: str
        :param enable_ssl: **Parameter description**: Whether the client uses SSL when connecting to the server; defaults to true
:type enable_ssl: bool
        :param table_name: **Parameter description**: Name of the MYSQL database table
:type table_name: str
        :param column_mappings: **Parameter description**: List of mappings between MYSQL database columns and the forwarded data.
:type column_mappings: list[:class:`huaweicloudsdkiotda.v5.ColumnMapping`]
"""
self._address = None
self._db_name = None
self._username = None
self._password = None
self._enable_ssl = None
self._table_name = None
self._column_mappings = None
self.discriminator = None
self.address = address
self.db_name = db_name
self.username = username
self.password = password
if enable_ssl is not None:
self.enable_ssl = enable_ssl
self.table_name = table_name
self.column_mappings = column_mappings
@property
def address(self):
"""Gets the address of this MysqlForwarding.
:return: The address of this MysqlForwarding.
:rtype: :class:`huaweicloudsdkiotda.v5.NetAddress`
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this MysqlForwarding.
:param address: The address of this MysqlForwarding.
:type address: :class:`huaweicloudsdkiotda.v5.NetAddress`
"""
self._address = address
@property
def db_name(self):
"""Gets the db_name of this MysqlForwarding.
        **Parameter description**: Name of the database to connect to on the MYSQL server. **Value range**: at most 64 characters; only letters, digits, underscores (_), and hyphens (-) are allowed.
:return: The db_name of this MysqlForwarding.
:rtype: str
"""
return self._db_name
@db_name.setter
def db_name(self, db_name):
"""Sets the db_name of this MysqlForwarding.
        **Parameter description**: Name of the database to connect to on the MYSQL server. **Value range**: at most 64 characters; only letters, digits, underscores (_), and hyphens (-) are allowed.
:param db_name: The db_name of this MysqlForwarding.
:type db_name: str
"""
self._db_name = db_name
@property
def username(self):
"""Gets the username of this MysqlForwarding.
        **Parameter description**: Username for connecting to the MYSQL database
:return: The username of this MysqlForwarding.
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this MysqlForwarding.
        **Parameter description**: Username for connecting to the MYSQL database
:param username: The username of this MysqlForwarding.
:type username: str
"""
self._username = username
@property
def password(self):
"""Gets the password of this MysqlForwarding.
        **Parameter description**: Password for connecting to the MYSQL database
:return: The password of this MysqlForwarding.
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this MysqlForwarding.
        **Parameter description**: Password for connecting to the MYSQL database
:param password: The password of this MysqlForwarding.
:type password: str
"""
self._password = password
@property
def enable_ssl(self):
"""Gets the enable_ssl of this MysqlForwarding.
        **Parameter description**: Whether the client uses SSL when connecting to the server; defaults to true
:return: The enable_ssl of this MysqlForwarding.
:rtype: bool
"""
return self._enable_ssl
@enable_ssl.setter
def enable_ssl(self, enable_ssl):
"""Sets the enable_ssl of this MysqlForwarding.
        **Parameter description**: Whether the client uses SSL when connecting to the server; defaults to true
:param enable_ssl: The enable_ssl of this MysqlForwarding.
:type enable_ssl: bool
"""
self._enable_ssl = enable_ssl
@property
def table_name(self):
"""Gets the table_name of this MysqlForwarding.
        **Parameter description**: Name of the MYSQL database table
:return: The table_name of this MysqlForwarding.
:rtype: str
"""
return self._table_name
@table_name.setter
def table_name(self, table_name):
"""Sets the table_name of this MysqlForwarding.
        **Parameter description**: Name of the MYSQL database table
:param table_name: The table_name of this MysqlForwarding.
:type table_name: str
"""
self._table_name = table_name
@property
def column_mappings(self):
"""Gets the column_mappings of this MysqlForwarding.
        **Parameter description**: List of mappings between MYSQL database columns and the forwarded data.
:return: The column_mappings of this MysqlForwarding.
:rtype: list[:class:`huaweicloudsdkiotda.v5.ColumnMapping`]
"""
return self._column_mappings
@column_mappings.setter
def column_mappings(self, column_mappings):
"""Sets the column_mappings of this MysqlForwarding.
        **Parameter description**: List of mappings between MYSQL database columns and the forwarded data.
:param column_mappings: The column_mappings of this MysqlForwarding.
:type column_mappings: list[:class:`huaweicloudsdkiotda.v5.ColumnMapping`]
"""
self._column_mappings = column_mappings
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MysqlForwarding):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
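
# Illustrative construction (all field values below are placeholders):
#
#     forwarding = MysqlForwarding(
#         address=NetAddress(...),  # host/port of the MYSQL server
#         db_name="iot_data", username="iot_user", password="****",
#         enable_ssl=True, table_name="device_events", column_mappings=[],
#     )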
| [
"[email protected]"
] | |
a7a8a4fc6a4d8860a7cbcf0990e903217b21bb30 | fd40d6375ddae5c8613004a411341f0c984e80d5 | /src/visions/core/implementations/types/visions_datetime.py | f8a11f0c6b5bf06f1ed01080801bdba0c704c5d0 | [
"LicenseRef-scancode-public-domain",
"MIT"
] | permissive | ieaves/tenzing | 93c3353e62621c90adefc5a174a2dcde9aacbc46 | 92d39c1c3a5633d8074e0ffe8c2687c465aebbc8 | refs/heads/master | 2020-04-25T07:14:31.388737 | 2020-01-07T02:51:13 | 2020-01-07T02:51:13 | 172,608,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | import pandas.api.types as pdt
import pandas as pd
from typing import Sequence
from visions.core.model.relations import (
IdentityRelation,
InferenceRelation,
TypeRelation,
)
from visions.core.model.type import VisionsBaseType
from visions.core.implementations.types import visions_string
from visions.utils.coercion import test_utils
def to_datetime(series: pd.Series) -> pd.Series:
return pd.to_datetime(series)
def _get_relations() -> Sequence[TypeRelation]:
from visions.core.implementations.types import visions_generic
relations = [
IdentityRelation(visions_datetime, visions_generic),
InferenceRelation(
visions_datetime,
visions_string,
relationship=test_utils.coercion_test(to_datetime),
transformer=to_datetime,
),
]
return relations
class visions_datetime(VisionsBaseType):
"""**Datetime** implementation of :class:`visions.core.model.type.VisionsBaseType`.
Examples:
>>> x = pd.Series([pd.datetime(2017, 3, 5), pd.datetime(2019, 12, 4)])
>>> x in visions_datetime
True
"""
@classmethod
def get_relations(cls) -> Sequence[TypeRelation]:
return _get_relations()
@classmethod
def contains_op(cls, series: pd.Series) -> bool:
return pdt.is_datetime64_any_dtype(series)
| [
"[email protected]"
] | |
045849a6dcf37e6dfd8deaa91796aebe1f3f2334 | c6609c161df66949656ca91d8a3d9f4d27a4c399 | /rates_project_04122021/rates_client/rates_client/rate_client.py | 71771ba20ad4886144213eb414be5cd5d7451817 | [
"MIT"
] | permissive | t4d-classes/advanced-python_04122021 | b93ea38c5b35af2b1eb06bc1d5fe6d3f0c1cf39f | 07b27aea8ac3c7170eb66d5243c5cd841f41322c | refs/heads/master | 2023-04-11T11:45:18.114381 | 2021-04-20T12:36:04 | 2021-04-20T12:36:04 | 357,016,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,063 | py | """ rate client module """
import socket
import sys
from rates_shared.utils import read_config
def main() -> None:
"""Main Function"""
try:
config = read_config()
host = config["server"]["host"]
port = int(config["server"]["port"])
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
client_socket.connect((host, port))
print(client_socket.recv(2048).decode("UTF-8"))
while True:
command = input("> ")
if command == "exit":
break
else:
client_socket.sendall(command.encode("UTF-8"))
print(client_socket.recv(2048).decode("UTF-8"))
client_socket.close()
except ConnectionResetError:
print("Server connection was closed.")
except ConnectionRefusedError:
print("Server is not running.")
except KeyboardInterrupt:
pass
sys.exit(0)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f9ed10bc581a959ecacc6f7e395dd6fef7ea68b0 | 016f96e528141db111f15a4c00a0fc46e61cdff6 | /lib/emailses/urls.py | 5fb0d6077dd92bf4cd7ecabb113f5b0498156de9 | [
"BSD-2-Clause"
] | permissive | hdknr/emailqueue | 3d02407b06a492cdf9b89fde2b06c766cd500555 | 05e108562f4fb612440f769973b9a3d02c11afcd | refs/heads/master | 2021-01-23T20:13:04.807258 | 2015-10-08T08:41:51 | 2015-10-08T08:41:51 | 20,243,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | from django.conf.urls import url
import views
urlpatterns = [
url(r'(?P<topic>.+)', views.notify, name='emailses_notify'),
]
| [
"[email protected]"
] | |
193e2e78f289aa1bb05e4b344b5c7d17b61c984e | 6b0161214e4db57a81d3b4432d82c874c7106f13 | /couchbase/_pyport.py | 20700a813860e5ed162b08ac93b31aa54775c92d | [
"Apache-2.0"
] | permissive | neilalbrock/couchbase-python-client | de9d6115d1240f56f4cb7b57aee7e8765c5c7d1f | 95789e3d49c42613fe719bbd02e6d9ad30216334 | refs/heads/master | 2021-01-15T18:51:31.311163 | 2013-10-14T13:58:28 | 2013-10-14T13:58:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,140 | py | #
# Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This module contains various mappings for modules which have had
# their names changed across Python major versions
try:
import urllib.parse as ulp
from urllib.request import urlopen
from urllib.parse import parse_qs
except ImportError:
import urllib as ulp
from urllib2 import urlopen
from urlparse import parse_qs
try:
long = long
except NameError:
long = int
try:
xrange = xrange
except NameError:
xrange = range
try:
basestring = basestring
except NameError:
basestring = str
| [
"[email protected]"
] | |
219d2f333e3269a900b48b27c08cc48a24363b80 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02707/s167454120.py | 27b53e15e36617817a3cd3b043a523410132faad | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | import math
n = int(input())
c= [0]*n
s = map(int,input().split())
for i in s:
c[i-1]+=1
for i in c:
print(i) | [
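# Example (illustrative): for n = 5 and the values "1 2 2 4 5",
# the printed counts are 1, 2, 0, 1, 1.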
"[email protected]"
] | |
9cfda2d3b33fe057ffac602c8a45eb41a9ec05e5 | adea9fc9697f5201f4cb215571025b0493e96b25 | /napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/__init__.py | 2c88a90da723c84456e7fd1bc37de0086e2d37c7 | [
"Apache-2.0"
] | permissive | andyjsharp/napalm-yang | d8a8b51896ef7c6490f011fe265db46f63f54248 | ef80ebbfb50e188f09486380c88b058db673c896 | refs/heads/develop | 2021-09-09T02:09:36.151629 | 2018-03-08T22:44:04 | 2018-03-08T22:44:04 | 114,273,455 | 0 | 0 | null | 2018-03-08T22:44:05 | 2017-12-14T16:33:35 | Python | UTF-8 | Python | false | false | 34,558 | py |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
unicode = str
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/afi-safi/af/segment-routing/prefix-sids/prefix-sid/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters for the IGP Prefix SID.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__prefix','__sid_id','__label_options',)
_yang_name = 'config'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__label_options = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
self.__prefix = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
self.__sid_id = YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'interfaces', u'interface', u'levels', u'level', u'afi-safi', u'af', u'segment-routing', u'prefix-sids', u'prefix-sid', u'config']
def _get_prefix(self):
"""
Getter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/prefix (inet:ip-prefix)
YANG Description: The IP prefix for which the IGP prefix SID should be advertised. The
value specified is a local prefix on the interface which is advertised
into the IGP.
"""
return self.__prefix
def _set_prefix(self, v, load=False):
"""
Setter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/prefix (inet:ip-prefix)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix() directly.
YANG Description: The IP prefix for which the IGP prefix SID should be advertised. The
value specified is a local prefix on the interface which is advertised
into the IGP.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prefix must be of a type compatible with inet:ip-prefix""",
'defined-type': "inet:ip-prefix",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)""",
})
self.__prefix = t
if hasattr(self, '_set'):
self._set()
def _unset_prefix(self):
self.__prefix = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
def _get_sid_id(self):
"""
Getter method for sid_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/sid_id (sr-sid-type)
YANG Description: The Segment Identifier to be used when advertising the IGP Prefix SID.
"""
return self.__sid_id
def _set_sid_id(self, v, load=False):
"""
Setter method for sid_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/sid_id (sr-sid-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid_id() directly.
YANG Description: The Segment Identifier to be used when advertising the IGP Prefix SID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sid_id must be of a type compatible with sr-sid-type""",
'defined-type': "openconfig-network-instance:sr-sid-type",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)""",
})
self.__sid_id = t
if hasattr(self, '_set'):
self._set()
def _unset_sid_id(self):
self.__sid_id = YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
def _get_label_options(self):
"""
Getter method for label_options, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/label_options (enumeration)
YANG Description: The options associated with the IGP prefix SID for MPLS. The value
of this leaf specifies the option that the SID should be advertised
into the IGP with.
"""
return self.__label_options
def _set_label_options(self, v, load=False):
"""
Setter method for label_options, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/label_options (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_label_options is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_label_options() directly.
YANG Description: The options associated with the IGP prefix SID for MPLS. The value
of this leaf specifies the option that the SID should be advertised
into the IGP with.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """label_options must be of a type compatible with enumeration""",
'defined-type': "openconfig-network-instance:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)""",
})
self.__label_options = t
if hasattr(self, '_set'):
self._set()
def _unset_label_options(self):
self.__label_options = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
prefix = __builtin__.property(_get_prefix, _set_prefix)
sid_id = __builtin__.property(_get_sid_id, _set_sid_id)
label_options = __builtin__.property(_get_label_options, _set_label_options)
_pyangbind_elements = {'prefix': prefix, 'sid_id': sid_id, 'label_options': label_options, }
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/afi-safi/af/segment-routing/prefix-sids/prefix-sid/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters for the IGP Prefix SID.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_extmethods', '__prefix','__sid_id','__label_options',)
_yang_name = 'config'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__label_options = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
self.__prefix = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
self.__sid_id = YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'network-instances', u'network-instance', u'protocols', u'protocol', u'isis', u'interfaces', u'interface', u'levels', u'level', u'afi-safi', u'af', u'segment-routing', u'prefix-sids', u'prefix-sid', u'config']
def _get_prefix(self):
"""
Getter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/prefix (inet:ip-prefix)
YANG Description: The IP prefix for which the IGP prefix SID should be advertised. The
value specified is a local prefix on the interface which is advertised
into the IGP.
"""
return self.__prefix
def _set_prefix(self, v, load=False):
"""
Setter method for prefix, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/prefix (inet:ip-prefix)
If this variable is read-only (config: false) in the
source YANG file, then _set_prefix is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_prefix() directly.
YANG Description: The IP prefix for which the IGP prefix SID should be advertised. The
value specified is a local prefix on the interface which is advertised
into the IGP.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """prefix must be of a type compatible with inet:ip-prefix""",
'defined-type': "inet:ip-prefix",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)""",
})
self.__prefix = t
if hasattr(self, '_set'):
self._set()
def _unset_prefix(self):
self.__prefix = YANGDynClass(base=[RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/(([0-9])|([1-2][0-9])|(3[0-2]))'}),RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}),], is_leaf=True, yang_name="prefix", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='inet:ip-prefix', is_config=True)
def _get_sid_id(self):
"""
Getter method for sid_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/sid_id (sr-sid-type)
YANG Description: The Segment Identifier to be used when advertising the IGP Prefix SID.
"""
return self.__sid_id
def _set_sid_id(self, v, load=False):
"""
Setter method for sid_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/sid_id (sr-sid-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_sid_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sid_id() directly.
YANG Description: The Segment Identifier to be used when advertising the IGP Prefix SID.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sid_id must be of a type compatible with sr-sid-type""",
'defined-type': "openconfig-network-instance:sr-sid-type",
'generated-type': """YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)""",
})
self.__sid_id = t
if hasattr(self, '_set'):
self._set()
def _unset_sid_id(self):
self.__sid_id = YANGDynClass(base=[RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'16..1048575']}),RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'NO_LABEL': {}, u'IPV6_EXPLICIT_NULL': {'value': 2}, u'ENTROPY_LABEL_INDICATOR': {'value': 7}, u'IPV4_EXPLICIT_NULL': {'value': 0}, u'ROUTER_ALERT': {'value': 1}, u'IMPLICIT_NULL': {'value': 3}},),RestrictedClassType(base_type=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), restriction_dict={'pattern': u'[0-9a-fA-F:\\.]*'}),], is_leaf=True, yang_name="sid-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='sr-sid-type', is_config=True)
def _get_label_options(self):
"""
Getter method for label_options, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/label_options (enumeration)
YANG Description: The options associated with the IGP prefix SID for MPLS. The value
of this leaf specifies the option that the SID should be advertised
into the IGP with.
"""
return self.__label_options
def _set_label_options(self, v, load=False):
"""
Setter method for label_options, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/afi_safi/af/segment_routing/prefix_sids/prefix_sid/config/label_options (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_label_options is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_label_options() directly.
YANG Description: The options associated with the IGP prefix SID for MPLS. The value
of this leaf specifies the option that the SID should be advertised
into the IGP with.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """label_options must be of a type compatible with enumeration""",
'defined-type': "openconfig-network-instance:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)""",
})
self.__label_options = t
if hasattr(self, '_set'):
self._set()
def _unset_label_options(self):
self.__label_options = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'EXPLICIT_NULL': {}, u'NO_PHP': {}},), is_leaf=True, yang_name="label-options", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='enumeration', is_config=True)
prefix = __builtin__.property(_get_prefix, _set_prefix)
sid_id = __builtin__.property(_get_sid_id, _set_sid_id)
label_options = __builtin__.property(_get_label_options, _set_label_options)
_pyangbind_elements = {'prefix': prefix, 'sid_id': sid_id, 'label_options': label_options, }
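# Illustrative sketch (not authoritative): pyangbind-generated containers like
# the `config` class above are typically populated through their generated
# setters; the values below are hypothetical but satisfy the leaf restrictions
# declared in this file.
#   cfg = config()
#   cfg._set_prefix(u"192.0.2.0/24")      # matches the inet:ip-prefix pattern
#   cfg._set_sid_id(16)                   # within the 16..1048575 SID range
#   cfg._set_label_options(u"NO_PHP")     # one of EXPLICIT_NULL / NO_PHP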
| [
"[email protected]"
] | |
e83b9d3c0be7a33e00b85393338fd4d1bb2d0e8d | cbc7ca332ff4cb8c98cb9eb37af654ee63297802 | /torch/_C/_distributed_c10d.pyi | 1cbf030e4afedebc674b36fe035faa072792c35e | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | yfzheng11/pytorch | 307ebedbf6c5f23ed49584cf9536e47cff0a3ab0 | 1aa14fcb14dd1ecc8382f747de6f2070d929ed02 | refs/heads/master | 2023-05-26T14:07:05.459920 | 2021-06-03T20:28:45 | 2021-06-03T20:30:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,379 | pyi | from datetime import timedelta
from enum import Enum
from typing import Optional, List, Any, Tuple, overload
from torch import Tensor
# This module is defined in torch/csrc/distributed/c10d/init.cpp
_DEFAULT_FIRST_BUCKET_BYTES: int
_DEFAULT_NO_TIMEOUT: timedelta
_DEFAULT_PG_TIMEOUT: timedelta
class BuiltinCommHookType(Enum):
ALLREDUCE = ...
FP16_COMPRESS = ...
def _register_comm_hook(reducer: Reducer, state: Any, comm_hook: Any): ...
def _register_builtin_comm_hook(
reducer: Reducer, comm_hook_type: BuiltinCommHookType
): ...
class GradBucket:
def __init__(
self,
index: int,
tensor: Tensor,
offsets: List[int],
lengths: List[int],
sizes_list: List[Tuple[int]],
): ...
def get_index(self) -> int: ...
def get_tensor(self) -> Tensor: ...
def get_per_parameter_tensors(self) -> List[Tensor]: ...
def is_the_last_bucket_to_allreduce(self) -> bool: ...
def set_tensor(self, tensor: Tensor) -> None: ...
class Reducer:
def __init__(
self,
replicas: List[List[Tensor]],
bucket_indices: List[List[int]],
process_group: ProcessGroup,
expect_sparse_gradients: List[List[bool]],
bucket_bytes_cap: int,
find_unused_parameters: bool,
gradient_as_bucket_view: bool,
): ...
...
class Logger:
def __init__(self, reducer: Reducer): ...
def set_construction_data_and_log(
self,
module_name: str,
device_ids: List[int],
output_device: int,
broadcast_buffers: bool,
): ...
...
def _get_debug_mode(): ...
class _DistributedDebugLevel(Enum):
OFF = ...
INFO = ...
DETAIL = ...
class ReduceOp(Enum):
SUM = ...
PRODUCT = ...
MIN = ...
MAX = ...
BAND = ...
BOR = ...
BXOR = ...
UNUSED = ...
class BroadcastOptions:
rootRank: int
rootTensor: int
timeout: timedelta
class AllreduceOptions:
reduceOp: ReduceOp
timeout: timedelta
class AllreduceCoalescedOptions(AllreduceOptions): ...
class ReduceOptions:
reduceOp: ReduceOp
rootRank: int
rootTensor: int
timeout: timedelta
class AllGatherOptions:
timeout: timedelta
class GatherOptions:
rootRank: int
timeout: timedelta
class ScatterOptions:
rootRank: int
timeout: timedelta
class ReduceScatterOptions:
reduceOp: ReduceOp
timeout: timedelta
class BarrierOptions:
device_ids: List[int]
timeout: timedelta
class AllToAllOptions:
timeout: timedelta
class Store:
def set(self, key: str, value: str): ...
def get(self, key: str) -> bytes: ...
def add(self, key: str, value: int) -> int: ...
def compare_set(self, key: str, expected_value: str, desired_value: str) -> bytes: ...
def delete_key(self, key: str) -> bool: ...
def num_keys(self) -> int: ...
def set_timeout(self, timeout: timedelta): ...
@overload
def wait(self, keys: List[str]): ...
@overload
def wait(self, keys: List[str], timeout: timedelta): ...
class FileStore(Store):
def __init__(self, path: str, numWorkers: int): ...
class HashStore(Store):
def __init__(self): ...
class TCPStore(Store):
def __init__(
self,
host_name: str,
port: int,
world_size: int = ...,
is_master: bool = ...,
timeout: timedelta = ...,
wait_for_workers: bool = ...
): ...
class PrefixStore(Store):
def __init__(self, prefix: str, store: Store): ...
class Work:
def is_completed(self) -> bool: ...
def is_success(self) -> bool: ...
def exception(self) -> Any: ...
def wait(self, timeout: timedelta = _DEFAULT_NO_TIMEOUT) -> bool: ...
def source_rank(self) -> int: ...
def _source_rank(self) -> int: ...
def result(self) -> List[Tensor]: ...
def synchronize(self): ...
...
class ProcessGroup:
class Options: ...
def __init__(self): ...
def rank(self) -> int: ...
def size(self) -> int: ...
@overload
def broadcast(
self,
tensors: List[Tensor],
opts=BroadcastOptions(),
) -> Work: ...
@overload
def broadcast(
self,
tensor: Tensor,
root: int,
) -> Work: ...
@overload
def allreduce(
self,
tensors: List[Tensor],
opts: AllreduceOptions = AllreduceOptions(),
) -> Work: ...
@overload
def allreduce(
self,
tensors: List[Tensor],
op=ReduceOp.SUM,
) -> Work: ...
@overload
def allreduce(
self,
tensor: Tensor,
op=ReduceOp.SUM,
) -> Work: ...
def allreduce_coalesced(
self,
tensors: List[Tensor],
opts=AllreduceCoalescedOptions(),
) -> Work: ...
@overload
def reduce(
self,
tensors: List[Tensor],
opts=ReduceOptions(),
) -> Work: ...
@overload
def reduce(
self,
tensor: Tensor,
root: int,
op=ReduceOp.SUM,
) -> Work: ...
@overload
def allgather(
self,
output_tensors: List[List[Tensor]],
input_tensors: List[Tensor],
opts=AllGatherOptions(),
) -> Work: ...
@overload
def allgather(
self,
output_tensors: List[Tensor],
input_tensor: Tensor,
) -> Work: ...
def _allgather_base(
self,
output: Tensor,
input: Tensor,
opts = AllGatherOptions(),
) -> Work: ...
def allgather_coalesced(
self,
output_lists: List[List[Tensor]],
input_list: List[Tensor],
opts=AllGatherOptions(),
) -> Work: ...
@overload
def gather(
self,
output_tensors: List[List[Tensor]],
input_tensors: List[Tensor],
opts=GatherOptions(),
) -> Work: ...
@overload
def gather(
self,
output_tensors: List[Tensor],
input_tensor: Tensor,
root: int,
) -> Work: ...
@overload
def scatter(
self,
output_tensors: List[Tensor],
input_tensors: List[List[Tensor]],
opts=ScatterOptions(),
) -> Work: ...
@overload
def scatter(
self,
output_tensor: Tensor,
input_tensors: List[Tensor],
root: int,
) -> Work: ...
@overload
def reduce_scatter(
self,
output_tensors: List[Tensor],
input_tensors: List[List[Tensor]],
opts=ReduceScatterOptions(),
) -> Work: ...
@overload
def reduce_scatter(
self,
output_tensors: Tensor,
input_tensor: List[Tensor],
) -> Work: ...
@overload
def alltoall_base(
self,
output_tensor: Tensor,
input_tensor: Tensor,
output_split_sizes: List[int],
input_split_sizes: List[int],
opts=AllToAllOptions(),
) -> Work: ...
@overload
def alltoall_base(
self,
output: Tensor,
input: Tensor,
output_split_sizes: List[int],
input_split_sizes: List[int],
) -> Work: ...
@overload
def alltoall(
self,
output_tensor: List[Tensor],
input_tensor: List[Tensor],
opts=AllToAllOptions(),
) -> Work: ...
@overload
def alltoall(
self,
output: List[Tensor],
input: List[Tensor],
) -> Work: ...
def send(
self,
tensors: List[Tensor],
dstRank: int,
tag: int,
) -> Work: ...
def recv(
self,
tensors: List[Tensor],
srcRank: int,
tag: int,
) -> Work: ...
def recv_anysource(self, tensors: List[Tensor], tag: int) -> Work: ...
def barrier(self, opts=BarrierOptions()) -> Work: ...
class ProcessGroupRoundRobin(ProcessGroup): ...
def _round_robin_process_groups(
process_groups: List[ProcessGroup],
) -> ProcessGroupRoundRobin: ...
class ProcessGroupGloo(ProcessGroup):
class Device: ...
class Options: ...
def __init__(
self,
store: Store,
rank: int,
size: int,
timeout: timedelta,
): ...
@staticmethod
def create_device(hostname=str(), interface=str()) -> Device: ...
...
@staticmethod
def create_default_device() -> Device: ...
...
class _ProcessGroupWrapper(ProcessGroup):
def __init__(
self,
pg: ProcessGroup,
gloo_pg: ProcessGroupGloo
): ...
class ProcessGroupNCCL(ProcessGroup):
class Options: ...
def __init__(
self,
store: Store,
rank: int,
size: int,
timeout: timedelta,
): ...
@staticmethod
def _group_start() -> None: ...
@staticmethod
def _group_end() -> None: ...
...
class ProcessGroupMPI(ProcessGroup):
def __init__(
self,
rank: int,
size: int,
pgComm: int,
): ...
@staticmethod
def create(ranks: List[int]) -> ProcessGroupMPI: ...
def _compute_bucket_assignment_by_size(
tensors: List[Tensor],
bucket_size: int,
expect_sparse_gradient: List[bool],
tensor_indices: List[int],
) -> List[List[int]]: ...
def _broadcast_coalesced(
process_group: ProcessGroup,
tensors: List[Tensor],
buffer_size: int,
src: int,
): ...
def _test_python_store(store: Store): ...
def _verify_model_across_ranks(
process_group: ProcessGroup, replicas: List[List[Tensor]]
): ...
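# Illustrative sketch (hedged): these private bindings are normally reached
# through the public torch.distributed wrappers; the host/port/rank values
# below are hypothetical.
#   import torch
#   import torch.distributed as dist
#   store = dist.TCPStore("127.0.0.1", 29500, 2, True)   # maps to TCPStore above
#   dist.init_process_group("gloo", store=store, rank=0, world_size=2)
#   dist.all_reduce(torch.ones(1))                       # uses ProcessGroup.allreduce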
| [
"[email protected]"
] | |
2f0c54950c0ab91c97cc12a6797f81d91b85ec0b | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/stmt_for_str_same_var-9.py | f87144cd7e69039ad5482181a2f622e922dcd6a8 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | x:str = $STRING
for x in x:
print(x)
| [
"[email protected]"
] | |
6c6e0e149c60270d3573a57dfa2fd3aa115c5361 | e1fe1ed4f2ba8ab0146ce7c08d65bc7947150fc8 | /credit11315/spiders/no_redis_detail_info_scrapy.py | 3fb233a4bbd3603929edc4969dbae24a6847b673 | [] | no_license | yidun55/credit11315 | 0d88ceef314efa444de58eb5da8939c1acff3abe | b048ec9db036a382287d5faacb9490ccbf50735c | refs/heads/master | 2021-01-20T01:03:30.617914 | 2015-07-31T09:58:24 | 2015-07-31T09:58:24 | 38,853,611 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,621 | py | #!/usr/bin/env python
#coding: utf-8
"""
Crawl enterprise information from the 11315 national enterprise
credit system at http://www.11315.com/
"""
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy import log
from scrapy import signals
from scrapy import Selector
from scrapy.exceptions import DontCloseSpider
import sys
from credit11315.items import *
from credit11315.middlewares import UnknownResponseError, ForbbidenResponseError
from credit11315.tool.for_ominated_strip import for_ominated_data
from credit11315.tool.for_JCXX import extract_combine_JCXX
from credit11315.tool.for_all_blocks_info_extract import block_info_extract
from credit11315.tool.for_fundation_info_extract import fundation_info_extract
import HTMLParser
import redis
import urllib2
reload(sys)
sys.setdefaultencoding("utf-8")
class GetDetailInfo(Spider):
"""
从redis上读取url,并提取企业的信息
"""
name = 'noredisdetail'
start_urls = ['http://www.11315.com']
def set_crawler(self,crawler):
super(GetDetailInfo, self).set_crawler(crawler)
self.crawler.signals.connect(self.spider_idle,\
signal=signals.spider_idle)
def spider_idle(self):
raise DontCloseSpider
def parse(self,response):
        urlPath = '/home/dyh/data/credit11315/detailUrl/uniq_all_detail_url'
f = open(urlPath, "r")
for url in f:
            yield Request(url.strip(), callback=self.my_parse, dont_filter=True)
def my_parse(self, response):
"""
        Parse the detail page and extract company information.
"""
sel = Selector(text=response.body)
        print len(sel.xpath(u"//b[text()='单位名称']")) != 0, "parse condition"
        log.msg("parse condition=%s" % str(len(sel.xpath(u"//b[text()='单位名称']")) != 0), level=log.INFO)
        if (len(sel.xpath(u"//b[text()='单位名称']")) != 0):  # check whether this is a captcha page instead of the detail page
pass
else:
log.msg("code=%s, %s"%(str(response.status),response.body), level=log.INFO)
raise UnknownResponseError
#========================================================
"""
        Part 1: enterprise credit profile
"""
item = DetailInformation()
item['basic_info'] = fundation_info_extract(response)
#========================================================
#========================================================
"""
        Part 2: government regulatory information
"""
item['regulatory_info'] = extract_combine_JCXX(response)
#========================================================
#========================================================
"""
        Part 3: industry evaluation information
"""
keywords_list = ['2-1.体系/产品/行业认证信息',
'2-2.行业协会(社会组织)评价信息',\
'2-3.水电气通讯等公共事业单位评价']
        item['envaluated_info'] = block_info_extract(response, keywords_list)
#========================================================
"""
        Part 4: media evaluation information
"""
keywords_list = ['3-1.媒体评价信息']
item['media_env'] = block_info_extract(response, keywords_list)
#========================================================
"""
        Part 5: finance and credit information
"""
#url = 'http://www.11315.com/\
#getTradeLendingCount?companyId=%s'%response.url[7:15]
#header = {'User-Agent':"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.89 Safari/537.36",
# 'Referer':response.url}
#req = urllib2.Request(url=url, headers=header)
#xtml = urllib2.urlopen(req)
#Nums = xtml.read()
#print Nums, "this is Nums"
#Nums = eval(Nums).split(",")
#print Nums, "this is anothor Nums"
#total = str(sum([int(i) for i in Nums]))
        #Nums.insert(0, total) # insert at the front
#if total == '0':
# t_url = ""
#else:
# t_url = sel.xpath(u"//script").re(ur"html\(\'<a href=\"([\w\W]*?)\"")[0]
#Nums.append(t_url)
#Nums_re = "|".join(Nums)
keywords_list = ['4-2.民间借贷评价信息']
item["credit_fin"] = block_info_extract(response, keywords_list)
#=======================================================
"""
        Part 6: business operations information
"""
#keywords_list = ['5-3.水电煤气电话费信息',
        #'5-4.纳税信息'] # would need to run JS or fake the request; hardly worth it for two rows of data
#item['operation_info'] = block_info_extract(response, keywords_list)
#========================================================
"""
        Part 7: market feedback information
"""
keywords_list = ['6-1.消费者评价信息',
'6-2.企业之间履约评价','6-3.员工评价信息',
'6-4.其他']
item['feedback_info'] = block_info_extract(response, keywords_list)
#========================================================
return item
#else:
# print "raise unknownresponseError in spider", response.request.meta
# #raise UnknownResponseError
# #raise ForbbidenResponseError("work or no nnnnnn")
# request = response.request
# retryreq = request.copy()
# retryreq.dont_filter = True
# log.msg("UnknowResponseError %s"%response.body, level=log.INFO)
# yield retryreq
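# Illustrative (hedged) invocation, assuming a standard Scrapy project layout
# around this file; `noredisdetail` is the spider `name` defined above:
#   scrapy crawl noredisdetail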
| [
"[email protected]"
] | |
dd755e188ecdc91d717143c7321fa6eaf8bdb91a | 99dcb18a9e3ea367272f740b8cbf3c34285a0c08 | /.sample_configs/param_handlers/cancel_data_labeling_job_sample.py | b11a0a22ff20d8c981aefa487bf8a6a00db46715 | [
"Apache-2.0"
] | permissive | googleapis/python-aiplatform | 926a4873f35dbea15b2fd86c0e16b5e6556d803e | 76b95b92c1d3b87c72d754d8c02b1bca652b9a27 | refs/heads/main | 2023-08-19T23:49:02.180075 | 2023-08-19T13:25:59 | 2023-08-19T13:27:27 | 298,017,988 | 418 | 240 | Apache-2.0 | 2023-09-14T21:08:33 | 2020-09-23T15:43:39 | Python | UTF-8 | Python | false | false | 716 | py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def make_name(name: str) -> str:
# Sample function parameter name in cancel_data_labeling_job_sample
name = name
return name
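# Illustrative example (hedged): make_name is a pass-through for the fully
# qualified resource name; the value below is hypothetical.
#   >>> make_name("projects/p/locations/us-central1/dataLabelingJobs/123")
#   'projects/p/locations/us-central1/dataLabelingJobs/123'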
| [
"[email protected]"
] | |
f36e4bac20c903f91a082d88b22b765caafeac35 | a708f1d36586d2b01c99f2cb44aa4612b10192f6 | /周赛/week183/5376非递增顺序.py | eb36dbf409919c8981dd21302f726a93af63edc0 | [] | no_license | LeopoldACC/Algorithm | 2477e8a371e9cdc5a47b582ca2a454539b96071e | fc1b0bec0e28d31e9a6ff722b3a66eacb0278148 | refs/heads/master | 2023-01-25T02:28:14.422447 | 2020-12-03T15:01:10 | 2020-12-03T15:01:10 | 197,297,197 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | class Solution:  ### final version
def minSubsequence(self, nums):
nums = sorted(nums)
prefix_sum = nums[:]
for i in range(len(nums)-2,-1,-1):
prefix_sum[i]+=prefix_sum[i+1]
index = -1
for i in range(len(nums)-1,-1,-1):
if prefix_sum[i]>prefix_sum[0]//2:
index = i
break
return nums[index:][::-1]
class Solution0:
def minSubsequence(self, nums):
nums = sorted(nums)
        prefix_sum = nums[:]
        for i in range(1, len(nums)):
            prefix_sum[i] = prefix_sum[i-1] + nums[i]
target = prefix_sum[-1]//2
index = self.bisec(prefix_sum,target)
return nums[index:][::-1]
def bisec(self,prefix,target):
start,end = 0,len(prefix)-1
while start+1<end:
mid = (start+end)//2
if prefix[mid]<=target:
start = mid
else:
end = mid
return end if prefix[end]>target else start
s = Solution()
print(s.minSubsequence([4,4,7,6,7]))
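# Worked example for the suffix-sum approach in Solution above:
# [4,4,7,6,7] -> sorted [4,4,6,7,7], total 28, half 14;
# suffix sums are [28,24,20,14,7], and the largest index whose suffix sum
# exceeds 14 is 2, so the answer is nums[2:] reversed -> [7, 7, 6]
# (sum 20 > remaining 8).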
"[email protected]"
] | |
499a03357c8ae0101e94a0ea850bdfd693fd861f | 77fc5af96da1d461c86c7f9668b64b99ca04a1b6 | /codes/montecarlo.py | 32c0ce13aa246aa42786f17cd7c0371a3c56965c | [] | no_license | rene-d/edupython | 5b6bc8ddb5eb8ec896ee70fb961d4e689af1075a | 1261d0c7aae17bb2d4ff3370860768b73ba4172d | refs/heads/master | 2020-11-24T10:07:18.504472 | 2019-12-21T21:03:08 | 2019-12-21T21:03:08 | 228,099,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # Splitting a square into 3 zones
# https://edupython.tuxfamily.org/sources/view.php?code=montecarlo
# The zones are the regions of the plane bounded by the curves of the
# square and square-root functions, inside the unit square, in an
# orthonormal coordinate system.
# The areas are obtained by the Monte Carlo method:
# a point is drawn at random in the unit square 10,000 times,
# and the area of each region is estimated from the hit counts.
from random import random
from math import sqrt
a, b, c = 0, 0, 0
for i in range(10000):
    x, y = random(), random()
    if y > sqrt(x): a = a + 1
    elif y > x * x: b = b + 1
    else: c = c + 1
print ("On est dans la zone A", a, "fois sur 10 000.")
print ("On est dans la zone B", b, "fois sur 10 000.")
print ("On est dans la zone C", c, "fois sur 10 000.")
print ("Donc les aires respectives des zones A, B et C",end="")
print ("sont estimées à", a / 10000, ",", b / 10000, "et", c / 10000, "unités d'aire.")
| [
"[email protected]"
] | |
80f832455983b37492aabb45c118bac2cd8e5ae4 | e49f2251e07a70c943b70bbae27c439631a31552 | /tfx/components/model_validator/component.py | d6bc178a4182f17e0e083067fa3c9cd3df96c6b5 | [
"Apache-2.0"
] | permissive | hephaex/tfx | eac03c1ab670368088ec2a49af28ff374dc95c4a | 76d8731cb54be3451e10d270d8bcb0589401135f | refs/heads/master | 2020-09-16T11:52:06.198631 | 2019-11-23T21:01:50 | 2019-11-23T21:45:46 | 223,760,941 | 1 | 0 | Apache-2.0 | 2019-11-24T14:53:08 | 2019-11-24T14:53:08 | null | UTF-8 | Python | false | false | 4,026 | py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFX ModelValidator component definition."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Text
from tfx import types
from tfx.components.base import base_component
from tfx.components.base import executor_spec
from tfx.components.model_validator import driver
from tfx.components.model_validator import executor
from tfx.types import standard_artifacts
from tfx.types.standard_component_specs import ModelValidatorSpec
class ModelValidator(base_component.BaseComponent):
"""A TFX component to validate a newly trained model against a prior model.
The model validator component can be used to check model metrics threshold
and validate current model against a previously validated model. If there
isn't a prior validated model, model validator will just make sure the
threshold passed. Otherwise, ModelValidator compares a newly trained models
against a known good model, specifically the last model "blessed" by this
component. A model is "blessed" if the exported model's metrics are within
predefined thresholds around the prior model's metrics.
*Note:* This component includes a driver to resolve last blessed model.
## Possible causes why model validation fails
Model validation can fail for many reasons, but these are the most common:
- problems with training data. For example, negative examples are dropped or
features are missing.
- problems with the test or evaluation data. For example, skew exists between
the training and evaluation data.
- changes in data distribution. This indicates the user behavior may have
changed over time.
- problems with the trainer. For example, the trainer was stopped before
model is converged or the model is unstable.
## Example
```
# Performs quality validation of a candidate model (compared to a baseline).
model_validator = ModelValidator(
examples=example_gen.outputs['examples'],
model=trainer.outputs['model'])
```
"""
SPEC_CLASS = ModelValidatorSpec
EXECUTOR_SPEC = executor_spec.ExecutorClassSpec(executor.Executor)
DRIVER_CLASS = driver.Driver
def __init__(self,
examples: types.Channel,
model: types.Channel,
blessing: Optional[types.Channel] = None,
instance_name: Optional[Text] = None):
"""Construct a ModelValidator component.
Args:
examples: A Channel of 'ExamplesPath' type, usually produced by
[ExampleGen](https://www.tensorflow.org/tfx/guide/examplegen) component.
_required_
model: A Channel of 'ModelExportPath' type, usually produced by
[Trainer](https://www.tensorflow.org/tfx/guide/trainer) component.
_required_
blessing: Output channel of 'ModelBlessingPath' that contains the
validation result.
instance_name: Optional name assigned to this specific instance of
ModelValidator. Required only if multiple ModelValidator components are
declared in the same pipeline.
"""
blessing = blessing or types.Channel(
type=standard_artifacts.ModelBlessing,
artifacts=[standard_artifacts.ModelBlessing()])
spec = ModelValidatorSpec(examples=examples, model=model, blessing=blessing)
super(ModelValidator, self).__init__(spec=spec, instance_name=instance_name)
| [
"[email protected]"
] | |
02275546e99d17fd7465ff2cbf3e4eacf57003e3 | a64757759a7170478ad3e9c71429c484491426be | /autoconv.py | 85e7b1b45b29a9ad766ab17c57193b68b5453c93 | [] | no_license | fy0/autoconv | 940928810bcda472bf401c14c2452ef64359fd9c | 1073934a0d03eba5e5192ffb583629308ff74d13 | refs/heads/master | 2021-07-01T01:30:25.249292 | 2017-09-18T08:11:57 | 2017-09-18T08:11:57 | 103,892,929 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,704 | py | """
{
"global": {
"encoder": "opusenc.exe",
"input_dir": "input",
"output_dir": "output",
"watch_ext": [".wav"],
"output_ext": ".opus"
},
"types": {
"music": {
"--title": "track title"
}
}
}
"""
import os
import json
import time
import subprocess
from shlex import quote
from pathlib import Path # py3.4+
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
config = {}
def convert(type_name, filepath):
if len(filepath.parts) == 1:
type_name = ''
if filepath.suffix not in config['global']['watch_ext']:
return
    params = []
    if type_name in config['types']:
        typeinfo = config['types'][type_name]
        for k, v in typeinfo.items():
            params.append('%s %s' % (k, v))
out_path = Path(config['global']['output_dir']).joinpath(filepath)
out_ext = config['global']['output_ext']
encoder = subprocess.list2cmdline([config['global']['encoder']])
cmd = [
str(Path(config['global']['input_dir']).joinpath(filepath)),
str(out_path)[:-len(out_path.suffix)] + out_ext # .absolute()
]
os.makedirs(os.path.dirname(out_path), exist_ok=True)
    cmd_txt = encoder + ' ' + ' '.join(params) + ' ' + subprocess.list2cmdline(cmd)
print('Running: %s' % cmd_txt)
os.system(cmd_txt)
return True
class FileEventHandler(FileSystemEventHandler):
def on_moved(self, event):
if event.is_directory:
print("directory moved from {0} to {1}".format(event.src_path,event.dest_path))
else:
path = Path(event.dest_path).relative_to(config['global']['input_dir'])
if convert(path.parts[0], path):
#print("file moved from {0} to {1}".format(event.src_path,event.dest_path))
print('[Encoded] %s' % event.src_path)
def on_modified(self, event):
if not event.is_directory:
path = Path(event.src_path).relative_to(config['global']['input_dir'])
if convert(path.parts[0], path):
#print("file modified: %s" % event.src_path)
print('[Encoded] %s' % event.src_path)
def main():
global config
config = json.loads(open('config.json', encoding='utf-8').read())
observer = Observer()
event_handler = FileEventHandler()
observer.schedule(event_handler, config['global']['input_dir'], True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
if __name__ == '__main__':
main()
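# Illustrative (hedged) run, assuming the config.json sketched in the module
# docstring sits next to this script:
#   python autoconv.py
# Dropping input/music/track.wav into the watched tree would then invoke the
# configured encoder and produce output/music/track.opus; paths are examples.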
| [
"[email protected]"
] | |
73a032a0612a77d65c7a07200994e83c69f92ce3 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_2751486_0/Python/TheTaintedOne/solution.py | 746f30c4b1aac93a50dee4c7e781e4c5746595eb | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | import sys
lines = sys.stdin.readlines()
ntests = int(lines[0])
vowels = set(["a", "e", "i", "o", "u"])
linenum = 1;
for c in xrange(0, ntests):
name, csize = lines[linenum].split()
csize = int(csize)
# print "[" + name + "]"
# print start_size, num_others
cons = [];
for cc in name:
if cc in vowels:
cons.append(0)
else:
cons.append(1)
# print cons
runs = [];
curr_run = 0;
for pos in xrange(len(name)):
if cons[pos]==1:
curr_run = curr_run + 1
else:
curr_run = 0
if curr_run>= csize:
runs.append((pos, curr_run))
# print runs
res = 0
list_pos = 0
for pos in xrange(len(name)):
if list_pos < len(runs):
if pos>runs[list_pos][0]-csize+1:
list_pos = list_pos+1
if list_pos < len(runs):
res = res + (len(name)-runs[list_pos][0])
# print pos, runs[list_pos]
print "Case #" + str(c+1) + ": ", str(res)
linenum = linenum + 1
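# Worked example of the run-tracking logic above: name = "quartz", csize = 2.
# Consonant runs of length >= 2 end at positions 4 ("rt") and 5 ("rtz"), so
# runs = [(4, 2), (5, 3)]. Each start position is charged
# len(name) - <first qualifying run end>, giving 2+2+2+2+1+0 = 9 substrings
# that contain at least 2 consecutive consonants.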
| [
"[email protected]"
] | |
be22c1e11ed28eafca08cf5bcfe0da6b20b66836 | 7c13de6b7831f99b8790452e03953e5ded0aca64 | /classy_vision/generic/distributed_util.py | ec5211f4123496d80e7c9396eef6df0d0e8b1338 | [
"MIT"
] | permissive | vreis/ClassyVision-2 | 3f99d3c06ec422e81e29b0f38f02a7ce56e480d6 | 80aa4d421d1203b4b92bb9b848ccc866816e4f6d | refs/heads/master | 2021-07-15T18:03:14.212417 | 2019-12-06T16:48:19 | 2019-12-06T16:50:46 | 226,377,934 | 0 | 0 | MIT | 2019-12-06T17:27:46 | 2019-12-06T17:27:45 | null | UTF-8 | Python | false | false | 5,284 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
# Default to GPU 0
_cuda_device_index: int = 0
# Setting _cuda_device_index to -1 internally implies that we should use CPU
_CPU_DEVICE_INDEX = -1
def convert_to_distributed_tensor(tensor):
"""
For some backends, such as NCCL, communication only works if the
tensor is on the GPU. This helper function converts to the correct
device and returns the tensor + original device.
"""
orig_device = "cpu" if not tensor.is_cuda else "gpu"
if (
torch.distributed.is_available()
and torch.distributed.get_backend() == torch.distributed.Backend.NCCL
and not tensor.is_cuda
):
tensor = tensor.cuda()
return (tensor, orig_device)
def convert_to_normal_tensor(tensor, orig_device):
"""
For some backends, such as NCCL, communication only works if the
tensor is on the GPU. This converts the tensor back to original device.
"""
if tensor.is_cuda and orig_device == "cpu":
tensor = tensor.cpu()
return tensor
def is_distributed_training_run():
return (
torch.distributed.is_available()
and torch.distributed.is_initialized()
and (torch.distributed.get_world_size() > 1)
)
def is_master():
"""
Returns True if this is rank 0 of a distributed training job OR if it is
a single trainer job. Otherwise False.
"""
return get_rank() == 0
def all_reduce_mean(tensor):
"""
Wrapper over torch.distributed.all_reduce for performing mean reduction
of tensor over all processes.
"""
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
torch.distributed.all_reduce(tensor, torch.distributed.ReduceOp.SUM)
tensor = tensor / torch.distributed.get_world_size()
tensor = convert_to_normal_tensor(tensor, orig_device)
return tensor
def all_reduce_sum(tensor):
"""
Wrapper over torch.distributed.all_reduce for performing sum
reduction of tensor over all processes in both distributed /
non-distributed scenarios.
"""
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
torch.distributed.all_reduce(tensor, torch.distributed.ReduceOp.SUM)
tensor = convert_to_normal_tensor(tensor, orig_device)
return tensor
def gather_tensors_from_all(tensor):
"""
Wrapper over torch.distributed.all_gather for performing
'gather' of 'tensor' over all processes in both distributed /
non-distributed scenarios.
"""
if tensor.ndim == 0:
# 0 dim tensors cannot be gathered. so unsqueeze
tensor = tensor.unsqueeze(0)
if is_distributed_training_run():
tensor, orig_device = convert_to_distributed_tensor(tensor)
gathered_tensors = [
torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(gathered_tensors, tensor)
gathered_tensors = [
convert_to_normal_tensor(_tensor, orig_device)
for _tensor in gathered_tensors
]
else:
gathered_tensors = [tensor]
return gathered_tensors
def gather_from_all(tensor):
gathered_tensors = gather_tensors_from_all(tensor)
gathered_tensor = torch.cat(gathered_tensors, 0)
return gathered_tensor
def barrier():
"""
Wrapper over torch.distributed.barrier, returns without waiting
if the distributed process group is not initialized instead of throwing error.
"""
if not torch.distributed.is_available() or not torch.distributed.is_initialized():
return
torch.distributed.barrier()
def get_world_size():
"""
Simple wrapper for correctly getting worldsize in both distributed
/ non-distributed settings
"""
return (
torch.distributed.get_world_size()
if torch.distributed.is_available() and torch.distributed.is_initialized()
else 1
)
def get_rank():
"""
Simple wrapper for correctly getting rank in both distributed
/ non-distributed settings
"""
return (
torch.distributed.get_rank()
if torch.distributed.is_available() and torch.distributed.is_initialized()
else 0
)
def set_cuda_device_index(idx: int):
global _cuda_device_index
_cuda_device_index = idx
torch.cuda.set_device(_cuda_device_index)
def set_cpu_device():
global _cuda_device_index
_cuda_device_index = _CPU_DEVICE_INDEX
def get_cuda_device_index() -> int:
return _cuda_device_index
def init_distributed_data_parallel_model(model):
global _cuda_device_index
if _cuda_device_index == _CPU_DEVICE_INDEX:
# CPU-only model, don't specify device
return torch.nn.parallel.DistributedDataParallel(model, broadcast_buffers=False)
else:
# GPU model
return torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[_cuda_device_index],
output_device=_cuda_device_index,
broadcast_buffers=False,
)
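# Illustrative sketch (hedged): typical per-process wiring of the helpers
# above; local_rank / model / loss are hypothetical.
#   set_cuda_device_index(local_rank)                 # or set_cpu_device()
#   ddp_model = init_distributed_data_parallel_model(model)
#   loss_mean = all_reduce_mean(loss.detach())        # averaged across ranks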
| [
"[email protected]"
] | |
b34e8eb425d1099c4b4358e74814477818dfc003 | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/vulkan-deps/vulkan-validation-layers/src/scripts/common_ci.py | 88e5f1af7cda75fbf03793a27e6bf5f32ce53c52 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 10,787 | py | #!/usr/bin/python3 -i
#
# Copyright (c) 2015-2017, 2019-2023 The Khronos Group Inc.
# Copyright (c) 2015-2017, 2019-2023 Valve Corporation
# Copyright (c) 2015-2017, 2019-2023 LunarG, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
import platform
import shutil
import argparse
if sys.version_info[0] != 3:
print("This script requires Python 3. Run script with [-h] option for more details.")
    sys.exit(0)
# Use Ninja for all platforms for performance/simplicity
os.environ['CMAKE_GENERATOR'] = "Ninja"
# Utility for creating a directory if it does not exist. Behaves similarly to 'mkdir -p'
def make_dirs(path, clean=False):
if clean and os.path.isdir(path):
shutil.rmtree(path)
os.makedirs(path, exist_ok=True)
# helper to define paths relative to the repo root
def RepoRelative(path):
return os.path.abspath(os.path.join(os.path.dirname(__file__), '..', path))
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.split(os.path.abspath(__file__))[0], '..'))
# TODO: Pass this in as arg, may be useful for running locally
EXTERNAL_DIR_NAME = "external"
BUILD_DIR_NAME = "build"
VVL_BUILD_DIR = RepoRelative(BUILD_DIR_NAME)
TEST_INSTALL_DIR = RepoRelative("build/install")
def externalDir(config): return os.path.join(RepoRelative(EXTERNAL_DIR_NAME), config)
# Runs a command in a directory and returns its return code.
# Directory is project root by default, or a relative path from project root
def RunShellCmd(command, start_dir = PROJECT_ROOT, env=None, verbose=False):
if start_dir != PROJECT_ROOT:
start_dir = RepoRelative(start_dir)
cmd_list = command.split(" ")
if verbose or ('VVL_CI_VERBOSE' in os.environ and os.environ['VVL_CI_VERBOSE'] != '0'):
print(f'CICMD({cmd_list}, env={env})')
subprocess.check_call(cmd_list, cwd=start_dir, env=env)
#
# Check if the system is Windows
def IsWindows(): return 'windows' == platform.system().lower()
#
# Set MACOSX_DEPLOYMENT_TARGET
def SetupDarwin(osx):
if platform.system() != "Darwin":
return
# By default it will use the latest MacOS SDK available on the system.
if osx == 'latest':
return
# Currently the Vulkan SDK targets 10.15 as the minimum for MacOS support.
# If we need to we can raise the minimim like we did for C++17 support.
os.environ['MACOSX_DEPLOYMENT_TARGET'] = "10.15"
print(f"Targeting {os.environ['MACOSX_DEPLOYMENT_TARGET']} MacOS Deployment Target", flush=True)
#
# Run VVL scripts
def CheckVVL(config):
ext_dir = externalDir(config)
vulkan_registry = ext_dir + "/Vulkan-Headers/registry"
spirv_unified = ext_dir + "/SPIRV-Headers/include/spirv/unified1/"
# Verify consistency of generated source code
print("Check Generated Source Code Consistency")
gen_check_cmd = f'python scripts/generate_source.py --verify {vulkan_registry} {spirv_unified}'
RunShellCmd(gen_check_cmd)
print('Run vk_validation_stats.py')
valid_usage_json = vulkan_registry + "/validusage.json"
text_file = RepoRelative(f'{VVL_BUILD_DIR}/layers/vuid_coverage_database.txt')
gen_check_cmd = f'python scripts/vk_validation_stats.py {valid_usage_json} -text {text_file}'
RunShellCmd(gen_check_cmd)
#
# Prepare the Validation Layers for testing
def BuildVVL(config, cmake_args, build_tests):
print("Log CMake version")
cmake_ver_cmd = 'cmake --version'
RunShellCmd(cmake_ver_cmd)
print("Run CMake for Validation Layers")
cmake_cmd = f'cmake -S . -B {VVL_BUILD_DIR} -DUPDATE_DEPS=ON -DCMAKE_BUILD_TYPE={config}'
# By default BUILD_WERROR is OFF, CI should always enable it.
cmake_cmd += ' -DBUILD_WERROR=ON'
cmake_cmd += f' -DBUILD_TESTS={build_tests}'
if cmake_args:
cmake_cmd += f' {cmake_args}'
RunShellCmd(cmake_cmd)
print("Build Validation Layers and Tests")
build_cmd = f'cmake --build {VVL_BUILD_DIR}'
RunShellCmd(build_cmd)
print("Install Validation Layers")
install_cmd = f'cmake --install {VVL_BUILD_DIR} --prefix {TEST_INSTALL_DIR}'
RunShellCmd(install_cmd)
#
# Prepare Loader for executing Layer Validation Tests
def BuildLoader():
LOADER_DIR = RepoRelative(os.path.join("%s/Vulkan-Loader" % EXTERNAL_DIR_NAME))
# Clone Loader repo
if not os.path.exists(LOADER_DIR):
print("Clone Loader Source Code")
clone_loader_cmd = 'git clone https://github.com/KhronosGroup/Vulkan-Loader.git'
RunShellCmd(clone_loader_cmd, EXTERNAL_DIR_NAME)
print("Run CMake for Loader")
LOADER_BUILD_DIR = RepoRelative("%s/Vulkan-Loader/%s" % (EXTERNAL_DIR_NAME, BUILD_DIR_NAME))
print("Run CMake for Loader")
cmake_cmd = f'cmake -S {LOADER_DIR} -B {LOADER_BUILD_DIR}'
cmake_cmd += ' -D UPDATE_DEPS=ON -D BUILD_TESTS=OFF -D CMAKE_BUILD_TYPE=Release'
# This enables better stack traces from tools like leak sanitizer by using the loader feature which prevents unloading of libraries at shutdown.
cmake_cmd += ' -D LOADER_DISABLE_DYNAMIC_LIBRARY_UNLOADING=ON'
if not IsWindows():
cmake_cmd += ' -D LOADER_ENABLE_ADDRESS_SANITIZER=ON'
RunShellCmd(cmake_cmd)
print("Build Loader")
build_cmd = f'cmake --build {LOADER_BUILD_DIR}'
RunShellCmd(build_cmd)
print("Install Loader")
install_cmd = f'cmake --install {LOADER_BUILD_DIR} --prefix {TEST_INSTALL_DIR}'
RunShellCmd(install_cmd)
#
# Prepare Mock ICD for use with Layer Validation Tests
def BuildMockICD():
VT_DIR = RepoRelative("%s/Vulkan-Tools" % EXTERNAL_DIR_NAME)
if not os.path.exists(VT_DIR):
print("Clone Vulkan-Tools Repository")
clone_tools_cmd = 'git clone https://github.com/KhronosGroup/Vulkan-Tools.git'
RunShellCmd(clone_tools_cmd, EXTERNAL_DIR_NAME)
ICD_BUILD_DIR = RepoRelative("%s/Vulkan-Tools/%s" % (EXTERNAL_DIR_NAME,BUILD_DIR_NAME))
print("Run CMake for ICD")
cmake_cmd = f'cmake -S {VT_DIR} -B {ICD_BUILD_DIR} -D CMAKE_BUILD_TYPE=Release '
cmake_cmd += '-DBUILD_CUBE=NO -DBUILD_VULKANINFO=NO -D INSTALL_ICD=ON -D UPDATE_DEPS=ON'
RunShellCmd(cmake_cmd)
print("Build Mock ICD")
build_cmd = f'cmake --build {ICD_BUILD_DIR}'
RunShellCmd(build_cmd)
print("Install Mock ICD")
install_cmd = f'cmake --install {ICD_BUILD_DIR} --prefix {TEST_INSTALL_DIR}'
RunShellCmd(install_cmd)
#
# Prepare Profile Layer for use with Layer Validation Tests
def BuildProfileLayer():
RunShellCmd('pip3 install jsonschema', EXTERNAL_DIR_NAME)
VP_DIR = RepoRelative("%s/Vulkan-Profiles" % EXTERNAL_DIR_NAME)
if not os.path.exists(VP_DIR):
print("Clone Vulkan-Profiles Repository")
clone_cmd = 'git clone https://github.com/KhronosGroup/Vulkan-Profiles.git'
RunShellCmd(clone_cmd, EXTERNAL_DIR_NAME)
BUILD_DIR = RepoRelative("%s/Vulkan-Profiles/%s" % (EXTERNAL_DIR_NAME, BUILD_DIR_NAME))
print("Run CMake for Profile Layer")
cmake_cmd = f'cmake -S {VP_DIR} -B {BUILD_DIR}'
cmake_cmd += ' -D CMAKE_BUILD_TYPE=Release'
cmake_cmd += ' -D UPDATE_DEPS=ON'
cmake_cmd += ' -D PROFILES_BUILD_TESTS=OFF'
RunShellCmd(cmake_cmd)
print("Build Profile Layer")
build_cmd = f'cmake --build {BUILD_DIR}'
RunShellCmd(build_cmd)
print("Install Profile Layer")
install_cmd = f'cmake --install {BUILD_DIR} --prefix {TEST_INSTALL_DIR}'
RunShellCmd(install_cmd)
#
# Run the Layer Validation Tests
def RunVVLTests():
print("Run Vulkan-ValidationLayer Tests using Mock ICD")
if IsWindows():
print("Not implemented yet")
exit(-1)
lvt_cmd = os.path.join(PROJECT_ROOT, BUILD_DIR_NAME, 'tests', 'vk_layer_validation_tests')
lvt_env = dict(os.environ)
# Because we installed everything to TEST_INSTALL_DIR all the libraries/json files are in pre-determined locations
# defined by GNUInstallDirs. This makes adding the LD_LIBRARY_PATH and VK_LAYER_PATH trivial/robust.
lvt_env['LD_LIBRARY_PATH'] = os.path.join(TEST_INSTALL_DIR, 'lib')
lvt_env['VK_LAYER_PATH'] = os.path.join(TEST_INSTALL_DIR, 'share/vulkan/explicit_layer.d')
lvt_env['VK_DRIVER_FILES'] = os.path.join(TEST_INSTALL_DIR, 'share/vulkan/icd.d/VkICD_mock_icd.json')
lvt_env['VK_INSTANCE_LAYERS'] = 'VK_LAYER_KHRONOS_validation' + os.pathsep + 'VK_LAYER_KHRONOS_profiles'
lvt_env['VK_KHRONOS_PROFILES_SIMULATE_CAPABILITIES'] = 'SIMULATE_API_VERSION_BIT,SIMULATE_FEATURES_BIT,SIMULATE_PROPERTIES_BIT,SIMULATE_EXTENSIONS_BIT,SIMULATE_FORMATS_BIT,SIMULATE_QUEUE_FAMILY_PROPERTIES_BIT'
# By default use the max_profile.json
if "VK_KHRONOS_PROFILES_PROFILE_FILE" not in os.environ:
lvt_env['VK_KHRONOS_PROFILES_PROFILE_FILE'] = RepoRelative('tests/device_profiles/max_profile.json')
# By default set portability to false
if "VK_KHRONOS_PROFILES_EMULATE_PORTABILITY" not in os.environ:
lvt_env['VK_KHRONOS_PROFILES_EMULATE_PORTABILITY'] = 'false'
lvt_env['VK_KHRONOS_PROFILES_DEBUG_REPORTS'] = 'DEBUG_REPORT_ERROR_BIT'
RunShellCmd(lvt_cmd, env=lvt_env)
print("Re-Running multithreaded tests with VK_LAYER_FINE_GRAINED_LOCKING disabled")
lvt_env['VK_LAYER_FINE_GRAINED_LOCKING'] = '0'
RunShellCmd(lvt_cmd + ' --gtest_filter=*Thread*', env=lvt_env)
def GetArgParser():
configs = ['release', 'debug']
default_config = configs[0]
osx_choices = ['min', 'latest']
osx_default = osx_choices[1]
parser = argparse.ArgumentParser()
parser.add_argument(
'-c', '--config', dest='configuration',
metavar='CONFIG', action='store',
choices=configs, default=default_config,
help='Build target configuration. Can be one of: {0}'.format(
', '.join(configs)))
parser.add_argument(
'--cmake', dest='cmake',
metavar='CMAKE', type=str,
default='', help='Additional args to pass to cmake')
parser.add_argument(
'--build', dest='build',
action='store_true', help='Build the layers')
parser.add_argument(
'--test', dest='test',
action='store_true', help='Tests the layers')
parser.add_argument(
'--osx', dest='osx', action='store',
choices=osx_choices, default=osx_default,
help='Sets MACOSX_DEPLOYMENT_TARGET on Apple platforms.')
return parser
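# Illustrative (hedged) driver for the helpers above; the argument values are
# hypothetical and mirror GetArgParser's options:
#   args = GetArgParser().parse_args(['--build', '--test', '--config', 'release'])
#   SetupDarwin(args.osx)
#   if args.build:
#       BuildVVL(args.configuration, args.cmake, 'ON')
#   if args.test:
#       RunVVLTests()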
| [
"[email protected]"
] | |
c8d58bad12f2d00dbaaa0a198b391fe827e89ccc | 79a60fa1daeaa9dbe0cb551423fe28c2f2cf7da3 | /websocket/bottle/mqttwsweb.py | a0e1c343be062aba3efa12502005552a8a3d9e11 | [
"MIT"
] | permissive | swkim01/mqtt | a505494815cd5f487cbc1e434fd0546c1bc08eac | 030d9106bf791b54538aac8789df872abaa96e17 | refs/heads/master | 2021-01-10T22:07:04.788958 | 2019-07-19T01:41:07 | 2019-07-19T01:41:07 | 42,844,241 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | #-*- coding: utf-8 -*-
from bottle import route, get, request, template, response, static_file
from bottle import run
#import json
host="<HOST IP>"
port=8008
wsport=9001
@route('/mqttws31.js')
def mqttws31():
return static_file("mqttws31.js", root=".")
@get('/mqttwschart')
def dht22chart():
return template("mqttwschart", host=host, port=wsport)
@get('/')
def index():
return template("mqttwsindex", host=host, port=wsport)
if __name__ == '__main__':
run(host=host, port=port)
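# A broker must expose MQTT over WebSockets on wsport for the templates above
# to connect. Hedged example for mosquitto (mosquitto.conf); the directives
# are standard, but the port value is this script's assumption:
#   listener 9001
#   protocol websockets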
| [
"[email protected]"
] | |
80bfdaf5259b84ac700aab294d7db8d5372259c3 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02546/s914300427.py | 3dad5d294d902104d5bf9e91b64a4bd3c9b6633e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | S = input()
if(S[-1] == "s"):
S += "es"
else:
S += "s"
print(S) | [
"[email protected]"
] | |
d95e8925cc8de16faad7f38369d751625be57b1b | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/networkx-1.11-py27_0/lib/python2.7/site-packages/networkx/algorithms/centrality/betweenness.py | b8c2b125f647b1c691d87810ce7eb454c7f0ab78 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 11,680 | py | # coding=utf8
"""
Betweenness centrality measures.
"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
from heapq import heappush, heappop
from itertools import count
import networkx as nx
import random
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['betweenness_centrality',
'edge_betweenness_centrality',
'edge_betweenness']
def betweenness_centrality(G, k=None, normalized=True, weight=None,
endpoints=False,
seed=None):
r"""Compute the shortest-path betweenness centrality for nodes.
Betweenness centrality of a node `v` is the sum of the
fraction of all-pairs shortest paths that pass through `v`
.. math::
c_B(v) = \sum_{s,t \in V} \frac{\sigma(s, t|v)}{\sigma(s, t)}
where `V` is the set of nodes, `\sigma(s, t)` is the number of
shortest `(s, t)`-paths, and `\sigma(s, t|v)` is the number of those
paths passing through some node `v` other than `s, t`.
If `s = t`, `\sigma(s, t) = 1`, and if `v \in {s, t}`,
`\sigma(s, t|v) = 0` [2]_.
Parameters
----------
G : graph
A NetworkX graph
k : int, optional (default=None)
If k is not None use k node samples to estimate betweenness.
The value of k <= n where n is the number of nodes in the graph.
Higher values give better approximation.
normalized : bool, optional
If True the betweenness values are normalized by `2/((n-1)(n-2))`
for graphs, and `1/((n-1)(n-2))` for directed graphs where `n`
is the number of nodes in G.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
endpoints : bool, optional
If True include the endpoints in the shortest path counts.
Returns
-------
nodes : dictionary
Dictionary of nodes with betweenness centrality as the value.
See Also
--------
edge_betweenness_centrality
load_centrality
Notes
-----
The algorithm is from Ulrik Brandes [1]_.
See [4]_ for the original first published version and [2]_ for details on
algorithms for variations and related metrics.
For approximate betweenness calculations set k=#samples to use
k nodes ("pivots") to estimate the betweenness values. For an estimate
of the number of pivots needed see [3]_.
For weighted graphs the edge weights must be greater than zero.
Zero edge weights can produce an infinite number of equal length
paths between pairs of nodes.
References
----------
.. [1] Ulrik Brandes:
A Faster Algorithm for Betweenness Centrality.
Journal of Mathematical Sociology 25(2):163-177, 2001.
http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
.. [2] Ulrik Brandes:
On Variants of Shortest-Path Betweenness
Centrality and their Generic Computation.
Social Networks 30(2):136-145, 2008.
http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
.. [3] Ulrik Brandes and Christian Pich:
Centrality Estimation in Large Networks.
International Journal of Bifurcation and Chaos 17(7):2303-2318, 2007.
http://www.inf.uni-konstanz.de/algo/publications/bp-celn-06.pdf
.. [4] Linton C. Freeman:
A set of measures of centrality based on betweenness.
Sociometry 40: 35–41, 1977
http://moreno.ss.uci.edu/23.pdf
"""
betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
if k is None:
nodes = G
else:
random.seed(seed)
nodes = random.sample(G.nodes(), k)
for s in nodes:
# single source shortest paths
if weight is None: # use BFS
S, P, sigma = _single_source_shortest_path_basic(G, s)
else: # use Dijkstra's algorithm
S, P, sigma = _single_source_dijkstra_path_basic(G, s, weight)
# accumulation
if endpoints:
betweenness = _accumulate_endpoints(betweenness, S, P, sigma, s)
else:
betweenness = _accumulate_basic(betweenness, S, P, sigma, s)
# rescaling
betweenness = _rescale(betweenness, len(G),
normalized=normalized,
directed=G.is_directed(),
k=k)
return betweenness
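# Editor's sketch (illustrative, not in the NetworkX source): on a 4-node
# path graph the two interior nodes carry every shortest path between the
# endpoints, e.g.
#     >>> G = nx.path_graph(4)
#     >>> betweenness_centrality(G)   # rescaled by 1/((n-1)(n-2))
#     {0: 0.0, 1: 0.6666..., 2: 0.6666..., 3: 0.0}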
def edge_betweenness_centrality(G, k=None, normalized=True, weight=None,
seed=None):
r"""Compute betweenness centrality for edges.
Betweenness centrality of an edge `e` is the sum of the
fraction of all-pairs shortest paths that pass through `e`
.. math::
c_B(e) = \sum_{s,t \in V} \frac{\sigma(s, t|e)}{\sigma(s, t)}
where `V` is the set of nodes, `\sigma(s, t)` is the number of
shortest `(s, t)`-paths, and `\sigma(s, t|e)` is the number of
those paths passing through edge `e` [2]_.
Parameters
----------
G : graph
A NetworkX graph
k : int, optional (default=None)
If k is not None use k node samples to estimate betweenness.
The value of k <= n where n is the number of nodes in the graph.
Higher values give better approximation.
normalized : bool, optional
If True the betweenness values are normalized by `2/(n(n-1))`
for graphs, and `1/(n(n-1))` for directed graphs where `n`
is the number of nodes in G.
weight : None or string, optional
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
Returns
-------
edges : dictionary
Dictionary of edges with betweenness centrality as the value.
See Also
--------
betweenness_centrality
edge_load
Notes
-----
The algorithm is from Ulrik Brandes [1]_.
For weighted graphs the edge weights must be greater than zero.
Zero edge weights can produce an infinite number of equal length
paths between pairs of nodes.
References
----------
.. [1] A Faster Algorithm for Betweenness Centrality. Ulrik Brandes,
Journal of Mathematical Sociology 25(2):163-177, 2001.
http://www.inf.uni-konstanz.de/algo/publications/b-fabc-01.pdf
.. [2] Ulrik Brandes: On Variants of Shortest-Path Betweenness
Centrality and their Generic Computation.
Social Networks 30(2):136-145, 2008.
http://www.inf.uni-konstanz.de/algo/publications/b-vspbc-08.pdf
"""
betweenness = dict.fromkeys(G, 0.0) # b[v]=0 for v in G
# b[e]=0 for e in G.edges()
betweenness.update(dict.fromkeys(G.edges(), 0.0))
if k is None:
nodes = G
else:
random.seed(seed)
nodes = random.sample(G.nodes(), k)
for s in nodes:
# single source shortest paths
if weight is None: # use BFS
S, P, sigma = _single_source_shortest_path_basic(G, s)
else: # use Dijkstra's algorithm
S, P, sigma = _single_source_dijkstra_path_basic(G, s, weight)
# accumulation
betweenness = _accumulate_edges(betweenness, S, P, sigma, s)
# rescaling
for n in G: # remove nodes to only return edges
del betweenness[n]
betweenness = _rescale_e(betweenness, len(G),
normalized=normalized,
directed=G.is_directed())
return betweenness
# obsolete name
def edge_betweenness(G, k=None, normalized=True, weight=None, seed=None):
return edge_betweenness_centrality(G, k, normalized, weight, seed)
# helpers for betweenness centrality
def _single_source_shortest_path_basic(G, s):
S = []
P = {}
for v in G:
P[v] = []
sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G
D = {}
sigma[s] = 1.0
D[s] = 0
Q = [s]
while Q: # use BFS to find shortest paths
v = Q.pop(0)
S.append(v)
Dv = D[v]
sigmav = sigma[v]
for w in G[v]:
if w not in D:
Q.append(w)
D[w] = Dv + 1
if D[w] == Dv + 1: # this is a shortest path, count paths
sigma[w] += sigmav
P[w].append(v) # predecessors
return S, P, sigma
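# Editor's note (illustrative, not in the NetworkX source): for a path graph
# 0-1-2, _single_source_shortest_path_basic(G, 0) returns
#     S = [0, 1, 2]                      # nodes in non-decreasing distance
#     P = {0: [], 1: [0], 2: [1]}        # shortest-path predecessors
#     sigma = {0: 1.0, 1: 1.0, 2: 1.0}   # counts of shortest paths from 0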
def _single_source_dijkstra_path_basic(G, s, weight='weight'):
# modified from Eppstein
S = []
P = {}
for v in G:
P[v] = []
sigma = dict.fromkeys(G, 0.0) # sigma[v]=0 for v in G
D = {}
sigma[s] = 1.0
push = heappush
pop = heappop
seen = {s: 0}
c = count()
Q = [] # use Q as heap with (distance,node id) tuples
push(Q, (0, next(c), s, s))
while Q:
(dist, _, pred, v) = pop(Q)
if v in D:
continue # already searched this node.
sigma[v] += sigma[pred] # count paths
S.append(v)
D[v] = dist
for w, edgedata in G[v].items():
vw_dist = dist + edgedata.get(weight, 1)
if w not in D and (w not in seen or vw_dist < seen[w]):
seen[w] = vw_dist
push(Q, (vw_dist, next(c), v, w))
sigma[w] = 0.0
P[w] = [v]
elif vw_dist == seen[w]: # handle equal paths
sigma[w] += sigma[v]
P[w].append(v)
return S, P, sigma
def _accumulate_basic(betweenness, S, P, sigma, s):
delta = dict.fromkeys(S, 0)
while S:
w = S.pop()
coeff = (1.0 + delta[w]) / sigma[w]
for v in P[w]:
delta[v] += sigma[v] * coeff
if w != s:
betweenness[w] += delta[w]
return betweenness
def _accumulate_endpoints(betweenness, S, P, sigma, s):
betweenness[s] += len(S) - 1
delta = dict.fromkeys(S, 0)
while S:
w = S.pop()
coeff = (1.0 + delta[w]) / sigma[w]
for v in P[w]:
delta[v] += sigma[v] * coeff
if w != s:
betweenness[w] += delta[w] + 1
return betweenness
def _accumulate_edges(betweenness, S, P, sigma, s):
delta = dict.fromkeys(S, 0)
while S:
w = S.pop()
coeff = (1.0 + delta[w]) / sigma[w]
for v in P[w]:
c = sigma[v] * coeff
if (v, w) not in betweenness:
betweenness[(w, v)] += c
else:
betweenness[(v, w)] += c
delta[v] += c
if w != s:
betweenness[w] += delta[w]
return betweenness
def _rescale(betweenness, n, normalized, directed=False, k=None):
if normalized is True:
if n <= 2:
scale = None # no normalization b=0 for all nodes
else:
scale = 1.0 / ((n - 1) * (n - 2))
else: # rescale by 2 for undirected graphs
if not directed:
scale = 1.0 / 2.0
else:
scale = None
if scale is not None:
if k is not None:
scale = scale * n / k
for v in betweenness:
betweenness[v] *= scale
return betweenness
def _rescale_e(betweenness, n, normalized, directed=False, k=None):
if normalized is True:
if n <= 1:
scale = None # no normalization b=0 for all nodes
else:
scale = 1.0 / (n * (n - 1))
else: # rescale by 2 for undirected graphs
if not directed:
scale = 1.0 / 2.0
else:
scale = None
if scale is not None:
if k is not None:
scale = scale * n / k
for v in betweenness:
betweenness[v] *= scale
return betweenness
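# Editor's sketch (illustrative, not in the NetworkX source): edge betweenness
# on the same 4-node path graph peaks on the central edge,
#     >>> edge_betweenness_centrality(nx.path_graph(4))
#     {(0, 1): 0.25, (1, 2): 0.3333..., (2, 3): 0.25}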
| [
"[email protected]"
] | |
5961d295b23abd4a5c1995b3f10bf6ccb333c741 | 44600adf1731a449ff2dd5c84ce92c7f8b567fa4 | /colour_down/adaptation/fairchild1990.py | 4ce1a10481213f117c2508f1c43f594b728df699 | [] | no_license | ajun73/Work_Code | b6a3581c5be4ccde93bd4632d8aaaa9ecc782b43 | 017d12361f7f9419d4b45b23ed81f9856278e849 | refs/heads/master | 2020-04-11T23:16:43.994397 | 2019-12-28T07:48:44 | 2019-12-28T07:48:44 | 162,161,852 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,432 | py | # -*- coding: utf-8 -*-
"""
Fairchild (1990) Chromatic Adaptation Model
===========================================
Defines *Fairchild (1990)* chromatic adaptation model objects:
- :func:`colour.adaptation.chromatic_adaptation_Fairchild1990`
See Also
--------
`Fairchild (1990) Chromatic Adaptation Model Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/adaptation/fairchild1990.ipynb>`_
References
----------
- :cite:`Fairchild1991a` : Fairchild, M. D. (1991). Formulation and testing
of an incomplete-chromatic-adaptation model. Color Research & Application,
16(4), 243-250. doi:10.1002/col.5080160406
- :cite:`Fairchild2013s` : Fairchild, M. D. (2013). FAIRCHILD'S 1990 MODEL.
In Color Appearance Models (3rd ed., pp. 4418-4495). Wiley. ISBN:B00DAYO8E2
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.adaptation import VON_KRIES_CAT
from colour.utilities import dot_vector, row_as_diagonal, tsplit, tstack
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2018 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'FAIRCHILD1990_XYZ_TO_RGB_MATRIX', 'FAIRCHILD1990_RGB_TO_XYZ_MATRIX',
'chromatic_adaptation_Fairchild1990', 'XYZ_to_RGB_Fairchild1990',
'RGB_to_XYZ_Fairchild1990', 'degrees_of_adaptation'
]
FAIRCHILD1990_XYZ_TO_RGB_MATRIX = VON_KRIES_CAT
"""
*Fairchild (1990)* colour appearance model *CIE XYZ* tristimulus values to cone
responses matrix.
FAIRCHILD1990_XYZ_TO_RGB_MATRIX : array_like, (3, 3)
"""
FAIRCHILD1990_RGB_TO_XYZ_MATRIX = np.linalg.inv(VON_KRIES_CAT)
"""
*Fairchild (1990)* colour appearance model cone responses to *CIE XYZ*
tristimulus values matrix.
FAIRCHILD1990_RGB_TO_XYZ_MATRIX : array_like, (3, 3)
"""
def chromatic_adaptation_Fairchild1990(XYZ_1,
XYZ_n,
XYZ_r,
Y_n,
discount_illuminant=False):
"""
Adapts given stimulus *CIE XYZ_1* tristimulus values from test viewing
conditions to reference viewing conditions using *Fairchild (1990)*
chromatic adaptation model.
Parameters
----------
XYZ_1 : array_like
*CIE XYZ_1* tristimulus values of test sample / stimulus in domain
[0, 100].
XYZ_n : array_like
Test viewing condition *CIE XYZ_n* tristimulus values of whitepoint.
XYZ_r : array_like
Reference viewing condition *CIE XYZ_r* tristimulus values of
whitepoint.
Y_n : numeric or array_like
Luminance :math:`Y_n` of test adapting stimulus in :math:`cd/m^2`.
discount_illuminant : bool, optional
Truth value indicating if the illuminant should be discounted.
Returns
-------
ndarray
Adapted *CIE XYZ_2* tristimulus values of stimulus.
Warning
-------
The input domain and output range of that definition are non-standard!
Notes
-----
- Input *CIE XYZ_1*, *CIE XYZ_n* and *CIE XYZ_r* tristimulus values are
in domain [0, 100].
- Output *CIE XYZ_2* tristimulus values are in range [0, 100].
References
----------
- :cite:`Fairchild1991a`
- :cite:`Fairchild2013s`
Examples
--------
>>> XYZ_1 = np.array([19.53, 23.07, 24.97])
>>> XYZ_n = np.array([111.15, 100.00, 35.20])
>>> XYZ_r = np.array([94.81, 100.00, 107.30])
>>> Y_n = 200
>>> chromatic_adaptation_Fairchild1990(XYZ_1, XYZ_n, XYZ_r, Y_n)
... # doctest: +ELLIPSIS
array([ 23.3252634..., 23.3245581..., 76.1159375...])
"""
XYZ_1 = np.asarray(XYZ_1)
XYZ_n = np.asarray(XYZ_n)
XYZ_r = np.asarray(XYZ_r)
Y_n = np.asarray(Y_n)
LMS_1 = dot_vector(FAIRCHILD1990_XYZ_TO_RGB_MATRIX, XYZ_1)
LMS_n = dot_vector(FAIRCHILD1990_XYZ_TO_RGB_MATRIX, XYZ_n)
LMS_r = dot_vector(FAIRCHILD1990_XYZ_TO_RGB_MATRIX, XYZ_r)
p_LMS = degrees_of_adaptation(
LMS_1, Y_n, discount_illuminant=discount_illuminant)
a_LMS_1 = p_LMS / LMS_n
a_LMS_2 = p_LMS / LMS_r
A_1 = row_as_diagonal(a_LMS_1)
A_2 = row_as_diagonal(a_LMS_2)
LMSp_1 = dot_vector(A_1, LMS_1)
c = 0.219 - 0.0784 * np.log10(Y_n)
C = row_as_diagonal(tstack((c, c, c)))
LMS_a = dot_vector(C, LMSp_1)
LMSp_2 = dot_vector(np.linalg.inv(C), LMS_a)
LMS_c = dot_vector(np.linalg.inv(A_2), LMSp_2)
XYZ_c = dot_vector(FAIRCHILD1990_RGB_TO_XYZ_MATRIX, LMS_c)
return XYZ_c
def XYZ_to_RGB_Fairchild1990(XYZ):
"""
Converts from *CIE XYZ* tristimulus values to cone responses.
Parameters
----------
XYZ : array_like
*CIE XYZ* tristimulus values.
Returns
-------
ndarray
Cone responses.
Examples
--------
>>> XYZ = np.array([19.53, 23.07, 24.97])
>>> XYZ_to_RGB_Fairchild1990(XYZ) # doctest: +ELLIPSIS
array([ 22.1231935..., 23.6054224..., 22.9279534...])
"""
return dot_vector(FAIRCHILD1990_XYZ_TO_RGB_MATRIX, XYZ)
def RGB_to_XYZ_Fairchild1990(RGB):
"""
Converts from cone responses to *CIE XYZ* tristimulus values.
Parameters
----------
RGB : array_like
Cone responses.
Returns
-------
ndarray
*CIE XYZ* tristimulus values.
Examples
--------
>>> RGB = np.array([22.12319350, 23.60542240, 22.92795340])
>>> RGB_to_XYZ_Fairchild1990(RGB) # doctest: +ELLIPSIS
array([ 19.53, 23.07, 24.97])
"""
return dot_vector(FAIRCHILD1990_RGB_TO_XYZ_MATRIX, RGB)
def degrees_of_adaptation(LMS, Y_n, v=1 / 3, discount_illuminant=False):
"""
Computes the degrees of adaptation :math:`p_L`, :math:`p_M` and
:math:`p_S`.
Parameters
----------
LMS : array_like
Cone responses.
Y_n : numeric or array_like
Luminance :math:`Y_n` of test adapting stimulus in :math:`cd/m^2`.
v : numeric or array_like, optional
Exponent :math:`v`.
discount_illuminant : bool, optional
Truth value indicating if the illuminant should be discounted.
Returns
-------
ndarray
Degrees of adaptation :math:`p_L`, :math:`p_M` and :math:`p_S`.
Examples
--------
>>> LMS = np.array([20.00052060, 19.99978300, 19.99883160])
>>> Y_n = 31.83
>>> degrees_of_adaptation(LMS, Y_n) # doctest: +ELLIPSIS
array([ 0.9799324..., 0.9960035..., 1.0233041...])
>>> degrees_of_adaptation(LMS, Y_n, 1 / 3, True)
array([ 1., 1., 1.])
"""
LMS = np.asarray(LMS)
if discount_illuminant:
return np.ones(LMS.shape)
Y_n = np.asarray(Y_n)
v = np.asarray(v)
L, M, S = tsplit(LMS)
LMS_E = dot_vector(VON_KRIES_CAT, np.ones(LMS.shape)) # E illuminant.
L_E, M_E, S_E = tsplit(LMS_E)
Ye_n = Y_n ** v
def m_E(x, y):
"""
Computes the :math:`m_E` term.
"""
return (3 * (x / y)) / (L / L_E + M / M_E + S / S_E)
def P_c(x):
"""
Computes the :math:`P_L`, :math:`P_M` or :math:`P_S` terms.
"""
return (1 + Ye_n + x) / (1 + Ye_n + 1 / x)
p_L = P_c(m_E(L, L_E))
p_M = P_c(m_E(M, M_E))
p_S = P_c(m_E(S, S_E))
p_LMS = tstack((p_L, p_M, p_S))
return p_LMS
| [
"[email protected]"
] | |
68e3dbcc684161b2f8d32f752aaad8f778937993 | 9f6b9a40444df2b09960b5b531232ee6975e74dd | /level_1.py | 13d4ca664e2d9291979dc351d30188b94817ea48 | [] | no_license | nildiert/hodor | f60e94f4a64b8b0217c760104f501a6a586d4129 | 3c8b2df854ed2af6c5345250bc8f557b52761aee | refs/heads/master | 2020-06-02T06:19:44.740188 | 2019-06-10T02:59:23 | 2019-06-10T02:59:23 | 191,067,129 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | from lxml import html
import requests
import time
def req_values(url):
page = requests.get(url)
tree = html.fromstring(page.content)
me = tree.xpath(next_val)
return ([page, tree, me])
try:
url = 'http://158.69.76.135/level1.php'
data = {'id':'730','holdthedoor':'submit'}
next_val = '//td[contains(text(), "730")]/following-sibling::node()/text()'
page, tree, me = req_values(url)
data.update({"key":page.cookies["HoldTheDoor"]})
while ("".join(me) != '\n4095 '):
page, tree, me = req_values(url)
data.update({"key":page.cookies["HoldTheDoor"]})
status = requests.post(url, data, cookies=page.cookies)
print("{} {}".format(status ,me))
except Exception as e:
print(e)
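# Editor's note (added): each iteration re-reads the table cell next to
# id 730 via the XPath in next_val, refreshes the per-request HoldTheDoor
# cookie into the form data, and re-posts it; the loop stops once the cell
# reads '\n4095 '.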
| [
"[email protected]"
] | |
2da7f4194e18775060afca1bfc1dcd85d1009570 | 3411ad233c411c06765f4b07f8670c12025178b6 | /201-300/231-240/237-deleteNodeInLinkedList/deleteNodeInLinkedList.py | 9b19cef457feb64cb67a51b92b91f733e6ae73ed | [
"MIT"
] | permissive | xuychen/Leetcode | 7d9d31fed898ce58440f5ae6665d2ccaf1a4b256 | c8bf33af30569177c5276ffcd72a8d93ba4c402a | refs/heads/master | 2021-11-19T20:39:43.741589 | 2021-10-24T16:26:52 | 2021-10-24T16:26:52 | 140,212,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def deleteNode(self, node):
"""
:type node: ListNode
:rtype: void Do not return anything, modify node in-place instead.
"""
node.val = node.next.val
node.next = node.next.next | [
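# Editor's note (added): without access to the predecessor, the trick is to
# copy the successor's value into the given node and splice the successor
# out, e.g. 1->2->3->4 with node=3 becomes 1->2->4. This is O(1) and does
# not work for the tail node.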
"[email protected]"
] | |
b0c14efa30aa6296d714f88a56de72b29a3cb346 | 821d830910c354cb89767a77e00c77deb592ca0c | /bayesnet/math/__init__.py | a7e1743bd4c1a5cdfed8a8de29633ed4e5c1f037 | [
"MIT"
] | permissive | zxsted/BayesianNetwork | c61aa77a511e96852dec38f268f0dc31b6752cac | efe75b5416a262741fa60ad09380684886e91eff | refs/heads/master | 2021-05-09T05:38:43.513255 | 2017-10-25T06:58:26 | 2017-10-25T06:58:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | from bayesnet.math.add import add
from bayesnet.math.divide import divide, rdivide
from bayesnet.math.exp import exp
from bayesnet.math.log import log
from bayesnet.math.matmul import matmul, rmatmul
from bayesnet.math.mean import mean
from bayesnet.math.multiply import multiply
from bayesnet.math.negative import negative
from bayesnet.math.power import power, rpower
from bayesnet.math.product import prod
from bayesnet.math.sqrt import sqrt
from bayesnet.math.square import square
from bayesnet.math.subtract import subtract, rsubtract
from bayesnet.math.sum import sum
from bayesnet.tensor.tensor import Tensor
Tensor.__add__ = add
Tensor.__radd__ = add
Tensor.__truediv__ = divide
Tensor.__rtruediv__ = rdivide
Tensor.mean = mean
Tensor.__matmul__ = matmul
Tensor.__rmatmul__ = rmatmul
Tensor.__mul__ = multiply
Tensor.__rmul__ = multiply
Tensor.__neg__ = negative
Tensor.__pow__ = power
Tensor.__rpow__ = rpower
Tensor.prod = prod
Tensor.__sub__ = subtract
Tensor.__rsub__ = rsubtract
Tensor.sum = sum
__all__ = [
"add",
"divide",
"exp",
"log",
"matmul",
"mean",
"multiply",
"power",
"prod",
"sqrt",
"square",
"subtract",
"sum"
]
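# Editor's sketch (hypothetical constructor call, not from the package docs):
# after the operator bindings above, Tensor instances support ordinary
# arithmetic, so something like
#     t = Tensor(np.ones(3))
#     y = (2 * t + 1).sum()
# dispatches through the multiply, add and sum functions of this subpackage.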
| [
"[email protected]"
] | |
3317db01ff8d0d5eff65cd314197024a8f717d5c | cc196a0111bbdcc04af7e579bc87e808cc0c7b02 | /trident/__init__.py | 880dacc18d173da5184a33655707722ed4ae11ab | [
"MIT"
] | permissive | Jackliaoall-AI-Framework/trident | 73819a95121d9d4dbf81d28ae32aea43c0541840 | cd26c1108c05c3ab4c262f9b416a126b2ad2f858 | refs/heads/master | 2023-01-15T15:15:10.544946 | 2020-11-23T04:15:33 | 2020-11-23T04:15:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | """trident api"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from importlib import reload
from sys import stderr
defaultencoding = 'utf-8'
if sys.getdefaultencoding() != defaultencoding:
reload(sys)
sys.setdefaultencoding(defaultencoding)
__version__ = '0.6.1'
stderr.write('trident {0}\n'.format(__version__))
from trident.backend import *
import threading
import random
| [
"[email protected]"
] | |
49387a3cdc6c6e23837c5436d99d317dbd2554eb | 40b262d813d07a113914d6009af8737898f2e096 | /Platos test/apps/schedules/migrations/0001_initial.py | 8cf8f0fa053f62e70fdc4f248d417b5c4d27999c | [] | no_license | Nish8192/Python | cb6de3b96e790464a0a4ad10eda86ce4f79688b4 | 5c03beff6f3669d5cfb6b31c5749827db8b6a627 | refs/heads/master | 2020-12-23T16:56:18.301723 | 2017-05-27T02:09:02 | 2017-05-27T02:09:02 | 92,563,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,560 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-28 22:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('login_register', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Day',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('h9to10', models.BooleanField(verbose_name=False)),
('h10to11', models.BooleanField(verbose_name=False)),
('h11to12', models.BooleanField(verbose_name=False)),
('h12to13', models.BooleanField(verbose_name=False)),
('h13to14', models.BooleanField(verbose_name=False)),
('h14to15', models.BooleanField(verbose_name=False)),
('h15to16', models.BooleanField(verbose_name=False)),
('h16to17', models.BooleanField(verbose_name=False)),
('h17to18', models.BooleanField(verbose_name=False)),
('h18to19', models.BooleanField(verbose_name=False)),
('h19to20', models.BooleanField(verbose_name=False)),
('h20to21', models.BooleanField(verbose_name=False)),
('h21to22', models.BooleanField(verbose_name=False)),
('h22to23', models.BooleanField(verbose_name=False)),
('h23to0', models.BooleanField(verbose_name=False)),
('h0to1', models.BooleanField(verbose_name=False)),
('h1to2', models.BooleanField(verbose_name=False)),
('h2to3', models.BooleanField(verbose_name=False)),
('h3to4', models.BooleanField(verbose_name=False)),
('h4to5', models.BooleanField(verbose_name=False)),
('h5to6', models.BooleanField(verbose_name=False)),
('h6to7', models.BooleanField(verbose_name=False)),
('h7to8', models.BooleanField(verbose_name=False)),
('h8to9', models.BooleanField(verbose_name=False)),
],
),
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fri', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fri_schedule', to='schedules.Day')),
('mon', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='mon_schedule', to='schedules.Day')),
('sat', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sat_schedule', to='schedules.Day')),
('sun', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sun_schedule', to='schedules.Day')),
('thu', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='thu_schedule', to='schedules.Day')),
('tue', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tue_schedule', to='schedules.Day')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='schedule_user', to='login_register.User')),
('wed', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='wed_schedule', to='schedules.Day')),
],
),
]
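# Editor's note (added, not part of the generated migration): the schema
# stores one boolean per hour slot per Day row, and each Schedule links seven
# Day rows (mon..sun) plus a user, so a week of availability is 7 x 24 flags
# reachable as, e.g., schedule.mon.h9to10.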
| [
"[email protected]"
] | |
5b16d88b43724df85472827ca9a1de35f189620b | 6ed48bf3c72e61fe53144a3545ab305112c93501 | /appengine/findit/handlers/test/help_triage_test.py | ef11561424031e1e74e5d088f7422bb54953dd08 | [
"BSD-3-Clause"
] | permissive | eunchong/infra | ee5f7a9379977de8c814f90dbba3f6adbf06a75c | ce3728559112bfb3e8b32137eada517aec6d22f9 | refs/heads/master | 2022-11-27T06:26:57.415805 | 2016-04-08T12:34:36 | 2016-04-08T12:34:36 | 55,699,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,341 | py | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import webapp2
from testing_utils import testing
from common.git_repository import GitRepository
from handlers import help_triage
from model.wf_analysis import WfAnalysis
from model.wf_build import WfBuild
from waterfall import buildbot
from waterfall.build_info import BuildInfo
from waterfall import build_util
EXPECTED_RESULTS_120 = {
'598ed4fa15e6a1d0d92b2b7df04fc31ab5d6e829': {
'fixed_cl_review_url': 'https://codereview.chromium.org/12578123',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/121'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463001',
'fixing_cl_commit_position': 342013,
'fixed_cl_commit_position': 341971,
'fixed_revision': '598ed4fa15e6a1d0d92b2b7df04fc31ab5d6e829',
'fixing_build_number': 121,
'action': 'Reverted',
'fixing_revision': '598sd489df74g125svf35s04fc3'
},
'062a6f974d7c08d27902060c241149ce193e4dd5': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1268183002',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/121'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463006',
'fixing_cl_commit_position': 342015,
'fixed_cl_commit_position': 341977,
'fixed_revision': '062a6f974d7c08d27902060c241149ce193e4dd5',
'fixing_build_number': 121,
'action': 'Reverted',
'fixing_revision': '123456789c08d27902060c241149ce193e4dd5dd'
},
'584de1b73f811bcdb98eae1fb0d92b2b7df04fc3': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1263223005',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/122'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463003',
'fixing_cl_commit_position': 342014,
'fixed_cl_commit_position': 341976,
'fixed_revision': '584de1b73f811bcdb98eae1fb0d92b2b7df04fc3',
'fixing_build_number': 122,
'action': 'Reverted',
'fixing_revision': '123456671bcdb98eae1fb0d92b2b7df04fc3'
},
'3e4aaaa45c528d4ab0670331a6c0ebfc4f3ab8e6': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1260813007',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/123'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463100',
'fixing_cl_commit_position': 332070,
'fixed_cl_commit_position': 341978,
'fixed_revision': '3e4aaaa45c528d4ab0670331a6c0ebfc4f3ab8e6',
'fixing_build_number': 123,
'action': 'Reverted',
'fixing_revision': '123455668d4ab0670331a6c0ebfc4f3ab8e6'
}
}
EXPECTED_RESULTS_121 = {
'3e4aaaa45c528d4ab0670331a6c0ebfc4f3ab8e6': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1260813007',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/123'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463100',
'action': 'Reverted',
'fixed_cl_commit_position': 341978,
'fixed_revision': '3e4aaaa45c528d4ab0670331a6c0ebfc4f3ab8e6',
'fixing_build_number': 123,
'fixing_cl_commit_position': 332070,
'fixing_revision': '123455668d4ab0670331a6c0ebfc4f3ab8e6'
},
'584de1b73f811bcdb98eae1fb0d92b2b7df04fc3': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1263223005',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/122'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/120'),
'fixed_build_number': 120,
'fixing_cl_review_url': 'https://codereview.chromium.org/1280463003',
'action': 'Reverted',
'fixed_cl_commit_position': 341976,
'fixed_revision': '584de1b73f811bcdb98eae1fb0d92b2b7df04fc3',
'fixing_build_number': 122,
'fixing_cl_commit_position': 342014,
'fixing_revision': '123456671bcdb98eae1fb0d92b2b7df04fc3'
},
'123456789c08d27902060c241149ce193e4dd5dd': {
'fixed_cl_review_url': 'https://codereview.chromium.org/1280463006',
'fixing_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/122'),
'fixed_build_url': (
'https://build.chromium.org/p/m/builders/b/builds/121'),
'fixed_build_number': 121,
'fixing_cl_review_url': 'https://codereview.chromium.org/1161773008',
'action': 'Reverted',
'fixed_cl_commit_position': 342015,
'fixed_revision': '123456789c08d27902060c241149ce193e4dd5dd',
'fixing_build_number': 122,
'fixing_cl_commit_position': 332062,
'fixing_revision': '062a6f974d7c01234569ce193e4dd5'
}
}
class HelpTriageTest(testing.AppengineTestCase):
app_module = webapp2.WSGIApplication([
('/help-triage', help_triage.HelpTriage),
], debug=True)
def _GetBuildInfo(self, master_name, builder_name, build_number):
file_name = os.path.join(
os.path.dirname(__file__), 'data', 'help_triage_test_data',
'build_data_%s_%s_%s.json' % (
master_name, builder_name, build_number))
if not os.path.isfile(file_name):
return None
with open(file_name, 'r') as f:
return f.read()
def _MockDownloadBuildData(
self, master_name, builder_name, build_number):
build = WfBuild.Get(master_name, builder_name, build_number)
if not build: # pragma: no cover
build = WfBuild.Create(master_name, builder_name, build_number)
build.data = self._GetBuildInfo(master_name, builder_name, build_number)
build.put()
return build
def _MockDownloadChangeLogData(self, revision):
file_name = os.path.join(
os.path.dirname(__file__), 'data', 'help_triage_test_data',
'change_log_' + revision)
with open(file_name) as f:
commit_log = f.read()
return revision, json.loads(commit_log[len(')]}\'\n'):])
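# Editor's note (added): the slice in _MockDownloadChangeLogData strips
# Gitiles' standard ")]}'" anti-XSSI prefix line before parsing the commit
# log as JSON.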
def setUp(self):
super(HelpTriageTest, self).setUp()
self.master_name = 'm'
self.builder_name = 'b'
self.mock_current_user(user_email='[email protected]', is_admin=True)
self.mock(build_util, 'DownloadBuildData',
self._MockDownloadBuildData)
self.mock(GitRepository, '_DownloadChangeLogData',
self._MockDownloadChangeLogData)
def _CreateAnalysis(self, build_number, first_failure, last_pass=None):
analysis = WfAnalysis.Create(
self.master_name, self.builder_name, build_number)
analysis.result = {
'failures': [
{
'last_pass': last_pass,
'first_failure': first_failure,
'suspected_cls': [],
'step_name': 'gn_check'
}
]
}
analysis.put()
def testGetFirstFailedBuild(self):
self._CreateAnalysis(120, 118, 117)
first_build, failed_steps = help_triage._GetFirstFailedBuild(
self.master_name, self.builder_name, 120)
self.assertEqual(118, first_build)
self.assertEqual(['gn_check'], failed_steps)
def testGetFirstFailedBuildNoLastPass(self):
self._CreateAnalysis(120, 118)
first_build, failed_steps = help_triage._GetFirstFailedBuild(
self.master_name, self.builder_name, 120)
self.assertEqual(118, first_build)
self.assertEqual(['gn_check'], failed_steps)
def testGetFirstFailedBuildNoAnalysis(self):
first_build, failed_steps = help_triage._GetFirstFailedBuild(
self.master_name, self.builder_name, 120)
self.assertIsNone(first_build)
self.assertIsNone(failed_steps)
def testCheckReverts(self):
self._CreateAnalysis(120, 120)
results = help_triage._CheckReverts(
self.master_name, self.builder_name, 120)
self.assertEqual(EXPECTED_RESULTS_120, results)
def testCheckRevertsReturnNoneWhenNoGreenBuild(self):
self._CreateAnalysis(124, 124)
expected_results = {}
results = help_triage._CheckReverts(
self.master_name, self.builder_name, 124)
self.assertEqual(expected_results, results)
def testCheckRevertsReturnNoneWhenNoReverts(self):
self._CreateAnalysis(118, 118)
expected_results = {}
results = help_triage._CheckReverts(
self.master_name, self.builder_name, 118)
self.assertEqual(expected_results, results)
def testHelpTriageHandler(self):
build_url = buildbot.CreateBuildUrl(
self.master_name, self.builder_name, 121)
analysis = WfAnalysis.Create(self.master_name, self.builder_name, 121)
analysis.result = {
'failures': [
{
'last_pass': None,
'first_failure': 120,
'suspected_cls': [],
'step_name': 'gn_check'
}
]
}
analysis.put()
response = self.test_app.get('/help-triage', params={'url': build_url})
self.assertEqual(200, response.status_int)
self.assertEqual(EXPECTED_RESULTS_121, response.json_body)
def testHelpTriageHandlerReturnNoneForGreenBuild(self):
build_url = buildbot.CreateBuildUrl(
self.master_name, self.builder_name, 123)
build = WfBuild.Create(self.master_name, self.builder_name, 123)
build.data = self._GetBuildInfo(self.master_name, self.builder_name, 123)
build.put()
response = self.test_app.get('/help-triage', params={'url': build_url})
expected_results = {}
self.assertEqual(200, response.status_int)
self.assertEqual(expected_results, response.json_body)
| [
"[email protected]"
] | |
ee615403c191a3bc00a63b9ac501349fe54af94f | dec29f40788478f73798f23b79ca892b3121150a | /apps/core/forms.py | 2dea5904ea3b9ecfd5b3ba08032c83908459e911 | [] | no_license | RonaldTheodoro/django-ecommerce | 2c661e6f3ae0154ecb7a8e25183875da8c27d14f | 9097049107e5a7ab52474aa89fe40f02689fb24a | refs/heads/master | 2021-05-06T02:08:51.166682 | 2017-12-17T00:32:03 | 2017-12-17T00:32:03 | 114,499,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | from django import forms
class ContactForm(forms.Form):
fullname = forms.CharField(
widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Your full name'}
)
)
email = forms.EmailField(
widget=forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': 'Your email'}
)
)
content = forms.CharField(
widget=forms.Textarea(
attrs={'class': 'form-control', 'placeholder': 'Your message'}
)
)
| [
"[email protected]"
] | |
d3977fe8da468335955c73585ad4373f968ec62b | 52e8841ac9603e994fc487ecb52f232e55a50e07 | /Bio/HMM/Utilities.py | 17db3f4ed0ad626cb1de97b7c921f6692d2f4f6b | [] | no_license | rored/RozszerzenieBio.PDB | aff434fddfe57199a7465f79126eba62b1c789ae | 7c9d696faacabff912b1263fe19291d6a198c3c2 | refs/heads/master | 2021-01-21T04:50:37.903227 | 2016-06-23T19:15:42 | 2016-06-23T19:15:42 | 55,064,794 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,209 | py | # This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
#
"""Generic functions which are useful for working with HMMs.
This just collects general functions which you might like to use in
dealing with HMMs.
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
def pretty_print_prediction(emissions, real_state, predicted_state,
emission_title="Emissions",
real_title="Real State",
predicted_title="Predicted State",
line_width=75):
"""Print out a state sequence prediction in a nice manner.
Arguments:
o emissions -- The sequence of emissions of the sequence you are
dealing with.
o real_state -- The actual state path that generated the emissions.
o predicted_state -- A state path predicted by some kind of HMM model.
"""
# calculate the length of the titles and sequences
title_length = max(len(emission_title), len(real_title),
len(predicted_title)) + 1
seq_length = line_width - title_length
# set up the titles so they'll print right
emission_title = emission_title.ljust(title_length)
real_title = real_title.ljust(title_length)
predicted_title = predicted_title.ljust(title_length)
cur_position = 0
# while we still have more than seq_length characters to print
while True:
if (cur_position + seq_length) < len(emissions):
extension = seq_length
else:
extension = len(emissions) - cur_position
print("%s%s" % (emission_title,
emissions[cur_position:cur_position + seq_length]))
print("%s%s" % (real_title,
real_state[cur_position:cur_position + seq_length]))
print("%s%s\n" % (predicted_title,
predicted_state[cur_position:
cur_position + seq_length]))
if (len(emissions) < (cur_position + seq_length)):
break
cur_position += seq_length
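# Editor's sketch (not part of Biopython): given three equal-length strings,
#     pretty_print_prediction("ACGT" * 30, "H" * 120, "H" * 90 + "L" * 30)
# prints the emissions, the real state path and the predicted state path as
# aligned blocks wrapped at line_width characters per line.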
| [
"[email protected]"
] | |
7633f6f9cb7e06d32f07fd0f6c3f93f846667e26 | c128d73f1d988686e3e7377520d7475ae59d8016 | /test/cmd2.py | 674d0464539f09de007c6d8e163e51833f2b15f4 | [] | no_license | astroumd/SSINGMA | a9aba4aea0d0bf799643ebd7064b222b5c801894 | 044923b6e036d3679e88839593244b834e8e2d09 | refs/heads/master | 2021-07-01T12:48:08.520640 | 2019-03-22T01:50:29 | 2019-03-22T01:50:29 | 107,312,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | #
# example command line parser usage - functional approach with keyword database
#
# casa -c cmd2.py a=100 'c=[100,200]'
script_keywords = {
'a' : 1,
'b' : 2.0,
'c' : [1,2,3],
}
import sys
ng_initkeys(script_keywords,sys.argv)
a = ng_getkey('a')
b = ng_getkey('b')
c = ng_getkey('c')
print 'a=',a
print 'b=',b
print 'c=',c
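# Editor's note (added): with the invocation shown at the top of this file,
# and assuming the NG keyword parser evaluates the list literal, the script
# prints
#     a= 100
#     b= 2.0
#     c= [100, 200]
# (ng_initkeys/ng_getkey are provided by the surrounding CASA environment,
# not defined here).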
| [
"[email protected]"
] | |
023cbe9820c1c4c54c9a10bd34d54c0cd287a76a | 30e58b930c31526a1e226a928bc77e23f232080e | /icesim/dataCheck.py | 7152527a7b7c93513f0deaa3399d7c90caf5dd22 | [] | no_license | bbw7561135/anisotropy | c8688f9d705234c6a90f607acb3e8cc28ea5be28 | a21f85788c16d8aa14fc5934f476def4c8954f34 | refs/heads/master | 2021-06-01T05:35:48.727845 | 2016-05-13T00:27:37 | 2016-05-13T00:27:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | #!/usr/bin/env python
from icecube import icetray, dataio
from I3Tray import *
import sys, time, glob, os
import numpy as np
def fileCheck(fileName):
t0 = time.time()
tray = I3Tray()
tray.AddModule('I3Reader', FileName=fileName)
tray.Execute()
tray.Finish()
print "Time taken: ", time.time() - t0
def checker(config, out, fileList):
badList = []
for file in fileList:
try: fileCheck(file)
except RuntimeError:
print 'Bad run found:', os.path.basename(file)
badList += [file+'\n']
with open(out, 'w') as f:
f.writelines(badList)
if __name__ == "__main__":
config = sys.argv[1]
out = sys.argv[2]
fileList = sys.argv[3:]
checker(config, out, fileList)
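# Editor's note (added): invoked as
#     python dataCheck.py <config> <badlist.txt> run1.i3 run2.i3 ...
# any file whose I3Reader pass raises RuntimeError is recorded in
# <badlist.txt>.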
| [
"[email protected]"
] | |
34f96a0617099afd0d7f6f2da75c52d59c91696c | 3474b315da3cc5cb3f7823f19a18b63a8da6a526 | /scratch/KRAMS/src/ibvpy/core/i_bcond.py | 724c3af30b2cc5336fb60d2c0bd2d4c98c37a380 | [] | no_license | h4ck3rm1k3/scratch | 8df97462f696bc2be00f1e58232e1cd915f0fafd | 0a114a41b0d1e9b2d68dbe7af7cf34db11512539 | refs/heads/master | 2021-01-21T15:31:38.718039 | 2013-09-19T10:48:24 | 2013-09-19T10:48:24 | 29,173,525 | 0 | 0 | null | 2015-01-13T04:58:57 | 2015-01-13T04:58:56 | null | UTF-8 | Python | false | false | 1,522 | py |
from enthought.traits.api import Array, Bool, Enum, Float, HasTraits, \
Instance, Int, Trait, Str, Enum, \
Callable, List, TraitDict, Any, Range, \
Delegate, Event, on_trait_change, Button, \
Interface, implements, Property, cached_property
class IBCond( Interface ):
'''
Interface of the boundary condition.
'''
def is_essential( self ):
'''
Distinguish the essential and natural boundary conditions.
This is needed to reorganize the system matrices and vectors
to reflect the prescribed primary variables (displacements).
'''
def is_natural( self ):
'''
Distinguish the essential and natural boundary conditions.
This is needed to reorganize the system matrices and vectors
to reflect the prescribed primary variables (displacements).
'''
# def get_dofs( self ):
# '''
# Return the list of affected DOFs.
#
# This is needed to reorganize the system matrices and vectors
# to reflect the prescribed primary variables (displacements).
# '''
def setup( self, sctx ):
'''
Locate the spatial context.
'''
def apply_essential( self, K ):
'''
Locate the spatial context.
'''
def apply( self, step_flag, sctx, K, R, t_n, t_n1 ):
'''
Locate the spatial context.
'''
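# Editor's sketch (hypothetical, not part of the original package): a concrete
# essential (Dirichlet-type) boundary condition would implement the interface
# roughly as follows.
class ExampleFixedBCSketch(HasTraits):
    implements(IBCond)
    def is_essential(self):
        return True
    def is_natural(self):
        return False
    def setup(self, sctx):
        pass  # locate the spatial context here
    def apply_essential(self, K):
        pass  # register the prescribed DOF value in the system matrix
    def apply(self, step_flag, sctx, K, R, t_n, t_n1):
        pass  # essential conditions are handled via apply_essential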
| [
"Axel@Axel-Pc"
] | Axel@Axel-Pc |
dfa897d3f257e32121c709f43bebd0d52f22631a | f032cbec4f03d8c163609d4c5144b38952b2cbe9 | /other/6_text_similarity/generic_process_corpus_gui.py | db1ee724f3ecd42c5669ccbac714535b829a2eae | [] | no_license | humlab/the_culture_of_international_relations | f12a604421debcc90996d98b8cccff6633615ffc | 4440753d396c88dc5902e85e4c7e38ef8aadcefd | refs/heads/master | 2022-11-13T14:05:01.373504 | 2022-11-01T16:30:16 | 2022-11-01T16:30:16 | 90,034,896 | 3 | 3 | null | 2021-09-06T08:08:06 | 2017-05-02T13:11:09 | Jupyter Notebook | UTF-8 | Python | false | false | 8,967 | py | import types
import ipywidgets as widgets
import textacy_corpus_utility as textacy_utility
import common.utility as utility
logger = utility.getLogger('corpus_text_analysis')
def process_corpus(corpus, terms_opts, process_function, process_opts):
def get_merged_words(scores, low, high):
ws = set([])
for x, wl in scores.items():
if low <= x <= high:
ws.update(wl)
return ws
normalize = terms_opts['normalize'] or 'orth'
extra_stop_words = set(terms_opts.get('extra_stop_words', []))
if terms_opts.get('min_freq', 1) > 1:
assert 'word_count_scores' in terms_opts
extra_stop_words.update(get_merged_words(terms_opts['word_count_scores'], 1, terms_opts.get('min_freq')))
if terms_opts.get('max_doc_freq') < 100:
assert 'word_document_count_scores' in terms_opts
extra_stop_words.update(get_merged_words(terms_opts['word_document_count_scores'], terms_opts.get('max_doc_freq'), 100))
#if terms_opts.get('mask_gpe', False):
# extra_stop_words.update(['_gpe_'])
extract_args = dict(
args=dict(
ngrams=terms_opts['ngrams'],
named_entities=terms_opts['named_entities'],
normalize=terms_opts['normalize'],
as_strings=True
),
kwargs=dict(
min_freq=terms_opts['min_freq'],
include_pos=terms_opts['include_pos'],
filter_stops=terms_opts['filter_stops'],
filter_punct=terms_opts['filter_punct']
),
extra_stop_words=extra_stop_words,
substitutions=(terms_opts.get('gpe_substitutions', []) if terms_opts.get('mask_gpe', False) else None),
)
process_function(corpus, process_opts, extract_args)
def process_corpus_gui(container, wti_index, process_function, **opts):
lw = lambda width: widgets.Layout(width=width)
def frequent_words(corpus, normalize, include_pos, n_top=100):
if include_pos is None or include_pos == ('', ):
include_pos = []
return [
x[0] for x in textacy_utility.get_most_frequent_words(
corpus, n_top, normalize=normalize, include_pos=include_pos
)
] + [ '_gpe_' ]
#logger.info('Preparing corpus statistics...')
corpus = container.textacy_corpus
gpe_substitutions = { }
if opts.get('gpe_filename', None) is not None:
logger.info('...loading term substitution mappings...')
gpe_substitutions = { x: '_gpe_' for x in textacy_utility.load_term_substitutions(filepath=opts['gpe_filename'], vocab=None) }
pos_tags = opts.get('tagset').groupby(['POS'])['DESCRIPTION'].apply(list).apply(lambda x: ', '.join(x[:1])).to_dict()
if False: # display_pos_legend:
pos_options = sorted([(k + ' (' + v + ')', k) for k,v in pos_tags.items() ])
else:
pos_options = sorted([(k, k) for k,v in pos_tags.items() ])
ngrams_options = { '1': [1], '1,2': [1,2], '1,2,3': [1,2,3]}
default_normalize = 'lemma'
gui = types.SimpleNamespace(
progress=widgets.IntProgress(value=0, min=0, max=5, step=1, description='', layout=lw('90%')),
min_freq=widgets.IntSlider(description='Min word freq', min=0, max=10, value=2, step=1, layout=lw('240px')),
max_doc_freq=widgets.IntSlider(description='Min doc. %', min=75, max=100, value=100, step=1, layout=lw('240px')),
mask_gpe=widgets.ToggleButton(value=False, description='Mask GPE', tooltip='Replace geographical entites with `_gpe_`', icon='check', layout=lw('115px')),
ngrams=widgets.Dropdown(description='n-grams', options=ngrams_options, value=[1], layout=lw('180px')),
min_word=widgets.Dropdown(description='Min length', options=[1,2,3,4], value=1, layout=lw('180px')),
normalize=widgets.Dropdown(description='Normalize', options=[ None, 'lemma', 'lower' ], value=default_normalize, layout=lw('180px')),
filter_stops=widgets.ToggleButton(value=False, description='Filter stops', tooltip='Filter out stopwords', icon='check', layout=lw('115px')),
filter_punct=widgets.ToggleButton(value=False, description='Filter punct', tooltip='Filter out punctuations', icon='check', layout=lw('115px')),
named_entities=widgets.ToggleButton(value=False, description='Merge entities', tooltip='Merge entities', icon='check', disabled=True, layout=lw('115px')),
apply_idf=widgets.ToggleButton(value=False, description='Apply IDF', tooltip='Apply IDF (skikit-learn) or TF-IDF (gensim)', icon='check'),
include_pos=widgets.SelectMultiple(description='POS', options=pos_options, value=list(), rows=10, layout=lw('180px')),
stop_words=widgets.SelectMultiple(description='STOP', options=[], value=list(), rows=10, layout=widgets.Layout(width='220px')),
output=widgets.Output(),
compute=widgets.Button(description='Compute', button_style='Success', layout=lw('115px'))
)
#logger.info('...word counts...')
#word_count_scores = opts.get('word_count_scores', None) or dict(
# lemma=textacy_utility.generate_word_count_score(corpus, 'lemma', gui.min_freq.max),
# lower=textacy_utility.generate_word_count_score(corpus, 'lower', gui.min_freq.max),
# orth=textacy_utility.generate_word_count_score(corpus, 'orth', gui.min_freq.max)
#)
#logger.info('...word document count...')
#word_document_count_scores = opts.get('word_document_count_scores', None) or dict(
# lemma=textacy_utility.generate_word_document_count_score(corpus, 'lemma', gui.max_doc_freq.min),
# lower=textacy_utility.generate_word_document_count_score(corpus, 'lower', gui.max_doc_freq.min),
# orth=textacy_utility.generate_word_document_count_score(corpus, 'orth', gui.max_doc_freq.min)
#)
#logger.info('...done!')
def pos_change_handler(*args):
with gui.output:
gui.compute.disabled = True
selected = set(gui.stop_words.value)
gui.stop_words.options = frequent_words(corpus, gui.normalize.value, gui.include_pos.value)
selected = selected & set(gui.stop_words.options)
gui.stop_words.value = list(selected)
gui.compute.disabled = False
pos_change_handler()
gui.include_pos.observe(pos_change_handler, 'value')
def tick(x=None, max_value=None):
if max_value is not None:
gui.progress.max = max_value
gui.progress.value = gui.progress.value + 1 if x is None else x
def buzy(is_buzy):
gui.compute.disabled = is_buzy
#gui.spinner.layout.visibility = 'visible' if is_buzy else 'hidden'
def process_corpus_handler(*args):
gui.output.clear_output()
buzy(True)
with gui.output:
try:
terms_opts = dict(
min_freq=gui.min_freq.value,
max_doc_freq=gui.max_doc_freq.value,
mask_gpe=gui.mask_gpe.value,
ngrams=gui.ngrams.value,
min_word=gui.min_word.value,
normalize=gui.normalize.value,
filter_stops=gui.filter_stops.value,
filter_punct=gui.filter_punct.value,
named_entities=gui.named_entities.value,
include_pos=gui.include_pos.value,
extra_stop_words=gui.stop_words.value,
gpe_substitutions=gpe_substitutions,
word_count_scores=container.get_word_count(gui.normalize.value),
word_document_count_scores=container.get_word_document_count(gui.normalize.value)
)
process_opts = dict(
container=container,
gui=gui,
tick=tick
)
process_opts.update(opts)
process_corpus(corpus, terms_opts, process_function, process_opts)
# display(result)
except Exception as ex:
logger.error(ex)
raise
finally:
buzy(False)
gui.compute.on_click(process_corpus_handler)
gui.boxes = widgets.VBox([
gui.progress,
widgets.HBox([
widgets.VBox([
widgets.HBox([gui.normalize]),
widgets.HBox([gui.ngrams]),
widgets.HBox([gui.min_word]),
gui.min_freq,
gui.max_doc_freq
]),
widgets.VBox([
gui.include_pos
]),
widgets.VBox([
gui.stop_words
]),
widgets.VBox([
gui.filter_stops,
gui.mask_gpe,
gui.filter_punct,
gui.named_entities,
gui.compute
])
]),
widgets.HBox([
gui.output
])
])
display(gui.boxes)
return gui
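# Editor's sketch (hypothetical wiring, not from the original notebook code):
#     gui = process_corpus_gui(container, wti_index, my_process_fn,
#                              tagset=tagset_df, gpe_filename=None)
# where opts must supply a 'tagset' DataFrame with POS/DESCRIPTION columns,
# and my_process_fn is invoked as my_process_fn(corpus, process_opts,
# extract_args) by process_corpus above.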
| [
"[email protected]"
] | |
60498b2a36b3b029e68117c353e9d62505294008 | 9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d | /python/uline/uline/uline/handlers/app/official/operations/form.py | 54954fb7a2dabc703be3fa1e80e3641216ebec7a | [] | no_license | apollowesley/Demo | f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8 | 471c4af95d3a7222d6933afc571a8e52e8fe4aee | refs/heads/master | 2021-02-15T04:01:51.590697 | 2018-01-29T01:44:29 | 2018-01-29T01:44:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from wtforms import validators, fields
from uline.utils.form import BaseForm
class MessageSendSearch(BaseForm):
create_at = fields.DateTimeField(validators=[validators.Optional()])
message_content = fields.StringField(validators=[validators.Optional()])
sended_count = fields.IntegerField(
validators=[validators.Optional()])  # number of messages already sent
need_send_count = fields.IntegerField(
validators=[validators.Optional()])  # number of messages not yet sent
| [
"[email protected]"
] | |
87fcbdd19d932c6379de0cd46fd9d4a1f81b1b85 | 47ce68e1ff970318fd31ac43405d0e1fa3594bf6 | /Models/Autoencoders/TransposeConvAutoencoderDeepExtraLLR.py | 281d1a5861b11b1efe478d6108fd1c7b3310b5eb | [
"BSD-3-Clause"
] | permissive | Midoriii/Anomaly_Detection_Diploma | 7196da379f8aefbd4546ca23e8303d1829e059fb | 11145e3e5210a4e45a33d98b138213edb7bc5d3d | refs/heads/master | 2023-03-25T20:42:56.961210 | 2021-03-14T01:13:39 | 2021-03-14T01:13:39 | 261,205,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,910 | py | '''
Copyright (c) 2021, Štěpán Beneš
Convolutional Autoencoder with learnable Conv2DTranspose layers,
especially deep, the encoding is really small
With even further decreased learning rate
'''
import numpy as np
from Models.Autoencoders.BaseModel import BaseModel
from keras.layers import Input, Reshape, Dense, Flatten
from keras.layers import Activation, Conv2D, MaxPooling2D, Conv2DTranspose, PReLU
from keras.initializers import Constant
from keras.models import Model
from keras.callbacks import History
from keras.optimizers import Adam
class TransposeConvAutoencoderDeepExtraLLR(BaseModel):
def __init__(self):
super().__init__()
self.name = "TransposeConvAutoencoderDeepExtraLLR"
return
# Custom optimizer learning rate to see if it improves anything
def compile_net(self):
opt = Adam(learning_rate=0.00001)
self.model.compile(optimizer=opt, loss='mse')
self.model.summary()
return
def create_net(self, input_shape):
net_input = Input(shape=input_shape)
x = Conv2D(self.filters, (3, 3), padding='same')(net_input)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(self.filters, (3, 3), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(self.filters, (3, 3), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(self.filters, (3, 3), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(self.filters, (3, 3), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(self.filters, (3, 3), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(self.filters, (3, 3), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(self.filters, (3, 3), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
self.encoded = MaxPooling2D((2, 2), padding='same')(x)
# Keep the encoder part
self.encoder = Model(net_input, self.encoded)
# And now the decoder part
x = Conv2DTranspose(self.filters, (3,3), strides=(2,2), padding='same')(self.encoded)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = Conv2DTranspose(self.filters, (3,3), strides=(2,2), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = Conv2DTranspose(self.filters, (3,3), strides=(2,2), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = Conv2DTranspose(self.filters, (3,3), strides=(2,2), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = Conv2DTranspose(self.filters, (3,3), strides=(2,2), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = Conv2DTranspose(self.filters, (3,3), strides=(2,2), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = Conv2DTranspose(self.filters, (3,3), strides=(2,2), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
x = Conv2DTranspose(self.filters, (3,3), strides=(2,2), padding='same')(x)
x = PReLU(alpha_initializer=Constant(value=0.25))(x)
self.decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
self.model = Model(net_input, self.decoded)
return
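# Editor's sketch (assumes BaseModel supplies `filters` and the training/IO
# helpers; shapes are illustrative):
#     ae = TransposeConvAutoencoderDeepExtraLLR()
#     ae.create_net((256, 256, 1))   # eight stride-2 poolings -> 1x1 encoding
#     ae.compile_net()               # Adam(learning_rate=1e-5), MSE loss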
| [
"[email protected]"
] | |
d522119641e45ea7f443f1458d207a02f08aa4b8 | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ControlRegions/WgS/Full2016/plot.py | ae45e9fd7f7af563855309ae7753825d3ffa7ccb | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 10,880 | py | # plot configuration
# groupPlot = {}
#
# Groups of samples to improve the plots.
# If not defined, normal plots is used
#
groupPlot['DY'] = {
'nameHR' : "DY",
'isSignal' : 0,
'color': 418, # kGreen+2
'samples' : ['DY']
}
groupPlot['Fake'] = {
'nameHR' : 'Non-prompt',
'isSignal' : 0,
'color': 921, # kGray + 1
'samples' : ['Fake']
}
groupPlot['top'] = {
'nameHR' : 'tW and t#bar{t}',
'isSignal' : 0,
'color': 400, # kYellow
'samples' : ['top']
}
groupPlot['WW'] = {
'nameHR' : 'WW',
'isSignal' : 0,
'color': 851, # kAzure -9
'samples' : ['WW', 'ggWW']
}
groupPlot['VVV'] = {
'nameHR' : 'VVV',
'isSignal' : 0,
'color': 857, # kAzure -3
'samples' : ['VVV']
}
#groupPlot['VZ'] = {
# 'nameHR' : "VZ/#gamma*/#gamma",
# 'isSignal' : 0,
# 'color' : 617, # kViolet + 1
# 'samples' : ['VZ', 'Vg', 'Wg', 'VgS', 'WZ', 'ZZ']
# }
groupPlot['WZgS_L'] = {
'nameHR' : "WZmll01_L",
'isSignal' : 0,
'color' : 600, # kViolet + 1
'samples' : ['WZgS_L']
}
groupPlot['WZgS_H'] = {
'nameHR' : "WZmll01_H",
'isSignal' : 0,
'color' : 887, # kViolet + 1
'samples' : ['WZgS_H']
}
#groupPlot['WZ'] = {
# 'nameHR' : "WZmll40",
# 'isSignal' : 0,
# 'color' : 619, # kViolet + 1
# 'samples' : ['WZ']
# }
#
groupPlot['VZ'] = {
'nameHR' : "VZ",
'isSignal' : 0,
'color' : 617, # kViolet + 1
'samples' : ['VZ', 'ZZ']
}
groupPlot['Vg'] = {
'nameHR' : "V#gamma",
'isSignal' : 0,
'color' : 810, # kOrange + 10
'samples' : ['Vg', 'Wg']
}
#groupPlot['VgS'] = {
# 'nameHR' : "V#gamma*",
# 'isSignal' : 0,
# 'color' : 409, # kGreen - 9
# 'samples' : ['VgS']
# }
#
groupPlot['Higgs'] = {
'nameHR' : 'Higgs',
'isSignal' : 1,
'color': 632, # kRed
'samples' : ['H_htt', 'H_hww', 'ZH_hww', 'ggZH_hww', 'WH_hww', 'qqH_hww', 'ggH_hww', 'bbH_hww']
}
#plot = {}
# keys here must match keys in samples.py
#
plot['DY'] = {
'color': 418, # kGreen+2
'isSignal' : 0,
'isData' : 0,
'scale' : 1,
#'cuts' : {
#'hww2l2v_13TeV_of0j' : 0.95 ,
#'hww2l2v_13TeV_top_of0j' : 0.95 ,
#'hww2l2v_13TeV_dytt_of0j' : 0.95 ,
#'hww2l2v_13TeV_em_0j' : 0.95 ,
#'hww2l2v_13TeV_me_0j' : 0.95 ,
##
#'hww2l2v_13TeV_of1j' : 1.08 ,
#'hww2l2v_13TeV_top_of1j' : 1.08 ,
#'hww2l2v_13TeV_dytt_of1j' : 1.08 ,
#'hww2l2v_13TeV_em_1j' : 1.08 ,
#'hww2l2v_13TeV_me_1j' : 1.08 ,
#},
}
plot['Wjets'] = {
'color': 921, # kGray + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['Fake'] = {
'color': 921, # kGray + 1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['FakeQCD'] = {
'color': 922, # kGray + 2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['ttbar'] = {
    'nameHR' : 't#bar{t}',
'color': 400, # kYellow
'isSignal' : 0,
'isData' : 0 ,
'scale' : 1.0
}
plot['singletop'] = {
'nameHR' : 't and tW',
'color': 401, # kYellow +1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['top'] = {
'nameHR' : 'tW and t#bar{t}',
'color': 400, # kYellow
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0,
#'cuts' : {
#'hww2l2v_13TeV_of0j' : 0.94 ,
#'hww2l2v_13TeV_top_of0j' : 0.94 ,
#'hww2l2v_13TeV_dytt_of0j' : 0.94 ,
#'hww2l2v_13TeV_em_0j' : 0.94 ,
#'hww2l2v_13TeV_me_0j' : 0.94 ,
##
#'hww2l2v_13TeV_of1j' : 0.86 ,
#'hww2l2v_13TeV_top_of1j' : 0.86 ,
#'hww2l2v_13TeV_dytt_of1j' : 0.86 ,
#'hww2l2v_13TeV_em_1j' : 0.86 ,
#'hww2l2v_13TeV_me_1j' : 0.86 ,
#},
}
plot['WW'] = {
'color': 851, # kAzure -9
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0 # ele/mu trigger efficiency datadriven
}
plot['ggWW'] = {
'color': 850, # kAzure -10
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['ggWW_Int'] = {
'color': 616, # kMagenta
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['Wg'] = {
'color': 859, # kAzure -1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['Vg'] = {
'color': 859, # kAzure -1
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
#plot['VgS'] = {
# 'color' : 617, # kViolet + 1
# 'isSignal' : 0,
# 'isData' : 0,
# 'scale' : 1.0
# }
#
plot['VZ'] = {
'color': 858, # kAzure -2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['WZgS_L'] = {
'color': 890, # kAzure -2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.1
}
plot['WZgS_H'] = {
'color': 887, # kAzure -2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.24
}
plot['WZ'] = {
'color': 887, # kAzure -2
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['VVV'] = {
'color': 857, # kAzure -3
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
plot['ZZ'] = {
'color': 854, # kAzure -6
'isSignal' : 0,
'isData' : 0,
'scale' : 1.0
}
# Htautau
plot['H_htt'] = {
'nameHR' : 'Htt',
'color': 632+4, # kRed+4
'isSignal' : 1,
'isData' : 0,
'scale' : 1 #
}
# HWW
plot['H_hww'] = {
'nameHR' : 'Hww',
'color': 632, # kRed
'isSignal' : 1,
'isData' : 0,
'scale' : 1 #
}
plot['ZH_hww'] = {
'nameHR' : 'ZH',
'color': 632+3, # kRed+3
'isSignal' : 1,
'isData' : 0,
'scale' : 1 #
}
plot['ggZH_hww'] = {
'nameHR' : 'ggZH',
'color': 632+4, # kRed+4
'isSignal' : 1,
'isData' : 0,
'scale' : 1 #
}
plot['WH_hww'] = {
'nameHR' : 'WH',
'color': 632+2, # kRed+2
'isSignal' : 1,
'isData' : 0,
'scale' : 1 #
}
plot['qqH_hww'] = {
'nameHR' : 'qqH',
'color': 632+1, # kRed+1
'isSignal' : 1,
'isData' : 0,
'scale' : 1 #
}
plot['ggH_hww'] = {
'nameHR' : 'ggH',
'color': 632, # kRed
'isSignal' : 1,
'isData' : 0,
'scale' : 1 #
}
plot['bbH_hww'] = {
'nameHR' : 'bbH',
'color': 632+5, # kRed+5
'isSignal' : 1,
'isData' : 0,
'scale' : 1 #
}
# data
plot['DATA'] = {
'nameHR' : 'Data',
'color': 1 ,
'isSignal' : 0,
'isData' : 1 ,
'isBlind' : 0
}
# additional options
# legend['lumi'] = 'L = 2.3/fb' # 2.264 fb-1
#legend['lumi'] = 'L = 2.3/fb' # 2.318 fb-1
#legend['lumi'] = 'L = 0.8/fb' # 2.318 fb-1
#legend['lumi'] = 'L = 2.6/fb'
#legend['lumi'] = 'L = 4.3/fb'
#legend['lumi'] = 'L = 6.3/fb'
#legend['lumi'] = 'L = 12.9/fb'
legend['lumi'] = 'L = 35.9/fb'
legend['sqrt'] = '#sqrt{s} = 13 TeV'
| [
"[email protected]"
] | |
3878130f0f9cf98c52c5c9089cbe81841cf7040e | 20149c63930ebb2a950e45a7fafa12a17712695c | /ImageProcessing/camera.py | 914751cb174bbbf7236d7c86bdf4630599682c6b | [] | no_license | junyi1997/Final_OIT_projet | ffe7f1a1c61f8124ab72d5250e30c1e2cc49c1ab | 98fc039378021c6db259dbe79d7980750aa91710 | refs/heads/master | 2023-01-03T05:24:31.133416 | 2020-11-01T14:12:15 | 2020-11-01T14:12:15 | 201,788,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | import picamera
initialized = False
class Camera:
camera = None
def __init__(self):
self.camera = picamera.PiCamera()
def takePhoto(self, filepath):
""" Takes a photo and saves it to the /img directory. """
self.camera.capture(filepath)
def getPiCamera(self):
return self.camera
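# Minimal usage sketch (assumes a Raspberry Pi with the camera module enabled;
# the path below is illustrative and its directory must already exist):
#   cam = Camera()
#   cam.takePhoto('img/photo.jpg')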
| [
"[email protected]"
] | |
9c68e91c36a93581f62c5d46e522b9bb8b7c38bb | 35f1d3ab93b5de8ed83f7dce534505a2b8b2217b | /backend/new_app_18938/settings.py | 4070cff46d149ab3cd53e54c5017494947f10cbd | [] | no_license | crowdbotics-apps/new-app-18938 | 5b01023da26e48735720ce0ded6bb74bed26ec28 | ba2ad11b6820b7561b818d3b7e13875331e03fe9 | refs/heads/master | 2022-11-16T00:31:11.211998 | 2020-07-16T16:36:10 | 2020-07-16T16:36:10 | 280,161,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,791 | py | """
Django settings for new_app_18938 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'new_app_18938.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'new_app_18938.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
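# Example (hypothetical value): exporting DATABASE_URL=postgres://user:pass@host:5432/dbname
# makes env.db() parse the URL into Django's DATABASES configuration format.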
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"[email protected]"
] | |
44da06eb51574b41af6813f5e6252a5a2750e648 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03767/s866081759.py | cf66b2ce220f7faa16d298b3290bd992eb740c88 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | import collections
n = int(input())
*a, = map(int, input().split())  # the 3*n values
a.sort()
a = collections.deque(a)
ans = 0
# Greedy: each round drop the smallest value, skip the largest, and count the
# second largest -- i.e. form the group {smallest, largest, second largest}
# and sum the medians (a standard greedy for maximizing the sum of medians).
while n:
    a.popleft()     # drop the smallest
    a.pop()         # skip the largest
    ans += a.pop()  # count the next largest (the group's median)
    n -= 1
print(ans)
"[email protected]"
] | |
fd2299064cdb5b26bd595348ca1902f794c5e968 | 66c68ab7a6b62c4a94ddf4cdda64dc7f2181f6f7 | /samples/crater/crater.py | 4c20dc702a93465e1b15bd734aff41608960e0f7 | [
"MIT"
] | permissive | ZhiangChen/Mask_RCNN | bfaeebbc32bdd2eef40f77ffe385437c6d9e60e0 | 040611909c140a98ab09ae09b511ce2a47b5f195 | refs/heads/master | 2020-03-27T10:04:52.300122 | 2019-02-25T00:42:17 | 2019-02-25T00:42:17 | 146,393,525 | 1 | 0 | null | 2018-08-28T04:51:42 | 2018-08-28T04:51:42 | null | UTF-8 | Python | false | false | 4,235 | py | """
classes for lunar crater dataset
Zhiang Chen
Sep 13, 2018
[email protected]
"""
import os
import sys
import numpy as np
import skimage.draw
import pickle
import argparse
import matplotlib.pyplot as plt
from mrcnn import visualize
from mrcnn.config import Config
from mrcnn import model as modellib, utils
ROOT_DIR = os.path.abspath("../../")
sys.path.append(ROOT_DIR) # To find local version of the library
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Dataset config
############################################################
class CraterConfig(Config):
NAME = "crater"
GPU_COUNT = 1 # cannot create model when setting gpu count as 2
IMAGES_PER_GPU = 1
NUM_CLASSES = 1 + 1 # Background + crater
IMAGE_MIN_DIM = 256
IMAGE_MAX_DIM = 256
RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
# IMAGE_CHANNEL = 1 # wrong, the input will be automatically converted to 3 channels (if greyscale, rgb will be repeated)
STEPS_PER_EPOCH = 100
DETECTION_MIN_CONFIDENCE = 0.9
MAX_GT_INSTANCES = 500
DETECTION_MAX_INSTANCES = 600
TRAIN_ROIS_PER_IMAGE = 1000
############################################################
# Dataset
############################################################
class CraterDataset(utils.Dataset):
def load_crater(self, datadir, subset, subsubset):
self.add_class("lunar_crater", 1, "lunar_crater")
assert subset in ["train", "val"]
subset_dir = os.path.join(datadir, subset)
dataset_dir = os.path.join(subset_dir, subsubset)
annotation_path = os.path.join(dataset_dir, 'annotations.pickle')
assert os.path.isfile(annotation_path)
with open(annotation_path, "rb") as f:
annotations = pickle.load(f, encoding='latin1')
del(f)
print('loading ' + subsubset)
for i in range(50):
image_path = os.path.join(dataset_dir, "img_{i:0{zp}d}.jpg".format(i=i, zp=2))
#print(image_path)
assert os.path.isfile(image_path)
image_id = int(subsubset)*50 + i
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
index = "{k:0{zp}d}".format(k=i, zp=2)
mask = annotations[index]['data']
mask = np.swapaxes(mask, 0, 1)
mask = np.swapaxes(mask, 1, 2)
self.add_image(
"lunar_crater",
image_id=image_id,
path=image_path,
width=width,
height=height,
annotation_path=annotation_path,
annotation = mask)
def load_mask(self, image_id):
info = self.image_info[image_id]
if info["source"] != "lunar_crater":
return super(self.__class__, self).load_mask(image_id)
mask = info["annotation"]
return mask.astype(np.bool), np.ones([mask.shape[-1]], dtype=np.int32)
def image_reference(self, image_id):
info = self.image_info[image_id]
if info["source"] == "lunar_crater":
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
def display_mask(self, image_id):
masks, ids = self.load_mask(image_id)
mask = mask.max(2)
plt.imshow(mask)
plt.show()
############################################################
# Training
############################################################
if __name__ == '__main__':
config = CraterConfig()
config.display()
dataset = CraterDataset()
dataset.load_crater('../../dataset/lunar_craters', 'train', '0')
dataset.load_crater('../../dataset/lunar_craters', 'train', '1')
#dataset.load_crater('../../dataset/lunar_craters', 'train', '2')
#dataset.load_crater('../../dataset/lunar_craters', 'train', '3')
a,b = dataset.load_mask(65)
| [
"[email protected]"
] | |
31e431da6f3792207879669b3ab90c80a53e57f4 | 1bb42bac177fb4e979faa441363c27cb636a43aa | /generalization/synthesization/cifar_synthesis.py | 988e89282aea8d759288af356198101494572410 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | google-research/federated | a6040e80fa0fbf533e0d665c66a9bc549d208b3d | 329e60fa56b87f691303638ceb9dfa1fc5083953 | refs/heads/master | 2023-08-28T13:10:10.885505 | 2023-08-22T23:06:08 | 2023-08-22T23:06:40 | 295,559,343 | 595 | 187 | Apache-2.0 | 2022-05-12T08:42:53 | 2020-09-14T23:09:07 | Python | UTF-8 | Python | false | false | 11,222 | py | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Synthesize a federated dataset from CIFAR10/100."""
import collections
import functools
from typing import Mapping, Optional, Tuple
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_federated as tff
from generalization.synthesization import coarse_dirichlet
from generalization.synthesization import dirichlet
from generalization.synthesization import gmm_embedding
def _cifar_consistency_postprocesser(ds: tf.data.Dataset) -> tf.data.Dataset:
"""Preprocess to keep consistency with the TFF official CIFAR dataset."""
def elem_postprocesser(
elem: Mapping[str, tf.Tensor]) -> Mapping[str, tf.Tensor]:
return collections.OrderedDict(image=elem['image'], label=elem['label'])
return ds.map(elem_postprocesser)
def _load_cifar_dataset(base_dataset_name: str, include_train: bool,
include_test: bool) -> tf.data.Dataset:
"""Load CIFAR dataset."""
total_ds_dict = tfds.load(base_dataset_name)
if include_train and not include_test:
ds = total_ds_dict['train']
elif include_test and (not include_train):
ds = total_ds_dict['test']
elif include_test and include_train:
ds = total_ds_dict['train'].concatenate(total_ds_dict['test'])
else:
raise ValueError('At least one of the `include_train` and'
'`include_test` must be True.')
return ds
def _load_cifar_pretrained_model(efficient_net_b: int = 7) -> tf.keras.Model:
"""Load pretrained model for CIFAR."""
model_builder = getattr(tf.keras.applications.efficientnet,
f'EfficientNetB{efficient_net_b}')
base_model = model_builder(
include_top=False,
weights='imagenet',
input_shape=(32, 32, 3),
)
inputs = tf.keras.Input(shape=(32, 32, 3))
x = base_model(inputs, training=False) # (None, 1, 1, 1280)
outputs = tf.keras.layers.Flatten()(x) # (None, 1280)
return tf.keras.Model(inputs=inputs, outputs=outputs)
def synthesize_cifar_by_gmm_embedding(
base_dataset_name: str,
num_clients: int,
efficient_net_b: int,
pca_components: Optional[int],
use_progressive_matching: bool,
kl_pairwise_batch_size: int,
gmm_init_params: str,
include_train: bool,
include_test: bool,
seed: Optional[int],
) -> Tuple[tff.simulation.datasets.ClientData, str]:
"""Synthesize a federated dataset from a CIFAR-like dataset via GMM over embeddding.
Args:
base_dataset_name: A str representing the name of the base CIFAR-like
dataset, can be either 'cifar10' or 'cifar100'.
num_clients: An integer representing the number of clients to construct.
efficient_net_b: An integer ranging from 0--7 representing the size of the
EfficientNet pretrained model.
pca_components: An optional integer representing the number of PCA
components to be extracted from the embedding arrays for GMM. If None, the
full embedding array will be used for GMM.
use_progressive_matching: Whether to use progressive matching. If True, the
function will progressively match the clusters of one unmatched label with
a matched label by computing the optimal bipartite matching under pairwise
KL divergence. If False, the function will randomly match the clusters
across labels.
kl_pairwise_batch_size: An optional integer representing the batch size when
computing pairwise KL divergence. If None, the full cost matrix will be
computed in one batch. This could result in large memory cost.
gmm_init_params: A str representing the initialization mode of GMM, can be
either 'random' or 'kmeans'.
include_train: A boolean representing whether to include training split of
the original CIFAR dataset.
include_test: A boolean representing whether to include test split of the
original CIFAR dataset. At least one of the include_train and include_test
should be True.
seed: An optional integer representing the random seed for all random
procedures. If None, no random seed is used.
Returns:
A ClientData instance holding the resulting federated dataset, and a
str representing the name of the synthesized dataset.
"""
dataset = _load_cifar_dataset(
base_dataset_name, include_train=include_train, include_test=include_test)
ds_name = base_dataset_name
if include_train and (not include_test):
ds_name = ds_name + '_train_only'
elif include_test and (not include_train):
ds_name = ds_name + '_test_only'
name = ','.join([
ds_name, 'gmm_embedding', f'clients={num_clients}',
f'model=b{efficient_net_b}', f'pca={pca_components}', 'matching=' +
('progressive_optimal' if use_progressive_matching else 'random'),
f'gmm_init={gmm_init_params}', f'seed={seed}'
])
cd = gmm_embedding.synthesize_by_gmm_over_pretrained_embedding(
dataset=dataset,
pretrained_model_builder=functools.partial(
_load_cifar_pretrained_model, efficient_net_b=efficient_net_b),
num_clients=num_clients,
pca_components=pca_components,
input_name='image',
label_name='label',
use_progressive_matching=use_progressive_matching,
kl_pairwise_batch_size=kl_pairwise_batch_size,
gmm_init_params=gmm_init_params,
seed=seed)
cd = cd.preprocess(_cifar_consistency_postprocesser)
return cd, name
def synthesize_cifar_by_dirichlet_over_labels(
base_dataset_name: str, num_clients: int, concentration_factor: float,
use_rotate_draw: bool, include_train: bool, include_test: bool,
seed: Optional[int]) -> Tuple[tff.simulation.datasets.ClientData, str]:
"""Synthesize a federated dataset from a CIFAR-like dataset via dirichlet over labels.
Args:
base_dataset_name: A str representing the name of the base CIFAR-like
dataset, can be either 'cifar10' or 'cifar100'.
num_clients: An integer representing the number of clients to construct.
concentration_factor: A float-typed parameter of Dirichlet distribution.
Each client will sample from Dirichlet(concentration_factor *
label_relative_popularity) to get a multinomial distribution over labels.
      It controls the data heterogeneity of clients. As it approaches 0, each
      client only has data from a single category label; as it approaches
      infinity, the client distribution approaches the overall popularity.
use_rotate_draw: Whether to rotate the drawing clients. If True, each client
will draw only one sample at once, and then rotate to the next random
client. This is intended to prevent the last clients from deviating from
its desired distribution. If False, a client will draw all the samples at
once before moving to the next client.
include_train: A boolean representing whether to include training split of
the original CIFAR dataset.
include_test: A boolean representing whether to include test split of the
original CIFAR dataset. At least one of the include_train and include_test
should be True.
seed: An optional integer representing the random seed for all random
procedures. If None, no random seed is used.
Returns:
A ClientData instance holding the resulting federated dataset, and a
str representing the name of the synthesized dataset.
"""
dataset = _load_cifar_dataset(
base_dataset_name, include_train=include_train, include_test=include_test)
ds_name = base_dataset_name
if include_train and (not include_test):
ds_name = ds_name + '_train_only'
elif include_test and (not include_train):
ds_name = ds_name + '_test_only'
name = ','.join([
ds_name, 'dirichlet', f'clients={num_clients}',
f'concentration_factor={concentration_factor}',
f'rotate={use_rotate_draw}', f'seed={seed}'
])
cd = dirichlet.synthesize_by_dirichlet_over_labels(
dataset=dataset,
num_clients=num_clients,
concentration_factor=concentration_factor,
use_rotate_draw=use_rotate_draw,
seed=seed)
cd = cd.preprocess(_cifar_consistency_postprocesser)
return cd, name
def synthesize_cifar100_over_coarse_and_fine_labels(
num_clients: int, coarse_concentration_factor: float,
fine_concentration_factor: float, use_rotate_draw: bool,
include_train: bool, include_test: bool,
seed: Optional[int]) -> Tuple[tff.simulation.datasets.ClientData, str]:
"""Synthesize a federated dataset from CIFAR100 via dirichlet over coarse and fine labels.
Args:
num_clients: An integer representing the number of clients to construct.
coarse_concentration_factor: A float-typed parameter of Dirichlet
distribution to draw coarse labels.
fine_concentration_factor: A float-typed parameter of Dirichlet distribution
to draw fine labels.
use_rotate_draw: Whether to rotate the drawing clients. If True, each client
will draw only one sample at once, and then rotate to the next random
client. This is intended to prevent the last clients from deviating from
its desired distribution. If False, a client will draw all the samples at
once before moving to the next client.
include_train: A boolean representing whether to include training split of
the original CIFAR dataset.
include_test: A boolean representing whether to include test split of the
original CIFAR dataset. At least one of the include_train and include_test
should be True.
seed: An optional integer representing the random seed for all random
procedures. If None, no random seed is used.
Returns:
A ClientData instance holding the resulting federated dataset, and a
str representing the name of the synthesized dataset.
"""
dataset = _load_cifar_dataset(
'cifar100', include_train=include_train, include_test=include_test)
ds_name = 'cifar100'
if include_train and (not include_test):
ds_name = ds_name + '_train_only'
elif include_test and (not include_train):
ds_name = ds_name + '_test_only'
name = ','.join([
ds_name, 'coarse_dirichlet', f'clients={num_clients}',
f'coarse_factor={coarse_concentration_factor}',
f'fine_factor={fine_concentration_factor}', f'rotate={use_rotate_draw}',
f'seed={seed}'
])
cd = coarse_dirichlet.synthesize_by_dirichlet_over_coarse_and_fine_labels(
dataset=dataset,
num_clients=num_clients,
coarse_concentration_factor=coarse_concentration_factor,
fine_concentration_factor=fine_concentration_factor,
use_rotate_draw=use_rotate_draw,
seed=seed)
cd = cd.preprocess(_cifar_consistency_postprocesser)
return cd, name
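# Illustrative sketch (not part of the original module): a numpy-only version
# of the Dirichlet-over-labels draw that the docstrings above describe. The
# function name and defaults are hypothetical; the real partitioning logic
# lives in `dirichlet.synthesize_by_dirichlet_over_labels`.
def _dirichlet_label_skew_sketch(label_counts, num_clients,
                                 concentration_factor, seed=None):
  """Per-client multinomials over labels via Dirichlet(factor * popularity)."""
  import numpy as np  # local import keeps the sketch self-contained
  label_counts = np.asarray(label_counts, dtype=float)
  popularity = label_counts / label_counts.sum()
  rng = np.random.default_rng(seed)
  # Small factors push each row toward one-hot (strong label skew); large
  # factors push each row toward the overall popularity (near-IID clients).
  return rng.dirichlet(concentration_factor * popularity, size=num_clients)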
| [
"[email protected]"
] | |
1c839c157f06ac377b799639d8ae03cc25682b58 | 4235ae775f1061217504ea027ef32b533e8dea34 | /0x04-pagination/0-simple_helper_function.py | 83390a8b4302c52fe4e99a7266d5e60bc08eb3e8 | [] | no_license | Leidysalda/holbertonschool-web_back_end | 892cda3361a5fc18693f645b5b8f058087f6b9fd | 0b8bd14fa018d9480fd31cf300dad2a3ccd439d0 | refs/heads/master | 2023-08-23T18:19:17.226745 | 2021-10-18T05:26:30 | 2021-10-18T05:26:30 | 387,662,848 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | #!/usr/bin/env python3
"""Simple helper function
"""
def index_range(page: int, page_size: int) -> tuple:
"""Index range"""
return (page - 1) * page_size, page * page_size
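# Example: index_range(3, 15) == (30, 45) -> items 31..45 make up page 3.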
| [
"[email protected]"
] | |
43ae88a0b1629e2e7b1ee73877ac8e03e6ad97a0 | 107ebb6d25812d12718d3f0731770318fb0eb2d2 | /study_code/Day_15/15展示表格Tree.py | d67b5c4e5773f4cf666b250e592e4924b4ca87ae | [] | no_license | taoranzhishang/Python_codes_for_learning | 0c5fa25a5da80e5cbca8cf0b9b3703d488e1af6f | 23d12b8c7524f3672ff3baed94dbfed04df821b5 | refs/heads/master | 2023-03-09T12:16:02.749807 | 2021-02-26T13:04:08 | 2021-02-26T13:04:08 | 315,070,954 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | import tkinter
from tkinter import ttk
win = tkinter.Tk()
tree = ttk.Treeview(win)  # displays data as a table or a tree
tree["columns"] = ("Name", "Age", "Height")
tree.column("Name", width=100) # 表示列,不显示
tree.column("Age", width=100)
tree.column("Height", width=100)
tree.heading("Name", text="姓名") # 表头
tree.heading("Age", text="年龄")
tree.heading("Height", text="身高")
tree.insert('', 0, text="line1", values=('1', '2', '3'))  # insert rows
tree.insert('', 1, text="line2", values=('1', '2', '3'))
tree.insert('', 2, text="line3", values=('1', '2', '3'))
tree.insert('', 3, text="line4", values=('1', '2', '3'))
tree.pack()
win.mainloop()
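# Nesting sketch (illustrative): passing a parent item id nests rows, e.g.
#   parent = tree.insert('', 'end', text="group")
#   tree.insert(parent, 'end', text="child", values=('4', '5', '6'))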
| [
"[email protected]"
] | |
31b99f77c0ae6772cfdef2260dad07ec24ebc40a | e77a7cc1ed343a85662f0ad3c448a350ab776261 | /data_structures/array/dutch_flag_problem.py | f898486c9edeca6b8bfaf519c0fddcb52c4d54b5 | [
"MIT"
] | permissive | M4cs/python-ds | 9dcecab10291be6a274130c42450319dc112ac46 | 434c127ea4c49eb8d6bf65c71ff6ee10361d994e | refs/heads/master | 2020-08-10T03:40:22.340529 | 2019-10-10T17:52:28 | 2019-10-10T17:52:28 | 214,247,733 | 2 | 0 | MIT | 2019-10-10T17:43:31 | 2019-10-10T17:43:30 | null | UTF-8 | Python | false | false | 446 | py | def dutch(arr):
    # Dutch national flag: one pass, three pointers -- 0s end up before `low`,
    # 2s after `high`, while `mid` scans the unknown middle region.
    low = 0
mid = 0
high = len(arr) - 1
while mid <= high:
if arr[mid] == 0:
arr[low], arr[mid] = arr[mid], arr[low]
low += 1
mid += 1
elif arr[mid] == 1:
mid += 1
else:
arr[mid], arr[high] = arr[high], arr[mid]
high -= 1
arr = [1,0,2,1,0,2,1,2,1,2,1,1,0,2,1,0,1,2,1,2,1,1,2,1,0,2,1,1]
print(arr)
dutch(arr)
print(arr)
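# The three pointers never move backwards, so the sort is a single O(n) pass
# using O(1) extra space.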
| [
"[email protected]"
] | |
905bd2fa91b8f2f596253a2ea0da0afae3162704 | 73c5bc071e496b67677c55dfb9cd8621eac1f34e | /probabilistic_utils/gmm_utils.py | 2887862615fb3b85f7da7651fe0e1736afc1d27f | [] | no_license | salt-fly/unsup_temp_embed | 55324ffba7b2858ff3c255f2a984d0253ea23dfb | be7fa1fdfb4466673e3d6953924194cdead281a5 | refs/heads/master | 2020-05-30T19:33:51.311017 | 2019-04-29T18:07:33 | 2019-04-29T18:07:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,763 | py | #!/usr/bin/env python
"""
"""
__author__ = 'Anna Kukleva'
__date__ = 'September 2018'
import numpy as np
from utils.arg_pars import opt
from utils.logging_setup import logger
class AuxiliaryGMM:
def __init__(self):
self.means_ = [0]
self.covariances_ = [0]
def score_samples(self, features):
        # every sample scores -inf, so this placeholder never wins a likelihood comparison
        result = np.ones(features.shape[0]) * (-np.inf)
        return result
class GMM_trh:
def __init__(self, gmm):
self._gmm = gmm
self.trh = np.inf
self.mean_score = 0
self.bg_trh_score = []
if not isinstance(gmm, AuxiliaryGMM):
self._define_threshold()
def _define_threshold(self):
mean = self._gmm.means_[0]
self.mean_score = self._gmm.score_samples(mean.reshape(1, -1))
logger.debug('mean: %f' % self.mean_score)
# cov = self._gmm.covariances_[0]
# sample = (mean - 3 * np.diag(cov)).reshape(1, -1)
# sample_score = self._gmm.score_samples(sample)
# # self.trh = self._gmm.score_samples(sample)
# self.trh = self.mean_score - opt.bg_trh
# str_print = 'GMM: %f lower bound: %f ' % (self.mean_score - sample_score, self._gmm.lower_bound_)
# str_print += 'var max: %f min: %f mean: %f' % (np.max(cov), np.min(cov), np.mean(cov))
# logger.debug(str_print)
def score_samples(self, features):
return self._gmm.score_samples(features)
def append_bg_score(self, score):
self.bg_trh_score.append(score)
def update_trh(self, new_bg_trh=None):
if self.mean_score != 0:
new_bg_trh = opt.bg_trh if new_bg_trh is None else new_bg_trh
self.trh = self.mean_score - new_bg_trh
# self.trh = self.mean_score - 1
| [
"[email protected]"
] | |
a8dc4dd41c5abc7755f49846ffa67574433c31ea | 2f2e9cd97d65751757ae0a92e8bb882f3cbc5b5b | /287.寻找重复数.py | e810ef03eb114a003fdd86c50269732634088bba | [] | no_license | mqinbin/python_leetcode | 77f0a75eb29f8d2f9a789958e0120a7df4d0d0d3 | 73e0c81867f38fdf4051d8f58d0d3dc245be081e | refs/heads/main | 2023-03-10T18:27:36.421262 | 2021-02-25T07:24:10 | 2021-02-25T07:24:10 | 314,410,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,150 | py | #
# @lc app=leetcode.cn id=287 lang=python3
#
# [287] Find the Duplicate Number
#
# @lc code=start
class Solution:
# def findDuplicate(self, nums: List[int]) -> int:
def findDuplicate(self, nums: List[int]) -> int:
        '''Floyd's cycle detection (fast and slow pointers)'''
slow = fast = 0
while True:
slow = nums[slow]
fast = nums[nums[fast]]
print(slow,fast)
if slow == fast:
fast = 0
while True:
fast = nums[fast]
slow = nums[slow]
print('-' ,slow,fast)
if fast == slow:
return fast
# def findDuplicate(self, nums: List[int]) -> int:
    # '''binary search on the value range'''
# n = len(nums)
# left = 1
# right = n
# while left < right:
# mid = (left + right) // 2
# cnt = 0
# for num in nums:
# if num <= mid:
# cnt += 1
# if cnt <= mid:
# left = mid + 1
# else:
# right = mid
# return left
# @lc code=end
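# Why the cycle argument works (sketch): view i -> nums[i] as a walk over the
# indexes; with n + 1 numbers drawn from [1, n], two indexes must hold the same
# value, so the walk enters a cycle whose entry point is the duplicate. The
# second loop advances both pointers one step at a time to find that entry.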
| [
"[email protected]"
] | |
171e5c2dbed1ec67eced3b19ddf7954dfb167246 | 844a9fc6aac74166640657a54de6ea0978e433d0 | /core/losses.py | a96bd13d096c17345953cfc043f3cf24bf7b9868 | [] | no_license | TerrisGO/tDBN | 441ae76a7b9369ec8e93bfd35f15de615d1c1e57 | 91644521f0adcc838137523a7a178394e39f4d7b | refs/heads/master | 2022-06-04T00:51:08.247125 | 2020-05-03T04:21:41 | 2020-05-03T04:21:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,727 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classification and regression loss functions for object detection.
Localization losses:
* WeightedL2LocalizationLoss
* WeightedSmoothL1LocalizationLoss
Classification losses:
* WeightedSigmoidClassificationLoss
* WeightedSoftmaxClassificationLoss
* BootstrappedSigmoidClassificationLoss
"""
from abc import ABCMeta, abstractmethod
import numpy as np
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn import functional as F
import torchplus
def indices_to_dense_vector(indices,
size,
indices_value=1.,
default_value=0,
dtype=np.float32):
"""Creates dense vector with indices set to specific value and rest to zeros.
This function exists because it is unclear if it is safe to use
tf.sparse_to_dense(indices, [size], 1, validate_indices=False)
with indices which are not ordered.
This function accepts a dynamic size (e.g. tf.shape(tensor)[0])
Args:
indices: 1d Tensor with integer indices which are to be set to
indices_values.
size: scalar with size (integer) of output Tensor.
indices_value: values of elements specified by indices in the output vector
default_value: values of other elements in the output vector.
dtype: data type.
Returns:
dense 1D Tensor of shape [size] with indices set to indices_values and the
rest set to default_value.
"""
dense = torch.zeros(size).fill_(default_value)
dense[indices] = indices_value
return dense
class Loss(object):
"""Abstract base class for loss functions."""
__metaclass__ = ABCMeta
def __call__(self,
prediction_tensor,
target_tensor,
ignore_nan_targets=False,
scope=None,
**params):
"""Call the loss function.
Args:
prediction_tensor: an N-d tensor of shape [batch, anchors, ...]
representing predicted quantities.
target_tensor: an N-d tensor of shape [batch, anchors, ...] representing
regression or classification targets.
ignore_nan_targets: whether to ignore nan targets in the loss computation.
E.g. can be used if the target tensor is missing groundtruth data that
shouldn't be factored into the loss.
scope: Op scope name. Defaults to 'Loss' if None.
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: a tensor representing the value of the loss function.
"""
if ignore_nan_targets:
target_tensor = torch.where(torch.isnan(target_tensor),
prediction_tensor,
target_tensor)
return self._compute_loss(prediction_tensor, target_tensor, **params)
@abstractmethod
def _compute_loss(self, prediction_tensor, target_tensor, **params):
"""Method to be overridden by implementations.
Args:
prediction_tensor: a tensor representing predicted quantities
target_tensor: a tensor representing regression or classification targets
**params: Additional keyword arguments for specific implementations of
the Loss.
Returns:
loss: an N-d tensor of shape [batch, anchors, ...] containing the loss per
anchor
"""
pass
class WeightedL2LocalizationLoss(Loss):
"""L2 localization loss function with anchorwise output support.
Loss[b,a] = .5 * ||weights[b,a] * (prediction[b,a,:] - target[b,a,:])||^2
"""
def __init__(self, code_weights=None):
super().__init__()
if code_weights is not None:
self._code_weights = np.array(code_weights, dtype=np.float32)
self._code_weights = Variable(torch.from_numpy(self._code_weights))#XXX remove .cuda())
else:
self._code_weights = None
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
diff = prediction_tensor - target_tensor
if self._code_weights is not None:
self._code_weights = self._code_weights.type_as(prediction_tensor)
self._code_weights = self._code_weights.view(1, 1, -1)
diff = self._code_weights * diff
weighted_diff = diff * weights.unsqueeze(-1)
square_diff = 0.5 * weighted_diff * weighted_diff
return square_diff.sum(2)
class WeightedSmoothL1LocalizationLoss(Loss):
"""Smooth L1 localization loss function.
The smooth L1_loss is defined elementwise as .5 x^2 if |x|<1 and |x|-.5
otherwise, where x is the difference between predictions and target.
See also Equation (3) in the Fast R-CNN paper by Ross Girshick (ICCV 2015)
"""
def __init__(self, sigma=3.0, code_weights=None, codewise=True):
super().__init__()
self._sigma = sigma
if code_weights is not None:
self._code_weights = np.array(code_weights, dtype=np.float32)
self._code_weights = Variable(torch.from_numpy(self._code_weights)) ## XXX, remove .cuda
else:
self._code_weights = None
self._codewise = codewise
def _compute_loss(self, prediction_tensor, target_tensor, weights=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the (encoded) predicted locations of objects.
target_tensor: A float tensor of shape [batch_size, num_anchors,
code_size] representing the regression targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors] tensor
representing the value of the loss function.
"""
diff = prediction_tensor - target_tensor
if self._code_weights is not None:
code_weights = self._code_weights.type_as(prediction_tensor)
diff = code_weights.view(1, 1, -1) * diff
abs_diff = torch.abs(diff)
abs_diff_lt_1 = torch.le(abs_diff, 1 / (self._sigma**2)).type_as(abs_diff)
loss = abs_diff_lt_1 * 0.5 * torch.pow(abs_diff * self._sigma, 2) \
+ (abs_diff - 0.5 / (self._sigma**2)) * (1. - abs_diff_lt_1)
if self._codewise:
anchorwise_smooth_l1norm = loss
if weights is not None:
anchorwise_smooth_l1norm *= weights.unsqueeze(-1)
else:
anchorwise_smooth_l1norm = torch.sum(loss, 2)# * weights
if weights is not None:
anchorwise_smooth_l1norm *= weights
return anchorwise_smooth_l1norm
def _sigmoid_cross_entropy_with_logits(logits, labels):
# to be compatible with tensorflow, we don't use ignore_idx
loss = torch.clamp(logits, min=0) - logits * labels.type_as(logits)
loss += torch.log1p(torch.exp(-torch.abs(logits)))
# transpose_param = [0] + [param[-1]] + param[1:-1]
# logits = logits.permute(*transpose_param)
# loss_ftor = nn.NLLLoss(reduce=False)
# loss = loss_ftor(F.logsigmoid(logits), labels)
return loss
def _softmax_cross_entropy_with_logits(logits, labels):
param = list(range(len(logits.shape)))
transpose_param = [0] + [param[-1]] + param[1:-1]
logits = logits.permute(*transpose_param) # [N, ..., C] -> [N, C, ...]
loss_ftor = nn.CrossEntropyLoss(reduce=False)
loss = loss_ftor(logits, labels.max(dim=-1)[1])
return loss
class WeightedSigmoidClassificationLoss(Loss):
"""Sigmoid cross entropy classification loss function."""
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
weights = weights.unsqueeze(-1)
if class_indices is not None:
weights *= indices_to_dense_vector(class_indices,
prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
return per_entry_cross_ent * weights
class SigmoidFocalClassificationLoss(Loss):
"""Sigmoid focal cross entropy loss.
Focal loss down-weights well classified examples and focusses on the hard
examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
all_zero_negative: bool. if True, will treat all zero as background.
else, will treat first label as background. only affect alpha.
"""
self._alpha = alpha
self._gamma = gamma
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
weights = weights.unsqueeze(2)
if class_indices is not None:
weights *= indices_to_dense_vector(class_indices,
prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
prediction_probabilities = torch.sigmoid(prediction_tensor)
p_t = ((target_tensor * prediction_probabilities) +
((1 - target_tensor) * (1 - prediction_probabilities)))
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = (target_tensor * self._alpha +
(1 - target_tensor) * (1 - self._alpha))
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return focal_cross_entropy_loss * weights
class SoftmaxFocalClassificationLoss(Loss):
"""Softmax focal cross entropy loss.
Focal loss down-weights well classified examples and focusses on the hard
examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t) ^ gamma.
alpha: optional alpha weighting factor to balance positives vs negatives.
"""
self._alpha = alpha
self._gamma = gamma
def _compute_loss(self,
prediction_tensor,
target_tensor,
weights,
class_indices=None):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
class_indices: (Optional) A 1-D integer tensor of class indices.
If provided, computes loss only for the specified class indices.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
weights = weights.unsqueeze(2)
if class_indices is not None:
weights *= indices_to_dense_vector(class_indices,
prediction_tensor.shape[2]).view(1, 1, -1).type_as(prediction_tensor)
per_entry_cross_ent = (_softmax_cross_entropy_with_logits(
labels=target_tensor, logits=prediction_tensor))
# convert [N, num_anchors] to [N, num_anchors, num_classes]
per_entry_cross_ent = per_entry_cross_ent.unsqueeze(-1) * target_tensor
prediction_probabilities = F.softmax(prediction_tensor, dim=-1)
p_t = ((target_tensor * prediction_probabilities) +
((1 - target_tensor) * (1 - prediction_probabilities)))
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = torch.where(target_tensor[..., 0] == 1,
torch.tensor(1 - self._alpha).type_as(per_entry_cross_ent),
torch.tensor(self._alpha).type_as(per_entry_cross_ent))
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return focal_cross_entropy_loss * weights
class WeightedSoftmaxClassificationLoss(Loss):
"""Softmax loss function."""
def __init__(self, logit_scale=1.0):
"""Constructor.
Args:
logit_scale: When this value is high, the prediction is "diffused" and
when this value is low, the prediction is made peakier.
(default 1.0)
"""
self._logit_scale = logit_scale
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors]
representing the value of the loss function.
"""
num_classes = prediction_tensor.shape[-1]
prediction_tensor = torch.div(
prediction_tensor, self._logit_scale)
per_row_cross_ent = (_softmax_cross_entropy_with_logits(
labels=target_tensor.view(-1, num_classes),
logits=prediction_tensor.view(-1, num_classes)))
return per_row_cross_ent.view(weights.shape) * weights
class BootstrappedSigmoidClassificationLoss(Loss):
"""Bootstrapped sigmoid cross entropy classification loss function.
This loss uses a convex combination of training labels and the current model's
predictions as training targets in the classification loss. The idea is that
as the model improves over time, its predictions can be trusted more and we
can use these predictions to mitigate the damage of noisy/incorrect labels,
because incorrect labels are likely to be eventually highly inconsistent with
other stimuli predicted to have the same label by the model.
In "soft" bootstrapping, we use all predicted class probabilities, whereas in
"hard" bootstrapping, we use the single class favored by the model.
See also Training Deep Neural Networks On Noisy Labels with Bootstrapping by
Reed et al. (ICLR 2015).
"""
def __init__(self, alpha, bootstrap_type='soft'):
"""Constructor.
Args:
alpha: a float32 scalar tensor between 0 and 1 representing interpolation
weight
bootstrap_type: set to either 'hard' or 'soft' (default)
Raises:
ValueError: if bootstrap_type is not either 'hard' or 'soft'
"""
if bootstrap_type != 'hard' and bootstrap_type != 'soft':
raise ValueError('Unrecognized bootstrap_type: must be one of '
'\'hard\' or \'soft.\'')
self._alpha = alpha
self._bootstrap_type = bootstrap_type
def _compute_loss(self, prediction_tensor, target_tensor, weights):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
weights: a float tensor of shape [batch_size, num_anchors]
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
if self._bootstrap_type == 'soft':
bootstrap_target_tensor = self._alpha * target_tensor + (
1.0 - self._alpha) * torch.sigmoid(prediction_tensor)
else:
bootstrap_target_tensor = self._alpha * target_tensor + (
1.0 - self._alpha) * (torch.sigmoid(prediction_tensor) > 0.5).float()
per_entry_cross_ent = (_sigmoid_cross_entropy_with_logits(
labels=bootstrap_target_tensor, logits=prediction_tensor))
return per_entry_cross_ent * weights.unsqueeze(2)
| [
"[email protected]"
] | |
5970e87ee2061596cca1ae837cf3d8a58cc9226c | ea637e5e28056d0a332a7892bd241aa6a154e57a | /recyclescrollview.py | c3528005902e474677d732e97bfd75ece4cf01c5 | [
"MIT"
] | permissive | JesusZerpa/kivy-recyclescrollview | 85c4511569ce65e0cecf102238dd36858a6aa92c | 50016c313216833b583066ec0a9ed7e5b271da01 | refs/heads/master | 2022-04-20T21:52:22.862800 | 2020-04-21T00:54:28 | 2020-04-21T00:54:28 | 257,435,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,534 | py | from kivy.factory import Factory
from kivy.properties import *
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.boxlayout import BoxLayout
class RecycleScrollView(Factory.ScrollView):
viewclass=StringProperty("")
data=ListProperty([])
#box
orientation= 'vertical'
default_height= 1000
cursor=0
max_items=10
widget_height=None
def __init__(self,*args,**kwargs):
super(RecycleScrollView,self).__init__(*args,**kwargs)
self.do_scroll_y=True
self.box=BoxLayout(orientation="vertical",size_hint_y= None,height=self.default_height)
self.add_widget(self.box)
def on_parent(self,instance,value):
pass
def on_size(self,instance,value):
height=0
for elem in self.children[0].children:
height+=elem.height
self.children[0].height=height
def on_scroll_move(self,instance):
if self.widget_height:
dx=self.box.height-(self.scroll_y*self.box.height)
if dx>0:
item_passed=dx/self.widget_height
self.cursor=int(item_passed)
self.update()
return super().on_scroll_move(instance)
def on_scroll_stop(self,instance):
if self.widget_height:
dx=self.box.height-(self.scroll_y*self.box.height)
if dx>0:
item_passed=dx/self.widget_height
self.cursor=int(item_passed)
self.update()
return super().on_scroll_stop(instance)
def update(self):
self.clear_widgets()
widget=getattr(Factory,self.viewclass)
_widget=widget()
self.box=FloatLayout(size_hint_y= None,height=self.default_height)
super(RecycleScrollView,self).add_widget(self.box)
self.box.top=self.top
for k,item in enumerate(self.data[self.cursor:self.cursor+self.max_items]):
widget=getattr(Factory,self.viewclass)
_widget=widget()
_widget.size_hint_y=None
self.box.add_widget(_widget)
_widget.pos=(_widget.pos[0],(_widget.height*len(self.data))-(_widget.height*(self.cursor+k+1)))
for elem in item:
setattr(_widget,elem,item[elem])
self.box.height=self.widget_height*len(self.data)
def on_classview(self,instance,value):
instance.classview=value
def on_data(self,instance,value):
#button
#size_hint: (1, None)
#height: 200
self.data=value
for k,item in enumerate(self.data[self.cursor:self.cursor+self.max_items]):
widget=getattr(Factory,self.viewclass)
_widget=widget()
_widget.size_hint_y=None
for elem in item:
setattr(_widget,elem,item[elem])
if self.widget_height==None:
self.widget_height=_widget.height
self.box.add_widget(_widget)
| [
"[email protected]"
] | |
15b938d4100f7d789627cdd8c18f844a41b98930 | 836312e7ced2d5c5ed43d0e3ad34f2144cf9064e | /APIs/5_SecuringYourApi/models.py | 9f1b833e0b49a78a074f3a8471d669f318a8cb51 | [] | no_license | muhammad-mamdouh/udacity-fullstack-tools | 4342cdca75d88184a095d69b9484a4e50c87f905 | a881845042f3706b5e37ea72df645859541c73ab | refs/heads/master | 2022-12-15T23:33:20.813201 | 2019-07-11T21:05:53 | 2019-07-11T21:05:53 | 187,712,425 | 1 | 0 | null | 2022-12-08T05:51:53 | 2019-05-20T20:58:42 | Python | UTF-8 | Python | false | false | 715 | py | from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from passlib.apps import custom_app_context as pwd_context
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
username = Column(String(32), index=True)
password_hash = Column(String(64))
def hash_password(self, password):
self.password_hash = pwd_context.hash(password)
def verify_password(self, password):
return pwd_context.verify(password, self.password_hash)
engine = create_engine('sqlite:///users.db')
Base.metadata.create_all(engine)
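# Usage sketch (illustrative; session handling is assumed, not shown here):
#   user = User(username='alice')
#   user.hash_password('s3cret')     # stores only the passlib hash
#   user.verify_password('s3cret')   # -> True; plaintext is never persisted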
| [
"[email protected]"
] | |
a95cff2d9d860046fa74ac94747beb54443c7426 | 632099ac0d895943cbbeb9048a2cdfcd21102411 | /LPD8/__init__.py | 854fab1bea8584f563056348a8c3ca56979773b0 | [] | no_license | Toniigor/AbletonLive9_RemoteScripts | 7f4bbf759a79629584413f6d1797005e8cd7f2ff | fed1e5ee61ea12ea6360107a65a6e666364353ff | refs/heads/master | 2021-01-16T21:19:25.330221 | 2014-06-06T12:33:03 | 2014-06-06T12:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | #Embedded file name: /Users/versonator/Jenkins/live/Projects/AppLive/Resources/MIDI Remote Scripts/LPD8/__init__.py
from _Generic.GenericScript import GenericScript
import Live
from config import *
def create_instance(c_instance):
""" The generic script can be customised by using parameters (see config.py). """
return GenericScript(c_instance, Live.MidiMap.MapMode.absolute, Live.MidiMap.MapMode.absolute, DEVICE_CONTROLS, TRANSPORT_CONTROLS, VOLUME_CONTROLS, TRACKARM_CONTROLS, BANK_CONTROLS, CONTROLLER_DESCRIPTION)
from _Framework.Capabilities import *
def get_capabilities():
return {CONTROLLER_ID_KEY: controller_id(vendor_id=2536, product_ids=[117], model_name='LPD8'),
PORTS_KEY: [inport(props=[NOTES_CC, REMOTE, SCRIPT]), outport(props=[SCRIPT])]} | [
"[email protected]"
] | |
2a3e7dcabd45c50afa9aaec3c28922e67f7594b5 | caceb60f71165772b6d6155f619e79189e7c80a9 | /第一期/成都-MFC/task002/selflearn/advance02decorator/decorator/demo0.py | bcaf2abdb66537909eedbbbbfd2e83322255515b | [
"Apache-2.0"
] | permissive | beidou9313/deeptest | ff41999bb3eb5081cdc8d7523587d7bc11be5fea | e046cdd35bd63e9430416ea6954b1aaef4bc50d5 | refs/heads/master | 2021-04-26T23:06:08.890071 | 2019-04-03T02:18:44 | 2019-04-03T02:18:44 | 123,931,080 | 0 | 0 | Apache-2.0 | 2018-03-05T14:25:54 | 2018-03-05T14:25:53 | null | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = 'MFC'
__time__ = '18/1/24 15:14'
"""
01. Python Advanced 1
02. Python Advanced 2 - generators, closures, decorators
05 - Decorators, part 01
"""
# example 1
def foo():
print('foo')
foo  # refers to the function object itself
foo()  # calls the function
# example 2
def foo():
print('foo')
foo = lambda x: x + 1  # the name foo now points to a different function
r = foo(3)  # runs the lambda, not the original foo: the name foo was rebound to the anonymous function
print(r)
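# example 3 (an illustrative extension, not part of the original lesson):
# since a name can be rebound to any callable, a decorator simply rebinds foo
# to a wrapper around the original function
def log_calls(func):
    def wrapper(*args, **kwargs):
        print('calling', func.__name__)
        return func(*args, **kwargs)
    return wrapper
@log_calls
def foo():
    print('foo')
foo()  # prints 'calling foo' and then 'foo'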
| [
"[email protected]"
] | |
cde54a09147bedf311abc8e1fe4d88900bc757e9 | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/scipy/optimize/tests/test_linprog.py | a245d4719d083c4e3e039a7d01fc03a1e56ecc28 | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:f8b63b53bd829fbc07a8138950ebbc2e32d2b860e947b44b004584abe4170b6a
size 69667
| [
"[email protected]"
] | |
a06fc5c4de927107934a98e2926e54b640c22983 | 478de38a95c2729ee2ef8c77b1c5a81f23aedb59 | /Programming-Algorithm/Palindrome Partitioning.py | a1578e415d692bf6775851f2802cd87fc34a869b | [] | no_license | shuzhancnjx/leetcode- | 0c711f720ef653ddff2af3af697a453122c28403 | 12093c92ef33707ad8ccdd59ad040c04cad1ee3b | refs/heads/master | 2020-12-24T16:32:25.976747 | 2016-03-03T15:36:44 | 2016-03-03T15:36:44 | 37,101,621 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,381 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Sep 16 14:08:57 2015
@author: ZSHU
"""
'''
1. the basic idea is to have a list 'temp' formed by each letter in the string, i.e., list(s)
2. combine the components of 'temp' when they are palindrome
3. 'pos' is used to record the center for determining the palindrome
'''
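# For example (using the implementation below): Solution().partition("aab")
# returns [['a', 'a', 'b'], ['aa', 'b']] (order may vary) -- every way to cut
# "aab" into palindromic substrings.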
class Solution(object):
def partition(self, s):
"""
:type s: str
:rtype: List[List[str]]
"""
def par(res, temp, pos):
if pos>len(temp)-1:
res.append(temp)
return
else:
p1=pos-1; p2=pos+1
while p1>=0 and p2<len(temp):
if temp[p1]==temp[p2]:
par(res, temp[:p1]+[''.join(temp[p1:p2+1])] +temp[p2+1:],p1+1)
p1-=1;p2+=1
else:
break
p1=pos; p2=pos+1
while p1>=0 and p2<len(temp):
if temp[p1]==temp[p2]:
par(res, temp[:p1]+[''.join(temp[p1:p2+1])] +temp[p2+1:], p1+1)
p1-=1; p2+=1
else:
break
            par(res, temp, pos+1)  # if no palindrome is centered at temp[pos], move on to the next position
res=[]
par(res, list(s),0)
return res
| [
"[email protected]"
] | |
848614b9568aff2a658d60b2321afcea163f00e3 | 2d647ce5b0acf255b0713304acdb0f0c193df8fc | /doc/conf.py | d526459dffd408e28854f7bbfcccf8efeec1311c | [
"CC-BY-NC-4.0",
"MIT"
] | permissive | tonybaloney/pycharm-security | a1ed540f47a0af4cd659c72643f63f800df01702 | 5c1ceeb1fb2a18478fa7076a81f9f47fd450e592 | refs/heads/master | 2023-08-17T20:09:22.924182 | 2023-08-16T02:12:57 | 2023-08-16T02:12:57 | 231,985,043 | 341 | 27 | MIT | 2023-09-14T19:03:27 | 2020-01-05T22:35:55 | Kotlin | UTF-8 | Python | false | false | 2,438 | py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
project = 'PyCharm Python Security plugin'
copyright = '2020, Anthony Shaw'
author = 'Anthony Shaw'
# -- General configuration ---------------------------------------------------
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx_markdown_tables"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
source_suffix = [".rst", ".md"]
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
html_theme_options = {
'logo': 'logo.png',
'logo_name': True,
'logo_text_align': "center",
'github_user': 'tonybaloney',
'github_repo': 'pycharm-security',
'github_banner': True,
'github_button': True,
'fixed_sidebar': True,
'sidebar_width': '330px',
'page_width': '70%',
'extra_nav_links': {
'JetBrains Marketplace': "https://plugins.jetbrains.com/plugin/13609-python-security",
"GitHub Marketplace": "https://github.com/marketplace/actions/pycharm-python-security-scanner",
"Docker Hub": "https://hub.docker.com/r/anthonypjshaw/pycharm-security"
},
'show_powered_by': False
}
html_show_copyright = False
html_show_sphinx = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_sidebars = {'**': ['about.html', 'navigation.html', 'searchbox.html'], }
master_doc = 'index' | [
"[email protected]"
] | |
8247ddf7a63ae3cc954fc819bd2bfa9d3ed5c402 | a46fc5187245f7ac79758ae475d4d865e24f482b | /33_search_in_rotated_array/search_in_rotated.py | 18c24c01cea4ab774ce6a9b99cb74a3bf1bf59d2 | [] | no_license | narnat/leetcode | ae31f9321ac9a087244dddd64706780ea57ded91 | 20a48021be5e5348d681e910c843e734df98b596 | refs/heads/master | 2022-12-08T00:58:12.547227 | 2020-08-26T21:04:53 | 2020-08-26T21:04:53 | 257,167,879 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | #!/usr/bin/env python3
from typing import List
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        if not nums:
            return -1
        # Step 1: binary-search for the pivot, i.e. the index of the
        # smallest element (the rotation point).
        left = 0
        right = len(nums) - 1
        while left < right:
            mid = left + (right - left) // 2
            if nums[mid] > nums[right]:
                left = mid + 1
            else:
                right = mid
        pivot = left
        # Step 2: the target can only sit in one of the two sorted runs;
        # choose it by comparing against nums[pivot] and the last element.
        left = 0
        right = len(nums) - 1
        if nums[pivot] <= target <= nums[right]:
            left = pivot
        else:
            right = pivot
        # Step 3: ordinary binary search within the chosen range.
        while left <= right:
            mid = left + (right - left) // 2
            if target == nums[mid]:
                return mid
            if nums[mid] > target:
                right = mid - 1
            else:
                left = mid + 1
        return -1
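# Quick sanity checks (illustrative additions, not part of the original kata file):
if __name__ == "__main__":
    assert Solution().search([4, 5, 6, 7, 0, 1, 2], 0) == 4   # pivot at index 4
    assert Solution().search([4, 5, 6, 7, 0, 1, 2], 3) == -1  # absent value
    assert Solution().search([], 5) == -1                     # empty input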
| [
"[email protected]"
] | |
b6d0c2267d52b5f791315417a324c2657e86e1db | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-dataform/samples/generated_samples/dataform_v1beta1_generated_dataform_list_workflow_invocations_async.py | dde7ff28eefb1bca3666f553e8ef38cb254977af | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,967 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListWorkflowInvocations
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataform
# [START dataform_v1beta1_generated_Dataform_ListWorkflowInvocations_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import dataform_v1beta1
async def sample_list_workflow_invocations():
# Create a client
client = dataform_v1beta1.DataformAsyncClient()
# Initialize request argument(s)
request = dataform_v1beta1.ListWorkflowInvocationsRequest(
parent="parent_value",
)
# Make the request
page_result = client.list_workflow_invocations(request=request)
# Handle the response
async for response in page_result:
print(response)
# [END dataform_v1beta1_generated_Dataform_ListWorkflowInvocations_async]
| [
"[email protected]"
] | |
68ca9762fd012943155b4e292cef1c5cbd9fa5f5 | 6649efd4a95645938221eca58404db5663cd2491 | /official/vision/dataloaders/tf_example_label_map_decoder_test.py | 3ff9a8b3c14c70ea2289e9cf6783a02f93db5b72 | [
"Apache-2.0"
] | permissive | Dithn/models | 8447866855959946358f2e5160b7d31aaafcfc98 | 36a140b8765eaa07525ac42a00cbd01a8b03b98e | refs/heads/master | 2023-09-01T07:41:28.596877 | 2022-03-16T18:12:00 | 2022-03-16T18:13:23 | 228,201,096 | 1 | 0 | Apache-2.0 | 2021-09-23T21:19:16 | 2019-12-15T14:52:24 | Python | UTF-8 | Python | false | false | 7,746 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_example_label_map_decoder.py."""
import os
# Import libraries
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from official.vision.dataloaders import tf_example_label_map_decoder
from official.vision.dataloaders import tfexample_utils
LABEL_MAP_CSV_CONTENT = '0,class_0\n1,class_1\n2,class_2'
class TfExampleDecoderLabelMapTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.parameters(
(100, 100, 0),
(100, 100, 1),
(100, 100, 2),
(100, 100, 0),
(100, 100, 1),
(100, 100, 2),
)
def test_result_shape(self, image_height, image_width, num_instances):
label_map_dir = self.get_temp_dir()
label_map_name = 'label_map.csv'
label_map_path = os.path.join(label_map_dir, label_map_name)
with open(label_map_path, 'w') as f:
f.write(LABEL_MAP_CSV_CONTENT)
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map_path, include_mask=True)
serialized_example = tfexample_utils.create_detection_test_example(
image_height=image_height,
image_width=image_width,
image_channel=3,
num_instances=num_instances).SerializeToString()
decoded_tensors = decoder.decode(
tf.convert_to_tensor(value=serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)
self.assertAllEqual(
(image_height, image_width, 3), results['image'].shape)
self.assertEqual(tfexample_utils.DUMP_SOURCE_ID, results['source_id'])
self.assertEqual(image_height, results['height'])
self.assertEqual(image_width, results['width'])
self.assertAllEqual(
(num_instances,), results['groundtruth_classes'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_is_crowd'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_area'].shape)
self.assertAllEqual(
(num_instances, 4), results['groundtruth_boxes'].shape)
self.assertAllEqual(
(num_instances, image_height, image_width),
results['groundtruth_instance_masks'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_instance_masks_png'].shape)
def test_result_content(self):
label_map_dir = self.get_temp_dir()
label_map_name = 'label_map.csv'
label_map_path = os.path.join(label_map_dir, label_map_name)
with open(label_map_path, 'w') as f:
f.write(LABEL_MAP_CSV_CONTENT)
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map_path, include_mask=True)
image_content = [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
[[0, 0, 0], [255, 255, 255], [255, 255, 255], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]]
image = tfexample_utils.encode_image(np.uint8(image_content), fmt='PNG')
image_height = 4
image_width = 4
num_instances = 2
xmins = [0, 0.25]
xmaxs = [0.5, 1.0]
ymins = [0, 0]
ymaxs = [0.5, 1.0]
labels = [b'class_2', b'class_0']
areas = [
0.25 * image_height * image_width, 0.75 * image_height * image_width
]
is_crowds = [1, 0]
mask_content = [[[255, 255, 0, 0],
[255, 255, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]],
[[0, 255, 255, 255],
[0, 255, 255, 255],
[0, 255, 255, 255],
[0, 255, 255, 255]]]
masks = [
tfexample_utils.encode_image(np.uint8(m), fmt='PNG')
for m in list(mask_content)
]
serialized_example = tf.train.Example(
features=tf.train.Features(
feature={
'image/encoded': (tf.train.Feature(
bytes_list=tf.train.BytesList(value=[image]))),
'image/source_id': (tf.train.Feature(
bytes_list=tf.train.BytesList(
value=[tfexample_utils.DUMP_SOURCE_ID]))),
'image/height': (tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_height]))),
'image/width': (tf.train.Feature(
int64_list=tf.train.Int64List(value=[image_width]))),
'image/object/bbox/xmin': (tf.train.Feature(
float_list=tf.train.FloatList(value=xmins))),
'image/object/bbox/xmax': (tf.train.Feature(
float_list=tf.train.FloatList(value=xmaxs))),
'image/object/bbox/ymin': (tf.train.Feature(
float_list=tf.train.FloatList(value=ymins))),
'image/object/bbox/ymax': (tf.train.Feature(
float_list=tf.train.FloatList(value=ymaxs))),
'image/object/class/text': (tf.train.Feature(
bytes_list=tf.train.BytesList(value=labels))),
'image/object/is_crowd': (tf.train.Feature(
int64_list=tf.train.Int64List(value=is_crowds))),
'image/object/area': (tf.train.Feature(
float_list=tf.train.FloatList(value=areas))),
'image/object/mask': (tf.train.Feature(
bytes_list=tf.train.BytesList(value=masks))),
})).SerializeToString()
decoded_tensors = decoder.decode(
tf.convert_to_tensor(value=serialized_example))
results = tf.nest.map_structure(lambda x: x.numpy(), decoded_tensors)
self.assertAllEqual(
(image_height, image_width, 3), results['image'].shape)
self.assertAllEqual(image_content, results['image'])
self.assertEqual(tfexample_utils.DUMP_SOURCE_ID, results['source_id'])
self.assertEqual(image_height, results['height'])
self.assertEqual(image_width, results['width'])
self.assertAllEqual(
(num_instances,), results['groundtruth_classes'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_is_crowd'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_area'].shape)
self.assertAllEqual(
(num_instances, 4), results['groundtruth_boxes'].shape)
self.assertAllEqual(
(num_instances, image_height, image_width),
results['groundtruth_instance_masks'].shape)
self.assertAllEqual(
(num_instances,), results['groundtruth_instance_masks_png'].shape)
self.assertAllEqual(
[2, 0], results['groundtruth_classes'])
self.assertAllEqual(
[True, False], results['groundtruth_is_crowd'])
self.assertNDArrayNear(
[0.25 * image_height * image_width, 0.75 * image_height * image_width],
results['groundtruth_area'], 1e-4)
self.assertNDArrayNear(
[[0, 0, 0.5, 0.5], [0, 0.25, 1.0, 1.0]],
results['groundtruth_boxes'], 1e-4)
self.assertNDArrayNear(
mask_content, results['groundtruth_instance_masks'], 1e-4)
self.assertAllEqual(
masks, results['groundtruth_instance_masks_png'])
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
d0421b44a25e1116f523a219dbfed2dc534f5518 | f5485d955fa942711f337286a86f05374ea58a72 | /migrations/versions/cbe0f844650d_.py | 95626d4f1467e756a549807f7e637093f64f7ef7 | [] | no_license | bingfengjiyu/flask_demo | f6245d7e8696b3bc492ed3f922948bd01319be30 | 3feef9fcee6e3c8f8fae46fb0fb5f5a8bdb82f4d | refs/heads/master | 2020-03-24T11:04:03.012486 | 2018-07-28T12:16:01 | 2018-07-28T12:16:01 | 142,674,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | """empty message
Revision ID: cbe0f844650d
Revises:
Create Date: 2018-07-07 12:10:18.303153
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cbe0f844650d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('tbl_authors', sa.Column('email', sa.String(length=64), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('tbl_authors', 'email')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
a0a19a07d37174229a775fc5ab451b3a3396a995 | d9e0585e57b482d91e8af7514e683e2488e23381 | /padinfo/view/series_scroll.py | 8d6a99de4701c691fb50fc7dd9820cae2b135354 | [
"MIT"
] | permissive | TrendingTechnology/pad-cogs | d08abb8da8bf2763a4091a29139168d8c1d2333a | b913a4e16a6473b8b53fae4bda564bedcc82c876 | refs/heads/master | 2023-08-11T01:10:22.088761 | 2021-09-19T00:41:43 | 2021-09-19T00:41:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,190 | py | from typing import TYPE_CHECKING, List
from discordmenu.embed.base import Box
from discordmenu.embed.components import EmbedMain, EmbedField
from discordmenu.embed.text import BoldText
from discordmenu.embed.view import EmbedView
from tsutils.emoji import char_to_emoji
from tsutils.menu.footers import embed_footer_with_state
from tsutils.query_settings import QuerySettings
from padinfo.common.config import UserConfig
from padinfo.view.components.monster.header import MonsterHeader
from padinfo.view.components.view_state_base import ViewStateBase
if TYPE_CHECKING:
from dbcog.models.monster_model import MonsterModel
from dbcog.database_context import DbContext
class SeriesScrollViewState(ViewStateBase):
MAX_ITEMS_PER_PANE = 11
def __init__(self, original_author_id, menu_type, raw_query, query, color, series_id,
paginated_monsters: List[List["MonsterModel"]], current_page, rarity: int,
query_settings: QuerySettings,
all_rarities: List[int],
title, message,
current_index: int = None,
max_len_so_far: int = None,
reaction_list=None, extra_state=None,
child_message_id=None):
super().__init__(original_author_id, menu_type, raw_query,
extra_state=extra_state)
self.current_index = current_index
self.all_rarities = all_rarities
self.paginated_monsters = paginated_monsters
self.current_page = current_page or 0
self.series_id = series_id
self.rarity = rarity
self.query_settings = query_settings
self.idle_message = message
self.child_message_id = child_message_id
self.title = title
self.reaction_list = reaction_list
self.color = color
self.query = query
self._max_len_so_far = max(max_len_so_far or len(self.monster_list), len(self.monster_list))
@property
def monster_list(self) -> List["MonsterModel"]:
return self.paginated_monsters[self.current_page]
@property
def max_len_so_far(self) -> int:
self._max_len_so_far = max(len(self.monster_list), self._max_len_so_far)
return self._max_len_so_far
@property
def current_monster_id(self) -> int:
return self.monster_list[self.current_index].monster_id
@property
def pages_in_rarity(self) -> int:
return len(self.paginated_monsters)
def serialize(self):
ret = super().serialize()
ret.update({
'pane_type': SeriesScrollView.VIEW_TYPE,
'series_id': self.series_id,
'query_settings': self.query_settings.serialize(),
'current_page': self.current_page,
'pages_in_rarity': self.pages_in_rarity,
'title': self.title,
'rarity': self.rarity,
'all_rarities': self.all_rarities,
'reaction_list': self.reaction_list,
'child_message_id': self.child_message_id,
'idle_message': self.idle_message,
'max_len_so_far': self.max_len_so_far,
'current_index': self.current_index,
})
return ret
def get_serialized_child_extra_ims(self, emoji_names, menu_type):
extra_ims = {
'is_child': True,
'reaction_list': emoji_names,
'menu_type': menu_type,
'resolved_monster_id': self.current_monster_id,
'query_settings': self.query_settings.serialize(),
'idle_message': self.idle_message
}
return extra_ims
@staticmethod
async def deserialize(dbcog, user_config: UserConfig, ims: dict):
if ims.get('unsupported_transition'):
return None
series_id = ims['series_id']
rarity = ims['rarity']
all_rarities = ims['all_rarities']
query_settings = QuerySettings.deserialize(ims.get('query_settings'))
paginated_monsters = await SeriesScrollViewState.do_query(dbcog, series_id, rarity, query_settings.server)
current_page = ims['current_page']
title = ims['title']
raw_query = ims['raw_query']
query = ims.get('query') or raw_query
original_author_id = ims['original_author_id']
menu_type = ims['menu_type']
reaction_list = ims.get('reaction_list')
child_message_id = ims.get('child_message_id')
current_index = ims.get('current_index')
current_monster_list = paginated_monsters[current_page]
max_len_so_far = max(ims['max_len_so_far'] or len(current_monster_list), len(current_monster_list))
idle_message = ims.get('idle_message')
return SeriesScrollViewState(original_author_id, menu_type, raw_query, query, user_config.color, series_id,
paginated_monsters, current_page, rarity, query_settings,
all_rarities,
title, idle_message,
current_index=current_index,
max_len_so_far=max_len_so_far,
reaction_list=reaction_list,
extra_state=ims,
child_message_id=child_message_id)
@staticmethod
async def do_query(dbcog, series_id, rarity, server):
db_context: "DbContext" = dbcog.database
all_series_monsters = db_context.get_monsters_by_series(series_id, server=server)
base_monsters_of_rarity = list(filter(
lambda m: db_context.graph.monster_is_base(m) and m.rarity == rarity, all_series_monsters))
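        # Chunk the flat list into fixed-size pages (standard slicing idiom):
        # e.g. 25 base monsters with MAX_ITEMS_PER_PANE = 11 paginate as 11 + 11 + 3.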
paginated_monsters = [base_monsters_of_rarity[i:i + SeriesScrollViewState.MAX_ITEMS_PER_PANE]
for i in range(
0, len(base_monsters_of_rarity), SeriesScrollViewState.MAX_ITEMS_PER_PANE)]
return paginated_monsters
@staticmethod
def query_all_rarities(dbcog, series_id, server):
db_context: "DbContext" = dbcog.database
return sorted({m.rarity for m in db_context.get_all_monsters(server) if
m.series_id == series_id and db_context.graph.monster_is_base(m)})
@staticmethod
async def query_from_ims(dbcog, ims) -> List[List["MonsterModel"]]:
series_id = ims['series_id']
rarity = ims['rarity']
query_settings = QuerySettings.deserialize(ims['query_settings'])
paginated_monsters = await SeriesScrollViewState.do_query(dbcog, series_id, rarity, query_settings.server)
return paginated_monsters
async def decrement_page(self, dbcog):
if self.current_page > 0:
self.current_page = self.current_page - 1
self.current_index = None
else:
# if there are multiple rarities, decrementing first page will change rarity
if len(self.all_rarities) > 1:
rarity_index = self.all_rarities.index(self.rarity)
self.rarity = self.all_rarities[rarity_index - 1]
self.paginated_monsters = await SeriesScrollViewState.do_query(dbcog, self.series_id, self.rarity,
self.query_settings.server)
self.current_index = None
self.current_page = len(self.paginated_monsters) - 1
if len(self.paginated_monsters) > 1:
self.current_index = None
async def increment_page(self, dbcog):
if self.current_page < len(self.paginated_monsters) - 1:
self.current_page = self.current_page + 1
self.current_index = None
else:
# if there are multiple rarities, incrementing last page will change rarity
if len(self.all_rarities) > 1:
rarity_index = self.all_rarities.index(self.rarity)
self.rarity = self.all_rarities[(rarity_index + 1) % len(self.all_rarities)]
self.paginated_monsters = await SeriesScrollViewState.do_query(dbcog, self.series_id, self.rarity,
self.query_settings.server)
self.current_index = None
self.current_page = 0
if len(self.paginated_monsters) > 1:
self.current_index = None
async def decrement_index(self, dbcog):
if self.current_index is None:
self.current_index = len(self.monster_list) - 1
return
if self.current_index > 0:
self.current_index = self.current_index - 1
return
await self.decrement_page(dbcog)
self.current_index = len(self.monster_list) - 1
async def increment_index(self, dbcog):
if self.current_index is None:
self.current_index = 0
return
if self.current_index < len(self.monster_list) - 1:
self.current_index = self.current_index + 1
return
await self.increment_page(dbcog)
self.current_index = 0
def set_index(self, new_index: int):
# don't want to go out of range, which will forget current index, break next, and break prev
if new_index < len(self.monster_list):
self.current_index = new_index
class SeriesScrollView:
VIEW_TYPE = 'SeriesScroll'
@staticmethod
def embed(state: SeriesScrollViewState):
fields = [
EmbedField(BoldText('Current rarity: {}'.format(state.rarity)),
Box(*SeriesScrollView._monster_list(
state.monster_list,
state.current_index))),
EmbedField(BoldText('Rarities'),
Box(
SeriesScrollView._all_rarity_text(state),
), inline=True
),
EmbedField(BoldText('Page'),
Box('{} of {}'.format(state.current_page + 1, state.pages_in_rarity)),
inline=True
)
]
return EmbedView(
EmbedMain(
title=state.title,
color=state.color,
),
embed_footer=embed_footer_with_state(state),
embed_fields=fields)
@staticmethod
def _all_rarity_text(state):
lines = []
for r in state.all_rarities:
if r != state.rarity:
lines.append(str(r))
else:
lines.append('**{}**'.format(state.rarity))
return ', '.join(lines)
@staticmethod
def _monster_list(monsters, current_index):
if not len(monsters):
return []
return [
MonsterHeader.short_with_emoji(
mon,
link=SeriesScrollView._is_linked(i, current_index),
prefix=char_to_emoji(str(i))
)
for i, mon in enumerate(monsters)
]
@staticmethod
def _is_linked(i, current_index):
if current_index is None:
return True
return i != current_index
| [
"[email protected]"
] | |
ee130f7ff914a89d881715663eba4f61cc90bcfb | ab4f74d127bfc89813ee359bb9c779eca5426ddc | /script/label_image.runfiles/org_tensorflow/tensorflow/core/framework/tensor_pb2.py | 9adcb52c8c3ea21bdcdcb2006d01b4c3f6a19493 | [
"MIT"
] | permissive | harshit-jain-git/ImageNET | cdfd5a340b62862ad8d1cc3b9a0f30cccc481744 | 1cd4c2b70917e4709ce75422c0205fe3735a1b01 | refs/heads/master | 2022-12-11T12:47:46.795376 | 2017-12-19T05:47:26 | 2017-12-19T05:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | /home/co/.cache/bazel/_bazel_co/2e35bede1f3fd334ff5ab28da2fc1540/execroot/org_tensorflow/bazel-out/k8-opt/genfiles/tensorflow/core/framework/tensor_pb2.py | [
"[email protected]"
] | |
7ae7198fa354aec4c1775a03d319da0d45d323ea | 0f07107b016d2aee64788966b9f0d322ac46b998 | /moya/testprojects/scratch/blog/py/test.py | 2b109b36a4486a66034ed4b3c5ad99d295a37b25 | [
"MIT"
] | permissive | fkztw/moya | 35f48cdc5d5723b04c671947099b0b1af1c7cc7a | 78b91d87b4519f91dfdd2b40dab44e72f201a843 | refs/heads/master | 2023-08-09T09:20:21.968908 | 2019-02-03T18:18:54 | 2019-02-03T18:18:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | from __future__ import print_function
import moya
@moya.expose.macro("test")
def test():
print("Success! :-)")
return 10
| [
"[email protected]"
] | |
c49f884ff1e502534a0cbabe7633c2134b201d34 | b391498124fdcaef989bf3ebafffb0df43e3e07f | /pygccxml-0.8.2/unittests/declarations_cache_tester.py | 10112018fdfee5e022b785ec191dc8df82e502cb | [
"BSL-1.0"
] | permissive | glehmann/WrapITK-unstable | 9a0dd9d387ecd59c9439465dcc32cca552e14576 | 402fc668f1f3c3dd57d0751a61efa3b1625d238b | refs/heads/master | 2021-01-10T22:02:04.715926 | 2008-05-25T16:53:07 | 2008-05-25T16:53:07 | 3,272,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,319 | py | # Copyright 2004 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import os, sys, unittest, os.path
import autoconfig
import pygccxml.parser
from pygccxml.parser.config import config_t
from pygccxml.parser.declarations_cache import *
class decl_cache_tester(unittest.TestCase):
def __init__(self, *args ):
unittest.TestCase.__init__(self, *args)
if not os.path.exists( autoconfig.build_dir ):
os.makedirs( autoconfig.build_dir )
def test_file_signature(self):
file1 = os.path.join(autoconfig.data_directory, 'decl_cache_file1.txt')
file1_dup = os.path.join(autoconfig.data_directory, 'decl_cache_file1_duplicate.txt')
file2 = os.path.join(autoconfig.data_directory, 'decl_cache_file2.txt')
sig1 = file_signature(file1)
sig1_dup = file_signature(file1_dup)
sig2 = file_signature(file2)
self.assert_(sig1 == sig1_dup)
self.assert_(sig1 != sig2)
def test_config_signature(self):
diff_cfg_list = self.build_differing_cfg_list()
def_cfg = diff_cfg_list[0]
def_sig = configuration_signature(def_cfg)
# Test changes that should cause sig changes
for cfg in diff_cfg_list[1:]:
self.assert_(configuration_signature(cfg) != def_sig)
# Test changes that should not cause sig changes
no_changes = def_cfg.clone()
self.assert_(configuration_signature(no_changes) == def_sig)
#start_decls_changed = def_cfg.clone()
#start_decls_changed.start_with_declarations = "test object"
#self.assert_(configuration_signature(start_decls_changed) == def_sig)
ignore_changed = def_cfg.clone()
ignore_changed.ignore_gccxml_output = True
self.assert_(configuration_signature(ignore_changed) == def_sig)
def test_cache_interface(self):
cache_file = os.path.join(autoconfig.build_dir, 'decl_cache_test.test_cache_read.cache')
file1 = os.path.join(autoconfig.data_directory, 'decl_cache_file1.txt')
file1_dup = os.path.join(autoconfig.data_directory, 'decl_cache_file1_duplicate.txt')
file2 = os.path.join(autoconfig.data_directory, 'decl_cache_file2.txt')
diff_cfg_list = self.build_differing_cfg_list()
def_cfg = diff_cfg_list[0]
if os.path.exists(cache_file):
os.remove(cache_file)
cache = file_cache_t(cache_file)
self.assert_(len(cache._file_cache_t__cache) == 0)
# test creating new entries for differing files
cache.update(file1, def_cfg, 1,[])
self.assert_(len(cache._file_cache_t__cache) == 1)
cache.update(file1_dup, def_cfg, 2,[])
self.assert_(len(cache._file_cache_t__cache) == 1)
cache.update(file2, def_cfg, 3,[])
self.assert_(len(cache._file_cache_t__cache) == 2)
self.assert_(cache.cached_value(file1,def_cfg) == 2)
self.assert_(cache.cached_value(file2,def_cfg) == 3)
# Test reading again
cache.flush()
cache = file_cache_t(cache_file)
self.assert_(len(cache._file_cache_t__cache) == 2)
self.assert_(cache.cached_value(file1,def_cfg) == 2)
self.assert_(cache.cached_value(file2,def_cfg) == 3)
# Test flushing doesn't happen if we don't touch the cache
cache = file_cache_t(cache_file)
self.assert_(cache.cached_value(file1,def_cfg) == 2) # Read from cache
cache.flush() # should not actually flush
cache = file_cache_t(cache_file)
self.assert_(len(cache._file_cache_t__cache) == 2)
# Test flush culling
cache = file_cache_t(cache_file)
cache.update(file1_dup, def_cfg, 4,[]) # Modify cache
cache.flush() # should cull off one entry
cache = file_cache_t(cache_file)
self.assert_(len(cache._file_cache_t__cache) == 1)
def build_differing_cfg_list(self):
""" Return a list of configurations that all differ. """
cfg_list = []
def_cfg = config_t("gccxml_path",'.',['tmp'],['sym'],['unsym'],
None,False,"")
cfg_list.append(def_cfg)
# Test changes that should cause sig changes
gccxml_changed = def_cfg.clone()
gccxml_changed.gccxml_path = "other_path"
cfg_list.append(gccxml_changed)
wd_changed = def_cfg.clone()
wd_changed.working_directory = "other_dir"
cfg_list.append(wd_changed)
#inc_changed = def_cfg.clone()
#inc_changed.include_paths = ["/var/tmp"]
#self.assert_(configuration_signature(inc_changed) != def_sig)
inc_changed = config_t("gccxml_path",'.',['/var/tmp'],['sym'],['unsym'],
None,False,"")
cfg_list.append(inc_changed)
#def_changed = def_cfg.clone()
#def_changed.define_symbols = ["symbol"]
#self.assert_(configuration_signature(def_changed) != def_sig)
def_changed = config_t("gccxml_path",'.',['/var/tmp'],['new-sym'],['unsym'],
None,False,"")
cfg_list.append(def_changed)
#undef_changed = def_cfg.clone()
#undef_changed.undefine_symbols = ["symbol"]
#self.assert_(configuration_signature(undef_changed) != def_sig)
undef_changed = config_t("gccxml_path",'.',['/var/tmp'],['sym'],['new-unsym'],
None,False,"")
cfg_list.append(undef_changed)
cflags_changed = def_cfg.clone()
cflags_changed.cflags = "new flags"
cfg_list.append(cflags_changed)
return cfg_list
def create_suite():
suite = unittest.TestSuite()
suite.addTest( unittest.makeSuite(decl_cache_tester))
return suite
def run_suite():
unittest.TextTestRunner(verbosity=2).run( create_suite() )
if __name__ == "__main__":
run_suite() | [
"[email protected]"
] | |
9c14abf2141c645825bb3572f94ff19a61281d45 | 698ad822ff616b86e88784ec4fce08b42c46e870 | /torch/_dynamo/allowed_functions.py | 67daafc5adac78651457c01de7096eb20617b562 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | puppup420247-org/pytorch | 6fa244b817ace397e97e85c553bd315093ef4533 | 090fce2547ac96730f99877085a7477097c4ad97 | refs/heads/master | 2023-07-21T03:54:26.234286 | 2022-11-12T05:59:55 | 2022-11-12T05:59:55 | 188,915,637 | 1 | 1 | NOASSERTION | 2023-07-20T11:42:38 | 2019-05-27T22:20:25 | C++ | UTF-8 | Python | false | false | 8,428 | py | import builtins
import collections
import copy
import functools
import inspect
import itertools
import math
import operator
import types
import warnings
from typing import Dict, Optional, Set
import numpy
import torch
from torch.fx._symbolic_trace import is_fx_tracing
from . import config
from .utils import is_safe_constant
"""
A note on allowed functions:
Dynamo consults this file to determine if a particular function/module
is allowed to appear as a node in its fx output.
If a function is disallowed, it may either be traced-through, or skipped.
Trace-through means dynamo will continue to trace the interior code for
the function/module rather than stopping at its boundary and recording it
as a node in the fx graph. Whether tracing through or allowing, the functionality
of the function/module is part of the dynamo graph. Caveat: if tracing through,
any interior operation could trigger its own graph-break.
Skips are determined by (torch/_dynamo/skipfiles.py) - see "a note on
skipfiles" there.
"""
def make_function_id_set(lazy_initializer):
"""
Track a set of `id()`s of objects which are either allowed or not
allowed to go into the generated FX graph. Use to test for torch.*,
numpy.*, builtins.*, etc.
Support user modification to permit customization of what can be
added to the graph and what will cause a graph break.
"""
class FunctionIdSet:
function_ids: Optional[Set[int]] = None
function_names: Optional[Dict[int, str]] = None
def __call__(self):
if self.function_ids is None:
value = lazy_initializer()
if isinstance(value, dict):
self.function_ids = set(value.keys())
self.function_names = value
else:
assert isinstance(value, set)
self.function_ids = value
return self.function_ids
def get_name(self, idx: int, default: str):
self() # lazy init
return self.function_names.get(idx, default)
def add(self, idx: int):
self() # lazy init
self.function_ids.add(idx)
def remove(self, idx: int):
if idx in self():
self.function_ids.remove(idx)
def __contains__(self, idx: int):
return idx in self()
return FunctionIdSet()
@make_function_id_set
def _disallowed_function_ids():
remove = [
True,
False,
None,
collections.OrderedDict,
copy.copy,
copy.deepcopy,
inspect.signature,
math.__package__,
torch.__builtins__,
torch.autocast_decrement_nesting,
torch.autocast_increment_nesting,
torch.autograd.grad,
torch.clear_autocast_cache,
torch.cuda.current_device,
torch.cuda.amp.autocast_mode.autocast,
torch.distributions.constraints.is_dependent,
torch.distributions.normal.Normal,
torch.inference_mode,
torch.set_anomaly_enabled,
torch.set_autocast_cache_enabled,
torch.set_autocast_cpu_dtype,
torch.set_autocast_cpu_enabled,
torch.set_autocast_enabled,
torch.set_autocast_gpu_dtype,
torch.autograd.profiler.profile,
warnings.warn,
torch._C._dynamo.eval_frame.unsupported,
]
# extract all dtypes from torch
dtypes = [
obj for obj in torch.__dict__.values() if isinstance(obj, type(torch.float32))
]
remove += dtypes
storage = [
obj
for obj in torch.__dict__.values()
if isinstance(obj, type(torch.FloatStorage))
]
remove += storage
return {id(x) for x in remove}
@make_function_id_set
def _allowed_function_ids():
"""
Walk torch.* and get the ids of all the stuff in it
"""
warnings.filterwarnings("ignore", category=UserWarning, module="torch.distributed")
torch_object_ids = dict()
def _is_allowed_module_prefix(obj):
allowed_modules = ("torch", "math")
# torch.nn.modules.rnn is disallowed because these modules internally
# flatten their parameters. This flattening process will call
# Tensor.set_ with a Storage, and Storages cannot be traced with
# AOTAutograd; so we need to graph-break. To ensure this, we inline
# these functions, rather than keep them opaque-ly in the graph.
disallowed_modules = (
"torch.optim.",
"torch.nn.modules.rnn.",
"torch._dynamo.",
"torch._C._dynamo.",
"torch._inductor.",
"torch._C.inductor.",
"torch.fx.",
"torch.distributed.fsdp.",
)
allowed_modules_dot = tuple([x + "." for x in allowed_modules])
module = inspect.getmodule(obj)
if module is None:
return False
mod_name = module.__name__
if any(mod_name.startswith(m) for m in disallowed_modules):
return False
return mod_name in allowed_modules or mod_name.startswith(allowed_modules_dot)
def _find_torch_objects(module):
if any(
module.__name__.startswith(mod_name)
for mod_name in config.allowed_functions_module_string_ignorelist
):
return
torch_object_ids[id(module)] = module.__name__
for name, obj in list(module.__dict__.items()):
if id(obj) not in torch_object_ids:
if isinstance(obj, types.ModuleType):
if obj.__name__.startswith("torch.") and _is_allowed_module_prefix(
obj
):
torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
_find_torch_objects(obj)
elif _is_allowed_module_prefix(obj):
torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
elif inspect.getmodule(obj) is None and not is_safe_constant(obj):
torch_object_ids[id(obj)] = f"{module.__name__}.{name}"
_find_torch_objects(torch)
_find_torch_objects(math)
for idx in _disallowed_function_ids():
if idx in torch_object_ids:
del torch_object_ids[idx]
for extra in (is_fx_tracing,):
torch_object_ids[id(extra)] = f"{extra.__module__}.{extra.__name__}"
return torch_object_ids
@make_function_id_set
def _builtin_function_ids():
rv = {
id(v): f"builtins.{k}"
for k, v in builtins.__dict__.items()
if not k.startswith("_") and callable(v)
}
rv.update(
{
id(v): f"operator.{k}"
for k, v in operator.__dict__.items()
if not k.startswith("_") and callable(v)
}
)
rv.update(
{id(v): f"functools.{v.__name__}" for v in (itertools.chain, itertools.islice)}
)
rv[id(functools.reduce)] = "functools.reduce"
return rv
@make_function_id_set
def _numpy_function_ids():
rv = dict()
for mod in (numpy, numpy.random):
rv.update(
{
id(v): f"{mod.__name__}.{k}"
for k, v in mod.__dict__.items()
if callable(v)
and (getattr(v, "__module__", None) or mod.__name__) == mod.__name__
}
)
return rv
@make_function_id_set
def _builtin_constant_ids():
"""
Collects constant builtins by eliminating callable items.
"""
rv = {
id(v): f"builtins.{k}"
for k, v in builtins.__dict__.items()
if not k.startswith("_") and not callable(v)
}
return rv
def is_allowed(obj):
"""Is this safe to trace like torch.add ?"""
# torch.ops is populated lazily so we don't necessarily have them in
# _allowed_function_ids. Figure it out by testing the type instead
# in those cases
return id(obj) in _allowed_function_ids or isinstance(
obj,
(torch._ops.OpOverloadPacket, torch._ops.OpOverload, torch._ops._OpNamespace),
)
def torch_get_name(obj, default):
"""Convert a torch.* funcion to a string"""
return _allowed_function_ids.get_name(id(obj), default)
def is_builtin_callable(obj):
return id(obj) in _builtin_function_ids
def is_builtin_constant(obj):
return id(obj) in _builtin_constant_ids
def is_numpy(obj):
return isinstance(obj, numpy.ndarray) or id(obj) in _numpy_function_ids
| [
"[email protected]"
] | |
25be6302bd9150151560453a17906af226789f01 | 904b0d81152649ccd3349f94f88e7b89a7b5c76a | /scripts/main/xicombNS_DA02.py | e2f17c6818b725c4127a2e6be411fb79ee8c98bd | [
"BSD-3-Clause"
] | permissive | desihub/LSS | ec33538a0e7280ad1c6b257368cc009ed4b39cbb | 5645461929172d327ed30389d76e7e887043c9bf | refs/heads/main | 2023-08-18T23:17:13.123605 | 2023-08-18T20:08:22 | 2023-08-18T20:08:22 | 36,753,969 | 14 | 28 | BSD-3-Clause | 2023-09-13T18:37:35 | 2015-06-02T18:42:51 | Jupyter Notebook | UTF-8 | Python | false | false | 1,710 | py | #!/usr/bin/env python
# coding: utf-8
import os
import argparse
import logging
import numpy as np
from astropy.table import Table, vstack
from matplotlib import pyplot as plt
from pycorr import TwoPointCorrelationFunction, TwoPointEstimator, KMeansSubsampler, utils, setup_logging
njack = '60'
trs = ['ELG_LOPnotqso','QSO','LRG','BGS_BRIGHT','QSO_ELG_LOPnotqso','LRG_QSO','LRG_ELG_LOPnotqso']
bsl = [1,2,4,5,10]
dirxi = '/global/cfs/cdirs/desi/survey/catalogs/DA02/LSS/guadalupe/LSScats/test/xi/smu/'
xit = 'poles'
for tr in trs:
if tr == 'ELG_LOPnotqso':
zws = ['0.8_1.6','0.8_1.1','1.1_1.6']
if tr == 'QSO_ELG_LOPnotqso':
zws = ['0.8_1.6','0.8_1.1','1.1_1.6']
if tr == 'QSO':
zws = ['0.8_1.1','0.8_2.1lowz','1.1_1.6','1.6_2.1','2.1_3.5','0.8_3.5']
if tr == 'LRG':
zws = ['0.4_0.6','0.6_0.8','0.8_1.1','0.4_1.1']
if tr == 'BGS_BRIGHT':
zws = ['0.1_0.3','0.3_0.5','0.1_0.5']
if tr == 'LRG_QSO' or tr == 'LRG_ELG_LOPnotqso':
zws = ['0.8_1.1']
for zw in zws:
result_N = TwoPointCorrelationFunction.load(dirxi+'allcounts_'+tr+'_N_'+zw+'_default_FKP_lin_njack'+njack+'.npy')
result_S = TwoPointCorrelationFunction.load(dirxi+'allcounts_'+tr+'_S_'+zw+'_default_FKP_lin_njack'+njack+'.npy')
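        # Normalizing each region's pair counts before summing combines the
        # N and S measurements into the single NScomb estimator saved below.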
result_NS = result_N.normalize() + result_S.normalize()
fn = dirxi+'allcounts_'+tr+'_NScomb_'+zw+'_default_FKP_lin_njack'+njack+'.npy'
result_NS.save(fn)
for bs in bsl:
rebinned = result_NS[:(result_NS.shape[0]//bs)*bs:bs]
fn_txt = dirxi+'xi'+xit+'_'+tr+'_NScomb_'+zw+'_default_FKP_lin'+str(bs)+'_njack'+njack+'.txt'
rebinned.save_txt(fn_txt, ells=(0, 2, 4))
| [
"[email protected]"
] | |
c15d4b3566815e61f9fc93ba6b974c34f358c170 | e2ad93398194942c13c27b25aa868eda4ff4f46c | /sponsortracker/download.py | 987305d12d281d57363d875f7b84883ec45f917e | [] | no_license | Auzzy/bfig-sponsor-tracker | ab2fbcf6ba562f977263b5f91c3aca756e037302 | cff466de6797ea276130335bdc368c706eed583d | refs/heads/master | 2023-06-01T04:41:26.082120 | 2023-05-11T11:52:11 | 2023-05-11T11:52:11 | 25,780,177 | 0 | 0 | null | 2023-05-11T11:52:12 | 2014-10-26T16:14:46 | Python | UTF-8 | Python | false | false | 2,989 | py | import collections
import os
import shutil
import tempfile
from enum import Enum
from os.path import exists, expanduser, join, splitext
from sqlalchemy import or_
from sponsortracker import model, uploads
from sponsortracker.data import AssetType, Level
ZIPNAME = "sponsortracker-assets"
def all(level=None):
return download(level=level)
def website_updates(start, level=None):
asset_filter = lambda deal: [asset for asset in deal.assets_by_type[AssetType.LOGO] if asset.date >= start]
return download('updates', asset_filter=asset_filter, level=level)
def logo_cloud(level=None):
asset_filter = lambda deal: deal.assets_by_type[AssetType.LOGO]
return download('logocloud', by_sponsor=False, info=False, asset_filter=asset_filter, level=level)
def download(zipname=ZIPNAME, by_sponsor=True, info=True, asset_filter=lambda deal: deal.assets, level=None):
with tempfile.TemporaryDirectory() as tempdir:
zipdir = join(tempdir, zipname)
os.makedirs(zipdir)
for deal in model.Deal.query.filter(model.Deal.level_name != ""):
if deal.level in (Level.SERVICE, Level.BRONZE, Level.BRONZE_BENEFITS, Level.SILVER, Level.GOLD, Level.PLATINUM) or deal.contract.received != None or deal.invoice.received != None:
if not level or deal.level_name == level:
target = join(*[zipdir, deal.level.name.lower()] + ([deal.sponsor.name] if by_sponsor else []))
os.makedirs(target, exist_ok=True)
if info:
_info_to_file(target, deal.sponsor)
_copy_assets(target, asset_filter(deal))
return shutil.make_archive(expanduser(join("~", zipname)), "zip", root_dir=tempdir)
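# Illustrative calls (assumption: run inside the configured app so the model
# and upload storage are available; exact level names depend on the data
# module -- not part of the original file):
#
#   all()          # archive every asset, grouped by level and sponsor
#   logo_cloud()   # logos only, flat layout, without info.txt files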
def _info_to_file(target, sponsor):
if sponsor.link or sponsor.description:
with open(join(target, "info.txt"), 'w') as info_file:
if sponsor.link:
info_file.write(sponsor.link + "\n\n")
if sponsor.description:
info_file.write(sponsor.description)
def _copy_assets(target, assets):
for asset in assets:
name = '-'.join([asset.deal.sponsor.name.lower(), asset.type.name.lower()])
ext = splitext(asset.filename)[-1].lstrip('.')
dest = os.path.join(target, "{name}.{ext}".format(name=name, ext=ext))
uploads.Asset.get(asset.deal, asset.filename, dest)
'''
path = asset_uploader.path(asset.filename)
ext = splitext(asset.filename)[-1].lstrip('.')
name = '-'.join([asset.sponsor.name.lower(), asset.type.name.lower()])
shutil.copy(path, _filepath(target, name, ext))
'''
'''
def _filepath(target, basename, ext):
num = 2
name = "{name}.{ext}".format(name=basename, ext=ext)
while exists(join(target, name)):
name = "{name}_{num}.{ext}".format(name=basename, num=num, ext=ext)
num += 1
return join(target, name)
''' | [
"[email protected]"
] | |
11d173e2e009317f099e646a9d101c71ae82a9b9 | 519b4cf7623c40e0280c435246b6cde46853ecc1 | /project/holviapp/utils.py | d731848e8107fe128a56275bbd33d4b1a41ef18a | [
"MIT"
] | permissive | HelsinkiHacklab/asylum | a3fe492f76145c922125949c41acce6e8d4beec4 | 6fcf71fb5c7bb894322039144e814b9edc07d5bb | refs/heads/hhl_changes | 2023-02-16T18:54:17.277017 | 2021-08-13T17:59:46 | 2021-09-13T17:45:45 | 47,038,401 | 1 | 1 | MIT | 2023-02-03T13:22:48 | 2015-11-28T20:28:58 | Python | UTF-8 | Python | false | false | 2,574 | py | # -*- coding: utf-8 -*-
import holviapi
import holvirc
from django.conf import settings
CONNECTION_SINGLETON = None
def apikey_configured():
"""Check if we have apikey"""
return bool(settings.HOLVI_POOL) and bool(settings.HOLVI_APIKEY)
def userauth_configured():
"""Check if we have username/password"""
return bool(settings.HOLVI_POOL) and bool(settings.HOLVI_USER) and bool(settings.HOLVI_PASSWORD)
def api_configured():
"""Check that we have some API config"""
return apikey_configured() or userauth_configured()
def get_connection():
"""Shorhand connection singleton getter"""
global CONNECTION_SINGLETON
if CONNECTION_SINGLETON is not None:
return CONNECTION_SINGLETON
if not api_configured():
raise RuntimeError('Holvi API is not configured')
if userauth_configured():
CONNECTION_SINGLETON = holvirc.Connection.singleton(settings.HOLVI_POOL, settings.HOLVI_USER, settings.HOLVI_PASSWORD)
if apikey_configured():
CONNECTION_SINGLETON = holviapi.Connection.singleton(settings.HOLVI_POOL, settings.HOLVI_APIKEY)
return CONNECTION_SINGLETON
def get_invoiceapi():
"""Shorthand API instance creator"""
return holvirc.InvoiceAPI(get_connection())
def list_invoices(**kwargs):
"""Shorthand accessor for the API method"""
return get_invoiceapi().list_invoices(**kwargs)
def get_invoice(code):
"""Shorthand accessor for the API method"""
return get_invoiceapi().get_invoice(code)
def get_checkoutapi():
"""Shorthand API instance creator"""
cnc = get_connection()
if isinstance(cnc, (holvirc.Connection, holvirc.connection.Connection)):
raise RuntimeError("This only works with the old style api keys")
return holviapi.CheckoutAPI(cnc)
def list_orders(**kwargs):
"""Shorthand accessor for the API method"""
cnc = get_connection()
if isinstance(cnc, (holvirc.Connection, holvirc.connection.Connection)):
# TODO: Log the issue
return iter([])
return get_checkoutapi().list_orders(**kwargs)
def get_order(code):
"""Shorthand accessor for the API method"""
return get_checkoutapi().get_order(code)
def get_categoriesapi():
"""Shorthand API instance creator"""
cnc = get_connection()
if isinstance(cnc, (holviapi.Connection, holviapi.connection.Connection)):
return holviapi.CategoriesAPI(get_connection())
return holvirc.CategoriesAPI(cnc)
def get_category(code):
"""Shorthand accessor for the API method"""
return get_categoriesapi().get_category(code)
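# Illustrative usage sketch (assumes HOLVI_POOL plus either HOLVI_APIKEY or
# HOLVI_USER/HOLVI_PASSWORD in Django settings; attribute access on the
# returned objects follows the holvi libraries -- not part of the original module):
#
#   if api_configured():
#       for invoice in list_invoices():
#           print(invoice)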
| [
"[email protected]"
] | |
fe8087783d56301fddb861866779ab604a5e83f6 | 4e5cdffa14c8404d836dc9f034cbbf34a86c7503 | /src/api/urls.py | 9113ef4c4d5780858faf92eee0a13749d97d0775 | [] | no_license | gtdata/publish_data_alpha | da1cf013a5b2c7290d0af7a48d43adc90e301f3f | f1a9753daac7fbe8cc5bed5f30b8601c781449ab | refs/heads/master | 2021-01-20T03:03:36.303264 | 2017-04-18T11:49:29 | 2017-04-18T11:49:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | from django.conf.urls import url, include
from rest_framework import routers
import api.views as v
import api.api_def as a
urlpatterns = [
url(r'locations$', v.gazeteer_lookup, name='gazeteer_lookup'),
url(r'^datasets$', v.dataset_lookup, name='dataset_lookup'),
url(r'^status', v.StatusEndpoint.as_view()),
#url(r'^1/', include(router.urls)),
url(r'^1/datasets$', a.DatasetList.as_view()),
url(r'^1/datasets/(?P<name>[\w-]+)$', a.DatasetDetail.as_view(), name='dataset-detail'),
url(r'^1/organisations$', a.OrganisationList.as_view()),
url(r'^1/organisations/(?P<name>[\w-]+)$', a.OrganisationDetail.as_view(), name='organisation-detail'),
]
| [
"[email protected]"
] | |
ed33a50324b7e3a5eecebca1a2b58fcd87538545 | 97fde6e1ee2c63d4359a005a8a17db87559dd3eb | /api/models.py | 44c72a280c9cf023ae7be9d686aecd138860d6d2 | [] | no_license | Bibin22/Book_project | 6f6d0cce452e0298d16676425eeb2f77e915c3e5 | 9884363927e6b3b559d43a6ead584f1741b54370 | refs/heads/master | 2023-03-31T21:36:02.594431 | 2021-03-24T07:15:46 | 2021-03-24T07:15:46 | 350,402,169 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from django.db import models
# Create your models here.
class Book(models.Model):
    book_name = models.CharField(max_length=120, unique=True)
price = models.IntegerField()
pages = models.IntegerField()
author = models.CharField(max_length=100)
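    # Illustrative queries (assume migrations are applied; not part of the
    # original app):
    #   Book.objects.create(book_name="Dune", price=450, pages=412, author="Frank Herbert")
    #   Book.objects.filter(author="Frank Herbert")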
def __str__(self):
return self.book_name | [
"[email protected]"
] | |
4d64347ffa03e103de068e66bc946d0999ccfce2 | 436051d199fcc323a422b7fea377f43c01004366 | /helpers/labml_helpers/metrics/simple_state.py | 2a6575141ce8871da42ebc4cbb7b596e62453fdb | [
"MIT"
] | permissive | xet7/labml | 29d411b94f1d6b9ff03c6033f510cea443d38248 | 7f3918ca7de8cb21cf6dcc9d127a6ea64c0aebb9 | refs/heads/master | 2023-08-18T10:03:13.142430 | 2021-07-18T11:11:42 | 2021-07-18T11:11:42 | 387,184,226 | 0 | 0 | MIT | 2023-08-11T20:01:39 | 2021-07-18T13:36:49 | null | UTF-8 | Python | false | false | 813 | py | from typing import Generic, TypeVar, Optional
from . import StateModule
T = TypeVar('T')
class SimpleState(Generic[T]):
state: Optional[T]
def __init__(self):
self.state = None
def get(self) -> T:
return self.state
def set(self, data: T):
self.state = data
def reset(self):
self.state = None
class SimpleStateModule(StateModule, Generic[T]):
data: SimpleState[T]
def __init__(self):
super().__init__()
def set(self, data: T):
self.data.set(data)
def get(self) -> T:
return self.data.get()
def create_state(self):
return SimpleState()
def set_state(self, data: any):
self.data = data
def on_epoch_start(self):
self.data.reset()
def on_epoch_end(self):
pass
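# Minimal usage sketch (illustrative, not part of the original module):
#
#   module = SimpleStateModule()
#   module.set_state(module.create_state())
#   module.set(42)
#   assert module.get() == 42
#   module.on_epoch_start()   # clears the stored value back to None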
| [
"[email protected]"
] | |
91a64805557e29b680b1300121cddd217db78eef | e42a61b7be7ec3412e5cea0ffe9f6e9f34d4bf8d | /a10sdk/core/aam/aam_authentication_portal_logon_fail.py | 60ee400259987e360a04045658049f18c4e87e9f | [
"Apache-2.0"
] | permissive | amwelch/a10sdk-python | 4179565afdc76cdec3601c2715a79479b3225aef | 3e6d88c65bd1a2bf63917d14be58d782e06814e6 | refs/heads/master | 2021-01-20T23:17:07.270210 | 2015-08-13T17:53:23 | 2015-08-13T17:53:23 | 40,673,499 | 0 | 0 | null | 2015-08-13T17:51:35 | 2015-08-13T17:51:34 | null | UTF-8 | Python | false | false | 8,059 | py | from a10sdk.common.A10BaseClass import A10BaseClass
class FailMsgCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param fail_font_custom: {"description": "Specify custom font", "format": "string-rlx", "minLength": 1, "maxLength": 63, "not": "fail-face", "type": "string"}
:param fail_color: {"default": 0, "type": "number", "description": "Specify font color", "format": "flag"}
:param fail_size: {"description": "Specify font size", "minimum": 1, "type": "number", "maximum": 7, "format": "number"}
:param fail_msg: {"default": 0, "type": "number", "description": "Configure logon failure message in default logon fail page", "format": "flag"}
:param fail_text: {"minLength": 1, "maxLength": 63, "type": "string", "description": "Specify logon failure message (Default: Login Failed!!)", "format": "string-rlx"}
:param fail_color_value: {"description": "Specify 6-digit HEX color value", "format": "string", "minLength": 6, "maxLength": 6, "not": "fail-color-name", "type": "string"}
:param fail_font: {"default": 0, "type": "number", "description": "Sepcify font", "format": "flag"}
:param fail_color_name: {"not": "fail-color-value", "enum": ["aqua", "black", "blue", "fuchsia", "gray", "green", "lime", "maroon", "navy", "olive", "orange", "purple", "red", "silver", "teal", "white", "yellow"], "type": "string", "description": "'aqua': aqua; 'black': black; 'blue': blue; 'fuchsia': fuchsia; 'gray': gray; 'green': green; 'lime': lime; 'maroon': maroon; 'navy': navy; 'olive': olive; 'orange': orange; 'purple': purple; 'red': red; 'silver': silver; 'teal': teal; 'white': white; 'yellow': yellow; ", "format": "enum"}
:param fail_face: {"not": "fail-font-custom", "enum": ["Arial", "Courier_New", "Georgia", "Times_New_Roman", "Verdana"], "type": "string", "description": "'Arial': Arial; 'Courier_New': Courier New; 'Georgia': Georgia; 'Times_New_Roman': Times New Roman; 'Verdana': Verdana; ", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "fail-msg-cfg"
self.DeviceProxy = ""
self.fail_font_custom = ""
self.fail_color = ""
self.fail_size = ""
self.fail_msg = ""
self.fail_text = ""
self.fail_color_value = ""
self.fail_font = ""
self.fail_color_name = ""
self.fail_face = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class TitleCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param title: {"default": 0, "type": "number", "description": "Configure title in default logon fail page", "format": "flag"}
:param title_color: {"default": 0, "type": "number", "description": "Specify font color", "format": "flag"}
:param title_color_name: {"not": "title-color-value", "enum": ["aqua", "black", "blue", "fuchsia", "gray", "green", "lime", "maroon", "navy", "olive", "orange", "purple", "red", "silver", "teal", "white", "yellow"], "type": "string", "description": "'aqua': aqua; 'black': black; 'blue': blue; 'fuchsia': fuchsia; 'gray': gray; 'green': green; 'lime': lime; 'maroon': maroon; 'navy': navy; 'olive': olive; 'orange': orange; 'purple': purple; 'red': red; 'silver': silver; 'teal': teal; 'white': white; 'yellow': yellow; ", "format": "enum"}
:param title_font_custom: {"description": "Specify custom font", "format": "string-rlx", "minLength": 1, "maxLength": 63, "not": "title-face", "type": "string"}
:param title_face: {"not": "title-font-custom", "enum": ["Arial", "Courier_New", "Georgia", "Times_New_Roman", "Verdana"], "type": "string", "description": "'Arial': Arial; 'Courier_New': Courier New; 'Georgia': Georgia; 'Times_New_Roman': Times New Roman; 'Verdana': Verdana; ", "format": "enum"}
:param title_color_value: {"description": "Specify 6-digit HEX color value", "format": "string", "minLength": 6, "maxLength": 6, "not": "title-color-name", "type": "string"}
:param title_size: {"description": "Specify font size", "minimum": 1, "type": "number", "maximum": 7, "format": "number"}
:param title_text: {"minLength": 1, "maxLength": 63, "type": "string", "description": "Specify title (Default: Try Too Many Times)", "format": "string-rlx"}
:param title_font: {"default": 0, "type": "number", "description": "Sepcify font", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "title-cfg"
self.DeviceProxy = ""
self.title = ""
self.title_color = ""
self.title_color_name = ""
self.title_font_custom = ""
self.title_face = ""
self.title_color_value = ""
self.title_size = ""
self.title_text = ""
self.title_font = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class Background(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param bgfile: {"description": "Specify background image filename", "format": "string-rlx", "minLength": 1, "maxLength": 63, "not": "bgcolor", "type": "string"}
:param bgstyle: {"enum": ["tile", "stretch", "fit"], "type": "string", "description": "'tile': Tile; 'stretch': Stretch; 'fit': Fit; ", "format": "enum"}
:param bgcolor_value: {"description": "Specify 6-digit HEX color value", "format": "string", "minLength": 6, "maxLength": 6, "not": "bgcolor-name", "type": "string"}
:param bgcolor_name: {"not": "bgcolor-value", "enum": ["aqua", "black", "blue", "fuchsia", "gray", "green", "lime", "maroon", "navy", "olive", "orange", "purple", "red", "silver", "teal", "white", "yellow"], "type": "string", "description": "'aqua': aqua; 'black': black; 'blue': blue; 'fuchsia': fuchsia; 'gray': gray; 'green': green; 'lime': lime; 'maroon': maroon; 'navy': navy; 'olive': olive; 'orange': orange; 'purple': purple; 'red': red; 'silver': silver; 'teal': teal; 'white': white; 'yellow': yellow; ", "format": "enum"}
:param bgcolor: {"default": 0, "not": "bgfile", "type": "number", "description": "Specify background color", "format": "flag"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "background"
self.DeviceProxy = ""
self.bgfile = ""
self.bgstyle = ""
self.bgcolor_value = ""
self.bgcolor_name = ""
self.bgcolor = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
class LogonFail(A10BaseClass):
"""Class Description::
Logon fail page configuration.
Class logon-fail supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/aam/authentication/portal/{name}/logon-fail`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "logon-fail"
self.a10_url="/axapi/v3/aam/authentication/portal/{name}/logon-fail"
self.DeviceProxy = ""
self.fail_msg_cfg = {}
self.title_cfg = {}
self.background = {}
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| [
"[email protected]"
] | |
ecec82f7d6a458418140579021abfe8fd06af04d | d5934c0624095112533201ca748e035cf33e19c1 | /CodeWars.py | cf1e344b5cb120c23316d6af8e874e87e7799ead | [] | no_license | waithope/codewars | b5bbb81193cb1b98830024c16b2470c5b0d070c9 | 315d9dca4e0163b03409a2d806ce0f809353a991 | refs/heads/master | 2020-03-14T15:49:17.211859 | 2018-05-10T04:30:40 | 2018-05-10T04:30:40 | 131,684,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,745 | py |
# 0123456789
# 0##########
# 1## ##
# 2# # # #
# 3# # # #
# 4# ## #
# 5# ## #
# 6# # # #
# 7# # # #
# 8## ##
# 9##########
# rowCount = 10
# columnCount = 10
# for i in range(rowCount):
# for j in range(columnCount):
# if i == 0 or i == rowCount - 1 or j == 0 or \
# j == columnCount - 1 or i == j or j == columnCount - i - 1:
# print("#", end='')
# else:
# print(" ", end='')
# print()
def high_and_low(numbers):
# l = numbers.split(' ')
# print(l)
# min = int(l[0])
# max = int(l[0])
# for num in l:
# if int(num) < min:
# min = int(num)
# if int(num) > max:
# max = int(num)
# more clever
l = [int(num) for num in numbers.split(' ')]
return str(max(l)) + ' ' + str(min(l))
# print(high_and_low("4 5 29 54 4 0 -214 542 -64 1 -3 6 -6"))
## Descending Order
# def Descending_Order(num):
# return int(''.join(sorted(str(num), reverse=True)))
# print(Descending_Order(10147237031))
# # initialize
# a = []
# # create the table (name, age, job)
# a.append(["Nick", 30, "Doctor"])
# a.append(["John", 8, "Student"])
# a.append(["Paul", 22, "Car Dealer"])
# a.append(["Mark", 66, "Retired"])
# # sort the table by age
# import operator
# a.sort(key=operator.itemgetter(0, 1), reverse=True)
# # print the table
# print(a)
def DNA_strand(dna):
dna_map = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
return ''.join([dna_map[sym] for sym in dna])
def DNA_strand_v2(dna):
return dna.translate(str.maketrans('ATCG', 'TAGC'))
# assert(DNA_strand('ATTGC') == 'TAACG')
## Given a string, replace every letter with its position in the alphabet.
## a being 1, b being 2, etc.
def alphabet_position(text):
return ' '.join([str(ord(item.lower()) - ord('a') + 1) \
for item in text if item.isalpha() \
])
# print(alphabet_position('asdjfak'))
## Take a list of non-negative integers and strings
## Returns a new list with the strings filtered out.
def filter_list(l):
    # relies on CPython returning the same object from str(s) when s is
    # already a str, so `is not` filters strings out; v2 below is robust
    return [item for item in l if item is not str(item)]
def filter_list_v2(l):
return [item for item in l if not isinstance(item, str)]
# print(filter_list([1,2,'aasf','1','123',123]) == [1,2,123])
## Decode morse_code
MORSE_CODE = {
'.-': 'A', '-...': 'B', '-.-.': 'C', '-..': 'D', '.': 'E', '..-.': 'F',
'--.': 'G', '....': 'H', '..': 'I', '.---': 'J', '-.-': 'K', '.-..': 'L',
'--': 'M', '-.': 'N', '---': 'O', '.--.': 'P', '--.-': 'Q', '.-.': 'R',
'...': 'S', '-': 'T', '..-': 'U', '...-': 'V', '.--': 'W', '-..-': 'X',
'-.--': 'Y', '--..': 'Z',
'-----': '0', '.----': '1', '..---': '2', '...--': '3', '....-': '4',
'.....': '5', '-....': '6', '--...': '7', '---..': '8', '----.': '9',
'.-.-.-': '.', '--..--': ',', '..--..': '?', '.----.': "'", '-.-.--': '!',
'-..-.': '/', '-.--.': '(', '-.--.-': ')', '.-...': '&', '---...': ':',
'-.-.-.': ';', '-...-': '=', '.-.-.': '+', '-....-': '-', '..--.-': '_',
'.-..-.': '"', '...-..-': '$', '.--.-.': '@', '...---...': 'SOS'
}
def decodeMorse(morse_code):
morse_code_part = morse_code.strip().split(' ')
space_cnt = 0
output = ''
for ele in morse_code_part:
        if ele == '':
space_cnt += 1
if space_cnt == 2:
space_cnt = 0
output += ' '
else:
output += MORSE_CODE[ele]
return output
def decodeMorse_v2(morse_code):
return ' '.join([
''.join([MORSE_CODE[code] for code in word.split(' ')])
        for word in morse_code.strip().split('   ')
])
# print(decodeMorse_v2(".... . -.--   .--- ..- -.. ."))
## persistence(999) => 4 # Because 9*9*9 = 729, 7*2*9 = 126,
## # 1*2*6 = 12, and finally 1*2 = 2.
def persistence(n):
    factors = list(str(n))
    if len(factors) <= 1:
        return 0
    res = int(factors[0])
    for i in range(1, len(factors)):
        res *= int(factors[i])
    # recurse once on the full product instead of once per loop iteration
    return persistence(res) + 1
from functools import reduce
## reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5)
def persistence_v2(n):
factors = [int(x) for x in str(n)]
i = 0
while len(factors) > 1:
res = reduce(lambda x, y: x*y, factors)
i += 1
factors = [int(x) for x in str(res)]
return i
# print(persistence_v2(999))
def get_sign(x):
return (x > 0) - (x < 0)
# print(get_sign(-1))
## Write a function to calculate the absolute value of a 32-bit integer
def myabs(x):
high_bit_mask = x >> 31
return (x ^ high_bit_mask) - high_bit_mask
# print(myabs(7))
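# Sketch of why the mask trick works (two's-complement reasoning; in Python
# an arithmetic right shift of a negative int yields -1, so it holds for any
# width): for x < 0, high_bit_mask == -1, x ^ -1 == ~x, and ~x - (-1) ==
# ~x + 1, which is exactly negation; for x >= 0 the mask is 0, a no-op.
assert myabs(-7) == 7 and myabs(7) == 7 and myabs(0) == 0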
# import random
# print(random.randrange(10))
## Dig Pow 89 = 8^1 + 9^2
def sum_dig_pow(a, b):# range(a, b + 1) will be studied by the function
output = []
for num in range(a, b+1):
parts = list(str(num))
new_num = 0
for exp, base in enumerate(parts, 1):
new_num += (int(base))**exp
if num == new_num:
output.append(num)
return output
def dig_pow(n):
return sum([int(y)**x for x, y in enumerate(str(n), 1)])
def sum_dig_pow_v2(a, b):
return [num for num in range(a, b+1) if num == dig_pow(num)]
# print(sum_dig_pow_v2(89,135))
def countBits(n):
count = 0
while n > 0:
n = n & (n - 1)
count += 1
return count
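# Kernighan's trick: n & (n - 1) clears the lowest set bit, so the loop body
# runs once per set bit rather than once per bit position. Quick checks:
assert countBits(0) == 0
assert countBits(7) == 3     # 0b111
assert countBits(1024) == 1  # 0b10000000000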
# unique_in_order('AAAABBBCCDAABBB') == ['A', 'B', 'C', 'D', 'A', 'B']
# unique_in_order('ABBCcAD') == ['A', 'B', 'C', 'c', 'A', 'D']
# unique_in_order([1,2,2,3,3]) == [1,2,3]
def unique_in_order(iterable):
unique = []
prev = None
for char in iterable:
if char != prev:
unique.append(char)
prev = char
return unique
# print(unique_in_order([]))
def duplicate_count(text):
    ## str.count(sub) counts the occurrences of a substring
    occurs = [text.lower().count(char_cnt) for char_cnt in set(text.lower())]
cnt = 0
for num in occurs:
if num > 1:
cnt += 1
return cnt
def duplicate_count_v2(text):
return len([c for c in set(text.lower()) if text.lower().count(c) > 1])
# print(duplicate_count_v2("aaBbccddeeffgg"))
# add 2 integers using bitwise operations;
# Python ints are unbounded, so emulate 32-bit two's complement, otherwise
# the carry never dies out for negative operands and the loop spins forever
MASK = 0xFFFFFFFF
def add(a, b):
    while a & MASK:
        b, a = (b ^ a) & MASK, ((b & a) << 1) & MASK
    b &= MASK
    # reinterpret the 32-bit pattern as a signed value
    return b if b <= 0x7FFFFFFF else ~(b ^ MASK)
print(add(-1, -800))  # -801
def reverseWords(str):
return ' '.join(str.split(' ')[::-1])
# print(reverseWords("hello world"))
## if a portion of str1 characters can be rearranged to match str2,
## otherwise returns false.
# Only lower case letters will be used (a-z).
# No punctuation or digits will be included.
# Performance needs to be considered.
# scramble('rkqodlw', 'world') ==> True
# scramble('katas', 'steak') ==> False
##cost time 4861ms
def scramble_v1(s1,s2):
for c in set(s2):
if s1.count(c) < s2.count(c):
return False
return True
##cost time 5865ms
def scramble_v2(s1, s2):
s1_dict = {}
s2_dict = {}
for char in s1:
if char in s1_dict:
s1_dict[char] += 1
else:
s1_dict[char] = 1
for char in s2:
if char in s2_dict:
s2_dict[char] += 1
else:
s2_dict[char] = 1
for k, v in s2_dict.items():
if s1_dict.get(k, 0) >= v:
continue
else:
return False
return True
## cost time 6396ms
def scramble_v3(s1, s2):
h = [0] * 26
for char in s1:
h[ord(char) - 97] += 1
for char in s2:
h[ord(char) - 97] -= 1
for i in h:
if i < 0:
return False
return True
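# An equivalent Counter-based formulation (a sketch, not one of the timed
# variants above): subtracting counters keeps only the characters that s2
# needs more of than s1 supplies, so an empty result means a match.
from collections import Counter
def scramble_counter(s1, s2):
    return not (Counter(s2) - Counter(s1))
# scramble_counter('rkqodlw', 'world') -> True
# scramble_counter('katas', 'steak')   -> False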
## Divisors of 42 are : 1, 2, 3, 6, 7, 14, 21, 42.
## These divisors squared are: 1, 4, 9, 36, 49, 196, 441, 1764.
## The sum of the squared divisors is 2500 which is 50 * 50, a square!
## Given two integers m, n (1 <= m <= n) we want to find all integers between m and n
## whose sum of squared divisors is itself a square. 42 is such a number.
import math
def list_squared(m, n):
res = []
for num in range(m, n+1):
i = 1
sum = 0
while i <= math.sqrt(num): # all the divisors present in pairs
if num % i == 0:
div = num // i
sum += i**2
if div != i:
sum += div**2
i += 1
if math.sqrt(sum).is_integer():
res.append([num, sum])
return res
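# Illustrative check (the 42 walk-through above gives the [42, 2500] entry):
# list_squared(1, 250) should return [[1, 1], [42, 2500], [246, 84100]]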
## If the input number is already a palindrome, the number of steps is 0.
## Input will always be a positive integer.
##For example, start with 87:
## 87 + 78 = 165; 165 + 561 = 726; 726 + 627 = 1353; 1353 + 3531 = 4884
##4884 is a palindrome and we needed 4 steps to obtain it, so palindrome_chain_length(87) == 4
def is_palindrome(n):
return str(n) == str(n)[::-1]
def palindrome_chain_length(n):
step = 0
while not is_palindrome(n):
n += int(str(n)[::-1])
step += 1
return step
# print(palindrome_chain_length(87))
## Breadcrumb Generator
ignore_words = ["the", "of", "in", "from", "by", "with", "and",
"or", "for", "to", "at", "a"
]
def generate_bc(url, separator):
if url.startswith("http"):
url = url.split("//")[1]
crumb = url.split('/')
crumb[-1] = crumb[-1].split('.')[0].split('?')[0].split('#')[0]
if crumb[-1] in ('', 'index'):
crumb.pop()
n = len(crumb)
processed_parts = []
for i, level in enumerate(crumb):
aux = level
if i == 0:
if n == 1:
processed_parts.append('<span class="active">HOME</span>')
else:
processed_parts.append('<a href="/">HOME</a>')
else:
if len(level) > 30:
aux = ''.join([entry[0] for entry in level.split('-')
if entry not in ignore_words
])
else:
aux = ' '.join(aux.split('-'))
if i > 1 and i <= n - 2:
level = "/".join(crumb[1:i+1])
if i == n - 1:
processed_parts.append('<span class="active">%s</span>' % aux.upper())
else:
processed_parts.append('<a href="/%s/">%s</a>' % (level, aux.upper()))
return separator.join(processed_parts)
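# Illustrative call (hypothetical URL), tracing the logic above:
# generate_bc("mysite.com/pictures/holidays.html", " : ") ->
# '<a href="/">HOME</a> : <a href="/pictures/">PICTURES</a> : <span class="active">HOLIDAYS</span>'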
## hamming number
# Write a function that computes the nth smallest Hamming number.
# Specifically:
# The first smallest Hamming number is 1 = 2^0 * 3^0 * 5^0
# The second smallest Hamming number is 2 = 2^1 * 3^0 * 5^0
# The third smallest Hamming number is 3 = 2^0 * 3^1 * 5^0
# The fourth smallest Hamming number is 4 = 2^2 * 3^0 * 5^0
# The fifth smallest Hamming number is 5 = 2^0 * 3^0 * 5^1
def hamming(n):
hamm = [0 for num in range(n)]
hamm[0] = 1
a, b, c = 0, 0, 0
for i in range(1, n):
hamm[i] = min(hamm[a] * 2, hamm[b] * 3, hamm[c] * 5)
if hamm[i] == hamm[a] * 2: a += 1
if hamm[i] == hamm[b] * 3: b += 1
if hamm[i] == hamm[c] * 5: c += 1
return hamm[-1]
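# The pointers a, b, c remember which earlier Hamming number each prime
# (2, 3, 5) should multiply next; min() merges the three candidate streams,
# and every pointer that produced the minimum advances, so duplicates such
# as 6 = 2*3 = 3*2 are emitted only once.
assert hamming(10) == 12  # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12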
## original version also bad code
hamset = {1:1}
divisors = [2, 3, 5]
def hamming_v2(n):
if hamset.get(n) is not None:
return hamset[n]
i = list(hamset.keys())[-1] + 1
while i <= n:
now = hamset[i - 1]
find = False
while not find:
now += 1
rem = now
for div in divisors:
while (rem / div).is_integer():
rem = rem / div
if rem == 1:
hamset[i] = now
find = True
break
if find is True:
break
i += 1
return hamset[n]
# Strip Comments
# result = solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"])
# result should == "apples, pears\ngrapes\nbananas"
def solution(string,markers):
parts = string.split('\n')
for m in markers:
parts = [p.split(m)[0].rstrip() for p in parts]
print(parts)
return '\n'.join(parts)
# solution("apples, pears # and bananas\ngrapes\nbananas !apples", ["#", "!"])
# Original Version
def solution_v2(string,markers):
strip = 0
s = list(string)
for i in range(len(string)):
if s[i] in markers:
strip = 1
if s[i - 1] == ' ':
s[i - 1] = ''
if s[i] == "\n":
strip = 0
if strip == 1:
s[i] = ''
return ''.join(s)
# How many numbers III?
# Generate all the numbers of three digits that:
# the value of adding their corresponding ones(digits) is equal to 10.
# their digits are in increasing order (the numbers may have two or more equal contiguous digits)
# The numbers that fulfill the two above constraints are: 118, 127, 136, 145, 226, 235, 244, 334
# recursion
def find_all(sum_dig, digs):
res = [''.join([str(num) for num in x]) for x in gen(digs) if sum(x) == sum_dig]
if not res:
return []
return [len(res), int(res[0]), int(res[-1])]
def gen(d, start=1):
if d == 1:
for x in range(start, 10):
yield [x]
else:
for x in range(start, 10):
for y in gen(d - 1, x):
yield [x] + y
# built-in
import itertools
def find_all_v2(sum_dig, digs):
res = []
aux = list(itertools.combinations_with_replacement(range(1, 10), digs))
res = [''.join([str(num) for num in t]) for t in aux if sum(t) == sum_dig]
if not res:
return []
return [len(res), int(res[0]), int(res[-1])]
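# Both variants agree on the example from the description above:
# find_all(10, 3) == find_all_v2(10, 3) == [8, 118, 334]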
| [
"[email protected]"
] | |
defd147ec57f50888e2a862cc79b2e4e99453437 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/cloud/listenerruleref.py | 6093233a61cc528637a150a4cfdf201bb235a56f | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,791 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ListenerRuleRef(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.cloud.ListenerRuleRef")
meta.moClassName = "cloudListenerRuleRef"
meta.rnFormat = "lisRuleRef-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "Cloud Load Balancer Listener Rule Reference"
meta.writeAccessMask = 0x6000000000000001
meta.readAccessMask = 0x6000000000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.childClasses.add("cobra.model.cloud.RuleActionRef")
meta.childClasses.add("cobra.model.cloud.RuleConditionRef")
meta.childNamesAndRnPrefix.append(("cobra.model.cloud.RuleConditionRef", "conditionref-"))
meta.childNamesAndRnPrefix.append(("cobra.model.cloud.RuleActionRef", "actionref-"))
meta.parentClasses.add("cobra.model.cloud.ListenerRef")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.cloud.AListenerRule")
meta.rnPrefixes = [
('lisRuleRef-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "default", "default", 52033, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("default", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 52414, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 16)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "priority", "priority", 51814, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
meta.props.add("priority", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "name"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudLBToHcloudSecurityGroup", "From cloudLB to hcloudSecurityGroup", "cobra.model.hcloud.SecurityGroup"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CloudLBToVnsAbsGraph", "From cloudLB to vnsAbsGraph", "cobra.model.vns.AbsGraph"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("ALDevIfToGraphInst", "Graph Instances", "cobra.model.vns.GraphInst"))
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
b68d2743352106871caf8d9b6a7eae7a0237fab5 | 3a4f14d6638bc0c12c129ed73c6c3543437203df | /src/morphforge/simulation/__init__.py | 816c765420d8ed4ec98702b232f684d5e0c03a28 | [
"BSD-2-Clause"
] | permissive | unidesigner/morphforge | ef04ccb3877f069a0feea72eb1b44c97930dac44 | 510cd86549b2c2fb19296da2d4408ed8091fb962 | refs/heads/master | 2021-01-15T22:34:28.795355 | 2012-04-05T08:55:12 | 2012-04-05T08:55:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | #-------------------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#-------------------------------------------------------------------------------
| [
"[email protected]"
] | |
ab59227df732b23f0241889547035615c3d2a258 | a1bffcd8854e1843e56bb812d4d83b3161a5211e | /plugins/connection/saltstack.py | 6b77c982a43b9377025aa7831e6c44e954316633 | [] | no_license | goneri/ansible.community | 1a71f9d98c164b77f8ed2ed7f558b4963005ff8f | f26f612dd0a3154050d90b51a75502018c95f6e4 | refs/heads/master | 2020-12-29T07:47:35.353515 | 2020-01-22T17:43:18 | 2020-01-22T17:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,800 | py | # Based on local.py (c) 2012, Michael DeHaan <[email protected]>
# Based on chroot.py (c) 2013, Maykel Moya <[email protected]>
# Based on func.py
# (c) 2014, Michael Scherer <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author: Michael Scherer (@mscherer) <[email protected]>
connection: saltstack
short_description: Allow ansible to piggyback on salt minions
description:
- This allows you to use existing Saltstack infrastructure to connect to targets.
'''
import re
import os
import pty
import subprocess
from ansible_collections.ansible.community.plugins.module_utils._text import to_bytes, to_text
from ansible.module_utils.six.moves import cPickle
HAVE_SALTSTACK = False
try:
import salt.client as sc
HAVE_SALTSTACK = True
except ImportError:
pass
from ansible import errors
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
''' Salt-based connections '''
has_pipelining = False
# while the name of the product is salt, naming that module salt cause
# trouble with module import
transport = 'ansible.community.saltstack'
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.host = self._play_context.remote_addr
def _connect(self):
if not HAVE_SALTSTACK:
raise errors.AnsibleError("saltstack is not installed")
self.client = sc.LocalClient()
self._connected = True
return self
def exec_command(self, cmd, sudoable=False, in_data=None):
''' run a command on the remote minion '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
self._display.vvv("EXEC %s" % (cmd), host=self.host)
# need to add 'true;' to work around https://github.com/saltstack/salt/issues/28077
res = self.client.cmd(self.host, 'cmd.exec_code_all', ['bash', 'true;' + cmd])
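        # res is keyed by minion id; each value is a dict shaped roughly like
        # {'retcode': 0, 'stdout': '...', 'stderr': '...'} (illustrative)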
if self.host not in res:
raise errors.AnsibleError("Minion %s didn't answer, check if salt-minion is running and the name is correct" % self.host)
p = res[self.host]
return (p['retcode'], p['stdout'], p['stderr'])
def _normalize_path(self, path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
out_path = self._normalize_path(out_path, '/')
self._display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
with open(in_path) as in_fh:
content = in_fh.read()
self.client.cmd(self.host, 'file.write', [out_path, content])
# TODO test it
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._normalize_path(in_path, '/')
self._display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
content = self.client.cmd(self.host, 'cp.get_file_str', [in_path])[self.host]
open(out_path, 'wb').write(content)
def close(self):
''' terminate the connection; nothing to do here '''
pass
| [
"[email protected]"
] | |
2ef4a0004adef2101bd16724ef51e27c1704ae7e | 66c3ff83c3e3e63bf8642742356f6c1817a30eca | /.vim/tmp/neocomplete/buffer_cache/=+home=+dante=+proyectos=+django-1.9=+veterinaria=+django_apps=+inventario=+forms.py | 5d06e80f35674c63172312b5fc3cafbb30c00408 | [] | no_license | pacifi/vim | 0a708e8bc741b4510a8da37da0d0e1eabb05ec83 | 22e706704357b961acb584e74689c7080e86a800 | refs/heads/master | 2021-05-20T17:18:10.481921 | 2020-08-06T12:38:58 | 2020-08-06T12:38:58 | 30,074,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | {'from', 'django', 'import', 'forms', 'models', 'Articulo', 'Categoria', 'class', 'ArticuloForm', 'ModelForm', 'Meta', 'model', 'fields', 'nombre', 'marca', 'codigo', 'estado', 'p_compra', 'p_venta', 'descripcion', 'avatar', 'CategoriaForm'}
| [
"[email protected]"
] | |
1fe0e2c621f45cf284a4b99347dfe1a3a3ff93c6 | e9032e64138d7b9dd90a330dfe4588e2c83f6667 | /google/cloud/compute_v1/services/region_health_checks/transports/__init__.py | 693981b3422dd04d3df9d0fab64969fdd9002c78 | [
"Apache-2.0"
] | permissive | Ctfbuster/python-compute | 6cff2418969009794c3fadadc4c45e20d7b40509 | 7a9e8324e08c46a93050908760b2b5aca054a863 | refs/heads/main | 2023-08-26T12:37:52.363526 | 2021-10-04T15:34:37 | 2021-10-04T15:34:37 | 412,884,620 | 0 | 0 | Apache-2.0 | 2021-10-02T18:49:05 | 2021-10-02T18:49:03 | null | UTF-8 | Python | false | false | 1,050 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type
from .base import RegionHealthChecksTransport
from .rest import RegionHealthChecksRestTransport
# Compile a registry of transports.
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[RegionHealthChecksTransport]]
_transport_registry["rest"] = RegionHealthChecksRestTransport
__all__ = (
"RegionHealthChecksTransport",
"RegionHealthChecksRestTransport",
)
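# Illustrative lookup against the registry defined above (a usage sketch,
# not part of the generated API surface):
# transport_cls = _transport_registry["rest"]  # -> RegionHealthChecksRestTransport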
| [
"[email protected]"
] | |
be843d33ee176ec0d6827fefd01fdd6a15847466 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03282/s286249227.py | f28b18ffa16239f136d180ee675975bd01b56411 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | import sys
input = sys.stdin.readline
S = list(input().rstrip('\n'))
K = int(input())
n = 1
for i in range(min(len(S), K)):
if S[i] != '1':
n = int(S[i])
break
print(n)
| [
"[email protected]"
] | |
69dac76bc95dc024cdf72ce09289bdfc512ab898 | 4b9a90ad8debaec515dcbad50b02caecba6cd6a5 | /django/Dev/trydjango/venv2/lib/python3.7/token.py | 0a0df7a35f6645f14e34e8ddfd3ff0ff267527a2 | [] | no_license | nabilatajrin/python-programs | f51612808f1837d8e786c7f2eb1faf6387859b18 | 74533bab6ee2d204920c1b97622bdf757ae44aae | refs/heads/master | 2022-12-24T20:14:57.046497 | 2022-12-20T08:41:29 | 2022-12-20T08:41:29 | 199,279,168 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | /home/iit/anaconda3/lib/python3.7/token.py | [
"[email protected]"
] | |
eea07dff9a04a5582761f119169aec890ef58c11 | aba1d17ddc7d7ad9f49e2d6d87600e9e0387ba14 | /mi/dataset/driver/sio_eng/sio/sio_eng_sio_recovered_driver.py | d84a95f50b651d68787a6e469c0b57ced9a03727 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | oceanobservatories/mi-instrument | 3ad880c1366b1a8461fc9085768df0e9ddeb6ef5 | bdbf01f5614e7188ce19596704794466e5683b30 | refs/heads/master | 2023-07-23T07:28:36.091223 | 2023-07-14T15:54:49 | 2023-07-14T15:54:49 | 24,165,325 | 1 | 32 | BSD-2-Clause | 2023-07-13T01:39:22 | 2014-09-17T22:53:22 | Python | UTF-8 | Python | false | false | 1,673 | py | #!/usr/bin/env python
"""
@package mi.dataset.driver.sio_eng/sio
@file mi/dataset/driver/sio_eng/sio/sio_eng_sio_recovered_driver.py
@author Jeff Roy
@brief Driver for the sio_eng_sio instrument
Release notes:
Initial Release
"""
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.parser.sio_eng_sio import SioEngSioParser
from mi.core.versioning import version
@version("15.6.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Java Object to consume the output of the parser
:return particle_data_handler
"""
with open(source_file_path, 'rb') as stream_handle:
# create and instance of the concrete driver class defined below
driver = SioEngSioRecoveredDriver(unused, stream_handle, particle_data_handler)
driver.processFileStream()
return particle_data_handler
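# Illustrative invocation (the path and handler instance are hypothetical):
# particle_data_handler = parse(None, '/tmp/node59p1.sio.dat', ParticleDataHandler())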
class SioEngSioRecoveredDriver(SimpleDatasetDriver):
"""
Derived sio_eng_sio driver class
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.sio_eng_sio',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'SioEngSioRecoveredDataParticle'
}
parser = SioEngSioParser(parser_config, stream_handle,
self._exception_callback)
return parser
| [
"[email protected]"
] | |
87e6fcae17d4bffaeaa0b3223eed01cede3b60c5 | 2f666fc0c3ede72e032a66fe186300212d6160a6 | /flat/views.py | 5c15f6d5dea27d17645f032a9348c972d8709e2f | [] | no_license | anukat2015/flat | e8f7439ffe5cb4223a596ec7085846ebde07018c | 79a75d8d2bb6275fc749d13c19f4748dc8138a98 | refs/heads/master | 2021-01-24T14:46:54.045115 | 2016-03-23T09:07:19 | 2016-03-23T09:07:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,694 | py | from __future__ import print_function, unicode_literals, division, absolute_import
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
import django.contrib.auth
from pynlpl.formats import fql
import json
import datetime
from django.conf import settings
import flat.comm
import flat.users
import lxml.html
import sys
if sys.version < '3':
from urllib2 import URLError, HTTPError #pylint: disable=import-error
else:
from urllib.error import URLError, HTTPError
import os
def getcontext(request,namespace,docid, doc, mode):
return {
'configuration': settings.CONFIGURATIONS[request.session['configuration']],
'configuration_json': json.dumps(settings.CONFIGURATIONS[request.session['configuration']]),
'namespace': namespace,
'testnum': request.GET.get('testNumber',0),
'docid': docid,
'mode': mode,
'modes': settings.CONFIGURATIONS[request.session['configuration']]['modes'] ,
'modes_json': json.dumps([x[0] for x in settings.CONFIGURATIONS[request.session['configuration']]['modes'] ]),
'perspectives_json': json.dumps(settings.CONFIGURATIONS[request.session['configuration']]['perspectives']),
'docdeclarations': json.dumps(doc['declarations']) if 'declarations' in doc else "{}",
'setdefinitions': json.dumps(doc['setdefinitions']) if 'setdefinitions' in doc else "{}",
'toc': json.dumps(doc['toc']) if 'toc' in doc else "[]",
'slices': json.dumps(doc['slices']) if 'slices' in doc else "{}",
'loggedin': request.user.is_authenticated(),
'version': settings.VERSION,
'username': request.user.username,
'waitmessage': "Loading document on server and initialising web-environment...",
}
def validatenamespace(namespace):
return namespace.replace('..','').replace('"','').replace(' ','_').replace(';','').replace('&','').strip('/')
def getdocumentselector(query):
    if query.startswith("USE "):
        end = query.find(' ', 4)
        if end >= 0:
            try:
                namespace, docid = query[4:end].rsplit("/", 1)
            except ValueError:
                raise fql.SyntaxError("USE statement takes namespace/docid pair")
            return (validatenamespace(namespace), docid), query[end+1:]
        else:
            try:
                namespace, docid = query[4:].rsplit("/", 1)
            except ValueError:
                raise fql.SyntaxError("USE statement takes namespace/docid pair")
            return (validatenamespace(namespace), docid), ""
    return None, query
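# e.g. getdocumentselector('USE mynamespace/mydoc SELECT w') returns
# (('mynamespace', 'mydoc'), 'SELECT w')  -- illustrative values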
def getbody(html):
doc = lxml.html.fromstring(html)
return doc.xpath("//body/p")[0].text_content()
def docserveerror(e, d=None):
    if d is None:
        d = {}
if isinstance(e, HTTPError):
body = getbody(e.read())
d['fatalerror'] = "<strong>Fatal Error:</strong> The document server returned an error<pre style=\"font-weight: bold\">" + str(e) + "</pre><pre>" + body +"</pre>"
d['fatalerror_text'] = body
elif isinstance(e, URLError):
d['fatalerror'] = "<strong>Fatal Error:</strong> Could not connect to document server!"
d['fatalerror_text'] = "Could not connect to document server!"
    elif isinstance(e, str):
if sys.version < '3':
d['fatalerror'] = e.decode('utf-8') #pylint: disable=undefined-variable
d['fatalerror_text'] = e.decode('utf-8') #pylint: disable=undefined-variable
else:
d['fatalerror'] = e
d['fatalerror_text'] = e
elif sys.version < '3' and isinstance(e, unicode): #pylint: disable=undefined-variable
d['fatalerror'] = e
d['fatalerror_text'] = e
elif isinstance(e, Exception):
# we don't handle other exceptions, raise!
raise
return d
def initdoc(request, namespace, docid, mode, template, context=None):
    """Initialise a document (not invoked directly)"""
    if context is None:
        context = {}
perspective = request.GET.get('perspective','document')
flatargs = {
'setdefinitions': True,
'declarations': True,
'toc': True,
'slices': request.GET.get('slices',settings.CONFIGURATIONS[request.session['configuration']].get('slices','p:25,s:100')), #overriden either by configuration or by user
'customslicesize': 0, #disabled for initial probe
'textclasses': True,
}
try:
doc = flat.comm.query(request, "USE " + namespace + "/" + docid + " PROBE", **flatargs) #retrieves only the meta information, not document content
context.update(getcontext(request,namespace,docid, doc, mode))
except Exception as e:
context.update(docserveerror(e))
response = render(request, template, context)
if 'fatalerror' in context:
response.status_code = 404
return response
@login_required
def query(request,namespace, docid):
if request.method != 'POST':
return HttpResponseForbidden("POST method required for " + namespace + "/" + docid + "/query")
flatargs = {
'customslicesize': request.POST.get('customslicesize',settings.CONFIGURATIONS[request.session['configuration']].get('customslicesize','50')), #for pagination of search results
}
if flat.users.models.hasreadpermission(request.user.username, namespace):
#stupid compatibility stuff
if sys.version < '3':
if hasattr(request, 'body'):
data = json.loads(unicode(request.body,'utf-8')) #pylint: disable=undefined-variable
else: #older django
data = json.loads(unicode(request.raw_post_data,'utf-8')) #pylint: disable=undefined-variable
else:
if hasattr(request, 'body'):
data = json.loads(str(request.body,'utf-8'))
else: #older django
data = json.loads(str(request.raw_post_data,'utf-8'))
if not data['queries']:
return HttpResponseForbidden("No queries to run")
for query in data['queries']:
#get document selector and check it doesn't violate the namespace
docselector, query = getdocumentselector(query)
if not docselector:
return HttpResponseForbidden("Query does not start with a valid document selector (USE keyword)!")
elif docselector[0] != namespace:
return HttpResponseForbidden("Query would affect a different namespace than your current one, forbidden!")
if query != "GET" and query[:4] != "CQL ":
#parse query on this end to catch syntax errors prior to sending, should be fast enough anyway
try:
query = fql.Query(query)
except fql.SyntaxError as e:
return HttpResponseForbidden("FQL Syntax Error: " + e)
needwritepermission = query.declarations or query.action and query.action.action != "SELECT"
else:
needwritepermission = False
if needwritepermission and not flat.users.models.haswritepermission(request.user.username, namespace):
return HttpResponseForbidden("Permission denied, no write access")
query = "\n".join(data['queries']) #throw all queries on a big pile to transmit
try:
d = flat.comm.query(request, query,**flatargs)
except Exception as e:
if sys.version < '3':
errmsg = docserveerror(e)['fatalerror_text']
return HttpResponseForbidden("FoLiA Document Server error: ".encode('utf-8') + errmsg.encode('utf-8'))
else:
return HttpResponseForbidden("FoLiA Document Server error: " + docserveerror(e)['fatalerror_text'])
return HttpResponse(json.dumps(d).encode('utf-8'), content_type='application/json')
else:
return HttpResponseForbidden("Permission denied, no read access")
def login(request):
if 'username' in request.POST and 'password' in request.POST:
username = request.POST['username']
password = request.POST['password']
request.session['configuration'] = request.POST['configuration']
user = django.contrib.auth.authenticate(username=username, password=password)
if user is not None:
if user.is_active:
django.contrib.auth.login(request, user)
# Redirect to a success page.
if 'next' in request.POST:
return redirect("/" + request.POST['next'])
elif 'next' in request.GET:
return redirect("/" + request.GET['next'])
else:
return redirect("/")
else:
# Return a 'disabled account' error message
return render(request, 'login.html', {'error': "This account is disabled","defaultconfiguration":settings.DEFAULTCONFIGURATION, "configurations":settings.CONFIGURATIONS , 'version': settings.VERSION} )
else:
# Return an 'invalid login' error message.
return render(request, 'login.html', {'error': "Invalid username or password","defaultconfiguration":settings.DEFAULTCONFIGURATION, "configurations":settings.CONFIGURATIONS, 'version': settings.VERSION} )
else:
return render(request, 'login.html',{"defaultconfiguration":settings.DEFAULTCONFIGURATION, "configurations":settings.CONFIGURATIONS, "version": settings.VERSION})
def logout(request):
if 'configuration' in request.session:
del request.session['configuration']
django.contrib.auth.logout(request)
return redirect("/login")
def register(request):
if request.method == 'POST':
form = django.contrib.auth.forms.UserCreationForm(request.POST)
if form.is_valid():
new_user = form.save()
return HttpResponseRedirect("/login/")
else:
form = django.contrib.auth.forms.UserCreationForm()
return render(request, "register.html", {
'form': form,
'version': settings.VERSION,
})
def fatalerror(request, e,code=404):
if isinstance(e, Exception):
response = render(request,'base.html', docserveerror(e))
else:
response = render(request,'base.html', {'fatalerror': e})
response.status_code = code
return response
@login_required
def index(request, namespace=""):
try:
namespaces = flat.comm.get(request, '/namespaces/' + namespace)
except Exception as e:
return fatalerror(request,e)
if not namespace:
#check if user namespace is preset, if not, make it
        if request.user.username not in namespaces['namespaces']:
try:
flat.comm.get(request, "createnamespace/" + request.user.username, False)
except Exception as e:
return fatalerror(request,e)
readpermission = flat.users.models.hasreadpermission(request.user.username, namespace)
dirs = []
print(namespaces['namespaces'],file=sys.stderr)
for ns in sorted(namespaces['namespaces']):
if readpermission or flat.users.models.hasreadpermission(request.user.username, os.path.join(namespace, ns)):
dirs.append(ns)
dirs.sort()
docs = []
if namespace and readpermission:
try:
r = flat.comm.get(request, '/documents/' + namespace)
except Exception as e:
return fatalerror(request,e)
for d in sorted(r['documents']):
docid = os.path.basename(d.replace('.folia.xml',''))
docs.append( (docid, round(r['filesize'][d] / 1024 / 1024,2) , datetime.datetime.fromtimestamp(r['timestamp'][d]).strftime("%Y-%m-%d %H:%M") ) )
    if 'configuration' not in request.session:
        return logout(request)
docs.sort()
if namespace:
parentdir = '/'.join(namespace.split('/')[:-1])
else:
parentdir = ""
return render(request, 'index.html', {'namespace': namespace,'parentdir': parentdir, 'dirs': dirs, 'docs': docs, 'defaultmode': settings.DEFAULTMODE,'loggedin': request.user.is_authenticated(), 'username': request.user.username, 'configuration': settings.CONFIGURATIONS[request.session['configuration']], 'version': settings.VERSION})
@login_required
def download(request, namespace, docid):
data = flat.comm.query(request, "USE " + namespace + "/" + docid + " GET",False)
return HttpResponse(data, content_type='text/xml')
@login_required
def upload(request):
if request.method == 'POST':
namespace = request.POST['namespace'].replace('/','').replace('..','.').replace(' ','').replace('&','')
if flat.users.models.haswritepermission(request.user.username, namespace) and 'file' in request.FILES:
#if sys.version < '3':
# data = unicode(request.FILES['file'].read(),'utf-8') #pylint: disable=undefined-variable
#else:
# data = str(request.FILES['file'].read(),'utf-8')
try:
response = flat.comm.postxml(request,"upload/" + namespace , request.FILES['file'])
except Exception as e:
return fatalerror(request,e)
if 'error' in response and response['error']:
                return fatalerror(request, response['error'], 403)
else:
docid = response['docid']
return HttpResponseRedirect("/" + settings.DEFAULTMODE + "/" + namespace + "/" + docid )
else:
return fatalerror("Permission denied",403)
else:
return fatalerror("Permission denied",403)
@login_required
def addnamespace(request):
if request.method == 'POST':
namespace = request.POST['namespace'].replace('/','').replace('..','.').replace(' ','').replace('&','')
newdirectory = request.POST['newdirectory'].replace('/','').replace('..','.').replace(' ','').replace('&','')
if flat.users.models.haswritepermission(request.user.username, namespace):
try:
response = flat.comm.get(request,"createnamespace/" + namespace + "/" + newdirectory)
except Exception as e:
return fatalerror(request,e)
if 'error' in response and response['error']:
                return fatalerror(request, response['error'], 403)
elif namespace:
return HttpResponseRedirect("/index/" + namespace + '/' + newdirectory )
else:
return HttpResponseRedirect("/index/" + newdirectory )
else:
return fatalerror("Permission denied",403)
else:
return fatalerror("Permission denied",403)
| [
"[email protected]"
] | |
503df77b7e73dde22d4eea8e86d995e4f9983cbb | 798960eb97cd1d46a2837f81fb69d123c05f1164 | /symphony/cli/pyinventory/graphql/mutation/edit_service_type.py | 8b288246df912b964320536842d70b2deedf041e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | kyaaqba/magma | 36d5fa00ce4f827e6ca5ebd82d97a3d36e5f5b5b | fdb7be22a2076f9a9b158c9670a9af6cad68b85f | refs/heads/master | 2023-01-27T12:04:52.393286 | 2020-08-20T20:23:50 | 2020-08-20T20:23:50 | 289,102,268 | 0 | 0 | NOASSERTION | 2020-08-20T20:18:42 | 2020-08-20T20:18:41 | null | UTF-8 | Python | false | false | 2,154 | py | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass
from datetime import datetime
from gql.gql.datetime_utils import DATETIME_FIELD
from gql.gql.graphql_client import GraphqlClient
from gql.gql.client import OperationException
from gql.gql.reporter import FailedOperationException
from functools import partial
from numbers import Number
from typing import Any, Callable, List, Mapping, Optional, Dict
from time import perf_counter
from dataclasses_json import DataClassJsonMixin
from ..fragment.service_type import ServiceTypeFragment, QUERY as ServiceTypeFragmentQuery
from ..input.service_type_edit_data import ServiceTypeEditData
QUERY: List[str] = ServiceTypeFragmentQuery + ["""
mutation EditServiceTypeMutation($input: ServiceTypeEditData!) {
editServiceType(data: $input) {
...ServiceTypeFragment
}
}
"""]
@dataclass
class EditServiceTypeMutation(DataClassJsonMixin):
@dataclass
class EditServiceTypeMutationData(DataClassJsonMixin):
@dataclass
class ServiceType(ServiceTypeFragment):
pass
editServiceType: ServiceType
data: EditServiceTypeMutationData
@classmethod
# fmt: off
def execute(cls, client: GraphqlClient, input: ServiceTypeEditData) -> EditServiceTypeMutationData.ServiceType:
# fmt: off
variables: Dict[str, Any] = {"input": input}
try:
network_start = perf_counter()
response_text = client.call(''.join(set(QUERY)), variables=variables)
decode_start = perf_counter()
res = cls.from_json(response_text).data
decode_time = perf_counter() - decode_start
network_time = decode_start - network_start
client.reporter.log_successful_operation("EditServiceTypeMutation", variables, network_time, decode_time)
return res.editServiceType
except OperationException as e:
raise FailedOperationException(
client.reporter,
e.err_msg,
e.err_id,
"EditServiceTypeMutation",
variables,
)
| [
"[email protected]"
] | |
256ee04da6e71642f3282eebb0892374c143abcb | 2090b6b92d5cada89504de548b14f9c729856606 | /visualize/waveform/compare_waveforms_1obs2syn.py | 0b147dda0e923352ef1132a3ea3948fa71d94e60 | [] | no_license | ziyixiArchive/Japan_Slab_code | 4f6a366889278ad499971cf1132591b9029c0f8c | 4cb19939e45739faee7a8b6ec3d3a5da4549a108 | refs/heads/master | 2022-03-14T18:11:47.768695 | 2019-12-17T21:48:32 | 2019-12-17T21:48:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,378 | py | import matplotlib.backends.backend_pdf
import matplotlib.pyplot as plt
import obspy
import pyasdf
from obspy.geodetics.base import gps2dist_azimuth, locations2degrees
from obspy.taup import TauPyModel
from recordtype import recordtype
import numpy as np
import click
import matplotlib as mpl
import tqdm
label_size = 25
mpl.rcParams['xtick.labelsize'] = label_size
to_plot_trace = recordtype("to_plot_trace", [
"obs_z", "syn1_z", "syn2_z", "obs_r", "syn1_r", "syn2_r", "obs_t", "syn1_t", "syn2_t", "info"])
def build_to_plot_traces(obs_ds, syn1_ds, syn2_ds, trace_length):
# obs_ds,syn_ds opened asdf file
# get keys
key_obs = set(obs_ds.waveforms.list())
key_syn1 = set(syn1_ds.waveforms.list())
key_syn2 = set(syn2_ds.waveforms.list())
keys = key_obs & key_syn1 & key_syn2
result = {}
# for each item in keys, get info
# since the window is selected according to the two asdf files, we can just use keys
for key in keys:
axkey = key.replace(".", "_")
tag_obs = obs_ds.waveforms[key].get_waveform_tags()[0]
tag_syn1 = syn1_ds.waveforms[key].get_waveform_tags()[0]
tag_syn2 = syn2_ds.waveforms[key].get_waveform_tags()[0]
        # travel-time info per station is read from the observed dataset's auxiliary data
info = obs_ds.auxiliary_data.Traveltimes[axkey].parameters
obs_st = obs_ds.waveforms[key][tag_obs].copy()
syn1_st = syn1_ds.waveforms[key][tag_syn1].copy()
syn2_st = syn2_ds.waveforms[key][tag_syn2].copy()
# slice
obs_st.trim(obs_st[0].stats.starttime,
obs_st[0].stats.starttime+trace_length)
syn1_st.trim(syn1_st[0].stats.starttime,
syn1_st[0].stats.starttime+trace_length)
syn2_st.trim(syn2_st[0].stats.starttime,
syn2_st[0].stats.starttime+trace_length)
obs_r = obs_st[0]
obs_t = obs_st[1]
obs_z = obs_st[2]
syn1_r = syn1_st[0]
syn1_t = syn1_st[1]
syn1_z = syn1_st[2]
syn2_r = syn2_st[0]
syn2_t = syn2_st[1]
syn2_z = syn2_st[2]
result[key] = to_plot_trace(
obs_z, syn1_z, syn2_z, obs_r, syn1_r, syn2_r, obs_t, syn1_t, syn2_t, info)
return result
def build_plottting_structure(plot_traces, azimuth_width):
# we assume 360%azimuth_width==0
num_azimuths = 360//azimuth_width
result = [[] for i in range(num_azimuths)]
# for each item in plot_traces, seprate them into different []
for key in plot_traces:
value = plot_traces[key]
info = value.info
azimuth = info["azimuth"]
index_azimuth = int(azimuth//azimuth_width)
result[index_azimuth].append((key, value))
# for each azimuth bin, sort them according to the gcarc
def sort_func(item):
return item[1].info["gcarc"]
for index_azimuth in range(num_azimuths):
result[index_azimuth] = sorted(result[index_azimuth], key=sort_func)
return result
@click.command()
@click.option('--obs_asdf', required=True, type=str)
@click.option('--syn1_asdf', required=True, type=str)
@click.option('--syn2_asdf', required=True, type=str)
@click.option('--azimuth_width', required=True, type=int)
@click.option('--output_pdf', required=True, type=str)
@click.option('--waves_perpage', required=True, type=int)
@click.option('--trace_length', required=True, type=int)
def main(obs_asdf, syn1_asdf, syn2_asdf, azimuth_width, output_pdf, waves_perpage, trace_length):
obs_ds = pyasdf.ASDFDataSet(obs_asdf, mode="r")
syn1_ds = pyasdf.ASDFDataSet(syn1_asdf, mode="r")
syn2_ds = pyasdf.ASDFDataSet(syn2_asdf, mode="r")
plot_traces = build_to_plot_traces(obs_ds, syn1_ds, syn2_ds, trace_length)
plotting_structure = build_plottting_structure(plot_traces, azimuth_width)
# plot figures
pdf = matplotlib.backends.backend_pdf.PdfPages(output_pdf)
figs = plt.figure()
num_azimuths = 360//azimuth_width
for index_azimuth in tqdm.tqdm(range(num_azimuths)):
# for each azimuth bin
azimuth_bin_plot_traces = plotting_structure[index_azimuth]
num_azimuth_bin_plot_traces = len(azimuth_bin_plot_traces)
# get num_pages for this azimuth bin
if(num_azimuth_bin_plot_traces % waves_perpage == 0):
num_pages = num_azimuth_bin_plot_traces // waves_perpage
else:
num_pages = (num_azimuth_bin_plot_traces // waves_perpage)+1
for ipage in range(num_pages):
start_index = ipage*waves_perpage
end_index = (ipage+1)*waves_perpage
azimuth_bin_plot_traces_this_page = azimuth_bin_plot_traces[start_index:end_index]
fig = plt.figure(figsize=(150, 150))
index_count = 1
axr, axz, axt = None, None, None # get the last axes
xticks = None
for each_plot_trace_all in azimuth_bin_plot_traces_this_page:
each_plot_trace = each_plot_trace_all[1]
each_plot_id = each_plot_trace_all[0]
# z
axz = fig.add_subplot(waves_perpage, 3, index_count)
obs = each_plot_trace.obs_z
syn1 = each_plot_trace.syn1_z
syn2 = each_plot_trace.syn2_z
x_obs = np.linspace(0, obs.stats.endtime -
obs.stats.starttime, obs.stats.npts)
x_syn1 = np.linspace(0, syn1.stats.endtime -
syn1.stats.starttime, syn1.stats.npts)
x_syn2 = np.linspace(0, syn2.stats.endtime -
syn2.stats.starttime, syn2.stats.npts)
y_obs = obs.data
y_syn1 = syn1.data
y_syn2 = syn2.data
axz.plot(x_obs, y_obs, color="k")
axz.plot(x_syn1, y_syn1, color="r")
axz.plot(x_syn2, y_syn2, color="b")
axz.get_yaxis().set_ticklabels([])
index_count += 1
# r
axr = fig.add_subplot(waves_perpage, 3, index_count)
obs = each_plot_trace.obs_r
syn1 = each_plot_trace.syn1_r
syn2 = each_plot_trace.syn2_r
x_obs = np.linspace(0, obs.stats.endtime -
obs.stats.starttime, obs.stats.npts)
x_syn1 = np.linspace(0, syn1.stats.endtime -
syn1.stats.starttime, syn1.stats.npts)
x_syn2 = np.linspace(0, syn2.stats.endtime -
syn2.stats.starttime, syn2.stats.npts)
y_obs = obs.data
y_syn1 = syn1.data
y_syn2 = syn2.data
axr.plot(x_obs, y_obs, color="k")
axr.plot(x_syn1, y_syn1, color="r")
axr.plot(x_syn2, y_syn2, color="b")
axr.get_yaxis().set_ticklabels([])
index_count += 1
# t
axt = fig.add_subplot(waves_perpage, 3, index_count)
obs = each_plot_trace.obs_t
syn1 = each_plot_trace.syn1_t
syn2 = each_plot_trace.syn2_t
x_obs = np.linspace(0, obs.stats.endtime -
obs.stats.starttime, obs.stats.npts)
x_syn1 = np.linspace(0, syn1.stats.endtime -
syn1.stats.starttime, syn1.stats.npts)
x_syn2 = np.linspace(0, syn2.stats.endtime -
syn2.stats.starttime, syn2.stats.npts)
y_obs = obs.data
y_syn1 = syn1.data
y_syn2 = syn2.data
axt.plot(x_obs, y_obs, color="k")
axt.plot(x_syn1, y_syn1, color="r")
axt.plot(x_syn2, y_syn2, color="b")
axt.get_yaxis().set_ticklabels([])
index_count += 1
# add labels
axz.set_ylabel(
f"id:{each_plot_id}\ngcarc:{each_plot_trace.info['gcarc']:.2f}\nazimuth:{each_plot_trace.info['azimuth']:.2f}", fontsize=60)
# get xticks
xticks = np.arange(np.min(x_obs), np.max(x_obs)+1, 100)
axz.set_xticks(xticks)
axr.set_xticks(xticks)
axt.set_xticks(xticks)
# plot title
if(index_count == 4):
axr.set_title(
f"azimuth:{azimuth_width*index_azimuth}-{azimuth_width*(index_azimuth+1)}\npage:{ipage}", fontsize=200)
# plot travel times
info = each_plot_trace.info
# z
plot_travel_times(axz, "p", info["p"], np.max(x_obs), "blue")
plot_travel_times(axz, "pp", info["pp"], np.max(x_obs), "y")
plot_travel_times(axz, "sp", info["sp"], np.max(x_obs), "r")
plot_travel_times(
axz, "rayleigh", info["rayleigh"], np.max(x_obs), "c")
plot_travel_times(axz, "s", info["s"], np.max(x_obs), "green")
plot_travel_times(
axz, "ss", info["ss"], np.max(x_obs), "black")
# r
plot_travel_times(axr, "p", info["p"], np.max(x_obs), "blue")
plot_travel_times(axr, "pp", info["pp"], np.max(x_obs), "y")
plot_travel_times(axr, "sp", info["sp"], np.max(x_obs), "r")
plot_travel_times(
axr, "rayleigh", info["rayleigh"], np.max(x_obs), "c")
plot_travel_times(axr, "s", info["s"], np.max(x_obs), "green")
plot_travel_times(
axr, "ss", info["ss"], np.max(x_obs), "black")
# t
plot_travel_times(axt, "s", info["s"], np.max(x_obs), "green")
plot_travel_times(
axt, "ss", info["ss"], np.max(x_obs), "black")
plot_travel_times(
axt, "scs", info["scs"], np.max(x_obs), "magenta")
plot_travel_times(
axt, "love", info["love"], np.max(x_obs), "teal")
if(index_count == 4):
axz.legend(loc='upper right')
axr.legend(loc='upper right')
axt.legend(loc='upper right')
plt.subplots_adjust(wspace=0, hspace=0)
pdf.savefig(fig)
plt.close(fig=fig)
pdf.close()
def plot_travel_times(ax, phasename, traveltime, length, thecolor):
if(traveltime < 1e-6):
return
if(traveltime < length):
ax.scatter(traveltime, 0, color=thecolor, label=phasename, s=9)
def plot_windows(ax, phasename, win, thecolor):
    if win is None:
        return
mapper = {
"p": (3, 4),
"s": (5, 6),
"pp": (7, 8),
"ss": (9, 10),
"sp": (11, 12),
"scs": (13, 14),
"rayleigh": (15, 16),
"love": (17, 18)
}
start_time = win[mapper[phasename][0]]
end_time = win[mapper[phasename][1]]
if(start_time == "None" or end_time == "None"):
return
else:
start_time = float(start_time)
end_time = float(end_time)
ax.axvspan(start_time, end_time, alpha=0.1, color=thecolor)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
ada65d289c521001d259f3753dd35f98479c82ff | 1a04e02811c844ecf53cc041b104667e5c987a09 | /vgrabber/qtgui/tabs/items/finalexam.py | 75cd24590829f15938344be4414a71f826fdba8e | [] | no_license | janjanech/vzdelavanieGui | dff17add6e6946063597d4c1eba5d6d76b6f5374 | b2015f41f7cb1be1ecccf1c4778a91f43f8fba12 | refs/heads/master | 2021-10-24T16:21:24.911817 | 2019-01-15T17:03:49 | 2019-01-15T17:03:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | from PyQt5.QtWidgets import QTreeWidgetItem
class FinalExamItem(QTreeWidgetItem):
def __init__(self, data, final_exam):
super().__init__(data)
self.final_exam = final_exam
| [
"[email protected]"
] | |
2a1e8f91e9c52a31ab379194d4112818a490e643 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/span/vdestdef.py | ec024b57e914be3854e9f75296a3fa2cf918029d | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 8,334 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class VDestDef(Mo):
"""
The definition for the VLAN-based SPAN (VSPAN) destination.
"""
meta = ClassMeta("cobra.model.span.VDestDef")
meta.moClassName = "spanVDestDef"
meta.rnFormat = "vdestdef-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "SPAN Destination"
meta.writeAccessMask = 0x45041000020001
meta.readAccessMask = 0x45041000020001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.span.RsDestToVPortDef")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.span.RsDestToVPort")
meta.childClasses.add("cobra.model.span.VEpgSummaryDef")
meta.childClasses.add("cobra.model.span.CEpDef")
meta.childClasses.add("cobra.model.span.RsDestPathEp")
meta.childClasses.add("cobra.model.span.RsDestEpg")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childClasses.add("cobra.model.span.RsDestApic")
meta.childNamesAndRnPrefix.append(("cobra.model.span.RsDestToVPortDef", "rsdestToVPortDef-"))
meta.childNamesAndRnPrefix.append(("cobra.model.span.RsDestToVPort", "rsdestToVPort-"))
meta.childNamesAndRnPrefix.append(("cobra.model.span.VEpgSummaryDef", "vepgsummarydef"))
meta.childNamesAndRnPrefix.append(("cobra.model.span.RsDestPathEp", "rsdestPathEp-"))
meta.childNamesAndRnPrefix.append(("cobra.model.span.RsDestApic", "rsdestApic"))
meta.childNamesAndRnPrefix.append(("cobra.model.span.RsDestEpg", "rsdestEpg"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.span.CEpDef", "CEpD-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.span.VDestGrpDef")
meta.superClasses.add("cobra.model.fabric.ProtoPol")
meta.superClasses.add("cobra.model.fabric.ProtoInstPol")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Def")
meta.superClasses.add("cobra.model.span.ADest")
meta.superClasses.add("cobra.model.fabric.L2InstPol")
meta.superClasses.add("cobra.model.span.AVDest")
meta.rnPrefixes = [
('vdestdef-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "fstate", "fstate", 15656, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "no-fault"
prop._addConstant("dest-port-member-of-pc-vpc", "dest-port-is-a-member-of-pc/vpc", 7)
prop._addConstant("invalid-dest-apic", "invalid-destination-apic", 8)
prop._addConstant("invalid-dest-epg", "invalid-destination-epg", 2)
prop._addConstant("invalid-dest-epg-type", "dest-epg-is-of-the-wrong-type", 6)
prop._addConstant("invalid-dest-port", "invalid-destination-pathep", 3)
prop._addConstant("no-fault", "nofault", 0)
prop._addConstant("non-shared-route-leak", "non-shared-route-leak", 1)
prop._addConstant("unavailable-dest", "destination-unavailable", 5)
prop._addConstant("unavailable-dest-port", "unavailable-destination-pathep", 4)
meta.props.add("fstate", prop)
prop = PropMeta("str", "fstateMap", "fstateMap", 16812, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("fstateMap", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14657, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 7177, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "name"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Policy"
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
0484b2199fc895b1ac5faa6eeb44803cd770356e | c9c762a08c031bafa1e577733c6af53a7a1b142f | /Project4Proj/Project4App/admin.py | a33a7882886d7d816c15cd8b34a9aed7dc315c9f | [] | no_license | Joshtg1104/Project4-Django-VideoApp | aed635d493125f42482b6f8e2424bec2e93da848 | 46efc176762a2fcb76e743672d3eb9b62f7808b6 | refs/heads/master | 2020-05-29T09:25:13.002571 | 2019-06-12T18:58:27 | 2019-06-12T18:58:27 | 189,061,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from django.contrib import admin
from .models import Video, AccountModel, CommentModel
# Register your models here.
admin.site.register(AccountModel)
admin.site.register(Video)
admin.site.register(CommentModel)
| [
"[email protected]"
] | |
38f27425bfd3df62fce464d36f7d3ee1f274e55e | 912b0c6274e9c5c0956c4d1d1178f2b68fcf4296 | /suod/test/test_base.py | 2a5676b05176b356328d547265bcc359ee0aa19d | [
"BSD-2-Clause"
] | permissive | DreamtaleCore/SUOD | 04f238f48625a3a3e182b19c2af65612954453b2 | 353aeaf8d5e5c338f94686e73a8a0fec88670700 | refs/heads/master | 2022-10-30T04:44:17.617986 | 2020-06-15T21:07:26 | 2020-06-15T21:07:26 | 292,205,263 | 1 | 0 | BSD-2-Clause | 2020-09-02T07:01:50 | 2020-09-02T07:01:49 | null | UTF-8 | Python | false | false | 3,239 | py | # -*- coding: utf-8 -*-
import os
import sys
import unittest
import numpy as np
# temporary solution for relative imports in case pyod is not installed
# if suod
# is installed, no need to use the following line
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from suod.models.base import SUOD
from pyod.utils.data import generate_data
from pyod.models.lof import LOF
from pyod.models.pca import PCA
from pyod.models.hbos import HBOS
from pyod.models.lscp import LSCP
class TestBASE(unittest.TestCase):
def setUp(self):
self.n_train = 1000
self.n_test = 500
self.contamination = 0.1
self.roc_floor = 0.6
self.random_state = 42
self.X_train, self.y_train, self.X_test, self.y_test = generate_data(
n_train=self.n_train, n_test=self.n_test,
contamination=self.contamination, random_state=self.random_state)
self.base_estimators = [
LOF(n_neighbors=5, contamination=self.contamination),
LOF(n_neighbors=15, contamination=self.contamination),
LOF(n_neighbors=25, contamination=self.contamination),
LOF(n_neighbors=35, contamination=self.contamination),
LOF(n_neighbors=45, contamination=self.contamination),
HBOS(contamination=self.contamination),
PCA(contamination=self.contamination),
LSCP(detector_list=[
LOF(n_neighbors=5, contamination=self.contamination),
LOF(n_neighbors=15, contamination=self.contamination)],
random_state=self.random_state)
]
this_directory = os.path.abspath(os.path.dirname(__file__))
self.cost_forecast_loc_fit_ = os.path.join(this_directory,
'bps_train.joblib')
self.cost_forecast_loc_pred_ = os.path.join(this_directory,
'bps_prediction.joblib')
self.model = SUOD(base_estimators=self.base_estimators, n_jobs=2,
rp_flag_global=True, bps_flag=True,
contamination=self.contamination,
approx_flag_global=True,
cost_forecast_loc_fit=self.cost_forecast_loc_fit_,
cost_forecast_loc_pred=self.cost_forecast_loc_pred_)
def test_initialization(self):
self.model.get_params()
self.model.set_params(**{'n_jobs': 4})
    def test_fit(self):
        """
        Test fitting the model on the training data.
        """
        self.model.fit(self.X_train)
def test_approximate(self):
self.model.fit(self.X_train)
self.model.approximate(self.X_train)
def test_predict(self):
self.model.fit(self.X_train)
self.model.approximate(self.X_train)
self.model.predict(self.X_test)
def test_decision_function(self):
self.model.fit(self.X_train)
self.model.approximate(self.X_train)
self.model.decision_function(self.X_test)
def test_predict_proba(self):
self.model.fit(self.X_train)
self.model.approximate(self.X_train)
self.model.predict_proba(self.X_test)
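# Added for convenience, not in the original file: allows running this test
# module directly via `python test_base.py`.
if __name__ == '__main__':
    unittest.main()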
| [
"[email protected]"
] | |
6149f8fede99ffcba481a3bb2377ebd72da708a4 | b697f5d8e441328c2deee1bb5853d80710ae9873 | /944.删列造序.py | d10994932e2f8dd4be69c43b93c603bca7107745 | [] | no_license | happy-luck/LeetCode-python | d06b0f6cf7bad4754e96e6a160e3a8fc495c0f95 | 63fc5a1f6e903a901ba799e77a2ee9df2b05543a | refs/heads/master | 2021-03-22T16:12:52.097329 | 2020-07-15T13:48:37 | 2020-07-15T13:48:37 | 247,381,313 | 0 | 0 | null | 2020-03-15T01:47:42 | 2020-03-15T01:28:38 | null | UTF-8 | Python | false | false | 757 | py | # Method 1:
from typing import List
class Solution:
    def minDeletionSize(self, A: List[str]) -> int:
        D = 0
        # Convert each string into a list of character codes so that columns
        # can be compared numerically.
        A_list = []
        for string in A:
            A_list.append([ord(ch) for ch in string])
        # Count every column that is not sorted from top to bottom.
        for j in range(len(A[0])):
            for i in range(1, len(A)):
                if A_list[i][j] - A_list[i - 1][j] < 0:
                    D += 1
                    break
        return D
# Method 2:
class Solution(object):
    def minDeletionSize(self, A):
        ans = 0
        # zip(*A) yields the columns of the grid one at a time.
        for col in zip(*A):
            if any(col[i] > col[i + 1] for i in range(len(col) - 1)):
                ans += 1
        return ans
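# --- Added usage sketch, not part of the original solutions ------------------
# Both methods define a class named `Solution`, so at module level the later
# definition (Method 2) shadows Method 1. The sample grid below is the
# standard LeetCode example for this problem.
if __name__ == "__main__":
    print(Solution().minDeletionSize(["cba", "daf", "ghi"]))  # column "bah" is unsorted -> 1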
# Time complexity: O(N), where N is the number of elements in array A.
# Space complexity: O(1). | [
"[email protected]"
] | |
c035d9f0fb7bdc18413f6216781d4e272dbf8234 | 208796d60bba301648b76f3fd9af20738aca3ba7 | /neuclease/bin/cleave_server_debug_main.py | 83d3de98a286d0b91481496060849dd94ff1df21 | [
"BSD-3-Clause"
] | permissive | stuarteberg/pydvid | 2afaebeb886b8034852e21668bca6709b022cf0f | ce59988cbc8043c85fe3ba878d4fa415febba2f8 | refs/heads/master | 2021-01-17T20:34:47.843645 | 2019-10-10T19:47:23 | 2019-10-10T19:47:23 | 33,325,656 | 0 | 0 | null | 2015-04-02T18:43:25 | 2015-04-02T18:43:25 | null | UTF-8 | Python | false | false | 2,723 | py | #!/usr/bin/env python3
import sys
import neuclease.cleave_server
def main():
_debug_mode = False
## DEBUG
if len(sys.argv) == 1:
_debug_mode = True
import os
log_dir = os.path.dirname(neuclease.__file__) + '/../logs'
sys.argv += [#"--merge-table", "/magnetic/workspace/neuclease/tiny-merge-table.npy",
#"--mapping-file", "/magnetic/workspace/neuclease/tiny-mapping.npy",
#"--primary-dvid-server", "emdata3:8900",
#"--primary-uuid", "017a",
#"--primary-labelmap-instance", "segmentation",
#"--suspend-before-launch",
"--merge-table", "/tmp/merge-table-5812998448.csv",
"--primary-dvid-server", "emdata1:8900",
"--primary-uuid", "642cfed9e8704d0b83ccca2ee3688528",
"--primary-labelmap-instance", "segmentation",
"--log-dir", log_dir]
neuclease.cleave_server.main(_debug_mode)
## Example requests:
"""
{"body-id": 673509195, "mesh-instance": "segmentation_meshes_tars", "port": 8900, "request-timestamp": "2018-05-10 13:40:56.117063", "seeds": {"1": [675222237], "2": [1266560684], "3": [1142805921], "5": [1329312351], "6": [1328298063], "7": [1264523335], "8": [1233488801, 1358310013], "9": [1357286646]}, "segmentation-instance": "segmentation", "server": "emdata3.int.janelia.org", "user": "bergs", "uuid": "017a"}
{"body-id": 5812980088, "mesh-instance": "segmentation_meshes_tars", "port": 8900, "request-timestamp": "2018-05-10 13:48:32.071343", "seeds": {"1": [299622182, 769164613], "2": [727964335], "3": [1290606913], "4": [485167093], "5": [769514136]}, "segmentation-instance": "segmentation", "server": "emdata3.int.janelia.org", "user": "bergs", "uuid": "017a"}
{"body-id": 5812980124, "mesh-instance": "segmentation_meshes_tars", "port": 8900, "request-timestamp": "2018-05-10 13:51:46.112896", "seeds": {"1": [391090531], "2": [453151532, 515221115, 515221301, 515557950, 515562175, 515562381, 515562454, 546597327, 577632049, 608330428, 608667239, 639701979, 639702027, 639702182, 670736831, 670736971, 670737150, 670737574]}, "segmentation-instance": "segmentation", "server": "emdata3.int.janelia.org", "user": "bergs", "uuid": "017a"}
{"body-id": 5812980898, "mesh-instance": "segmentation_meshes_tars", "port": 8900, "request-timestamp": "2018-05-10 13:54:00.042885", "seeds": {"1": [449551305], "2": [1261194539], "3": [1229822848], "4": [883458155, 883458603], "5": [790693775]}, "segmentation-instance": "segmentation", "server": "emdata3.int.janelia.org", "user": "bergs", "uuid": "017a"}
"""
if __name__ == "__main__":
main()
| [
"[email protected]"
] |