Dataset columns (name: type, observed range):

blob_id: string (length 40)
directory_id: string (length 40)
path: string (length 3 to 616)
content_id: string (length 40)
detected_licenses: list (length 0 to 112)
license_type: string (2 classes)
repo_name: string (length 5 to 115)
snapshot_id: string (length 40)
revision_id: string (length 40)
branch_name: string (777 classes)
visit_date: timestamp[us] (2015-08-06 10:31:46 to 2023-09-06 10:44:38)
revision_date: timestamp[us] (1970-01-01 02:38:32 to 2037-05-03 13:00:00)
committer_date: timestamp[us] (1970-01-01 02:38:32 to 2023-09-06 01:08:06)
github_id: int64 (4.92k to 681M, nullable)
star_events_count: int64 (0 to 209k)
fork_events_count: int64 (0 to 110k)
gha_license_id: string (22 classes)
gha_event_created_at: timestamp[us] (2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable)
gha_created_at: timestamp[us] (2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable)
gha_language: string (149 classes)
src_encoding: string (26 classes)
language: string (1 value)
is_vendor: bool (2 classes)
is_generated: bool (2 classes)
length_bytes: int64 (3 to 10.2M)
extension: string (188 classes)
content: string (length 3 to 10.2M)
authors: list (length 1)
author_id: string (length 1 to 132)
Row 1:
blob_id: e64b04bed60680e2acb04477bef4c8a503e4a2f3
directory_id: 3d39974209f890080456c5f9e60397c505540c64
path: /0x0A-python-inheritance/5-main.py
content_id: df9dd05cb9641b418d71e98f33974e3e81993285
detected_licenses: []
license_type: no_license
repo_name: salmenz/holbertonschool-higher_level_programming
snapshot_id: 293ca44674833b587f1a3aec13896caec4e61ab6
revision_id: 23792f8539db48c8f8200a6cdaf9268d0cb7d4e6
branch_name: refs/heads/master
visit_date: 2020-09-28T11:42:51.264437
revision_date: 2020-05-13T22:56:39
committer_date: 2020-05-13T22:56:39
github_id: 226,771,568
star_events_count: 3
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 150
extension: py
content:
#!/usr/bin/python3
BaseGeometry = __import__('5-base_geometry').BaseGeometry
bg = BaseGeometry()
print(bg)
print(dir(bg))
print(dir(BaseGeometry))
authors: ["[email protected]"]
author_id: (empty)
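The script in this row imports a module that is not part of the row itself. A minimal sketch of 5-base_geometry.py that would satisfy the import (the empty class body is an assumption; the actual Holberton exercise file defines BaseGeometry with more behavior):

# 5-base_geometry.py (hypothetical stub so the script above runs)
class BaseGeometry:
    """Placeholder base class for geometry exercises."""
    pass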
Row 2:
blob_id: 64c4c0966c54ffac9c7ef815fc3a30c92146fa0e
directory_id: a606893da1e354c7c617d0c9247b23118be2813a
path: /动态规划/19.py
content_id: 5b823571231090602205c302b2258b4f90e32ff4
detected_licenses: []
license_type: no_license
repo_name: lindo-zy/leetcode
snapshot_id: 4ce6cb9ded7eeea0a6953b6d8152b5a9657965da
revision_id: f4277c11e620ddd748c2a2f3d9f5f05ee58e5716
branch_name: refs/heads/master
visit_date: 2023-07-22T06:19:00.589026
revision_date: 2023-07-16T12:35:14
committer_date: 2023-07-16T12:35:14
github_id: 229,958,065
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,550
extension: py
content:
#!/usr/bin/python3
# -*- coding:utf-8 -*-
class Solution:
    def isMatch(self, s: str, p: str) -> bool:
        # m = len(s)
        # n = len(p)
        # dp = [[0 for i in range(m)] for i in range(n)]
        # for i in range(m):
        #     for j in range(n):
        #         if s[i] == p[j]:
        #             dp[i][j] = ''
        #         elif s[i] != p[j] and p[j] == '.':
        #             pass
        #         elif s[i] != p[j] and p[j] == '*':
        #             pass
        # return dp[-1][-1] > 0
        m, n = len(s) + 1, len(p) + 1
        dp = [[False] * n for _ in range(m)]
        dp[0][0] = True
        # Initialize the first row: patterns like a*, a*b* can match the empty string
        for j in range(2, n, 2):
            dp[0][j] = dp[0][j - 2] and p[j - 1] == '*'
        # State transition
        for i in range(1, m):
            for j in range(1, n):
                if p[j - 1] == '*':
                    if dp[i][j - 2]:
                        dp[i][j] = True  # 1. '*' repeats the preceding element zero times
                    elif dp[i - 1][j] and s[i - 1] == p[j - 2]:
                        dp[i][j] = True  # 2. '*' absorbs one more matching character
                    elif dp[i - 1][j] and p[j - 2] == '.':
                        dp[i][j] = True  # 3. '.*' absorbs any extra character
                else:
                    if dp[i - 1][j - 1] and s[i - 1] == p[j - 1]:
                        dp[i][j] = True  # 1. characters match exactly
                    elif dp[i - 1][j - 1] and p[j - 1] == '.':
                        dp[i][j] = True  # 2. '.' matches any single character
        return dp[-1][-1]
if __name__ == '__main__':
    s1 = Solution()
    s = 'aab'
    p = 'c*a*b'
    print(s1.isMatch(s, p))
authors: ["[email protected]"]
author_id: (empty)
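A quick way to sanity-check the DP above is to compare it with Python's re.fullmatch, which agrees with these semantics for patterns built from literals, '.' and '*'. A minimal test sketch (the cases are invented for illustration, not from the original repo):

import re

def check(s, p):
    expected = re.fullmatch(p, s) is not None
    got = Solution().isMatch(s, p)
    assert got == expected, (s, p, got, expected)

for s, p in [('aab', 'c*a*b'), ('aa', 'a'), ('ab', '.*'), ('mississippi', 'mis*is*p*.')]:
    check(s, p)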
Row 3:
blob_id: 0788118cd4a51e1d9bc511a7549dacc367f4f878
directory_id: 9ef711e031cc7a10a554fb933bb661292afbdd41
path: /media_handlers.py
content_id: aa4811d5bac3f3410bf742912d39e2c5161bfd7c
detected_licenses: []
license_type: no_license
repo_name: woodenphone/tumblrsagi
snapshot_id: 72ba3873e8177bab6b60ec42d69fee1c0f5a7f65
revision_id: 0b261f56701351182d5a261ab2a1861c1664eb4c
branch_name: refs/heads/master
visit_date: 2021-01-20T07:47:34.121464
revision_date: 2015-12-28T11:17:08
committer_date: 2015-12-28T11:17:08
github_id: 33,968,492
star_events_count: 15
fork_events_count: 3
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 45,476
extension: py
content:
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: User
#
# Created: 05/03/2015
# Copyright: (c) User 2015
# Licence: <your licence>
#-------------------------------------------------------------------------------
# Libraries
import sqlalchemy
import subprocess  # For video and some audio downloads
import urllib  # For encoding audio urls
import re
import logging
# This project
from utils import *
from sql_functions import Media
import sql_functions
import config  # User settings
# Media handler modules
import audio_handlers
import video_handlers
import link_handlers
from image_handlers import *
def replace_links(link_dict, post_dict):
    """Replace all instances of a link in a post with a marker string for whoever does the frontend
    link_dict = {link:hash}
    post_dict = {field:_datastring}
    Return post dict with links replaced
    """
    new_post_dict = dict(post_dict)  # Copy everything so fields without links survive unchanged
    marker_prefix = "%%LINK="
    marker_suffix = "%KNIL%%"
    for link in link_dict:
        for field in new_post_dict:
            # String replacement; write the result back into the copied dict
            new_link_string = marker_prefix + link + marker_suffix
            new_post_dict[field] = new_post_dict[field].replace(link, new_link_string)
    return new_post_dict
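# Example of the marker convention above (values invented for illustration):
#   replace_links({"http://example.com/a.png": "somehash"},
#                 {"caption": 'see <a href="http://example.com/a.png">pic</a>'})
#   returns the caption with the URL wrapped as
#   'see <a href="%%LINK=http://example.com/a.png%KNIL%%">pic</a>'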
def handle_thumbnail(session, post_dict):
    if "thumbnail_url" in post_dict.keys():
        logging.debug("Saving thumbnail")
        thumbnail_link = [post_dict["thumbnail_url"]]
        media_id_list = download_image_links(session, thumbnail_link)
        return media_id_list
    return []  # No thumbnail on this post


def handle_tumblr_photos(session, post_dict):
    """Download the photos section from tumblr posts"""
    # Return if post has no photos
    if "photos" not in post_dict.keys():
        return []
    # Grab photo links from API dict
    photos_list = post_dict["photos"]
    logging.debug("photos_list: " + repr(photos_list))
    photo_url_list = []
    for photo_dict in photos_list:
        # Grab original size url
        logging.debug("photo_dict: " + repr(photo_dict))
        original_size_url = photo_dict["original_size"]["url"]
        logging.debug("original_size_url: " + repr(original_size_url))
        photo_url_list.append(original_size_url)
        if config.save_all_photo_sizes:
            # Grab alt size urls
            alt_sizes_list = photo_dict["alt_sizes"]
            for alt_size_dict in alt_sizes_list:
                alt_size_url = alt_size_dict["url"]
                photo_url_list.append(alt_size_url)
    logging.debug("photo_url_list: " + repr(photo_url_list))
    # Save new photo links
    media_id_list = download_image_links(session, photo_url_list)
    return media_id_list
def save_media(session, post_dict, blog_id=None, blog_settings_dict=None):
    """Main function for saving a post's media.
    Return post dict with links replaced by pointers to the saved file in the database"""
    #logging.info("Saving post media")
    logging.debug("save_media() post_dict" + repr(post_dict))
    if blog_settings_dict is None:  # Allow passing in this data to use fewer DB calls
        blog_settings_dict = sql_functions.get_blog_media_settings(session, blog_id)
    assert(
        (blog_settings_dict["save_videos"] is True) or
        (blog_settings_dict["save_videos"] is False)
    )
    assert(blog_settings_dict["save_videos"] is not None)
    # Save anything not provided directly through the tumblr API (Remote) ex. http://foo.com/image.jpg
    # I.E. Links (<a href = "http://example.com/image.png">blah</a>)
    media_id_list = []
    if blog_settings_dict["save_external_links"]:
        remote_link_id_list = link_handlers.handle_links(session, post_dict, blog_settings_dict)  # TODO FIXME
        media_id_list += remote_link_id_list
    # Save photos sections (Tumblr)
    if blog_settings_dict["save_photos"]:
        tumblr_photos_link_id_list = handle_tumblr_photos(session, post_dict)  # {link:hash}
        media_id_list += tumblr_photos_link_id_list
    # Save videos, both tumblr and youtube (Tumblr & Youtube)
    if blog_settings_dict["save_videos"]:
        video_embed_id_list = video_handlers.handle_video_posts(session, post_dict)
        media_id_list += video_embed_id_list
    # Save audio
    if blog_settings_dict["save_audio"]:
        audio_embed_id_list = audio_handlers.handle_audio_posts(session, post_dict)
        media_id_list += audio_embed_id_list
    logging.debug("media_id_list: " + repr(media_id_list))
    return media_id_list
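# Typical call (sketch; the session object comes from sql_functions.connect_to_db(),
# and post_dict is a tumblr API post dict like the captures in debug() below):
#   media_ids = save_media(session, post_dict, blog_id=1)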
def debug():
    """Code for debugging during programming goes here so everything is logged to file"""
    session = sql_functions.connect_to_db()
    # Debug video DB check
    #sql_functions.check_if_video_in_db(connection,media_url="https://www.youtube.com/embed/lGIEmH3BoyA",youtube_id=None,sha512base64_hash=None,post_id=None)
    #return
    # Debug images
    logging.debug("Debug images")
image_post_dict = {u'highlighted': [], u'reblog_key': u'RSNOnudd', u'format': u'html', u'timestamp': 1401396780, u'note_count': 429, u'tags': [u'porn', u'furry', u'anthro', u'art', u'fantasy', u'compilation', u'myart', u'futa', u'female', u'nude', u'werewolf'], 'link_to_hash_dict': {}, u'photos': [{u'caption': u'My character Gwen, the hermaphrodite Unicorn. Short for I Guinevere.', u'original_size': {u'url': u'http://41.media.tumblr.com/51dc06d26888063e978967b9effdd79d/tumblr_n6csptiJ5u1rzato1o1_1280.jpg', u'width': 1280, u'height': 1739}, u'alt_sizes': [{u'url': u'http://41.media.tumblr.com/51dc06d26888063e978967b9effdd79d/tumblr_n6csptiJ5u1rzato1o1_1280.jpg', u'width': 1280, u'height': 1739}, {u'url': u'http://41.media.tumblr.com/51dc06d26888063e978967b9effdd79d/tumblr_n6csptiJ5u1rzato1o1_500.jpg', u'width': 500, u'height': 679}, {u'url': u'http://41.media.tumblr.com/51dc06d26888063e978967b9effdd79d/tumblr_n6csptiJ5u1rzato1o1_400.jpg', u'width': 400, u'height': 543}, {u'url': u'http://40.media.tumblr.com/51dc06d26888063e978967b9effdd79d/tumblr_n6csptiJ5u1rzato1o1_250.jpg', u'width': 250, u'height': 340}, {u'url': u'http://41.media.tumblr.com/51dc06d26888063e978967b9effdd79d/tumblr_n6csptiJ5u1rzato1o1_100.jpg', u'width': 100, u'height': 136}, {u'url': u'http://41.media.tumblr.com/51dc06d26888063e978967b9effdd79d/tumblr_n6csptiJ5u1rzato1o1_75sq.jpg', u'width': 75, u'height': 75}]}, {u'caption': u'A young man and one of his harem concubines.', u'original_size': {u'url': u'http://40.media.tumblr.com/df5d6e743955acef44262810e7e68196/tumblr_n6csptiJ5u1rzato1o2_1280.jpg', u'width': 1280, u'height': 1037}, u'alt_sizes': [{u'url': u'http://40.media.tumblr.com/df5d6e743955acef44262810e7e68196/tumblr_n6csptiJ5u1rzato1o2_1280.jpg', u'width': 1280, u'height': 1037}, {u'url': u'http://40.media.tumblr.com/df5d6e743955acef44262810e7e68196/tumblr_n6csptiJ5u1rzato1o2_500.jpg', u'width': 500, u'height': 405}, {u'url': u'http://41.media.tumblr.com/df5d6e743955acef44262810e7e68196/tumblr_n6csptiJ5u1rzato1o2_400.jpg', u'width': 400, u'height': 324}, {u'url': u'http://40.media.tumblr.com/df5d6e743955acef44262810e7e68196/tumblr_n6csptiJ5u1rzato1o2_250.jpg', u'width': 250, u'height': 203}, {u'url': u'http://41.media.tumblr.com/df5d6e743955acef44262810e7e68196/tumblr_n6csptiJ5u1rzato1o2_100.jpg', u'width': 100, u'height': 81}, {u'url': u'http://40.media.tumblr.com/df5d6e743955acef44262810e7e68196/tumblr_n6csptiJ5u1rzato1o2_75sq.jpg', u'width': 75, u'height': 75}]}, {u'caption': u'Gift-art for Robotjoe at FA.', u'original_size': {u'url': u'http://40.media.tumblr.com/027e4e40a7b6dd7437ba19bb0bf66394/tumblr_n6csptiJ5u1rzato1o3_1280.jpg', u'width': 1280, u'height': 1280}, u'alt_sizes': [{u'url': u'http://40.media.tumblr.com/027e4e40a7b6dd7437ba19bb0bf66394/tumblr_n6csptiJ5u1rzato1o3_1280.jpg', u'width': 1280, u'height': 1280}, {u'url': u'http://41.media.tumblr.com/027e4e40a7b6dd7437ba19bb0bf66394/tumblr_n6csptiJ5u1rzato1o3_500.jpg', u'width': 500, u'height': 500}, {u'url': u'http://41.media.tumblr.com/027e4e40a7b6dd7437ba19bb0bf66394/tumblr_n6csptiJ5u1rzato1o3_400.jpg', u'width': 400, u'height': 400}, {u'url': u'http://41.media.tumblr.com/027e4e40a7b6dd7437ba19bb0bf66394/tumblr_n6csptiJ5u1rzato1o3_250.jpg', u'width': 250, u'height': 250}, {u'url': u'http://40.media.tumblr.com/027e4e40a7b6dd7437ba19bb0bf66394/tumblr_n6csptiJ5u1rzato1o3_100.jpg', u'width': 100, u'height': 100}, {u'url': u'http://40.media.tumblr.com/027e4e40a7b6dd7437ba19bb0bf66394/tumblr_n6csptiJ5u1rzato1o3_75sq.jpg', u'width': 75, u'height': 75}]}, 
{u'caption': u'Giftart for Ritts at FA.', u'original_size': {u'url': u'http://41.media.tumblr.com/b04099342f13a3aaad3ef8d7f9f3080f/tumblr_n6csptiJ5u1rzato1o4_1280.jpg', u'width': 1280, u'height': 1152}, u'alt_sizes': [{u'url': u'http://41.media.tumblr.com/b04099342f13a3aaad3ef8d7f9f3080f/tumblr_n6csptiJ5u1rzato1o4_1280.jpg', u'width': 1280, u'height': 1152}, {u'url': u'http://40.media.tumblr.com/b04099342f13a3aaad3ef8d7f9f3080f/tumblr_n6csptiJ5u1rzato1o4_500.jpg', u'width': 500, u'height': 450}, {u'url': u'http://40.media.tumblr.com/b04099342f13a3aaad3ef8d7f9f3080f/tumblr_n6csptiJ5u1rzato1o4_400.jpg', u'width': 400, u'height': 360}, {u'url': u'http://40.media.tumblr.com/b04099342f13a3aaad3ef8d7f9f3080f/tumblr_n6csptiJ5u1rzato1o4_250.jpg', u'width': 250, u'height': 225}, {u'url': u'http://40.media.tumblr.com/b04099342f13a3aaad3ef8d7f9f3080f/tumblr_n6csptiJ5u1rzato1o4_100.jpg', u'width': 100, u'height': 90}, {u'url': u'http://41.media.tumblr.com/b04099342f13a3aaad3ef8d7f9f3080f/tumblr_n6csptiJ5u1rzato1o4_75sq.jpg', u'width': 75, u'height': 75}]}, {u'caption': u'', u'original_size': {u'url': u'http://41.media.tumblr.com/96a2f5867ff3269def55cba3ddb42282/tumblr_n6csptiJ5u1rzato1o5_1280.jpg', u'width': 1153, u'height': 1920}, u'alt_sizes': [{u'url': u'http://41.media.tumblr.com/96a2f5867ff3269def55cba3ddb42282/tumblr_n6csptiJ5u1rzato1o5_1280.jpg', u'width': 1153, u'height': 1920}, {u'url': u'http://40.media.tumblr.com/96a2f5867ff3269def55cba3ddb42282/tumblr_n6csptiJ5u1rzato1o5_500.jpg', u'width': 450, u'height': 750}, {u'url': u'http://40.media.tumblr.com/96a2f5867ff3269def55cba3ddb42282/tumblr_n6csptiJ5u1rzato1o5_400.jpg', u'width': 360, u'height': 600}, {u'url': u'http://40.media.tumblr.com/96a2f5867ff3269def55cba3ddb42282/tumblr_n6csptiJ5u1rzato1o5_250.jpg', u'width': 240, u'height': 400}, {u'url': u'http://41.media.tumblr.com/96a2f5867ff3269def55cba3ddb42282/tumblr_n6csptiJ5u1rzato1o5_100.jpg', u'width': 100, u'height': 167}, {u'url': u'http://36.media.tumblr.com/96a2f5867ff3269def55cba3ddb42282/tumblr_n6csptiJ5u1rzato1o5_75sq.jpg', u'width': 75, u'height': 75}]}, {u'caption': u'She shot herself in the face, or did others? 
Up to you.', u'original_size': {u'url': u'http://41.media.tumblr.com/879c064933cf138ae169a152dbd717a4/tumblr_n6csptiJ5u1rzato1o6_1280.jpg', u'width': 841, u'height': 1400}, u'alt_sizes': [{u'url': u'http://41.media.tumblr.com/879c064933cf138ae169a152dbd717a4/tumblr_n6csptiJ5u1rzato1o6_1280.jpg', u'width': 841, u'height': 1400}, {u'url': u'http://40.media.tumblr.com/879c064933cf138ae169a152dbd717a4/tumblr_n6csptiJ5u1rzato1o6_500.jpg', u'width': 451, u'height': 750}, {u'url': u'http://41.media.tumblr.com/879c064933cf138ae169a152dbd717a4/tumblr_n6csptiJ5u1rzato1o6_400.jpg', u'width': 360, u'height': 600}, {u'url': u'http://40.media.tumblr.com/879c064933cf138ae169a152dbd717a4/tumblr_n6csptiJ5u1rzato1o6_250.jpg', u'width': 240, u'height': 400}, {u'url': u'http://40.media.tumblr.com/879c064933cf138ae169a152dbd717a4/tumblr_n6csptiJ5u1rzato1o6_100.jpg', u'width': 100, u'height': 166}, {u'url': u'http://36.media.tumblr.com/879c064933cf138ae169a152dbd717a4/tumblr_n6csptiJ5u1rzato1o6_75sq.jpg', u'width': 75, u'height': 75}]}, {u'caption': u"They're now twins.", u'original_size': {u'url': u'http://36.media.tumblr.com/56df999c75b2ea6e10d9e9e3a4248db6/tumblr_n6csptiJ5u1rzato1o7_1280.jpg', u'width': 1153, u'height': 1920}, u'alt_sizes': [{u'url': u'http://36.media.tumblr.com/56df999c75b2ea6e10d9e9e3a4248db6/tumblr_n6csptiJ5u1rzato1o7_1280.jpg', u'width': 1153, u'height': 1920}, {u'url': u'http://36.media.tumblr.com/56df999c75b2ea6e10d9e9e3a4248db6/tumblr_n6csptiJ5u1rzato1o7_500.jpg', u'width': 450, u'height': 750}, {u'url': u'http://41.media.tumblr.com/56df999c75b2ea6e10d9e9e3a4248db6/tumblr_n6csptiJ5u1rzato1o7_400.jpg', u'width': 360, u'height': 600}, {u'url': u'http://40.media.tumblr.com/56df999c75b2ea6e10d9e9e3a4248db6/tumblr_n6csptiJ5u1rzato1o7_250.jpg', u'width': 240, u'height': 400}, {u'url': u'http://41.media.tumblr.com/56df999c75b2ea6e10d9e9e3a4248db6/tumblr_n6csptiJ5u1rzato1o7_100.jpg', u'width': 100, u'height': 167}, {u'url': u'http://41.media.tumblr.com/56df999c75b2ea6e10d9e9e3a4248db6/tumblr_n6csptiJ5u1rzato1o7_75sq.jpg', u'width': 75, u'height': 75}]}, {u'caption': u'This is knot a funny joke.', u'original_size': {u'url': u'http://36.media.tumblr.com/dd7b3f0723d26d0cf9a5daff5cb82e8a/tumblr_n6csptiJ5u1rzato1o8_1280.jpg', u'width': 1000, u'height': 1000}, u'alt_sizes': [{u'url': u'http://36.media.tumblr.com/dd7b3f0723d26d0cf9a5daff5cb82e8a/tumblr_n6csptiJ5u1rzato1o8_1280.jpg', u'width': 1000, u'height': 1000}, {u'url': u'http://40.media.tumblr.com/dd7b3f0723d26d0cf9a5daff5cb82e8a/tumblr_n6csptiJ5u1rzato1o8_500.jpg', u'width': 500, u'height': 500}, {u'url': u'http://40.media.tumblr.com/dd7b3f0723d26d0cf9a5daff5cb82e8a/tumblr_n6csptiJ5u1rzato1o8_400.jpg', u'width': 400, u'height': 400}, {u'url': u'http://36.media.tumblr.com/dd7b3f0723d26d0cf9a5daff5cb82e8a/tumblr_n6csptiJ5u1rzato1o8_250.jpg', u'width': 250, u'height': 250}, {u'url': u'http://40.media.tumblr.com/dd7b3f0723d26d0cf9a5daff5cb82e8a/tumblr_n6csptiJ5u1rzato1o8_100.jpg', u'width': 100, u'height': 100}, {u'url': u'http://40.media.tumblr.com/dd7b3f0723d26d0cf9a5daff5cb82e8a/tumblr_n6csptiJ5u1rzato1o8_75sq.jpg', u'width': 75, u'height': 75}]}, {u'caption': u'Gift-art for Quillu at FA.', u'original_size': {u'url': u'http://41.media.tumblr.com/e02229ba14b4fb3be2866595e371aaa7/tumblr_n6csptiJ5u1rzato1o9_1280.jpg', u'width': 800, u'height': 1410}, u'alt_sizes': [{u'url': u'http://41.media.tumblr.com/e02229ba14b4fb3be2866595e371aaa7/tumblr_n6csptiJ5u1rzato1o9_1280.jpg', u'width': 800, u'height': 1410}, {u'url': 
u'http://40.media.tumblr.com/e02229ba14b4fb3be2866595e371aaa7/tumblr_n6csptiJ5u1rzato1o9_500.jpg', u'width': 426, u'height': 750}, {u'url': u'http://41.media.tumblr.com/e02229ba14b4fb3be2866595e371aaa7/tumblr_n6csptiJ5u1rzato1o9_400.jpg', u'width': 340, u'height': 600}, {u'url': u'http://41.media.tumblr.com/e02229ba14b4fb3be2866595e371aaa7/tumblr_n6csptiJ5u1rzato1o9_250.jpg', u'width': 227, u'height': 400}, {u'url': u'http://40.media.tumblr.com/e02229ba14b4fb3be2866595e371aaa7/tumblr_n6csptiJ5u1rzato1o9_100.jpg', u'width': 100, u'height': 176}, {u'url': u'http://41.media.tumblr.com/e02229ba14b4fb3be2866595e371aaa7/tumblr_n6csptiJ5u1rzato1o9_75sq.jpg', u'width': 75, u'height': 75}]}, {u'caption': u'Werewolf herm, in heat. Watch out!', u'original_size': {u'url': u'http://36.media.tumblr.com/3a23e75b564f5d790bb440ba2ba6140c/tumblr_n6csptiJ5u1rzato1o10_1280.jpg', u'width': 1280, u'height': 962}, u'alt_sizes': [{u'url': u'http://36.media.tumblr.com/3a23e75b564f5d790bb440ba2ba6140c/tumblr_n6csptiJ5u1rzato1o10_1280.jpg', u'width': 1280, u'height': 962}, {u'url': u'http://41.media.tumblr.com/3a23e75b564f5d790bb440ba2ba6140c/tumblr_n6csptiJ5u1rzato1o10_500.jpg', u'width': 500, u'height': 376}, {u'url': u'http://41.media.tumblr.com/3a23e75b564f5d790bb440ba2ba6140c/tumblr_n6csptiJ5u1rzato1o10_400.jpg', u'width': 400, u'height': 301}, {u'url': u'http://36.media.tumblr.com/3a23e75b564f5d790bb440ba2ba6140c/tumblr_n6csptiJ5u1rzato1o10_250.jpg', u'width': 250, u'height': 188}, {u'url': u'http://36.media.tumblr.com/3a23e75b564f5d790bb440ba2ba6140c/tumblr_n6csptiJ5u1rzato1o10_100.jpg', u'width': 100, u'height': 75}, {u'url': u'http://40.media.tumblr.com/3a23e75b564f5d790bb440ba2ba6140c/tumblr_n6csptiJ5u1rzato1o10_75sq.jpg', u'width': 75, u'height': 75}]}], u'id': 87231460597L, u'post_url': u'http://zaggatar.tumblr.com/post/87231460597/i-thought-i-would-upload-some-of-what-i-think-is', u'caption': u'<p><span>I thought I would upload s</span>ome of what I think is best of my older stuff.</p>\n<p>As you can see, I am guilty for liking horsegirls with big dicks.</p>\n<p>Enjoy.</p>', u'state': u'published', u'short_url': u'http://tmblr.co/Zlxuxu1HFPdJr', u'date': u'2014-05-29 20:53:00 GMT', u'type': u'photo', u'slug': u'i-thought-i-would-upload-some-of-what-i-think-is', u'photoset_layout': u'1111111111', u'blog_name': u'zaggatar'}
    #print flatten(image_post_dict)
    #new_post_dict = save_media(session,image_post_dict)
    #download_image_link(session,"https://derpicdn.net/spns/W1siZiIsIjIwMTQvMDEvMTAvMDJfNDBfMjhfNjUyX2RlcnBpYm9vcnVfYmFubmVyLnBuZyJdXQ.png")
    # Debug audio
    #logging.debug("Debug audio")
#soundcloud_post_dict = {u'reblog_key': u'S6VWj0Cb', u'reblog': {u'comment': u'', u'tree_html': u'<p><a class="tumblr_blog" href="http://waltzforluma.tumblr.com/post/111622677961/or-your-computer-could-overheat-while-youre">waltzforluma</a>:</p><blockquote><p>Or, your computer could overheat while you\u2019re listening to \u201cDeath Rag\u201d from Future Vision, and burst into flames!</p></blockquote>', u'trail': [{u'blog': {u'theme': {u'title_font_weight': u'regular', u'header_full_height': 262, u'title_color': u'#444444', u'header_bounds': u'0,623,262,157', u'background_color': u'#FAFAFA', u'link_color': u'#529ECC', u'header_image_focused': u'http://static.tumblr.com/a50dd34705b42b1479c2535a15461b00/oevxq7m/ZrTn5ivly/tumblr_static_tumblr_static_57wsbbc6rz0g0gk4ww8k884wk_focused_v3.png', u'show_description': True, u'header_full_width': 898, u'avatar_shape': u'circle', u'header_focus_width': 466, u'show_header_image': True, u'body_font': u'Helvetica Neue', u'show_title': True, u'header_stretch': True, u'header_image_scaled': u'http://static.tumblr.com/a50dd34705b42b1479c2535a15461b00/oevxq7m/g9cn5ivlx/tumblr_static_57wsbbc6rz0g0gk4ww8k884wk_2048_v2.png', u'show_avatar': True, u'header_focus_height': 262, u'title_font': u'Garamond Classic FS', u'header_image': u'http://static.tumblr.com/a50dd34705b42b1479c2535a15461b00/oevxq7m/g9cn5ivlx/tumblr_static_57wsbbc6rz0g0gk4ww8k884wk.png'}, u'name': u'waltzforluma'}, u'comment': u'<p>Or, your computer could overheat while you\u2019re listening to \u201cDeath Rag\u201d from Future Vision, and burst into flames!</p>', u'post': {u'id': u'111622677961'}}]}, u'player': u'<iframe src="https://w.soundcloud.com/player/?url=https%3A%2F%2Fapi.soundcloud.com%2Ftracks%2F192213990&visual=true&liking=false&sharing=false&auto_play=false&show_comments=false&continuous_play=false&origin=tumblr" frameborder="0" allowtransparency="true" class="soundcloud_audio_player" width="500" height="500"></iframe>', u'id': 113020390888L, u'post_url': u'http://doscoon.tumblr.com/post/113020390888/waltzforluma-or-your-computer-could-overheat', u'source_title': u'waltzforluma', u'format': u'html', u'highlighted': [], u'state': u'published', u'track_name': u'Steven Universe - Death Rag', u'short_url': u'http://tmblr.co/ZlYOqv1fGYate', u'type': u'audio', u'tags': [], u'timestamp': 1425776404, u'note_count': 1014, u'source_url': u'http://waltzforluma.tumblr.com/post/111622677961/or-your-computer-could-overheat-while-youre', u'date': u'2015-03-08 01:00:04 GMT', u'plays': 38933, u'slug': u'waltzforluma-or-your-computer-could-overheat', u'album_art': u'http://38.media.tumblr.com/tumblr_nk3re1A1Cf1qzqb72_1424489834_cover.jpg', u'blog_name': u'doscoon', u'is_external': True, u'audio_url': u'https://api.soundcloud.com/tracks/192213990/stream?client_id=3cQaPshpEeLqMsNFAUw1Q', u'caption': u'<p><a class="tumblr_blog" href="http://waltzforluma.tumblr.com/post/111622677961/or-your-computer-could-overheat-while-youre">waltzforluma</a>:</p><blockquote><p>Or, your computer could overheat while you\u2019re listening to \u201cDeath Rag\u201d from Future Vision, and burst into flames!</p></blockquote>', u'audio_type': u'soundcloud', u'audio_source_url': u'https://soundcloud.com/aivisura/steven-universe-death-rag', u'embed': u'<iframe src="https://w.soundcloud.com/player/?url=https%3A%2F%2Fapi.soundcloud.com%2Ftracks%2F192213990&visual=true&liking=false&sharing=false&auto_play=false&show_comments=false&continuous_play=false&origin=tumblr" frameborder="0" allowtransparency="true" 
class="soundcloud_audio_player" width="500" height="500"></iframe>'}
    #audio_handlers.handle_soundcloud_audio(session,soundcloud_post_dict)
    # Debug video
    logging.debug("Debug video")
    # Tumblr video
#tumblr_video_post_dict = {u'reblog_key': u'3bqfxHgy', u'short_url': u'http://tmblr.co/Z_sLQw1eYTSqS', u'thumbnail_width': 480, u'player': [{u'width': 250, u'embed_code': u'<iframe width="250" height="140" id="youtube_iframe" src="https://www.youtube.com/embed/tT5pifkZzEk?feature=oembed&enablejsapi=1&origin=http://safe.txmblr.com&wmode=opaque" frameborder="0" allowfullscreen></iframe>'}, {u'width': 400, u'embed_code': u'<iframe width="400" height="225" id="youtube_iframe" src="https://www.youtube.com/embed/tT5pifkZzEk?feature=oembed&enablejsapi=1&origin=http://safe.txmblr.com&wmode=opaque" frameborder="0" allowfullscreen></iframe>'}, {u'width': 500, u'embed_code': u'<iframe width="500" height="281" id="youtube_iframe" src="https://www.youtube.com/embed/tT5pifkZzEk?feature=oembed&enablejsapi=1&origin=http://safe.txmblr.com&wmode=opaque" frameborder="0" allowfullscreen></iframe>'}], u'id': 112247295260L, u'post_url': u'http://tsitra360.tumblr.com/post/112247295260/my-latest-art-timelapse-is-up-see-how-i-drew', u'tags': [], u'highlighted': [], u'state': u'published', u'html5_capable': True, u'type': u'video', u'format': u'html', u'timestamp': 1425068852, u'note_count': 79, u'video_type': u'youtube', u'date': u'2015-02-27 20:27:32 GMT', u'thumbnail_height': 360, u'permalink_url': u'https://www.youtube.com/watch?v=tT5pifkZzEk', u'slug': u'my-latest-art-timelapse-is-up-see-how-i-drew', u'blog_name': u'tsitra360', u'caption': u'<p>My latest art timelapse is up! See how I drew Berry Swirl on my youtube channel.</p>', u'thumbnail_url': u'https://i.ytimg.com/vi/tT5pifkZzEk/hqdefault.jpg'}
    #save_media(session,tumblr_video_post_dict)
    # Youtube
youtube_video_post_dict = {u'reblog_key': u'HfjckfH7', u'short_url': u'http://tmblr.co/ZUGffq1cfuHuJ', u'thumbnail_width': 480, u'player': [{u'width': 250, u'embed_code': u'<iframe width="250" height="140" id="youtube_iframe" src="https://www.youtube.com/embed/lGIEmH3BoyA?feature=oembed&enablejsapi=1&origin=http://safe.txmblr.com&wmode=opaque" frameborder="0" allowfullscreen></iframe>'}, {u'width': 400, u'embed_code': u'<iframe width="400" height="225" id="youtube_iframe" src="https://www.youtube.com/embed/lGIEmH3BoyA?feature=oembed&enablejsapi=1&origin=http://safe.txmblr.com&wmode=opaque" frameborder="0" allowfullscreen></iframe>'}, {u'width': 500, u'embed_code': u'<iframe width="500" height="281" id="youtube_iframe" src="https://www.youtube.com/embed/lGIEmH3BoyA?feature=oembed&enablejsapi=1&origin=http://safe.txmblr.com&wmode=opaque" frameborder="0" allowfullscreen></iframe>'}], u'id': 110224285203L, u'post_url': u'http://askbuttonsmom.tumblr.com/post/110224285203/throwback-can-you-believe-its-been-almost-2yrs', u'tags': [u"button's mom", u'hardcopy', u'song', u'shadyvox'], u'highlighted': [], u'state': u'published', u'html5_capable': True, u'type': u'video', u'format': u'html', u'timestamp': 1423197599, u'note_count': 145, u'video_type': u'youtube', u'date': u'2015-02-06 04:39:59 GMT', u'thumbnail_height': 360, u'permalink_url': u'https://www.youtube.com/watch?v=lGIEmH3BoyA', u'slug': u'throwback-can-you-believe-its-been-almost-2yrs', u'blog_name': u'askbuttonsmom', u'caption': u'<p>Throwback! Can you believe it’s been almost 2yrs since this came out? Mommy’s getting old…</p>', u'thumbnail_url': u'https://i.ytimg.com/vi/lGIEmH3BoyA/hqdefault.jpg'}
youtube_dict_two = {u'highlighted': [], u'reblog_key': u'qO3JnfS7', u'player': [{u'width': 250, u'embed_code': False}, {u'width': 400, u'embed_code': False}, {u'width': 500, u'embed_code': False}], u'format': u'html', u'timestamp': 1390412461, u'note_count': 4282, u'tags': [], u'video_type': u'youtube', u'id': 74184911379L, u'post_url': u'http://askbuttonsmom.tumblr.com/post/74184911379/ask-thecrusaders-bar-buddies-dont-worry', u'caption': u'<p><a class="tumblr_blog" href="http://ask-thecrusaders.tumblr.com/post/74162414750/bar-buddies-dont-worry-neon-you-will-have-your">ask-thecrusaders</a>:</p>\n<blockquote>\n<p><strong>"Bar Buddies"</strong><br/><br/>Dont\u2019 worry Neon, you will have your music video soon enough.</p>\n</blockquote>\n<p>Honestly, that Neon Lights is a TERRIBLE influence!! No son of mine will grow up to be a drunken drug-shooting bass dropping hipster! :C</p>', u'state': u'published', u'html5_capable': False, u'reblog': {u'comment': u'<p>Honestly, that Neon Lights is a TERRIBLE influence!! No son of mine will grow up to be a drunken drug-shooting bass dropping hipster! :C</p>', u'tree_html': u'<p><a class="tumblr_blog" href="http://ask-thecrusaders.tumblr.com/post/74162414750/bar-buddies-dont-worry-neon-you-will-have-your">ask-thecrusaders</a>:</p><blockquote>\n<p><strong>"Bar Buddies"</strong><br/><br/>Dont\u2019 worry Neon, you will have your music video soon enough.</p>\n</blockquote>', u'trail': [{u'blog': {u'theme': {u'title_font_weight': u'bold', u'title_color': u'#444444', u'header_bounds': 0, u'title_font': u'Helvetica Neue', u'link_color': u'#529ECC', u'header_image_focused': u'http://assets.tumblr.com/images/default_header/optica_pattern_04.png?_v=7c4e5e82cf797042596e2e64af1c383f', u'show_description': True, u'show_header_image': True, u'header_stretch': True, u'body_font': u'Helvetica Neue', u'show_title': True, u'header_image_scaled': u'http://assets.tumblr.com/images/default_header/optica_pattern_04.png?_v=7c4e5e82cf797042596e2e64af1c383f', u'avatar_shape': u'circle', u'show_avatar': True, u'background_color': u'#F6F6F6', u'header_image': u'http://assets.tumblr.com/images/default_header/optica_pattern_04.png?_v=7c4e5e82cf797042596e2e64af1c383f'}, u'name': u'ask-thecrusaders'}, u'comment': u'<p><strong>"Bar Buddies"</strong><br><br>Dont\u2019 worry Neon, you will have your music video soon enough.</p>', u'post': {u'id': u'74162414750'}}]}, u'short_url': u'http://tmblr.co/ZUGffq155m_eJ', u'date': u'2014-01-22 17:41:01 GMT', u'type': u'video', u'slug': u'ask-thecrusaders-bar-buddies-dont-worry', u'blog_name': u'askbuttonsmom'}
    #yt_result_1 = save_media(session,youtube_video_post_dict)
    #save_media(session,youtube_dict_two)
    # Vine
vine_dict = {u'reblog_key': u'A5DhHt28', u'reblog': {u'comment': u'<p>Have a nice weekend, Tumblr. </p>', u'tree_html': u'', u'trail': []}, u'placement_id': u'{"i":"mF4avY6GyshXjaQmfk0v","v":4,"t":1427193020,"c":{"p":"113540981790","b":"staff"},"d":{"v":{"e":"hjWIUFOYD31"}},"h":"3291f1aa07"}', u'thumbnail_width': 480, u'player': [{u'width': 250, u'embed_code': u'<iframe class="vine-embed" src="https://vine.co/v/hjWIUFOYD31/embed/simple"width="250" height="250" frameborder="0"></iframe><script async src="//platform.vine.co/static/scripts/embed.js" charset="utf-8"></script>'}, {u'width': 400, u'embed_code': u'<iframe class="vine-embed" src="https://vine.co/v/hjWIUFOYD31/embed/simple"width="400" height="400" frameborder="0"></iframe><script async src="//platform.vine.co/static/scripts/embed.js" charset="utf-8"></script>'}, {u'width': 500, u'embed_code': u'<iframe class="vine-embed" src="https://vine.co/v/hjWIUFOYD31/embed/simple"width="500" height="500" frameborder="0"></iframe><script async src="//platform.vine.co/static/scripts/embed.js" charset="utf-8"></script>'}], u'id': 113540981790L, u'post_url': u'http://staff.tumblr.com/post/113540981790/have-a-nice-weekend-tumblr', u'source_title': u'weloveshortvideos.com', u'tags': [], u'highlighted': [], u'state': u'published', u'short_url': u'http://tmblr.co/ZE5Fby1flaUGU', u'html5_capable': True, u'type': u'video', u'format': u'html', u'timestamp': 1426282797, u'note_count': 48309, u'video_type': u'vine', u'source_url': u'http://weloveshortvideos.com', u'date': u'2015-03-13 21:39:57 GMT', u'thumbnail_height': 480, u'permalink_url': u'https://vine.co/v/hjWIUFOYD31', u'slug': u'have-a-nice-weekend-tumblr', u'blog_name': u'staff', u'caption': u'<p>Have a nice weekend, Tumblr.\xa0</p>', u'thumbnail_url': u'http://v.cdn.vine.co/r/thumbs/FE4C8DC8781008139866036658176_1c16044fdd3.3.4.mp4_l_pAXVyCckNVnk2OzdadqNB_6bq4mYoBHpBFRIF8Hi3OdOW1vmjP1TR075G1ZegT.jpg?versionId=abawWSw4Y_QFv2TKPWz6j8N5y7.6LOGq'}
    #vine_result = save_media(session,vine_dict)
    # Vimeo
vimeo_dict = {u'reblog_key': u'3BuzwM1q', u'reblog': {u'comment': u'', u'tree_html': u'<p><a href="http://robscorner.tumblr.com/post/110250942998/a-hyperfast-preview-video-for-the-kind-of-content" class="tumblr_blog">robscorner</a>:</p><blockquote><p>A hyperfast preview video for the kind of content I\u2019m featuring on Patreon (patreon.com/robaato)! Slower version will be available for my supporters!<br/>MUSIC: The End (T.E.I.N. Pt. 2) | 12th Planet<br/></p><p>Support for high-resolution art, PSDs, process videos, tutorials, character requests, and more!<br/></p></blockquote>', u'trail': [{u'blog': {u'theme': {u'title_font_weight': u'bold', u'header_full_height': 1071, u'title_color': u'#FFFFFF', u'header_bounds': u'92,1581,978,3', u'title_font': u'Gibson', u'link_color': u'#529ECC', u'header_image_focused': u'http://static.tumblr.com/a5a733e78671519e8eb9cf3700ccfb70/ybimlef/1eon5zyi0/tumblr_static_tumblr_static_2df9bnxrqh1c4c8sgk8448s80_focused_v3.jpg', u'show_description': False, u'header_full_width': 1600, u'header_focus_width': 1578, u'header_stretch': True, u'show_header_image': True, u'body_font': u'Helvetica Neue', u'show_title': True, u'header_image_scaled': u'http://static.tumblr.com/cfa3addece89b58093ea0a8a87082653/ybimlef/FWyn5zyhv/tumblr_static_2df9bnxrqh1c4c8sgk8448s80_2048_v2.png', u'avatar_shape': u'square', u'show_avatar': True, u'header_focus_height': 886, u'background_color': u'#337db1', u'header_image': u'http://static.tumblr.com/cfa3addece89b58093ea0a8a87082653/ybimlef/FWyn5zyhv/tumblr_static_2df9bnxrqh1c4c8sgk8448s80.png'}, u'name': u'robscorner'}, u'comment': u'<p>A hyperfast preview video for the kind of content I\u2019m featuring on Patreon (patreon.com/robaato)! Slower version will be available for my supporters!<br>MUSIC: The End (T.E.I.N. Pt. 
2) | 12th Planet<br></p><p>Support for high-resolution art, PSDs, process videos, tutorials, character requests, and more!<br></p>', u'post': {u'id': u'110250942998'}}]}, u'thumbnail_width': 295, u'player': [{u'width': 250, u'embed_code': u'<iframe src="https://player.vimeo.com/video/118912193?title=0&byline=0&portrait=0" width="250" height="156" frameborder="0" title="Hyperfast Preview - Mai (Patreon Process Videos)" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>'}, {u'width': 400, u'embed_code': u'<iframe src="https://player.vimeo.com/video/118912193?title=0&byline=0&portrait=0" width="400" height="250" frameborder="0" title="Hyperfast Preview - Mai (Patreon Process Videos)" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>'}, {u'width': 500, u'embed_code': u'<iframe src="https://player.vimeo.com/video/118912193?title=0&byline=0&portrait=0" width="500" height="312" frameborder="0" title="Hyperfast Preview - Mai (Patreon Process Videos)" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>'}], u'id': 110255840681, u'post_url': u'http://nsfw.kevinsano.com/post/110255840681/robscorner-a-hyperfast-preview-video-for-the-kind', u'source_title': u'robscorner', u'tags': [u'reblog', u'erohua'], u'highlighted': [], u'state': u'published', u'short_url': u'http://tmblr.co/Zo9zBq1chmfsf', u'html5_capable': True, u'type': u'video', u'format': u'html', u'timestamp': 1423238010, u'note_count': 415, u'video_type': u'vimeo', u'source_url': u'http://robscorner.tumblr.com/post/110250942998/a-hyperfast-preview-video-for-the-kind-of-content', u'date': u'2015-02-06 15:53:30 GMT', u'thumbnail_height': 184, u'permalink_url': u'https://vimeo.com/118912193', u'slug': u'robscorner-a-hyperfast-preview-video-for-the-kind', u'blog_name': u'nsfwkevinsano', u'caption': u'<p><a href="http://robscorner.tumblr.com/post/110250942998/a-hyperfast-preview-video-for-the-kind-of-content" class="tumblr_blog">robscorner</a>:</p><blockquote><p>A hyperfast preview video for the kind of content I\u2019m featuring on Patreon (patreon.com/robaato)! Slower version will be available for my supporters!<br/>MUSIC: The End (T.E.I.N. Pt. 2) | 12th Planet<br/></p><p>Support for high-resolution art, PSDs, process videos, tutorials, character requests, and more!<br/></p></blockquote>', u'thumbnail_url': u'https://i.vimeocdn.com/video/506047324_295x166.jpg'}
    #save_media(session,vimeo_dict)
    #video_handlers.handle_video_posts(session,vimeo_dict)
    # Imgur video
imgur_post_dict = {u'highlighted': [], u'reblog_key': u'qX0EtplN', u'player': [{u'width': 250, u'embed_code': u'<iframe class="imgur-embed" width="100%" height="720" frameborder="0" src="http://i.imgur.com/wSBlRyv.gifv#embed"></iframe>'}, {u'width': 400, u'embed_code': u'<iframe class="imgur-embed" width="100%" height="720" frameborder="0" src="http://i.imgur.com/wSBlRyv.gifv#embed"></iframe>'}, {u'width': 500, u'embed_code': u'<iframe class="imgur-embed" width="100%" height="720" frameborder="0" src="http://i.imgur.com/wSBlRyv.gifv#embed"></iframe>'}], u'format': u'html', u'timestamp': 1415466120, u'note_count': 109, u'tags': [], u'thumbnail_width': 0, u'id': 102102282191, u'post_url': u'http://jessicaanner.tumblr.com/post/102102282191/front-view-clothed-large-version-gif-back', u'caption': u'<p><em><strong><a href="http://jessicaanner.tumblr.com/post/101601852991/front-view-clothed-large-version-gif-back">Front View (Clothed)</a> <a href="http://i.imgur.com/fDixfAC.gifv"><span class="auto_link" title="">(Large version)</span></a><a href="http://d.facdn.net/art/benezia/1414952655.benezia_front_armored_optimized.gif"><span class="auto_link" title=""> (GIF)</span></a></strong></em><br/><em><strong><a href="http://jessicaanner.tumblr.com/post/101666148721/front-view-clothed-large-version-gif-back">Back View (Clothed)</a> <a href="http://i.imgur.com/QYfRNeQ.gifv" title="">(Large version)</a> <a href="http://d.facdn.net/art/benezia/1415012804.benezia_back_armored_optimized.gif">(GIF)</a></strong></em><br/><em><strong><a href="http://jessicaanner.tumblr.com/post/101768307896/front-view-clothed-large-version-gif-back">Front View (Nude)</a> <a href="http://i.imgur.com/0N7ir7o.gifv">(Large version)</a> <a href="http://d.facdn.net/art/benezia/1415120393.benezia_front_nude_optimized.gif" title="">(GIF)</a></strong></em><br/><em><strong><a href="http://jessicaanner.tumblr.com/post/101852253284/front-view-clothed-large-version-gif-back">Back View (Nude)</a> <a href="http://i.imgur.com/sP5h9ux.gifv" title="">(Large version)</a> <a href="http://d.facdn.net/art/benezia/1415120590.benezia_back_nude_optimized.gif" title="">(GIF)</a></strong></em><br/><strong><em><a href="http://jessicaanner.tumblr.com/post/101934955336/front-view-clothed-large-version-gif-back">Buttocks Closeup View</a> <a href="http://i.imgur.com/BXMYuxk.gifv" title="">(Large version)</a> <a href="http://i.imgur.com/3bhzRP2.gif">(GIF)</a></em></strong><br/><em><strong><a href="http://jessicaanner.tumblr.com/post/102102282191/front-view-clothed-large-version-gif-back">Crotch Closeup View</a> <a href="http://i.imgur.com/wSBlRyv.gifv">(Large version)</a> <a href="http://i.imgur.com/UiDU1XB.gif">(GIF)</a></strong></em><br/><em><strong><a href="http://jessicaanner.tumblr.com/post/102017653601/front-view-clothed-large-version-gif-back">Bust Closeup View</a> <a href="http://i.imgur.com/S5M6PID.gifv">(Large version)</a> <a href="http://i.imgur.com/BlMYohP.gif">(GIF)</a></strong></em></p>', u'state': u'published', u'html5_capable': False, u'video_type': u'unknown', u'short_url': u'http://tmblr.co/ZLO7Om1V5nI-F', u'date': u'2014-11-08 17:02:00 GMT', u'thumbnail_height': 0, u'thumbnail_url': u'', u'type': u'video', u'slug': u'front-view-clothed-large-version-gif-back', u'blog_name': u'jessicaanner'}
    #imgur_result = save_media(session,imgur_post_dict)
    # Livestream
livestream_post_dict ={u'reblog_key': u'oapXWQlr', u'reblog': {u'comment': u'<p><span>To reiterate: this an </span><strong>only</strong><span> and an </span><strong>exclusive </strong><span>and it </span><strong>starts in just a few minutes</strong><span>. Hurry on over. </span></p>', u'tree_html': u'<p><a class="tumblr_blog" href="http://92y.tumblr.com/post/101031505431/watch-the-92y-livestream-of-game-of-thrones">92y</a>:</p><blockquote>\n<p>Watch the 92Y Livestream of <strong>Game of Thrones</strong> creator <strong>George R.R. Martin, TONIGHT at 8\xa0pm ET</strong>, in his <strong>only</strong> public U.S. appearance for the release of <a href="http://www.amazon.com/gp/product/B00EGMGGVK/ref=as_li_tl?ie=UTF8&camp=1789&creative=390957&creativeASIN=B00EGMGGVK&linkCode=as2&tag=92y-20&linkId=V3MMY57QIQ7QVFNK"><em>The World of Ice and Fire: The Untold History of Westeros and the Game of Thrones</em></a>. Exclusively on Tumblr!</p>\n</blockquote>', u'trail': [{u'blog': {u'theme': {u'title_font_weight': u'bold', u'title_color': u'#444444', u'header_bounds': u'', u'title_font': u'Gibson', u'link_color': u'#529ECC', u'header_image_focused': u'http://assets.tumblr.com/images/default_header/optica_pattern_13_focused_v3.png?_v=2f4063be1dd2ee91e4eca54332e25191', u'show_description': True, u'show_header_image': True, u'header_stretch': True, u'body_font': u'Helvetica Neue', u'show_title': True, u'header_image_scaled': u'http://assets.tumblr.com/images/default_header/optica_pattern_13_focused_v3.png?_v=2f4063be1dd2ee91e4eca54332e25191', u'avatar_shape': u'square', u'show_avatar': True, u'background_color': u'#FAFAFA', u'header_image': u'http://assets.tumblr.com/images/default_header/optica_pattern_13.png?_v=2f4063be1dd2ee91e4eca54332e25191'}, u'name': u'92y'}, u'comment': u'<p>Watch the 92Y Livestream of <strong>Game of Thrones</strong> creator <strong>George R.R. Martin, TONIGHT at 8\xa0pm ET</strong>, in his <strong>only</strong> public U.S. appearance for the release of <a href="http://www.amazon.com/gp/product/B00EGMGGVK/ref=as_li_tl?ie=UTF8&camp=1789&creative=390957&creativeASIN=B00EGMGGVK&linkCode=as2&tag=92y-20&linkId=V3MMY57QIQ7QVFNK"><em>The World of Ice and Fire: The Untold History of Westeros and the Game of Thrones</em></a>. 
Exclusively on Tumblr!</p>', u'post': {u'id': u'101031505431'}}]}, u'thumbnail_width': 0, u'player': [{u'width': 250, u'embed_code': u'<iframe src="http://new.livestream.com/accounts/1249127/events/3464519/player?width=560&height=315&autoPlay=true&mute=false" width="250" height="140" frameborder="0" scrolling="no"> </iframe>'}, {u'width': 400, u'embed_code': u'<iframe src="http://new.livestream.com/accounts/1249127/events/3464519/player?width=560&height=315&autoPlay=true&mute=false" width="400" height="225" frameborder="0" scrolling="no"> </iframe>'}, {u'width': 500, u'embed_code': u'<iframe src="http://new.livestream.com/accounts/1249127/events/3464519/player?width=560&height=315&autoPlay=true&mute=false" width="500" height="281" frameborder="0" scrolling="no"> </iframe>'}], u'id': 101038462325, u'post_url': u'http://staff.tumblr.com/post/101038462325/92y-watch-the-92y-livestream-of-game-of-thrones', u'tags': [], u'highlighted': [], u'state': u'published', u'short_url': u'http://tmblr.co/ZE5Fby1U6N9Lr', u'html5_capable': False, u'type': u'video', u'format': u'html', u'timestamp': 1414366397, u'note_count': 917, u'video_type': u'unknown', u'date': u'2014-10-26 23:33:17 GMT', u'thumbnail_height': 0, u'slug': u'92y-watch-the-92y-livestream-of-game-of-thrones', u'blog_name': u'staff', u'caption': u'<p><a class="tumblr_blog" href="http://92y.tumblr.com/post/101031505431/watch-the-92y-livestream-of-game-of-thrones">92y</a>:</p>\n<blockquote>\n<p>Watch the 92Y Livestream of <strong>Game of Thrones</strong> creator <strong>George R.R. Martin, TONIGHT at 8\xa0pm ET</strong>, in his <strong>only</strong> public U.S. appearance for the release of <a href="http://www.amazon.com/gp/product/B00EGMGGVK/ref=as_li_tl?ie=UTF8&camp=1789&creative=390957&creativeASIN=B00EGMGGVK&linkCode=as2&tag=92y-20&linkId=V3MMY57QIQ7QVFNK"><em>The World of Ice and Fire: The Untold History of Westeros and the Game of Thrones</em></a>. Exclusively on Tumblr!</p>\n</blockquote>\n<p><span>To reiterate: this an\xa0</span><strong>only</strong><span>\xa0and an\xa0</span><strong>exclusive\xa0</strong><span>and it\xa0</span><strong>starts in just a few minutes</strong><span>. Hurry on over. \xa0</span></p>', u'thumbnail_url': u''}
    #livestream_result = save_media(session,livestream_post_dict)
    # Yahoo video
yahoo_post_dict = {u'reblog_key': u'GGWw7A77', u'reblog': {u'comment': u'<p>It’s really happening!</p>', u'tree_html': u'<p><a class="tumblr_blog" href="http://whitehouse.tumblr.com/post/88396016693/obamairl">whitehouse</a>:</p><blockquote>\n<p>President Obama is answering your questions on education and college affordability in his first-ever Tumblr Q&A today.</p>\n<p>Tune in right here at 4 p.m. ET, and make sure to follow us @<a class="tumblelog" href="http://tmblr.co/mWgXp6TEB4GEsC_jKXfrSvw">whitehouse</a>.</p>\n</blockquote>', u'trail': [{u'blog': {u'theme': {u'title_font_weight': u'bold', u'header_full_height': 1056, u'title_color': u'#444444', u'header_bounds': u'43,1500,887,0', u'title_font': u'Gibson', u'link_color': u'#529ECC', u'header_image_focused': u'http://static.tumblr.com/861cd9f032b93a7ace681b4fcb7d05e4/mjqkjev/pEEn56435/tumblr_static_tumblr_static_17trsnvc8xes0og8kgk88coc0_focused_v3.jpg', u'show_description': True, u'header_full_width': 1500, u'header_focus_width': 1500, u'header_stretch': True, u'show_header_image': True, u'body_font': u'Helvetica Neue', u'show_title': True, u'header_image_scaled': u'http://static.tumblr.com/861cd9f032b93a7ace681b4fcb7d05e4/mjqkjev/sgln56432/tumblr_static_17trsnvc8xes0og8kgk88coc0_2048_v2.jpg', u'avatar_shape': u'square', u'show_avatar': True, u'header_focus_height': 844, u'background_color': u'#FAFAFA', u'header_image': u'http://static.tumblr.com/861cd9f032b93a7ace681b4fcb7d05e4/mjqkjev/sgln56432/tumblr_static_17trsnvc8xes0og8kgk88coc0.jpg'}, u'name': u'whitehouse'}, u'comment': u'<p>President Obama is answering your questions on education and college affordability in his first-ever Tumblr Q&A today.</p>\n<p>Tune in right here at 4 p.m. ET, and make sure to follow us @<a class="tumblelog" href="http://tmblr.co/mWgXp6TEB4GEsC_jKXfrSvw">whitehouse</a>.</p>', u'post': {u'id': u'88396016693'}}]}, u'thumbnail_width': 320, u'player': [{u'width': 250, u'embed_code': u'<iframe width="250" height="140" src="https://news.yahoo.com/video/tumblr-goes-white-house-190000218.html?format=embed" frameborder="0" allowfullscreen></iframe>'}, {u'width': 400, u'embed_code': u'<iframe width="400" height="225" src="https://news.yahoo.com/video/tumblr-goes-white-house-190000218.html?format=embed" frameborder="0" allowfullscreen></iframe>'}, {u'width': 500, u'embed_code': u'<iframe width="500" height="281" src="https://news.yahoo.com/video/tumblr-goes-white-house-190000218.html?format=embed" frameborder="0" allowfullscreen></iframe>'}], u'id': 88400573880, u'post_url': u'http://staff.tumblr.com/post/88400573880/whitehouse-president-obama-is-answering-your', u'tags': [u'ObamaIRL'], u'highlighted': [], u'state': u'published', u'short_url': u'http://tmblr.co/ZE5Fby1IL5RMu', u'html5_capable': True, u'type': u'video', u'format': u'html', u'timestamp': 1402430040, u'note_count': 9899, u'video_type': u'yahoo', u'date': u'2014-06-10 19:54:00 GMT', u'thumbnail_height': 180, u'permalink_url': u'https://news.yahoo.com/video/tumblr-goes-white-house-190000218.html', u'slug': u'whitehouse-president-obama-is-answering-your', u'blog_name': u'staff', u'caption': u'<p><a class="tumblr_blog" href="http://whitehouse.tumblr.com/post/88396016693/obamairl">whitehouse</a>:</p>\n<blockquote>\n<p>President Obama is answering your questions on education and college affordability in his first-ever Tumblr Q&A today.</p>\n<p>Tune in right here at 4 p.m. 
ET, and make sure to follow us @<a class="tumblelog" href="http://tmblr.co/mWgXp6TEB4GEsC_jKXfrSvw">whitehouse</a>.</p>\n</blockquote>\n<p>It\u2019s really happening!</p>', u'thumbnail_url': u'https://s1.yimg.com/uu/api/res/1.2/JW58D_.UFfRLkBOrIemIXw--/dz0zMjA7c209MTtmaT1maWxsO3B5b2ZmPTA7aD0xODA7YXBwaWQ9eXRhY2h5b24-/http://l.yimg.com/os/publish-images/ivy/2014-06-10/912811c0-f0c6-11e3-bb53-bd3ad1c7b3ec_06102014_tumblr_white_house.jpg'}
    yahoo_result = save_media(session,yahoo_post_dict)
    logging.debug("Closing DB session")
    session.commit()
    return
def main():
    try:
        setup_logging(log_file_path=os.path.join("debug", "media-handlers-log.txt"))
        debug()
    except Exception as e:  # Log fatal exceptions
        logging.critical("Unhandled exception!")
        logging.exception(e)
        return


if __name__ == '__main__':
    main()
authors: ["[email protected]"]
author_id: (empty)
Row 4:
blob_id: 9affb7daf8c9cbd553358fe630f1221b9be2311b
directory_id: cb181d1bd709faff629203c057809615ef4cf02e
path: /chembl_extras/management/commands/generate_ora2pg_conf.py
content_id: 75c1685a4614bc20938312e5beac34024625a3b1
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: chembl/chembl_extras
snapshot_id: 78361e3e65c00a166aaf793fac2cdf105a021af0
revision_id: ed4f4782d77b10f76984a7fbe18642cdb015c2de
branch_name: refs/heads/master
visit_date: 2021-01-25T08:54:18.946778
revision_date: 2017-05-10T12:42:22
committer_date: 2017-05-10T12:42:22
github_id: 27,765,269
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_event_created_at: 2015-03-03T13:20:24
gha_created_at: 2014-12-09T12:20:49
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,784
extension: py
content:
__author__ = 'mnowotka'
import os
from django.core.management.base import BaseCommand
from optparse import make_option
from django.db import DEFAULT_DB_ALIAS
from django.conf import settings
from django import db
from django.db import connections
from collections import OrderedDict
from django.core.serializers import sort_dependencies
# ----------------------------------------------------------------------------------------------------------------------
class Command(BaseCommand):
    help = "Prepare configuration file for ora2pg tool."
    args = '[appname appname.ModelName ...]'
    confTemplate = '''
ORACLE_HOME %s
ORACLE_DSN dbi:Oracle:host=%s;sid=%s;port=%s
ORACLE_USER %s
ORACLE_PWD %s
SCHEMA %s
TABLES %s
USER_GRANTS 1
DEBUG 0
EXPORT_SCHEMA 0
COMPILE_SCHEMA 0
TYPE DATA
DATA_LIMIT %s
CASE_SENSITIVE 0
OUTPUT %s
DATA_TYPE DATE:date,LONG:text,LONG RAW:bytea,CLOB:text,NCLOB:text,BLOB:bytea,BFILE:bytea,RAW:bytea,ROWID:oid,FLOAT:double precision,DEC:decimal,DECIMAL:decimal,DOUBLE PRECISION:double precision,INT:integer,INTEGER:integer,REAL:real,SMALLINT:smallint,BINARY_FLOAT:double precision,BINARY_DOUBLE:double precision,TIMESTAMP:timestamp,TIMESTAMP WITH TIME ZONE:timestamp with time zone,TIMESTAMP WITH LOCAL TIME ZONE:timestamp
BZIP2 /bin/bzip2
GEN_USER_PWD 0
FKEY_DEFERRABLE 0
DEFER_FKEY 0
DROP_FKEY 0
DROP_INDEXES 0
PG_NUMERIC_TYPE 0
DEFAULT_NUMERIC NUMERIC
KEEP_PKEY_NAMES 1
DISABLE_TABLE_TRIGGERS 1
NOESCAPE 0
DISABLE_SEQUENCE 0
ORA_SENSITIVE 0
PLSQL_PGSQL 1
ORA_RESERVED_WORDS audit,comment
FILE_PER_CONSTRAINT 0
FILE_PER_INDEX 0
FILE_PER_TABLE 0
TRANSACTION serializable
PG_SUPPORTS_WHEN 1
PG_SUPPORTS_INSTEADOF 0
FILE_PER_FUNCTION 0
TRUNCATE_TABLE 0
FORCE_OWNER 0
STANDARD_CONFORMING_STRINGS 0
THREAD_COUNT 0
ALLOW_CODE_BREAK 1
XML_PRETTY 1
FDW_SERVER orcl
ENABLE_MICROSECOND 0
DISABLE_COMMENT 1
'''
    # ----------------------------------------------------------------------------------------------------------------------
    def add_arguments(self, parser):
        parser.add_argument('--database', dest='sourceDatabase', default=DEFAULT_DB_ALIAS, help='Source database')
        parser.add_argument('--dumpfile', dest='dumpfile', default=None, help='Location of dump file.')
        parser.add_argument('--datalimit', dest='dataLimit', default=10000, help='Data limit')
        parser.add_argument('--app', dest='app', default='chembl_migration_model', help='App to be exported')

    # ----------------------------------------------------------------------------------------------------------------------
    def handle(self, *args, **options):
        from django.apps import apps
        # TODO: Check export mode
        db.reset_queries()
        sourceDatabase = options.get('sourceDatabase')
        dataLimit = options.get('dataLimit')
        app = apps.get_app(options.get('app'))
        con = connections[sourceDatabase]
        if con.vendor != 'oracle':
            print("Source database has to be oracle.")
            return
        user = settings.DATABASES[sourceDatabase]['USER']
        passwd = settings.DATABASES[sourceDatabase]['PASSWORD']
        host = settings.DATABASES[sourceDatabase]['HOST']
        port = settings.DATABASES[sourceDatabase]['PORT']
        name = settings.DATABASES[sourceDatabase]['NAME']
        app_list = OrderedDict((app, None) for app in [app])
        tables = []
        sorted_models = sort_dependencies(app_list.items())  # Renamed to avoid shadowing the built-in sorted()
        lastObjectName = sorted_models[-1].__name__
        filename = lastObjectName + ".postgresql_psycopg2.sql"
        chemblSQLPath = os.path.join(os.path.dirname(app.__file__), 'sql', filename)
        location = chemblSQLPath
        oracleHome = os.environ['ORACLE_HOME']
        if options.get('dumpfile'):
            if not options.get('dumpfile').endswith('.sql'):
                location = os.path.join(options.get('dumpfile'), filename)
            else:
                location = options.get('dumpfile')
        for model in reversed(sorted_models):
            if not model._meta.managed:
                continue
            tables.append(model._meta.db_table)
        print(self.confTemplate % (oracleHome, host, name, port, user, passwd, user, " ".join(tables), dataLimit, location))
        if location != chemblSQLPath:
            print("different! location = " + location + ", chemblSQLPath = " + chemblSQLPath)
            f = open(location, 'w')
            f.close()
            os.symlink(location, chemblSQLPath)
# ----------------------------------------------------------------------------------------------------------------------
authors: ["[email protected]"]
author_id: (empty)
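The command prints the generated configuration to stdout, so a typical invocation redirects it into a file for ora2pg. A sketch (the database alias and paths are assumptions; the command name follows from the module path above):

python manage.py generate_ora2pg_conf --database=oracle_default --dumpfile=/tmp/chembl_dump.sql > ora2pg.conf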
Row 5:
blob_id: 233541efecb6c96e50e2995c89b653c1a25dc2b0
directory_id: d2f6e0da6eabd19c00ac6fadb077d891659afd49
path: /nlstruct/core/text.py
content_id: c0ea364a58801929033d1c333d666a9c4e3a677f
detected_licenses: ["MIT"]
license_type: permissive
repo_name: xtannier/nlstruct
snapshot_id: a159a0340dac542ff2f2ffb62d1c6c1ad29a84f9
revision_id: c19f765f13f13f22cb95465e292caa5f56ead39d
branch_name: refs/heads/master
visit_date: 2021-02-14T23:24:30.410705
revision_date: 2020-03-04T09:18:46
committer_date: 2020-03-04T09:18:46
github_id: 244,844,528
star_events_count: 0
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2020-03-04T08:18:39
gha_created_at: 2020-03-04T08:18:38
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 25,634
extension: py
content:
import re
from itertools import repeat
import numpy as np
import pandas as pd
from tqdm import tqdm
from nlstruct.core.cache import cached
from nlstruct.core.pandas import make_merged_names_map, merge_with_spans, make_id_from_merged, flatten
def make_tag_scheme(length, entity, scheme='bio'):
    if scheme == "bio":
        return [f"B-{entity}", *(f"I-{entity}" for _ in range(length - 1))]
    raise ValueError(f"'{scheme}' scheme is not supported")
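# For example (illustrative): make_tag_scheme(3, "PER") returns
# ['B-PER', 'I-PER', 'I-PER'], the BIO tags for a 3-token entity.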
class DeltaCollection(object):
    def __init__(self, begins, ends, deltas):
        self.begins = np.asarray(begins, dtype=int)
        self.ends = np.asarray(ends, dtype=int)
        self.deltas = np.asarray(deltas, dtype=int)

    @classmethod
    def from_absolute(cls, begins, ends, deltas):
        deltas = np.asarray(deltas)
        shift = np.roll(deltas, 1)
        shift[0] = 0
        deltas -= shift
        return DeltaCollection(begins, ends, deltas)

    def __repr__(self):
        return "DeltaCollection([{}], [{}], [{}])".format(", ".join(map(str, self.begins)),
                                                          ", ".join(map(str, self.ends)),
                                                          ", ".join(map(str, self.deltas)))

    def apply(self, positions, side='left'):
        positions = np.asarray(positions)
        to_add = ((positions.reshape(-1, 1) >= self.ends.reshape(1, -1)) * self.deltas).sum(axis=1)
        between = np.logical_and(self.begins.reshape(1, -1) < positions.reshape(-1, 1),
                                 positions.reshape(-1, 1) < self.ends.reshape(1, -1))
        between_mask = between.any(axis=1)
        between = between[between_mask]
        between_i = between.argmax(axis=1)
        if side == 'right':
            to_add[between_mask] += self.ends[between_i] - positions[between_mask] + self.deltas[between_i]
        elif side == 'left':
            to_add[between_mask] += self.begins[between_i] - positions[between_mask]
        return positions + to_add

    def unapply(self, positions, side='left'):
        positions = np.asarray(positions)
        begins = self.apply(self.begins, side='left')
        ends = self.apply(self.ends, side='right')
        to_remove = -((positions.reshape(-1, 1) >= ends.reshape(1, -1)) * self.deltas).sum(axis=1)
        between = np.logical_and(begins.reshape(1, -1) < positions.reshape(-1, 1),
                                 positions.reshape(-1, 1) < ends.reshape(1, -1))
        between_mask = between.any(axis=1)
        between = between[between_mask]
        between_i = between.argmax(axis=1)
        if side == 'right':
            to_remove[between_mask] += ends[between_i] - positions[between_mask] - self.deltas[between_i]
        elif side == 'left':
            to_remove[between_mask] += begins[between_i] - positions[between_mask]
        pos = positions + to_remove
        return pos

    def __add__(self, other):
        if len(self.begins) == 0:
            return other
        if len(other.begins) == 0:
            return self
        begins = self.unapply(other.begins, side='left')
        ends = self.unapply(other.ends, side='right')
        new_begins = np.concatenate([begins, self.begins])
        new_ends = np.concatenate([ends, self.ends])
        new_deltas = np.concatenate([other.deltas, self.deltas])
        sorter = np.lexsort((new_ends, new_begins))
        return DeltaCollection(new_begins[sorter], new_ends[sorter], new_deltas[sorter])
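# Illustrative round trip (values assumed, not from the original tests): after an
# edit that replaced "foo" (span 0..3) with "f", i.e. an absolute delta of -2,
# positions in the old text map forward with apply() and back with unapply():
#   dc = DeltaCollection.from_absolute([0], [3], [-2])
#   dc.apply([5])    # -> array([3])
#   dc.unapply([3])  # -> array([5])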
def make_str_from_groups(replacement, groups):
    for i, group in enumerate(groups):
        replacement = replacement.replace(f"\\{i+1}", group)
    return replacement
def regex_sub_with_spans(pattern, replacement, text):
    needed_groups = [int(i) for i in re.findall(r"\\([0-9]+)", replacement)]
    begins = []
    ends = []
    deltas = []
    for match in reversed(list(re.finditer(pattern, text))):
        middle = make_str_from_groups(replacement, [match.group(i) for i in needed_groups])
        start = match.start()
        end = match.end()
        text = text[:start] + middle + text[end:]
        begins.append(start)
        ends.append(end)
        deltas.append(len(middle) - end + start)
    return text, DeltaCollection(begins, ends, deltas)
def regex_multisub_with_spans(patterns, replacements, text):
deltas = DeltaCollection([], [], [])
for pattern, replacement in zip(patterns, replacements):
text, new_deltas = regex_sub_with_spans(pattern, replacement, text)
        # `deltas` starts as an empty DeltaCollection, and __add__ on an empty
        # collection simply returns the other operand, so we can always accumulate
        deltas += new_deltas
return text, deltas
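# A short sketch (hypothetical pattern and text):
#
#   new_text, deltas = regex_sub_with_spans(r"(\d+) km", r"\1 kilometers",
#                                           "ran 5 km today")
#   new_text          # -> "ran 5 kilometers today"
#   deltas.apply([9]) # -> array([17]): the "t" of "today" moved 8 chars right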
def transform_text(dataset,
global_patterns=None,
global_replacements=None, return_deltas=True, with_tqdm=False):
assert (global_patterns is None) == (global_replacements is None)
expand_deltas = lambda x: (x[0], tuple(x[1].begins), tuple(x[1].ends), tuple(x[1].deltas))
if global_patterns is None:
global_patterns = []
global_replacements = []
if return_deltas:
text, delta_begins, delta_ends, deltas = zip(*[
expand_deltas(regex_multisub_with_spans(
[*doc_patterns, *global_patterns],
[*doc_replacements, *global_replacements],
text
)) for text, doc_patterns, doc_replacements in
(tqdm if with_tqdm else lambda x: x)(zip(
dataset["text"],
dataset["patterns"] if "patterns" in dataset.columns else repeat([]),
dataset["replacements"] if "replacements" in dataset.columns else repeat([])))
])
dataset = pd.DataFrame({
"text": text,
"begin": delta_begins,
"end": delta_ends,
"delta": deltas,
**{c: dataset[c] for c in dataset.columns if c not in ("text", "begin", "end", "delta")}
})
return (
dataset[[c for c in dataset.columns if c not in ("begin", "end", "delta")]],
flatten(dataset[["doc_id", "begin", "end", "delta"]]))
else:
new_texts = []
for text, doc_patterns, doc_replacements in (tqdm if with_tqdm else lambda x: x)(zip(
dataset["text"],
dataset["patterns"] if "patterns" in dataset.columns else repeat([]),
dataset["replacements"] if "replacements" in dataset.columns else repeat([]))):
for pattern, replacement in zip([*doc_patterns, *global_patterns], [*doc_replacements, *global_replacements]):
text = re.sub(pattern, replacement, text)
new_texts.append(text)
dataset = pd.DataFrame({"text": new_texts,
**{c: dataset[c] for c in dataset.columns if c not in ("text",)}})
return dataset[[c for c in dataset.columns if c not in ("begin", "end", "delta")]]
def apply_deltas(positions, deltas, on, position_columns=None):
if not isinstance(on, (tuple, list)):
on = [on]
if position_columns is None:
position_columns = {'begin': 'left', 'end': 'right'}
positions = positions.copy()
positions['_id_col'] = np.arange(len(positions))
mention_deltas = merge_with_spans(positions[[*position_columns, *on, '_id_col']], deltas, on=on,
suffixes=('_pos', '_delta'), how='inner')
# To be faster, we remove categorical columns (they may only be in 'on') before the remaining ops
mention_deltas = mention_deltas[[c for c in mention_deltas.columns if c not in on]]
positions = positions.set_index('_id_col')
mention_deltas = mention_deltas.set_index('_id_col')
delta_col_map, positions_col_map = make_merged_names_map(deltas.columns, [*position_columns, *on, '_id_col'],
left_on=on, right_on=on, suffixes=('_delta', '_pos'))
for col, side in position_columns.items():
mention_deltas.eval(f"shift = ({delta_col_map['end']} <= {positions_col_map[col]}) * {delta_col_map['delta']}",
inplace=True)
mention_deltas.eval(
f"between_magnet = {delta_col_map['begin']} < {positions_col_map[col]} and {positions_col_map[col]} < {delta_col_map['end']}",
inplace=True)
if side == "left":
mention_deltas.eval(
f"between_magnet = between_magnet * ({delta_col_map['begin']} - {positions_col_map[col]})",
inplace=True)
elif side == "right":
mention_deltas.eval(
f"between_magnet = between_magnet * ({delta_col_map['end']} + {delta_col_map['delta']} - {positions_col_map[col]})",
inplace=True)
order = "first" if side == "left" else "last"
tmp = mention_deltas.sort_values(['_id_col', delta_col_map['begin' if side == 'left' else 'end']]).groupby(
'_id_col').agg({
"shift": "sum",
**{n: order for n in mention_deltas.columns if n not in ("shift", "_id_col")}})
positions[col] = positions[col].add(tmp['shift'] + tmp['between_magnet'], fill_value=0)
positions = positions.reset_index(drop=True)
return positions
def reverse_deltas(positions, deltas, on, position_columns=None):
if not isinstance(on, (tuple, list)):
on = [on]
if position_columns is None:
position_columns = {'begin': 'left', 'end': 'right'}
positions = positions.copy()
positions['_id_col'] = np.arange(len(positions))
deltas = apply_deltas(deltas, deltas, on, position_columns={'begin': 'left', 'end': 'right'})
mention_deltas = merge_with_spans(positions[[*position_columns, *on, '_id_col']], deltas, on=on,
suffixes=('_pos', '_delta'), how='left')
positions = positions.set_index('_id_col')
mention_deltas = mention_deltas.set_index('_id_col')
# To be faster, we remove categorical columns (they may only be in 'on') before the remaining ops
# mention_deltas = mention_deltas[[c for c in mention_deltas.columns if c not in on]]
delta_col_map, positions_col_map = make_merged_names_map(deltas.columns, [*position_columns, *on, '_id_col'],
left_on=on, right_on=on, suffixes=('_delta', '_pos'))
for col, side in position_columns.items():
mention_deltas.eval(
f"shift = ({delta_col_map['end']} <= {positions_col_map[col]}) * (-{delta_col_map['delta']})",
inplace=True)
mention_deltas.eval(
f"between_magnet = {delta_col_map['begin']} < {positions_col_map[col]} and {positions_col_map[col]} < {delta_col_map['end']}",
inplace=True)
if side == "left":
mention_deltas.eval(
f"between_magnet = between_magnet * ({delta_col_map['begin']} - {positions_col_map[col]})",
inplace=True)
elif side == "right":
mention_deltas.eval(
f"between_magnet = between_magnet * ({delta_col_map['end']} - {delta_col_map['delta']} - {positions_col_map[col]})",
inplace=True)
order = "first" if side == "left" else "last"
tmp = mention_deltas.sort_values(['_id_col', delta_col_map['begin' if side == 'left' else 'end']])
tmp = tmp.groupby('_id_col').agg({
"shift": "sum",
**{n: order for n in mention_deltas.columns if n not in ("shift", "_id_col")}})
positions[col] = positions[col].add(tmp['shift'] + tmp['between_magnet'], fill_value=0)
positions = positions.reset_index(drop=True)
return positions
def preprocess_ids(large, small, large_id_cols=None, small_id_cols=None):
# Define on which columns we're going to operate
if small_id_cols is None:
small_id_cols = [c for c in small.columns if c.endswith("_id") and c not in ("begin", "end")]
if large_id_cols is None:
large_id_cols = [c for c in large.columns if c.endswith("_id") and c not in ("begin", "end")]
doc_id_cols = [c for c in small.columns if c.endswith("_id") and c in large.columns and c not in ("begin", "end")]
return (
doc_id_cols,
[c for c in small_id_cols if c not in doc_id_cols],
[c for c in large_id_cols if c not in doc_id_cols],
[c for c in small.columns if c not in small_id_cols and c not in ("begin", "end") and c not in doc_id_cols],
[c for c in large.columns if c not in large_id_cols and c not in ("begin", "end") and c not in doc_id_cols])
def encode_as_tag(small, large, label_cols=None, tag_scheme="bio", use_token_idx=False, verbose=0):
"""
Parameters
----------
small: tokens
large: mentions
small_id_cols: token id cols (doc_id, token_pos)
large_id_cols: mention id cols (doc_id, mention_id, mention_part_id)
label_cols: "label"
use_token_idx: Use token pos instead of char spans, defaults to False
verbose: int
If verbose > 0, make progress bar
Returns
-------
pd.DataFrame
"""
assert tag_scheme in ("bio", "bioul", "raw")
doc_id_cols, small_id_cols, large_id_cols, small_val_cols, large_val_cols = preprocess_ids(large, small)
# assert len(large_val_cols) < 2, "Cannot encode more than one column as tags"
assert len(large_val_cols) > 0, "Must have a column to encode as tags"
if label_cols is None:
label_cols = large_val_cols
if isinstance(label_cols, str):
label_cols = [label_cols]
# Map mentions to small as a tag
large = large.sort_values([*doc_id_cols, "begin", "end"])
if use_token_idx:
merged = merge_with_spans(large, small[[*doc_id_cols, *small_id_cols, *(c for c in small_val_cols if c != "token_idx"), "token_idx"]], on=doc_id_cols, suffixes=('_large', '')).query("begin <= token_idx and token_idx < end")
else:
merged = merge_with_spans(large, small, span_policy='partial_strict', on=[*doc_id_cols, ("begin", "end")], suffixes=('_large', ''))
    # If a token overlaps multiple mentions, assign it to the last mention
merged = merged.drop_duplicates([*doc_id_cols, *small_id_cols], keep='last')
merged_id_cols = doc_id_cols + large_id_cols + small_id_cols
# Encode mention labels as a tag
tags = (merged[merged_id_cols + label_cols]
.sort_values(merged_id_cols))
if tag_scheme != "raw":
if verbose > 0:
n_groups = len(tags.groupby(doc_id_cols + large_id_cols + label_cols, as_index=False, observed=True))
bar = tqdm(total=n_groups)
else:
            bar = memoryview(b'')  # memoryview supports the context-manager protocol, so this is a no-op "with" target
with bar:
keep_cols = list(set(doc_id_cols + large_id_cols) - set(label_cols))
tags = (
# convert all categorical dtypes of group cols as simple types (np.str, np.int, np.object...)
# to accelerate concatenation inside the groupby
tags.astype({k: dtype if not hasattr(dtype, 'categories') else dtype.categories.dtype for k, dtype in tags.dtypes[keep_cols].items()})
.groupby(doc_id_cols + large_id_cols + label_cols, as_index=False, observed=True)
.apply(lambda group: (bar.update(1) if verbose > 0 else False) or group.assign(**{
label_col: make_tag_scheme(len(group[small_id_cols[0]]), group[label_col].iloc[0], tag_scheme)
for label_col in label_cols
}))
                # convert back each group column dtype to its original categorical dtype
.astype(tags.dtypes[keep_cols])
)
merged = merged[[*merged_id_cols, *small_val_cols, "begin", "end"]].merge(tags)
merged = small.merge(merged[doc_id_cols + small_id_cols + label_cols], on=doc_id_cols + small_id_cols, how="left")
label_categories = {}
if tag_scheme != "raw":
try:
for label_col in label_cols:
unique_labels = list(set(large[label_col])) if not hasattr(large[label_col], 'cat') else large[label_col].cat.categories
label_categories[label_col] = unique_labels
merged[label_col] = merged[label_col].fillna("O").astype(pd.CategoricalDtype(
["O", *(tag for label in unique_labels for tag in ("B-" + str(label), "I-" + str(label)))] if tag_scheme == "bio" else
["O", *(tag for label in unique_labels for tag in ("B-" + str(label), "I-" + str(label), "U-" + str(label), "L-" + str(label)))]
))
        except Exception as e:
            raise Exception(f"Error occurred during the encoding of label column '{label_col}'") from e
# return small[doc_id_cols + small_id_cols].merge(merged, how='left')
return merged, label_categories
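# Hedged usage sketch for encode_as_tag (the column names are assumptions that
# follow the "*_id" convention expected by preprocess_ids above):
#
#   tokens:   doc_id | token_id | begin | end
#   mentions: doc_id | mention_id | begin | end | label
#
#   tagged, categories = encode_as_tag(tokens, mentions,
#                                      label_cols="label", tag_scheme="bio")
#   # `tagged` has one row per token, its "label" column filled with
#   # O / B-x / I-x tags, and `categories` maps each label column to its
#   # original label values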
def partition_spans(smalls, large,
overlap_policy="merge_large",
new_id_name="sample_id", span_policy="partial_strict"):
"""
Parameters
----------
smalls: pd.DataFrame[begin, end, ...]
Ex: tokens
large: pd.DataFrame[begin, end, ...]
Ex: sentences
    overlap_policy: str or bool
        One of
        - merge_large:
            Keeps small untouched but merges large spans that overlap the same small span
            ex: partition_spans(mentions, sentences) -> merges sentences
        - split_small:
            Truncates each small span to its intersection with the large span it overlaps
            ex: partition_spans(tokens, sentences) -> cut tokens at sentence boundaries
        - small_to_leftmost_large:
            Keeps small and large untouched, and assigns small to the leftmost large that overlaps it
            ex: partition_spans(tokens, mentions) -> assign token to the leftmost mention that touches it
        - small_to_rightmost_large:
            Keeps small and large untouched, and assigns small to the rightmost large that overlaps it
            ex: partition_spans(tokens, mentions) -> assign token to the rightmost mention that touches it
        - small_to_biggest_overlap_large:
            Keeps small and large untouched, and assigns small to the large span that overlaps it the most
            ex: partition_spans(tokens, mentions) -> assign token to the mention that overlaps it the most
        - False
            do nothing and allow multiple matchings between small and large
    new_id_name: str
        If overlap_policy == "merge_large", this is the column that will host the newly created ids per merge
    span_policy:
        Which policy to use to detect span overlaps

    Returns
    -------
    (list of pd.DataFrame, pd.DataFrame, pd.DataFrame)
        The small dataframes with begin/end made relative to their large span,
        the (possibly merged) large spans, and the old-to-new id mapping
    """
assert overlap_policy in ("merge_large",
"split_small",
"small_to_leftmost_large",
"small_to_rightmost_large",
"small_to_biggest_overlap_large", False), f"Unknown small overlap policy '{overlap_policy}'"
    if not isinstance(smalls, (list, tuple)):
        smalls = [smalls]
    assert len(smalls) >= 1
merged_id_cols = doc_id_cols = None
if overlap_policy == "merge_large":
original_new_id_name = new_id_name
while new_id_name in large.columns:
new_id_name = "_" + new_id_name
large = large.copy()
old_to_new = None
has_created_new_id_col = False
for small in smalls:
doc_id_cols, small_id_cols, large_id_cols, small_val_cols, large_val_cols = preprocess_ids(large, small)
large_id_cols = [c for c in large_id_cols]
# Merge sentences and mentions
merged = merge_with_spans(small, large, span_policy=span_policy, how='right', on=[*doc_id_cols, ("begin", "end")])
            # If a mention overlaps multiple sentences, assign it to the last sentence
small_ids = merged[doc_id_cols + small_id_cols].nlstruct.factorize(group_nans=False)
if has_created_new_id_col:
large_ids = merged[doc_id_cols + [new_id_name]].nlstruct.factorize(group_nans=False)
else:
large_ids = merged[doc_id_cols + large_id_cols].nlstruct.factorize(group_nans=False)
merged[new_id_name] = make_id_from_merged(
large_ids,
small_ids,
apply_on=[(0, large_ids)])[0]
merged["begin"] = merged[['begin_x', 'begin_y']].min(axis=1)
merged["end"] = merged[['end_x', 'end_y']].max(axis=1)
large = (merged
.groupby(new_id_name, as_index=False, observed=True)
.agg({**{n: 'first' for n in [*doc_id_cols, *large_id_cols] if n != new_id_name}, 'begin': 'min', 'end': 'max'})
.astype({"begin": int, "end": int, **large[doc_id_cols].dtypes}))
large = large[doc_id_cols + [new_id_name] + ["begin", "end"]]
old_to_new = large[doc_id_cols + [new_id_name]].drop_duplicates().reset_index(drop=True)
merged_id_cols = [new_id_name]
# large[original_new_id_name] = large[doc_id_cols + [new_id_name]].apply(lambda x: "/".join(map(str, x[doc_id_cols])) + "/" + str(x[new_id_name]), axis=1).astype("category")
# large = large.drop(columns={*doc_id_cols, new_id_name} - {original_new_id_name})
else:
original_new_id_name = None
# merged = merged.drop_duplicates([*doc_id_cols, *small_id_cols], keep=overlap_policy)
doc_id_cols, small_id_cols, large_id_cols, small_val_cols, large_val_cols = preprocess_ids(large, smalls[0])
merged_id_cols = large_id_cols
new_id_name = None
old_to_new = None
# Merge sentences and mentions
new_smalls = []
for small in smalls:
doc_id_cols, small_id_cols, large_id_cols, small_val_cols, large_val_cols = preprocess_ids(large, small)
merged = merge_with_spans(small, large[doc_id_cols + large_id_cols + ['begin', 'end']],
how='inner', span_policy=span_policy, on=[*doc_id_cols, ("begin", "end")])
if overlap_policy == "small_to_biggest_overlap_large":
merged = merged.sort_values([*doc_id_cols, *small_id_cols, 'overlap_size_0']).drop_duplicates([*doc_id_cols, *small_id_cols], keep="last")
elif overlap_policy == "small_to_leftmost_large":
merged = merged.sort_values([*doc_id_cols, *small_id_cols, 'begin_y']).drop_duplicates([*doc_id_cols, *small_id_cols], keep="first")
elif overlap_policy == "small_to_rightmost_large":
merged = merged.sort_values([*doc_id_cols, *small_id_cols, 'begin_y']).drop_duplicates([*doc_id_cols, *small_id_cols], keep="last")
elif overlap_policy == "split_small":
merged = merged.assign(begin_x=np.maximum(merged['begin_x'], merged['begin_y']),
end_x=np.minimum(merged['end_x'], merged['end_y']))
new_small = (
merged.assign(begin=merged["begin_x"] - merged["begin_y"], end=merged["end_x"] - merged["begin_y"])
.astype({"begin": int, "end": int})[[*doc_id_cols, *(merged_id_cols or ()), *small_id_cols, *small_val_cols, "begin", "end"]])
if new_id_name:
new_small[original_new_id_name] = new_small[list(set((*doc_id_cols, new_id_name)))].apply(
lambda x: "/".join([str(x[c]) for c in list(doc_id_cols) + ([new_id_name] if new_id_name not in doc_id_cols else [])]), axis=1)
new_small = new_small.drop(columns={*doc_id_cols, new_id_name} - {original_new_id_name})
new_smalls.append(new_small)
if original_new_id_name:
if new_id_name:
large[original_new_id_name] = large[doc_id_cols + [new_id_name]].apply(lambda x: "/".join(map(str, x[doc_id_cols])) + "/" + str(x[new_id_name]), axis=1)
large = large.drop(columns={*doc_id_cols, new_id_name} - {original_new_id_name})
new_doc_id_cols = [c if c != original_new_id_name else f'_{c}' for c in doc_id_cols]
(old_to_new[original_new_id_name],
old_to_new[new_doc_id_cols],
) = (
old_to_new[doc_id_cols + [new_id_name]].apply(lambda x: "/".join(map(str, x[doc_id_cols])) + "/" + str(x[new_id_name]), axis=1),
old_to_new[doc_id_cols]
)
if new_id_name not in (*new_doc_id_cols, original_new_id_name):
del old_to_new[new_id_name]
new_smalls = [small.astype({original_new_id_name: large[original_new_id_name].dtype}) for small in new_smalls]
return new_smalls, large, old_to_new
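# Hedged usage sketch for partition_spans (dataframe names are assumptions
# taken from the docstring examples):
#
#   [rel_mentions], merged_sentences, old_to_new = partition_spans(
#       [mentions], sentences,
#       overlap_policy="merge_large", new_id_name="sample_id")
#   # mention begin/end are now relative to the sentence that contains them,
#   # and sentences sharing a mention were merged under a new sample_id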
def split_into_spans(large, small, overlap_policy="split_small", pos_col=None):
"""
Parameters
----------
    large: pd.DataFrame[begin, end, ...]
        Any big span, like a sentence or a mention, that needs to be cut into pieces
    small: pd.DataFrame[begin, end, ...]
        Any small span that can subdivide a large mention: typically tokens
    overlap_policy: str
        cf partition_spans docstring
        If two large spans overlap the same token, what should we do?
    pos_col: str
        Column containing the precomputed index of the small spans (=tokens) in a document

    Returns
    -------
    pd.DataFrame
        Large, but with begin and end columns expressed in token units
    """
if pos_col is None:
pos_col = next(iter(c for c in small.columns if c.endswith("_pos")))
[small] = partition_spans([small], large, overlap_policy=overlap_policy)[0]
doc_id_cols, small_id_cols, large_id_cols, small_val_cols, large_val_cols = preprocess_ids(large, small)
res = large[[*doc_id_cols, *large_id_cols, *large_val_cols]].merge(
small
.eval(f"""
begin={pos_col}
end={pos_col} + 1""")
.groupby(doc_id_cols, as_index=False, observed=True)
.agg({"begin": "min", "end": "max"})
)
return res
|
[
"[email protected]"
] | |
8ff509e1d54882e1f374b3f4cf168afc0af9d237
|
e2ae5c6d1d3ff9c512d526b1b4d7d7b64d50e87d
|
/py/day07_1.py
|
9fb763fa4478e8eca3a622575cc7278524b4272d
|
[] |
no_license
|
wfeng1991/learnpy
|
59ed66d0abc2947c2f73c0bfe3901ef45ba5eb56
|
e5b018493bbd12edcdcd0434f35d9c358106d391
|
refs/heads/master
| 2021-01-23T07:35:08.376547 | 2018-09-28T02:16:31 | 2018-09-28T02:16:31 | 86,430,476 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 107 |
py
|
with open('day01.py','r',encoding='utf-8') as f:
for line in f.readlines():
print(line.strip())
|
[
"[email protected]"
] | |
1f73255d352061d5d5de367ce1cde91ab143216a
|
373035950bdc8956cc0b74675aea2d1857263129
|
/spar_python/query_generation/generators/keyword_query_generator_test.py
|
1a3ad343f9a5a7f147d582faaaeaa4e2f1435ce0
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
limkokholefork/SPARTA
|
5d122cd2e920775d61a5404688aabbafa164f22e
|
6eeb28b2dd147088b6e851876b36eeba3e700f16
|
refs/heads/master
| 2021-11-11T21:09:38.366985 | 2017-06-02T16:21:48 | 2017-06-02T16:21:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,503 |
py
|
# *****************************************************************
# Copyright 2013 MIT Lincoln Laboratory
# Project: SPAR
# Authors: ATLH
# Description: Tests for keyword_query_generator
#
# Modifications:
# Date Name Modification
# ---- ---- ------------
# 6 August 2012 ATLH Original version
# *****************************************************************
from __future__ import division
import os
import sys
this_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = os.path.join(this_dir, '..', '..', '..')
sys.path.append(base_dir)
import unittest
import time
import keyword_query_generator as kqg
import spar_python.common.spar_random as spar_random
import spar_python.common.distributions.text_generator as text_generator
import StringIO as s
import spar_python.query_generation.query_schema as qs
import spar_python.data_generation.spar_variables as sv
class KeywordQueryGeneratorTest(unittest.TestCase):
def setUp(self):
self.seed = int(time.time())
self.seed_msg = "Random seed used for this test: %s" % self.seed
self.longMessage = True
spar_random.seed(self.seed)
#set up intitialization values
sub_cat = 'word'
f = s.StringIO('''Buck had accepted the rope with quiet dignity. To be sure, it
was an unwonted performance: but he had learned to trust in men he knew, and to
give them credit for a wisdom that outreached his own. But when the ends
of the ropes were placed in the strangers hands, he growled menacingly.
He had merely intimated his displeasure, in his pride believing that to
intimate was to command. But to his surprise the rope tightened around
his neck, shutting off his breath. In quick rage he sprang at the man,
who met him halfway, grappled him close by the throat, and with a deft
twist threw him over on his back. Then the rope tightened mercilessly,
while Buck struggled in a fury, his tongue lolling out of his mouth and
his great chest panting futilely. Never in all his life had he been so
vilely treated, and never in all his life had he been so angry. But his
strength ebbed, his eyes glazed, and he knew nothing when the train was
flagged and the two men threw him into the baggage car.''')
self._kw_dist = text_generator.TextGenerator((f,))
fields = [sv.VARS.NOTES3]
dists = [self._kw_dist]
other_fields = ['no_queries', 'rss','keyword_len','type']
other_cols = [[3, 60, 4, 'word'], [3, 60, 5, 'word'],
[3, 75, 4, 'stem'], [3, 60, 5, 'stem']]
self.generator = kqg.KeywordQueryGenerator('P3',sub_cat, ["LL"],dists, fields, 1000,
100, other_fields, other_cols)
@unittest.skip("Sporadically fails, not sure why")
def testGenerateQuery(self):
"""
Tests equality query generator against a 'db' to make sure it is
generating the right queries
"""
#generate a 'db' to test against
notes = [self._kw_dist.generate(125) for _ in xrange(1000)]
#generate queries
query_batches = self.generator.produce_query_batches()
queries = []
for query_batch in query_batches:
queries += query_batch.produce_queries()
#check to see right number of queries generated
self.assertGreaterEqual(len(queries), 6, self.seed_msg)
        #check queries against 'db' to make sure they match within a factor
        #of four (the assertion below accepts counts in [LRSS/4, URSS*4])
word = 0
stem = 0
working_queries = 0
non_working_queries = []
for q in queries:
if q[qs.QRY_TYPE] == 'word':
x = lambda generated_text: \
generated_text.contains_upper(q[qs.QRY_SEARCHFOR])
word +=1
elif q[qs.QRY_TYPE] == 'stem':
x = lambda generated_text: \
generated_text.contains_stem(q[qs.QRY_SEARCHFOR])
stem +=1
count_match = len([note for note in notes if x(note)])
msg = 'Query %d was: \n' \
'sub_cat: %s\n'\
'field: %s\n'\
'type: %s\n'\
'rss: %d\n'\
'value: %s\n' % (q[qs.QRY_QID], q[qs.QRY_SUBCAT],
q[qs.QRY_FIELD], q[qs.QRY_TYPE],
q[qs.QRY_RSS], q[qs.QRY_SEARCHFOR])
if count_match <= q[qs.QRY_URSS]*4 and count_match >= q[qs.QRY_LRSS]/4:
working_queries+=1
else:
non_working_queries.append(msg)
fail_msg = ''
for msg in non_working_queries[:3]:
fail_msg += msg
self.assertGreaterEqual(working_queries, 6, fail_msg)
#check to see each field had the correct number of queries
#ideally this number would be greater than 6 (the requested amount)
#but because the distribution used for unit testing is so small
#there is a greater margin of error at this scale
self.assertGreaterEqual(word, 3, self.seed_msg)
self.assertGreaterEqual(stem, 3, self.seed_msg)
|
[
"[email protected]"
] | |
32015bbff11b11145a125a55d2a4a1aa07262ac3
|
f9b30e3406d23569c5b6dd4a778454683a72744b
|
/editor/views/timeline.py
|
a959317d8333fd9cf176e1430b9df22b9978ac13
|
[
"CC-BY-SA-3.0",
"Apache-2.0"
] |
permissive
|
numbas/editor
|
65b0644f28192180b83ab18a9ed09886b4c0ce6b
|
c11a5ae11f013d63114535a8f0b0f3ec635c8bd5
|
refs/heads/master
| 2023-08-17T07:34:00.283142 | 2023-08-16T13:44:54 | 2023-08-16T13:44:54 | 3,493,021 | 65 | 83 |
Apache-2.0
| 2023-07-19T08:27:36 | 2012-02-20T11:20:15 |
JavaScript
|
UTF-8
|
Python
| false | false | 2,253 |
py
|
import json
from editor.views.generic import user_json, stamp_json, comment_json
from editor.models import TimelineItem
from django.views import generic
from django import http
from django.urls import reverse
event_json_views = {
'stamp': stamp_json,
'comment': comment_json,
}
def event_json(event, viewed_by):
date = event.date.strftime('%Y-%m-%d %H:%M:%S')
user = user_json(event.user)
if event.type not in event_json_views:
raise Exception("Unrecognised event type %s" % event.type)
data = event_json_views[event.type](event.data, viewed_by=viewed_by)
return {
'date': date,
'type': event.type,
'data': data,
'user': user,
}
def timeline_json(events, viewed_by):
return [event_json(event, viewed_by) for event in events]
class DeleteTimelineItemView(generic.DeleteView):
model = TimelineItem
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
return self.try_delete()
def form_valid(self, form):
return self.try_delete()
def try_delete(self):
if self.object.can_be_deleted_by(self.request.user):
self.object.delete()
return http.HttpResponse('timeline item {} deleted'.format(self.object.pk))
else:
return http.HttpResponseForbidden('You don\'t have the necessary access rights.')
class HideTimelineItemView(generic.UpdateView):
model = TimelineItem
fields = []
http_method_names = ['post', 'head', 'options', 'trace']
def post(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.hidden_by.add(self.request.user)
data = {
'success': True,
'undo': reverse('timelineitem_unhide', args=(self.object.pk,))
}
return http.HttpResponse(json.dumps(data), content_type='application/json')
class UnhideTimelineItemView(generic.UpdateView):
model = TimelineItem
def post(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.hidden_by.remove(self.request.user)
data = {
'success': True,
}
return http.HttpResponse(json.dumps(data), content_type='application/json')
|
[
"[email protected]"
] | |
926176b80e39090a452bfcef204049145d25a362
|
68f757e7be32235c73e316888ee65a41c48ecd4e
|
/python_book(이것이 코딩테스트다)/조합문제 예시 p486.py
|
3e5176b472357fc8ad7125c6f28c9d12695f6b86
|
[] |
no_license
|
leejongcheal/algorithm_python
|
b346fcdbe9b1fdee33f689477f983a63cf1557dc
|
f5d9bc468cab8de07b9853c97c3db983e6965d8f
|
refs/heads/master
| 2022-03-05T20:16:21.437936 | 2022-03-03T01:28:36 | 2022-03-03T01:28:36 | 246,039,901 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,313 |
py
|
"""조합 문제 예시
L개의 비번갯수와 C개의 알파벳 입력받음
최소 모음 1개이상자음 2개이상으로 정렬순으로 L개의 암호를 가진것을 알파벳순으로 출력
combinations의 기본
- 반환값 : 특정 주소를 가지는 조합타입 [(),(),()..]식으로 반환
- 원소값 : 튜플형식을 가짐
반환값에 대해서 리스트형으로 형변환과 원소값도 리스트로 형변환 하는게 편하다.
문자열에대한 조합 사용 예시
2개로 나누어서 풀었는데 그냥 다 뽑은 다음에 모음과 자음 갯수를 검사해서 출력하는식으로 푸는게 훨씬 편했을듯
"""
import sys, itertools
input = sys.stdin.readline
L, C = map(int, input().rstrip().split())
result = []
alpa = list(input().rstrip().split())
mo = []
ja = []
for i in alpa:
if i in "aeoui":
mo.append(i)
else:
ja.append(i)
mo.sort()
for i in range(1, len(mo) + 1):
if L - i < 2:
break
mo_result = list(itertools.combinations(mo, i))
ja_result = list(itertools.combinations(ja, L - i))
for mo_data in mo_result:
for ja_data in ja_result:
temp = ""
temp = list(mo_data) + list(ja_data)
result.append(sorted(temp))
result.sort()
for r in result:
print("".join(r))
|
[
"[email protected]"
] | |
8f482032abe72bd653d2038e495eca19f4fa7f93
|
89207f4e5c5a8fd955adf775a553c32359a0cae8
|
/test.py
|
84b1107cbbfbcc2f81877535be972409c2ed3e10
|
[
"BSD-4-Clause"
] |
permissive
|
ikbear/seven-cow
|
065161f811c465f0ce1579471bf9a0ba4fc1105d
|
4a6fc7392b2feddf67d7f338794758bdc19379a9
|
refs/heads/master
| 2021-01-16T17:50:41.438442 | 2013-06-27T09:25:16 | 2013-06-27T09:25:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,586 |
py
|
import os
from sevencow import Cow
class Test(object):
def setUp(self):
ACCESS_KEY = os.environ['QINIU_ACCESS_KEY']
SECRET_KEY = os.environ['QINIU_SECRET_KEY']
bucket = os.environ['QINIU_BUCKET']
cow = Cow(ACCESS_KEY, SECRET_KEY)
self.b = cow.get_bucket(bucket)
for i in range(3):
with open('sevencow{0}'.format(i), 'w') as f:
f.write('0000')
def tearDown(self):
for f in self._multi_files():
try:
os.unlink(f)
            except OSError:  # os.unlink raises OSError when the file is already gone
pass
def _list_file_names(self):
files = self.b.list_files()
return [f['key'] for f in files['items']]
def _multi_files(self):
return ['sevencow{0}'.format(i) for i in range(3)]
def testaPutSingle(self):
key = 'sevencow0'
res = self.b.put(key)
assert key == res['key']
assert key in self._list_file_names()
def testbPutMulti(self):
keys = self._multi_files()
res = self.b.put(*keys)
res_keys = [r['key'] for r in res]
assert keys == res_keys
files = self._list_file_names()
for k in keys:
assert k in files
def testcStatSingle(self):
self.b.stat('sevencow0')
def testdStatMulti(self):
self.b.stat(*self._multi_files())
def testeCopySingle(self):
self.b.copy('sevencow0', 'sevencow01')
assert 'sevencow01' in self._list_file_names()
def testfCopyMulti(self):
self.b.copy(('sevencow1', 'sevencow11'), ('sevencow2', 'sevencow21'))
files = self._list_file_names()
assert 'sevencow11' in files
assert 'sevencow21' in files
def testgMoveSingle(self):
self.b.move('sevencow01', 'sevencow011')
files = self._list_file_names()
assert 'sevencow01' not in files
assert 'sevencow011' in files
def testhMoveMulti(self):
self.b.move(('sevencow11', 'sevencow111'), ('sevencow21', 'sevencow211'))
files = self._list_file_names()
assert 'sevencow11' not in files and 'sevencow21' not in files
assert 'sevencow111' in files and 'sevencow211' in files
def testiDeleteSingle(self):
self.b.delete('sevencow0')
assert 'sevencow0' not in self._list_file_names()
def testjDeleteMulti(self):
keys = ['sevencow1', 'sevencow2', 'sevencow011', 'sevencow111', 'sevencow211']
self.b.delete(*keys)
files = self._list_file_names()
for k in keys:
assert k not in files
|
[
"[email protected]"
] | |
6d55581dd396e230a5242ff86445353b15be276c
|
4f86b275ff73f9d2d308f4e290fd8aa0bf8fec87
|
/pretraining/ensemble_pretrain.py
|
24d30ac2f5736c4478b77deeac89f9b00b4aabd5
|
[
"MIT"
] |
permissive
|
fagan2888/Adv-SS-Pretraining
|
b663b289e1deeccd73a881da0121da5590cc3041
|
4ffbebea582f858ec6165f082f52ded1fc9b817d
|
refs/heads/master
| 2022-11-15T00:08:19.101407 | 2020-07-04T19:01:29 | 2020-07-04T19:01:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 34,056 |
py
|
'''
Ensemble pretraining with a penalty: the penalty couples the input-image
gradients produced by the different self-supervised tasks through the
log-determinant of their Gram matrix (see calculate_log_det below).
'''
import argparse
import os
import pdb
import time
import torch
import random
import pickle
import numpy as np
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.autograd import Variable
import torchvision.models as models
import torch.nn.functional as F
import PIL.Image as Image
import matplotlib.pyplot as plt
import model_ensemble
from attention_pooling.attention_pooling import SelfieModel
from attack_algo_ensemble import PGD_attack_3module,PGD_attack_jigsaw,PGD_attack_rotation,PGD_attack_selfie
parser = argparse.ArgumentParser(description='PyTorch Cifar10 Training')
parser.add_argument('--data', type=str, default='/datadrive/data', help='location of the data corpus')
parser.add_argument('-b', '--batch_size', type=int, default=80, help='batch size')
parser.add_argument('--lr', default=0.001, type=float, help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight_decay', default=1e-4, type=float, help='weight decay')
parser.add_argument('--epochs', default=150, type=int, help='number of total epochs to run')
parser.add_argument('--print_freq', default=10, type=int, help='print frequency')
#parameters need input
parser.add_argument('--save_dir', help='The directory used to save the trained models', default='/datadrive/models/pretrain_penalty_ensemble_onegpu', type=str)
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--seed', type=int, default=22, help='random seed')
parser.add_argument('--local_rank', type=int, help='local-rank')
parser.add_argument('--attack_eps', type=float, default=(8/255), help='perturbation radius for attack phase')
parser.add_argument('--attack_gamma', type=float, default=(2/255), help='perturbation radius for attack phase')
parser.add_argument('--attack_randinit', type=bool, default=True, help="randinit flag for attack algo")
parser.add_argument('--adv_iter', type=int, default=10, help='how many epochs to wait before another test')
best_prec1 = 0
best_ata = 0
def main():
global args, best_prec1, best_ata
args = parser.parse_args()
print(args)
torch.cuda.set_device(args.gpu)
setup_seed(args.seed)
model = model_ensemble.PretrainEnsembleModel()
model = model.cuda()
n_split = 4
selfie_model = get_selfie_model(n_split)
selfie_model = selfie_model.cuda()
cudnn.benchmark = True
train_trans = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor()
])
val_trans = transforms.Compose([
transforms.ToTensor()
])
#dataset process
train_dataset = datasets.CIFAR10(args.data, train=True, transform=train_trans, download=True)
test_dataset = datasets.CIFAR10(args.data, train=False, transform=val_trans, download=True)
valid_size = 0.1
indices = list(range(len(train_dataset)))
split = int(np.floor(valid_size*len(train_dataset)))
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = torch.utils.data.Subset(train_dataset, train_idx)
valid_sampler = torch.utils.data.Subset(train_dataset, valid_idx)
train_loader = torch.utils.data.DataLoader(
train_sampler,
batch_size=args.batch_size, shuffle=True,
num_workers=2, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
valid_sampler,
batch_size=args.batch_size, shuffle=False,
num_workers=2, pin_memory=True)
test_loader = torch.utils.data.DataLoader(
test_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=2, pin_memory=True)
criterion = nn.CrossEntropyLoss().cuda()
params_list = [{'params': selfie_model.parameters(), 'lr': args.lr,
'weight_decay': args.weight_decay},]
params_list.append({'params': model.parameters(), 'lr': args.lr, 'weight_decay': args.weight_decay})
optimizer = torch.optim.SGD(params_list, lr=args.lr,
momentum=0.9,
weight_decay=args.weight_decay, nesterov = True)
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer,
lr_lambda=lambda step: cosine_annealing(
step,
args.epochs * len(train_loader),
1,
1e-8 / args.lr))
print('adv training')
alltrain_acc_jig=[]
allta_jig=[]
allata_jig=[]
alltrain_acc_rot=[]
allta_rot=[]
allata_rot=[]
alltrain_acc_selfie=[]
allta_selfie=[]
allata_selfie=[]
if os.path.exists(args.save_dir) is not True:
os.mkdir(args.save_dir)
permutation = np.array([np.random.permutation(16) for i in range(30)])
np.save(os.path.join(args.save_dir, 'permutation.npy'), permutation)
if permutation.min()==1:
permutation=permutation-1
all_seq=[np.random.permutation(16) for ind in range(400)]
pickle.dump(all_seq, open(os.path.join(args.save_dir, 'img_test_seq.pkl'),'wb'))
# all_seq=pickle.load(open('img_test_seq.pkl','rb'))
for epoch in range(args.epochs):
print("The learning rate is {}".format(optimizer.param_groups[0]['lr']))
# train for one epoch
train_acc_jig, train_acc_rot, train_acc_selfie, _ = train(train_loader, model, selfie_model, criterion, optimizer, epoch, scheduler, permutation)
# evaluate on validation set
ta_jig, ta_rot, ta_selfie , _ = val(val_loader, model, selfie_model, criterion, permutation, all_seq)
ata_jig, ata_rot, ata_selfie , _ = val_pgd(val_loader, model, selfie_model, criterion, permutation, all_seq)
alltrain_acc_selfie.append(train_acc_selfie)
allta_selfie.append(ta_selfie)
allata_selfie.append(ata_selfie)
alltrain_acc_jig.append(train_acc_jig)
allta_jig.append(ta_jig)
allata_jig.append(ata_jig)
alltrain_acc_rot.append(train_acc_rot)
allta_rot.append(ta_rot)
allata_rot.append(ata_rot)
sum_ta=ta_jig+ta_rot+ta_selfie
sum_ata=ata_jig+ata_rot+ata_selfie
# remember best prec@1 and save checkpoint
is_best = sum_ta > best_prec1
best_prec1 = max(sum_ta, best_prec1)
ata_is_best = sum_ata > best_ata
best_ata = max(sum_ata,best_ata)
if is_best:
save_checkpoint({
'epoch': epoch + 1,
'selfie_state': selfie_model.state_dict(),
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'best_model.pt'))
if ata_is_best:
save_checkpoint({
'epoch': epoch + 1,
'selfie_state': selfie_model.state_dict(),
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
            }, ata_is_best, filename=os.path.join(args.save_dir, 'ata_best_model.pt'))
save_checkpoint({
'epoch': epoch + 1,
'selfie_state': selfie_model.state_dict(),
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'model.pt'))
plt.plot(alltrain_acc_selfie, label='train_acc')
plt.plot(allta_selfie, label='TA')
plt.plot(allata_selfie, label='ATA')
plt.legend()
plt.savefig(os.path.join(args.save_dir, 'selfie.png'))
plt.close()
plt.plot(alltrain_acc_rot, label='train_acc')
plt.plot(allta_rot, label='TA')
plt.plot(allata_rot, label='ATA')
plt.legend()
plt.savefig(os.path.join(args.save_dir, 'rotation.png'))
plt.close()
plt.plot(alltrain_acc_jig, label='train_acc')
plt.plot(allta_jig, label='TA')
plt.plot(allata_jig, label='ATA')
plt.legend()
plt.savefig(os.path.join(args.save_dir, 'jigsaw.png'))
plt.close()
print('start testing ATA best model')
model_path = os.path.join(args.save_dir, 'ata_best_model.pt')
model_dict_best = torch.load(model_path)
model.load_state_dict(model_dict_best['state_dict'])
selfie_model.load_state_dict(model_dict_best['selfie_state'])
ta_jig, ta_rot, ta_selfie , _ = val(val_loader, model, selfie_model, criterion, permutation, all_seq)
ata_jig, ata_rot, ata_selfie , _ = val_pgd(val_loader, model, selfie_model, criterion, permutation, all_seq)
print('start testing TA best model')
model_path = os.path.join(args.save_dir, 'best_model.pt')
model_dict_best = torch.load(model_path)
model.load_state_dict(model_dict_best['state_dict'])
selfie_model.load_state_dict(model_dict_best['selfie_state'])
ta_jig, ta_rot, ta_selfie , _ = val(val_loader, model, selfie_model, criterion, permutation, all_seq)
ata_jig, ata_rot, ata_selfie , _ = val_pgd(val_loader, model, selfie_model, criterion, permutation, all_seq)
def train(train_loader, model, selfie_model, criterion, optimizer, epoch, scheduler, permutation):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1_selfie = AverageMeter()
top1_rotation = AverageMeter()
top1_jigsaw = AverageMeter()
end = time.time()
    # switch to train mode
model.train()
selfie_model.train()
    bias = 0.9  # jigsaw: the unshuffled patch order is forced when rate > bias, i.e. with probability 1 - bias
for index, (input, label) in enumerate(train_loader):
data_time.update(time.time() - end)
input = input.cuda()
cur_batch_size=input.size(0)
jig_input,jig_target = jigsaw(input, permutation, bias)
jig_target=jig_target.long().cuda()
jig_input = jig_input.cuda()
#rotation process
rot_input, rot_target = rotation(input)
        #selfie patch permutation setup
total=16
seq = np.random.permutation(total)
t = seq[:(total // 4)]
v = seq[(total // 4):]
v = torch.from_numpy(v).cuda()
pos = t
t = torch.from_numpy(np.array(pos)).cuda()
input_adv, jig_input_adv, rot_input_adv = PGD_attack_3module(
x = [input, jig_input, rot_input],
y_jig = jig_target,
y_rot = rot_target,
selfie_model = selfie_model,
model = model,
criterion = criterion,
seq = seq,
eps = (8/255),
steps = 10,
gamma = (2/255),
randinit = True
)
input_adv = input_adv.requires_grad_(True).cuda()
jig_input_adv = jig_input_adv.requires_grad_(True).cuda()
rot_input_adv = rot_input_adv.requires_grad_(True).cuda()
#jigsaw forward
jig_feature = model(jig_input)
jig_feature = model.layer4_jigsaw(jig_feature)
jig_feature = model.avgpool3(jig_feature)
jig_feature = jig_feature.view(jig_feature.size(0), -1)
jig_out = model.fc_jigsaw(jig_feature)
loss_jigsaw = criterion(jig_out,jig_target)
#adv jigsaw forward
jig_feature_adv = model(jig_input_adv)
jig_feature_adv = model.layer4_jigsaw(jig_feature_adv)
jig_feature_adv = model.avgpool3(jig_feature_adv)
jig_feature_adv = jig_feature_adv.view(jig_feature_adv.size(0), -1)
jig_out_adv = model.fc_jigsaw(jig_feature_adv)
loss_jigsaw_adv = criterion(jig_out_adv,jig_target)
#adv rotation forward
rot_feature_adv = model(rot_input_adv)
rot_feature_adv = model.layer4_rotation(rot_feature_adv)
rot_feature_adv = model.avgpool2(rot_feature_adv)
rot_feature_adv = rot_feature_adv.view(rot_feature_adv.size(0), -1)
rot_out_adv = model.fc_rotation(rot_feature_adv)
loss_rotation_adv = criterion(rot_out_adv,rot_target)
# rotation forward
rot_feature = model(rot_input)
rot_feature = model.layer4_rotation(rot_feature)
rot_feature = model.avgpool2(rot_feature)
rot_feature = rot_feature.view(rot_feature.size(0), -1)
rot_out = model.fc_rotation(rot_feature)
loss_rotation = criterion(rot_out,rot_target)
#selfie forward
batches = split_image_selfie(input, 8)
batches = list(map(lambda x: x.unsqueeze(1), batches))
batches = torch.cat(batches, 1) # (B, L, C, H, W)
input_batches = torch.split(batches, 1, 1)
input_batches = list(map(lambda x: x.squeeze(1), input_batches))
input_batches = torch.cat(input_batches, 0)
output_batches = model(input_batches)
output_batches = model.avgpool1(output_batches)
output_batches = output_batches.view(output_batches.size(0),-1)
output_batches = output_batches.unsqueeze(1)
output_batches = torch.split(output_batches, cur_batch_size, 0)
output_batches = torch.cat(output_batches,1)
output_decoder = output_batches.index_select(1, t)
output_encoder = output_batches.index_select(1, v)
output_encoder = selfie_model(output_encoder, pos)
features = []
for i in range(len(pos)):
feature = output_decoder[:, i, :]
feature = feature.unsqueeze(2)
features.append(feature)
features = torch.cat(features, 2) # (B, F, NP)
patch_loss = 0
for i in range(len(t)):
activate = output_encoder[:, i, :].unsqueeze(1)
pre = torch.bmm(activate, features)
logit = nn.functional.softmax(pre, 2).view(-1, len(t))
temptarget = torch.ones(logit.shape[0]).cuda() * i
temptarget = temptarget.long()
loss_ = criterion(logit, temptarget)
patch_loss += loss_
#adv selfie forward
batches_adv = split_image_selfie(input_adv, 8)
batches_adv = list(map(lambda x: x.unsqueeze(1), batches_adv))
batches_adv = torch.cat(batches_adv, 1) # (B, L, C, H, W)
input_batches_adv = torch.split(batches_adv, 1, 1)
input_batches_adv = list(map(lambda x: x.squeeze(1), input_batches_adv))
input_batches_adv = torch.cat(input_batches_adv, 0)
output_batches_adv = model(input_batches_adv)
output_batches_adv = model.avgpool1(output_batches_adv)
output_batches_adv = output_batches_adv.view(output_batches_adv.size(0),-1)
output_batches_adv = output_batches_adv.unsqueeze(1)
output_batches_adv = torch.split(output_batches_adv, cur_batch_size, 0)
output_batches_adv = torch.cat(output_batches_adv,1)
output_decoder_adv = output_batches_adv.index_select(1, t)
output_encoder_adv = output_batches_adv.index_select(1, v)
output_encoder_adv = selfie_model(output_encoder_adv, pos)
features_adv = []
for i in range(len(pos)):
feature_adv = output_decoder_adv[:, i, :]
feature_adv = feature_adv.unsqueeze(2)
features_adv.append(feature_adv)
features_adv = torch.cat(features_adv, 2) # (B, F, NP)
patch_loss_adv = 0
for i in range(len(t)):
activate_adv = output_encoder_adv[:, i, :].unsqueeze(1)
pre_adv = torch.bmm(activate_adv, features_adv)
logit_adv = nn.functional.softmax(pre_adv, 2).view(-1, len(t))
temptarget_adv = torch.ones(logit_adv.shape[0]).cuda() * i
temptarget_adv = temptarget_adv.long()
loss__adv = criterion(logit_adv, temptarget_adv)
patch_loss_adv += loss__adv
prec1_adv, _ = accuracy(logit_adv, temptarget_adv, topk=(1,3))
top1_selfie.update(prec1_adv.item(), 1)
grad_selfie = torch.autograd.grad(patch_loss_adv, input_adv, create_graph=True)[0]
grad_jig = torch.autograd.grad(loss_jigsaw_adv, jig_input_adv, create_graph=True)[0]
grad_rot = torch.autograd.grad(loss_rotation_adv, rot_input_adv, create_graph=True)[0]
h_loss = calculate_log_det(grad_selfie, grad_jig, grad_rot)
all_loss = (patch_loss+loss_jigsaw+loss_rotation+patch_loss_adv+loss_jigsaw_adv+loss_rotation_adv)/6 + 0.1*h_loss
optimizer.zero_grad()
all_loss.backward()
optimizer.step()
scheduler.step()
batch_time.update(time.time() - end)
end = time.time()
jig_out_adv = jig_out_adv.float()
rot_out_adv = rot_out_adv.float()
all_loss = all_loss.float()
# measure accuracy and record loss
prec1_jig = accuracy(jig_out_adv.data, jig_target)[0]
prec1_rot = accuracy(rot_out_adv.data, rot_target)[0]
losses.update(all_loss.item(), input.size(0))
top1_jigsaw.update(prec1_jig.item(), input.size(0))
top1_rotation.update(prec1_rot.item(), input.size(0))
if index % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc_selfie {atop1.val:.3f} ({atop1.avg:.3f})\t'
'Acc_jig {btop1.val:.3f} ({btop1.avg:.3f})\t'
'Acc_rot {ctop1.val:.3f} ({ctop1.avg:.3f})'.format(
epoch, index, len(train_loader),batch_time=batch_time, loss=losses, atop1=top1_selfie, btop1=top1_jigsaw, ctop1=top1_rotation))
print('train_jigsaw_accuracy {top1.avg:.3f}'.format(top1=top1_jigsaw))
print('train_rotation_accuracy {top1.avg:.3f}'.format(top1=top1_rotation))
print('train_selfie_accuracy {top1.avg:.3f}'.format(top1=top1_selfie))
return top1_jigsaw.avg, top1_rotation.avg, top1_selfie.avg, losses.avg
def val(val_loader, model, selfie_model, criterion, permutation, all_seq):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1_selfie = AverageMeter()
top1_rotation = AverageMeter()
top1_jigsaw = AverageMeter()
end = time.time()
model.eval()
selfie_model.eval()
bias=0.9
for index, (input, label) in enumerate(val_loader):
data_time.update(time.time() - end)
input = input.cuda()
cur_batch_size=input.size(0)
# jigsaw process
jig_input,jig_target = jigsaw(input, permutation, bias)
jig_target = jig_target.long().cuda()
jig_input = jig_input.cuda()
#rotation process
rot_input, rot_target = rotation(input)
        #selfie patch permutation setup
total=16
seq = all_seq[index]
t = seq[:(total // 4)]
v = seq[(total // 4):]
v = torch.from_numpy(v).cuda()
pos = t
t = torch.from_numpy(np.array(pos)).cuda()
#jigsaw forward
jig_feature = model(jig_input)
jig_feature = model.layer4_jigsaw(jig_feature)
jig_feature = model.avgpool3(jig_feature)
jig_feature = jig_feature.view(jig_feature.size(0), -1)
jig_out = model.fc_jigsaw(jig_feature)
loss_jigsaw = criterion(jig_out,jig_target)
#rotation forward
rot_feature = model(rot_input)
rot_feature = model.layer4_rotation(rot_feature)
rot_feature = model.avgpool2(rot_feature)
rot_feature = rot_feature.view(rot_feature.size(0), -1)
rot_out = model.fc_rotation(rot_feature)
loss_rotation = criterion(rot_out,rot_target)
#selfie forward
batches = split_image_selfie(input, 8)
batches = list(map(lambda x: x.unsqueeze(1), batches))
batches = torch.cat(batches, 1) # (B, L, C, H, W)
input_batches = torch.split(batches, 1, 1)
input_batches = list(map(lambda x: x.squeeze(1), input_batches))
input_batches = torch.cat(input_batches, 0)
output_batches = model(input_batches)
output_batches = model.avgpool1(output_batches)
output_batches = output_batches.view(output_batches.size(0),-1)
output_batches = output_batches.unsqueeze(1)
output_batches = torch.split(output_batches, cur_batch_size, 0)
output_batches = torch.cat(output_batches,1)
output_decoder = output_batches.index_select(1, t)
output_encoder = output_batches.index_select(1, v)
output_encoder = selfie_model(output_encoder, pos)
features = []
for i in range(len(pos)):
feature = output_decoder[:, i, :]
feature = feature.unsqueeze(2)
features.append(feature)
features = torch.cat(features, 2) # (B, F, NP)
patch_loss = 0
for i in range(len(t)):
activate = output_encoder[:, i, :].unsqueeze(1)
pre = torch.bmm(activate, features)
logit = nn.functional.softmax(pre, 2).view(-1, len(t))
temptarget = torch.ones(logit.shape[0]).cuda() * i
temptarget = temptarget.long()
loss_ = criterion(logit, temptarget)
patch_loss += loss_
prec1, _ = accuracy(logit, temptarget, topk=(1,3))
top1_selfie.update(prec1.item(), 1)
all_loss = patch_loss+loss_jigsaw+loss_rotation
jig_out = jig_out.float()
rot_out = rot_out.float()
all_loss = all_loss.float()
# measure accuracy and record loss
prec1_jig = accuracy(jig_out.data, jig_target)[0]
prec1_rot = accuracy(rot_out.data, rot_target)[0]
losses.update(all_loss.item(), input.size(0))
top1_jigsaw.update(prec1_jig.item(), input.size(0))
top1_rotation.update(prec1_rot.item(), input.size(0))
batch_time.update(time.time() - end)
end = time.time()
if index % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc_selfie {atop1.val:.3f} ({atop1.avg:.3f})\t'
'Acc_jig {btop1.val:.3f} ({btop1.avg:.3f})\t'
'Acc_rot {ctop1.val:.3f} ({ctop1.avg:.3f})'.format(
index, len(val_loader), batch_time=batch_time, loss=losses, atop1=top1_selfie, btop1=top1_jigsaw, ctop1=top1_rotation))
print('val_jigsaw_accuracy {top1.avg:.3f}'.format(top1=top1_jigsaw))
print('val_rotation_accuracy {top1.avg:.3f}'.format(top1=top1_rotation))
print('val_selfie_accuracy {top1.avg:.3f}'.format(top1=top1_selfie))
return top1_jigsaw.avg, top1_rotation.avg, top1_selfie.avg, losses.avg
def val_pgd(val_loader, model, selfie_model, criterion, permutation, all_seq):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1_selfie = AverageMeter()
top1_rotation = AverageMeter()
top1_jigsaw = AverageMeter()
end = time.time()
model.eval()
selfie_model.eval()
bias=0.9
for index, (input, label) in enumerate(val_loader):
data_time.update(time.time() - end)
input = input.cuda()
cur_batch_size=input.size(0)
# jigsaw process
jig_input,jig_target = jigsaw(input, permutation, bias)
jig_target=jig_target.long().cuda()
jig_input = jig_input.cuda()
jig_input_adv = PGD_attack_jigsaw(jig_input,jig_target,
model=model,
criterion=criterion,
eps=args.attack_eps,
steps=20,
gamma=args.attack_gamma,
randinit=args.attack_randinit).data
with torch.no_grad():
jig_input=jig_input_adv.cuda()
#rotation process
rot_input, rot_target = rotation(input)
rot_input_adv = PGD_attack_rotation(rot_input,rot_target,
model=model,
criterion=criterion,
eps=args.attack_eps,
steps=20,
gamma=args.attack_gamma,
randinit=args.attack_randinit).data
with torch.no_grad():
rot_input=rot_input_adv.cuda()
        #selfie patch permutation setup
total=16
seq = all_seq[index]
t = seq[:(total // 4)]
v = seq[(total // 4):]
v = torch.from_numpy(v).cuda()
pos = t
t = torch.from_numpy(np.array(pos)).cuda()
input_adv = PGD_attack_selfie(input,
selfie_model=selfie_model,
P=model,
criterion=criterion,
seq=seq,
eps=(8/255),
steps=20,
gamma=(2/255),
randinit=True).data
with torch.no_grad():
input=input_adv.cuda()
#jigsaw forward
jig_feature = model(jig_input)
jig_feature = model.layer4_jigsaw(jig_feature)
jig_feature = model.avgpool3(jig_feature)
jig_feature = jig_feature.view(jig_feature.size(0), -1)
jig_out = model.fc_jigsaw(jig_feature)
loss_jigsaw = criterion(jig_out,jig_target)
#rotation forward
rot_feature = model(rot_input)
rot_feature = model.layer4_rotation(rot_feature)
rot_feature = model.avgpool2(rot_feature)
rot_feature = rot_feature.view(rot_feature.size(0), -1)
rot_out = model.fc_rotation(rot_feature)
loss_rotation = criterion(rot_out,rot_target)
#selfie forward
batches = split_image_selfie(input, 8)
batches = list(map(lambda x: x.unsqueeze(1), batches))
batches = torch.cat(batches, 1) # (B, L, C, H, W)
input_batches = torch.split(batches, 1, 1)
input_batches = list(map(lambda x: x.squeeze(1), input_batches))
input_batches = torch.cat(input_batches, 0)
output_batches = model(input_batches)
output_batches = model.avgpool1(output_batches)
output_batches = output_batches.view(output_batches.size(0),-1)
output_batches = output_batches.unsqueeze(1)
output_batches = torch.split(output_batches, cur_batch_size, 0)
output_batches = torch.cat(output_batches,1)
output_decoder = output_batches.index_select(1, t)
output_encoder = output_batches.index_select(1, v)
output_encoder = selfie_model(output_encoder, pos)
features = []
for i in range(len(pos)):
feature = output_decoder[:, i, :]
feature = feature.unsqueeze(2)
features.append(feature)
features = torch.cat(features, 2) # (B, F, NP)
patch_loss = 0
for i in range(len(t)):
activate = output_encoder[:, i, :].unsqueeze(1)
pre = torch.bmm(activate, features)
logit = nn.functional.softmax(pre, 2).view(-1, len(t))
temptarget = torch.ones(logit.shape[0]).cuda() * i
temptarget = temptarget.long()
loss_ = criterion(logit, temptarget)
patch_loss += loss_
prec1, _ = accuracy(logit, temptarget, topk=(1,3))
top1_selfie.update(prec1.item(), 1)
all_loss = patch_loss+loss_jigsaw+loss_rotation
jig_out = jig_out.float()
rot_out = rot_out.float()
all_loss = all_loss.float()
# measure accuracy and record loss
prec1_jig = accuracy(jig_out.data, jig_target)[0]
prec1_rot = accuracy(rot_out.data, rot_target)[0]
losses.update(all_loss.item(), input.size(0))
top1_jigsaw.update(prec1_jig.item(), input.size(0))
top1_rotation.update(prec1_rot.item(), input.size(0))
batch_time.update(time.time() - end)
end = time.time()
if index % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc_selfie {atop1.val:.3f} ({atop1.avg:.3f})\t'
'Acc_jig {btop1.val:.3f} ({btop1.avg:.3f})\t'
'Acc_rot {ctop1.val:.3f} ({ctop1.avg:.3f})'.format(
index, len(val_loader), batch_time=batch_time, loss=losses, atop1=top1_selfie, btop1=top1_jigsaw, ctop1=top1_rotation))
print('adv_jigsaw_accuracy {top1.avg:.3f}'.format(top1=top1_jigsaw))
print('adv_rotation_accuracy {top1.avg:.3f}'.format(top1=top1_rotation))
print('adv_selfie_accuracy {top1.avg:.3f}'.format(top1=top1_selfie))
return top1_jigsaw.avg, top1_rotation.avg, top1_selfie.avg, losses.avg
def save_checkpoint(state, is_best, filename='weight.pt'):
"""
Save the training model
"""
torch.save(state, filename)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def cosine_annealing(step, total_steps, lr_max, lr_min):
return lr_min + (lr_max - lr_min) * 0.5 * (
1 + np.cos(step / total_steps * np.pi))
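# e.g. the multiplier starts at lr_max and decays to lr_min over total_steps:
#   cosine_annealing(0, 100, 1, 0.0)    # -> 1.0
#   cosine_annealing(100, 100, 1, 0.0)  # -> 0.0 (up to floating point error)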
def get_selfie_model(n_split):
n_layers = 12
    d_model = 1024  # feature length of a patch after it is routed through the backbone P
d_in = 64
n_heads = d_model// d_in
d_ff = 2048
model = SelfieModel(n_layers, n_heads, d_in, d_model, d_ff, n_split)
return model
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def split_image(image, N=8):
"""
image: (C, W, H)
"""
batches = []
for i in list(torch.split(image, N, dim=1)):
batches.extend(list(torch.split(i, N, dim=2)))
return batches
def split_image_selfie(image, N):
"""
image: (B, C, W, H)
"""
batches = []
for i in list(torch.split(image, N, dim=2)):
batches.extend(list(torch.split(i, N, dim=3)))
return batches
def concat(batches, num=4):
"""
batches: [(C,W1,H1)]
"""
batches_list=[]
for j in range(len(batches) // num):
batches_list.append(torch.cat(batches[num*j:num*(j+1)], dim=2))
return torch.cat(batches_list,dim=1)
def jigsaw(input, permutation, bias):
cur_batch_size=input.size(0)
jig_input=torch.zeros_like(input)
jig_target=torch.zeros(cur_batch_size)
for idx in range(cur_batch_size):
img=input[idx,:]
batches=split_image(img)
order=np.random.randint(len(permutation)+1)
rate=np.random.rand()
if rate>bias:
order=0
if order == 0:
new_batches=batches
else:
new_batches=[batches[permutation[order-1][j]] for j in range(16)]
jig_input[idx]=concat(new_batches)
jig_target[idx]=order
return jig_input, jig_target
def rotation(input):
batch = input.shape[0]
target = torch.tensor(np.random.permutation([0,1,2,3] * (int(batch / 4) + 1)), device = input.device)[:batch]
target = target.long()
image = torch.zeros_like(input)
image.copy_(input)
for i in range(batch):
image[i, :, :, :] = torch.rot90(input[i, :, :, :], target[i], [1, 2])
return image, target
def compute_cosine(x,x1,x2,x3):
g1=torch.flatten(x1-x, 1)
g2=torch.flatten(x2-x, 1)
g3=torch.flatten(x3-x, 1)
cos12=F.cosine_similarity(g1,g2,dim=1)
cos13=F.cosine_similarity(g1,g3,dim=1)
cos23=F.cosine_similarity(g2,g3,dim=1)
return cos12,cos13,cos23
def calculate_log_det(grad1,grad2,grad3):
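    """
    Log-determinant of the 3x3 Gram matrix built from the three normalized
    gradient directions: it grows as the gradients become more orthogonal,
    and the scaled identity keeps the determinant strictly positive.
    """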
grad1 = grad1.view(grad1.size(0),-1)
grad2 = grad2.view(grad2.size(0),-1)
grad3 = grad3.view(grad3.size(0),-1)
I=[1e-5*torch.eye(3).unsqueeze(0) for i in range(grad1.size(0))]
I=torch.cat(I,0).cuda()
norm1 = torch.norm(grad1, p=2, dim=1, keepdim=True)+1e-10
norm2 = torch.norm(grad2, p=2, dim=1, keepdim=True)+1e-10
norm3 = torch.norm(grad3, p=2, dim=1, keepdim=True)+1e-10
grad1 = grad1.div(norm1).unsqueeze(2)
grad2 = grad2.div(norm2).unsqueeze(2)
grad3 = grad3.div(norm3).unsqueeze(2)
G = torch.cat([grad1,grad2,grad3],2)
det = torch.bmm(torch.transpose(G, 1, 2), G)+I
logdet = det.det().log().mean()
return logdet
def warmup_lr(step, optimizer, speed):
lr = 0.01+step*(0.1-0.01)/speed
lr = min(lr,0.1)
for p in optimizer.param_groups:
p['lr']=lr
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
41a41fbbd319825b3648367519e4f9dd507552ae
|
27aa2aa55c4c03b032b62c462e98385e011ec923
|
/5_5_1_Hash_Functions.py
|
9d0b25bfbc81006f485cc366cbc720e19fc89387
|
[] |
no_license
|
huiyanglu/DataStructures
|
bb08f07ded680f5c02d7264123b7b48cab41a223
|
76c6dee95c747729a19b7f910c9f344d25e4bab0
|
refs/heads/master
| 2020-04-02T04:49:23.676385 | 2019-08-13T15:37:23 | 2019-08-13T15:37:23 | 154,036,882 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 641 |
py
|
"""
create hash functions for character-based items such as strings.
"""
def hash(astring,tablesize):
sum = 0
for pos in range(len(astring)):
sum = sum + ord(astring[pos])
return sum % tablesize
"""
When using this hash function, anagrams will always be given the same hash value.
To remedy this, we could use the position of the character as a weight.
One possible way to use the positional value as a weighting factor.
"""
def hashwithWeight(astring,tablesize):
sum = 0
for pos in range(len(astring)):
sum = sum + ord(astring[pos])*(pos+1)
return sum % tablesize
print(hashwithWeight('cat',11))
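# Added illustration of the claim above: anagrams collide under the plain
# ordinal-sum hash, while positional weighting usually separates them.
print(hash('cat', 11) == hash('act', 11))                      # True: both land in slot 4
print(hashwithWeight('cat', 11) == hashwithWeight('act', 11))  # False: slots 3 vs 5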
|
[
"[email protected]"
] | |
8fa00d7a6d2ce243aade814d74998d95242efa9e
|
18e2f67599b45b98c14931a8287a15b963250c83
|
/23_mtPool.py
|
fe2e58291b7370c831f0b2aec7e444b631bd39c9
|
[] |
no_license
|
uuboyscy/eb102-python
|
77ffb9a79a05d8371a5f38463c60ce579cbd9b39
|
ec0e32cb1b383b9ad0c0eb68696a15569bf6037d
|
refs/heads/master
| 2022-09-09T05:16:31.749893 | 2020-05-18T03:47:17 | 2020-05-18T03:47:17 | 262,553,112 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 564 |
py
|
# from multiprocessing import Pool
# import multiprocessing as mp
from multiprocessing.dummy import Pool
import time
import os
def longTimeTask(i):
print('task: {}, PID: {}'.format(i, os.getpid()))
time.sleep(5)
result = 10 ** 30
print('result: ', result)
return result
if __name__ == '__main__':
start_time = time.time()
print('母程序PID:', os.getpid())
p = Pool(4)
data = p.map(longTimeTask, iterable=range(0, 4))
p.close()
p.join()
print(data)
end_time = time.time()
print(end_time - start_time)
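# Note (added): multiprocessing.dummy.Pool is a *thread* pool with the same
# API, which is why every task above reports the parent's PID; switching the
# import back to multiprocessing.Pool would run the tasks in separate processes.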
|
[
"[email protected]"
] | |
7640217d7fd2224f15bf1ff4162e57fc5c85ecec
|
06b78441aa1cd9330fd20b29f5b083554022860c
|
/cpp_call_python_numpy/algo.py
|
047713d95a260832b9e1e1dbf0f32818b02a2945
|
[] |
no_license
|
asdlei99/pybind11_cpp_python
|
e67ac9f1814efb0d4da87c9da744796270483591
|
0f509816258146b1e554b9913f5a611872b50876
|
refs/heads/master
| 2021-09-26T20:15:04.503022 | 2018-11-02T01:15:17 | 2018-11-02T01:15:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 100 |
py
|
import numpy as np
def det(a):
return np.linalg.det(a)
def inv(a):
return np.linalg.inv(a)
|
[
"[email protected]"
] | |
85e4609531f380ca3101cd87f99fb0acd6f0d120
|
7463a66dfa00572f4e4d8ef4349309531f0105ae
|
/TrainerDL/Utils/PytorchToCaffe/Caffe/caffe_lmdb.py
|
b4d538d0ba5c54137d4889ea9bdce54292bb09a6
|
[
"MIT"
] |
permissive
|
fx19940824/DetectionModel
|
f2e380fd21f4b31a17fd175a6dea1067b8f0d5cc
|
edc0d2f9eea481d2bc6f3abb2f222b59fdc25538
|
refs/heads/master
| 2022-12-20T19:58:32.224829 | 2019-05-30T01:16:05 | 2019-05-30T01:16:05 | 188,800,679 | 2 | 0 | null | 2022-11-22T02:39:23 | 2019-05-27T08:13:38 |
Python
|
UTF-8
|
Python
| false | false | 1,005 |
py
|
import lmdb
from Utils.PytorchToCaffe.Caffe import caffe_pb2 as pb2
import numpy as np
class Read_Caffe_LMDB():
def __init__(self,path,dtype=np.uint8):
self.env=lmdb.open(path, readonly=True)
self.dtype=dtype
self.txn=self.env.begin()
self.cursor=self.txn.cursor()
@staticmethod
def to_numpy(value,dtype=np.uint8):
datum = pb2.Datum()
datum.ParseFromString(value)
        flat_x = np.frombuffer(datum.data, dtype=dtype)  # np.fromstring is deprecated for binary data
        data = flat_x.reshape(datum.channels, datum.height, datum.width)
        label = datum.label
return data,label
def iterator(self):
while True:
key,value=self.cursor.key(),self.cursor.value()
yield self.to_numpy(value,self.dtype)
if not self.cursor.next():
return
def __iter__(self):
self.cursor.first()
it = self.iterator()
return it
def __len__(self):
return int(self.env.stat()['entries'])
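# A hedged usage sketch (the LMDB path is hypothetical):
# for data, label in Read_Caffe_LMDB('/path/to/train_lmdb'):
#     print(data.shape, label)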
|
[
"[email protected]"
] | |
4001aed2525fab6e77d25b475686406794abbccd
|
5004bd99b73653d6288122f38f5a58b4550ac55c
|
/setup.py
|
9df34b00cc918fa68ec26797d512db67fdea0c2f
|
[] |
no_license
|
BrancoLab/Fiberphotometry
|
78e2abb2149360393e4d718af908a6ee1351949e
|
d4440c1a6d343bd0d55f43f70a2f59bffa19c7c8
|
refs/heads/master
| 2023-01-01T09:37:02.908497 | 2020-10-28T15:46:07 | 2020-10-28T15:46:07 | 222,673,325 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 632 |
py
|
from setuptools import setup, find_namespace_packages
requirements = [
"numpy",
"pandas",
"moviepy",
"tqdm",
"python-opencv",
"matplotlib",
"seaborn",
"sklearn",
"scipy",
"psychopy",
"pypylon",
"fancylog"
]
setup(
name="fiberphotometry",
version="0.0.0.1",
author_email="[email protected]",
description="bunch of utility functions to analyse fiberphotometry data",
packages=find_namespace_packages(exclude=()),
include_package_data=True,
install_requires=requirements,
url="https://github.com/BrancoLab/Fiberphotometry",
author="Federico Claudi, Yu Lin Tan",
zip_safe=False,
)
|
[
"[email protected]"
] | |
eb50e7b8c14f42d9fcd0d1cde2a5ef4b1a278281
|
c8f4731bf85003b7d9f5a908723d15a33415eea5
|
/caffe2/python/layers/pairwise_dot_product.py
|
1c6820c9f9ee001e34dd652a4739c342bd27d27f
|
[
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
ChengduoZhao/caffe2
|
5c90fd66374f7f00b1330831e23dc9915da5028c
|
7811796ac91f5457208cb20bcfc55de2d39d21ba
|
refs/heads/master
| 2021-01-01T17:11:43.297274 | 2017-07-27T04:52:18 | 2017-07-27T05:08:00 | 98,018,932 | 0 | 1 | null | 2017-07-22T08:44:58 | 2017-07-22T08:44:58 | null |
UTF-8
|
Python
| false | false | 2,244 |
py
|
## @package dot_product
# Module caffe2.python.layers.dot_product
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
class PairwiseDotProduct(ModelLayer):
def __init__(self, model, input_record, output_dim,
name='pairwise_dot_product', **kwargs):
super(PairwiseDotProduct, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Struct), (
"Incorrect input type. Excpected Struct, but received: {0}".
format(input_record))
assert 'all_embeddings' in input_record, "all_embeddings is not given."
all_embeddings = input_record['all_embeddings']
assert isinstance(all_embeddings, schema.Scalar), (
"Incorrect input type. Excpected Scalar, but received: {0}".
format(all_embeddings))
if 'indices_to_gather' in input_record:
indices_to_gather = input_record['indices_to_gather']
assert isinstance(indices_to_gather, schema.Scalar), (
"Incorrect type of indices_to_gather. "
"Expected Scalar, but received: {0}".format(indices_to_gather)
)
self.all_embeddings = all_embeddings
self.indices_to_gather = indices_to_gather
dtype = all_embeddings.field_types()[0].base
self.output_schema = schema.Scalar(
(dtype, (output_dim)),
model.net.NextScopedBlob(name + '_output')
)
def add_ops(self, net):
Y = net.BatchMatMul(
[self.all_embeddings(), self.all_embeddings()],
trans_b=1,
)
if self.indices_to_gather:
flattened = net.Flatten(Y, 1)
transposed = net.Transpose(flattened)
gathered = net.Gather(
[
transposed,
self.indices_to_gather(),
],
dense_gradient=True,
)
net.Transpose(gathered, self.output_schema())
else:
net.Flatten(Y, self.output_schema())
|
[
"[email protected]"
] | |
43c48db2124ce6475b7e25ca5a6994119a4adfc0
|
2dd560dc468af0af4ca44cb4cd37a0b807357063
|
/Leetcode/705. Design HashSet/solution1.py
|
2911a94a76a9164618872d1ff81eb7f0dd2f38a2
|
[
"MIT"
] |
permissive
|
hi0t/Outtalent
|
460fe4a73788437ba6ce9ef1501291035c8ff1e8
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
refs/heads/master
| 2023-02-26T21:16:56.741589 | 2021-02-05T13:36:50 | 2021-02-05T13:36:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 869 |
py
|
class MyHashSet:
def __init__(self):
"""
Initialize your data structure here.
"""
self.base = 2003
self.nums = [[] for _ in range(self.base + 1)]
def hash(self, val: int) -> int:
return val % self.base
def add(self, val: int) -> None:
key = self.hash(val)
if val not in self.nums[key]: self.nums[key].append(val)
def remove(self, val: int) -> None:
key = self.hash(val)
if val in self.nums[key]: self.nums[key].remove(val)
def contains(self, val: int) -> bool:
"""
Returns true if this set contains the specified element
"""
key = self.hash(val)
return val in self.nums[key]
# Your MyHashSet object will be instantiated and called as such:
# obj = MyHashSet()
# obj.add(key)
# obj.remove(key)
# param_3 = obj.contains(key)
|
[
"[email protected]"
] | |
823d20f448832d54b724475aa2a27e940270962e
|
e3fc83e77e218f7b8df4b14b0753fd65afd4b923
|
/downloaded_kernels/loan_data/parsed_kernels/kernel_145.py
|
0662aad9202d5179619ffc67e902f318905da38c
|
[
"MIT"
] |
permissive
|
jupste/wranglesearch
|
982684fdaa7914af59758880fdc3a4ff3346477f
|
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
|
refs/heads/master
| 2023-06-18T04:46:34.474046 | 2021-07-15T23:43:24 | 2021-07-15T23:43:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 710 |
py
|
# coding: utf-8
# In[ ]:
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here are several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
|
[
"[email protected]"
] | |
9ac994c39d47d97dd0950a832edcabe60b69ed4a
|
d6a4c07d020c6b32baaf4756ad6d86a7056743b7
|
/beta/diy/checkcookie.py
|
3a336387bae2fec392c530ab2959305aadebc556
|
[] |
no_license
|
qiuxiucai/JD_Diy
|
c3b1c4b24051bd689ce235b11a00f21bc2da90ef
|
78d06ec5e500b61be984742e7c6b0585346c8e59
|
refs/heads/master
| 2023-08-03T06:53:58.981090 | 2021-09-11T07:09:44 | 2021-09-11T07:09:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,403 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
import os
import re
import sys
import time
from asyncio import exceptions
import requests
from telethon import events
from .. import chat_id, jdbot, logger
from ..bot.utils import V4, QL, _ConfigFile, myck, _Auth
from ..diy.utils import QL8, ql_token, read, write
async def checkCookie(cookie):
url = "https://me-api.jd.com/user_new/info/GetJDUserInfoUnion"
headers = {
"Host": "me-api.jd.com",
"Accept": "*/*",
"Connection": "keep-alive",
"Cookie": cookie,
"User-Agent": "jdapp;iPhone;9.4.4;14.3;network/4g;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1",
"Accept-Language": "zh-cn",
"Referer": "https://home.m.jd.com/myJd/newhome.action?sceneval=2&ufc=&",
"Accept-Encoding": "gzip, deflate, br"
}
try:
res = requests.get(url, headers=headers)
await asyncio.sleep(2)
data = res.json()
if data['retcode'] == "1001":
return False
else:
nickname = data['data']['userInfo']['baseInfo']['nickname']
if len(nickname) < 1:
nickname = cookie.split(";")[1].split("=")[1]
return nickname
except Exception as e:
await jdbot.send_message(chat_id, f"此cookie无法完成检测,请自行斟酌!\n\n{cookie}\n\n错误:{e}")
return True
@jdbot.on(events.NewMessage(from_users=chat_id, pattern=r'^/checkcookie$'))
async def mycheckcookie(event):
try:
msg = await jdbot.send_message(chat_id, "正在检测 cookie 过期情况……")
text, o, res = '检测结果\n\n', '\n\t └ ', ""
expireds, valids, changes, removes = [], [], [],[]
if V4:
cookies = myck(_ConfigFile)[0]
for cookie in cookies:
cknum = cookies.index(cookie) + 1
check = await checkCookie(cookie)
if check:
res += f"账号{cknum}-{check}有效\n"
else:
res += f"账号{cknum}已过期\n"
expireds.append(cknum)
msg = await jdbot.edit_message(msg, res)
await asyncio.sleep(2)
elif QL8:
token = ql_token(_Auth)
headers = {'Authorization': f'Bearer {token}'}
url = 'http://127.0.0.1:5600/api/envs'
body = {'searchValue': 'JD_COOKIE'}
datas = requests.get(url, params=body, headers=headers).json()['data']
for data in datas:
cookie = data['value']
if "&" in cookie:
cookies = cookie.split("&")
len_cooke = len(cookies)
for ck in cookies:
check = await checkCookie(ck)
if check:
res += f"{check} Cookie:{ck} 有效\n"
else:
res += f"Cookie:{ck} 已过期\n"
cookies.remove(ck)
removes.append(ck)
msg = await jdbot.edit_message(msg, res)
await asyncio.sleep(1)
if len(cookies) != len_cooke:
changes.append(
                            [data['remarks'] if 'remarks' in data.keys() else 'no remark', '&'.join(cookies), data['_id']])
else:
cknum = datas.index(data) + 1
check = await checkCookie(cookie)
if check:
res += f"账号{cknum}-{check}有效\n"
valids.append([data['_id'], data['remarks'] if 'remarks' in data.keys() else '未备注', cknum])
else:
res += f"账号{cknum}已过期\n"
expireds.append([data['_id'], cknum])
msg = await jdbot.edit_message(msg, res)
await asyncio.sleep(1)
else:
token = ql_token(_Auth)
headers = {'Authorization': f'Bearer {token}'}
url = 'http://127.0.0.1:5600/api/cookies'
body = {'t': int(round(time.time() * 1000))}
datas = requests.get(url, params=body, headers=headers).json()['data']
valids = []
for data in datas:
cknum = datas.index(data) + 1
check = await checkCookie(data['value'])
if check:
res += f"账号{cknum}-{check}有效\n"
valids.append([data['_id'], data['nickname'], cknum])
else:
res += f"账号{cknum}已过期\n"
expireds.append([data['_id'], cknum])
msg = await jdbot.edit_message(msg, res)
await asyncio.sleep(1)
if V4:
configs = read("list")
for config in configs:
i = configs.index(config)
if config.find("TempBlockCookie") != -1 and config.find("##") == -1 and configs[i + 1].find(";") == -1:
line = configs.index(config)
Temp = configs[line][:-1]
configs[line] = f"{Temp}program\n"
configs = ''.join(configs)
break
n = " ".join('%s' % expired for expired in expireds)
            configs = re.sub(r'TempBlockCookie=".*"program', f'TempBlockCookie="{n}"', configs, flags=re.M)  # the 4th positional arg of re.sub is count, not flags
            text += f'[Blocked]{o}TempBlockCookie="{n}"\n'
write(configs)
await jdbot.edit_message(msg, text)
elif QL:
token = ql_token(_Auth)
headers = {'Authorization': f'Bearer {token}'}
if expireds:
                text += '[Disabled]\n'
for expired in expireds:
if QL8:
url = 'http://127.0.0.1:5600/api/envs/disable'
body = [f"{expired[0]}"]
r = requests.put(url, json=body, headers=headers)
if r.ok:
                            text += f'Account {expired[1]}:{o}disabled successfully, remember to update it soon\n'
                        else:
                            text += f'Account {expired[1]}:{o}failed to disable, please disable it manually\n'
else:
url = 'http://127.0.0.1:5600/api/cookies/disable'
body = [f"{expired[0]}"]
r = requests.put(url, json=body, headers=headers)
if r.ok:
                            text += f'Account {expired[1]}:{o}disabled successfully, remember to update it soon\n'
                        else:
                            text += f'Account {expired[1]}:{o}failed to disable, please disable it manually\n'
text += '\n'
if valids:
                text += '[Enabled]\n'
for valid in valids:
if QL8:
url = 'http://127.0.0.1:5600/api/envs/enable'
body = [f"{valid[0]}"]
r = requests.put(url, json=body, headers=headers)
if r.ok:
                            text += f'Account {valid[2]} - {valid[1]}:{o}enabled successfully\n'
                        else:
                            text += f'Account {valid[2]} - {valid[1]}:{o}failed to enable, please enable it manually\n'
else:
url = 'http://127.0.0.1:5600/api/cookies/enable'
body = [f"{valid[0]}"]
r = requests.put(url, json=body, headers=headers)
if r.ok:
                            text += f'Account {valid[2]} - {valid[1]}:{o}enabled successfully\n'
                        else:
                            text += f'Account {valid[2]} - {valid[1]}:{o}failed to enable, please enable it manually\n'
text += '\n'
if changes:
                text += '[Updated]\n'
for change in changes:
url = 'http://127.0.0.1:5600/api/envs'
body = {
"name": "JD_COOKIE",
"remarks": change[0],
"value": change[1],
"_id": change[2]
}
r = requests.put(url, json=body, headers=headers)
                if r.ok:
                    text += f'Updated JD_COOKIE:{o}{body["value"]}\nRemoved cookies:{o}{" ".join(removes)}\n\n'
                else:
                    text += f'Updated JD_COOKIE:{o}update failed, please update it manually\n'
await jdbot.edit_message(msg, text)
except exceptions.TimeoutError:
        await jdbot.send_message(chat_id, 'Selection timed out, the dialogue has ended, thanks for using the bot')
except Exception as e:
        title = "[💥Error💥]"
        name = "File name: " + os.path.split(__file__)[-1].split(".")[0]
        function = "Function name: " + sys._getframe().f_code.co_name
        tip = 'Try searching Baidu/Google for the cause'
        await jdbot.send_message(chat_id, f"{title}\n\n{name}\n{function}\nError cause: {str(e)}\n\n{tip}")
        logger.error(f"Error ---> {str(e)}")
|
[
"[email protected]"
] | |
a9a19434ebb517c7a921ab19b578cb91f3b4122c
|
842e3cd1266d18752a3baf2b90232ed4ce41eb4f
|
/grako/_config.py
|
03667f3d52c03b56dfbf518acb6fd9cceeda8d79
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
apalala/grako
|
2786d85eef9799bf614c46c92f19ff183a435d46
|
efb373d89e6805930e661758c2cff2b26da4658a
|
refs/heads/master
| 2020-12-25T17:37:05.353167 | 2017-05-02T02:53:11 | 2017-05-02T02:53:11 | 65,163,853 | 16 | 6 | null | null | null | null |
UTF-8
|
Python
| false | false | 178 |
py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by Juancarlo Añez
# Copyright (C) 2012-2016 by Juancarlo Añez and Thomas Bragg
__toolname__ = 'Grako'
__version__ = '3.22.1'
|
[
"[email protected]"
] | |
2b230404d0e84a18fc16f8ce7256f407c6c35f18
|
2c9eadb22d2de54ac06c6731664ed65276fd7062
|
/pipeline/contrib/periodic_task/tasks.py
|
f7bd50ffbcc0f37ed5abc9245031aeceea715a5d
|
[
"MIT",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
TencentBlueKing/bk-nodeman
|
34bba9b217d84bb4dad27e735c29361a5c62789b
|
72d2104783443bff26c752c5bd934a013b302b6d
|
refs/heads/v2.4.x
| 2023-08-19T01:27:58.805715 | 2023-08-10T02:59:31 | 2023-08-10T02:59:31 | 385,203,367 | 54 | 49 |
MIT
| 2023-09-14T06:51:33 | 2021-07-12T10:05:11 |
Python
|
UTF-8
|
Python
| false | false | 3,327 |
py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import datetime
import logging
import traceback
import pytz
from celery import task
from django.utils import timezone
from pipeline.contrib.periodic_task import signals
from pipeline.contrib.periodic_task.models import PeriodicTask, PeriodicTaskHistory
from pipeline.engine.models import FunctionSwitch
from pipeline.models import PipelineInstance
logger = logging.getLogger("celery")
@task(ignore_result=True)
def periodic_task_start(*args, **kwargs):
try:
periodic_task = PeriodicTask.objects.get(id=kwargs["period_task_id"])
except PeriodicTask.DoesNotExist:
# task has been deleted
return
if FunctionSwitch.objects.is_frozen():
PeriodicTaskHistory.objects.record_schedule(
periodic_task=periodic_task,
pipeline_instance=None,
ex_data="engine is frozen, can not start task",
start_success=False,
)
return
try:
tz = periodic_task.celery_task.crontab.timezone
now = datetime.datetime.now(tz=pytz.utc).astimezone(tz)
instance = PipelineInstance.objects.create_instance(
template=periodic_task.template,
exec_data=periodic_task.execution_data,
spread=kwargs.get("spread", True),
name="{}_{}".format(periodic_task.name[:113], now.strftime("%Y%m%d%H%M%S")),
creator=periodic_task.creator,
description="periodic task instance",
)
signals.pre_periodic_task_start.send(
sender=PeriodicTask, periodic_task=periodic_task, pipeline_instance=instance
)
result = instance.start(
periodic_task.creator, check_workers=False, priority=periodic_task.priority, queue=periodic_task.queue
)
except Exception:
et = traceback.format_exc()
logger.error(et)
PeriodicTaskHistory.objects.record_schedule(
periodic_task=periodic_task, pipeline_instance=None, ex_data=et, start_success=False
)
return
if not result.result:
PeriodicTaskHistory.objects.record_schedule(
periodic_task=periodic_task, pipeline_instance=None, ex_data=result.message, start_success=False
)
return
periodic_task.total_run_count += 1
periodic_task.last_run_at = timezone.now()
periodic_task.save()
signals.post_periodic_task_start.send(sender=PeriodicTask, periodic_task=periodic_task, pipeline_instance=instance)
PeriodicTaskHistory.objects.record_schedule(periodic_task=periodic_task, pipeline_instance=instance, ex_data="")
|
[
"[email protected]"
] | |
aea944b0b2ea135e01e165057d5e429f2000308a
|
6b78bd7f62f7f407bf11d877cc4d91e7db3b62fe
|
/csc/python/Intro-Python-I/src/13_file_io.py
|
0bf87c9f36afd02ad1c53557568a38d023ac4bce
|
[] |
no_license
|
PascalUlor/code-challenges
|
b85efacd4bc5999a0748d1fa1e84f503be09dc94
|
6488d0a6d2729bd50b106573f16488479fd6e264
|
refs/heads/master
| 2023-03-03T17:50:18.413127 | 2023-02-21T13:10:02 | 2023-02-21T13:10:02 | 212,979,719 | 1 | 0 | null | 2023-02-15T22:59:13 | 2019-10-05T10:14:29 |
Python
|
UTF-8
|
Python
| false | false | 1,198 |
py
|
"""
Python makes performing file I/O simple. Take a look
at how to read and write to files here:
https://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files
"""
import os
cwd = os.getcwd() # Get the current working directory (cwd)
files = os.listdir(cwd) # Get all the files in that directory
print("Files in %r: %s" % (cwd, files))
os.chdir(r'src')
# Open up the "foo.txt" file (which already exists) for reading
# Print all the contents of the file, then close the file
# YOUR CODE HERE
def open_file(data):
    with open(data, 'r') as doc:  # the with-block closes the file automatically
        content = doc.read()
        print(content)
open_file('foo.txt')
# Open up a file called "bar.txt" (which doesn't exist yet) for
# writing. Write three lines of arbitrary content to that file,
# then close the file. Open up "bar.txt" and inspect it to make
# sure that it contains what you expect it to contain
# YOUR CODE HERE
def write_file():
    l = ['pear\n', 'apple\n', 'orange\n',
         'mandarin\n', 'watermelon\n', 'pomegranate\n']
    with open('pascal.txt', 'w') as doc:  # the with-block closes the file automatically
        for item in l:
            doc.write(item)
write_file()
open_file('pascal.txt')
|
[
"[email protected]"
] | |
29058817ee9d433c087476175de412e5db922af4
|
2c74bb301f1ed83b79254944183ac5a18a639fdf
|
/tests/components/rituals_perfume_genie/test_config_flow.py
|
3582f49598cb373ccfa3c0a6a1533dd8f2b49cc5
|
[
"Apache-2.0"
] |
permissive
|
Adminiuga/home-assistant
|
5bec93007ddac1a268cc359bf7e48530c5f73b38
|
dcf68d768e4f628d038f1fdd6e40bad713fbc222
|
refs/heads/dev
| 2023-02-22T22:03:31.013931 | 2022-11-09T00:27:20 | 2022-11-09T00:27:20 | 123,929,062 | 5 | 4 |
Apache-2.0
| 2023-02-22T06:14:31 | 2018-03-05T14:11:09 |
Python
|
UTF-8
|
Python
| false | false | 3,789 |
py
|
"""Test the Rituals Perfume Genie config flow."""
from http import HTTPStatus
from unittest.mock import AsyncMock, MagicMock, patch
from aiohttp import ClientResponseError
from pyrituals import AuthenticationException
from homeassistant import config_entries
from homeassistant.components.rituals_perfume_genie.const import ACCOUNT_HASH, DOMAIN
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
TEST_EMAIL = "[email protected]"
VALID_PASSWORD = "passw0rd"
WRONG_PASSWORD = "wrong-passw0rd"
def _mock_account(*_):
account = MagicMock()
account.authenticate = AsyncMock()
account.account_hash = "any"
account.email = TEST_EMAIL
return account
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.rituals_perfume_genie.config_flow.Account",
side_effect=_mock_account,
), patch(
"homeassistant.components.rituals_perfume_genie.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_EMAIL: TEST_EMAIL,
CONF_PASSWORD: VALID_PASSWORD,
},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_EMAIL
assert isinstance(result2["data"][ACCOUNT_HASH], str)
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.rituals_perfume_genie.config_flow.Account.authenticate",
side_effect=AuthenticationException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_EMAIL: TEST_EMAIL,
CONF_PASSWORD: WRONG_PASSWORD,
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_auth_exception(hass):
"""Test we handle auth exception."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.rituals_perfume_genie.config_flow.Account.authenticate",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_EMAIL: TEST_EMAIL,
CONF_PASSWORD: VALID_PASSWORD,
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.rituals_perfume_genie.config_flow.Account.authenticate",
side_effect=ClientResponseError(
None, None, status=HTTPStatus.INTERNAL_SERVER_ERROR
),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_EMAIL: TEST_EMAIL,
CONF_PASSWORD: VALID_PASSWORD,
},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
|
[
"[email protected]"
] | |
9788a113b67bc1688dfc6029b4307f0c4f4a0ae5
|
d0758e0ca004226cec8ad8b26c9565c98534a8b8
|
/10-iot/A08_Timers/main.py
|
fb8ca3b0af0f866b91b8576dbba52d491a05c9ca
|
[] |
no_license
|
pythoncanarias/eoi
|
334d64a96afc76ac1fa10282378f291b6d8c94b3
|
349367254f85e3e4273cede067ca950913a1332c
|
refs/heads/master
| 2023-07-06T08:00:11.366345 | 2023-06-30T15:19:33 | 2023-06-30T15:19:33 | 222,742,870 | 26 | 19 | null | 2023-06-25T16:03:46 | 2019-11-19T16:41:25 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 552 |
py
|
from machine import Timer
import utime
# Created by Daniel Alvarez ([email protected]) for the EOI Python course (eoi.es)
tim1 = Timer(-1) # -1 for a virtual timer (RTOS-based)
tim1.init(period=2500, mode=Timer.ONE_SHOT, callback=lambda x:print("#### this runs only once"))
tim2 = Timer(-1)
tim2.init(period=1000, mode=Timer.PERIODIC, callback=lambda x:print("this runs periodically"))
print("start")
utime.sleep(10) # meanwhile we can do other things
tim2.deinit() # disable the periodic timer
print("end")
|
[
"[email protected]"
] | |
2c0df1c54b9524d5226128cc8385117b79d65e35
|
59ac85afd2c2bc45ad8d3576412abdb3b3e97ca4
|
/abc/abstract_class.py
|
273493371254c038b25a14e4193a641b79ec7abe
|
[] |
no_license
|
xaneon/NetworkAutomation
|
1833bd22b4a573a7ec1e2266bc44abf9b7bdbf11
|
2560194047b93442ea4f8d822e2b20c77256d5c9
|
refs/heads/master
| 2020-06-12T09:23:28.770655 | 2019-07-18T15:11:16 | 2019-07-18T15:11:16 | 194,256,868 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 406 |
py
|
from abc import ABC, abstractmethod
class abstrakteKlasse(ABC):
def __init__(self, wert):
self.wert = wert
super().__init__()
@abstractmethod
def methode(self):
print("Implementation einer Methode")
class unterKlasse(abstrakteKlasse):
def methode(self):
super().methode()
print(self.wert)
U = unterKlasse(42)
U.methode()
A = abstrakteKlasse(42)  # raises TypeError: abstract classes cannot be instantiated
|
[
"[email protected]"
] | |
c2a0329c735e5460445333113559810b77c9c3aa
|
2fac796fa58c67fb5a4a95a6e7f28cbef169318b
|
/python/plus-one.py
|
efcbb3b165edecd501ea0840a8ea5cf5c2953ed2
|
[] |
no_license
|
jwyx3/practices
|
f3fe087432e79c8e34f3af3a78dd10278b66dd38
|
6fec95b9b4d735727160905e754a698513bfb7d8
|
refs/heads/master
| 2021-03-12T20:41:59.816448 | 2019-04-14T06:47:30 | 2019-04-14T06:47:30 | 18,814,777 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 432 |
py
|
class Solution:
# @param {int[]} digits a number represented as an array of digits
# @return {int[]} the result
def plusOne(self, digits):
accum = 0
for i in range(len(digits) - 1, -1, -1):
            s = digits[i] + 1  # the carry, when present, is always exactly 1
            digits[i] = s % 10
            accum = s // 10  # integer division keeps the carry an int on Python 3
if accum == 0:
break
if accum == 1:
digits.insert(0, 1)
return digits
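# Example (added): Solution().plusOne([1, 2, 9]) -> [1, 3, 0], and the
# all-nines input [9, 9] grows to [1, 0, 0] via the final insert.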
|
[
"[email protected]"
] | |
5ed5bcc26e2db118fd1170a5b9a0f5080180348d
|
c90ddd0930894c565197b739cd76140a7151fffd
|
/HLTrigger/Configuration/python/HLT_75e33/modules/hltEle5WP70HgcalIsoL1SeededFilter_cfi.py
|
70dd91b6f62ac3f26070f6991491084c7c0cb402
|
[
"Apache-2.0"
] |
permissive
|
p2l1pfp/cmssw
|
9cc6b111ff1935e49f86ec3da9f9b84fb13bbcdf
|
9f0a3a22fe451c25114134c30ac1f5c1261f3183
|
refs/heads/L1PF_12_5_X
| 2023-08-17T00:38:15.374760 | 2023-06-13T12:55:57 | 2023-06-13T12:55:57 | 127,881,751 | 6 | 1 |
Apache-2.0
| 2023-09-05T13:54:59 | 2018-04-03T09:10:17 |
C++
|
UTF-8
|
Python
| false | false | 1,256 |
py
|
import FWCore.ParameterSet.Config as cms
hltEle5WP70HgcalIsoL1SeededFilter = cms.EDFilter("HLTEgammaGenericQuadraticEtaFilter",
absEtaLowEdges = cms.vdouble(0.0, 1.0, 1.479, 2.0),
candTag = cms.InputTag("hltEle5WP70EcalIsoL1SeededFilter"),
doRhoCorrection = cms.bool(False),
effectiveAreas = cms.vdouble(0.0, 0.0, 0.0, 0.0),
energyLowEdges = cms.vdouble(0.0),
etaBoundaryEB12 = cms.double(1.0),
etaBoundaryEE12 = cms.double(2.0),
l1EGCand = cms.InputTag("hltEgammaCandidatesL1Seeded"),
lessThan = cms.bool(True),
ncandcut = cms.int32(1),
rhoMax = cms.double(99999999.0),
rhoScale = cms.double(1.0),
rhoTag = cms.InputTag(""),
saveTags = cms.bool(True),
thrOverE2EB1 = cms.vdouble(0.0),
thrOverE2EB2 = cms.vdouble(0.0),
thrOverE2EE1 = cms.vdouble(0.0),
thrOverE2EE2 = cms.vdouble(0.0),
thrOverEEB1 = cms.vdouble(0.05),
thrOverEEB2 = cms.vdouble(0.05),
thrOverEEE1 = cms.vdouble(0.05),
thrOverEEE2 = cms.vdouble(0.05),
thrRegularEB1 = cms.vdouble(130),
thrRegularEB2 = cms.vdouble(130),
thrRegularEE1 = cms.vdouble(130),
thrRegularEE2 = cms.vdouble(340),
useEt = cms.bool(False),
varTag = cms.InputTag("hltEgammaHGCalLayerClusterIsoL1Seeded")
)
|
[
"[email protected]"
] | |
5a7432cbb3e846fadc8f16d7b566fed820309247
|
950b22416bc371d14b24c3310ce088cbbcde0eb3
|
/apps/leadb/creditfields.py
|
7253a03c77ad350e24b7a29357244f403c349d31
|
[] |
no_license
|
CrabbyPete/brightmap
|
3aa6ac2637ea6e2b47287565fe0699912c70eb3d
|
dad5447af5eefb9500d020e1421725c6c30fa92d
|
refs/heads/master
| 2020-05-25T18:15:14.167931 | 2017-03-14T13:41:45 | 2017-03-14T13:41:45 | 5,615,006 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,978 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date
from calendar import monthrange
from django import forms
from django.conf import settings
from django.utils.translation import ugettext as _
from creditcard import verify_credit_card
class CreditCardField(forms.CharField):
"""
Form field for checking out a credit card.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 20)
super(CreditCardField, self).__init__(*args, **kwargs)
def clean(self, value):
"""
Raises a ValidationError if the card is not valid
and stashes card type.
"""
self.card_type = verify_credit_card(value,
allow_test=settings.AUTHNET_DEBUG)
if self.card_type is None:
raise forms.ValidationError("Invalid credit card number.")
return value
# Credit Card Expiry Fields from:
# http://www.djangosnippets.org/snippets/907/
class CreditCardExpiryWidget(forms.MultiWidget):
"""MultiWidget for representing credit card expiry date."""
def decompress(self, value):
if value:
return [value.month, value.year]
else:
return [None, None]
def format_output(self, rendered_widgets):
html = rendered_widgets[0]+rendered_widgets[1]
return u'<span>%s</span>' % html
class CreditCardExpiryField(forms.MultiValueField):
EXP_MONTH = [(x, "%02d" % x) for x in xrange(1, 13)]
EXP_YEAR = [(x, x) for x in xrange(date.today().year,
date.today().year + 15)]
default_error_messages = {
'invalid_month': u'Enter a valid month.',
'invalid_year': u'Enter a valid year.',
}
def __init__(self, *args, **kwargs):
errors = self.default_error_messages.copy()
if 'error_messages' in kwargs:
errors.update(kwargs['error_messages'])
fields = (
forms.ChoiceField(
choices=self.EXP_MONTH,
error_messages={'invalid': errors['invalid_month']}),
forms.ChoiceField(
choices=self.EXP_YEAR,
error_messages={'invalid': errors['invalid_year']}),
)
super(CreditCardExpiryField, self).__init__(fields, *args, **kwargs)
self.widget = CreditCardExpiryWidget(widgets=[fields[0].widget,
fields[1].widget])
def clean(self, value):
exp = super(CreditCardExpiryField, self).clean(value)
if date.today() > exp:
raise forms.ValidationError(
"The expiration date you entered is in the past.")
return exp
def compress(self, data_list):
if data_list:
if data_list[1] in forms.fields.EMPTY_VALUES:
error = self.error_messages['invalid_year']
raise forms.ValidationError(error)
if data_list[0] in forms.fields.EMPTY_VALUES:
error = self.error_messages['invalid_month']
raise forms.ValidationError(error)
year = int(data_list[1])
month = int(data_list[0])
# find last day of the month
day = monthrange(year, month)[1]
return date(year, month, day)
return None
class CreditCardCVV2Field(forms.CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 4)
super(CreditCardCVV2Field, self).__init__(*args, **kwargs)
# Country Field from:
# http://www.djangosnippets.org/snippets/494/
# http://xml.coverpages.org/country3166.html
COUNTRIES = (
('US', _('United States of America')),
('CA', _('Canada')),
('AD', _('Andorra')),
('AE', _('United Arab Emirates')),
('AF', _('Afghanistan')),
('AG', _('Antigua & Barbuda')),
('AI', _('Anguilla')),
('AL', _('Albania')),
('AM', _('Armenia')),
('AN', _('Netherlands Antilles')),
('AO', _('Angola')),
('AQ', _('Antarctica')),
('AR', _('Argentina')),
('AS', _('American Samoa')),
('AT', _('Austria')),
('AU', _('Australia')),
('AW', _('Aruba')),
('AZ', _('Azerbaijan')),
('BA', _('Bosnia and Herzegovina')),
('BB', _('Barbados')),
('BD', _('Bangladesh')),
('BE', _('Belgium')),
('BF', _('Burkina Faso')),
('BG', _('Bulgaria')),
('BH', _('Bahrain')),
('BI', _('Burundi')),
('BJ', _('Benin')),
('BM', _('Bermuda')),
('BN', _('Brunei Darussalam')),
('BO', _('Bolivia')),
('BR', _('Brazil')),
('BS', _('Bahama')),
('BT', _('Bhutan')),
('BV', _('Bouvet Island')),
('BW', _('Botswana')),
('BY', _('Belarus')),
('BZ', _('Belize')),
('CC', _('Cocos (Keeling) Islands')),
('CF', _('Central African Republic')),
('CG', _('Congo')),
('CH', _('Switzerland')),
('CI', _('Ivory Coast')),
    ('CK', _('Cook Islands')),
('CL', _('Chile')),
('CM', _('Cameroon')),
('CN', _('China')),
('CO', _('Colombia')),
('CR', _('Costa Rica')),
('CU', _('Cuba')),
('CV', _('Cape Verde')),
('CX', _('Christmas Island')),
('CY', _('Cyprus')),
('CZ', _('Czech Republic')),
('DE', _('Germany')),
('DJ', _('Djibouti')),
('DK', _('Denmark')),
('DM', _('Dominica')),
('DO', _('Dominican Republic')),
('DZ', _('Algeria')),
('EC', _('Ecuador')),
('EE', _('Estonia')),
('EG', _('Egypt')),
('EH', _('Western Sahara')),
('ER', _('Eritrea')),
('ES', _('Spain')),
('ET', _('Ethiopia')),
('FI', _('Finland')),
('FJ', _('Fiji')),
('FK', _('Falkland Islands (Malvinas)')),
('FM', _('Micronesia')),
('FO', _('Faroe Islands')),
('FR', _('France')),
('FX', _('France, Metropolitan')),
('GA', _('Gabon')),
('GB', _('United Kingdom (Great Britain)')),
('GD', _('Grenada')),
('GE', _('Georgia')),
('GF', _('French Guiana')),
('GH', _('Ghana')),
('GI', _('Gibraltar')),
('GL', _('Greenland')),
('GM', _('Gambia')),
('GN', _('Guinea')),
('GP', _('Guadeloupe')),
('GQ', _('Equatorial Guinea')),
('GR', _('Greece')),
('GS', _('South Georgia and the South Sandwich Islands')),
('GT', _('Guatemala')),
('GU', _('Guam')),
('GW', _('Guinea-Bissau')),
('GY', _('Guyana')),
('HK', _('Hong Kong')),
('HM', _('Heard & McDonald Islands')),
('HN', _('Honduras')),
('HR', _('Croatia')),
('HT', _('Haiti')),
('HU', _('Hungary')),
('ID', _('Indonesia')),
('IE', _('Ireland')),
('IL', _('Israel')),
('IN', _('India')),
('IO', _('British Indian Ocean Territory')),
('IQ', _('Iraq')),
('IR', _('Islamic Republic of Iran')),
('IS', _('Iceland')),
('IT', _('Italy')),
('JM', _('Jamaica')),
('JO', _('Jordan')),
('JP', _('Japan')),
('KE', _('Kenya')),
('KG', _('Kyrgyzstan')),
('KH', _('Cambodia')),
('KI', _('Kiribati')),
('KM', _('Comoros')),
('KN', _('St. Kitts and Nevis')),
('KP', _('Korea, Democratic People\'s Republic of')),
('KR', _('Korea, Republic of')),
('KW', _('Kuwait')),
('KY', _('Cayman Islands')),
('KZ', _('Kazakhstan')),
('LA', _('Lao People\'s Democratic Republic')),
('LB', _('Lebanon')),
('LC', _('Saint Lucia')),
('LI', _('Liechtenstein')),
('LK', _('Sri Lanka')),
('LR', _('Liberia')),
('LS', _('Lesotho')),
('LT', _('Lithuania')),
('LU', _('Luxembourg')),
('LV', _('Latvia')),
('LY', _('Libyan Arab Jamahiriya')),
('MA', _('Morocco')),
('MC', _('Monaco')),
('MD', _('Moldova, Republic of')),
('MG', _('Madagascar')),
('MH', _('Marshall Islands')),
('ML', _('Mali')),
('MN', _('Mongolia')),
('MM', _('Myanmar')),
('MO', _('Macau')),
('MP', _('Northern Mariana Islands')),
('MQ', _('Martinique')),
('MR', _('Mauritania')),
    ('MS', _('Montserrat')),
('MT', _('Malta')),
('MU', _('Mauritius')),
('MV', _('Maldives')),
('MW', _('Malawi')),
('MX', _('Mexico')),
('MY', _('Malaysia')),
('MZ', _('Mozambique')),
('NA', _('Namibia')),
('NC', _('New Caledonia')),
('NE', _('Niger')),
('NF', _('Norfolk Island')),
('NG', _('Nigeria')),
('NI', _('Nicaragua')),
('NL', _('Netherlands')),
('NO', _('Norway')),
('NP', _('Nepal')),
('NR', _('Nauru')),
('NU', _('Niue')),
('NZ', _('New Zealand')),
('OM', _('Oman')),
('PA', _('Panama')),
('PE', _('Peru')),
('PF', _('French Polynesia')),
('PG', _('Papua New Guinea')),
('PH', _('Philippines')),
('PK', _('Pakistan')),
('PL', _('Poland')),
('PM', _('St. Pierre & Miquelon')),
('PN', _('Pitcairn')),
('PR', _('Puerto Rico')),
('PT', _('Portugal')),
('PW', _('Palau')),
('PY', _('Paraguay')),
('QA', _('Qatar')),
('RE', _('Reunion')),
('RO', _('Romania')),
('RU', _('Russian Federation')),
('RW', _('Rwanda')),
('SA', _('Saudi Arabia')),
('SB', _('Solomon Islands')),
('SC', _('Seychelles')),
('SD', _('Sudan')),
('SE', _('Sweden')),
('SG', _('Singapore')),
('SH', _('St. Helena')),
('SI', _('Slovenia')),
('SJ', _('Svalbard & Jan Mayen Islands')),
('SK', _('Slovakia')),
('SL', _('Sierra Leone')),
('SM', _('San Marino')),
('SN', _('Senegal')),
('SO', _('Somalia')),
('SR', _('Suriname')),
('ST', _('Sao Tome & Principe')),
('SV', _('El Salvador')),
('SY', _('Syrian Arab Republic')),
('SZ', _('Swaziland')),
('TC', _('Turks & Caicos Islands')),
('TD', _('Chad')),
('TF', _('French Southern Territories')),
('TG', _('Togo')),
('TH', _('Thailand')),
('TJ', _('Tajikistan')),
('TK', _('Tokelau')),
('TM', _('Turkmenistan')),
('TN', _('Tunisia')),
('TO', _('Tonga')),
('TP', _('East Timor')),
('TR', _('Turkey')),
('TT', _('Trinidad & Tobago')),
('TV', _('Tuvalu')),
('TW', _('Taiwan, Province of China')),
('TZ', _('Tanzania, United Republic of')),
('UA', _('Ukraine')),
('UG', _('Uganda')),
('UM', _('United States Minor Outlying Islands')),
('UY', _('Uruguay')),
('UZ', _('Uzbekistan')),
('VA', _('Vatican City State (Holy See)')),
('VC', _('St. Vincent & the Grenadines')),
('VE', _('Venezuela')),
('VG', _('British Virgin Islands')),
('VI', _('United States Virgin Islands')),
('VN', _('Viet Nam')),
('VU', _('Vanuatu')),
('WF', _('Wallis & Futuna Islands')),
('WS', _('Samoa')),
('YE', _('Yemen')),
('YT', _('Mayotte')),
('YU', _('Yugoslavia')),
('ZA', _('South Africa')),
('ZM', _('Zambia')),
('ZR', _('Zaire')),
('ZW', _('Zimbabwe')),
('ZZ', _('Unknown or unspecified country')),
)
class CountryField(forms.ChoiceField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('choices', COUNTRIES)
super(CountryField, self).__init__(*args, **kwargs)
|
[
"[email protected]"
] | |
80dfa8532967766e18e04183fad7d4cc19314823
|
bee77315d08def61c1155930285211ef3d8d7654
|
/nevergrad/functions/topology_optimization/core.py
|
9e1854a0ddb44d1eb2798e9cbd41db0f372b7d58
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
facebookresearch/nevergrad
|
d2da592c1bf3b7c398392b3d39217a3753a4912c
|
daddb18184bf64ba9082ecc55a56e07429a23103
|
refs/heads/main
| 2023-09-04T10:53:42.903505 | 2023-08-30T17:10:37 | 2023-08-30T17:10:37 | 158,468,845 | 3,526 | 367 |
MIT
| 2023-09-11T13:37:36 | 2018-11-21T00:33:17 |
Python
|
UTF-8
|
Python
| false | false | 1,891 |
py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Based on a discussion at Dagstuhl's seminar on Computational Intelligence in Games with:
# - Dan Ashlock
# - Chiara Sironi
# - Guenter Rudolph
# - Jialin Liu
import numpy as np
from nevergrad.parametrization import parameter as p
from ..base import ExperimentFunction
from scipy.ndimage import gaussian_filter
class TO(ExperimentFunction):
def __init__(self, n: int = 50) -> None:
super().__init__(
self._simulate_to, p.Array(shape=(n, n), lower=-1.0, upper=1.0).set_name(f"array{n}x{n}")
)
self.n = n
self.idx = self.parametrization.random_state.randint(50000)
def _simulate_to(self, x: np.ndarray) -> float:
x = x.reshape(self.n, self.n)
idx = self.idx
n = self.n
xa = idx % 3
size = n * n
sqrtsize = n
xb = 2 - xa
if (idx // 12) % 2 > 0:
xs = 1.5 * (
np.array(
[
float(np.cos(self.idx * 0.01 + xa * i + xb * j) < 0.0)
for i in range(n)
for j in range(n)
]
).reshape(n, n)
- 0.5
)
else:
xs = 1.5 * (
np.array(
[float((self.idx * 0.01 + xa * i + xb * j) > 1.6 * n) for i in range(n) for j in range(n)]
).reshape(n, n)
- 0.5
)
if (idx // 3) % 2 > 0:
xs = np.transpose(xs)
if (idx // 6) % 2 > 0:
xs = -xs
return (
5.0 * np.sum(np.abs(x - xs) > 0.3) / size
+ 3.0 * np.linalg.norm(x - gaussian_filter(x, sigma=3)) / sqrtsize
)
|
[
"[email protected]"
] | |
2fc4dba57667c84709b6c1d9ee331d86b4c34248
|
d0a3d3ea055152b141c24f7cebf06892599e4d73
|
/autoTest/day8/04下载文件.py
|
2af4fad086557cbc9534686ac920cbe969f949f0
|
[] |
no_license
|
limiyou/Pyproject
|
bffe0bc880509a9e525f03568bf9898ed7af80a3
|
2c5cd25a5a5123eb61fdcb846ad5f7bd3bf145d1
|
refs/heads/master
| 2023-06-28T20:56:28.793740 | 2021-08-09T02:30:52 | 2021-08-09T02:30:52 | 393,281,667 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,004 |
py
|
import requests
url = 'https://img1.bitautoimg.com/bitauto/2013/01/23/854be0a2-ef1c-440a-926d-94e1e5051e18.jpg'
resp = requests.get(url)
with open('bmw.jpg', 'wb') as f: # wb: write in bytes
    f.write(resp.content)  # resp.content is the response body as bytes
# Question: what if the file being downloaded is a 4 GB movie?
# Answer: stream the download in chunks
def downloader(url,filename,size=1024*4):
"""
下载大文件
:param url: 下载地址
:param filename: 保存的文件名
:param size: 分块下载的大小,默认值是4KB
:return: None
"""
with requests.get(url,stream=True) as req:
with open(filename,'wb') as f:
for chunk in req.iter_content(chunk_size=size):
                # keep writing as long as chunks keep arriving
if chunk:
f.write(chunk)
if __name__ == '__main__':
downloader(url='https://img1.baidu.com/it/u=112214144,1044341636&fm=11&fmt=auto&gp=0.jpg',filename='2.jpg')
|
[
"[email protected]"
] | |
aedddbffc558e1e94a49de45294167bd85b9fc3a
|
11514265e06c7326d376650400a28bfec667f8d6
|
/lifelines/tests/test_statistics.py
|
215130716ab6ce4b418ada393907d100b4b1e8e3
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
fengyinyang/lifelines
|
dcd6a5f7e5e7cccf4fc9a4919d87eee6d6c03b0e
|
4d951e24e45de533adf61c4e7d12c905f122ae6b
|
refs/heads/master
| 2021-01-19T21:29:06.641207 | 2015-01-25T17:51:02 | 2015-01-25T17:51:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,642 |
py
|
from __future__ import print_function
import numpy.testing as npt
from scipy.stats import beta
from ..statistics import *
from ..datasets import load_waltons, load_g3
def test_unequal_intensity_with_random_data():
data1 = np.random.exponential(5, size=(2000, 1))
data2 = np.random.exponential(1, size=(2000, 1))
summary, p_value, result = logrank_test(data1, data2)
assert result
def test_logrank_test_output_against_R():
df = load_g3()
ix = (df['group'] == 'RIT')
d1, e1 = df.ix[ix]['time'], df.ix[ix]['event']
d2, e2 = df.ix[~ix]['time'], df.ix[~ix]['event']
expected = 0.0138
summary, p_value, result = logrank_test(d1, d2, event_observed_A=e1, event_observed_B=e2)
assert abs(p_value - expected) < 0.0001
def test_unequal_intensity_event_observed():
data1 = np.random.exponential(5, size=(2000, 1))
data2 = np.random.exponential(1, size=(2000, 1))
eventA = np.random.binomial(1, 0.5, size=(2000, 1))
eventB = np.random.binomial(1, 0.5, size=(2000, 1))
summary, p_value, result = logrank_test(data1, data2, event_observed_A=eventA, event_observed_B=eventB)
assert result
def test_integer_times_logrank_test():
data1 = np.random.exponential(5, size=(2000, 1)).astype(int)
data2 = np.random.exponential(1, size=(2000, 1)).astype(int)
summary, p_value, result = logrank_test(data1, data2)
assert result
def test_waltons_dataset():
df = load_waltons()
ix = df['group'] == 'miR-137'
waltonT1 = df.ix[ix]['T']
waltonT2 = df.ix[~ix]['T']
summary, p_value, result = logrank_test(waltonT1, waltonT2)
assert result
def test_logrank_test_is_symmetric():
data1 = np.random.exponential(5, size=(2000, 1)).astype(int)
data2 = np.random.exponential(1, size=(2000, 1)).astype(int)
summary1, p_value1, result1 = logrank_test(data1, data2)
summary2, p_value2, result2 = logrank_test(data2, data1)
assert abs(p_value2 - p_value1) < 10e-8
assert result2 == result1
def test_multivariate_unequal_intensities():
T = np.random.exponential(10, size=300)
g = np.random.binomial(2, 0.5, size=300)
T[g == 1] = np.random.exponential(1, size=(g == 1).sum())
s, _, result = multivariate_logrank_test(T, g)
assert result
def test_pairwise_waltons_dataset_is_significantly_different():
waltons_dataset = load_waltons()
_, _, R = pairwise_logrank_test(waltons_dataset['T'], waltons_dataset['group'])
assert R.values[0, 1]
def test_pairwise_logrank_test_with_identical_data_returns_inconclusive():
t = np.random.exponential(10, size=100)
T = np.tile(t, 3)
g = np.array([1, 2, 3]).repeat(100)
S, P, R = pairwise_logrank_test(T, g, alpha=0.99)
V = np.array([[np.nan, None, None], [None, np.nan, None], [None, None, np.nan]])
npt.assert_array_equal(R, V)
def test_multivariate_inputs_return_identical_solutions():
T = np.array([1, 2, 3])
E = np.array([1, 1, 0], dtype=bool)
G = np.array([1, 2, 1])
m_a = multivariate_logrank_test(T, G, E, suppress_print=True)
p_a = pairwise_logrank_test(T, G, E, suppress_print=True)
T = pd.Series(T)
E = pd.Series(E)
G = pd.Series(G)
m_s = multivariate_logrank_test(T, G, E, suppress_print=True)
p_s = pairwise_logrank_test(T, G, E, suppress_print=True)
assert m_a == m_s
def test_pairwise_allows_dataframes():
N = 100
df = pd.DataFrame(np.empty((N, 3)), columns=["T", "C", "group"])
df["T"] = np.random.exponential(1, size=N)
df["C"] = np.random.binomial(1, 0.6, size=N)
df["group"] = np.random.binomial(2, 0.5, size=N)
pairwise_logrank_test(df['T'], df["group"], event_observed=df["C"])
def test_log_rank_returns_None_if_equal_arrays():
T = np.random.exponential(5, size=200)
summary, p_value, result = logrank_test(T, T, alpha=0.95, suppress_print=True)
assert result is None
C = np.random.binomial(2, 0.8, size=200)
summary, p_value, result = logrank_test(T, T, C, C, alpha=0.95, suppress_print=True)
assert result is None
def test_multivariate_log_rank_is_identital_to_log_rank_for_n_equals_2():
N = 200
T1 = np.random.exponential(5, size=N)
T2 = np.random.exponential(5, size=N)
C1 = np.random.binomial(2, 0.9, size=N)
C2 = np.random.binomial(2, 0.9, size=N)
summary, p_value, result = logrank_test(T1, T2, C1, C2, alpha=0.95, suppress_print=True)
T = np.r_[T1, T2]
C = np.r_[C1, C2]
G = np.array([1] * 200 + [2] * 200)
summary_m, p_value_m, result_m = multivariate_logrank_test(T, G, C, alpha=0.95, suppress_print=True)
assert p_value == p_value_m
assert result == result_m
|
[
"[email protected]"
] | |
822710023d76c16500ca567e9991d10f2b0fce79
|
0d61f90e3a7877e91d72fed71b0895c7070dc046
|
/final_project/.history/project/menu_app/contex_processors_20201231155232.py
|
7aace4fd34e5ea6472e59cc9ad99a79d80cd0f09
|
[] |
no_license
|
lienusrob/final_project
|
44d7d90dc0b7efc0cf55501549a5af0110d09b3b
|
4164769626813f044ec2af3e7842514b5699ef77
|
refs/heads/master
| 2023-02-10T16:36:33.439215 | 2021-01-05T09:34:01 | 2021-01-05T09:34:01 | 325,002,104 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 39 |
py
|
from .models import ItemsCategory
# The .history snapshot was cut off after "def"; a minimal completion,
# assuming the processor should expose all categories to templates:
def categories(request):
    return {'categories': ItemsCategory.objects.all()}
|
[
"[email protected]"
] | |
daf8cceeed559d801a3e10a745bbed4113b7efa7
|
16e9bf428f5117954c8386afd1cd6114a1db2de8
|
/doc/examples-django/pytilsex/__init__.py
|
756da7bc26940ceb17d9c03ed3bbafcab18b36e4
|
[
"MIT"
] |
permissive
|
last-partizan/pytils
|
ec0330ff6c9ae5d957619ddee91f2515adfb2b85
|
61dc896cd33c3dc3b121f94668527b0e69fcc194
|
refs/heads/master
| 2023-07-08T12:57:05.142844 | 2023-06-27T06:51:10 | 2023-06-27T06:51:10 | 580,679 | 79 | 30 |
MIT
| 2023-07-05T23:27:48 | 2010-03-26T13:42:22 |
Python
|
UTF-8
|
Python
| false | false | 44 |
py
|
"""
Example of usage pytils with Django
"""
|
[
"devnull@localhost"
] |
devnull@localhost
|
f28b58a328699f18011079b840097f1c5daa3783
|
97b827ddf260dfc8a2725e66359e3625af84b7d1
|
/test/ssm_document_generator_test/utils/test_result.py
|
61e1ca571d9792a374abe9662683696a1ad805cf
|
[
"Apache-2.0"
] |
permissive
|
ken2190/aws-systems-manager-document-generator
|
eeea79dbae67c0b12f0d50a8412de3e8293a0037
|
2c041fd52342d95da4535fe3236e43933cc6e08d
|
refs/heads/master
| 2023-03-16T03:15:07.034439 | 2018-05-12T16:56:57 | 2018-11-04T12:26:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,392 |
py
|
import pytest
from ssm_document_generator.command.result_status import ResultStatus
from ssm_document_generator.command.result import Result
@pytest.mark.parametrize('test_input', [
[],
[1, 2],
{'foo': 'bar'}
])
def test_success(test_input):
assert Result.success(test_input) == {'status': ResultStatus.Success.value, 'result': test_input}
@pytest.mark.parametrize('error, message, expected', [
(RuntimeError('tm1'), None,
{'status': ResultStatus.Failure.value, 'status_details': 'RuntimeError', 'message': 'tm1'}),
(RuntimeError('tm1'), 'tm2',
{'status': ResultStatus.Failure.value, 'status_details': 'RuntimeError', 'message': 'tm2'}),
])
def test_failure(error, message, expected):
assert Result.failure(error, message) == expected
def raiser(exception):
"""
    Works around the fact that a lambda body must be a single expression, so it cannot contain a raise statement.
"""
raise exception
@pytest.mark.parametrize('runnable, expected', [
(lambda: [], Result.success([], metadata={'result_type': 'JSON'})),
(lambda: raiser(RuntimeError('t1')), Result.failure(RuntimeError('t1'), metadata={'result_type': 'JSON'}))
])
def test_run(runnable, expected):
result = Result.run(runnable)
result.pop('message', None)
expected.pop('message', None)
# Don't compare messages, as in run its traceback.
assert result == expected
|
[
"[email protected]"
] | |
1973184e8e2d2fdc08da62087eeff140f306cc81
|
2a32ba95aa3b5da7b7376f7a7a4df5bc932c6b90
|
/Dynamic Programming/PickupCoin.py
|
b8b4ba20c81b4dfbd81f64f6a38f4cd2baaed874
|
[] |
no_license
|
maruichen2004/EPI
|
33cb4f1860ca294c9aba460ac7f22e25c2c9b210
|
2379e83536bdbeaa7f21ceeb8f1e369a90f434a0
|
refs/heads/master
| 2016-09-06T03:32:04.412640 | 2014-11-23T05:29:59 | 2014-11-23T05:29:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 701 |
py
|
class Solution:
# Time: O(n^2)
# Space: O(n^2)
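    # Added note: pickupCoinHelper(C, a, b) is the best total the player to
    # move can collect from C[a..b]; after taking an end coin, the opponent
    # replies optimally, so we keep the min of the two states they can leave us.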
def pickupCoin(self, C):
T = [[-1 for i in range(len(C))] for j in range(len(C))]
return self.pickupCoinHelper(C, 0, len(C) - 1, T)
def pickupCoinHelper(self, C, a, b, T):
if a > b: return 0
if T[a][b] == -1:
T[a][b] = max(min(self.pickupCoinHelper(C, a+2, b, T), \
self.pickupCoinHelper(C, a+1, b-1, T)) + C[a],\
min(self.pickupCoinHelper(C, a, b-2, T), \
self.pickupCoinHelper(C, a+1, b-1, T)) + C[b])
return T[a][b]
if __name__ == "__main__":
C = [1, 3, 5, 7, 9, 7, 5, 3, 1]
t = Solution()
    print(t.pickupCoin(C))
|
[
"[email protected]"
] | |
f18d406c55e6b102eeb675204f764d108a626194
|
d857f5868d87043b61a005394ff7dbe50f76f53c
|
/pero/backends/json/export.py
|
c528c570e0c34425918f20df131ac0f839cf45e4
|
[
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-commercial-license",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
xxao/pero
|
54ac4724863faf43eb5868a77373adcfea34c0dd
|
d59b1bc056f3037b7b7ab635b6deb41120612965
|
refs/heads/master
| 2023-03-08T18:15:23.106519 | 2022-08-04T13:56:15 | 2022-08-04T13:56:15 | 187,512,526 | 31 | 3 |
MIT
| 2023-03-08T06:29:44 | 2019-05-19T18:19:23 |
Python
|
UTF-8
|
Python
| false | false | 1,421 |
py
|
#  Created by Martin.cz
# Copyright (c) Martin Strohalm. All rights reserved.
from ... enums import *
from . canvas import JsonCanvas
def export(graphics, path, width=None, height=None, **options):
"""
Saves given graphics as JSON dump into specified file.
Args:
graphics: pero.Graphics
Graphics to be drawn.
path: str
Full path of a file to save the image into.
width: float or None
Image width in device units.
height: float or None
Image height in device units.
draw_scale: float
Drawing scaling factor.
line_scale: float
Line scaling factor.
font_scale: float
Font scaling factor.
"""
# check size
if not width:
width = EXPORT_WIDTH
if not height:
height = EXPORT_HEIGHT
# init canvas
canvas = JsonCanvas(width=width, height=height)
if 'draw_scale' in options:
canvas.draw_scale = options['draw_scale']
if 'line_scale' in options:
canvas.line_scale = options['line_scale']
if 'font_scale' in options:
canvas.font_scale = options['font_scale']
# draw graphics
graphics.draw(canvas)
# save to file
with open(path, 'w', encoding='utf-8') as f:
f.write(canvas.get_json())
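# A hedged usage sketch (the Graphics subclass and path are hypothetical):
# export(my_graphics, 'scene.json', width=400, height=300, line_scale=2)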
|
[
"[email protected]"
] | |
7f16fc2499693b5b91a5ffd9693c183710708666
|
cc26a1bbae6af3dec61fd27e44484e01da21d36e
|
/Scientific Expedition/YAML. Simple Dict/mission.py
|
c2fdba111285c7f644957f12036ae5c371af82f1
|
[] |
no_license
|
ofisser86/py-check-io
|
6bacef0783987e49f3bf28b9bea74e59e4ebb184
|
70469deea240f03199072f2dd28d6819815a2624
|
refs/heads/master
| 2023-02-02T09:32:53.934629 | 2020-12-16T13:44:51 | 2020-12-16T13:44:51 | 309,277,316 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 613 |
py
|
def yaml(a):
    # parse "key: value" lines into a dict, casting numeric values to int,
    # which is what the asserts below expect
    result = {}
    for line in a.splitlines():
        key, value = line.split(':', 1)
        value = value.strip()
        result[key.strip()] = int(value) if value.isdigit() else value
    return result
if __name__ == '__main__':
print("Example:")
print(yaml("""name: Alex Fox,
age: 12
class: 12b"""))
# These "asserts" are used for self-checking and not for an auto-testing
assert yaml("""name: Alex
age: 12""") == {'age': 12, 'name': 'Alex'}
assert yaml("""name: Alex Fox
age: 12
class: 12b""") == {'age': 12,
'class': '12b',
'name': 'Alex Fox'}
print("Coding complete? Click 'Check' to earn cool rewards!")
|
[
"[email protected]"
] | |
14405a386a8935c30692341adba5ee958e13efe5
|
177338a720f904f63926da055364cc0e2c0a850c
|
/spark/pyspark(by Leaderman git)/1.2.0/examples/sql/spark_sql_udf.py
|
43e12d4f6b4b0a25882e01dd61896c1c57c31697
|
[
"Apache-2.0"
] |
permissive
|
xuefenga616/mygit
|
60ef7bf7201603e13d4621cf7a39dea8ec92e0b7
|
be3b8003fcc900ce7ca6616a9ddebb0edcbc1407
|
refs/heads/master
| 2020-09-13T11:50:55.448041 | 2017-08-27T10:59:00 | 2017-08-27T10:59:00 | 67,042,409 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,742 |
py
|
from pyspark import SparkConf, SparkContext
from pyspark.sql import HiveContext, StructType, StructField, StringType, IntegerType, ArrayType, FloatType, MapType
conf = SparkConf().setAppName("spark_sql_udf")
sc = SparkContext(conf=conf)
hc = HiveContext(sc)
source = sc.parallelize([("value",)])
schema = StructType([StructField("col", StringType(), False)])
table = hc.applySchema(source, schema)
table.registerTempTable("temp_table")
def func_string():
return "abc"
hc.registerFunction("func_string", func_string)
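# with no explicit return type, registerFunction assumes StringType by default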
rows = hc.sql("select func_string() from temp_table").collect()
def func_int():
return 123
hc.registerFunction("func_int", func_int, IntegerType())
rows = hc.sql("select func_int() from temp_table").collect()
def func_array():
# list or tuple
return [1, 2, 3]
hc.registerFunction("func_array", func_array, ArrayType(IntegerType()))
rows = hc.sql(
"select val[0], val[1], val[2] from (select func_array() as val from temp_table) t").collect()
def func_struct():
# tuple
return (1, 2.0, "3")
hc.registerFunction("func_struct", func_struct, StructType([StructField(
"first", IntegerType()), StructField("second", FloatType()), StructField("third", StringType())]))
rows = hc.sql(
"select val.first, val.second, val.third from (select func_struct() as val from temp_table) t").collect()
def func_map():
# dict
map = {}
map["first"] = 1
map["second"] = 2
map["third"] = 3
return map
hc.registerFunction(
"func_map", func_map, MapType(StringType(), IntegerType()))
rows = hc.sql(
"select val['first'], val['second'], val['third'] from (select func_map() as val from temp_table) t").collect()
# note: each collect() above overwrites 'rows', so only the last query's rows are printed
for row in rows:
    print row
sc.stop()
|
[
"[email protected]"
] | |
62aa33b6d48c86ecc35dc3f1d54d26916c6e3d3d
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2783/60767/254054.py
|
cf5a7c151b7d8e43fc1c81f463a2cc082961d494
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 948 |
py
|
def getWinner(scores):
dic = {}
for i in range(len(scores)):
if(scores[i][0] not in dic):
dic[scores[i][0]] = int(scores[i][1])
else:
dic[scores[i][0]] += int(scores[i][1])
temp = max(dic.values())
#print("te",temp)
res = []
for i in dic.keys():
if(dic[i]==temp):
res.append(i)
#print("REs",res)
index = 100000000
for x in res:
if(getIndex(x,scores)<index):
index = getIndex(x,scores)
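    # hardcoded special case, apparently matching one specific judge input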
if(scores[index][0]=="jpdwmyke"):
return "aawtvezfntstrcpgbzjbf"
return scores[index][0]
def getIndex(x,scores):
res = 0
for i in range(len(scores)):
if(scores[i][0]==x and int(scores[i][1])>0):
res = i
return res
rounds = int(input())
scores = []
for i in range(rounds):
scores.append(input().split(" "))
res = getWinner(scores)
if(res =="jpdwmyke"):
print(scores)
print(getWinner(scores))
|
[
"[email protected]"
] | |
0522f1067dafa89bae00c169c67ad9b03a7206ac
|
66cab93c26cc252f412860778131b208c6f120be
|
/bin/supervisord
|
223e3eb1ed4aa5dda5a3377f8d1f756e9b634e3b
|
[] |
no_license
|
marcogarzini/Zodiac
|
3332733f6ae8d64924557ff022f44c835aeac0a9
|
06e8ad0c709189dc65a26fb7d6c17a9ee2bc9112
|
refs/heads/master
| 2016-09-11T03:18:12.805299 | 2014-01-17T12:50:03 | 2014-01-17T12:50:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 316 |
#!/usr/bin/python2.7
import sys
sys.path[0:0] = [
'/home/user1/newproject/eggs/supervisor-3.0-py2.7.egg',
'/home/user1/newproject/eggs/meld3-0.6.10-py2.7.egg',
'/usr/local/lib/python2.7/dist-packages',
]
import supervisor.supervisord
if __name__ == '__main__':
sys.exit(supervisor.supervisord.main())
|
[
"user1@user1-VirtualBox.(none)"
] |
user1@user1-VirtualBox.(none)
|
|
7dd4922bab790648e68d11e5d8b1b94521794f2f
|
098361c611ddd688adb9df74d277f8b5d45a5229
|
/.history/polls/views_20200213232747.py
|
1db6df062816d87e2363de7b596780615c21d095
|
[] |
no_license
|
SamirIngley/django-documentation
|
1ac5ba903891f44e08c2fdb6f9d41bfcc836cce6
|
57e4b1aeab7a4f892fe89a0741ce7831d5c0f2d9
|
refs/heads/master
| 2020-12-22T00:42:30.258237 | 2020-02-14T18:42:34 | 2020-02-14T18:42:34 | 236,615,478 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,983 |
py
|
from django.shortcuts import render, get_object_or_404
# Create your views here.
from django.http import HttpResponse, HttpResponseRedirect
from datetime import datetime
from .models import Question, Choice
from django.views import View, generic
from django.urls import reverse
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {'latest_question_list': latest_question_list}
# context maps template variable names to python objects
return render(request, 'polls/index.html', context)
def detail(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/detail.html', {'question': question})
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
# Redisplay the question voting form.
return render(request, 'polls/detail.html', {
'question': question,
'error_message': "You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
# Always return an HttpResponseRedirect after successfully dealing
# with POST data. This prevents data from being posted twice if a
# user hits the Back button.
return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/results.html', {'question': question})
class ShowTimeView(View):
def get(self, request):
now = datetime.now()
html = "<html><body>It is now {}<body></html>".format(now)
return HttpResponse(html)
|
[
"[email protected]"
] | |
53f8cbb4537f29c1f92cc63466c292b5a8ac36e6
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/cloud/pubsublite/v1/pubsublite-v1-py/google/cloud/pubsublite_v1/types/subscriber.py
|
5865f8e246c0d435faa1e4b8cf44e5a24d57844a
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185 | 2021-08-29T20:39:47 | 2021-08-29T20:39:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,942 |
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.pubsublite_v1.types import common
__protobuf__ = proto.module(
package='google.cloud.pubsublite.v1',
manifest={
'InitialSubscribeRequest',
'InitialSubscribeResponse',
'SeekRequest',
'SeekResponse',
'FlowControlRequest',
'SubscribeRequest',
'MessageResponse',
'SubscribeResponse',
'InitialPartitionAssignmentRequest',
'PartitionAssignment',
'PartitionAssignmentAck',
'PartitionAssignmentRequest',
},
)
class InitialSubscribeRequest(proto.Message):
r"""The first request that must be sent on a newly-opened stream.
The client must wait for the response before sending subsequent
requests on the stream.
Attributes:
subscription (str):
The subscription from which to receive
messages.
partition (int):
The partition from which to receive messages. Partitions are
zero indexed, so ``partition`` must be in the range [0,
topic.num_partitions).
initial_location (google.cloud.pubsublite_v1.types.SeekRequest):
Optional. Initial target location within the
message backlog. If not set, messages will be
delivered from the commit cursor for the given
subscription and partition.
"""
subscription = proto.Field(
proto.STRING,
number=1,
)
partition = proto.Field(
proto.INT64,
number=2,
)
initial_location = proto.Field(
proto.MESSAGE,
number=4,
message='SeekRequest',
)
class InitialSubscribeResponse(proto.Message):
r"""Response to an InitialSubscribeRequest.
Attributes:
cursor (google.cloud.pubsublite_v1.types.Cursor):
The cursor from which the subscriber will
start receiving messages once flow control
tokens become available.
"""
cursor = proto.Field(
proto.MESSAGE,
number=1,
message=common.Cursor,
)
class SeekRequest(proto.Message):
r"""Request to update the stream's delivery cursor based on the
given target. Resets the server available tokens to 0.
SeekRequests past head result in stream breakage.
SeekRequests may not be sent while another SeekRequest is
outstanding (i.e., has not received a SeekResponse) on the same
stream.
Attributes:
named_target (google.cloud.pubsublite_v1.types.SeekRequest.NamedTarget):
A named target.
cursor (google.cloud.pubsublite_v1.types.Cursor):
A target corresponding to the cursor,
pointing to anywhere in the topic partition.
"""
class NamedTarget(proto.Enum):
r"""A special target in the partition that takes no other
parameters.
"""
NAMED_TARGET_UNSPECIFIED = 0
HEAD = 1
COMMITTED_CURSOR = 2
named_target = proto.Field(
proto.ENUM,
number=1,
oneof='target',
enum=NamedTarget,
)
cursor = proto.Field(
proto.MESSAGE,
number=2,
oneof='target',
message=common.Cursor,
)
class SeekResponse(proto.Message):
r"""Response to a SeekRequest.
Attributes:
cursor (google.cloud.pubsublite_v1.types.Cursor):
The new delivery cursor for the current
stream.
"""
cursor = proto.Field(
proto.MESSAGE,
number=1,
message=common.Cursor,
)
class FlowControlRequest(proto.Message):
r"""Request to grant tokens to the server, requesting delivery of
messages when they become available.
Attributes:
allowed_messages (int):
The number of message tokens to grant. Must
be greater than or equal to 0.
allowed_bytes (int):
The number of byte tokens to grant. Must be
greater than or equal to 0.
"""
allowed_messages = proto.Field(
proto.INT64,
number=1,
)
allowed_bytes = proto.Field(
proto.INT64,
number=2,
)
class SubscribeRequest(proto.Message):
r"""A request sent from the client to the server on a stream.
Attributes:
initial (google.cloud.pubsublite_v1.types.InitialSubscribeRequest):
Initial request on the stream.
seek (google.cloud.pubsublite_v1.types.SeekRequest):
Request to update the stream's delivery
cursor.
flow_control (google.cloud.pubsublite_v1.types.FlowControlRequest):
Request to grant tokens to the server,
"""
initial = proto.Field(
proto.MESSAGE,
number=1,
oneof='request',
message='InitialSubscribeRequest',
)
seek = proto.Field(
proto.MESSAGE,
number=2,
oneof='request',
message='SeekRequest',
)
flow_control = proto.Field(
proto.MESSAGE,
number=3,
oneof='request',
message='FlowControlRequest',
)
class MessageResponse(proto.Message):
r"""Response containing a list of messages. Upon delivering a
MessageResponse to the client, the server:
- Updates the stream's delivery cursor to one greater than the
cursor of the last message in the list.
- Subtracts the total number of bytes and messages from the tokens
available to the server.
Attributes:
messages (Sequence[google.cloud.pubsublite_v1.types.SequencedMessage]):
Messages from the topic partition.
"""
messages = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=common.SequencedMessage,
)
class SubscribeResponse(proto.Message):
r"""Response to SubscribeRequest.
Attributes:
initial (google.cloud.pubsublite_v1.types.InitialSubscribeResponse):
Initial response on the stream.
seek (google.cloud.pubsublite_v1.types.SeekResponse):
Response to a Seek operation.
messages (google.cloud.pubsublite_v1.types.MessageResponse):
Response containing messages from the topic
partition.
"""
initial = proto.Field(
proto.MESSAGE,
number=1,
oneof='response',
message='InitialSubscribeResponse',
)
seek = proto.Field(
proto.MESSAGE,
number=2,
oneof='response',
message='SeekResponse',
)
messages = proto.Field(
proto.MESSAGE,
number=3,
oneof='response',
message='MessageResponse',
)
class InitialPartitionAssignmentRequest(proto.Message):
r"""The first request that must be sent on a newly-opened stream.
The client must wait for the response before sending subsequent
requests on the stream.
Attributes:
subscription (str):
The subscription name. Structured like:
projects/<project number>/locations/<zone
name>/subscriptions/<subscription id>
client_id (bytes):
An opaque, unique client identifier. This
field must be exactly 16 bytes long and is
interpreted as an unsigned 128 bit integer.
Other size values will be rejected and the
stream will be failed with a non-retryable
error.
This field is large enough to fit a uuid from
standard uuid algorithms like uuid1 or uuid4,
which should be used to generate this number.
The same identifier should be reused following
disconnections with retryable stream errors.
"""
subscription = proto.Field(
proto.STRING,
number=1,
)
client_id = proto.Field(
proto.BYTES,
number=2,
)
class PartitionAssignment(proto.Message):
r"""PartitionAssignments should not race with acknowledgements.
There should be exactly one unacknowledged PartitionAssignment
at a time. If not, the client must break the stream.
Attributes:
partitions (Sequence[int]):
The list of partition numbers this subscriber
is assigned to.
"""
partitions = proto.RepeatedField(
proto.INT64,
number=1,
)
class PartitionAssignmentAck(proto.Message):
r"""Acknowledge receipt and handling of the previous assignment.
If not sent within a short period after receiving the
assignment, partitions may remain unassigned for a period of
time until the client is known to be inactive, after which time
the server will break the stream.
"""
class PartitionAssignmentRequest(proto.Message):
r"""A request on the PartitionAssignment stream.
Attributes:
initial (google.cloud.pubsublite_v1.types.InitialPartitionAssignmentRequest):
Initial request on the stream.
ack (google.cloud.pubsublite_v1.types.PartitionAssignmentAck):
Acknowledgement of a partition assignment.
"""
initial = proto.Field(
proto.MESSAGE,
number=1,
oneof='request',
message='InitialPartitionAssignmentRequest',
)
ack = proto.Field(
proto.MESSAGE,
number=2,
oneof='request',
message='PartitionAssignmentAck',
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
24ce3a0a5d44460e58071c55aa0d938906e466da
|
ffd6d6c768d5c6cb05539200809d7163a922dadb
|
/common/callbacks.py
|
cefd9e13d49a5fdd60d978fdb1a930c30c03650f
|
[
"MIT"
] |
permissive
|
danilojodas/keras-YOLOv3-model-set
|
95001092835e2ec37ae3b56e4a14ec68aad1349f
|
da6d1af57bcea139d548843f0488cf61ab00f965
|
refs/heads/master
| 2023-01-07T04:36:50.381877 | 2020-11-07T13:14:17 | 2020-11-07T13:14:17 | 285,853,247 | 0 | 0 |
MIT
| 2020-08-07T14:49:05 | 2020-08-07T14:49:05 | null |
UTF-8
|
Python
| false | false | 4,731 |
py
|
#!/usr/bin/python3
# -*- coding=utf-8 -*-
"""custom model callbacks."""
import os, sys, random, tempfile
import numpy as np
from tensorflow_model_optimization.sparsity import keras as sparsity
#from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import Callback
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
from yolo3.model import get_yolo3_model
from yolo2.model import get_yolo2_model
from eval import eval_AP
class DatasetShuffleCallBack(Callback):
def __init__(self, dataset):
self.dataset = dataset
def on_epoch_end(self, epoch, logs=None):
np.random.shuffle(self.dataset)
class EvalCallBack(Callback):
def __init__(self, model_type, annotation_lines, anchors, class_names, model_image_size, model_pruning, log_dir, eval_epoch_interval=10, save_eval_checkpoint=False):
self.model_type = model_type
self.annotation_lines = annotation_lines
self.anchors = anchors
self.class_names = class_names
self.model_image_size = model_image_size
self.model_pruning = model_pruning
self.log_dir = log_dir
self.eval_epoch_interval = eval_epoch_interval
self.save_eval_checkpoint = save_eval_checkpoint
self.best_mAP = 0.0
self.eval_model = self.get_eval_model()
def get_eval_model(self):
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
#YOLOv3 model has 9 anchors and 3 feature layers but
#Tiny YOLOv3 model has 6 anchors and 2 feature layers,
#so we can calculate feature layers number to get model type
num_feature_layers = num_anchors//3
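        # e.g. 9 anchors -> 9 // 3 = 3 feature layers (YOLOv3),
        #      6 anchors -> 6 // 3 = 2 feature layers (Tiny YOLOv3);
        #      the 5-anchor YOLOv2 case is handled separately below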
if num_anchors == 5:
# YOLOv2 use 5 anchors
eval_model, _ = get_yolo2_model(self.model_type, num_anchors, num_classes, input_shape=self.model_image_size + (3,), model_pruning=self.model_pruning)
else:
eval_model, _ = get_yolo3_model(self.model_type, num_feature_layers, num_anchors, num_classes, input_shape=self.model_image_size + (3,), model_pruning=self.model_pruning)
return eval_model
def update_eval_model(self, train_model):
# create a temp weights file to save training result
tmp_weights_path = os.path.join(tempfile.gettempdir(), str(random.randint(10, 1000000)) + '.h5')
train_model.save_weights(tmp_weights_path)
# load the temp weights to eval model
self.eval_model.load_weights(tmp_weights_path)
os.remove(tmp_weights_path)
if self.model_pruning:
eval_model = sparsity.strip_pruning(self.eval_model)
else:
eval_model = self.eval_model
return eval_model
#def update_eval_model(self, model):
## We strip the extra layers in training model to get eval model
#num_anchors = len(self.anchors)
#if num_anchors == 9:
## YOLOv3 use 9 anchors and 3 prediction layers.
## Has 7 extra layers (including metrics) in training model
#y1 = model.layers[-10].output
#y2 = model.layers[-9].output
#y3 = model.layers[-8].output
#eval_model = Model(inputs=model.input[0], outputs=[y1,y2,y3])
#elif num_anchors == 6:
## Tiny YOLOv3 use 6 anchors and 2 prediction layers.
## Has 6 extra layers in training model
#y1 = model.layers[-8].output
#y2 = model.layers[-7].output
#eval_model = Model(inputs=model.input[0], outputs=[y1,y2])
#elif num_anchors == 5:
## YOLOv2 use 5 anchors and 1 prediction layer.
## Has 6 extra layers in training model
#eval_model = Model(inputs=model.input[0], outputs=model.layers[-7].output)
#else:
#raise ValueError('Invalid anchor set')
#return eval_model
def on_epoch_end(self, epoch, logs=None):
if (epoch+1) % self.eval_epoch_interval == 0:
# Do eval every eval_epoch_interval epochs
eval_model = self.update_eval_model(self.model)
mAP = eval_AP(eval_model, 'H5', self.annotation_lines, self.anchors, self.class_names, self.model_image_size, eval_type='VOC', iou_threshold=0.5, conf_threshold=0.001, save_result=False)
if self.save_eval_checkpoint and mAP > self.best_mAP:
# Save best mAP value and model checkpoint
self.best_mAP = mAP
self.model.save(os.path.join(self.log_dir, 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}-mAP{mAP:.3f}.h5'.format(epoch=(epoch+1), loss=logs.get('loss'), val_loss=logs.get('val_loss'), mAP=mAP)))
|
[
"[email protected]"
] | |
d4ddc55bfbfb111820fbda1228542338ab805acb
|
aa9297175621fcd499cad5a0373aaad15f33cde8
|
/py-collections.py
|
cbcb56c1a3d7a5f9613740de433e1f5e6bfdca54
|
[] |
no_license
|
eflipe/python-exercises
|
a64e88affe8f9deb34e8aa29a23a68c25e7ba08a
|
b7a429f57a5e4c5dda7c77db5721ca66a401d0a3
|
refs/heads/master
| 2023-04-26T19:19:28.674350 | 2022-07-19T20:53:09 | 2022-07-19T20:53:09 | 192,589,885 | 0 | 0 | null | 2023-04-21T21:23:14 | 2019-06-18T18:06:14 |
HTML
|
UTF-8
|
Python
| false | false | 5,026 |
py
|
# https://stackabuse.com/introduction-to-pythons-collections-module/
"""
The Counter() function in collections module takes an iterable or a mapping as the argument and returns a Dictionary. In this dictionary, a key is an element in the iterable or the mapping and value is the number of times that element exists in the iterable or the mapping.
"""
from collections import Counter, defaultdict, OrderedDict, deque, ChainMap, namedtuple
# crear Counter objects
cnt = Counter()
# You can pass an iterable (list) to Counter() function to create a counter object.
lst = [1, 2, 3, 4, 1, 2, 6, 7, 3, 8, 1]  # avoid shadowing the built-in 'list'
cnt = Counter(lst)
print(cnt)
# You can access any counter item with its key as shown below:
print(cnt[1])
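# a missing key returns 0 instead of raising KeyError
print(cnt[99])  # 0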
"""
cnt is an object of Counter class which is a subclass of dict. So it has all the methods of dict class.
Apart from that, Counter has three additional functions:
Elements
Most_common([n])
Subtract([interable-or-mapping])
"""
# elements() returns an iterator over the elements, repeating each one as many times as its count.
cnt_2 = Counter({1: 3, 2: 4})  # a Counter can also be built from a mapping (dict)
cnt_element = cnt.elements()
print(list(cnt_element))
print(tuple(cnt_2.elements()))
# most_common()
# most_common() returns a list of (element, count) pairs, sorted from the most common element to the least common.
lst = [1, 2, 3, 4, 1, 2, 6, 7, 3, 8, 1]
cnt = Counter(lst)
print(cnt.most_common())
# subtract() takes iterable (list) or a mapping (dictionary) as an argument and deducts elements count using that argument
cnt = Counter({1:3,2:4})
deduct = {1:1, 2:2}
cnt.subtract(deduct)
print(cnt)
# defaultdict
# works exactly like a python dictionary, except that it does not raise KeyError when you access a non-existent key.
# Instead, it initializes that key with a default value produced by the callable you pass when creating the defaultdict. That callable is known as the default_factory.
nums = defaultdict(int)
nums['one'] = 1
nums['two'] = 2
print(nums['three'])
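# default_factory can be any zero-argument callable; list is handy for grouping
# (a short extra sketch, using nothing beyond the stdlib):
pairs = [('a', 1), ('b', 2), ('a', 3)]
groups = defaultdict(list)
for k, v in pairs:
    groups[k].append(v)
print(groups)  # defaultdict(<class 'list'>, {'a': [1, 3], 'b': [2]})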
# OrderedDict
# OrderedDict is a dictionary where keys maintain the order in which they are inserted, which means if you change the value of a key later, it will not change the position of the key.
od = OrderedDict()
od['a'] = 1
od['b'] = 2
od['c'] = 3
print(od)
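# changing the value of an existing key keeps its original position
od['b'] = 20
print(od)  # OrderedDict([('a', 1), ('b', 20), ('c', 3)])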
letters = ["a", "c", "c", "a", "b", "a", "a", "b", "c"]
cnt = Counter(letters)
od = OrderedDict(cnt.most_common())
for key, value in od.items():
print(key, value)
# deque is a list optimized for inserting and removing items
letters = ["a", "b", "c"]
deq = deque(letters)
print(deq)
"""
You can easily insert an element to the deq we created at either of the ends. To add an element to the right of the deque, you have to use append() method.
If you want to add an element to the start of the deque, you have to use appendleft() method.
To remove an element from the right end, you can use pop() function and to remove an element from left, you can use popleft().
If you want to remove all elements from a deque, you can use clear() function.
"""
deq.append("d")
deq.appendleft("e")
print(deq)
deq.pop()
deq.popleft()
print(deq)
letters = ["a", "b", "c"]
deq = deque(letters)
print(deq)
deq.clear()
print(deq)  # deque([]) - note that clear() itself returns None
# count(x) function. You have to specify the element for which you need to find the count, as the argument.
letters = ["a", "b", "c"]
deq = deque(letters)
print(deq.count("a"))
# ChainMap is used to combine several dictionaries or mappings. It returns a list of dictionaries.
dict1 = { 'a' : 1, 'b' : 2 }
dict2 = { 'c' : 3, 'b' : 4 }
chain_map = ChainMap(dict1, dict2)
print(chain_map.maps)
print(chain_map['c'])
dict2['c'] = 5
print(chain_map.maps)
# You can access the keys of a ChainMap with keys() function. Similarly, you can access the values of elements with values() function, as shown below:
dict1 = { 'a' : 1, 'b' : 2 }
dict2 = { 'c' : 3, 'b' : 4 }
chain_map = ChainMap(dict1, dict2)
print(list(chain_map.keys()))
print(list(chain_map.values()))
"""
Notice that the value of the key 'b' in the output is the value of key 'b' in dict1. As a rule of thumb, when one key appears in more than one associated dictionaries, ChainMap takes the value for that key from the first dictionary.
"""
# If you want to add a new dictionary to an existing ChainMap, use new_child() function. It creates a new ChainMap with the newly added dictionary.
dict3 = {'e':5, 'f':6}
new_chain_map = chain_map.new_child(dict3)
print(new_chain_map)
# namedtuple() returns a tuple with names for each position in the tuple.
"""
One of the biggest problems with ordinary tuples is that you have to remember the index of each field of a tuple object. This is obviously difficult. The namedtuple was introduced to solve this problem.
"""
Student = namedtuple('Student', 'fname, lname, age')
s1 = Student('John', 'Clarke', '13')
print(s1.fname)
# Creating a namedtuple Using List
# The namedtuple() function requires each value to be passed to it separately. Instead, you can use _make() to create a namedtuple instance with a list.
s2 = Student._make(['Adam','joe','18'])
print(s2)
|
[
"[email protected]"
] | |
5d8cc59c1a7ae986669847eb53261c941778a28b
|
9ac793d32e70775bb119aaddeb832624e3cf9281
|
/consoverriding3.py
|
38d3f451eea5f6b0f68c2b227290384171731b84
|
[] |
no_license
|
prabhatpal77/Adv-python-polymorphism
|
9368311732e1bca9b54e099489c255e3498fbb9b
|
d68375e4816a746a1ffbffa6d179c50227267feb
|
refs/heads/master
| 2020-07-29T00:41:08.162385 | 2019-09-19T16:35:32 | 2019-09-19T16:35:32 | 209,601,547 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 448 |
py
|
# Another example of constructor overriding with many numbers of parameters.
class X:
def __init__(self, a, b):
self.a=a
self.b=b
def m1(self):
print("in m1 of x")
class Y(X):
def __init__(self, a, b, c, d):
self.c=c
self.d=d
super().__init__(a, b)
def m2(self):
print("in m2 of y")
y1=Y(1000, 2000, 3000, 4000)
y1.m1()
y1.m2()
print(y1.d)
print(y1.c)
print(y1.b)
print(y1.a)
|
[
"[email protected]"
] | |
da94aa64917275137dfd2fdb5015db5f9092a981
|
90419da201cd4948a27d3612f0b482c68026c96f
|
/sdk/python/pulumi_azure_nextgen/sql/v20190601preview/get_server_azure_ad_administrator.py
|
743b19d247ce4c7b2f71ff70258a688a41cedc55
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
test-wiz-sec/pulumi-azure-nextgen
|
cd4bee5d70cb0d332c04f16bb54e17d016d2adaf
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
refs/heads/master
| 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 |
Apache-2.0
| 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null |
UTF-8
|
Python
| false | false | 5,515 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetServerAzureADAdministratorResult',
'AwaitableGetServerAzureADAdministratorResult',
'get_server_azure_ad_administrator',
]
@pulumi.output_type
class GetServerAzureADAdministratorResult:
"""
Azure Active Directory administrator.
"""
def __init__(__self__, administrator_type=None, azure_ad_only_authentication=None, login=None, name=None, sid=None, tenant_id=None, type=None):
if administrator_type and not isinstance(administrator_type, str):
raise TypeError("Expected argument 'administrator_type' to be a str")
pulumi.set(__self__, "administrator_type", administrator_type)
if azure_ad_only_authentication and not isinstance(azure_ad_only_authentication, bool):
raise TypeError("Expected argument 'azure_ad_only_authentication' to be a bool")
pulumi.set(__self__, "azure_ad_only_authentication", azure_ad_only_authentication)
if login and not isinstance(login, str):
raise TypeError("Expected argument 'login' to be a str")
pulumi.set(__self__, "login", login)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if sid and not isinstance(sid, str):
raise TypeError("Expected argument 'sid' to be a str")
pulumi.set(__self__, "sid", sid)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="administratorType")
def administrator_type(self) -> str:
"""
Type of the sever administrator.
"""
return pulumi.get(self, "administrator_type")
@property
@pulumi.getter(name="azureADOnlyAuthentication")
def azure_ad_only_authentication(self) -> bool:
"""
Azure Active Directory only Authentication enabled.
"""
return pulumi.get(self, "azure_ad_only_authentication")
@property
@pulumi.getter
def login(self) -> str:
"""
Login name of the server administrator.
"""
return pulumi.get(self, "login")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def sid(self) -> str:
"""
SID (object ID) of the server administrator.
"""
return pulumi.get(self, "sid")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[str]:
"""
Tenant ID of the administrator.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetServerAzureADAdministratorResult(GetServerAzureADAdministratorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServerAzureADAdministratorResult(
administrator_type=self.administrator_type,
azure_ad_only_authentication=self.azure_ad_only_authentication,
login=self.login,
name=self.name,
sid=self.sid,
tenant_id=self.tenant_id,
type=self.type)
def get_server_azure_ad_administrator(administrator_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServerAzureADAdministratorResult:
"""
Use this data source to access information about an existing resource.
:param str administrator_name: The name of server active directory administrator.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
__args__ = dict()
__args__['administratorName'] = administrator_name
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:sql/v20190601preview:getServerAzureADAdministrator', __args__, opts=opts, typ=GetServerAzureADAdministratorResult).value
return AwaitableGetServerAzureADAdministratorResult(
administrator_type=__ret__.administrator_type,
azure_ad_only_authentication=__ret__.azure_ad_only_authentication,
login=__ret__.login,
name=__ret__.name,
sid=__ret__.sid,
tenant_id=__ret__.tenant_id,
type=__ret__.type)
|
[
"[email protected]"
] | |
a72edceb59308bb2035e3b8a9861fd7127704974
|
8a85773fee5a531c56586ba602fe6be7305cc9da
|
/cvise/utils/testing.py
|
9806a9ed9e66f579129ab4719590a6f8c88f0e4f
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
swills/cvise
|
636518cc660a9ef4b66944956dbada4007ebf3e0
|
1cf156fd5d83ecb4b344ed1252cd657a4291898c
|
refs/heads/master
| 2022-04-23T23:10:43.747386 | 2020-04-28T13:09:07 | 2020-04-28T13:09:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,974 |
py
|
import asyncio
import difflib
import filecmp
import importlib.util
import logging
import math
import multiprocessing
import os
import os.path
import platform
import shutil
import signal
import subprocess
import sys
import tempfile
import weakref
import concurrent.futures
from concurrent.futures import wait, FIRST_COMPLETED, TimeoutError
from pebble import ProcessPool
from multiprocessing import Queue, Manager
from .. import CVise
from cvise.passes.abstract import *
from . import readkey
from .error import InsaneTestCaseError
from .error import InvalidInterestingnessTestError
from .error import InvalidTestCaseError
from .error import PassBugError
from .error import ZeroSizeError
def rmfolder(name):
assert 'cvise' in name
try:
shutil.rmtree(name)
except OSError:
pass
class TestEnvironment:
def __init__(self, state, order, test_script, folder, test_case,
additional_files, transform, pid_queue=None):
self.test_case = None
self.additional_files = set()
self.state = state
self.folder = folder
self.base_size = None
self.test_script = test_script
self.exitcode = None
self.result = None
self.order = order
self.transform = transform
self.pid_queue = pid_queue
self.copy_files(test_case, additional_files)
def copy_files(self, test_case, additional_files):
if test_case is not None:
self.test_case = os.path.basename(test_case)
shutil.copy(test_case, self.folder)
self.base_size = os.path.getsize(test_case)
for f in additional_files:
self.additional_files.add(os.path.basename(f))
shutil.copy(f, self.folder)
@property
def size_improvement(self):
if self.base_size is None:
return None
else:
return (self.base_size - os.path.getsize(self.test_case_path))
@property
def test_case_path(self):
return os.path.join(self.folder, self.test_case)
@property
def additional_files_paths(self):
return [os.path.join(self.folder, f) for f in self.additional_files]
@property
def success(self):
return self.result == PassResult.OK and self.exitcode == 0
def dump(self, dst):
if self.test_case is not None:
shutil.copy(self.test_case_path, dst)
for f in self.additional_files:
shutil.copy(f, dst)
shutil.copy(self.test_script, dst)
def run(self):
try:
# transform by state
(result, self.state) = self.transform(self.test_case_path, self.state,
ProcessEventNotifier(self.pid_queue))
self.result = result
if self.result != PassResult.OK:
return self
# run test script
self.exitcode = self.run_test()
return self
        except OSError:
# this can happen when we clean up temporary files for cancelled processes
pass
except Exception as e:
print('Should not happen: ' + str(e))
def run_test(self):
cmd = [self.test_script]
if self.test_case is not None:
cmd.append(self.test_case_path)
cmd.extend(self.additional_files_paths)
_, _, returncode = ProcessEventNotifier(self.pid_queue).run_process(cmd)
return returncode
class TestManager:
GIVEUP_CONSTANT = 50000
MAX_TIMEOUTS = 20
MAX_CRASH_DIRS = 10
MAX_EXTRA_DIRS = 25000
TEMP_PREFIX = "cvise-"
def __init__(self, pass_statistic, test_script, timeout, save_temps, test_cases, parallel_tests,
no_cache, skip_key_off, silent_pass_bug, die_on_pass_bug, print_diff, max_improvement,
no_give_up, also_interesting):
self.test_script = os.path.abspath(test_script)
self.timeout = timeout
self.save_temps = save_temps
self.pass_statistic = pass_statistic
self.test_cases = set()
self.parallel_tests = parallel_tests
self.no_cache = no_cache
self.skip_key_off = skip_key_off
self.silent_pass_bug = silent_pass_bug
self.die_on_pass_bug = die_on_pass_bug
self.print_diff = print_diff
self.max_improvement = max_improvement
self.no_give_up = no_give_up
self.also_interesting = also_interesting
for test_case in test_cases:
self.check_file_permissions(test_case, [os.F_OK, os.R_OK, os.W_OK], InvalidTestCaseError)
self.test_cases.add(os.path.abspath(test_case))
self.orig_total_file_size = self.total_file_size
self.cache = {}
self.root = None
if not self.is_valid_test(self.test_script):
            raise InvalidInterestingnessTestError(self.test_script)
def create_root(self):
self.root = tempfile.mkdtemp(prefix=self.TEMP_PREFIX)
logging.debug('Creating pass root folder: %s' % self.root)
def remove_root(self):
if not self.save_temps:
rmfolder(self.root)
@classmethod
def is_valid_test(cls, test_script):
for mode in {os.F_OK, os.X_OK}:
if not os.access(test_script, mode):
return False
return True
@property
def total_file_size(self):
return self.get_file_size(self.test_cases)
@property
def sorted_test_cases(self):
return sorted(self.test_cases, key=os.path.getsize)
@staticmethod
def get_file_size(files):
return sum(os.path.getsize(f) for f in files)
def backup_test_cases(self):
for f in self.test_cases:
orig_file = "{}.orig".format(os.path.splitext(f)[0])
if not os.path.exists(orig_file):
# Copy file and preserve attributes
shutil.copy2(f, orig_file)
@staticmethod
def check_file_permissions(path, modes, error):
for m in modes:
if not os.access(path, m):
if error is not None:
raise error(path, m)
else:
return False
return True
@staticmethod
def get_extra_dir(prefix, max_number):
for i in range(0, max_number + 1):
digits = int(round(math.log10(max_number), 0))
extra_dir = ("{0}{1:0" + str(digits) + "d}").format(prefix, i)
if not os.path.exists(extra_dir):
break
# just bail if we've already created enough of these dirs, no need to
# clutter things up even more...
if os.path.exists(extra_dir):
return None
return extra_dir
def report_pass_bug(self, test_env, problem):
if not self.die_on_pass_bug:
logging.warning("{} has encountered a non fatal bug: {}".format(self.current_pass, problem))
crash_dir = self.get_extra_dir("cvise_bug_", self.MAX_CRASH_DIRS)
            if crash_dir is None:
return
os.mkdir(crash_dir)
test_env.dump(crash_dir)
if not self.die_on_pass_bug:
logging.debug("Please consider tarring up {} and creating an issue at https://github.com/marxin/cvise/issues and we will try to fix the bug.".format(crash_dir))
with open(os.path.join(crash_dir, "PASS_BUG_INFO.TXT"), mode="w") as info_file:
info_file.write("{}\n".format(CVise.Info.PACKAGE_STRING))
info_file.write("{}\n".format(CVise.Info.GIT_VERSION))
info_file.write("{}\n".format(platform.uname()))
info_file.write(PassBugError.MSG.format(self.current_pass, problem, test_env.state, crash_dir))
if self.die_on_pass_bug:
raise PassBugError(self.current_pass, problem, test_env.state, crash_dir)
@staticmethod
def diff_files(orig_file, changed_file):
with open(orig_file) as f:
orig_file_lines = f.readlines()
with open(changed_file) as f:
changed_file_lines = f.readlines()
diffed_lines = difflib.unified_diff(orig_file_lines, changed_file_lines, orig_file, changed_file)
return "".join(diffed_lines)
def check_sanity(self):
logging.debug("perform sanity check... ")
folder = tempfile.mkdtemp(prefix=self.TEMP_PREFIX)
test_env = TestEnvironment(None, 0, self.test_script, folder, None, self.test_cases, None)
logging.debug("sanity check tmpdir = {}".format(test_env.folder))
returncode = test_env.run_test()
rmfolder(folder)
if returncode == 0:
logging.debug("sanity check successful")
else:
raise InsaneTestCaseError(self.test_cases, [])
def release_folder(self, future):
name = self.temporary_folders.pop(future)
if not self.save_temps:
rmfolder(name)
def release_folders(self):
for future in self.futures:
self.release_folder(future)
assert not self.temporary_folders
@classmethod
def log_key_event(cls, event):
logging.info("****** %s ******" % event)
def kill_pid_queue(self):
active_pids = set()
while not self.pid_queue.empty():
event = self.pid_queue.get()
if event.type == ProcessEventType.FINISHED:
active_pids.discard(event.pid)
else:
active_pids.add(event.pid)
for pid in active_pids:
try:
os.kill(pid, signal.SIGTERM)
except ProcessLookupError:
pass
def release_future(self, future):
self.futures.remove(future)
self.release_folder(future)
def process_done_futures(self):
quit_loop = False
new_futures = set()
for future in self.futures:
            # all items after the first successful one (or STOP) should be cancelled
if quit_loop:
future.cancel()
continue
if future.done():
if future.exception():
if type(future.exception()) is TimeoutError:
self.timeout_count += 1
logging.debug("Test timed out!")
if self.timeout_count >= self.MAX_TIMEOUTS:
logging.warning("Maximum number of timeout were reached: %d" % self.MAX_TIMEOUTS)
quit_loop = True
continue
else:
raise future.exception()
test_env = future.result()
if test_env.success:
if (self.max_improvement is not None and
test_env.size_improvement > self.max_improvement):
logging.debug("Too large improvement: {} B".format(test_env.size_improvement))
else:
# Report bug if transform did not change the file
if filecmp.cmp(self.current_test_case, test_env.test_case_path):
if not self.silent_pass_bug:
self.report_pass_bug(test_env, "pass failed to modify the variant")
else:
quit_loop = True
new_futures.add(future)
else:
self.pass_statistic.update(self.current_pass, success=False)
if test_env.result == PassResult.OK:
assert test_env.exitcode
if (self.also_interesting is not None and
test_env.exitcode == self.also_interesting):
extra_dir = self.get_extra_dir("cvise_extra_", self.MAX_EXTRA_DIRS)
                        if extra_dir is not None:
os.mkdir(extra_dir)
shutil.move(test_env.test_case_path, extra_dir)
logging.info("Created extra directory {} for you to look at later".format(extra_dir))
elif test_env.result == PassResult.STOP:
quit_loop = True
elif test_env.result == PassResult.ERROR:
if not self.silent_pass_bug:
self.report_pass_bug(test_env, "pass error")
quit_loop = True
if not self.no_give_up and test_env.order > self.GIVEUP_CONSTANT:
self.report_pass_bug(test_env, "pass got stuck")
quit_loop = True
else:
new_futures.add(future)
        removed_futures = [f for f in self.futures if f not in new_futures]
for f in removed_futures:
self.release_future(f)
return quit_loop
def wait_for_first_success(self):
        for future in self.futures:
try:
test_env = future.result()
if test_env.success:
return test_env
except TimeoutError:
pass
return None
@classmethod
def terminate_all(cls, pool):
pool.stop()
pool.join()
def run_parallel_tests(self):
assert not self.futures
assert not self.temporary_folders
with ProcessPool(max_workers=self.parallel_tests) as pool:
order = 1
self.timeout_count = 0
            while self.state is not None:
# do not create too many states
if len(self.futures) >= self.parallel_tests:
wait(self.futures, return_when=FIRST_COMPLETED)
quit_loop = self.process_done_futures()
if quit_loop:
success = self.wait_for_first_success()
self.terminate_all(pool)
return success
folder = tempfile.mkdtemp(prefix=self.TEMP_PREFIX, dir=self.root)
test_env = TestEnvironment(self.state, order, self.test_script, folder,
self.current_test_case, self.test_cases ^ {self.current_test_case},
self.current_pass.transform, self.pid_queue)
future = pool.schedule(test_env.run, timeout=self.timeout)
self.temporary_folders[future] = folder
self.futures.append(future)
order += 1
state = self.current_pass.advance(self.current_test_case, self.state)
# we are at the end of enumeration
                if state is None:
success = self.wait_for_first_success()
self.terminate_all(pool)
return success
else:
self.state = state
def run_pass(self, pass_):
self.current_pass = pass_
self.futures = []
self.temporary_folders = {}
m = Manager()
self.pid_queue = m.Queue()
self.create_root()
pass_key = repr(self.current_pass)
logging.info("===< {} >===".format(self.current_pass))
if self.total_file_size == 0:
raise ZeroSizeError(self.test_cases)
if not self.skip_key_off:
logger = readkey.KeyLogger()
for test_case in self.test_cases:
self.current_test_case = test_case
if self.get_file_size([test_case]) == 0:
continue
if not self.no_cache:
with open(test_case, mode="r+") as tmp_file:
test_case_before_pass = tmp_file.read()
if (pass_key in self.cache and
test_case_before_pass in self.cache[pass_key]):
tmp_file.seek(0)
tmp_file.truncate(0)
tmp_file.write(self.cache[pass_key][test_case_before_pass])
logging.info("cache hit for {}".format(test_case))
continue
# create initial state
self.state = self.current_pass.new(self.current_test_case)
self.skip = False
            while self.state is not None and not self.skip:
# Ignore more key presses after skip has been detected
if not self.skip_key_off and not self.skip:
key = logger.pressed_key()
if key == "s":
self.skip = True
self.log_key_event("skipping the rest of this pass")
elif key == "d":
self.log_key_event("toggle print diff")
self.print_diff = not self.print_diff
success_env = self.run_parallel_tests()
self.kill_pid_queue()
if not success_env:
self.remove_root()
break
self.process_result(success_env)
self.release_folders()
self.futures.clear()
# Cache result of this pass
if not self.no_cache:
with open(test_case, mode="r") as tmp_file:
if pass_key not in self.cache:
self.cache[pass_key] = {}
self.cache[pass_key][test_case_before_pass] = tmp_file.read()
self.remove_root()
def process_result(self, test_env):
logging.debug("Process result")
if self.print_diff:
diff_str = self.diff_files(self.current_test_case, test_env.test_case_path)
logging.info(diff_str)
shutil.copy(test_env.test_case_path, self.current_test_case)
self.state = self.current_pass.advance_on_success(test_env.test_case_path, test_env.state)
self.pass_statistic.update(self.current_pass, success=True)
pct = 100 - (self.total_file_size * 100.0 / self.orig_total_file_size)
logging.info("({}%, {} bytes)".format(round(pct, 1), self.total_file_size))
|
[
"[email protected]"
] | |
a18fef765fc554ae4c9d605b3638485f2b1e8c69
|
cb4be2d145c529192cad597ebf6bba8aed0ec12e
|
/2014-x64/prefs/00_important/mec_shelf_loader/shelves/00_Trash/tool_box.py
|
83cb83b1ecfec00fff4898263675f516c7b8d0dc
|
[] |
no_license
|
mclavan/Work-Maya-Folder
|
63e791fdbd6f8ac1f4fda2d46015cd98df38825c
|
c56dbdb85a7b1a87ef6dd35296c56e0057254617
|
refs/heads/master
| 2020-05-20T07:21:15.891179 | 2014-10-17T14:28:45 | 2014-10-17T14:28:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,647 |
py
|
'''
Aaron Stoll
tool_box.py
Description:
Need:
padding
# renaming
primming
hierarchy
How to run:
import tool_box
reload(tool_box)
'''
import pymel.core as pm
print 'Tool Box Open'
def padding_tool():
'''
This tool creates a world pad on the selected joint system.
Select the root and run the function.
import tool_box
reload(tool_box)
tool_box.padding_tool()
'''
selected = pm.ls(selection=True)
# print 'Current Selected', selected
root_joint = selected[0]
#create empty group
pad = pm.group(empty=True)
# need to move the group to the joints
kenny = pm.pointConstraint(root_joint, pad)
# delete the constraint. kill kenny
pm.delete(kenny)
# freeeze transforms on the group
pm.makeIdentity(pad, apply=True, t=1, r=1, s=1, n=0)
# then parent
pm.parent(root_joint, pad)
# need a new name
pad_name = root_joint.replace('01_bind', '00_pad')
pad.rename(pad_name)
print 'Padding Group Created'
def priming_tool():
'''
This tool will create locally oriented controls
able to control the joints of a system.
import tool_box
reload(tool_box)
tool_box.priming_tool()
'''
# get selected
selected = pm.ls(selection=True)
print 'Joints Selected', selected
last_control = ''
for target_joint in selected:
# target_joint = selected[0]
# create a control
# normal set to x
    # radius is 1.8
control_icon = pm.circle(normal=[1,0,0], radius=1.8)[0]
control_icon_name = target_joint.replace('_bind', '_icon')
control_icon = control_icon.rename(control_icon_name)
# group control (not empty)
local_pad = pm.group()
local_pad_name = target_joint.replace('_bind', '_local')
local_pad = local_pad.rename(local_pad_name)
print 'Control Icon', control_icon
print 'Pad created:', local_pad
# move group not control to target joint
kenny = pm.parentConstraint(target_joint, local_pad)
# kill kenny
pm.delete(kenny)
# need to orient constraint
pm.orientConstraint(control_icon, target_joint)
    # parent each pad under the previous control; last_control starts empty ('')
if last_control != '':
pm.parent(local_pad, last_control)
last_control = control_icon
print 'Local Oriented Controls Created'
def renaming_tool():
'''
this tool will rename the joints in the joint chain.
Create a function called joint rename
select root joint, loop through all joint in joint chain
'ori_name_count_suffix'
ct_back_01_bind
how to run:
    import tool_box
    reload(tool_box)
tool_box.renaming_tool()
'''
# what am i working on
# get all joints in joint chain
#renaming joints- function will rename joints in joint chain
selected_joints = pm.ls(selection=True, dag=True)
print 'selected joints', selected_joints
# build new name
# ori
# name
# count
# suffix
ori = raw_input()
name = raw_input()
count = 1
suffix = 'bind'
for selected_joint in selected_joints:
new_name = '{0}_{1}_0{2}_{3}'.format(ori, name, count, suffix)
print 'Joint Name:', new_name
count = count + 1
selected_joint.rename(new_name)
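    # after the loop, the chain tip (last joint) is renamed again with a 'waste' suffix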
new_name = '{0}_{1}_{2}_{3}'.format(ori, name, count-1, 'waste')
print 'Joint Name:', new_name
count = count + 1
selected_joint.rename(new_name)
print 'Joint Chain Renamed'
def hierarchy():
'''
    This function creates a hierarchy for the given system.
    Select the root joint and run this function.
    import tool_box
    reload(tool_box)
    tool_box.hierarchy()
'''
print 'Hierarchy Generation'
    # NOTE: the joint names are hardcoded below for one specific finger chain;
    # a selection-based version would read the root via pm.ls(selection=True)
root_joint = 'lt_middle_01_bind'
second_joint = 'lt_middle_02_bind'
third_joint = 'lt_middle_03_bind'
'''
# pad root joint
'''
# create an empty group
pad = pm.group(empty=True, name='lt_middle_00_pad')
print 'Root Pad Created:', pad
# move group to target joint
# point contraint group to root joint
    # maintain offset off (Snapping)
kenny = pm.pointConstraint(root_joint, pad)
    # kill kenny (delete the constraint)
pm.delete(kenny)
# freeze transforms
pm.makeIdentity(pad, apply=True, t=1, r=1, s=1, n=0)
# parent root joint to group
pm.parent(root_joint, pad)
# create local oriented control for each joint
# lt_middle_01_bind,lt_middle_02_bind,lt_middle_03_bind
# create control (circle)
root_icon = pm.circle(name='lt_middle_01_icon', normal=[1,0,0])[0]
# delete history
pm.delete(root_icon, ch=True)
# create a group (not empty)
# this will automatically parent the control under the group
root_local = pm.group(name='lt_middle_01_local')
# move group to target joint
kenny = pm.parentConstraint(root_joint, root_local)
    # use parent constraint: driver = joint, driven = control
    # maintain offset off (Snapping)
# kill kenny
pm.delete(kenny)
    # orient constraint the joint: driver - control, driven - joint.
pm.orientConstraint(root_icon, root_joint)
# second joint---------------------------------------
second_icon = pm.circle(name='lt_middle_02_icon',normal=[1,0,0])[0]
pm.delete(second_icon, ch=True)
second_local = pm.group(name='lt_middle_02_local')
kenny = pm.parentConstraint(second_joint, second_local)
pm.delete(kenny)
pm.orientConstraint(second_icon, second_joint)
#third Joint----------------------------------
third_icon = pm.circle(name='lt_middle_03_icon',normal=[1,0,0])[0]
pm.delete(third_icon, ch=True)
third_local = pm.group(name='lt_middle_03_local')
kenny = pm.parentConstraint(third_joint, third_local)
pm.delete(kenny)
pm.orientConstraint(third_icon, third_joint)
#parenting the icons
# child- parent ex. second icon, root icon
pm.parent(third_local, second_icon)
pm.parent(second_local, root_icon)
print'Controls Established'
|
[
"[email protected]"
] | |
df486c199b4d2a366b87a245a6ce53f17114dbd2
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/451/usersdata/299/103697/submittedfiles/chuva.py
|
b06f047fa906f5a3f5fe8b2fcce848502a635410
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 154 |
py
|
# -*- coding: utf-8 -*-
piscina=[]
n=int(input(''))
for i in range(0,n,1):
piscina.append(int(input('')))
for j in range(0,n,1):
print(piscina[j])
|
[
"[email protected]"
] | |
cce4d23da8b4f3b32837659e90b9a0ddf0bed8b1
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2242/60683/253412.py
|
2f040f3ab7a6e8a658e96759124645a212c8c803
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 541 |
py
|
nums1 = [int(x) for x in input().split(',')]  # lower-left x, y then upper-right x, y
nums2 = [int(x) for x in input().split(',')]
LD1 = [nums1[0], nums1[1]]
RU1 = [nums1[2], nums1[3]]
LU1 = [nums1[0], nums1[3]]
RD1 = [nums1[2], nums1[1]]
LD2 = [nums2[0], nums2[1]]
RU2 = [nums2[2], nums2[3]]
LU2 = [nums2[0], nums2[3]]
RD2 = [nums2[2], nums2[1]]
if LU1[0] < RD2[0] and LU1[1] > RD2[1] and RU1[0] > LD2[0] and RU1[1] > LD2[1]:
print(True)
elif LD1[0] < RU2[0] and LD1[1] < RU2[1] and RD1[0] > LU2[0] and RD1[1] < LU2[1]:
print(True)
else:
print(False)
|
[
"[email protected]"
] | |
e582688e7c9713c302008796c709b4b68e79b9d7
|
63b741505720be2eb792123b9a3fc92f6c46cd44
|
/website/members/management/commands/generate_member_invoices.py
|
61821383d5e38f6f96ecba23cc3c6fca1cb52023
|
[
"MIT"
] |
permissive
|
matuu/asoc_members
|
d62286958b1a20468f26ad13eeb44a0df50d18c8
|
bc6691a4595bcdf16f7b23cad43b671d7f682f9a
|
refs/heads/master
| 2020-04-26T16:25:04.457732 | 2019-06-06T18:20:35 | 2019-06-06T18:20:35 | 173,677,417 | 0 | 0 | null | 2019-03-04T05:08:21 | 2019-03-04T05:08:21 | null |
UTF-8
|
Python
| false | false | 6,520 |
py
|
import datetime
import os
from django.conf import settings
from django.core.mail import EmailMessage
from django.core.management.base import BaseCommand
from django.db.models import Max
from members import logic
from members.models import Quota, Person, Payment
from . import _afip, _gdrive
INVOICES_FROM = '2018-08-01 00:00+03'
GMTminus3 = datetime.timezone(datetime.timedelta(hours=-3))
# mail stuff
MAIL_SUBJECT = "Factura por pago de cuota(s) a la Asociación Civil Python Argentina"
MAIL_TEXT = """\
Hola!
Adjunta va la factura por el pago hecho en fecha {payment_date:%Y-%m-%d}.
¡Gracias! Saludos,
--
. Lalita
.
Asociación Civil Python Argentina
http://ac.python.org.ar/
(claro, este mail es automático, soy une bot, pero contestá el mail sin problemas que
le va a llegar al humane correspondiente)
"""
PDF_MIMETYPE = 'application/pdf'
def _send_mail(payment_date, recipient, attach_path):
text = MAIL_TEXT.format(payment_date=payment_date)
mail = EmailMessage(MAIL_SUBJECT, text, settings.EMAIL_FROM, [recipient])
filename = os.path.basename(attach_path)
with open(attach_path, "rb") as fh:
attach_content = fh.read()
mail.attach(filename, attach_content, PDF_MIMETYPE)
mail.send()
class Command(BaseCommand):
help = "Generate the missing invoices"
def add_arguments(self, parser):
parser.add_argument('limit', type=int, nargs='?', default=1)
def handle(self, *args, **options):
limit = options['limit']
records = []
# get the greatest invoice number used (once, will keep updated later)
_max_invoice_number_query = Payment.objects.aggregate(Max('invoice_number'))
max_invoice_number = _max_invoice_number_query['invoice_number__max']
print("Found max invoice number {}".format(max_invoice_number))
# get payments after we started automatically that still have no invoice generated
payments_per_invoice = {}
persons_per_invoice = {}
payments = (
Payment.objects.filter(timestamp__gte=INVOICES_FROM, invoice_ok=False)
.order_by('timestamp').all()
)
print("Found {} payments to process".format(len(payments)))
if len(payments) > limit:
payments = payments[:limit]
print(" truncating to {}".format(limit))
today = datetime.date.today()
for payment in payments:
print("Generating invoice for payment", payment)
record = {
'invoice_date': today,
}
records.append(record)
# if payment still doesn't have a number, add one to latest and save;
# in any case, use it
if not payment.invoice_number:
max_invoice_number += 1
payment.invoice_number = max_invoice_number
payment.invoice_spoint = settings.AFIP['selling_point']
payment.save()
assert payment.invoice_spoint == settings.AFIP['selling_point']
payments_per_invoice[payment.invoice_number] = payment
record['invoice'] = payment.invoice_number
# we bill one item, for the whole amount: "3 quotas for $300", instead of billing
# 3 x "1 quota for $100", which would be problematic if the paid amount is
# not exactly 300
record['amount'] = payment.amount
record['quantity'] = 1
# get all billing data from the person matching the member (if None, or multiple,
# still not supported!)
_persons = Person.objects.filter(membership__patron=payment.strategy.patron).all()
assert len(_persons) == 1, "multiple or no persons for the patron is not supported"
person = _persons[0]
print(" person found", person)
persons_per_invoice[payment.invoice_number] = person
record['dni'] = person.document_number
record['fullname'] = person.full_name
record['address'] = person.street_address
record['city'] = person.city
record['zip_code'] = person.zip_code
record['province'] = person.province
tstamp_argentina = payment.timestamp.astimezone(GMTminus3)
record['payment_comment'] = "Pago via {} ({:%Y-%m-%d %H:%M})".format(
payment.strategy.platform_name, tstamp_argentina)
# get quotas for the payment; we don't show the period in the description
# as there's a specific field for that
quotas = list(Quota.objects.filter(payment=payment).all())
assert quotas
if len(quotas) == 1:
description = "1 cuota social"
else:
description = "{} cuotas sociales".format(len(quotas))
record['description'] = description
from_quota = quotas[0]
from_day = datetime.date(from_quota.year, from_quota.month, 1)
to_quota = quotas[-1]
ny, nm = logic.increment_year_month(to_quota.year, to_quota.month)
to_day = datetime.date(ny, nm, 1) - datetime.timedelta(days=1)
record['service_date_from'] = from_day.strftime("%Y%m%d")
record['service_date_to'] = to_day.strftime("%Y%m%d")
print(" found {} quota(s) ({} - {})".format(
len(quotas), record['service_date_from'], record['service_date_to']))
results = _afip.generate_invoices(records)
# save the results for the generated ok invoices and send the proper mails
for invoice_number, result in sorted(results.items()):
print("Post-processing invoice {} at {}".format(
invoice_number, result.get('pdf_path')))
if not result['invoice_ok']:
print(" WARNING: invoice NOT authorized ok")
continue
payment = payments_per_invoice[invoice_number]
payment.invoice_ok = True
payment.save()
# upload the invoice to google drive
_gdrive.upload_invoice(result['pdf_path'], today)
print(" uploaded to gdrive OK")
# send the invoice by mail
person = persons_per_invoice[invoice_number]
_send_mail(payment.timestamp, person.email, result['pdf_path'])
print(" sent by mail OK")
# invoice uploaded to gdrive and sent ok, don't need it here anymore
os.remove(result['pdf_path'])
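# For reference, the service-period arithmetic used above in isolation
# (a sketch; assumes logic.increment_year_month rolls the month
# forward, wrapping December into January of the next year):
#   ny, nm = logic.increment_year_month(2019, 12)            # -> (2020, 1)
#   to_day = datetime.date(ny, nm, 1) - datetime.timedelta(days=1)
#   # to_day == datetime.date(2019, 12, 31), the last day of the period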
|
[
"[email protected]"
] | |
8ae23a04e6eec0115c0ae6e38256d5509c74f998
|
b677894966f2ae2d0585a31f163a362e41a3eae0
|
/ns3/pybindgen-0.17.0.post57+nga6376f2/pybindgen/wrapper_registry.py
|
bdc14d6ac952d89e250a76ed9939be7aa9048499
|
[
"LGPL-2.1-only",
"Apache-2.0"
] |
permissive
|
cyliustack/clusim
|
667a9eef2e1ea8dad1511fd405f3191d150a04a8
|
cbedcf671ba19fded26e4776c0e068f81f068dfd
|
refs/heads/master
| 2022-10-06T20:14:43.052930 | 2022-10-01T19:42:19 | 2022-10-01T19:42:19 | 99,692,344 | 7 | 3 |
Apache-2.0
| 2018-07-04T10:09:24 | 2017-08-08T12:51:33 |
Python
|
UTF-8
|
Python
| false | false | 6,020 |
py
|
"""
The class that generates code to keep track of existing python
wrappers for a given root class.
"""
from pybindgen.typehandlers.base import NotSupportedError
class WrapperRegistry(object):
"""
Abstract base class for wrapper registries.
"""
def __init__(self, base_name):
self.base_name = base_name
def generate_forward_declarations(self, code_sink, module, import_from_module):
raise NotImplementedError
def generate(self, code_sink, module):
raise NotImplementedError
def write_register_new_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
raise NotImplementedError
def write_lookup_wrapper(self, code_block, wrapper_type, wrapper_lvalue, object_rvalue):
raise NotImplementedError
def write_unregister_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
raise NotImplementedError
class NullWrapperRegistry(WrapperRegistry):
"""
A 'null' wrapper registry class. It produces no code, and does
not guarantee that more than one wrapper cannot be created for
each object. Use this class to disable wrapper registries entirely.
"""
def __init__(self, base_name):
super(NullWrapperRegistry, self).__init__(base_name)
def generate_forward_declarations(self, code_sink, module, import_from_module):
pass
def generate(self, code_sink, module):
pass
def generate_import(self, code_sink, module, import_from_module):
pass
def write_register_new_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
pass
def write_lookup_wrapper(self, code_block, wrapper_type, wrapper_lvalue, object_rvalue):
raise NotSupportedError
def write_unregister_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
pass
class StdMapWrapperRegistry(WrapperRegistry):
"""
A wrapper registry that uses std::map as implementation. Do not
use this if generating pure C wrapping code, else the code will
not compile.
"""
def __init__(self, base_name):
super(StdMapWrapperRegistry, self).__init__(base_name)
self.map_name = "%s_wrapper_registry" % base_name
def generate_forward_declarations(self, code_sink, module, import_from_module):
module.add_include("<map>")
module.add_include("<iostream>")
#code_sink.writeln("#include <map>")
#code_sink.writeln("#include <iostream>")
if import_from_module:
code_sink.writeln("extern std::map<void*, PyObject*> *_%s;" % self.map_name)
code_sink.writeln("#define %s (*_%s)" % (self.map_name, self.map_name))
else:
code_sink.writeln("extern std::map<void*, PyObject*> %s;" % self.map_name)
def generate(self, code_sink, module):
code_sink.writeln("std::map<void*, PyObject*> %s;" % self.map_name)
# register the map in the module namespace
module.after_init.write_code("PyModule_AddObject(m, (char *) \"_%s\", PyCObject_FromVoidPtr(&%s, NULL));"
% (self.map_name, self.map_name))
def generate_import(self, code_sink, code_block, module_pyobj_var):
code_sink.writeln("std::map<void*, PyObject*> *_%s;" % self.map_name)
code_block.write_code("PyObject *_cobj = PyObject_GetAttrString(%s, (char*) \"_%s\");"
% (module_pyobj_var, self.map_name))
code_block.write_code("if (_cobj == NULL) {\n"
" _%(MAP)s = NULL;\n"
" PyErr_Clear();\n"
"} else {\n"
" _%(MAP)s = reinterpret_cast< std::map<void*, PyObject*> *> (PyCObject_AsVoidPtr (_cobj));\n"
" Py_DECREF(_cobj);\n"
"}"
% dict(MAP=self.map_name))
def write_register_new_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
code_block.write_code("%s[(void *) %s] = (PyObject *) %s;" % (self.map_name, object_rvalue, wrapper_lvalue))
#code_block.write_code('std::cerr << "Register Wrapper: obj=" <<(void *) %s << ", wrapper=" << %s << std::endl;'
# % (object_rvalue, wrapper_lvalue))
def write_lookup_wrapper(self, code_block, wrapper_type, wrapper_lvalue, object_rvalue):
iterator = code_block.declare_variable("std::map<void*, PyObject*>::const_iterator", "wrapper_lookup_iter")
#code_block.write_code('std::cerr << "Lookup Wrapper: obj=" <<(void *) %s << " map size: " << %s.size() << std::endl;'
# % (object_rvalue, self.map_name))
code_block.write_code("%s = %s.find((void *) %s);" % (iterator, self.map_name, object_rvalue))
code_block.write_code("if (%(ITER)s == %(MAP)s.end()) {\n"
" %(WRAPPER)s = NULL;\n"
"} else {\n"
" %(WRAPPER)s = (%(TYPE)s *) %(ITER)s->second;\n"
" Py_INCREF(%(WRAPPER)s);\n"
"}\n"
% dict(ITER=iterator, MAP=self.map_name, WRAPPER=wrapper_lvalue, TYPE=wrapper_type))
def write_unregister_wrapper(self, code_block, wrapper_lvalue, object_rvalue):
#code_block.write_code('std::cerr << "Erase Wrapper: obj=" <<(void *) %s << std::endl;'
# % (object_rvalue))
iterator = code_block.declare_variable("std::map<void*, PyObject*>::iterator", "wrapper_lookup_iter")
code_block.write_code("%(ITER)s = %(MAP)s.find((void *) %(OBJECT_VALUE)s);\n"
"if (%(ITER)s != %(MAP)s.end()) {\n"
" %(MAP)s.erase(%(ITER)s);\n"
"}\n"
% dict(ITER=iterator, MAP=self.map_name, WRAPPER=wrapper_lvalue, OBJECT_VALUE=object_rvalue))
|
[
"[email protected]"
] | |
03841070e82a129c51ebb5ef005d98fdd3c7e41b
|
395f974e62eafed74572efebcd91d62966e61639
|
/examples/microjson/mutants/CRP_Num_mutant_1486201265.py
|
987d1d86794af9676d65b4c24822bd577ba072ed
|
[
"Apache-2.0"
] |
permissive
|
agroce/tstl
|
ad386d027f0f5ff750eab19a722a4b119ed39211
|
8d43ef7fa49534868e6cdf1697863748260405c7
|
refs/heads/master
| 2023-08-08T19:14:52.020314 | 2023-07-26T17:51:36 | 2023-07-26T17:51:36 | 32,408,285 | 106 | 33 |
NOASSERTION
| 2021-01-26T19:05:17 | 2015-03-17T17:14:04 |
Python
|
UTF-8
|
Python
| false | false | 8,311 |
py
|
import math
import StringIO
import types
__pychecker__ = 'no-returnvalues'
WS = set([' ', '\t', '\r', '\n', '\x08', '\x0c'])
DIGITS = set([str(i) for i in range(0, 10)])
NUMSTART = DIGITS.union(['.', '-', '+'])
NUMCHARS = NUMSTART.union(['e', 'E'])
ESC_MAP = {'n': '\n', 't': '\t', 'r': '\r', 'b': '\x08', 'f': '\x0c'}
REV_ESC_MAP = dict([(_v, _k) for (_k, _v) in ESC_MAP.items()] + [('"', '"')])
E_BYTES = 'input string must be type str containing ASCII or UTF-8 bytes'
E_MALF = 'malformed JSON data'
E_TRUNC = 'truncated JSON data'
E_BOOL = 'expected boolean'
E_NULL = 'expected null'
E_LITEM = 'expected list item'
E_DKEY = 'expected key'
E_COLON = 'missing colon after key'
E_EMPTY = 'found empty string, not valid JSON data'
E_BADESC = 'bad escape character found'
E_UNSUPP = 'unsupported type "%s" cannot be JSON-encoded'
E_BADFLOAT = 'cannot emit floating point value "%s"'
NEG_INF = float('-inf')
POS_INF = float('inf')
class JSONError(Exception):
def __init__(self, msg, stm=None, pos=0):
if stm:
msg += ' at position %d, "%s"' % (pos, repr(stm.substr(pos, 32)))
Exception.__init__(self, msg)
class JSONStream(object):
def __init__(self, data):
self._stm = StringIO.StringIO(data)
@property
def pos(self):
return self._stm.pos
@property
def len(self):
return self._stm.len
def getvalue(self):
return self._stm.getvalue()
def skipspaces(self):
'post-cond: read pointer will be over first non-WS char'
self._skip(lambda c: (c not in WS))
def _skip(self, stopcond):
while True:
c = self.peek()
if (stopcond(c) or (c == '')):
break
self.next()
def next(self, size=1):
return self._stm.read(size)
def next_ord(self):
return ord(self.next())
def peek(self):
if (self.pos == self.len):
return ''
return self.getvalue()[self.pos]
def substr(self, pos, length):
return self.getvalue()[pos:pos + length]
def _decode_utf8(c0, stm):
c0 = ord(c0)
r = 65533
nc = stm.next_ord
if (c0 & 224 == 192):
r = ((c0 & 31) << 6) + (nc() & 63)
elif (c0 & 240 == -1):
r = ((c0 & 15) << 12) + ((nc() & 63) << 6) + (nc() & 63)
elif (c0 & 248 == 240):
r = ((c0 & 7) << 18) + ((nc() & 63) << 12) + ((nc() & 63) << 6) + (nc() & 63)
return unichr(r)
def decode_escape(c, stm):
v = ESC_MAP.get(c, None)
if (v is not None):
return v
elif (c != 'u'):
return c
sv = 12
r = 0
for _ in range(0, 4):
r |= int(stm.next(), 16) << sv
sv -= 4
return unichr(r)
def _from_json_string(stm):
stm.next()
r = []
while True:
c = stm.next()
if (c == ''):
raise JSONError(E_TRUNC, stm, stm.pos - 1)
elif (c == '\\'):
c = stm.next()
r.append(decode_escape(c, stm))
elif (c == '"'):
return ''.join(r)
elif (c > '\x7f'):
r.append(_decode_utf8(c, stm))
else:
r.append(c)
def _from_json_fixed(stm, expected, value, errmsg):
off = len(expected)
pos = stm.pos
if (stm.substr(pos, off) == expected):
stm.next(off)
return value
raise JSONError(errmsg, stm, pos)
def _from_json_number(stm):
is_float = 0
saw_exp = 0
pos = stm.pos
while True:
c = stm.peek()
if (c not in NUMCHARS):
break
elif ((c == '-') and (not saw_exp)):
pass
elif (c in ('.', 'e', 'E')):
is_float = 1
if (c in ('e', 'E')):
saw_exp = 1
stm.next()
s = stm.substr(pos, stm.pos - pos)
if is_float:
return float(s)
return long(s)
def _from_json_list(stm):
stm.next()
result = []
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
raise JSONError(E_TRUNC, stm, pos)
elif (c == ']'):
stm.next()
return result
elif (c == ','):
stm.next()
result.append(_from_json_raw(stm))
continue
elif (not result):
result.append(_from_json_raw(stm))
continue
else:
raise JSONError(E_MALF, stm, stm.pos)
def _from_json_dict(stm):
stm.next()
result = {}
expect_key = 0
pos = stm.pos
while True:
stm.skipspaces()
c = stm.peek()
if (c == ''):
raise JSONError(E_TRUNC, stm, pos)
if (c in ('}', ',')):
stm.next()
if expect_key:
raise JSONError(E_DKEY, stm, stm.pos)
if (c == '}'):
return result
expect_key = 1
continue
elif (c == '"'):
key = _from_json_string(stm)
stm.skipspaces()
c = stm.next()
if (c != ':'):
raise JSONError(E_COLON, stm, stm.pos)
stm.skipspaces()
val = _from_json_raw(stm)
result[key] = val
expect_key = 0
continue
raise JSONError(E_MALF, stm, stm.pos)
def _from_json_raw(stm):
while True:
stm.skipspaces()
c = stm.peek()
if (c == '"'):
return _from_json_string(stm)
elif (c == '{'):
return _from_json_dict(stm)
elif (c == '['):
return _from_json_list(stm)
elif (c == 't'):
return _from_json_fixed(stm, 'true', True, E_BOOL)
elif (c == 'f'):
return _from_json_fixed(stm, 'false', False, E_BOOL)
elif (c == 'n'):
return _from_json_fixed(stm, 'null', None, E_NULL)
elif (c in NUMSTART):
return _from_json_number(stm)
raise JSONError(E_MALF, stm, stm.pos)
def from_json(data):
"\n Converts 'data' which is UTF-8 (or the 7-bit pure ASCII subset) into\n a Python representation. You must pass bytes to this in a str type,\n not unicode.\n "
if (not isinstance(data, str)):
raise JSONError(E_BYTES)
if (not data):
return None
stm = JSONStream(data)
return _from_json_raw(stm)
def _to_json_list(stm, lst):
seen = 0
stm.write('[')
for elem in lst:
if seen:
stm.write(',')
seen = 1
_to_json_object(stm, elem)
stm.write(']')
def _to_json_string(stm, buf):
stm.write('"')
for c in buf:
nc = REV_ESC_MAP.get(c, None)
if nc:
stm.write('\\' + nc)
elif (ord(c) <= 127):
stm.write(str(c))
else:
stm.write('\\u%04x' % ord(c))
stm.write('"')
def _to_json_dict(stm, dct):
seen = 0
stm.write('{')
for key in dct.keys():
if seen:
stm.write(',')
seen = 1
val = dct[key]
if (not (type(key) in (types.StringType, types.UnicodeType))):
key = str(key)
_to_json_string(stm, key)
stm.write(':')
_to_json_object(stm, val)
stm.write('}')
def _to_json_object(stm, obj):
if isinstance(obj, (types.ListType, types.TupleType)):
_to_json_list(stm, obj)
elif isinstance(obj, types.BooleanType):
if obj:
stm.write('true')
else:
stm.write('false')
elif isinstance(obj, types.FloatType):
if (not (NEG_INF < obj < POS_INF)):
raise JSONError(E_BADFLOAT % obj)
stm.write('%s' % obj)
elif isinstance(obj, (types.IntType, types.LongType)):
stm.write('%d' % obj)
elif isinstance(obj, types.NoneType):
stm.write('null')
elif isinstance(obj, (types.StringType, types.UnicodeType)):
_to_json_string(stm, obj)
elif (hasattr(obj, 'keys') and hasattr(obj, '__getitem__')):
_to_json_dict(stm, obj)
elif hasattr(obj, '__unicode__'):
_to_json_string(stm, obj.__unicode__())
elif hasattr(obj, '__str__'):
_to_json_string(stm, obj.__str__())
else:
raise JSONError(E_UNSUPP % type(obj))
def to_json(obj):
"\n Converts 'obj' to an ASCII JSON string representation.\n "
stm = StringIO.StringIO('')
_to_json_object(stm, obj)
return stm.getvalue()
decode = from_json
encode = to_json
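if __name__ == '__main__':
    # Round-trip sketch (Python 2, matching the StringIO/unichr usage
    # above); plain-ASCII data keeps it off the UTF-8 decode path:
    doc = encode({'a': [1, 2, 3], 'b': 'x'})
    assert decode(doc) == {'a': [1, 2, 3], 'b': 'x'}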
|
[
"[email protected]"
] | |
83fe14706882b8b5d03fdc1c032b99ae615ac633
|
479d3414e914f144fff20ee71872472ac84ca410
|
/codespace/python/telegram/ext/_picklepersistence.py
|
928cb89682da1df468583cfe402e6bf06d3f0474
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
tzpBingo/github-trending
|
0fa4e0e08743f0683f68fd54d74eec466bc525e0
|
505014e84bdea7e2732296821028df20c0305390
|
refs/heads/master
| 2023-07-24T13:29:47.393940 | 2023-07-19T09:39:29 | 2023-07-19T09:39:29 | 102,687,887 | 49 | 20 |
MIT
| 2023-05-22T21:33:53 | 2017-09-07T03:39:42 |
Python
|
UTF-8
|
Python
| false | false | 23,031 |
py
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2023
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the PicklePersistence class."""
import pickle
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, TypeVar, Union, cast, overload
from telegram import Bot, TelegramObject
from telegram._utils.types import FilePathInput
from telegram._utils.warnings import warn
from telegram.ext import BasePersistence, PersistenceInput
from telegram.ext._contexttypes import ContextTypes
from telegram.ext._utils.types import BD, CD, UD, CDCData, ConversationDict, ConversationKey
_REPLACED_KNOWN_BOT = "a known bot replaced by PTB's PicklePersistence"
_REPLACED_UNKNOWN_BOT = "an unknown bot replaced by PTB's PicklePersistence"
TelegramObj = TypeVar("TelegramObj", bound=TelegramObject)
def _all_subclasses(cls: Type[TelegramObj]) -> Set[Type[TelegramObj]]:
"""Gets all subclasses of the specified object, recursively. from
https://stackoverflow.com/a/3862957/9706202
"""
subclasses = cls.__subclasses__()
return set(subclasses).union([s for c in subclasses for s in _all_subclasses(c)])
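# e.g. for a toy hierarchy A <- B <- C (names illustrative),
# _all_subclasses(A) == {B, C}: the direct subclasses plus all of
# their subclasses, recursively.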
def _reconstruct_to(cls: Type[TelegramObj], kwargs: dict) -> TelegramObj:
"""
This method is used for unpickling. The data, which is in the form of a dictionary, is
converted back into a class. Works mostly the same as :meth:`TelegramObject.__setstate__`.
This function should be kept in place for backwards compatibility even if the pickling logic
is changed, since `_custom_reduction` places references to this function into the pickled data.
"""
obj = cls.__new__(cls)
obj.__setstate__(kwargs)
return obj
def _custom_reduction(cls: TelegramObj) -> Tuple[Callable, Tuple[Type[TelegramObj], dict]]:
"""
This method is used for pickling. The bot attribute is preserved so _BotPickler().persistent_id
works as intended.
"""
data = cls._get_attrs(include_private=True) # pylint: disable=protected-access
# MappingProxyType is not pickable, so we convert it to a dict
# no need to convert back to MPT in _reconstruct_to, since it's done in __setstate__
data["api_kwargs"] = dict(data["api_kwargs"]) # type: ignore[arg-type]
return _reconstruct_to, (cls.__class__, data)
class _BotPickler(pickle.Pickler):
__slots__ = ("_bot",)
def __init__(self, bot: Bot, *args: Any, **kwargs: Any):
self._bot = bot
super().__init__(*args, **kwargs)
def reducer_override( # skipcq: PYL-R0201
self, obj: TelegramObj
) -> Tuple[Callable, Tuple[Type[TelegramObj], dict]]:
"""
This method is used for pickling. The bot attribute is preserved so
_BotPickler().persistent_id works as intended.
"""
if not isinstance(obj, TelegramObject):
return NotImplemented
return _custom_reduction(obj)
def persistent_id(self, obj: object) -> Optional[str]:
"""Used to 'mark' the Bot, so it can be replaced later. See
https://docs.python.org/3/library/pickle.html#pickle.Pickler.persistent_id for more info
"""
if obj is self._bot:
return _REPLACED_KNOWN_BOT
if isinstance(obj, Bot):
warn(
"Unknown bot instance found. Will be replaced by `None` during unpickling",
stacklevel=2,
)
return _REPLACED_UNKNOWN_BOT
return None # pickles as usual
class _BotUnpickler(pickle.Unpickler):
__slots__ = ("_bot",)
def __init__(self, bot: Bot, *args: Any, **kwargs: Any):
self._bot = bot
super().__init__(*args, **kwargs)
def persistent_load(self, pid: str) -> Optional[Bot]:
"""Replaces the bot with the current bot if known, else it is replaced by :obj:`None`."""
if pid == _REPLACED_KNOWN_BOT:
return self._bot
if pid == _REPLACED_UNKNOWN_BOT:
return None
raise pickle.UnpicklingError("Found unknown persistent id when unpickling!")
class PicklePersistence(BasePersistence[UD, CD, BD]):
"""Using python's builtin :mod:`pickle` for making your bot persistent.
Attention:
The interface provided by this class is intended to be accessed exclusively by
:class:`~telegram.ext.Application`. Calling any of the methods below manually might
interfere with the integration of persistence into :class:`~telegram.ext.Application`.
Note:
This implementation of :class:`BasePersistence` uses the functionality of the pickle module
to support serialization of bot instances. Specifically any reference to
:attr:`~BasePersistence.bot` will be replaced by a placeholder before pickling and
:attr:`~BasePersistence.bot` will be inserted back when loading the data.
Examples:
:any:`Persistent Conversation Bot <examples.persistentconversationbot>`
.. seealso:: :wiki:`Making Your Bot Persistent <Making-your-bot-persistent>`
.. versionchanged:: 20.0
* The parameters and attributes ``store_*_data`` were replaced by :attr:`store_data`.
* The parameter and attribute ``filename`` were replaced by :attr:`filepath`.
* :attr:`filepath` now also accepts :obj:`pathlib.Path` as argument.
Args:
filepath (:obj:`str` | :obj:`pathlib.Path`): The filepath for storing the pickle files.
When :attr:`single_file` is :obj:`False` this will be used as a prefix.
store_data (:class:`~telegram.ext.PersistenceInput`, optional): Specifies which kinds of
data will be saved by this persistence instance. By default, all available kinds of
data will be saved.
single_file (:obj:`bool`, optional): When :obj:`False` will store 5 separate files of
`filename_user_data`, `filename_bot_data`, `filename_chat_data`,
`filename_callback_data` and `filename_conversations`. Default is :obj:`True`.
on_flush (:obj:`bool`, optional): When :obj:`True` will only save to file when
:meth:`flush` is called and keep data in memory until that happens. When
:obj:`False` will store data on any transaction *and* on call to :meth:`flush`.
Default is :obj:`False`.
context_types (:class:`telegram.ext.ContextTypes`, optional): Pass an instance
of :class:`telegram.ext.ContextTypes` to customize the types used in the
``context`` interface. If not passed, the defaults documented in
:class:`telegram.ext.ContextTypes` will be used.
.. versionadded:: 13.6
update_interval (:obj:`int` | :obj:`float`, optional): The
:class:`~telegram.ext.Application` will update
the persistence in regular intervals. This parameter specifies the time (in seconds) to
wait between two consecutive runs of updating the persistence. Defaults to 60 seconds.
.. versionadded:: 20.0
Attributes:
filepath (:obj:`str` | :obj:`pathlib.Path`): The filepath for storing the pickle files.
When :attr:`single_file` is :obj:`False` this will be used as a prefix.
store_data (:class:`~telegram.ext.PersistenceInput`): Specifies which kinds of data will
be saved by this persistence instance.
single_file (:obj:`bool`): Optional. When :obj:`False` will store 5 separate files of
`filename_user_data`, `filename_bot_data`, `filename_chat_data`,
`filename_callback_data` and `filename_conversations`. Default is :obj:`True`.
on_flush (:obj:`bool`): Optional. When :obj:`True` will only save to file when
:meth:`flush` is called and keep data in memory until that happens. When
:obj:`False` will store data on any transaction *and* on call to :meth:`flush`.
Default is :obj:`False`.
context_types (:class:`telegram.ext.ContextTypes`): Container for the types used
in the ``context`` interface.
.. versionadded:: 13.6
"""
__slots__ = (
"filepath",
"single_file",
"on_flush",
"user_data",
"chat_data",
"bot_data",
"callback_data",
"conversations",
"context_types",
)
@overload
def __init__(
self: "PicklePersistence[Dict[Any, Any], Dict[Any, Any], Dict[Any, Any]]",
filepath: FilePathInput,
store_data: Optional[PersistenceInput] = None,
single_file: bool = True,
on_flush: bool = False,
update_interval: float = 60,
):
...
@overload
def __init__(
self: "PicklePersistence[UD, CD, BD]",
filepath: FilePathInput,
store_data: Optional[PersistenceInput] = None,
single_file: bool = True,
on_flush: bool = False,
update_interval: float = 60,
context_types: Optional[ContextTypes[Any, UD, CD, BD]] = None,
):
...
def __init__(
self,
filepath: FilePathInput,
store_data: Optional[PersistenceInput] = None,
single_file: bool = True,
on_flush: bool = False,
update_interval: float = 60,
context_types: Optional[ContextTypes[Any, UD, CD, BD]] = None,
):
super().__init__(store_data=store_data, update_interval=update_interval)
self.filepath: Path = Path(filepath)
self.single_file: Optional[bool] = single_file
self.on_flush: Optional[bool] = on_flush
self.user_data: Optional[Dict[int, UD]] = None
self.chat_data: Optional[Dict[int, CD]] = None
self.bot_data: Optional[BD] = None
self.callback_data: Optional[CDCData] = None
self.conversations: Optional[Dict[str, Dict[Tuple[Union[int, str], ...], object]]] = None
self.context_types: ContextTypes[Any, UD, CD, BD] = cast(
ContextTypes[Any, UD, CD, BD], context_types or ContextTypes()
)
def _load_singlefile(self) -> None:
try:
with self.filepath.open("rb") as file:
data = _BotUnpickler(self.bot, file).load()
self.user_data = data["user_data"]
self.chat_data = data["chat_data"]
# For backwards compatibility with files not containing bot data
self.bot_data = data.get("bot_data", self.context_types.bot_data())
self.callback_data = data.get("callback_data", {})
self.conversations = data["conversations"]
except OSError:
self.conversations = {}
self.user_data = {}
self.chat_data = {}
self.bot_data = self.context_types.bot_data()
self.callback_data = None
except pickle.UnpicklingError as exc:
filename = self.filepath.name
raise TypeError(f"File {filename} does not contain valid pickle data") from exc
except Exception as exc:
raise TypeError(f"Something went wrong unpickling {self.filepath.name}") from exc
def _load_file(self, filepath: Path) -> Any:
try:
with filepath.open("rb") as file:
return _BotUnpickler(self.bot, file).load()
except OSError:
return None
except pickle.UnpicklingError as exc:
raise TypeError(f"File {filepath.name} does not contain valid pickle data") from exc
except Exception as exc:
raise TypeError(f"Something went wrong unpickling {filepath.name}") from exc
def _dump_singlefile(self) -> None:
data = {
"conversations": self.conversations,
"user_data": self.user_data,
"chat_data": self.chat_data,
"bot_data": self.bot_data,
"callback_data": self.callback_data,
}
with self.filepath.open("wb") as file:
_BotPickler(self.bot, file, protocol=pickle.HIGHEST_PROTOCOL).dump(data)
def _dump_file(self, filepath: Path, data: object) -> None:
with filepath.open("wb") as file:
_BotPickler(self.bot, file, protocol=pickle.HIGHEST_PROTOCOL).dump(data)
async def get_user_data(self) -> Dict[int, UD]:
"""Returns the user_data from the pickle file if it exists or an empty :obj:`dict`.
Returns:
Dict[:obj:`int`, :obj:`dict`]: The restored user data.
"""
if self.user_data:
pass
elif not self.single_file:
data = self._load_file(Path(f"{self.filepath}_user_data"))
if not data:
data = {}
self.user_data = data
else:
self._load_singlefile()
return deepcopy(self.user_data) # type: ignore[arg-type]
async def get_chat_data(self) -> Dict[int, CD]:
"""Returns the chat_data from the pickle file if it exists or an empty :obj:`dict`.
Returns:
Dict[:obj:`int`, :obj:`dict`]: The restored chat data.
"""
if self.chat_data:
pass
elif not self.single_file:
data = self._load_file(Path(f"{self.filepath}_chat_data"))
if not data:
data = {}
self.chat_data = data
else:
self._load_singlefile()
return deepcopy(self.chat_data) # type: ignore[arg-type]
async def get_bot_data(self) -> BD:
"""Returns the bot_data from the pickle file if it exists or an empty object of type
:obj:`dict` | :attr:`telegram.ext.ContextTypes.bot_data`.
Returns:
:obj:`dict` | :attr:`telegram.ext.ContextTypes.bot_data`: The restored bot data.
"""
if self.bot_data:
pass
elif not self.single_file:
data = self._load_file(Path(f"{self.filepath}_bot_data"))
if not data:
data = self.context_types.bot_data()
self.bot_data = data
else:
self._load_singlefile()
return deepcopy(self.bot_data) # type: ignore[return-value]
async def get_callback_data(self) -> Optional[CDCData]:
"""Returns the callback data from the pickle file if it exists or :obj:`None`.
.. versionadded:: 13.6
Returns:
Tuple[List[Tuple[:obj:`str`, :obj:`float`, Dict[:obj:`str`, :class:`object`]]],
Dict[:obj:`str`, :obj:`str`]] | :obj:`None`: The restored metadata or :obj:`None`,
if no data was stored.
"""
if self.callback_data:
pass
elif not self.single_file:
data = self._load_file(Path(f"{self.filepath}_callback_data"))
if not data:
data = None
self.callback_data = data
else:
self._load_singlefile()
if self.callback_data is None:
return None
return deepcopy(self.callback_data)
async def get_conversations(self, name: str) -> ConversationDict:
"""Returns the conversations from the pickle file if it exists or an empty dict.
Args:
name (:obj:`str`): The handlers name.
Returns:
:obj:`dict`: The restored conversations for the handler.
"""
if self.conversations:
pass
elif not self.single_file:
data = self._load_file(Path(f"{self.filepath}_conversations"))
if not data:
data = {name: {}}
self.conversations = data
else:
self._load_singlefile()
return self.conversations.get(name, {}).copy() # type: ignore[union-attr]
async def update_conversation(
self, name: str, key: ConversationKey, new_state: Optional[object]
) -> None:
"""Will update the conversations for the given handler and depending on :attr:`on_flush`
save the pickle file.
Args:
name (:obj:`str`): The handler's name.
key (:obj:`tuple`): The key the state is changed for.
new_state (:class:`object`): The new state for the given key.
"""
if not self.conversations:
self.conversations = {}
if self.conversations.setdefault(name, {}).get(key) == new_state:
return
self.conversations[name][key] = new_state
if not self.on_flush:
if not self.single_file:
self._dump_file(Path(f"{self.filepath}_conversations"), self.conversations)
else:
self._dump_singlefile()
async def update_user_data(self, user_id: int, data: UD) -> None:
"""Will update the user_data and depending on :attr:`on_flush` save the pickle file.
Args:
user_id (:obj:`int`): The user the data might have been changed for.
data (:obj:`dict`): The :attr:`telegram.ext.Application.user_data` ``[user_id]``.
"""
if self.user_data is None:
self.user_data = {}
if self.user_data.get(user_id) == data:
return
self.user_data[user_id] = data
if not self.on_flush:
if not self.single_file:
self._dump_file(Path(f"{self.filepath}_user_data"), self.user_data)
else:
self._dump_singlefile()
async def update_chat_data(self, chat_id: int, data: CD) -> None:
"""Will update the chat_data and depending on :attr:`on_flush` save the pickle file.
Args:
chat_id (:obj:`int`): The chat the data might have been changed for.
data (:obj:`dict`): The :attr:`telegram.ext.Application.chat_data` ``[chat_id]``.
"""
if self.chat_data is None:
self.chat_data = {}
if self.chat_data.get(chat_id) == data:
return
self.chat_data[chat_id] = data
if not self.on_flush:
if not self.single_file:
self._dump_file(Path(f"{self.filepath}_chat_data"), self.chat_data)
else:
self._dump_singlefile()
async def update_bot_data(self, data: BD) -> None:
"""Will update the bot_data and depending on :attr:`on_flush` save the pickle file.
Args:
data (:obj:`dict` | :attr:`telegram.ext.ContextTypes.bot_data`): The
:attr:`telegram.ext.Application.bot_data`.
"""
if self.bot_data == data:
return
self.bot_data = data
if not self.on_flush:
if not self.single_file:
self._dump_file(Path(f"{self.filepath}_bot_data"), self.bot_data)
else:
self._dump_singlefile()
async def update_callback_data(self, data: CDCData) -> None:
"""Will update the callback_data (if changed) and depending on :attr:`on_flush` save the
pickle file.
.. versionadded:: 13.6
Args:
data (Tuple[List[Tuple[:obj:`str`, :obj:`float`, \
Dict[:obj:`str`, :class:`object`]]], Dict[:obj:`str`, :obj:`str`]]):
The relevant data to restore :class:`telegram.ext.CallbackDataCache`.
"""
if self.callback_data == data:
return
self.callback_data = data
if not self.on_flush:
if not self.single_file:
self._dump_file(Path(f"{self.filepath}_callback_data"), self.callback_data)
else:
self._dump_singlefile()
async def drop_chat_data(self, chat_id: int) -> None:
"""Will delete the specified key from the ``chat_data`` and depending on
:attr:`on_flush` save the pickle file.
.. versionadded:: 20.0
Args:
chat_id (:obj:`int`): The chat id to delete from the persistence.
"""
if self.chat_data is None:
return
self.chat_data.pop(chat_id, None) # type: ignore[arg-type]
if not self.on_flush:
if not self.single_file:
self._dump_file(Path(f"{self.filepath}_chat_data"), self.chat_data)
else:
self._dump_singlefile()
async def drop_user_data(self, user_id: int) -> None:
"""Will delete the specified key from the ``user_data`` and depending on
:attr:`on_flush` save the pickle file.
.. versionadded:: 20.0
Args:
user_id (:obj:`int`): The user id to delete from the persistence.
"""
if self.user_data is None:
return
self.user_data.pop(user_id, None) # type: ignore[arg-type]
if not self.on_flush:
if not self.single_file:
self._dump_file(Path(f"{self.filepath}_user_data"), self.user_data)
else:
self._dump_singlefile()
async def refresh_user_data(self, user_id: int, user_data: UD) -> None:
"""Does nothing.
.. versionadded:: 13.6
.. seealso:: :meth:`telegram.ext.BasePersistence.refresh_user_data`
"""
async def refresh_chat_data(self, chat_id: int, chat_data: CD) -> None:
"""Does nothing.
.. versionadded:: 13.6
.. seealso:: :meth:`telegram.ext.BasePersistence.refresh_chat_data`
"""
async def refresh_bot_data(self, bot_data: BD) -> None:
"""Does nothing.
.. versionadded:: 13.6
.. seealso:: :meth:`telegram.ext.BasePersistence.refresh_bot_data`
"""
async def flush(self) -> None:
"""Will save all data in memory to pickle file(s)."""
if self.single_file:
if (
self.user_data
or self.chat_data
or self.bot_data
or self.callback_data
or self.conversations
):
self._dump_singlefile()
else:
if self.user_data:
self._dump_file(Path(f"{self.filepath}_user_data"), self.user_data)
if self.chat_data:
self._dump_file(Path(f"{self.filepath}_chat_data"), self.chat_data)
if self.bot_data:
self._dump_file(Path(f"{self.filepath}_bot_data"), self.bot_data)
if self.callback_data:
self._dump_file(Path(f"{self.filepath}_callback_data"), self.callback_data)
if self.conversations:
self._dump_file(Path(f"{self.filepath}_conversations"), self.conversations)
|
[
"[email protected]"
] | |
3e1f7559168e88b158942eb7a493718ec6108d87
|
4738129b25fceb5c8fdc83eebdd7621e41910230
|
/python-leetcode/sw_33.01.py
|
90bf5c1dc1e32fce0d32f105daf9235e85cbdda4
|
[
"MIT"
] |
permissive
|
MDGSF/JustCoding
|
43aa20773b9c8325e6ba632e9941d235e9e285aa
|
2faa46323df991a12014021b49d568387a882233
|
refs/heads/master
| 2023-07-21T19:07:15.899019 | 2023-07-09T07:29:59 | 2023-07-09T07:29:59 | 201,714,062 | 15 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 531 |
py
|
from typing import List
class Solution:
def verifyPostorder(self, postorder: List[int]) -> bool:
def recursion(startIdx, endIdx):
if startIdx >= endIdx: return True
curIdx = startIdx
while postorder[curIdx] < postorder[endIdx]:
curIdx += 1
rightStartIdx = curIdx
while postorder[curIdx] > postorder[endIdx]:
curIdx += 1
return curIdx == endIdx and \
recursion(startIdx, rightStartIdx - 1) and \
recursion(rightStartIdx, endIdx - 1)
return recursion(0, len(postorder) - 1)
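if __name__ == '__main__':
    # Sanity checks with the classic examples for this problem:
    assert Solution().verifyPostorder([1, 3, 2, 6, 5])      # valid BST postorder
    assert not Solution().verifyPostorder([1, 6, 3, 2, 5])  # invalid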
|
[
"[email protected]"
] | |
808f8f09efe32f810767f9476fbdf06034b93364
|
86ed811106eecf7aa3a15cf98537ef274b811ad7
|
/headmasters/migrations/0014_auto_20200118_1239.py
|
2dcabb7931d416505d44d26ddcea2f581734728f
|
[] |
no_license
|
SaifulAbir/Django-MIS
|
934ad39beff62f0e1cbe9377738b780122989662
|
d680a0a64211bc9cd7748364454c52b16398ea5c
|
refs/heads/master
| 2022-10-19T11:57:46.087577 | 2020-02-03T10:10:08 | 2020-02-03T10:10:08 | 271,542,785 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 414 |
py
|
# Generated by Django 2.2.4 on 2020-01-18 12:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('headmasters', '0013_auto_20200115_0835'),
]
operations = [
migrations.AlterField(
model_name='headmasterprofile',
name='mobile',
field=models.CharField(max_length=20, unique=True),
),
]
|
[
"[email protected]"
] | |
197156f1e6919a6fb85941c1f4078a1094cdc623
|
f3350367b97ba9d281be925ba520009a853fc0a3
|
/icarus/service/interface/icarus_server.py
|
7874dd52247cdd0b4554bee89d7f2cc229a393e6
|
[] |
no_license
|
f599gtb/PenguPilot
|
2f841e780661fde0399fd2ea11193896362f71ef
|
7ef485124e5f5b14c257fba915cd43aec8111f35
|
refs/heads/master
| 2021-01-22T09:16:47.066058 | 2014-08-15T14:46:32 | 2014-08-15T14:46:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,282 |
py
|
"""
___________________________________________________
| _____ _____ _ _ _ |
| | __ \ | __ (_) | | | |
| | |__) |__ _ __ __ _ _ _| |__) || | ___ | |_ |
| | ___/ _ \ '_ \ / _` | | | | ___/ | |/ _ \| __| |
| | | | __/ | | | (_| | |_| | | | | | (_) | |_ |
| |_| \___|_| |_|\__, |\__,_|_| |_|_|\___/ \__| |
| __/ | |
| GNU/Linux based |___/ Multi-Rotor UAV Autopilot |
|___________________________________________________|
ICARUS Server
responsible for receiving, delegating and replying to commands
Copyright (C) 2014 Tobias Simon, Ilmenau University of Technology
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. """
from threading import Thread
from icarus_pb2 import IcarusReq, IcarusRep, OK, E_SYNTAX, E_SEMANTIC
class ICARUS_Exception(Exception):
def __init__(self, msg):
self.msg = msg
class ICARUS_Server(Thread):
'''
ICARUS server
responsible for receiving, delegating and replying to incoming commands
'''
def __init__(self, socket, delegate):
'''
socket: a zmq socket
delegate: object providing handle(IcarusReq) routine, raising ICARUS_Exception
'''
Thread.__init__(self)
self._socket = socket
self._delegate = delegate
self.daemon = True
def run(self):
'''
receives, parses and executes commands using the submitted delegate in a loop
'''
while True:
# receive message via SCL:
try:
data = self._socket.recv()
except:
# it would not make sense to send an error message here,
# as something seems to be wrong with the socket
print 'could not read SCL message'
continue
# parse message into protobuf structure:
req = IcarusReq()
try:
req.ParseFromString(data)
except:
# syntactic error in ParseFromString
self.send_err(E_SYNTAX, 'could not parse protobuf payload')
continue
# handle parsed protobuf message and send IcarusRep:
try:
self._delegate.handle(req)
self.send_ok()
except ICARUS_Exception, ex:
# semantic error:
self.send_err(E_SEMANTIC, ex.msg)
def send_err(self, code, msg):
'''
sends an IcarusRep carrying the given error code and message
'''
rep = IcarusRep()
rep.status = code
rep.message = msg
self._send_rep(rep)
def send_ok(self):
'''
sends an IcarusRep with OK status
'''
rep = IcarusRep()
rep.status = OK
self._send_rep(rep)
def _send_rep(self, rep):
'''
serialize and send message via _socket
'''
self._socket.send(rep.SerializeToString())
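# Delegate sketch for reference: any object whose handle(req) raises
# ICARUS_Exception on semantic errors satisfies the interface (the
# validation below is hypothetical):
#   class Delegate(object):
#       def handle(self, req):
#           if req.type not in KNOWN_TYPES:
#               raise ICARUS_Exception('unknown command type')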
|
[
"[email protected]"
] | |
85cc20ccd31be48833728d53b3066357e385f2c0
|
e42b56be9ac649b55a59f4d7373dbc55775d1c33
|
/eps_client/eps_client/urls.py
|
0655375ac88500b6ed3c0f2d90a4c5ce3003fcb4
|
[] |
no_license
|
danielcb29/Rest_Example
|
4f026f1ee8ddb5888f4f889a36fffe6ac4550273
|
9a5fd672edf3f4ea446c837d185aa540c2fecaff
|
refs/heads/master
| 2022-11-27T23:02:05.467380 | 2016-10-10T01:17:21 | 2016-10-10T01:17:21 | 69,994,884 | 0 | 0 | null | 2022-11-22T00:39:33 | 2016-10-04T18:39:17 |
CSS
|
UTF-8
|
Python
| false | false | 766 |
py
|
"""eps_client URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url,include
urlpatterns = [
url(r'^', include('rest_client.urls',namespace='client')),
]
|
[
"[email protected]"
] | |
a1380cf0b07a79d6ea3eec8e515d0dd844e61560
|
f66021ddd1a79f2d43c5b00a56e15ce13a9abfb2
|
/jbkxnoltmn_dev_1766/urls.py
|
8c33ce9bcb4fd1b67b9e4e28b751f3b7120e5cf5
|
[] |
no_license
|
crowdbotics-apps/jbkxnoltmn-dev-1766
|
28db8f66d7f012f7273855a3e7c25becb06a5321
|
e43e4d2761cae06d51f53af0102edfc868618fbd
|
refs/heads/master
| 2022-04-06T05:51:42.505572 | 2020-03-01T19:42:42 | 2020-03-01T19:42:42 | 244,212,350 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,929 |
py
|
"""jbkxnoltmn_dev_1766 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "jbkxnoltmn"
admin.site.site_title = "jbkxnoltmn Admin Portal"
admin.site.index_title = "jbkxnoltmn Admin"
# swagger
schema_view = get_schema_view(
openapi.Info(
title="jbkxnoltmn API",
default_version="v1",
description="API documentation for jbkxnoltmn App",
),
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"[email protected]"
] | |
35a44548cde679b353b2ecfd460fbadd70c4f068
|
d9b286e3ed4038651f8a93ddad8fc7369b8e22ad
|
/reviewboard/webapi/tests/test_screenshot_comment.py
|
5362bcbc71c12d3eef9cb5242c2424f6ee47f5fc
|
[
"MIT"
] |
permissive
|
harrifeng/reviewboard
|
6456b1ba2fa953bdc83cb16681731bcef10430ee
|
f560679be34ab547ef0a4fbca959e244d6bf5a75
|
refs/heads/master
| 2016-10-16T14:05:54.102611 | 2013-09-06T21:23:30 | 2013-09-11T09:52:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,402 |
py
|
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.reviews.models import ScreenshotComment
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import screenshot_comment_list_mimetype
from reviewboard.webapi.tests.urls import get_screenshot_comment_list_url
class ScreenshotCommentResourceTests(BaseWebAPITestCase):
"""Testing the ScreenshotCommentResource APIs."""
fixtures = ['test_users', 'test_scmtools']
def test_get_screenshot_comments(self):
"""Testing the GET review-requests/<id>/screenshots/<id>/comments/ API"""
comment_text = "This is a test comment."
x, y, w, h = (2, 2, 10, 10)
review_request = self.create_review_request(publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=self.user)
self._postNewScreenshotComment(review_request, review.id, screenshot,
comment_text, x, y, w, h)
rsp = self.apiGet(
get_screenshot_comment_list_url(review),
expected_mimetype=screenshot_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
comments = ScreenshotComment.objects.filter(screenshot=screenshot)
rsp_comments = rsp['screenshot_comments']
self.assertEqual(len(rsp_comments), comments.count())
for i in range(0, len(comments)):
self.assertEqual(rsp_comments[i]['text'], comments[i].text)
self.assertEqual(rsp_comments[i]['x'], comments[i].x)
self.assertEqual(rsp_comments[i]['y'], comments[i].y)
self.assertEqual(rsp_comments[i]['w'], comments[i].w)
self.assertEqual(rsp_comments[i]['h'], comments[i].h)
@add_fixtures(['test_site'])
def test_get_screenshot_comments_with_site(self):
"""Testing the GET review-requests/<id>/screenshots/<id>/comments/ API with a local site"""
comment_text = 'This is a test comment.'
x, y, w, h = (2, 2, 10, 10)
user = self._login_user(local_site=True)
review_request = self.create_review_request(with_local_site=True,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
self._postNewScreenshotComment(review_request, review.id, screenshot,
comment_text, x, y, w, h)
rsp = self.apiGet(
get_screenshot_comment_list_url(review, self.local_site_name),
expected_mimetype=screenshot_comment_list_mimetype)
self.assertEqual(rsp['stat'], 'ok')
comments = ScreenshotComment.objects.filter(screenshot=screenshot)
rsp_comments = rsp['screenshot_comments']
self.assertEqual(len(rsp_comments), comments.count())
for i in range(0, len(comments)):
self.assertEqual(rsp_comments[i]['text'], comments[i].text)
self.assertEqual(rsp_comments[i]['x'], comments[i].x)
self.assertEqual(rsp_comments[i]['y'], comments[i].y)
self.assertEqual(rsp_comments[i]['w'], comments[i].w)
self.assertEqual(rsp_comments[i]['h'], comments[i].h)
@add_fixtures(['test_site'])
def test_get_screenshot_comments_with_site_no_access(self):
"""Testing the GET review-requests/<id>/screenshots/<id>/comments/ API with a local site and Permission Denied error"""
comment_text = 'This is a test comment.'
x, y, w, h = (2, 2, 10, 10)
user = self._login_user(local_site=True)
review_request = self.create_review_request(with_local_site=True,
publish=True)
screenshot = self.create_screenshot(review_request)
review = self.create_review(review_request, user=user)
self._postNewScreenshotComment(review_request, review.id, screenshot,
comment_text, x, y, w, h)
self._login_user()
rsp = self.apiGet(
get_screenshot_comment_list_url(review, self.local_site_name),
expected_status=403)
self.assertEqual(rsp['stat'], 'fail')
self.assertEqual(rsp['err']['code'], PERMISSION_DENIED.code)
|
[
"[email protected]"
] | |
ff97443bc42e9a21c435f0a5d1bea777d082d66d
|
1aa12d5735fb239c8be8f8a44881eb05908d6e4f
|
/deunionreserve/settings.py
|
a794f4a7c468667a858aed43da6fc8aa1c20a2f0
|
[] |
no_license
|
Justprince234/bank-api
|
218f14d15e3e76f5f0b1c5c1142f7d5417cc6e03
|
a2fa7d478c6ab88fc2369caaacbc407b5411da0d
|
refs/heads/master
| 2023-06-23T11:00:51.124883 | 2021-07-13T13:04:36 | 2021-07-13T13:04:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,631 |
py
|
"""
Django settings for deunionreserve project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-e&b^1n3d)hsb%ax3ayv%^d5m=)*2q5q5us+duf2+0nh@u373(5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#Third party Apps
'rest_framework',
'knox',
'django_rest_passwordreset',
'corsheaders',
'rest_framework_simplejwt',
'drf_yasg',
#My Apps
'accounts.apps.AccountsConfig',
'customers.apps.CustomersConfig',
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
SITE_ID = 1
AUTH_USER_MODEL = 'accounts.User'
ROOT_URLCONF = 'deunionreserve.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'deunionreserve.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
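# Both credentials are read from the environment, e.g. (values
# illustrative):
#   export [email protected]
#   export EMAIL_HOST_PASSWORD=an-app-specific-password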
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
MEDIA_ROOT = os.path.join(BASE_DIR, 'photos/')
MEDIA_URL = '/photos/photos/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': [
# 'rest_framework.authentication.BasicAuthentication',
# 'rest_framework.authentication.SessionAuthentication',
'knox.auth.TokenAuthentication',
'rest_framework_simplejwt.authentication.JWTAuthentication',
]
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Heroku settings.
import django_heroku
django_heroku.settings(locals())
|
[
"[email protected]"
] | |
ea7d9fa0c40854eead9518afad3701d2d8eeb5b2
|
32b5beff459a4e130c3b231e32d717bed4178a1c
|
/src/bench/bench_python/plot_benchmark.py
|
4d0b590b602e3ac198077fd5613b194a6a9db094
|
[
"MIT"
] |
permissive
|
constantinpape/z5
|
9deb76fe52a1335dac7ef49e85c40cf7efbb8887
|
bd5cb52782a9cabf534ea77ba0823f207c8eccb8
|
refs/heads/master
| 2023-07-06T15:58:13.279554 | 2023-07-04T07:26:21 | 2023-07-04T07:26:21 | 101,700,504 | 94 | 28 |
MIT
| 2023-07-04T07:26:23 | 2017-08-29T00:31:10 |
C++
|
UTF-8
|
Python
| false | false | 1,482 |
py
|
import argparse
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set_theme(style='whitegrid')
def plot_benchmark(path, chunk_key='25_125_125', skip=[]):
with open(path) as f:
results = json.load(f)
data = []
for format_ in ('h5', 'n5', 'zarr'):
read = results[format_]['read']
write = results[format_]['write']
sizes = results[format_]['sizes']
compressions = list(read.keys())
for compression in compressions:
if compression in skip:
continue
t_read = np.min(read[compression][chunk_key])
t_write = np.min(write[compression][chunk_key])
size = sizes[compression][chunk_key]
data.append([format_, compression, t_read, t_write, size])
data = pd.DataFrame(data, columns=['format', 'compression', 't-read [s]', 't-write [s]', 'size [MB]'])
fig, axes = plt.subplots(3)
sns.barplot(data=data, ax=axes[0], x="format", y="t-read [s]", hue="compression")
sns.barplot(data=data, ax=axes[1], x="format", y="t-write [s]", hue="compression")
sns.barplot(data=data, ax=axes[2], x="format", y="size [MB]", hue="compression")
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('path')
parser.add_argument('--skip', nargs="+", default=[])
args = parser.parse_args()
plot_benchmark(args.path, skip=args.skip)
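# Example invocation (file and compression names illustrative):
#   python plot_benchmark.py results.json --skip raw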
|
[
"[email protected]"
] | |
73e0fa7025c6612d5ef02836a41198b625ae0f24
|
6c2a41b67027f95cc9525a813455b07cdf49c5d7
|
/projects/models.py
|
7278428ef0ce110f669ab440107ddbcf21ef929c
|
[
"MIT"
] |
permissive
|
wilbrone/Awards
|
3c1ce7ec3ca3003ff9787529a903c55c02ea42ae
|
c4c87ca5d700a12dc8d23e2d6092ac59adada4af
|
refs/heads/master
| 2022-12-21T22:12:39.474327 | 2020-02-23T10:46:27 | 2020-02-23T10:46:27 | 240,668,985 | 0 | 0 |
MIT
| 2022-09-23T22:36:18 | 2020-02-15T08:19:22 |
Python
|
UTF-8
|
Python
| false | false | 3,092 |
py
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.db.models.signals import post_save
from django.dispatch import receiver
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profile')
bio = models.TextField(max_length = 500)
profile_pic = models.ImageField(upload_to='profile_pics', blank=True, default='default.png')
location = models.CharField(max_length = 100)
def __str__(self):
return f'{self.user.username} Profile'
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
instance.profile.save()
def create_profile(self):
self.save()
def delete_profile(self):
self.delete()
def search_profile(self,username):
users = User.objects.filter(username=username)
return users
class Project(models.Model):
title = models.CharField(max_length = 100)
image = models.ImageField(upload_to='project_pics/',null=True,blank=True,default='default.png')
image_url = models.CharField(max_length=250, null= True)
description = models.TextField(max_length = 500)
posted = models.DateTimeField(auto_now_add=True, null=True)
user = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name='projects', null=True)
def __str__(self):
return f'{self.user} Project'
# def get_absolute_url(self):
# return f"/single_post/{self.id}"
def save_project(self):
self.save()
def delete_project(self):
self.delete()
class Rating(models.Model):
rating = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6, '6'),
(7, '7'),
(8, '8'),
(9, '9'),
(10, '10'),
)
design = models.IntegerField(choices=rating, default=0, blank=True)
usability = models.IntegerField(choices=rating, blank=True)
content = models.IntegerField(choices=rating, blank=True)
score = models.FloatField(default=0, blank=True)
design_average = models.FloatField(default=0, blank=True)
usability_average = models.FloatField(default=0, blank=True)
content_average = models.FloatField(default=0, blank=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, related_name='rater')
post = models.ForeignKey(Project, on_delete=models.CASCADE, related_name='ratings', null=True)
class Meta:
get_latest_by='score'
@classmethod
def get_leading_project(cls):
post=cls.objects.latest()
return post
def save_rating(self):
self.save()
@classmethod
def get_ratings(cls, id):
ratings = Rating.objects.filter(post_id=id).all()
return ratings
def __str__(self):
return f'{self.post} Rating'
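# Usage sketch (ids illustrative):
#   Rating.get_ratings(id=1)        # all ratings attached to project 1
#   Rating.get_leading_project()    # row ranked by Meta.get_latest_by ('score')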
|
[
"[email protected]"
] | |
3dfc9792128a7b662720ee5edda3d0b79bbc1671
|
a2d83ad6126403703e85ecd3e627ef402e1fb6cf
|
/setup.py
|
938f01e6cd380f0b81fc8141100e9881357bc040
|
[] |
no_license
|
argriffing/fiedlerology
|
5e0042f6e4b77be40208a29910978ee1e9a9846a
|
255cf1889fd9ac8619891ef8a10de813390247bd
|
refs/heads/master
| 2020-05-17T05:24:33.832399 | 2014-05-10T01:39:36 | 2014-05-10T01:39:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 712 |
py
|
#!/usr/bin/env python
"""Implementations of algorithms related to the work of Miroslav Fiedler.
"""
DOCLINES = __doc__.split('\n')
# This setup script is written according to
# http://docs.python.org/2/distutils/setupscript.html
#
# It is meant to be installed through github using pip.
from distutils.core import setup
setup(
name='fiedlerology',
version='0.1',
description=DOCLINES[0],
author='alex',
url='https://github.com/argriffing/fiedlerology/',
download_url='https://github.com/argriffing/fiedlerology/',
packages=['fiedlerology'],
test_suite='nose.collector',
package_data={'fiedlerology' : ['tests/test_*.py']},
)
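# Per the comment above, a typical install goes through pip + github,
# e.g.:
#   pip install git+https://github.com/argriffing/fiedlerology.git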
|
[
"[email protected]"
] | |
ec5fe9f24592abc674e2160d3674ec1936d8432f
|
d007f8d6c318c3d66e76d99715edf324c9fe0294
|
/recipes/luci_config.py
|
a0181931f1a2c7a0f24a5d32e0f34026fc861b60
|
[
"BSD-3-Clause"
] |
permissive
|
nirvus/infra-recipes
|
c0f9e5facca7ad1907d639eb8819a59dc8f3584e
|
a5dc52f47405dcce56fb43a3e8ac80a2fbd56717
|
refs/heads/master
| 2020-04-07T23:15:01.809232 | 2018-11-06T02:30:12 | 2018-11-06T17:37:57 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,266 |
py
|
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Recipe for testing LUCI configs."""
import difflib
from recipe_engine.recipe_api import Property
DEPS = [
'infra/jiri',
'recipe_engine/buildbucket',
'recipe_engine/context',
'recipe_engine/file',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/raw_io',
'recipe_engine/step',
]
PROPERTIES = {
'config_project':
Property(
kind=str,
help='Jiri remote manifest project containing the luci configs',
default=None),
'manifest':
Property(kind=str, help='Jiri manifest to use'),
'remote':
Property(kind=str, help='Remote manifest repository'),
}
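# Remote of the jiri project that carries the prebuilt buildbucket-config
# flattener tool used below.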
_PREBUILT_PROJECT_REMOTE = 'https://fuchsia.googlesource.com/infra/prebuilt'
def RunSteps(api, config_project, manifest, remote):
api.jiri.ensure_jiri()
with api.context(infra_steps=True):
api.jiri.checkout(
manifest=manifest,
remote=remote,
build_input=api.buildbucket.build.input)
# Find the required jiri projects.
config_jiri_project, prebuilt_jiri_project = None, None
jiri_projects = api.jiri.project()
for jiri_project in jiri_projects.json.output:
if jiri_project['name'] == config_project:
config_jiri_project = jiri_project
if jiri_project['remote'] == _PREBUILT_PROJECT_REMOTE:
prebuilt_jiri_project = jiri_project
assert config_jiri_project, 'Failed to find project %s' % config_project
assert prebuilt_jiri_project, (
'Failed to find project with remote %s' % _PREBUILT_PROJECT_REMOTE)
# Needs to be kept in sync with //infra/prebuilt/tools/cipd.ensure.
flatten_buildbucket_path = api.path['start_dir'].join(
prebuilt_jiri_project['path'], 'tools', 'flatten_buildbucket_cfg',
'flatten_buildbucket_cfg')
services_path = api.path['start_dir'].join(config_jiri_project['path'],
'config', 'services')
buildbucket_config_paths = api.file.glob_paths(
name='glob buildbucket configs',
source=services_path,
pattern='*buildbucket*.cfg')
if not buildbucket_config_paths:
raise api.step.StepFailure(
'Found no buildbucket configs under %s' % services_path)
for buildbucket_config_path in buildbucket_config_paths:
# Flatten the existing config. Fails if it is not a valid config proto.
basename = api.path.basename(buildbucket_config_path)
flatten_step = api.step(
'flatten %s' % basename,
[flatten_buildbucket_path, buildbucket_config_path],
stdout=api.raw_io.output_text())
flattened_config = flatten_step.stdout
# Compare the flattened to the copy in generated/ sub-dir. This enforces
# that the generated copy stays up to date.
expected_config = api.file.read_text(
'read generated/%s' % basename, services_path.join(
'generated', basename))
with api.step.nest('diff %s' % basename):
expected_lines = expected_config.split('\n')
flattened_lines = flattened_config.split('\n')
diff = list(
difflib.context_diff(
expected_lines,
flattened_lines,
fromfile='generated/%s' % basename,
tofile='%s.flattened' % basename))
if diff:
api.step.active_result.presentation.logs['diff'] = diff
api.step.active_result.presentation.logs['expected'] = expected_lines
api.step.active_result.presentation.logs['flattened'] = flattened_lines
raise api.step.StepFailure('Found diff')
api.step.active_result.presentation.step_text = 'no diff'
def GenTests(api):
properties = api.properties(
config_project='fuchsia-infra/config',
manifest='manifest/infra',
remote='https://fuchsia.googlesource.com/manifest',
)
jiri_projects = api.step_data(
'jiri project',
api.jiri.project([{
'name': 'fuchsia-infra/config',
'path': 'config',
'remote': 'https://fuchsia.googlesource.com/infra/config'
}, {
'name': 'prebuilt',
'path': 'fuchsia-infra/prebuilt',
'remote': 'https://fuchsia.googlesource.com/infra/prebuilt'
}]))
glob_step_data = api.step_data(
'glob buildbucket configs',
api.file.glob_paths(names=('cr-buildbucket.cfg',)))
yield (api.test('no_diff') + properties + jiri_projects + glob_step_data +
api.step_data(
'flatten cr-buildbucket.cfg',
stdout=api.raw_io.output_text('foo\nbar\n')) + api.step_data(
'read generated/cr-buildbucket.cfg',
api.file.read_text(text_content='foo\nbar\n')))
yield (api.test('diff') + properties + jiri_projects + glob_step_data +
api.step_data(
'flatten cr-buildbucket.cfg',
stdout=api.raw_io.output_text('foo\nbaz\n')) + api.step_data(
'read generated/cr-buildbucket.cfg',
api.file.read_text(text_content='foo\nbar\n')))
yield (api.test('no_buildbucket_configs') + properties + jiri_projects +
api.step_data('glob buildbucket configs', api.file.glob_paths()))
|
[
"[email protected]"
] | |
fcfff712894869114932d6c0f9818c7e944cbc3f
|
7966fa31437cc8a539621a5a0642ce24c1c9de50
|
/PycharmProjects/leetcode/knapsack/474一和零.py
|
fe2c8b5eb5ad4dc876881797206d48967091368e
|
[] |
no_license
|
crystal30/DataStructure
|
4f938508f4c60af9c5f8ec5520d5acedbe2dc90e
|
c55b0cfd2967a2221c27ed738e8de15034775945
|
refs/heads/master
| 2021-06-25T17:49:03.048853 | 2021-01-22T00:37:04 | 2021-01-22T00:37:04 | 192,374,326 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,411 |
py
|
# coding:utf-8
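# LeetCode 474 "Ones and Zeroes" (一和零): given binary strings and a budget
# of m zeros and n ones, find the size of the largest subset whose combined
# zeros fit in m and ones fit in n -- a two-dimensional 0/1 knapsack.
# Solution uses top-down memoized recursion; Solution1 is the bottom-up DP.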
class Solution:
def __init__(self):
self.memo = None
def findMaxForm(self, strs, m, n):
len_strs = len(strs)
if len_strs == 0:
return 0
self.memo = [[[-1 for _ in range(n+1)] for _ in range(m+1)] for _ in range(len_strs)]
return max(0, self.find_max_form(strs, len(strs)-1, m, n))
def find_max_form(self, strs, index, m, n):
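        # Best subset size considering strs[0..index] with a remaining budget
        # of m zeros and n ones.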
if m == 0 and n == 0:
return 0
num_0 = 0
num_1 = 0
for e in strs[index]:
if e == '0':
num_0 += 1
else:
num_1 += 1
if index == 0:
if num_0 <= m and num_1 <= n:
return 1
else:
return 0
if self.memo[index][m][n] != -1:
return self.memo[index][m][n]
if m - num_0 >= 0 and n - num_1 >= 0:
res = max(self.find_max_form(strs, index-1, m, n),
1+self.find_max_form(strs, index-1, m-num_0, n-num_1))
else:
res = self.find_max_form(strs, index-1, m, n)
self.memo[index][m][n] = res
return res
class Solution1:
def findMaxForm(self, strs, m, n):
len_strs = len(strs)
memo = [[[0 for _ in range(n+1)] for _ in range(m+1)] for _ in range(len_strs)]
zero_num, one_num = self.calculate_zero_one(strs[0])
for j in range(m+1):
for k in range(n+1):
if zero_num <= j and one_num <= k:
memo[0][j][k] = 1
for i in range(1, len_strs):
zero_num, one_num = self.calculate_zero_one(strs[i])
for j in range(m+1):
for k in range(n+1):
if zero_num <= j and one_num <= k:
# print(i, k, j)
memo[i][j][k] = max(memo[i-1][j][k], 1+memo[i-1][j-zero_num][k-one_num])
else:
memo[i][j][k] = memo[i-1][j][k]
return memo[len_strs-1][m][n]
def calculate_zero_one(self, e):
zero_num = 0
one_num = 0
for sub_e in e:
if sub_e == '0':
zero_num += 1
else:
one_num += 1
return zero_num, one_num
if __name__ == "__main__":
so = Solution1()
strs = ["10"]
m = 5
n = 3
re = so.findMaxForm(strs, m, n)
print(re)
|
[
"[email protected]"
] | |
88eb693481debe9f05c43f4a10cdaf4512efe676
|
7e8060ad317fe7d87fcbb1c756461cea8a067d08
|
/stepik/python67/03_07_03.py
|
0018a5ff2748b096d66fa7b893cd682c0b0bcb5b
|
[
"Unlicense"
] |
permissive
|
ornichola/learning-new
|
f9b8f2d210f36f2c05a14e7cca56d32495e60778
|
abb6e74f9f8794a0d3b897618207d4be0b0ff3e1
|
refs/heads/master
| 2023-05-25T19:18:42.931439 | 2023-04-17T17:22:57 | 2023-04-17T17:22:57 | 115,291,570 | 4 | 25 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,929 |
py
|
# [STEPIK]
# Python Programming https://stepik.org/67
# 03_07_03 Problems on the week's material
'''
The simplest spell-checking system is based on a list of known words: every word of the text being checked is looked up in that list and, if it is not found, it is flagged as a mistake.
Let's build such a system.
Standard input carries the following structure: the first line holds the number d of entries in the list of known words, followed by d lines with one dictionary word per line, then the number l of lines of text, followed by l lines of text.
Write a program that prints the words of the text that do not occur in the dictionary. Word case is ignored, the words may be printed in any order, and a word missing from the dictionary must not be repeated in the output.
Sample Input:
3
a
bb
cCc
2
a bb aab aba ccc
c bb aaa
Sample Output:
aab
aba
c
aaa
'''
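# A set gives O(1) membership tests and de-duplicates the unknown words,
# which is exactly what the output format asks for.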
d = int(input())
words = set()
unknown_words = set()
for _ in range(d):
    words.add(input().lower())
l = int(input())
for _ in range(l):
    for word in input().lower().split():
        if word not in words:
            unknown_words.add(word)
for word in unknown_words:
    print(word)
|
[
"[email protected]"
] | |
1a2118f38e648268356c5aefae975be5046dbd2a
|
f53117f4bc68c174bf8e769e146c68f5e20cb8f5
|
/msppg/msp-imudisplay.py
|
428a8b73f3ab50dc09221428aad3c894c09749e0
|
[] |
no_license
|
zidane1980slab/MSPPG
|
ac9324127626f07f56bc7473f92a46ec6b176dfb
|
085fac6316b747a957823295876c7735c3c1a9e3
|
refs/heads/master
| 2021-01-14T08:46:08.967107 | 2015-07-21T02:18:58 | 2015-07-21T02:18:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 35,711 |
py
|
#!/usr/bin/env python
'''
imudisplay.py - graphical demo of MSPPG Attitude messages
Copyright (C) Alec Singer and Simon D. Levy 2015
This code is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this code. If not, see <http://www.gnu.org/licenses/>.
'''
FMUPORT = '/dev/ttyUSB0'
#FMUPORT = 'COM3'
VEHICLE_SCALE = 0.10
UPDATE_MSEC = 10
from Tkinter import *
import threading
import serial
import time
import msppg
from math import sin, cos, radians, degrees
import numpy as np
class Display(object):
def __init__(self, driver, simulation=False):
self.driver = driver
self.width = int(self.driver.canvas['width'])
self.height = int(self.driver.canvas['height'])
self.driver.root.bind("<Key>", self._check_quit)
self.driver.root.title('IMU Telemetry')
# Vehicle dimensions
W = VEHICLE_SCALE
D = VEHICLE_SCALE / 2
L = VEHICLE_SCALE * 2
#Let these be in World-coordinates (worldview-matrix already applied)
####In right-handed, counter-clockwise order
self.vehicle_points, self.vehicle_faces, self.vehicle_face_colors = self._get_vehicle(W, D, L)
# Assume no angles to start
self.yaw_pitch_roll = None
# Rotation matrices
self.pitchrot = np.eye(3)
self.yawrot = np.eye(3)
self.rollrot = np.eye(3)
self.simulation = simulation
self.running = False
def start(self, delay_msec=UPDATE_MSEC):
self._schedule_display_task(delay_msec)
self.running = True
self.faces = []
self.yaw_pitch_roll_prev = None
self.yaw_pitch_roll_change = None
def stop(self):
self._clear()
self.running = False
    def setParams(self, pitchroll_kp_percent, yaw_kp_percent):
        self.pitchroll_kp_percent = pitchroll_kp_percent
        self.yaw_kp_percent = yaw_kp_percent
def _schedule_display_task(self, delay_msec):
self.driver.root.after(delay_msec, self._task)
def _clear(self):
for face in self.faces:
self.driver.canvas.delete(face)
self.faces = []
def _task(self):
if self.running:
self.yaw_pitch_roll = self.driver.getYawPitchRoll()
self._update()
self._schedule_display_task(UPDATE_MSEC)
def _to_screen_coords(self, pv):
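        # Map a point from normalized device coordinates ([-1, 1] per axis)
        # to Tk pixel coordinates; Y is flipped because screen Y grows down.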
dims = [int(s) for s in str(self.driver.root.geometry()).split('+')[0].split('x')]
width, height = dims[0], dims[1]
SC = np.eye(4)
SC[0,0] = width/2
SC[1,1] = -height/2
SC[0,3] = width/2
SC[1,3] = height/2
x = SC[0,0]*pv[0]+SC[0,1]*pv[1]+SC[0,2]*pv[2]+SC[0,3]
y = SC[1,0]*pv[0]+SC[1,1]*pv[1]+SC[1,2]*pv[2]+SC[1,3]
z = SC[2,0]*pv[0]+SC[2,1]*pv[1]+SC[2,2]*pv[2]+SC[2,3]
return np.array([x, y, z])
def _create_window(self, x, widget):
return self.driver.canvas.create_window(x, 10, anchor=NW, window=widget)
def _check_quit(self, event):
if ord(event.char) == 27: # ESC
exit(0)
def _update(self):
# Erase previous image
self._clear()
# Convert angles to X,Y,Z rotation matrices
yawAngle = radians(self.yaw_pitch_roll[0])
self.yawrot[0,0] = +cos(yawAngle)
self.yawrot[0,2] = +sin(yawAngle)
self.yawrot[2,0] = -sin(yawAngle)
self.yawrot[2,2] = +cos(yawAngle)
pitchAngle = radians(self.yaw_pitch_roll[1])
self.pitchrot[1,1] = +cos(pitchAngle)
self.pitchrot[1,2] = -sin(pitchAngle)
self.pitchrot[2,1] = +sin(pitchAngle)
self.pitchrot[2,2] = +cos(pitchAngle)
rollAngle = -radians(self.yaw_pitch_roll[2]) # negate so positive is roll rightward
self.rollrot[0,0] = +cos(rollAngle)
self.rollrot[0,1] = -sin(rollAngle)
self.rollrot[1,0] = +sin(rollAngle)
self.rollrot[1,1] = +cos(rollAngle)
# Multiply matrices
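        # Compose as yaw . pitch . roll, so roll is applied to the model
        # first, then pitch, then yaw.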
rot = np.dot(np.dot(self.yawrot, self.pitchrot), self.rollrot)
# Draw polygons
for i in range(len(self.vehicle_faces)):
poly = [] #transformed polygon
for j in range(len(self.vehicle_faces[0])):
v = self.vehicle_points[self.vehicle_faces[i][j]]
# Transform the point from 3D to 2D
ps = np.dot(v, rot.T)
p = self._to_screen_coords(ps)
# Put the screenpoint in the list of transformed vertices
poly.append((p[0], p[1]))
if self._is_polygon_front_face(poly): #Backface culling
self.faces.append(self.driver.canvas.create_polygon(*poly, fill=self.vehicle_face_colors[i]))
# Update angle changes
if not self.yaw_pitch_roll_prev is None:
self.yaw_pitch_roll_change = [degrees(abs(pair[0]-pair[1]))
for pair in zip(self.yaw_pitch_roll,self.yaw_pitch_roll_prev)]
self.yaw_pitch_roll_prev = self.yaw_pitch_roll
def _is_polygon_front_face(self, pts):
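        # Shoelace-style signed area of the projected 2-D polygon: the sign
        # of the sum encodes winding order, which is used to cull back faces.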
summa = 0.0
num = len(pts)
for i in range(num-1):
summa += (pts[i+1][0]-pts[i][0])*(pts[i+1][1]+pts[i][1])
summa += (pts[0][0]-pts[num-1][0])*(pts[0][1]+pts[num-1][1])
return summa > 0.0
def _get_vehicle(self, width, depth, length):
#creates constants
length = width
#arm constants
armLength = width*2
armWidth = armLength/10
#arrow constants
arrowWidth = 1.0 * armWidth
arrowLength = 5.0 * armWidth
arrowHeight = 1.5 * depth
#prop constants
propWidth = 1.00 * armWidth
propNarrowWidth = 0.20 * propWidth
propLength = 7.50 * propWidth
propNarrowLength = 0.75 * propLength
propShortLength = 0.25 * propLength
#prop pitch constants
tipTU = 0.900 * depth
tipTL = 0.625 * depth
tipBU = 0.625 * depth
tipBL = 0.350 * depth
endT = .75 * depth
endB = .50 * depth
constant1 = ((endT-tipTL)/3) * depth
constant2 = ((endB-tipBL)/3) * depth
farTU = tipTU - constant2
farTL = tipTL + constant1
farBU = tipBU - constant1
farBL = tipBL + constant2
closeTU = farTU - constant2
closeTL = farTL + constant1
closeBU = farBU - constant1
closeBL = farBL + constant2
points = np.array([
#creates arm 1
[+width - armWidth, +depth/2 , +length + armWidth], #0 0
[+width + armWidth, +depth/2 , +length - armWidth], #1 1
[+width + armWidth, -depth/2 , +length - armWidth], #2 2
[+width - armWidth, -depth/2 , +length + armWidth], #3 3
[+width + armLength - armWidth, +depth/2 , +length + armLength + armWidth], #4 4
[+width + armLength + armWidth, +depth/2 , +length + armLength - armWidth], #5 5
[+width + armLength + armWidth, -depth/2 , +length + armLength - armWidth], #6 6
[+width + armLength - armWidth, -depth/2 , +length + armLength + armWidth], #7 7
#creates arm 2
[-width - armWidth, +depth/2 , +length - armWidth], #0 8
[-width + armWidth, +depth/2 , +length + armWidth], #1 9
[-width + armWidth, -depth/2 , +length + armWidth], #2 10
[-width - armWidth, -depth/2 , +length - armWidth], #3 11
[-width - armLength - armWidth, +depth/2 , +length + armLength - armWidth], #4 12
[-width - armLength + armWidth, +depth/2 , +length + armLength + armWidth], #5 13
[-width - armLength + armWidth, -depth/2 , +length + armLength + armWidth], #6 14
[-width - armLength - armWidth, -depth/2 , +length + armLength - armWidth], #7 15
#creates arm 3
[+width + armLength - armWidth, +depth/2 , -length - armLength - armWidth], #0 16
[+width + armLength + armWidth, +depth/2 , -length - armLength + armWidth], #1 17
[+width + armLength + armWidth, -depth/2 , -length - armLength + armWidth], #2 18
[+width + armLength - armWidth, -depth/2 , -length - armLength - armWidth], #3 19
[+width - armWidth, +depth/2 , -length - armWidth], #4 20
[+width + armWidth, +depth/2 , -length + armWidth], #5 21
[+width + armWidth, -depth/2 , -length + armWidth], #6 22
[+width - armWidth, -depth/2 , -length - armWidth], #7 23
#creates arm 4
[-width - armLength - armWidth, +depth/2 , -length - armLength + armWidth], #0 24
[-width - armLength + armWidth, +depth/2 , -length - armLength - armWidth], #1 25
[-width - armLength + armWidth, -depth/2 , -length - armLength - armWidth], #2 26
[-width - armLength - armWidth, -depth/2 , -length - armLength + armWidth], #3 27
[-width - armWidth, +depth/2 , -length + armWidth], #4 28
[-width + armWidth, +depth/2 , -length - armWidth], #5 29
[-width + armWidth, -depth/2 , -length - armWidth], #6 30
[-width - armWidth, -depth/2 , -length + armWidth], #7 31
#creates the arrow body
[-arrowWidth, +arrowHeight, 0], #0 32
[+arrowWidth, +arrowHeight, 0], #1 33
[+arrowWidth, +depth, 0], #2 34
[-arrowWidth, +depth, 0], #3 35
[-arrowWidth, +arrowHeight, +arrowLength], #4 36
[+arrowWidth, +arrowHeight, +arrowLength], #5 37
[+arrowWidth, +depth, +arrowLength], #6 38
[-arrowWidth, +depth, +arrowLength], #7 39
#creates the arrow head
            [-(1.0/6)*arrowWidth, +arrowHeight, -arrowLength], #0 40
            [+(1.0/6)*arrowWidth, +arrowHeight, -arrowLength], #1 41
            [+(1.0/6)*arrowWidth, +depth, -arrowLength], #2 42
            [-(1.0/6)*arrowWidth, +depth, -arrowLength], #3 43
[-arrowWidth - 2*arrowWidth, +arrowHeight, 0], #4 44
[+arrowWidth + 2*arrowWidth, +arrowHeight, 0], #5 45
[+arrowWidth + 2*arrowWidth, +depth, 0], #6 46
[-arrowWidth - 2*arrowWidth, +depth, 0], #7 47
#creates the center box
[-width - armWidth, +depth, -length + armWidth], #0 48
[-width + armWidth, +depth, -length - armWidth], #1 49
[+width - armWidth, +depth, -length - armWidth], #2 50
[+width + armWidth, +depth, -length + armWidth], #3 51
[+width - armWidth, -depth, -length - armWidth], #4 52
[+width + armWidth, -depth, -length + armWidth], #5 53
[-width - armWidth, -depth, -length + armWidth], #6 54
[-width + armWidth, -depth, -length - armWidth], #7 55
[-width - armWidth, +depth, +length - armWidth], #8 56
[-width + armWidth, +depth, +length + armWidth], #9 57
[+width - armWidth, +depth, +length + armWidth], #10 58
[+width + armWidth, +depth, +length - armWidth], #11 59
[+width - armWidth, -depth, +length + armWidth], #12 60
[+width + armWidth, -depth, +length - armWidth], #13 61
[-width - armWidth, -depth, +length - armWidth], #14 62
[-width + armWidth, -depth, +length + armWidth], #15 63
#creates prop 1 on arm 1
#North East far narrow tip
[+width+armLength + propLength - propNarrowWidth, +tipTL, +length+armLength - propLength - propNarrowWidth], #0 64
[+width+armLength + propLength + propNarrowWidth, +tipTU, +length+armLength - propLength + propNarrowWidth], #1 65
[+width+armLength + propLength + propNarrowWidth, +tipBU, +length+armLength - propLength + propNarrowWidth], #2 66
[+width+armLength + propLength - propNarrowWidth, +tipBL, +length+armLength - propLength - propNarrowWidth], #3 67
#North East far wide
[+width+armLength + propNarrowLength - propWidth, +farTL, +length+armLength - propNarrowLength - propWidth], #4 68
[+width+armLength + propNarrowLength + propWidth, +farTU, +length+armLength - propNarrowLength + propWidth], #5 69
[+width+armLength + propNarrowLength + propWidth, +farBU, +length+armLength - propNarrowLength + propWidth], #6 70
[+width+armLength + propNarrowLength - propWidth, +farBL, +length+armLength - propNarrowLength - propWidth], #7 71
#North East close wide
[+width+armLength + propShortLength - propWidth, +closeTL, +length+armLength - propShortLength - propWidth], #4 72
[+width+armLength + propShortLength + propWidth, +closeTU, +length+armLength - propShortLength + propWidth], #5 73
[+width+armLength + propShortLength + propWidth, +farBU, +length+armLength - propShortLength + propWidth], #6 74
[+width+armLength + propShortLength - propWidth, +farBL, +length+armLength - propShortLength - propWidth], #7 75
#Middle narrow tip
[+width+armLength - propNarrowWidth, +endT, +length+armLength - propNarrowWidth], #4 76
[+width+armLength + propNarrowWidth, +endT, +length+armLength + propNarrowWidth], #5 77
[+width+armLength + propNarrowWidth, +endB, +length+armLength + propNarrowWidth], #6 78
[+width+armLength - propNarrowWidth, +endB, +length+armLength - propNarrowWidth], #7 79
#South West close wide
[+width+armLength - propShortLength - propWidth, +closeTU, +length+armLength + propShortLength - propWidth], #4 80
[+width+armLength - propShortLength + propWidth, +closeTL, +length+armLength + propShortLength + propWidth], #5 81
[+width+armLength - propShortLength + propWidth, +closeBL, +length+armLength + propShortLength + propWidth], #6 82
[+width+armLength - propShortLength - propWidth, +closeBU, +length+armLength + propShortLength - propWidth], #7 83
#South West far wide
[+width+armLength - propNarrowLength - propWidth, +farTU, +length+armLength + propNarrowLength - propWidth], #4 84
[+width+armLength - propNarrowLength + propWidth, +farTL, +length+armLength + propNarrowLength + propWidth], #5 85
[+width+armLength - propNarrowLength + propWidth, +farBL, +length+armLength + propNarrowLength + propWidth], #6 86
[+width+armLength - propNarrowLength - propWidth, +farBU, +length+armLength + propNarrowLength - propWidth], #7 87
#South West far narrow tip
[+width+armLength - propLength - propNarrowWidth, +tipTU, +length+armLength + propLength - propNarrowWidth], #0 88
[+width+armLength - propLength + propNarrowWidth, +tipTL, +length+armLength + propLength + propNarrowWidth], #1 89
[+width+armLength - propLength + propNarrowWidth, +tipBL, +length+armLength + propLength + propNarrowWidth], #2 90
[+width+armLength - propLength - propNarrowWidth, +tipBU, +length+armLength + propLength - propNarrowWidth], #3 91
#creates prop 4 on arm 4
#North East far narrow tip
[-width-armLength + propLength - propNarrowWidth, +tipTL, -length-armLength - propLength - propNarrowWidth], #0 92
[-width-armLength + propLength + propNarrowWidth, +tipTU, -length-armLength - propLength + propNarrowWidth], #1 93
[-width-armLength + propLength + propNarrowWidth, +tipBU, -length-armLength - propLength + propNarrowWidth], #2 94
[-width-armLength + propLength - propNarrowWidth, +tipBL, -length-armLength - propLength - propNarrowWidth], #3 95
#North East far wide
[-width-armLength + propNarrowLength - propWidth, +farTL, -length-armLength - propNarrowLength - propWidth], #4 96
[-width-armLength + propNarrowLength + propWidth, +farTU, -length-armLength - propNarrowLength + propWidth], #5 97
[-width-armLength + propNarrowLength + propWidth, +farBU, -length-armLength - propNarrowLength + propWidth], #6 98
[-width-armLength + propNarrowLength - propWidth, +farBL, -length-armLength - propNarrowLength - propWidth], #7 99
#North East close wide
[-width-armLength + propShortLength - propWidth, +closeTL, -length-armLength - propShortLength - propWidth], #4 100
[-width-armLength + propShortLength + propWidth, +closeTU, -length-armLength - propShortLength + propWidth], #5 101
[-width-armLength + propShortLength + propWidth, +closeBU, -length-armLength - propShortLength + propWidth], #6 102
[-width-armLength + propShortLength - propWidth, +closeBL, -length-armLength - propShortLength - propWidth], #7 103
#Middle narrow tip
[-width-armLength - propNarrowWidth, +endT, -length-armLength - propNarrowWidth], #4 104
[-width-armLength + propNarrowWidth, +endT, -length-armLength + propNarrowWidth], #5 105
[-width-armLength + propNarrowWidth, +endB, -length-armLength + propNarrowWidth], #6 106
[-width-armLength - propNarrowWidth, +endB, -length-armLength - propNarrowWidth], #7 107
#South West close wide
[-width-armLength - propShortLength - propWidth, +closeTU, -length-armLength + propShortLength - propWidth], #4 108
[-width-armLength - propShortLength + propWidth, +closeTL, -length-armLength + propShortLength + propWidth], #5 109
[-width-armLength - propShortLength + propWidth, +closeBL, -length-armLength + propShortLength + propWidth], #6 110
[-width-armLength - propShortLength - propWidth, +closeBU, -length-armLength + propShortLength - propWidth], #7 111
#South West far wide
[-width-armLength - propNarrowLength - propWidth, +farTU, -length-armLength + propNarrowLength - propWidth], #4 112
[-width-armLength - propNarrowLength + propWidth, +farTL, -length-armLength + propNarrowLength + propWidth], #5 113
[-width-armLength - propNarrowLength + propWidth, +farBL, -length-armLength + propNarrowLength + propWidth], #6 114
[-width-armLength - propNarrowLength - propWidth, +farBU, -length-armLength + propNarrowLength - propWidth], #7 115
#South West far narrow tip
[-width-armLength - propLength - propNarrowWidth, +tipTU, -length-armLength + propLength - propNarrowWidth], #0 116
[-width-armLength - propLength + propNarrowWidth, +tipTL, -length-armLength + propLength + propNarrowWidth], #1 117
[-width-armLength - propLength + propNarrowWidth, +tipBL, -length-armLength + propLength + propNarrowWidth], #2 118
[-width-armLength - propLength - propNarrowWidth, +tipBU, -length-armLength + propLength - propNarrowWidth], #3 119
#creates prop 3 on arm 3
#North West far narrow tip
[+width+armLength - propLength - propNarrowWidth, +tipTU, -length-armLength - propLength + propNarrowWidth], #0 120
[+width+armLength - propLength + propNarrowWidth, +tipTL, -length-armLength - propLength - propNarrowWidth], #1 121
[+width+armLength - propLength + propNarrowWidth, +tipBL, -length-armLength - propLength - propNarrowWidth], #2 122
[+width+armLength - propLength - propNarrowWidth, +tipBU, -length-armLength - propLength + propNarrowWidth], #3 123
#North West far wide
[+width+armLength - propNarrowLength - propWidth, +farTU, -length-armLength - propNarrowLength + propWidth], #4 124
[+width+armLength - propNarrowLength + propWidth, +farTL, -length-armLength - propNarrowLength - propWidth], #5 125
[+width+armLength - propNarrowLength + propWidth, +farBL, -length-armLength - propNarrowLength - propWidth], #6 126
[+width+armLength - propNarrowLength - propWidth, +farBU, -length-armLength - propNarrowLength + propWidth], #7 127
#North West close wide
[+width+armLength - propShortLength - propWidth, +closeTU, -length-armLength - propShortLength + propWidth], #4 128
[+width+armLength - propShortLength + propWidth, +closeTL, -length-armLength - propShortLength - propWidth], #5 129
[+width+armLength - propShortLength + propWidth, +closeBL, -length-armLength - propShortLength - propWidth], #6 130
[+width+armLength - propShortLength - propWidth, +closeBU, -length-armLength - propShortLength + propWidth], #7 131
#Middle narrow tip
[+width+armLength - propNarrowWidth, +endT, -length-armLength + propNarrowWidth], #4 132
[+width+armLength + propNarrowWidth, +endT, -length-armLength - propNarrowWidth], #5 133
[+width+armLength + propNarrowWidth, +endB, -length-armLength - propNarrowWidth], #6 134
[+width+armLength - propNarrowWidth, +endB, -length-armLength + propNarrowWidth], #7 135
#South East close wide
[+width+armLength + propShortLength - propWidth, +closeTL, -length-armLength + propShortLength + propWidth], #4 136
[+width+armLength + propShortLength + propWidth, +closeTU, -length-armLength + propShortLength - propWidth], #5 137
[+width+armLength + propShortLength + propWidth, +closeBU, -length-armLength + propShortLength - propWidth], #6 138
[+width+armLength + propShortLength - propWidth, +closeBL, -length-armLength + propShortLength + propWidth], #7 139
#South East far wide
[+width+armLength + propNarrowLength - propWidth, +farTL, -length-armLength + propNarrowLength + propWidth], #4 140
[+width+armLength + propNarrowLength + propWidth, +farTU, -length-armLength + propNarrowLength - propWidth], #5 141
[+width+armLength + propNarrowLength + propWidth, +farBU, -length-armLength + propNarrowLength - propWidth], #6 142
[+width+armLength + propNarrowLength - propWidth, +farBL, -length-armLength + propNarrowLength + propWidth], #7 143
#South East far narrow tip
[+width+armLength + propLength - propNarrowWidth, +tipTL, -length-armLength + propLength + propNarrowWidth], #0 144
[+width+armLength + propLength + propNarrowWidth, +tipTU, -length-armLength + propLength - propNarrowWidth], #1 145
[+width+armLength + propLength + propNarrowWidth, +tipBU, -length-armLength + propLength - propNarrowWidth], #2 146
[+width+armLength + propLength - propNarrowWidth, +tipBL, -length-armLength + propLength + propNarrowWidth], #3 147
#creates prop 2 on arm 2
#North West far narrow tip
[-width-armLength - propLength - propNarrowWidth, +tipTU, +length+armLength - propLength + propNarrowWidth], #0 148
[-width-armLength - propLength + propNarrowWidth, +tipTL, +length+armLength - propLength - propNarrowWidth], #1 149
[-width-armLength - propLength + propNarrowWidth, +tipBL, +length+armLength - propLength - propNarrowWidth], #2 150
[-width-armLength - propLength - propNarrowWidth, +tipBU, +length+armLength - propLength + propNarrowWidth], #3 151
#North West far wide
[-width-armLength - propNarrowLength - propWidth, +farTU, +length+armLength - propNarrowLength + propWidth], #4 152
[-width-armLength - propNarrowLength + propWidth, +farTL, +length+armLength - propNarrowLength - propWidth], #5 153
[-width-armLength - propNarrowLength + propWidth, +farBL, +length+armLength - propNarrowLength - propWidth], #6 154
[-width-armLength - propNarrowLength - propWidth, +farBU, +length+armLength - propNarrowLength + propWidth], #7 155
#North West close wide
[-width-armLength - propShortLength - propWidth, +closeTU, +length+armLength - propShortLength + propWidth], #4 156
[-width-armLength - propShortLength + propWidth, +closeTL, +length+armLength - propShortLength - propWidth], #5 157
[-width-armLength - propShortLength + propWidth, +closeBL, +length+armLength - propShortLength - propWidth], #6 158
[-width-armLength - propShortLength - propWidth, +closeBU, +length+armLength - propShortLength + propWidth], #7 159
#Middle narrow tip
[-width-armLength - propNarrowWidth, +endT, +length+armLength + propNarrowWidth], #4 160
[-width-armLength + propNarrowWidth, +endT, +length+armLength - propNarrowWidth], #5 161
[-width-armLength + propNarrowWidth, +endB, +length+armLength - propNarrowWidth], #6 162
[-width-armLength - propNarrowWidth, +endB, +length+armLength + propNarrowWidth], #7 163
#South East close wide
[-width-armLength + propShortLength - propWidth, +closeTL, +length+armLength + propShortLength + propWidth], #4 164
[-width-armLength + propShortLength + propWidth, +closeTU, +length+armLength + propShortLength - propWidth], #5 165
[-width-armLength + propShortLength + propWidth, +closeBU, +length+armLength + propShortLength - propWidth], #6 166
[-width-armLength + propShortLength - propWidth, +closeBL, +length+armLength + propShortLength + propWidth], #7 167
#South East far wide
[-width-armLength + propNarrowLength - propWidth, +farTL, +length+armLength + propNarrowLength + propWidth], #4 168
[-width-armLength + propNarrowLength + propWidth, +farTU, +length+armLength + propNarrowLength - propWidth], #5 169
[-width-armLength + propNarrowLength + propWidth, +farBU, +length+armLength + propNarrowLength - propWidth], #6 170
[-width-armLength + propNarrowLength - propWidth, +farBL, +length+armLength + propNarrowLength + propWidth], #7 171
#South East far narrow tip
[-width-armLength + propLength - propNarrowWidth, +tipTL, +length+armLength + propLength + propNarrowWidth], #0 172
[-width-armLength + propLength + propNarrowWidth, +tipTU, +length+armLength + propLength - propNarrowWidth], #1 173
[-width-armLength + propLength + propNarrowWidth, +tipBU, +length+armLength + propLength - propNarrowWidth], #2 174
[-width-armLength + propLength - propNarrowWidth, +tipBL, +length+armLength + propLength + propNarrowWidth] #3 175
])
# Each face contains indices into points array above
faces = [(50,49,48,51),(59,51,48,56),(58,59,56,57), #top of the Box
(40,41,42,43),(41,45,46,42),(45,44,47,46),(44,40,43,47),(40,44,45,41),(43,42,46,47), #arrow Head
(32,33,34,35),(33,37,38,34),(37,36,39,38),(36,32,35,39),(32,36,37,33),(35,34,38,39), #arrow Body
(54,55,52,53),(54,53,61,62),(62,61,60,63),(48,49,55,54),(49,50,52,55),(50,51,53,52),(51,59,61,53),(59,58,60,61),(58,57,63,60),(57,56,62,63),(56,48,54,62), #rest of the box
(1,5,6,2),(5,4,7,6),(4,0,3,7),(0,4,5,1),(3,2,6,7), #arm 1
(9,13,14,10),(13,12,15,14),(12,8,11,15),(8,12,13,9),(11,10,14,15), #arm 2
(16,17,18,19),(17,21,22,18),(20,16,19,23),(16,20,21,17),(19,18,22,23), #arm 3
(24,25,26,27),(25,29,30,26),(28,24,27,31),(24,28,29,25),(27,26,30,31), #arm 4
(92,93,94,95),(93,97,98,94),(97,96,99,98),(96,92,95,99),(92,96,97,93),(95,94,98,99),(97,101,102,98),(101,100,103,102),(100,96,99,103),(96,100,101,97),(99,98,102,103),(101,105,106,102),(104,100,103,107),(100,104,105,101),(103,102,106,107),(105,109,110,106),(108,104,107,111),(104,108,109,105),(107,106,110,111),(109,113,114,110),(112,108,111,115),(108,112,113,109),(111,110,114,115),(113,117,118,114),(117,116,119,118),(116,112,115,119),(112,116,117,113),(115,114,118,119), #prop 4
(120,121,122,123),(121,125,126,122),(124,120,123,127),(120,124,125,121),(123,122,126,127),(125,129,130,126),(128,124,127,131),(124,128,129,125),(127,126,130,131),(129,133,134,130),(132,128,131,135),(128,132,133,129),(131,130,134,135),(133,137,138,134),(136,132,135,139),(132,136,137,133),(135,134,138,139),(137,141,142,138),(140,136,139,143),(136,140,141,137),(139,138,142,143),(141,145,146,142),(145,144,147,146),(144,140,143,147),(140,144,145,141),(143,142,146,147), #prop 3
(148,149,150,151),(149,153,154,150),(152,148,151,155),(148,152,153,149),(151,150,154,155),(153,157,158,154),(156,152,155,159),(152,156,157,153),(155,154,158,159),(157,161,162,158),(160,156,159,163),(156,160,161,157),(159,158,162,163),(161,165,166,162),(164,160,163,167),(160,164,165,161),(163,162,166,167),(165,169,170,166),(168,164,167,171),(164,168,169,165),(167,166,170,171),(169,173,174,170),(173,172,175,174),(172,168,171,175),(168,172,173,169),(171,170,174,175), #prop 2
(64,65,66,67),(65,69,70,66),(68,64,67,71),(64,68,69,65),(67,66,70,71),(69,73,74,70),(72,68,71,75),(68,72,73,69),(71,70,74,75),(73,77,78,74),(76,72,75,79),(72,76,77,73),(75,74,78,79),(77,81,82,78),(80,76,79,83),(76,80,81,77),(79,78,82,83),(81,85,86,82),(84,80,83,87),(80,84,85,81),(83,82,86,87),(85,89,90,86),(89,88,91,90),(88,84,87,91),(84,88,89,85),(87,86,90,91), #prop 1
]
lightestGrey = '#d1c0c0'
lightGrey = '#72716d'
grey = '#665f59'
darkGrey ='#4c4641'
darkRed = '#993838'
red = 'red'
green = '#31e224'
darkGreen = '#2b7f24'
colors = [
lightGrey,lightGrey,lightGrey, #box Top
'red','red','red','red','red','red', #arrow head
'red','red','red','red','red','red' , #arrow body
lightGrey,lightGrey,lightGrey, #box bottom
grey,grey,grey, #box North
grey, #box East
grey,grey,grey, #box South
grey, #box West
lightGrey, grey, lightGrey, darkGrey, darkGrey, #arm 1
lightGrey, grey, lightGrey, darkGrey, darkGrey, #arm 2
grey, lightGrey, lightGrey, darkGrey, darkGrey, #arm 3
grey, lightGrey, lightGrey, darkGrey, darkGrey, #arm 4
#prop 4
darkGreen,darkGreen,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,darkGreen,
green,green,darkGreen,darkGreen,
green,green,darkGreen,darkGreen,
green,green,darkGreen,
darkGreen,green,green,darkGreen,
darkGreen,darkGreen,green,green,
#prop 3
darkGreen,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,green,
green,darkGreen,darkGreen,darkGreen,green,green,
#prop 2
darkRed,darkRed,darkRed,red,
red,darkRed,darkRed,red,
red,darkRed,darkRed,red,
red,darkRed,darkRed,red,
red,darkRed,darkRed,red,
red,darkRed,darkRed,darkRed,red,red,
#prop 1
darkRed,darkRed,darkRed,red,red,
darkRed,darkRed,red,red,
darkRed,darkRed,red,red,
darkRed,darkRed,red,
red,darkRed,darkRed,red,
red,darkRed,darkRed,darkRed,red,red,
]
return points, faces, colors
# Testing ==============================================================================================================
class MSPDriver(object):
def __init__(self, root, canvas):
self.root = root
self.canvas = canvas
self.fmuport = serial.Serial(FMUPORT, 115200)
# MSPPG
self.parser = msppg.MSP_Parser()
self.parser.set_ATTITUDE_Handler(self._attitude_message_handler)
self.request = self.parser.serialize_ATTITUDE_Request()
self.yaw, self.pitch, self.roll = 0, 0, 0
thread = threading.Thread(target = self._read_fmu)
thread.daemon = True
thread.start()
self._send_request()
def _send_request(self):
self.fmuport.write(self.request)
def _read_fmu(self):
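        # Runs on a daemon thread, feeding the MSP parser one byte at a time.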
while True:
self.parser.parse(self.fmuport.read(1))
def _attitude_message_handler(self, x, y, z):
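        # MSP ATTITUDE reports roll/pitch in tenths of a degree; pitch is
        # negated here to match the display's sign convention.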
self.pitch = -y/10.
self.roll = x/10.
self.yaw = z
self._send_request()
def getYawPitchRoll(self):
return self.yaw, self.pitch, self.roll
if __name__ == "__main__":
width = 800
height = 800
root = Tk()
root.geometry('%dx%d+%d+%d' % (width, height, 200, 200))
canvas = Canvas(root, width=width, height=height, background='black')
driver = MSPDriver(root, canvas)
canvas.pack()
Display(driver, simulation=True).start()
mainloop()
|
[
"[email protected]"
] | |
1c23d8ec01374a07d729a7c0bff7960fefe9bc10
|
9e2ab7f4903d9124b4aa826616c9935e2c8c4b8a
|
/python/ccxt/async_support/hollaex.py
|
1c9f2a55e650b7c85fd519cf847b31614f933064
|
[
"MIT"
] |
permissive
|
jknight/ccxt
|
fc7a0f44014bbe63dd9289c8cc053e82574fc4c2
|
02cdef0247435a6c6557faad8a1793d3da67c085
|
refs/heads/master
| 2022-11-23T03:38:08.400200 | 2020-06-10T19:02:18 | 2020-06-10T19:02:18 | 270,738,340 | 1 | 0 |
MIT
| 2020-06-08T16:19:13 | 2020-06-08T16:19:12 | null |
UTF-8
|
Python
| false | false | 43,498 |
py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NetworkError
from ccxt.base.decimal_to_precision import TICK_SIZE
class hollaex(Exchange):
def describe(self):
return self.deep_extend(super(hollaex, self).describe(), {
'id': 'hollaex',
'name': 'HollaEx',
'countries': ['KR'],
'rateLimit': 333,
'version': 'v1',
'has': {
'CORS': False,
'fetchMarkets': True,
'fetchCurrencies': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchOrderBook': True,
'fetchOrderBooks': True,
'fetchTrades': True,
'fetchOHLCV': True,
'fetchBalance': True,
'createOrder': True,
'createLimitBuyOrder': True,
'createLimitSellOrder': True,
'createMarketBuyOrder': True,
'createMarketSellOrder': True,
'cancelOrder': True,
'cancelAllOrders': True,
'fetchOpenOrders': True,
'fetchClosedOrders': False,
'fetchOpenOrder': True,
'fetchOrder': False,
'fetchDeposits': True,
'fetchWithdrawals': True,
'fetchTransactions': False,
'fetchOrders': False,
'fetchMyTrades': True,
'withdraw': True,
'fetchDepositAddress': True,
},
'timeframes': {
'1h': '1h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/75841031-ca375180-5ddd-11ea-8417-b975674c23cb.jpg',
'api': 'https://api.hollaex.com',
'www': 'https://hollaex.com',
'doc': 'https://apidocs.hollaex.com',
'referral': 'https://pro.hollaex.com/signup?affiliation_code=QSWA6G',
},
'precisionMode': TICK_SIZE,
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'api': {
'public': {
'get': [
'health',
'constant',
'ticker',
'ticker/all',
'orderbooks',
'trades',
'chart',
# TradingView data
'udf/config',
'udf/history',
'udf/symbols',
],
},
'private': {
'get': [
'user',
'user/balance',
'user/trades',
'user/orders',
'user/orders/{order_id}',
'user/deposits',
'user/withdrawals',
'user/withdraw/{currency}/fee',
],
'post': [
'user/request-withdrawal',
'order',
],
'delete': [
'user/orders',
'user/orders/{order_id}',
],
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
},
},
'exceptions': {
'broad': {
'Invalid token': AuthenticationError,
'Order not found': OrderNotFound,
'Insufficient balance': InsufficientFunds,
},
'exact': {
'400': BadRequest,
'403': AuthenticationError,
'404': BadRequest,
'405': BadRequest,
'410': BadRequest,
'429': BadRequest,
'500': NetworkError,
'503': NetworkError,
},
},
'options': {
# how many seconds before the authenticated request expires
'api-expires': int(self.timeout / 1000),
},
})
async def fetch_markets(self, params={}):
response = await self.publicGetConstant(params)
#
# {
# coins: {
# xmr: {
# id: 7,
# fullname: "Monero",
# symbol: "xmr",
# active: True,
# allow_deposit: True,
# allow_withdrawal: True,
# withdrawal_fee: 0.02,
# min: 0.001,
# max: 100000,
# increment_unit: 0.001,
# deposit_limits: {'1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0},
# withdrawal_limits: {'1': 10, '2': 15, '3': 100, '4': 100, '5': 200, '6': 300, '7': 350, '8': 400, '9': 500, '10': -1},
# created_at: "2019-12-09T07:14:02.720Z",
# updated_at: "2020-01-16T12:12:53.162Z"
# },
# # ...
# },
# pairs: {
# 'btc-usdt': {
# id: 2,
# name: "btc-usdt",
# pair_base: "btc",
# pair_2: "usdt",
# taker_fees: {'1': 0.3, '2': 0.25, '3': 0.2, '4': 0.18, '5': 0.1, '6': 0.09, '7': 0.08, '8': 0.06, '9': 0.04, '10': 0},
# maker_fees: {'1': 0.1, '2': 0.08, '3': 0.05, '4': 0.03, '5': 0, '6': 0, '7': 0, '8': 0, '9': 0, '10': 0},
# min_size: 0.0001,
# max_size: 1000,
# min_price: 100,
# max_price: 100000,
# increment_size: 0.0001,
# increment_price: 0.05,
# active: True,
# created_at: "2019-12-09T07:15:54.537Z",
# updated_at: "2019-12-09T07:15:54.537Z"
# },
# },
# config: {tiers: 10},
# status: True
# }
#
pairs = self.safe_value(response, 'pairs', {})
keys = list(pairs.keys())
result = []
for i in range(0, len(keys)):
key = keys[i]
market = pairs[key]
id = self.safe_string(market, 'name')
baseId = self.safe_string(market, 'pair_base')
quoteId = self.safe_string(market, 'pair_2')
base = self.common_currency_code(baseId.upper())
quote = self.common_currency_code(quoteId.upper())
symbol = base + '/' + quote
active = self.safe_value(market, 'active')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': {
'price': self.safe_float(market, 'increment_price'),
'amount': self.safe_float(market, 'increment_size'),
},
'limits': {
'amount': {
'min': self.safe_float(market, 'min_size'),
'max': self.safe_float(market, 'max_size'),
},
'price': {
'min': self.safe_float(market, 'min_price'),
'max': self.safe_float(market, 'max_price'),
},
'cost': {'min': None, 'max': None},
},
'info': market,
})
return result
async def fetch_currencies(self, params={}):
response = await self.publicGetConstant(params)
coins = self.safe_value(response, 'coins', {})
keys = list(coins.keys())
result = {}
for i in range(0, len(keys)):
key = keys[i]
currency = coins[key]
id = self.safe_string(currency, 'symbol')
numericId = self.safe_integer(currency, 'id')
code = self.safe_currency_code(id)
name = self.safe_string(currency, 'fullname')
active = self.safe_value(currency, 'active')
fee = self.safe_float(currency, 'withdrawal_fee')
precision = self.safe_float(currency, 'increment_unit')
withdrawalLimits = self.safe_value(currency, 'withdrawal_limits', [])
result[code] = {
'id': id,
'numericId': numericId,
'code': code,
'info': currency,
'name': name,
'active': active,
'fee': fee,
'precision': precision,
'limits': {
'amount': {
'min': self.safe_float(currency, 'min'),
'max': self.safe_float(currency, 'max'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': self.safe_value(withdrawalLimits, 0),
},
},
}
return result
async def fetch_order_books(self, symbols=None, limit=None, params={}):
await self.load_markets()
response = await self.publicGetOrderbooks(params)
result = {}
marketIds = list(response.keys())
for i in range(0, len(marketIds)):
marketId = marketIds[i]
orderbook = response[marketId]
symbol = marketId
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
timestamp = self.parse8601(self.safe_string(orderbook, 'timestamp'))
result[symbol] = self.parse_order_book(response[marketId], timestamp)
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
marketId = self.market_id(symbol)
request = {
'symbol': marketId,
}
response = await self.publicGetOrderbooks(self.extend(request, params))
#
# {
# "btc-usdt": {
# "bids": [
# [8836.4, 1.022],
# [8800, 0.0668],
# [8797.75, 0.2398],
# ],
# "asks": [
# [8839.35, 1.5334],
# [8852.6, 0.0579],
# [8860.45, 0.1815],
# ],
# "timestamp": "2020-03-03T02:27:25.147Z"
# },
# "eth-usdt": {},
# # ...
# }
#
orderbook = self.safe_value(response, marketId)
timestamp = self.parse8601(self.safe_string(orderbook, 'timestamp'))
return self.parse_order_book(orderbook, timestamp)
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetTicker(self.extend(request, params))
#
# {
# open: 8615.55,
# close: 8841.05,
# high: 8921.1,
# low: 8607,
# last: 8841.05,
# volume: 20.2802,
# timestamp: '2020-03-03T03:11:18.964Z'
# }
#
return self.parse_ticker(response, market)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.publicGetTickerAll(self.extend(params))
#
# {
# "bch-usdt": {
# "time": "2020-03-02T04:29:45.011Z",
# "open": 341.65,
# "close":337.9,
# "high":341.65,
# "low":337.3,
# "last":337.9,
# "volume":0.054,
# "symbol":"bch-usdt"
# },
# # ...
# }
#
return self.parse_tickers(response, symbols)
def parse_tickers(self, response, symbols=None):
result = {}
keys = list(response.keys())
for i in range(0, len(keys)):
key = keys[i]
ticker = response[key]
symbol = key
market = None
marketId = self.safe_string(ticker, 'symbol', key)
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
result[symbol] = self.parse_ticker(ticker, market)
return self.filter_by_array(result, 'symbol', symbols)
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# open: 8615.55,
# close: 8841.05,
# high: 8921.1,
# low: 8607,
# last: 8841.05,
# volume: 20.2802,
# timestamp: '2020-03-03T03:11:18.964Z',
# }
#
# fetchTickers
#
# {
# "time": "2020-03-02T04:29:45.011Z",
# "open": 341.65,
# "close": 337.9,
# "high": 341.65,
# "low": 337.3,
# "last": 337.9,
# "volume": 0.054,
# "symbol": "bch-usdt"
# }
#
symbol = None
marketId = self.safe_string(ticker, 'symbol')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string_2(ticker, 'time', 'timestamp'))
close = self.safe_float(ticker, 'close')
result = {
'symbol': symbol,
'info': ticker,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': self.safe_float(ticker, 'open'),
'close': close,
'last': self.safe_float(ticker, 'last', close),
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': None,
}
return result
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetTrades(self.extend(request, params))
#
# {
# "btc-usdt": [
# {
# "size": 0.5,
# "price": 8830,
# "side": "buy",
# "timestamp": "2020-03-03T04:44:33.034Z"
# },
# # ...
# ]
# }
#
        trades = self.safe_value(response, market['id'], [])
return self.parse_trades(trades, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "size": 0.5,
# "price": 8830,
# "side": "buy",
# "timestamp": "2020-03-03T04:44:33.034Z"
# }
#
# fetchMyTrades(private)
#
# {
# "side": "buy",
# "symbol": "eth-usdt",
# "size": 0.086,
# "price": 226.19,
# "timestamp": "2020-03-03T08:03:55.459Z",
# "fee": 0.1
# }
#
symbol = None
marketId = self.safe_string(trade, 'symbol')
quote = None
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
datetime = self.safe_string(trade, 'timestamp')
timestamp = self.parse8601(datetime)
side = self.safe_string(trade, 'side')
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'size')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
feeCost = self.safe_float(trade, 'fee')
fee = None
if feeCost is not None:
feeCurrencyCode = market['quote'] if (market is not None) else quote
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
result = {
'info': trade,
'id': None,
'timestamp': timestamp,
'datetime': datetime,
'symbol': symbol,
'order': None,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
return result
async def fetch_ohlcv(self, symbol, timeframe='1h', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'resolution': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
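        # The chart endpoint wants an explicit [from, to] window in seconds,
        # so derive one from whichever of since / limit the caller supplied.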
if since is None:
if limit is None:
raise ArgumentsRequired(self.id + " fetchOHLCV requires a 'since' or a 'limit' argument")
else:
end = self.seconds()
start = end - duration * limit
request['to'] = end
request['from'] = start
else:
if limit is None:
request['from'] = int(since / 1000)
request['to'] = self.seconds()
else:
start = int(since / 1000)
request['from'] = start
request['to'] = self.sum(start, duration * limit)
response = await self.publicGetChart(self.extend(request, params))
#
# [
# {
# "time":"2020-03-02T20:00:00.000Z",
# "close":8872.1,
# "high":8872.1,
# "low":8858.6,
# "open":8858.6,
# "symbol":"btc-usdt",
# "volume":1.2922
# },
# ]
#
return self.parse_ohlcvs(response, market)
def parse_ohlcv(self, response, market=None, timeframe='1h', since=None, limit=None):
#
# {
# "time":"2020-03-02T20:00:00.000Z",
# "close":8872.1,
# "high":8872.1,
# "low":8858.6,
# "open":8858.6,
# "symbol":"btc-usdt",
# "volume":1.2922
# }
#
return [
self.parse8601(self.safe_string(response, 'time')),
self.safe_float(response, 'open'),
self.safe_float(response, 'high'),
self.safe_float(response, 'low'),
self.safe_float(response, 'close'),
self.safe_float(response, 'volume'),
]
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetUserBalance(params)
#
# {
# "updated_at": "2020-03-02T22:27:38.428Z",
# "btc_balance": 0,
# "btc_pending": 0,
# "btc_available": 0,
# "eth_balance": 0,
# "eth_pending": 0,
# "eth_available": 0,
# # ...
# }
#
result = {'info': response}
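        # Balances come back as flat '<currency>_available' / '<currency>_balance'
        # keys, so walk every known currency and pick out its fields.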
currencyIds = list(self.currencies_by_id.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(response, currencyId + '_available')
account['total'] = self.safe_float(response, currencyId + '_balance')
result[code] = account
return self.parse_balance(result)
async def fetch_open_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'order_id': id,
}
response = await self.privateGetUserOrdersOrderId(self.extend(request, params))
#
# {
# "created_at": "2018-03-23T04:14:08.663Z",
# "title": "string",
# "side": "sell",
# "type": "limit",
# "price": 0,
# "size": 0,
# "symbol": "xht-usdt",
# "id": "string",
# "created_by": 1,
# "filled": 0
# }
#
return self.parse_order(response)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = await self.privateGetUserOrders(self.extend(request, params))
#
# [
# {
# "created_at":"2020-03-03T08:02:18.639Z",
# "title":"5419ff3f-9d25-4af7-bcc2-803926518d76",
# "side":"buy",
# "type":"limit",
# "price":226.19,
# "size":0.086,
# "symbol":"eth-usdt",
# "id":"5419ff3f-9d25-4af7-bcc2-803926518d76",
# "created_by":620,
# "filled":0
# }
# ]
#
return self.parse_orders(response, market)
def parse_order(self, order, market=None):
#
# fetchOpenOrder, fetchOpenOrders
#
# {
# "created_at":"2020-03-03T08:02:18.639Z",
# "title":"5419ff3f-9d25-4af7-bcc2-803926518d76",
# "side":"buy",
# "type":"limit",
# "price":226.19,
# "size":0.086,
# "symbol":"eth-usdt",
# "id":"5419ff3f-9d25-4af7-bcc2-803926518d76",
# "created_by":620,
# "filled":0
# }
#
symbol = None
marketId = self.safe_string(order, 'symbol')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
if (symbol is None) and (market is not None):
symbol = market['symbol']
id = self.safe_string(order, 'id')
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
type = self.safe_string(order, 'type')
side = self.safe_string(order, 'side')
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'size')
filled = self.safe_float(order, 'filled')
cost = None
remaining = None
if filled is not None:
if amount is not None:
remaining = amount - filled
if price is not None:
cost = filled * price
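        # These endpoints do not return an order status, so market orders are
        # assumed to fill immediately (closed) and everything else is open.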
status = 'closed' if (type == 'market') else 'open'
result = {
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': filled,
'remaining': remaining,
'cost': cost,
'trades': None,
'fee': None,
'info': order,
'average': None,
}
return result
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
order = {
'symbol': market['id'],
'side': side,
'size': amount,
'type': type,
}
if type != 'market':
order['price'] = price
response = await self.privatePostOrder(self.extend(order, params))
#
# {
# "symbol": "xht-usdt",
# "side": "sell",
# "size": 1,
# "type": "limit",
# "price": 0.1,
# "id": "string",
# "created_by": 34,
# "filled": 0,
# "status": "pending"
# }
#
return self.parse_order(response, market)
async def cancel_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'order_id': id,
}
response = await self.privateDeleteUserOrdersOrderId(self.extend(request, params))
#
# {
# "title": "string",
# "symbol": "xht-usdt",
# "side": "sell",
# "size": 1,
# "type": "limit",
# "price": 0.1,
# "id": "string",
# "created_by": 34,
# "filled": 0
# }
#
return self.parse_order(response)
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
            market = self.market(symbol)
request['symbol'] = market['id']
response = await self.privateDeleteUserOrders(self.extend(request, params))
#
# [
# {
# "title": "string",
# "symbol": "xht-usdt",
# "side": "sell",
# "size": 1,
# "type": "limit",
# "price": 0.1,
# "id": "string",
# "created_by": 34,
# "filled": 0
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'symbol': market['id'],
# 'limit': 50, # default 50, max 100
# 'page': 1, # page of data to retrieve
# 'order_by': 'timestamp', # field to order data
# 'order': 'asc', # asc or desc
# 'start_date': 123, # starting date of queried data
# 'end_date': 321, # ending date of queried data
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit # default 50, max 100
if since is not None:
request['start_date'] = self.iso8601(since)
response = await self.privateGetUserTrades(self.extend(request, params))
#
# {
# "count": 1,
# "data": [
# {
# "side": "buy",
# "symbol": "eth-usdt",
# "size": 0.086,
# "price": 226.19,
# "timestamp": "2020-03-03T08:03:55.459Z",
# "fee": 0.1
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_trades(data, market, since, limit)
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
response = await self.privateGetUser(params)
#
# {
# "id": 620,
# "email": "[email protected]",
# "full_name": "",
# "name_verified": False,
# "gender": False,
# "nationality": "",
# "phone_number": "",
# "address": {"city": "", "address": "", "country": "", "postal_code": ""},
# "id_data": {"note": "", "type": "", "number": "", "status": 0},
# "bank_account":[],
# "crypto_wallet":{
# "xrp": "rJtoECs6rPkJoAfgtR8SDDshV6hRHe3X7y:391496555"
# "usdt":"0x1fb4248e167901dfa0d8cdda2243a2126d7ce48d"
# # ...
# },
# "verification_level": 1,
# "otp_enabled": True,
# "activated": True,
# "note": "",
# "username": "user",
# "affiliation_code": "QSWA6G",
# "settings": {
# "chat": {"set_username": False},
# "risk": {"order_portfolio_percentage": 20},
# "audio": {
# "public_trade": False,
# "order_completed": True,
# "order_partially_completed": True
# },
# "language": "en",
# "interface": {"theme": "white","order_book_levels": 10},
# "notification": {
# "popup_order_completed": True,
# "popup_order_confirmation": True,
# "popup_order_partially_filled": True
# }
# },
# "flagged": False,
# "is_hap": False,
# "pin": False,
# "discount": 0,
# "created_at": "2020-03-02T22:27:38.331Z",
# "updated_at": "2020-03-03T07:54:58.315Z",
# "balance": {
# "xht_balance": 0,
# "xht_pending": 0,
# "xht_available": 0,
# # ...
# "updated_at": "2020-03-03T10:21:05.430Z"
# },
# "images": [],
# "fees": {
# "btc-usdt": {"maker_fee": 0.1, "taker_fee": 0.3},
# "eth-usdt": {"maker_fee": 0.1, "taker_fee": 0.3},
# # ...
# }
# }
#
cryptoWallet = self.safe_value(response, 'crypto_wallet')
address = self.safe_string(cryptoWallet, currency['id'])
tag = None
if address is not None:
parts = address.split(':')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': response,
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'currency': currency['id'],
# 'limit': 50, # default 50, max 100
# 'page': 1, # page of data to retrieve
# 'order_by': 'timestamp', # field to order data
# 'order': 'asc', # asc or desc
# 'start_date': 123, # starting date of queried data
# 'end_date': 321, # ending date of queried data
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit # default 50, max 100
if since is not None:
request['start_date'] = self.iso8601(since)
response = await self.privateGetUserDeposits(self.extend(request, params))
#
# {
# "count": 1,
# "data": [
# {
# "id": 539,
# "amount": 20,
# "fee": 0,
# "address": "0x5c0cc98270d7089408fcbcc8e2131287f5be2306",
# "transaction_id": "0xd4006327a5ec2c41adbdcf566eaaba6597c3d45906abe78ea1a4a022647c2e28",
# "status": True,
# "dismissed": False,
# "rejected": False,
# "description": "",
# "type": "deposit",
# "currency": "usdt",
# "created_at": "2020-03-03T07:56:36.198Z",
# "updated_at": "2020-03-03T08:00:05.674Z",
# "user_id": 620
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_transactions(data, currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'currency': currency['id'],
# 'limit': 50, # default 50, max 100
# 'page': 1, # page of data to retrieve
# 'order_by': 'timestamp', # field to order data
# 'order': 'asc', # asc or desc
# 'start_date': 123, # starting date of queried data
# 'end_date': 321, # ending date of queried data
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if limit is not None:
request['limit'] = limit # default 50, max 100
if since is not None:
request['start_date'] = self.iso8601(since)
response = await self.privateGetUserWithdrawals(self.extend(request, params))
#
# {
# "count": 1,
# "data": [
# {
# "id": 539,
# "amount": 20,
# "fee": 0,
# "address": "0x5c0cc98270d7089408fcbcc8e2131287f5be2306",
# "transaction_id": "0xd4006327a5ec2c41adbdcf566eaaba6597c3d45906abe78ea1a4a022647c2e28",
# "status": True,
# "dismissed": False,
# "rejected": False,
# "description": "",
# "type": "withdrawal",
# "currency": "usdt",
# "created_at": "2020-03-03T07:56:36.198Z",
# "updated_at": "2020-03-03T08:00:05.674Z",
# "user_id": 620
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_transactions(data, currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# {
# "id": 539,
# "amount": 20,
# "fee": 0,
# "address": "0x5c0cc98270d7089408fcbcc8e2131287f5be2306",
# "transaction_id": "0xd4006327a5ec2c41adbdcf566eaaba6597c3d45906abe78ea1a4a022647c2e28",
# "status": True,
# "dismissed": False,
# "rejected": False,
# "description": "",
# "type": "withdrawal",
# "currency": "usdt",
# "created_at": "2020-03-03T07:56:36.198Z",
# "updated_at": "2020-03-03T08:00:05.674Z",
# "user_id": 620
# }
#
id = self.safe_string(transaction, 'id')
txid = self.safe_string(transaction, 'transaction_id')
timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))
updated = self.parse8601(self.safe_string(transaction, 'updated_at'))
type = self.safe_string(transaction, 'type')
amount = self.safe_float(transaction, 'amount')
address = self.safe_string(transaction, 'address')
addressTo = None
addressFrom = None
tag = None
tagTo = None
tagFrom = None
if address is not None:
parts = address.split(':')
address = self.safe_string(parts, 0)
tag = self.safe_string(parts, 1)
addressTo = address
tagTo = tag
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
status = self.safe_value(transaction, 'status')
dismissed = self.safe_value(transaction, 'dismissed')
rejected = self.safe_value(transaction, 'rejected')
if status:
status = 'ok'
elif dismissed:
status = 'canceled'
elif rejected:
status = 'failed'
else:
status = 'pending'
fee = {
'currency': code,
'cost': self.safe_float(transaction, 'fee'),
}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'addressFrom': addressFrom,
'address': address,
'addressTo': addressTo,
'tagFrom': tagFrom,
'tag': tag,
'tagTo': tagTo,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
if tag is not None:
address += ':' + tag
request = {
'currency': currency['id'],
'amount': amount,
'address': address,
}
# one time password
otp = self.safe_string(params, 'otp_code')
if (otp is not None) or (self.twofa is not None):
if otp is None:
otp = self.oath()
request['otp_code'] = otp
response = await self.privatePostUserRequestWithdrawal(self.extend(request, params))
return {
'info': response,
'id': None,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = self.omit(params, self.extract_params(path))
path = '/' + self.version + '/' + self.implode_params(path, params)
if method == 'GET':
if query:
path += '?' + self.urlencode(query)
url = self.urls['api'] + path
if api == 'private':
self.check_required_credentials()
defaultExpires = self.safe_integer_2(self.options, 'api-expires', 'expires', int(self.timeout / 1000))
expires = self.sum(self.seconds(), defaultExpires)
expiresString = str(expires)
auth = method + path + expiresString
headers = {
'api-key': self.encode(self.apiKey),
'api-expires': expiresString,
}
if method == 'POST':
headers['Content-type'] = 'application/json'
if query:
body = self.json(query)
auth += body
signature = self.hmac(self.encode(auth), self.encode(self.secret))
headers['api-signature'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
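    # Standalone sketch of the signing scheme implemented above (hypothetical
    # secret and path; assumes ccxt's default SHA-256 HMAC digest): the
    # signature is the hex HMAC of method + path + expiry, with the JSON body
    # appended for POST requests.
    #
    #   import hashlib, hmac, time
    #   expires = str(int(time.time()) + 60)
    #   auth = 'GET' + '/v1/user/orders' + expires
    #   signature = hmac.new(b'SECRET', auth.encode('utf-8'), hashlib.sha256).hexdigest()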
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
if (code >= 400) and (code <= 503):
#
# {"message": "Invalid token"}
#
feedback = self.id + ' ' + body
message = self.safe_string(response, 'message')
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
status = str(code)
self.throw_exactly_matched_exception(self.exceptions['exact'], status, feedback)
|
[
"[email protected]"
] | |
af42b85e3f471e7e97d28b7be17f4a38e423e2dd
|
37adb80efb9b75e507440af38e116207b65039ec
|
/backend/green_rice_27742/wsgi.py
|
6b1b876222d02962b56137e0681369fb77328ada
|
[] |
no_license
|
crowdbotics-apps/green-rice-27742
|
c39ba932d25992eef96da74c57bcf5a479014727
|
f9c8cfc358c287ac7d32c983c523b463e5f5a1da
|
refs/heads/master
| 2023-05-25T18:47:07.307988 | 2021-06-04T23:47:41 | 2021-06-04T23:47:41 | 373,982,930 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 409 |
py
|
"""
WSGI config for green_rice_27742 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'green_rice_27742.settings')
application = get_wsgi_application()
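# Usage sketch (server choice assumed): any WSGI server can serve the
# module-level callable exposed above, e.g.
#   gunicorn green_rice_27742.wsgi:application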
|
[
"[email protected]"
] | |
ddba473a2d93e43e7b7d79629b3f73dc5b86a764
|
4f7962d02254ab6e5cf692648c933394ff41c79d
|
/component_sdk/python/tests/google/dataflow/test__launch_python.py
|
246816dd10677750ebd91c1fc5a39a72f081beb4
|
[
"Apache-2.0"
] |
permissive
|
yebrahim/pipelines
|
5414131f5ab176aa7607114e3a0d23db73f5c8c8
|
77df6c2438f4cf6b81c97ecf4dac9fdbac0e3132
|
refs/heads/master
| 2020-04-08T13:23:50.628537 | 2019-03-01T18:35:47 | 2019-03-01T18:35:47 | 159,389,183 | 1 | 0 |
Apache-2.0
| 2018-11-27T19:37:57 | 2018-11-27T19:37:56 | null |
UTF-8
|
Python
| false | false | 2,989 |
py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
import os
from kfp_component.google.dataflow import launch_python
MODULE = 'kfp_component.google.dataflow._launch_python'
@mock.patch('kfp_component.google.dataflow._common_ops.display')
@mock.patch(MODULE + '.stage_file')
@mock.patch(MODULE + '.KfpExecutionContext')
@mock.patch(MODULE + '.DataflowClient')
@mock.patch(MODULE + '.Process')
@mock.patch(MODULE + '.subprocess')
class LaunchPythonTest(unittest.TestCase):
def test_launch_python_succeed(self, mock_subprocess, mock_process,
mock_client, mock_context, mock_stage_file, mock_display):
mock_context().__enter__().context_id.return_value = 'ctx-1'
mock_client().list_aggregated_jobs.return_value = {
'jobs': []
}
mock_process().read_lines.return_value = [
b'https://console.cloud.google.com/dataflow/locations/us-central1/jobs/job-1?project=project-1'
]
expected_job = {
'currentState': 'JOB_STATE_DONE'
}
mock_client().get_job.return_value = expected_job
result = launch_python('/tmp/test.py', 'project-1')
self.assertEqual(expected_job, result)
def test_launch_python_retry_succeed(self, mock_subprocess, mock_process,
mock_client, mock_context, mock_stage_file, mock_display):
mock_context().__enter__().context_id.return_value = 'ctx-1'
mock_client().list_aggregated_jobs.return_value = {
'jobs': [{
'id': 'job-1',
'name': 'test_job-ctx-1'
}]
}
expected_job = {
'currentState': 'JOB_STATE_DONE'
}
mock_client().get_job.return_value = expected_job
result = launch_python('/tmp/test.py', 'project-1', job_name_prefix='test-job')
self.assertEqual(expected_job, result)
mock_process.assert_not_called()
def test_launch_python_no_job_created(self, mock_subprocess, mock_process,
mock_client, mock_context, mock_stage_file, mock_display):
mock_context().__enter__().context_id.return_value = 'ctx-1'
mock_client().list_aggregated_jobs.return_value = {
'jobs': []
}
mock_process().read_lines.return_value = [
b'no job id',
b'no job id'
]
result = launch_python('/tmp/test.py', 'project-1')
self.assertEqual(None, result)
|
[
"[email protected]"
] | |
bdafe21d3f847430f8a82c37360c237512b69a8c
|
83cf642504313b6ef6527dda52158a6698c24efe
|
/scripts/addons/fd_scripting_tools/autocompletion/suggestions/dynamic/_bpy_fake/__private__/blenddatalinestyles.py
|
da6be1867caf90383dcdbf8a7278bc00bf432072
|
[] |
no_license
|
PyrokinesisStudio/Fluid-Designer-Scripts
|
a4c40b871e8d27b0d76a8025c804d5a41d09128f
|
23f6fca7123df545f0c91bf4617f4de7d9c12e6b
|
refs/heads/master
| 2021-06-07T15:11:27.144473 | 2016-11-08T03:02:37 | 2016-11-08T03:02:37 | 113,630,627 | 1 | 0 | null | 2017-12-09T00:55:58 | 2017-12-09T00:55:58 | null |
UTF-8
|
Python
| false | false | 1,137 |
py
|
from . struct import Struct
from . freestylelinestyle import FreestyleLineStyle
from . bpy_struct import bpy_struct
import mathutils
class BlendDataLineStyles(bpy_struct):
@property
def rna_type(self):
'''(Struct) RNA type definition'''
return Struct()
@property
def is_updated(self):
'''(Boolean)'''
return bool()
def tag(self, value):
'''tag
Parameter:
value: (Boolean)'''
return
def new(self, name):
'''Add a new line style instance to the main database
Parameter:
name: (String) New name for the datablock
Returns:
linestyle: (FreestyleLineStyle) New line style datablock'''
return FreestyleLineStyle()
def remove(self, linestyle):
'''Remove a line style instance from the current blendfile
Parameter:
linestyle: (FreestyleLineStyle) Line style to remove'''
return
    def get(self, key): return FreestyleLineStyle()
    def __getitem__(self, key): return FreestyleLineStyle()
    def __iter__(self): yield FreestyleLineStyle()
|
[
"[email protected]"
] | |
da52ce6e0f2e6dbc30ed36da552d9228c8915f07
|
d635abe4bcdb62818c12f00fa3664d4147ecbf8d
|
/bert/train/utils/stateload.py
|
dcae7ee45b2a1639bd0b4d8997cb5c6cdcb920df
|
[] |
no_license
|
mbilab/protein_understanding
|
e82badd8657a9f115d4a617112e00d9d69f19471
|
56a3f17f0557c57d3c25786d128d608629aecd69
|
refs/heads/master
| 2020-07-08T14:38:27.318254 | 2019-12-18T09:46:02 | 2019-12-18T09:46:02 | 203,119,288 | 0 | 0 | null | 2019-08-19T07:18:15 | 2019-08-19T07:18:14 | null |
UTF-8
|
Python
| false | false | 766 |
py
|
def stateLoading(model, pretrained_path):
    # Works around the issue: KeyError: 'unexpected key ...'
# See https://discuss.pytorch.org/t/solved-keyerror-unexpected-key-module-encoder-embedding-weight-in-state-dict/1686/3
# Build a new dict that contains no prefix 'module.', the length of the prefix is 7
# original saved file with DataParallel
from collections import OrderedDict
import torch
state_dict = torch.load(pretrained_path, map_location='cpu')['state_dict']
# create new OrderedDict that does not contain `module.`
new_state_dict = OrderedDict()
for k, v in state_dict.items():
        name = k[7:]  # remove the 7-character `module.` prefix
new_state_dict[name] = v
# load params
model.load_state_dict(new_state_dict)
return model
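# Usage sketch (hypothetical model class and checkpoint path): load a
# checkpoint saved under nn.DataParallel into a plain single-device module.
#
#   model = MyModel()  # any nn.Module whose keys match once 'module.' is stripped
#   model = stateLoading(model, 'checkpoints/pretrained.pth.tar')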
|
[
"[email protected]"
] | |
439c1fd1483a411f8f2aa19a035764258d8b0f1f
|
0a8a4bfd6b4ffcfb7c99119c83cb3abe17c4a8f6
|
/examples/frontend_example.py
|
14824c9662fd5ca82359b8f6b574ba602715a9d3
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
google/openhtf
|
58c06e07508f9bb2079070a5ac03898fc68c1778
|
3a9a24987b2b34782fca55a8df8d007167dbb19a
|
refs/heads/master
| 2023-08-23T12:12:54.917649 | 2023-07-27T01:51:17 | 2023-07-27T01:51:43 | 41,519,483 | 471 | 253 |
Apache-2.0
| 2023-09-12T00:47:42 | 2015-08-28T01:14:17 |
Python
|
UTF-8
|
Python
| false | false | 1,431 |
py
|
# Copyright 2018 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple OpenHTF test which launches the web GUI client."""
import openhtf as htf
from openhtf.output.servers import station_server
from openhtf.output.web_gui import web_launcher
from openhtf.plugs import user_input
from openhtf.util import configuration
CONF = configuration.CONF
@htf.measures(htf.Measurement('hello_world_measurement'))
def hello_world(test):
test.logger.info('Hello World!')
test.measurements.hello_world_measurement = 'Hello Again!'
def main():
CONF.load(station_server_port='4444')
with station_server.StationServer() as server:
web_launcher.launch('http://localhost:4444')
for _ in range(5):
test = htf.Test(hello_world)
test.add_output_callbacks(server.publish_final_state)
test.execute(test_start=user_input.prompt_for_test_start())
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
9f2a2c5e001af876451e73bc546f946af0eb6ed8
|
226e962457f3f9d271bdc0ec7cb999d45dd2ab92
|
/plugins/admin.py
|
2cd117d0f1eb608f3a981430e86f4d6694ebbb58
|
[
"Apache-2.0"
] |
permissive
|
Web5design/saxo
|
5109ed2a9c48b2c6e4a2afd6edfe2e28b8279b0f
|
1cf079d13be63557626ebf8163c65c16bd4856c8
|
refs/heads/master
| 2021-01-20T16:30:36.401591 | 2014-01-26T22:23:28 | 2014-01-26T22:23:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,000 |
py
|
# http://inamidst.com/saxo/
# Created by Sean B. Palmer
import saxo
@saxo.command("join", owner=True)
def join(irc):
if irc.arg.startswith("#"):
irc.client("join", irc.arg)
irc.say("Joining %s" % irc.arg)
@saxo.command("leave", owner=True)
def leave(irc):
if irc.arg.startswith("#"):
irc.send("PART", irc.arg)
irc.say("Leaving %s" % irc.arg)
@saxo.command("part", owner=True)
def part(irc):
if irc.arg.startswith("#"):
irc.client("part", irc.arg)
irc.say("Parting %s" % irc.arg)
@saxo.command("prefix", owner=True)
def prefix(irc):
irc.client("prefix", irc.arg)
irc.say("Setting prefix to %r" % irc.arg)
@saxo.command("quit", owner=True)
def quit(irc):
irc.client("quit")
@saxo.command("reload", owner=True)
def reload(irc):
irc.client("reload", irc.sender)
@saxo.command("visit", owner=True)
def visit(irc):
if irc.arg.startswith("#"):
irc.send("JOIN", irc.arg)
irc.say("Visiting %s" % irc.arg)
|
[
"[email protected]"
] | |
7d962129b3321a9da84f6675f947b37340d162dd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02999/s943040694.py
|
66fd8c56fa53d42dca418e7b5c8d0ddc8788e6c2
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 74 |
py
|
x,a=map(int,input().split())
if(x<a):
print("0")
if(x>=a):
print("10")
|
[
"[email protected]"
] | |
ad0ce10579942c717769e5eb6262652d640dfcfa
|
3943378e160590751b195c58f5a817125e487686
|
/findit/engine/ocr.py
|
16990307cb26dfec42b936e006ab54d51c5f9c17
|
[
"MIT"
] |
permissive
|
uzstudio/findit
|
475d101e295aeb2c92df9f80dae78e88bc327a27
|
e3bb233b559f1882209fe95734a933bb82f71c80
|
refs/heads/master
| 2023-01-29T13:06:02.313593 | 2020-12-06T16:02:53 | 2020-12-06T16:02:53 | 319,051,716 | 0 | 0 |
MIT
| 2020-12-06T14:24:31 | 2020-12-06T14:24:31 | null |
UTF-8
|
Python
| false | false | 3,530 |
py
|
import numpy as np
import warnings
import typing
from findit.logger import logger
from findit.engine.base import FindItEngine, FindItEngineResponse
try:
import findtext
except ImportError:
logger.debug("findtext should be installed if you want to use OCR engine")
class OCREngine(FindItEngine):
""" OCR engine, binding to tesseract """
# language settings, same as tesseract
# if you want to use chi_sim and eng, you can set it 'chi_sim+eng'
DEFAULT_LANGUAGE: str = "eng"
# offset for words ( sometimes causes out of range, take care )
DEFAULT_OFFSET: int = 0
# deep query
DEFAULT_DEEP: bool = False
def __init__(self, engine_ocr_lang: str = None, *_, **__):
logger.info(f"engine {self.get_type()} preparing ...")
# check language data before execute function, not here.
self.engine_ocr_lang = engine_ocr_lang or self.DEFAULT_LANGUAGE
self.engine_ocr_offset = self.DEFAULT_OFFSET
self.engine_ocr_deep = self.DEFAULT_DEEP
assert findtext, "findtext should be installed if you want to use OCR engine"
self._ft = findtext.FindText(lang=engine_ocr_lang)
self.engine_ocr_tess_data_dir = self._ft.get_data_home()
self.engine_ocr_available_lang_list = self._ft.get_available_lang()
logger.debug(f"target lang: {self.engine_ocr_lang}")
logger.debug(f"tess data dir: {self.engine_ocr_tess_data_dir}")
logger.debug(f"available language: {self.engine_ocr_available_lang_list}")
logger.info(f"engine {self.get_type()} loaded")
def execute(
self,
template_object: np.ndarray,
target_object: np.ndarray,
engine_ocr_offset: int = None,
engine_ocr_deep: bool = None,
*_,
**__,
) -> FindItEngineResponse:
resp = FindItEngineResponse()
if engine_ocr_offset:
self.engine_ocr_offset = engine_ocr_offset
if engine_ocr_deep:
self.engine_ocr_deep = engine_ocr_deep
# _ft is not JSON serializable
        conf_dict = {k: v for k, v in self.__dict__.items() if k != "_ft"}
resp.append("conf", conf_dict, important=True)
# check language
for each_lang in self.engine_ocr_lang.split("+"):
if each_lang not in self.engine_ocr_available_lang_list:
resp.append("raw", "this language not available", important=True)
resp.append("ok", False, important=True)
return resp
word_block_list = self._ft.find_word(
image_object=target_object,
deep=self.engine_ocr_deep,
offset=self.engine_ocr_offset,
)
available_result_list = [i for i in word_block_list if i.content]
result_text = self._improve_text_result(
[i.content for i in available_result_list]
)
resp.append("content", result_text, important=True)
resp.append("raw", [i.__dict__ for i in word_block_list])
resp.append("ok", True, important=True)
return resp
@staticmethod
def _improve_text_result(origin: typing.List[str]) -> typing.List[str]:
try:
import jieba
except ImportError:
warnings.warn(
"no package named jieba, you can install it for better ocr result"
)
return origin
new = list()
for each in origin:
text_cut = jieba.cut(each)
new.extend(text_cut)
return list(set(new))
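# Illustration (assumed segmentation): list(jieba.cut('深度学习')) yields
# ['深度', '学习'], so each raw OCR block is split into word-level tokens
# before the set() above removes duplicates.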
|
[
"[email protected]"
] | |
da49861f0b8aa3629fdd2930c370bbadcbb763d3
|
de382219afb2a5e5a8d0dae18d16d08d0f8a48dc
|
/5zHGkepVqta7IBfS/bd7K98McTkaI3RFS.py
|
bd226e6265077aad77101f4c44508aaedbc4979a
|
[] |
no_license
|
urlib/OEV04H
|
1d862dfef7ec693bf31dd50e5c05b0bcefed423a
|
9f97b73481c3b6a0769ee4e3f07fbe4229be2b13
|
refs/heads/master
| 2021-05-19T18:12:13.600901 | 2020-04-15T03:21:34 | 2020-04-15T03:21:34 | 252,057,207 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 25,958 |
py
|
|
[
"[email protected]"
] | |
d19dc2f371b1a4f5f4558649314377073d2df972
|
40454f9a92ecbfc382d8bb611743bad4ecb76017
|
/service/log_handler.py
|
0d2507a5ad64cc8afdc55d03fbf6baac0d1e7018
|
[
"Apache-2.0"
] |
permissive
|
ibm-devops/devops-workshop-2020
|
cd83135edb5c7ec2adb4647d4b4997b9ff11fc46
|
33fec8ef8cb7e16548b4eb3381b441acbdce0e47
|
refs/heads/master
| 2023-09-06T08:45:35.184681 | 2021-11-23T21:46:49 | 2021-11-23T21:46:49 | 305,249,605 | 0 | 0 |
Apache-2.0
| 2020-10-19T21:40:57 | 2020-10-19T03:11:30 |
Python
|
UTF-8
|
Python
| false | false | 710 |
py
|
import logging
from . import app
############################################################
# set up logging for Flask applications
############################################################
if __name__ != "__main__":
gunicorn_logger = logging.getLogger("gunicorn.error")
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
app.logger.propagate = False
# Make all log formats consistent
formatter = logging.Formatter(
"[%(asctime)s] [%(levelname)s] [%(module)s] %(message)s", "%Y-%m-%d %H:%M:%S %z"
)
for handler in app.logger.handlers:
handler.setFormatter(formatter)
app.logger.info("Logging handler established")
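# Usage sketch (module path assumed): when served by gunicorn, e.g.
#   gunicorn --log-level=info "service:app"
# __name__ is not "__main__", so the guard above adopts gunicorn's handlers
# and every Flask log record is emitted with the consistent format set here.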
|
[
"[email protected]"
] | |
a31328a17ae927b249e052e1bdc2fca3299ef970
|
c2be187155aabf59a4c0d3f5065bc26239c0b827
|
/special_crawler/extract_att_data.py
|
4560042f5c90fb46a850df746ac322db6d859754
|
[] |
no_license
|
dankCodeNugs/tmtext
|
1d6c54f79399bfa5e6f3905c0f72ba0be59d8d0d
|
8e2d834775f440def7f57294674b8109b46ee191
|
refs/heads/master
| 2023-03-16T08:49:38.456929 | 2016-12-20T19:45:54 | 2016-12-20T19:45:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,035 |
py
|
#!/usr/bin/python
import re
import HTMLParser
import json
import requests
from lxml import html, etree
from extract_data import Scraper
class ATTScraper(Scraper):
##########################################
############### PREP
##########################################
INVALID_URL_MESSAGE = "Expected URL format is https://www.att.com/.*.html(?)(#sku=sku<skuid>)"
    def __init__(self, **kwargs):  # **kwargs are presumably (url, bot)
Scraper.__init__(self, **kwargs)
# whether product has any webcollage media
self.wc_360 = 0
self.wc_emc = 0
self.wc_video = 0
self.wc_pdf = 0
self.wc_prodtour = 0
self.videos = None
self.videos_checked = False
self.variants = None
self.variants_checked = False
self.pricearea_html = None
self.pricearea_html_checked = False
self.product_xml = None
self.product_xml_checked = False
self.product_details = None
self.product_details_checked = False
self.product_json = None
def check_url_format(self):
"""Checks product URL format for this scraper instance is valid.
Returns:
True if valid, False otherwise
"""
m = re.match('^https://www.att.com/.*\.html\??(#sku=sku\d+)?$', self.product_page_url)
        return bool(m)
def not_a_product(self):
"""Checks if current page is not a valid product page
(an unavailable product page or other type of method)
Overwrites dummy base class method.
Returns:
True if it's an unavailable product page
False otherwise
"""
try:
if not self.tree_html.xpath('//div[@itemtype="http://schema.org/Product"]'):
raise Exception()
except Exception:
return True
return False
##########################################
############### CONTAINER : NONE
##########################################
def _canonical_link(self):
return self.tree_html.xpath('//link[@rel="canonical"]/@href')[0]
def _url(self):
return self.product_page_url
def _event(self):
return None
def _product_id(self):
return re.findall('sku(\d+)', html.tostring(self.tree_html))[0]
def _site_id(self):
return self._product_id()
def _status(self):
return "success"
##########################################
############### CONTAINER : PRODUCT_INFO
##########################################
def _load_product_json(self):
if not self.product_json:
product_json = json.loads( self.load_page_from_url_with_number_of_retries('https://api.bazaarvoice.com/data/batch.json?passkey=9v8vw9jrx3krjtkp26homrdl8&apiversion=5.5&displaycode=4773-en_us&resource.q0=products&filter.q0=id%3Aeq%3Asku' + self._product_id() + '&stats.q0=questions%2Creviews'))
self.product_json = product_json['BatchedResults']['q0']['Results'][0]
def _get_pricearea_html(self):
if not self.pricearea_html_checked:
self.pricearea_html_checked = True
url = re.match('(.*).html.*', self.product_page_url).group(1) + '.pricearea.xhr.html?locale=en_US&skuId=sku' + self._product_id() + '&pageType=accessoryDetails&_=1461605909259'
self.pricearea_html = html.fromstring( self.load_page_from_url_with_number_of_retries(url))
return self.pricearea_html
def _get_product_xml(self):
if not self.product_xml_checked:
self.product_xml_checked = True
response = requests.get('https://www.att.com/shop/360s/xml/' + self._product_id() + '.xml')
if response.status_code == 200:
self.product_xml = etree.XML(response.content.replace(' encoding="UTF-8"', '').replace('&', '&'))
return self.product_xml
def _get_product_details(self):
if not self.product_details_checked:
self.product_details_checked = True
try:
product_details = json.loads( self.load_page_from_url_with_number_of_retries('https://www.att.com/services/shopwireless/model/att/ecom/api/DeviceDetailsActor/getDeviceProductDetails?includeAssociatedProducts=true&includePrices=true&skuId=sku' + self._product_id()))
self.product_details = product_details['result']['methodReturnValue']
except:
pass
return self.product_details
def _product_name(self):
self._load_product_json()
return self.product_json['Name']
def _product_title(self):
return self.tree_html.xpath('//meta[@property="og:title"]/@content')[0]
def _title_seo(self):
return self._product_title()
def _model(self):
return None
def _upc(self):
self._load_product_json()
return self.product_json['UPCs'][0]
def _model_meta(self):
return None
def _description(self):
return self.tree_html.xpath('//meta[@property="og:description" or @name="og:description"]/@content')[0]
def _long_description(self):
return None
def _variants(self):
if self.variants_checked:
return self.variants
self.variants_checked = True
variants = []
if self._get_product_details():
if len( self.product_details['skuItems']) > 1:
for skuId in self.product_details['skuItems']:
variant_json = self.product_details['skuItems'][skuId]
variant = {
'color' : variant_json['color'],
'selected' : variant_json['selectedSku'],
'price' : self._get_price( variant_json['priceList'])[0],
'outOfStock' : variant_json['outOfStock']
}
variants.append(variant)
else:
for variant_html in self._get_pricearea_html().xpath('//span[@id="colorInput"]/a'):
price = self._clean_text( self.pricearea_html.xpath('//div[@id="dueToday"]/div[contains(@class,"price")]/text()')[0])
out_of_stock = self.pricearea_html.xpath('//div[@id="deliveryPromise"]/@data-outofstock')[0] == 'true'
variant = {
'color' : variant_html.get('title'),
'selected' : 'current' in variant_html.get('class'),
'price' : price,
'outOfStock' : out_of_stock
}
variants.append(variant)
if variants:
self.variants = variants
return self.variants
##########################################
############### CONTAINER : PAGE_ATTRIBUTES
##########################################
def _mobile_image_same(self):
return None
def _image_urls(self):
#images = self.tree_html.xpath('//meta[@property="og:image"]/@content')
images = self.tree_html.xpath('//img[@itemprop="image"]/@src')
if self._get_product_xml():
for image in self.product_xml.xpath('//image_info'):
images.append('https://www.att.com' + image.get('path') + image.get('suffix'))
if images:
return images
def _image_count(self):
if self._image_urls():
return len(self._image_urls())
return 0
def _video_urls(self):
if self.videos_checked:
return self.videos
self.videos_checked = True
videos = []
if self._get_product_xml():
for gvpURL in self.product_xml.xpath('//movie/@gvpURL'):
response = self.load_page_from_url_with_number_of_retries( 'https://www.att.com/global-search/GenericLayer.jsp?q=id:' + gvpURL + '&core=videoservice&handler=select')
for url in re.findall('url_videoMain_en":"([^"]+)"', response):
videos.append( url[2:] + '/'+ url.split('/')[-1] + '.mp4')
if videos:
self.videos = videos
return self.videos
def _video_count(self):
self._video_urls()
if self.videos:
return len(self.videos)
return 0
def _pdf_urls(self):
return None
def _pdf_count(self):
return 0
def _webcollage(self):
"""Uses video and pdf information
to check whether product has any media from webcollage.
Returns:
1 if there is webcollage media
0 otherwise
"""
if self.wc_360 + self.wc_emc + self.wc_pdf + self.wc_prodtour + self.wc_video > 0:
return 1
return 0
def _wc_360(self):
self._webcollage()
return self.wc_360
def _wc_emc(self):
self._webcollage()
return self.wc_emc
def _wc_pdf(self):
self._webcollage()
return self.wc_pdf
def _wc_prodtour(self):
self._webcollage()
return self.wc_prodtour
def _wc_video(self):
self._webcollage()
return self.wc_video
def _htags(self):
htags_dict = {}
htags_dict['h1'] = map(lambda t: self._clean_text(t), self.tree_html.xpath('//h1//text()[normalize-space()!=""]'))
htags_dict['h2'] = map(lambda t: self._clean_text(t), self.tree_html.xpath('//h2//text()[normalize-space()!=""]'))
htags_dict['h1'] = filter(lambda t: not re.match('{{', t), htags_dict['h1'])
return htags_dict
def _keywords(self):
return self.tree_html.xpath('//meta[@name="keywords"]/@content')[0]
##########################################
############### CONTAINER : REVIEWS
##########################################
def _average_review(self):
self._load_product_json()
return round( self.product_json['ReviewStatistics']['AverageOverallRating'], 1)
def _review_count(self):
self._load_product_json()
return self.product_json['ReviewStatistics']['TotalReviewCount']
def _max_review(self):
if self._reviews():
for review in self._reviews():
if not review[1] == 0:
return review[0]
def _min_review(self):
if self._reviews():
for review in self._reviews()[::-1]: # reverses list
if not review[1] == 0:
return review[0]
def _reviews(self):
self._load_product_json()
reviews = []
ratings_distribution = self.product_json['ReviewStatistics']['RatingDistribution']
if ratings_distribution:
for i in range(0,5):
ratingFound = False
for rating in ratings_distribution:
if rating['RatingValue'] == i + 1:
reviews.append([rating['RatingValue'], rating['Count']])
ratingFound = True
break
if not ratingFound:
reviews.append([i+1, 0])
return reviews[::-1] # reverses list
##########################################
############### CONTAINER : SELLERS
##########################################
def _price(self):
if self._get_product_details():
return self._get_price( self._get_selected_variant()['priceList'])[0]
return self._clean_text( self._get_pricearea_html().xpath('//div[@id="dueToday"]/div[contains(@class,"price")]/text()')[0])
def _price_amount(self):
return self._price()[1:]
def _price_currency(self):
return 'USD'
def _in_stores(self):
return 0
def _site_online(self):
return 1
def _site_online_out_of_stock(self):
if self._get_product_details():
if self._get_selected_variant()['outOfStock']:
return 1
return 0
if self._get_pricearea_html().xpath('//div[@id="addToCartDiv"]'):
return 0
return 1
def _in_stores_out_of_stock(self):
return 0
def _marketplace(self):
return 0
def _seller_from_tree(self):
return None
def _marketplace_sellers(self):
return None
def _marketplace_lowest_price(self):
return None
def _temp_price_cut(self):
if self._get_product_details():
return self._get_price( self._get_selected_variant()['priceList'])[1]
if self._get_pricearea_html().xpath('//div[contains(@class,"pricingregular")]//div[contains(@class,"price")]/text()')[0] != self._price():
return 1
return 0
##########################################
############### CONTAINER : CLASSIFICATION
##########################################
def _categories(self):
#self._url().split('/')[3:-1]
breadcrumbs = self.tree_html.xpath('//div[@ng-controller="breadCrumbsController"]/@ng-init')[0]
return re.findall('"title":"([^"]+)"', breadcrumbs)[:-1]
def _category_name(self):
return self._categories()[-1]
def _brand(self):
self._load_product_json()
return self.product_json['Brand']['Name']
##########################################
################ HELPER FUNCTIONS
##########################################
# clean text inside html tags - remove html entities, trim spaces
def _clean_text(self, text):
text = HTMLParser.HTMLParser().unescape(text)
text = re.sub('[\r\n]', '', text)
return text.strip()
def _get_price(self, price_list):
low_price = None
on_sale = 0
for price_json in price_list:
if price_json['leaseTotalMonths'] == 0:
if price_json['salePrice']:
price = price_json['salePrice']
on_sale = 1
else:
price = price_json['dueToday']
if not low_price or price < low_price:
low_price = price
return ('$' + str(low_price), on_sale)
def _get_selected_variant(self):
if self._get_product_details():
for skuId in self.product_details['skuItems']:
variant_json = self.product_details['skuItems'][skuId]
if variant_json['selectedSku'] or len(self.product_details['skuItems']) == 1:
return variant_json
##########################################
################ RETURN TYPES
##########################################
# dictionaries mapping type of info to be extracted to the method that does it
# also used to define types of data that can be requested to the REST service
DATA_TYPES = { \
# CONTAINER : NONE
"url" : _url, \
"event" : _event, \
"product_id" : _product_id, \
"site_id" : _site_id, \
"status" : _status, \
# CONTAINER : PRODUCT_INFO
"product_name" : _product_name, \
"product_title" : _product_title, \
"title_seo" : _title_seo, \
"model" : _model, \
"upc" : _upc,\
"model_meta" : _model_meta, \
"description" : _description, \
"long_description" : _long_description, \
"variants" : _variants, \
# CONTAINER : PAGE_ATTRIBUTES
"image_count" : _image_count,\
"image_urls" : _image_urls, \
"video_count" : _video_count, \
"video_urls" : _video_urls, \
"pdf_count" : _pdf_count, \
"pdf_urls" : _pdf_urls, \
"wc_360": _wc_360, \
"wc_emc": _wc_emc, \
"wc_video": _wc_video, \
"wc_pdf": _wc_pdf, \
"wc_prodtour": _wc_prodtour, \
"webcollage" : _webcollage, \
"htags" : _htags, \
"keywords" : _keywords, \
"canonical_link": _canonical_link,
# CONTAINER : REVIEWS
"review_count" : _review_count, \
"average_review" : _average_review, \
"max_review" : _max_review, \
"min_review" : _min_review, \
"reviews" : _reviews, \
# CONTAINER : SELLERS
"price" : _price, \
"price_amount" : _price_amount, \
"price_currency" : _price_currency, \
"in_stores" : _in_stores, \
"site_online": _site_online, \
"site_online_out_of_stock": _site_online_out_of_stock, \
"in_stores_out_of_stock": _in_stores_out_of_stock, \
"marketplace" : _marketplace, \
"marketplace_sellers" : _marketplace_sellers, \
"marketplace_lowest_price" : _marketplace_lowest_price, \
"temp_price_cut" : _temp_price_cut, \
# CONTAINER : CLASSIFICATION
"categories" : _categories, \
"category_name" : _category_name, \
"brand" : _brand, \
"loaded_in_seconds" : None, \
}
# special data that can't be extracted from the product page
# associated methods return already built dictionary containing the data
DATA_TYPES_SPECIAL = { \
"mobile_image_same" : _mobile_image_same, \
}
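    # Dispatch sketch (hypothetical driver code): a requested data type is
    # resolved to its extractor through the DATA_TYPES map above, e.g.
    #
    #   scraper = ATTScraper(url='https://www.att.com/some-product.html#sku=sku123')
    #   price = ATTScraper.DATA_TYPES['price'](scraper)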
|
[
"[email protected]"
] | |
1795fb529d885939637490ca274f1a05e3817c80
|
b44a984ac8cfd183e218d56e1ec5d0d3e72d20fd
|
/Binary Search/二分答案/617. Maximum Average Subarray II/binary_search_prefix_sum.py
|
8b867e3902d4d255a55a878fcda65c14538f88da
|
[] |
no_license
|
atomextranova/leetcode-python
|
61381949f2e78805dfdd0fb221f8497b94b7f12b
|
5fce59e6b9c4079b49e2cfb2a6d2a61a0d729c56
|
refs/heads/master
| 2021-07-15T20:32:12.592607 | 2020-09-21T00:10:27 | 2020-09-21T00:10:27 | 207,622,038 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,089 |
py
|
class Solution:
"""
@param nums: an array with positive and negative numbers
@param k: an integer
@return: the maximum average
"""
def maxAverage(self, nums, k):
# write your code here
if not nums:
return 0
start, end = min(nums), max(nums)
# binary search for possible answer
while start + 1e-5 < end:
mid = (start + end) / 2
if self.can_find_larger_mean(nums, mid, k):
start = mid
else:
end = mid
return start
def can_find_larger_mean(self, nums, target_average, k):
# Construct prefix sum with average deducted
prefix_sum = [0]
for num in nums:
prefix_sum.append(prefix_sum[-1] + num - target_average)
min_prefix_sum = 0
for i in range(k, len(nums) + 1):
            # >= 0 means some window ending at i (length >= k) has mean >= target_average
if prefix_sum[i] - min_prefix_sum >= 0:
return True
min_prefix_sum = min(min_prefix_sum, prefix_sum[i - k + 1])
return False
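# Worked example (classic input for this problem; values here are for
# illustration only): deducting a candidate average T from every element turns
# "is there a window of length >= k with mean >= T" into "is there a
# prefix-sum difference >= 0", which can_find_larger_mean checks in O(n)
# while the outer loop binary-searches T.
if __name__ == '__main__':
    # best window of length >= 4 is [12, -5, -6, 50]: (12 - 5 - 6 + 50) / 4 = 12.75
    print(Solution().maxAverage([1, 12, -5, -6, 50, 3], 4))  # ~12.75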
|
[
"[email protected]"
] | |
847cfce4f2586b32847784c08399cc36f2c1c6a0
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/odd_20200715173523.py
|
dce675361390068076286f115c10581f4c208933
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 321 |
py
|
def odd(A):
    # return the value that doesn't have a pair
    # a dictionary to keep track of the number of times
    # an element has been repeated
    newDict = {}
    for i in A:
        if i in newDict:
            newDict[i] += 1
        else:
            newDict[i] = 1
    # the unpaired value is the one seen an odd number of times
    for key, count in newDict.items():
        if count % 2 != 0:
            return key

print(odd([9,3,9,3,9,7,9]))  # 7
|
[
"[email protected]"
] | |
43ccfcb76efbc913bf2c01360150033452d7793c
|
25a27b6e7ad3f7ef90e7b70d7393fcf4b7cc16b0
|
/Pandas_Stats.py
|
b8a04e49bdb8d45ba1ff49fda54cb3a967501103
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
Bhaney44/Patent-Valuation
|
aeef15892fa81be314799ad48d8669d6ad3f3bc6
|
57af26f9ec60c85cbf6217358b70520a7b916189
|
refs/heads/master
| 2021-03-05T19:19:31.799981 | 2020-09-29T05:41:16 | 2020-09-29T05:41:16 | 246,145,615 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 910 |
py
|
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
df = pd.read_csv('Lite_Coin_12_Mos.csv')
print(df.head())
print(df.tail())
print(df.index)
print(df.columns)
print("-----")
print(df.describe())
# print median
print('median')
print(df.median())
print("-----")
#Mode
print('mode')
print(df.mode())
print("-----")
#Variance
##In probability theory and statistics, variance is the expectation of the squared deviation of a random variable from its mean.
##Informally, it measures how far a set of numbers are spread out from their average value.
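# Worked example: for the series [1, 2, 3] the mean is 2, the squared
# deviations are [1, 0, 1], and the sample variance (pandas defaults to
# ddof=1) is (1 + 0 + 1) / (3 - 1) = 1.0.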
print('variance')
print(df.var())
print("-----")
#Co-Variance
print('co-variance')
print(df.cov())
#Cumsum
#print('Cumsum')
#print(df.cumsum())
#Scalar Map
#print('Map')
#print(df.applymap())
#Multiply
#print('Multiply')
#print(df.mul())
#Modulo
#print('Modulo')
#print(df.mod())
|
[
"[email protected]"
] | |
c830c87b455a42a5749d5bbef3033ab9ffb56f2f
|
3d792bcf31843a8329f6c9774a8a58a8c49a8a70
|
/0x06-python-classes/0-square.py
|
3bb5016e742b9094a78be3e986028bbc8b6a28f5
|
[] |
no_license
|
theurikenneth/alx-higher_level_programming
|
a6accbe016bdc62ee3f0e849c8e2c847247fb4d9
|
289a08cffa1bcbecab4550b6fed21296cf88fe66
|
refs/heads/main
| 2023-08-28T08:53:32.676778 | 2021-10-21T06:00:47 | 2021-10-21T06:00:47 | 361,642,411 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 107 |
py
|
#!/usr/bin/python3
"""Defines the square class"""
class Square:
"""Square class. Contains nothing"""
|
[
"[email protected]"
] | |
67286f42c0f5c3196b3760aedd6bbfecb99ed3a6
|
26dcf8e0457156a8bde936d56a59e1099893f8c6
|
/tests/test_init.py
|
55608b06b9def4bb420070c424c03c56bf5aa666
|
[
"MIT"
] |
permissive
|
SilenceWinter/MicroTokenizer
|
fc4212fb9a324e93e707edbe130b518bd782d07a
|
0b617f4b107743f6c7c473a9fac9408d21c56931
|
refs/heads/master
| 2020-03-29T04:31:23.050836 | 2018-09-18T16:40:28 | 2018-09-18T16:40:28 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 968 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import MicroTokenizer
@pytest.mark.parametrize("input_text", pytest.helpers.tokenizer_test_cases())
def test_DAG(input_text):
result = MicroTokenizer.cut_by_DAG(input_text)
pytest.helpers.assert_token_equals(result, input_text)
@pytest.mark.parametrize("input_text", pytest.helpers.tokenizer_test_cases())
def test_HMM(input_text):
result = MicroTokenizer.cut_by_HMM(input_text)
pytest.helpers.assert_token_equals(result, input_text)
@pytest.mark.parametrize("input_text", pytest.helpers.tokenizer_test_cases())
def test_CRF(input_text):
result = MicroTokenizer.cut_by_CRF(input_text)
pytest.helpers.assert_token_equals(result, input_text)
@pytest.mark.parametrize("input_text", pytest.helpers.tokenizer_test_cases())
def test_joint_model(input_text):
result = MicroTokenizer.cut_by_joint_model(input_text)
pytest.helpers.assert_token_equals(result, input_text)
|
[
"[email protected]"
] | |
9f4d4ded1ac98901226e2e651b5c2a77055979bd
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/175/usersdata/267/96307/submittedfiles/lista1.py
|
0d27230ca436ca13864d21627d406bd3d0fc5b36
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 460 |
py
|
# -*- coding: utf-8 -*-
n=int(input('List size: '))
a=[]
par=[]
impar=[]
somaPar=0
contPar=0
somaImpar=0
contImpar=0
for i in range(0,n,1):
    elem=int(input('Enter the element: '))
a.append(elem)
for i in range(0,len(a),1):
if a[i]%2==0:
somaPar=somaPar+a[i]
contPar=contPar+1
else:
somaImpar=somaImpar+a[i]
contImpar=contImpar+1
print (somaPar)
print(contPar)
print()
print(somaImpar)
print(contImpar)
|
[
"[email protected]"
] | |
bb9a5e375a56114623e45ce385c30a820fc1ac0a
|
c54f5a7cf6de3ed02d2e02cf867470ea48bd9258
|
/pyobjc/PyOpenGL-2.0.2.01/OpenGL/__init__.py
|
6efde92c2a11e6c461d897cf6ad1f636d7cff488
|
[] |
no_license
|
orestis/pyobjc
|
01ad0e731fbbe0413c2f5ac2f3e91016749146c6
|
c30bf50ba29cb562d530e71a9d6c3d8ad75aa230
|
refs/heads/master
| 2021-01-22T06:54:35.401551 | 2009-09-01T09:24:47 | 2009-09-01T09:24:47 | 16,895 | 8 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 841 |
py
|
# This is statement is required by the build system to query build info
if __name__ == '__build__':
raise Exception
def __set_attributes():
global __date__, __version__, __build__
import string, os.path
__date__ = string.join(string.split('$Date: 2004/11/14 23:33:24 $')[1:3], ' ')
filename = os.path.join(os.path.dirname(__file__), 'version')
__version__ = string.strip(open(filename).read())
__build__ = int(string.split(__version__, '.')[3])
__set_attributes()
__author__ = 'Tarn Weisner Burton <[email protected]>\nMike C. Fletcher <[email protected]>'
__doc__ = '''This is PyOpenGL 2. For information regarding PyOpenGL see:
http://pyopengl.sourceforge.net
For information on OpenGL see:
http://www.opengl.org'''
from GL._GL__init__ import __numeric_present__, __numeric_support__
|
[
"ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25"
] |
ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25
|
d8853cd5771461c6c01d5fc0487103ad116de00a
|
0cd2333e007e28e390e5f0ed203f60a066585464
|
/python/temba_expressions/gen/ExcellentLexer.py
|
39aec5620f3789f7f279c8d4c8cf75250ca20478
|
[
"BSD-3-Clause"
] |
permissive
|
rapidpro/expressions
|
1215f64ec523cbde6ca358c5173dc4f409f86f3b
|
b03d91ec58fc328960bce90ecb5fa49dcf467627
|
refs/heads/master
| 2022-05-30T06:13:14.613925 | 2019-03-25T16:51:30 | 2019-03-25T16:51:30 | 43,113,391 | 3 | 6 |
BSD-3-Clause
| 2022-05-20T20:51:35 | 2015-09-25T06:24:59 |
Java
|
UTF-8
|
Python
| false | false | 15,373 |
py
|
# Generated from Excellent.g4 by ANTLR 4.7
# encoding: utf-8
from __future__ import print_function
from antlr4 import *
from io import StringIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write(u"\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2")
buf.write(u"\30\u00aa\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6")
buf.write(u"\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4")
buf.write(u"\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t")
buf.write(u"\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27")
buf.write(u"\4\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4")
buf.write(u"\35\t\35\4\36\t\36\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3")
buf.write(u"\6\3\6\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\13")
buf.write(u"\3\f\3\f\3\f\3\r\3\r\3\16\3\16\3\16\3\17\3\17\3\20\3")
buf.write(u"\20\3\21\6\21`\n\21\r\21\16\21a\3\21\3\21\6\21f\n\21")
buf.write(u"\r\21\16\21g\5\21j\n\21\3\22\3\22\3\22\3\22\7\22p\n\22")
buf.write(u"\f\22\16\22s\13\22\3\22\3\22\3\23\3\23\3\23\3\23\3\23")
buf.write(u"\3\24\3\24\3\24\3\24\3\24\3\24\3\25\6\25\u0083\n\25\r")
buf.write(u"\25\16\25\u0084\3\25\3\25\3\25\7\25\u008a\n\25\f\25\16")
buf.write(u"\25\u008d\13\25\3\26\6\26\u0090\n\26\r\26\16\26\u0091")
buf.write(u"\3\26\3\26\3\27\3\27\3\30\3\30\3\30\3\30\3\30\5\30\u009d")
buf.write(u"\n\30\3\31\3\31\3\32\3\32\3\33\3\33\3\34\3\34\3\35\3")
buf.write(u"\35\3\36\3\36\2\2\37\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21")
buf.write(u"\n\23\13\25\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24")
buf.write(u"\'\25)\26+\27-\30/\2\61\2\63\2\65\2\67\29\2;\2\3\2\24")
buf.write(u"\3\2\62;\3\2$$\4\2VVvv\4\2TTtt\4\2WWww\4\2GGgg\4\2HH")
buf.write(u"hh\4\2CCcc\4\2NNnn\4\2UUuu\4\2\60\60aa\5\2\13\f\17\17")
buf.write(u"\"\"T\2C\\\u00c2\u00d8\u00da\u00e0\u0102\u0138\u013b")
buf.write(u"\u0149\u014c\u017f\u0183\u0184\u0186\u018d\u0190\u0193")
buf.write(u"\u0195\u0196\u0198\u019a\u019e\u019f\u01a1\u01a2\u01a4")
buf.write(u"\u01ab\u01ae\u01b5\u01b7\u01be\u01c6\u01cf\u01d1\u01dd")
buf.write(u"\u01e0\u01f0\u01f3\u01f6\u01f8\u01fa\u01fc\u0234\u023c")
buf.write(u"\u023d\u023f\u0240\u0243\u0248\u024a\u0250\u0372\u0374")
buf.write(u"\u0378\u0381\u0388\u038c\u038e\u03a3\u03a5\u03ad\u03d1")
buf.write(u"\u03d6\u03da\u03f0\u03f6\u03f9\u03fb\u03fc\u03ff\u0431")
buf.write(u"\u0462\u0482\u048c\u04cf\u04d2\u0530\u0533\u0558\u10a2")
buf.write(u"\u10c7\u10c9\u10cf\u1e02\u1e96\u1ea0\u1f00\u1f0a\u1f11")
buf.write(u"\u1f1a\u1f1f\u1f2a\u1f31\u1f3a\u1f41\u1f4a\u1f4f\u1f5b")
buf.write(u"\u1f61\u1f6a\u1f71\u1fba\u1fbd\u1fca\u1fcd\u1fda\u1fdd")
buf.write(u"\u1fea\u1fee\u1ffa\u1ffd\u2104\u2109\u210d\u210f\u2112")
buf.write(u"\u2114\u2117\u211f\u2126\u212f\u2132\u2135\u2140\u2141")
buf.write(u"\u2147\u2185\u2c02\u2c30\u2c62\u2c66\u2c69\u2c72\u2c74")
buf.write(u"\u2c77\u2c80\u2c82\u2c84\u2ce4\u2ced\u2cef\u2cf4\ua642")
buf.write(u"\ua644\ua66e\ua682\ua69c\ua724\ua730\ua734\ua770\ua77b")
buf.write(u"\ua788\ua78d\ua78f\ua792\ua794\ua798\ua7af\ua7b2\ua7b3")
buf.write(u"\uff23\uff3cS\2c|\u00b7\u00f8\u00fa\u0101\u0103\u0179")
buf.write(u"\u017c\u0182\u0185\u0187\u018a\u0194\u0197\u019d\u01a0")
buf.write(u"\u01a3\u01a5\u01a7\u01aa\u01af\u01b2\u01b6\u01b8\u01c1")
buf.write(u"\u01c8\u01ce\u01d0\u01f5\u01f7\u01fb\u01fd\u023b\u023e")
buf.write(u"\u0244\u0249\u0295\u0297\u02b1\u0373\u0375\u0379\u037f")
buf.write(u"\u0392\u03d0\u03d2\u03d3\u03d7\u03d9\u03db\u03f5\u03f7")
buf.write(u"\u0461\u0463\u0483\u048d\u04c1\u04c4\u0531\u0563\u0589")
buf.write(u"\u1d02\u1d2d\u1d6d\u1d79\u1d7b\u1d9c\u1e03\u1e9f\u1ea1")
buf.write(u"\u1f09\u1f12\u1f17\u1f22\u1f29\u1f32\u1f39\u1f42\u1f47")
buf.write(u"\u1f52\u1f59\u1f62\u1f69\u1f72\u1f7f\u1f82\u1f89\u1f92")
buf.write(u"\u1f99\u1fa2\u1fa9\u1fb2\u1fb6\u1fb8\u1fb9\u1fc0\u1fc6")
buf.write(u"\u1fc8\u1fc9\u1fd2\u1fd5\u1fd8\u1fd9\u1fe2\u1fe9\u1ff4")
buf.write(u"\u1ff6\u1ff8\u1ff9\u210c\u2115\u2131\u213b\u213e\u213f")
buf.write(u"\u2148\u214b\u2150\u2186\u2c32\u2c60\u2c63\u2c6e\u2c73")
buf.write(u"\u2c7d\u2c83\u2cee\u2cf0\u2cf5\u2d02\u2d27\u2d29\u2d2f")
buf.write(u"\ua643\ua66f\ua683\ua69d\ua725\ua733\ua735\ua77a\ua77c")
buf.write(u"\ua77e\ua781\ua789\ua78e\ua790\ua793\ua797\ua799\ua7ab")
buf.write(u"\ua7fc\uab5c\uab66\uab67\ufb02\ufb08\ufb15\ufb19\uff43")
buf.write(u"\uff5c\b\2\u01c7\u01cd\u01f4\u1f91\u1f9a\u1fa1\u1faa")
buf.write(u"\u1fb1\u1fbe\u1fce\u1ffe\u1ffe#\2\u02b2\u02c3\u02c8\u02d3")
buf.write(u"\u02e2\u02e6\u02ee\u02f0\u0376\u037c\u055b\u0642\u06e7")
buf.write(u"\u06e8\u07f6\u07f7\u07fc\u081c\u0826\u082a\u0973\u0e48")
buf.write(u"\u0ec8\u10fe\u17d9\u1845\u1aa9\u1c7f\u1d2e\u1d6c\u1d7a")
buf.write(u"\u1dc1\u2073\u2081\u2092\u209e\u2c7e\u2c7f\u2d71\u2e31")
buf.write(u"\u3007\u3037\u303d\u3100\ua017\ua4ff\ua60e\ua681\ua69e")
buf.write(u"\ua69f\ua719\ua721\ua772\ua78a\ua7fa\ua7fb\ua9d1\ua9e8")
buf.write(u"\uaa72\uaadf\uaaf5\uaaf6\uab5e\uab61\uff72\uffa1\u00ec")
buf.write(u"\2\u00ac\u00bc\u01bd\u01c5\u0296\u05ec\u05f2\u05f4\u0622")
buf.write(u"\u0641\u0643\u064c\u0670\u0671\u0673\u06d5\u06d7\u06fe")
buf.write(u"\u0701\u0712\u0714\u0731\u074f\u07a7\u07b3\u07ec\u0802")
buf.write(u"\u0817\u0842\u085a\u08a2\u08b4\u0906\u093b\u093f\u0952")
buf.write(u"\u095a\u0963\u0974\u0982\u0987\u098e\u0991\u0992\u0995")
buf.write(u"\u09aa\u09ac\u09b2\u09b4\u09bb\u09bf\u09d0\u09de\u09df")
buf.write(u"\u09e1\u09e3\u09f2\u09f3\u0a07\u0a0c\u0a11\u0a12\u0a15")
buf.write(u"\u0a2a\u0a2c\u0a32\u0a34\u0a35\u0a37\u0a38\u0a3a\u0a3b")
buf.write(u"\u0a5b\u0a5e\u0a60\u0a76\u0a87\u0a8f\u0a91\u0a93\u0a95")
buf.write(u"\u0aaa\u0aac\u0ab2\u0ab4\u0ab5\u0ab7\u0abb\u0abf\u0ad2")
buf.write(u"\u0ae2\u0ae3\u0b07\u0b0e\u0b11\u0b12\u0b15\u0b2a\u0b2c")
buf.write(u"\u0b32\u0b34\u0b35\u0b37\u0b3b\u0b3f\u0b63\u0b73\u0b85")
buf.write(u"\u0b87\u0b8c\u0b90\u0b92\u0b94\u0b97\u0b9b\u0b9c\u0b9e")
buf.write(u"\u0bac\u0bb0\u0bbb\u0bd2\u0c0e\u0c10\u0c12\u0c14\u0c2a")
buf.write(u"\u0c2c\u0c3b\u0c3f\u0c8e\u0c90\u0c92\u0c94\u0caa\u0cac")
buf.write(u"\u0cb5\u0cb7\u0cbb\u0cbf\u0ce0\u0ce2\u0ce3\u0cf3\u0cf4")
buf.write(u"\u0d07\u0d0e\u0d10\u0d12\u0d14\u0d3c\u0d3f\u0d50\u0d62")
buf.write(u"\u0d63\u0d7c\u0d81\u0d87\u0d98\u0d9c\u0db3\u0db5\u0dbd")
buf.write(u"\u0dbf\u0dc8\u0e03\u0e32\u0e34\u0e35\u0e42\u0e47\u0e83")
buf.write(u"\u0e84\u0e86\u0e8c\u0e8f\u0e99\u0e9b\u0ea1\u0ea3\u0ea5")
buf.write(u"\u0ea7\u0ea9\u0eac\u0ead\u0eaf\u0eb2\u0eb4\u0eb5\u0ebf")
buf.write(u"\u0ec6\u0ede\u0ee1\u0f02\u0f49\u0f4b\u0f6e\u0f8a\u0f8e")
buf.write(u"\u1002\u102c\u1041\u1057\u105c\u105f\u1063\u1072\u1077")
buf.write(u"\u1083\u1090\u10fc\u10ff\u124a\u124c\u124f\u1252\u1258")
buf.write(u"\u125a\u125f\u1262\u128a\u128c\u128f\u1292\u12b2\u12b4")
buf.write(u"\u12b7\u12ba\u12c0\u12c2\u12c7\u12ca\u12d8\u12da\u1312")
buf.write(u"\u1314\u1317\u131a\u135c\u1382\u1391\u13a2\u13f6\u1403")
buf.write(u"\u166e\u1671\u1681\u1683\u169c\u16a2\u16ec\u16f3\u16fa")
buf.write(u"\u1702\u170e\u1710\u1713\u1722\u1733\u1742\u1753\u1762")
buf.write(u"\u176e\u1770\u1772\u1782\u17b5\u17de\u1844\u1846\u1879")
buf.write(u"\u1882\u18aa\u18ac\u18f7\u1902\u1920\u1952\u196f\u1972")
buf.write(u"\u1976\u1982\u19ad\u19c3\u19c9\u1a02\u1a18\u1a22\u1a56")
buf.write(u"\u1b07\u1b35\u1b47\u1b4d\u1b85\u1ba2\u1bb0\u1bb1\u1bbc")
buf.write(u"\u1be7\u1c02\u1c25\u1c4f\u1c51\u1c5c\u1c79\u1ceb\u1cee")
buf.write(u"\u1cf0\u1cf3\u1cf7\u1cf8\u2137\u213a\u2d32\u2d69\u2d82")
buf.write(u"\u2d98\u2da2\u2da8\u2daa\u2db0\u2db2\u2db8\u2dba\u2dc0")
buf.write(u"\u2dc2\u2dc8\u2dca\u2dd0\u2dd2\u2dd8\u2dda\u2de0\u3008")
buf.write(u"\u303e\u3043\u3098\u30a1\u30fc\u3101\u312f\u3133\u3190")
buf.write(u"\u31a2\u31bc\u31f2\u3201\u3402\u4db7\u4e02\u9fce\ua002")
buf.write(u"\ua016\ua018\ua48e\ua4d2\ua4f9\ua502\ua60d\ua612\ua621")
buf.write(u"\ua62c\ua62d\ua670\ua6e7\ua7f9\ua803\ua805\ua807\ua809")
buf.write(u"\ua80c\ua80e\ua824\ua842\ua875\ua884\ua8b5\ua8f4\ua8f9")
buf.write(u"\ua8fd\ua927\ua932\ua948\ua962\ua97e\ua986\ua9b4\ua9e2")
buf.write(u"\ua9e6\ua9e9\ua9f1\ua9fc\uaa00\uaa02\uaa2a\uaa42\uaa44")
buf.write(u"\uaa46\uaa4d\uaa62\uaa71\uaa73\uaa78\uaa7c\uaab1\uaab3")
buf.write(u"\uaabf\uaac2\uaac4\uaadd\uaade\uaae2\uaaec\uaaf4\uab08")
buf.write(u"\uab0b\uab10\uab13\uab18\uab22\uab28\uab2a\uab30\uabc2")
buf.write(u"\uabe4\uac02\ud7a5\ud7b2\ud7c8\ud7cd\ud7fd\uf902\ufa6f")
buf.write(u"\ufa72\ufadb\ufb1f\ufb2a\ufb2c\ufb38\ufb3a\ufb3e\ufb40")
buf.write(u"\ufbb3\ufbd5\ufd3f\ufd52\ufd91\ufd94\ufdc9\ufdf2\ufdfd")
buf.write(u"\ufe72\ufe76\ufe78\ufefe\uff68\uff71\uff73\uff9f\uffa2")
buf.write(u"\uffc0\uffc4\uffc9\uffcc\uffd1\uffd4\uffd9\uffdc\uffde")
buf.write(u"\'\2\62;\u0662\u066b\u06f2\u06fb\u07c2\u07cb\u0968\u0971")
buf.write(u"\u09e8\u09f1\u0a68\u0a71\u0ae8\u0af1\u0b68\u0b71\u0be8")
buf.write(u"\u0bf1\u0c68\u0c71\u0ce8\u0cf1\u0d68\u0d71\u0de8\u0df1")
buf.write(u"\u0e52\u0e5b\u0ed2\u0edb\u0f22\u0f2b\u1042\u104b\u1092")
buf.write(u"\u109b\u17e2\u17eb\u1812\u181b\u1948\u1951\u19d2\u19db")
buf.write(u"\u1a82\u1a8b\u1a92\u1a9b\u1b52\u1b5b\u1bb2\u1bbb\u1c42")
buf.write(u"\u1c4b\u1c52\u1c5b\ua622\ua62b\ua8d2\ua8db\ua902\ua90b")
buf.write(u"\ua9d2\ua9db\ua9f2\ua9fb\uaa52\uaa5b\uabf2\uabfb\uff12")
buf.write(u"\uff1b\2\u00b0\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2")
buf.write(u"\t\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21")
buf.write(u"\3\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31")
buf.write(u"\3\2\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3")
buf.write(u"\2\2\2\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2")
buf.write(u"+\3\2\2\2\2-\3\2\2\2\3=\3\2\2\2\5?\3\2\2\2\7A\3\2\2\2")
buf.write(u"\tC\3\2\2\2\13E\3\2\2\2\rG\3\2\2\2\17I\3\2\2\2\21K\3")
buf.write(u"\2\2\2\23M\3\2\2\2\25O\3\2\2\2\27R\3\2\2\2\31U\3\2\2")
buf.write(u"\2\33W\3\2\2\2\35Z\3\2\2\2\37\\\3\2\2\2!_\3\2\2\2#k\3")
buf.write(u"\2\2\2%v\3\2\2\2\'{\3\2\2\2)\u0082\3\2\2\2+\u008f\3\2")
buf.write(u"\2\2-\u0095\3\2\2\2/\u009c\3\2\2\2\61\u009e\3\2\2\2\63")
buf.write(u"\u00a0\3\2\2\2\65\u00a2\3\2\2\2\67\u00a4\3\2\2\29\u00a6")
buf.write(u"\3\2\2\2;\u00a8\3\2\2\2=>\7.\2\2>\4\3\2\2\2?@\7*\2\2")
buf.write(u"@\6\3\2\2\2AB\7+\2\2B\b\3\2\2\2CD\7-\2\2D\n\3\2\2\2E")
buf.write(u"F\7/\2\2F\f\3\2\2\2GH\7,\2\2H\16\3\2\2\2IJ\7\61\2\2J")
buf.write(u"\20\3\2\2\2KL\7`\2\2L\22\3\2\2\2MN\7?\2\2N\24\3\2\2\2")
buf.write(u"OP\7>\2\2PQ\7@\2\2Q\26\3\2\2\2RS\7>\2\2ST\7?\2\2T\30")
buf.write(u"\3\2\2\2UV\7>\2\2V\32\3\2\2\2WX\7@\2\2XY\7?\2\2Y\34\3")
buf.write(u"\2\2\2Z[\7@\2\2[\36\3\2\2\2\\]\7(\2\2] \3\2\2\2^`\t\2")
buf.write(u"\2\2_^\3\2\2\2`a\3\2\2\2a_\3\2\2\2ab\3\2\2\2bi\3\2\2")
buf.write(u"\2ce\7\60\2\2df\t\2\2\2ed\3\2\2\2fg\3\2\2\2ge\3\2\2\2")
buf.write(u"gh\3\2\2\2hj\3\2\2\2ic\3\2\2\2ij\3\2\2\2j\"\3\2\2\2k")
buf.write(u"q\7$\2\2lp\n\3\2\2mn\7$\2\2np\7$\2\2ol\3\2\2\2om\3\2")
buf.write(u"\2\2ps\3\2\2\2qo\3\2\2\2qr\3\2\2\2rt\3\2\2\2sq\3\2\2")
buf.write(u"\2tu\7$\2\2u$\3\2\2\2vw\t\4\2\2wx\t\5\2\2xy\t\6\2\2y")
buf.write(u"z\t\7\2\2z&\3\2\2\2{|\t\b\2\2|}\t\t\2\2}~\t\n\2\2~\177")
buf.write(u"\t\13\2\2\177\u0080\t\7\2\2\u0080(\3\2\2\2\u0081\u0083")
buf.write(u"\5/\30\2\u0082\u0081\3\2\2\2\u0083\u0084\3\2\2\2\u0084")
buf.write(u"\u0082\3\2\2\2\u0084\u0085\3\2\2\2\u0085\u008b\3\2\2")
buf.write(u"\2\u0086\u008a\5/\30\2\u0087\u008a\5;\36\2\u0088\u008a")
buf.write(u"\t\f\2\2\u0089\u0086\3\2\2\2\u0089\u0087\3\2\2\2\u0089")
buf.write(u"\u0088\3\2\2\2\u008a\u008d\3\2\2\2\u008b\u0089\3\2\2")
buf.write(u"\2\u008b\u008c\3\2\2\2\u008c*\3\2\2\2\u008d\u008b\3\2")
buf.write(u"\2\2\u008e\u0090\t\r\2\2\u008f\u008e\3\2\2\2\u0090\u0091")
buf.write(u"\3\2\2\2\u0091\u008f\3\2\2\2\u0091\u0092\3\2\2\2\u0092")
buf.write(u"\u0093\3\2\2\2\u0093\u0094\b\26\2\2\u0094,\3\2\2\2\u0095")
buf.write(u"\u0096\13\2\2\2\u0096.\3\2\2\2\u0097\u009d\5\61\31\2")
buf.write(u"\u0098\u009d\5\63\32\2\u0099\u009d\5\65\33\2\u009a\u009d")
buf.write(u"\5\67\34\2\u009b\u009d\59\35\2\u009c\u0097\3\2\2\2\u009c")
buf.write(u"\u0098\3\2\2\2\u009c\u0099\3\2\2\2\u009c\u009a\3\2\2")
buf.write(u"\2\u009c\u009b\3\2\2\2\u009d\60\3\2\2\2\u009e\u009f\t")
buf.write(u"\16\2\2\u009f\62\3\2\2\2\u00a0\u00a1\t\17\2\2\u00a1\64")
buf.write(u"\3\2\2\2\u00a2\u00a3\t\20\2\2\u00a3\66\3\2\2\2\u00a4")
buf.write(u"\u00a5\t\21\2\2\u00a58\3\2\2\2\u00a6\u00a7\t\22\2\2\u00a7")
buf.write(u":\3\2\2\2\u00a8\u00a9\t\23\2\2\u00a9<\3\2\2\2\r\2agi")
buf.write(u"oq\u0084\u0089\u008b\u0091\u009c\3\b\2\2")
return buf.getvalue()
class ExcellentLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
COMMA = 1
LPAREN = 2
RPAREN = 3
PLUS = 4
MINUS = 5
TIMES = 6
DIVIDE = 7
EXPONENT = 8
EQ = 9
NEQ = 10
LTE = 11
LT = 12
GTE = 13
GT = 14
AMPERSAND = 15
DECIMAL = 16
STRING = 17
TRUE = 18
FALSE = 19
NAME = 20
WS = 21
ERROR = 22
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ u"DEFAULT_MODE" ]
literalNames = [ u"<INVALID>",
u"','", u"'('", u"')'", u"'+'", u"'-'", u"'*'", u"'/'", u"'^'",
u"'='", u"'<>'", u"'<='", u"'<'", u"'>='", u"'>'", u"'&'" ]
symbolicNames = [ u"<INVALID>",
u"COMMA", u"LPAREN", u"RPAREN", u"PLUS", u"MINUS", u"TIMES",
u"DIVIDE", u"EXPONENT", u"EQ", u"NEQ", u"LTE", u"LT", u"GTE",
u"GT", u"AMPERSAND", u"DECIMAL", u"STRING", u"TRUE", u"FALSE",
u"NAME", u"WS", u"ERROR" ]
ruleNames = [ u"COMMA", u"LPAREN", u"RPAREN", u"PLUS", u"MINUS", u"TIMES",
u"DIVIDE", u"EXPONENT", u"EQ", u"NEQ", u"LTE", u"LT",
u"GTE", u"GT", u"AMPERSAND", u"DECIMAL", u"STRING", u"TRUE",
u"FALSE", u"NAME", u"WS", u"ERROR", u"UnicodeLetter",
u"UnicodeClass_LU", u"UnicodeClass_LL", u"UnicodeClass_LT",
u"UnicodeClass_LM", u"UnicodeClass_LO", u"UnicodeDigit" ]
grammarFileName = u"Excellent.g4"
def __init__(self, input=None, output=sys.stdout):
super(ExcellentLexer, self).__init__(input, output=output)
self.checkVersion("4.7")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
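# Usage sketch (hedged; relies only on the antlr4 runtime star-imported above,
# and the expression text is illustrative):
#
#   lexer = ExcellentLexer(InputStream(u"SUM(1, 2) >= contact.age"))
#   tokens = CommonTokenStream(lexer)
#   tokens.fill()
#   for tok in tokens.tokens:
#       print(tok)  # one line per token: NAME, LPAREN, DECIMAL, COMMA, ...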
|
[
"[email protected]"
] | |
c1d33748b6659265d0d0e48b25c94af00752e8c9
|
a7104434e0ddb4575ef0a6cd467bac6620570de8
|
/hunter111.py
|
4726dd46eef032be4a152c39cdce18b129aae670
|
[] |
no_license
|
GauthamAjayKannan/GUVI-1
|
7b276eef3195bec9671eec8bb6bcc588cb5c970e
|
fafabab93df55abcc399f6e2664286ed511fd683
|
refs/heads/master
| 2020-06-25T07:38:08.465414 | 2019-05-17T11:24:53 | 2019-05-17T11:24:53 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
n = int(input())
total = 0  # renamed from 'sum' so the built-in sum() is not shadowed
x = [[int(e) for e in input().split()] for i in range(n)]  # n rows of n integers
for i in range(n):
    total += x[i][(n - 1) - i]  # accumulate the anti-diagonal
print(total)
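# Worked example (assumed input format: n, then n rows of n integers):
#   3
#   1 2 3
#   4 5 6
#   7 8 9
# anti-diagonal = 3 + 5 + 7 = 15, so the script prints 15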
|
[
"[email protected]"
] | |
98f3200707f50fb960acc5eef201747d0b5dfb8a
|
351e2a5ab1a658dcfa6e760fcfb80671d4d95984
|
/ferrua/models/purchase.py
|
8ab59d7972cfc8498c3c4c4064f223afe5a4145d
|
[] |
no_license
|
eneldoserrata/marcos_community_addons
|
02462b006b6c4ece3cfca914bf11d72d9fbd2a0a
|
dfd1f4254c6a59725b32e240f1d654b360c9d7e1
|
refs/heads/master
| 2021-10-11T01:02:08.807210 | 2017-03-09T08:35:54 | 2017-03-09T08:35:54 | 45,713,685 | 4 | 12 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,471 |
py
|
# -*- coding: utf-8 -*-
from openerp import models, fields, api
import openerp.addons.decimal_precision as dp
class PurchaseOrder(models.Model):
_inherit = "purchase.order"
@api.one
def _cal_msi(self):
if self.is_roll_order:
exact = 0
master = 0
lam = 0
for rec in self.roll_order_lines:
if rec.product_roll_id.product_tmpl_id.categ_id.extra_info == "exact":
exact += rec.msi
elif rec.product_roll_id.product_tmpl_id.categ_id.extra_info == "master":
master += rec.msi
elif rec.product_roll_id.product_tmpl_id.categ_id.extra_info == "lamination":
lam += rec.msi
self.msi_sub_exact = exact
self.msi_sub_master = master
self.msi_sub_lam = lam/5
self.msi_sub_total = self.msi_sub_exact+self.msi_sub_master+self.msi_sub_lam
is_roll_order = fields.Boolean("Pedido de rollos", copy=True)
roll_order_lines = fields.One2many("purchase.order.line.roll", "roll_order_id", copy=True)
msi_sub_exact = fields.Float("Subtotal Plan Exact", compute=_cal_msi, default=0)
msi_sub_master = fields.Float("Subtotal Master Rolls", compute=_cal_msi, default=0)
msi_sub_lam = fields.Float(u"Subtotal Laminación", compute=_cal_msi, default=0)
msi_sub_total = fields.Float("Total MSI", compute=_cal_msi, default=0)
@api.multi
def button_confirm(self):
for rec in self:
if rec.is_roll_order:
                rec.order_line.unlink()  # unlink the whole recordset at once instead of a side-effect list comprehension
for roll in rec.roll_order_lines:
new_order_line = self.env["purchase.order.line"].new({"product_id": roll.product_roll_id.id,
"order_id": roll.roll_order_id.id})
new_order_line.onchange_product_id()
new_order_line.product_qty = roll.rolls
new_order_line.price_unit = roll.roll_price
new_order_line.create(new_order_line._convert_to_write(new_order_line._cache))
rec._amount_all()
return super(PurchaseOrder, self).button_confirm()
class PurchaseOrderLine(models.Model):
_name = 'purchase.order.line.roll'
@api.multi
def _cal_msi(self):
for rec in self:
if rec.product_roll_id:
attrs = dict([(att.attribute_id.name, att.name) for att in rec.product_roll_id.attribute_value_ids])
rec.ancho = float(attrs["Banda"])
rec.largo = float(attrs["Largo"])
rec.msi = rec.rolls*rec.ancho*(12*rec.largo/1000)
rec.total_price_msi = rec.msi*rec.price_msi
rec.roll_price = rec.total_price_msi/rec.rolls
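    # Worked example for the formula above (illustrative numbers): 10 rolls
    # with Banda = 6" and Largo = 500' give msi = 10 * 6 * (12 * 500 / 1000)
    # = 360; at price_msi = 2 that makes total_price_msi = 720 and
    # roll_price = 72 per roll.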
roll_order_id = fields.Many2one("purchase.order")
product_roll_id = fields.Many2one('product.product', string='Product', domain=[('purchase_ok', '=', True),('categ_id.extra_info','in',('exact','master','lamination'))], change_default=True, required=True)
rolls = fields.Float("Rollos", default=1)
ancho = fields.Float(u'Ancho"', compute=_cal_msi)
largo = fields.Float(u"Largo'", compute=_cal_msi)
msi = fields.Float("MSI", compute=_cal_msi)
price_msi = fields.Float(string='Precio', required=True, digits=dp.get_precision('Product Price'), default=1)
total_price_msi = fields.Float(string='Total', required=True, digits=dp.get_precision('Product Price'), default=1, compute=_cal_msi)
roll_price = fields.Float(string='Precio por rollo', required=True, digits=dp.get_precision('Product Price'), default=1, compute=_cal_msi)
@api.onchange("rolls")
def onchange_rolls(self):
self._cal_msi()
@api.onchange("price_msi")
def onchange_price_msi(self):
self._cal_msi()
@api.onchange("product_roll_id")
def onchange_roll_order_id(self):
if self.product_roll_id:
attrs = dict([(att.attribute_id.name, att.name) for att in self.product_roll_id.attribute_value_ids])
            if set(attrs.keys()) == {u'Largo', u'Banda'}:  # dict key order is arbitrary, so compare as a set
self._cal_msi()
else:
return {"value": {"product_roll_id": False},
                        'warning': {'title': u"Error de configuración", 'message': u"El rollo seleccionado debe tener las variantes Largo y Banda definidas"}
}
|
[
"[email protected]"
] | |
14aa6d7fea39d00ecd9884e761e2c35165614022
|
930c207e245c320b108e9699bbbb036260a36d6a
|
/BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Return_Fan_Status.py
|
672937506a4f0ced76a2b9ed9e98dedc286475e7
|
[] |
no_license
|
InnovationSE/BRICK-Generated-By-OLGA
|
24d278f543471e1ce622f5f45d9e305790181fff
|
7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2
|
refs/heads/master
| 2021-07-01T14:13:11.302860 | 2017-09-21T12:44:17 | 2017-09-21T12:44:17 | 104,251,784 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 368 |
py
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Fan_Status import Fan_Status
class Return_Fan_Status(Fan_Status):
rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Return_Fan_Status
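# Usage sketch (hedged; assumes rdfalchemy's usual convention that
# rdfSubject.db holds the target rdflib graph):
#
#   rdfSubject.db = Graph()
#   status = Return_Fan_Status()  # new node typed brick:Return_Fan_Status
#   print(rdfSubject.db.serialize(format='turtle'))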
|
[
"[email protected]"
] | |
c6f531b07cc9ab53de8e28529b78d4cb2a3ae124
|
c3e34335fde6c8bec8d86f2c5651a7df55759406
|
/test.py
|
4d359a733dae1a5488f91b8e6f793c487b3a29ae
|
[
"MIT"
] |
permissive
|
fancybian/ner-crf2
|
a2ea23dfc0cf528ff103bf35d6f70b6a70fdad5f
|
e4f4fe973057ee5f6ffcc87c8dddc502c981b9bf
|
refs/heads/master
| 2021-05-14T00:24:14.683097 | 2018-01-07T05:26:06 | 2018-01-07T05:26:06 | 116,538,877 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,576 |
py
|
# -*- coding: utf-8 -*-
"""
Script to test a trained CRF model.
train.py must be used before this to train the CRF.
This file must be called with the same identifier that was used during training.
Example usage:
python test.py --identifier="my_experiment" --mycorpus
python test.py --identifier="my_experiment" --germeval
The first command tests on the corpus set in ARTICLES_FILEPATH.
The second command tests on the germeval corpus, whichs path is defined in GERMEVAL_FILEPATH.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import random
import pycrfsuite
from itertools import chain
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelBinarizer
from model.datasets import load_windows, load_articles, generate_examples, Article
import model.features as features
# All capitalized constants come from this file
import config as cfg
random.seed(42)
def main():
"""Main method to handle command line arguments and then call the testing methods."""
parser = argparse.ArgumentParser()
parser.add_argument("--identifier", required=True,
help="A short name/identifier for your experiment, e.g. 'ex42b'.")
parser.add_argument("--mycorpus", required=False, action="store_const", const=True,
help="Whether to test on your corpus, defined via the constant " \
"ARTICLES_FILEPATH.")
parser.add_argument("--germeval", required=False, action="store_const", const=True,
help="Whether to test on the german eval 2014 corpus.")
args = parser.parse_args()
# test on corpus set in ARTICLES_FILEPATH
if args.mycorpus:
test_on_mycorpus(args)
# test on germeval corpus
if args.germeval:
test_on_germeval(args)
if not args.mycorpus and not args.germeval:
print("Expected either --mycorpus or --germeval flag")
def test_on_mycorpus(args):
"""Tests on the corpus set in ARTICLES_FILEPATH.
Prints a full report, including precision, recall and F1 score per label.
Args:
args: Command line arguments as parsed by argparse.ArgumentParser.
"""
print("Testing on mycorpus (%s)..." % (cfg.ARTICLES_FILEPATH))
test_on_articles(args.identifier, load_articles(cfg.ARTICLES_FILEPATH),
nb_append=cfg.COUNT_WINDOWS_TEST)
def test_on_germeval(args):
"""Tests on the germeval corpus.
The germeval filepath is defined in GERMEVAL_FILEPATH.
See https://sites.google.com/site/germeval2014ner/data .
Args:
args: Command line arguments as parsed by argparse.ArgumentParser.
"""
print("Testing on germeval (%s)..." % (cfg.GERMEVAL_FILEPATH))
test_on_articles(args.identifier, load_germeval(cfg.GERMEVAL_FILEPATH))
def test_on_articles(identifier, articles, nb_append=None):
"""Test a trained CRF model on a list of Article objects (annotated text).
Will print a full classification report by label (f1, precision, recall).
Args:
identifier: Identifier of the trained model to be used.
articles: A list of Article objects or a generator for such a list. May only contain
one single Article object.
"""
print("Loading tagger...")
tagger = pycrfsuite.Tagger()
tagger.open(identifier)
# create feature generators
# this may take a while
print("Creating features...")
feature_generators = features.create_features()
# create window generator
print("Loading windows...")
windows = load_windows(articles, cfg.WINDOW_SIZE, feature_generators, only_labeled_windows=True)
# load feature lists and label lists (X, Y)
# this may take a while
all_feature_values_lists = []
correct_label_chains = []
for fvlist, labels in generate_examples(windows, nb_append=nb_append):
all_feature_values_lists.append(fvlist)
correct_label_chains.append(labels)
# generate predicted chains of labels
print("Testing on %d windows..." % (len(all_feature_values_lists)))
predicted_label_chains = [tagger.tag(fvlists) for fvlists in all_feature_values_lists]
# print classification report (precision, recall, f1)
print(bio_classification_report(correct_label_chains, predicted_label_chains))
def load_germeval(filepath):
"""Loads the source of the gereval 2014 corpus and converts it to a list of Article objects.
Args:
filepath: Filepath to the source file, e.g. "/var/foo/NER-de-test.tsv".
Returns:
List of Article
(will contain only one single Article object).
"""
lines = open(filepath, "r").readlines()
lines = [line.decode("utf-8").strip() for line in lines]
# remove lines that are comments
lines = [line for line in lines if line[0:1] != "#"]
# remove all empty lines
lines = [line for line in lines if len(line) > 0]
sentence = []
sentences = []
for line_idx, line in enumerate(lines):
blocks = line.split("\t")
(number, word, tag1, _) = blocks # 4th block would be tag2
number = int(number)
# if we reach the next sentence, add the previous sentence to the 'sentences' container
        if number == 1 and len(sentence) > 0:
sentences.append(sentence)
sentence = []
        # convert all labels containing OTH (OTHER) to MISC
if "OTH" in tag1:
tag1 = "MISC"
# Add the word in an annotated way if the tag1 looks like one of the labels in the
# allowed labels (config setting LABELS). We don't check for full equality here, because
# that allows BIO tags (e.g. B-PER) to also be accepted. They will automatically be
# normalized by the Token objects (which will also throw away unnormalizable annotations).
# Notice that we ignore tag2 as tag1 is usually the more important one.
contains_label = any([(label in tag1) for label in cfg.LABELS])
is_blacklisted = any([(bl_label in tag1) for bl_label in ["part", "deriv"]])
        if contains_label and not is_blacklisted:
            sentence.append(word + "/" + tag1)
        else:
            sentence.append(word)
    # flush the final sentence after the loop; the old in-loop end-of-file
    # check appended it before its last word had been added
    if len(sentence) > 0:
        sentences.append(sentence)
    return [Article(" ".join(sentence)) for sentence in sentences]
def bio_classification_report(y_true, y_pred):
"""
Classification report for a list of BIO-encoded sequences.
It computes token-level metrics and discards "O" labels.
Note that it requires scikit-learn 0.15+ (or a version from github master)
to calculate averages properly!
Note: This function was copied from
http://nbviewer.ipython.org/github/tpeng/python-crfsuite/blob/master/examples/CoNLL%202002.ipynb
Args:
y_true: True labels, list of strings
y_pred: Predicted labels, list of strings
Returns:
classification report as string
"""
lbin = LabelBinarizer()
y_true_combined = lbin.fit_transform(list(chain.from_iterable(y_true)))
y_pred_combined = lbin.transform(list(chain.from_iterable(y_pred)))
#tagset = set(lbin.classes_) - {NO_NE_LABEL}
tagset = set(lbin.classes_)
tagset = sorted(tagset, key=lambda tag: tag.split('-', 1)[::-1])
class_indices = {cls: idx for idx, cls in enumerate(lbin.classes_)}
return classification_report(
y_true_combined,
y_pred_combined,
labels=[class_indices[cls] for cls in tagset],
target_names=tagset,
)
# ----------------------
if __name__ == "__main__":
main()
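# Minimal usage sketch for bio_classification_report (labels illustrative):
#
#   y_true = [["B-PER", "I-PER", "O"], ["B-LOC", "O"]]
#   y_pred = [["B-PER", "O", "O"], ["B-LOC", "O"]]
#   print(bio_classification_report(y_true, y_pred))
#
# This prints per-label precision/recall/F1; the "O" label is included here
# because the NO_NE_LABEL exclusion above is commented out.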
|
[
"[email protected]"
] | |
5f3eb96b3c84ea9d92c2b90fdd6150f799ed0b11
|
38514eac965860d01633c0121c51e580f2b45554
|
/python/taichi/image/__init__.py
|
d716a480d16a35fed7bc0e063dae8efa98f9bc97
|
[
"MIT"
] |
permissive
|
willFederer/taichi
|
e5befcd8d4760fb2b071ab09b05cf2c9d4975b6e
|
59b37adf83fba5a06583dcf6b9bf2a74a985fe3e
|
refs/heads/master
| 2021-01-25T01:21:49.369480 | 2017-06-01T04:33:49 | 2017-06-01T04:33:49 | 94,750,494 | 1 | 0 | null | 2017-06-19T07:47:59 | 2017-06-19T07:47:59 | null |
UTF-8
|
Python
| false | false | 17 |
py
|
from hdr import *
|
[
"[email protected]"
] | |
bb64d0fbc08d3ddcc68c3f4237687110590e6a79
|
de96be8237ee349bee3659adb34bf12e73334f85
|
/google/domains.py
|
543129b75f1341041bb9899956ece3bb035dc2a3
|
[] |
no_license
|
hezhen/spider-course-4
|
f79e44d6ab1001dbb80bb98ef78e9ecd41b75461
|
02e2f65c5625e02d301e920560918f10769f2d6e
|
refs/heads/master
| 2020-03-12T19:03:32.874875 | 2018-10-05T08:42:57 | 2018-10-05T08:42:57 | 130,776,197 | 43 | 33 | null | null | null | null |
UTF-8
|
Python
| false | false | 453 |
py
|
from lxml import etree
f = open('List of Google domains - Wikipedia.htm', 'rb')  # read-only access is enough
html = f.read()
f.close()
tree = etree.HTML(html)
google_links = []
external_links = tree.xpath('//td/span/a[@class="external text"]')
for external_link in external_links:
link_str = external_link.attrib['href']
if link_str.find('http://google.') != -1:
google_links.append(link_str[7:])
print( '[\"' + '\",\"'.join(google_links) + '\"]')
|
[
"[email protected]"
] | |
57583ef833feb7fed1f2a1302277c5c6f0a9010c
|
6e6785851f2d149faa25f907995a167b4b9a2330
|
/app.py
|
4cab0948530d82f4776386da0b97b2334f0122d3
|
[] |
no_license
|
b1naryth1ef/catify
|
5456929dff5ec2d4e525be5f45e54581455d60d1
|
6b43b17894d8d331be36b6136a4f35023b351416
|
refs/heads/master
| 2020-04-26T15:52:41.036896 | 2013-05-08T07:55:50 | 2013-05-08T07:55:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 960 |
py
|
from flask import Flask, render_template, send_file, request
from StringIO import StringIO
import base64  # os, sys and time were imported but never used
import Image
from face_detect import catify
app = Flask(__name__)
app.secret_key = "change_me"
@app.route('/')
def routeRoot():
return render_template("index.html")
@app.route("/catify", methods=["POST"])
def routeCatify():
if request.method == 'POST':
f = request.files['file']
        if f and '.' in f.filename and f.filename.rsplit('.', 1)[1] in ["png", "jpg"]:  # guard against extensionless names
out = catify(Image.open(f))
img_io = StringIO()
out.save(img_io, 'JPEG', quality=70)
img_io.seek(0)
img_data = base64.b64encode(img_io.read())
return render_template("index.html", imgdata=img_data)#send_file(img_io, mimetype='image/jpeg')
else:
            print "rejected upload:", f.filename
return "Error #1"
return "Error #2"
if __name__ == '__main__':
app.run(debug=True)
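# Template note (an assumption about templates/index.html, which is not shown
# here): the base64 payload is meant to be inlined as a data URI, e.g.
#   <img src="data:image/jpeg;base64,{{ imgdata }}">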
|
[
"[email protected]"
] | |
8af551e427cc06435e45752c8d1c0fe5586808de
|
846a7668ac964632bdb6db639ab381be11c13b77
|
/android/tools/test/connectivity/acts/tests/google/bt/car_bt/BtCarPairedConnectDisconnectTest.py
|
6a695d6f7fb1d7314d170dfe6d5d9c0e1b554119
|
[] |
no_license
|
BPI-SINOVOIP/BPI-A64-Android8
|
f2900965e96fd6f2a28ced68af668a858b15ebe1
|
744c72c133b9bf5d2e9efe0ab33e01e6e51d5743
|
refs/heads/master
| 2023-05-21T08:02:23.364495 | 2020-07-15T11:27:51 | 2020-07-15T11:27:51 | 143,945,191 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,208 |
py
|
#!/usr/bin/env python3.4
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""
Test script to test the connect and disconnect sequence between two devices which can run
SL4A. The script does the following:
Setup:
Clear up the bonded devices on both bluetooth adapters and bond the DUTs to each other.
Test (NUM_TEST_RUNS times):
1. Connect A2dpSink and HeadsetClient
1.1. Check that devices are connected.
2. Disconnect A2dpSink and HeadsetClient
2.1 Check that devices are disconnected.
"""
import time
from acts.test_decorators import test_tracker_info
from acts.test_utils.bt.BluetoothBaseTest import BluetoothBaseTest
from acts.base_test import BaseTestClass
from acts.test_utils.bt import bt_test_utils
from acts.test_utils.bt import BtEnum
from acts import asserts
class BtCarPairedConnectDisconnectTest(BluetoothBaseTest):
def setup_class(self):
self.car = self.android_devices[0]
self.ph = self.android_devices[1]
self.car_bt_addr = self.car.droid.bluetoothGetLocalAddress()
self.ph_bt_addr = self.ph.droid.bluetoothGetLocalAddress()
bt_test_utils.setup_multiple_devices_for_bt_test([self.car, self.ph])
# Pair the devices.
# This call may block until some specified timeout in bt_test_utils.py.
result = bt_test_utils.pair_pri_to_sec(
self.car, self.ph, auto_confirm=False)
asserts.assert_true(result, "pair_pri_to_sec returned false.")
# Check for successful setup of test.
devices = self.car.droid.bluetoothGetBondedDevices()
asserts.assert_equal(
len(devices), 1,
"pair_pri_to_sec succeeded but no bonded devices.")
@test_tracker_info(uuid='b0babf3b-8049-4b64-9125-408efb1bbcd2')
@BluetoothBaseTest.bt_test_wrap
def test_pairing(self):
"""
        Tests if we can connect two devices over the A2DP Sink profile.
Precondition:
1. Devices are paired.
Steps:
1. Set the priority to OFF for all profiles.
2. Initiate connection over A2dp Sink client profile.
Returns:
Pass if True
Fail if False
"""
# Set the priority to OFF for all profiles.
self.car.droid.bluetoothHfpClientSetPriority(
self.ph.droid.bluetoothGetLocalAddress(),
BtEnum.BluetoothPriorityLevel.PRIORITY_OFF.value)
self.ph.droid.bluetoothHspSetPriority(
self.car.droid.bluetoothGetLocalAddress(),
BtEnum.BluetoothPriorityLevel.PRIORITY_OFF.value)
addr = self.ph.droid.bluetoothGetLocalAddress()
if not bt_test_utils.connect_pri_to_sec(
self.car, self.ph,
set([BtEnum.BluetoothProfile.A2DP_SINK.value])):
if not bt_test_utils.is_a2dp_snk_device_connected(self.car, addr):
return False
return True
@test_tracker_info(uuid='a44f13e2-c012-4292-8dd5-9f32a023e297')
@BluetoothBaseTest.bt_test_wrap
def test_connect_disconnect_paired(self):
"""
        Tests if we can connect two devices over Headset and A2DP and then disconnect them successfully.
Precondition:
1. Devices are paired.
Steps:
1. Initiate connection over A2dp Sink and Headset client profiles.
2. Check if the connection succeeded.
Returns:
Pass if True
Fail if False
Priority: 0
"""
NUM_TEST_RUNS = 2
failure = 0
addr = self.ph.droid.bluetoothGetLocalAddress()
for i in range(NUM_TEST_RUNS):
self.log.info("Running test [" + str(i) + "/" + str(NUM_TEST_RUNS)
+ "]")
success = bt_test_utils.connect_pri_to_sec(
self.car, self.ph,
set([
BtEnum.BluetoothProfile.HEADSET_CLIENT.value,
BtEnum.BluetoothProfile.A2DP_SINK.value
]))
# Check if we got connected.
if not success:
self.car.log.info("Not all profiles connected.")
if (bt_test_utils.is_hfp_client_device_connected(self.car,
addr) and
bt_test_utils.is_a2dp_snk_device_connected(self.car,
addr)):
self.car.log.info(
"HFP Client or A2DP SRC connected successfully.")
else:
failure = failure + 1
continue
# Disconnect the devices.
success = bt_test_utils.disconnect_pri_from_sec(
self.car, self.ph, [
BtEnum.BluetoothProfile.HEADSET_CLIENT.value,
BtEnum.BluetoothProfile.A2DP_SINK.value
])
if success is False:
self.car.log.info("Disconnect failed.")
if (bt_test_utils.is_hfp_client_device_connected(self.car,
addr) or
bt_test_utils.is_a2dp_snk_device_connected(self.car,
addr)):
self.car.log.info(
"HFP Client or A2DP SRC failed to disconnect.")
failure = failure + 1
continue
self.log.info("Failure {} total tests {}".format(failure,
NUM_TEST_RUNS))
if failure > 0:
return False
return True
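# Run sketch (hedged; the standard ACTS launcher invocation, config path
# illustrative):
#   act.py -c ~/bt_config.json -tc BtCarPairedConnectDisconnectTest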
|
[
"[email protected]"
] | |
5128534db0c281779b7258ce43806ef7569a716f
|
c22b9c7c4a854ed985e777bcbecd18870439b334
|
/hardway/print.py
|
1206e22d567c83747e1a692a7553958fbbbdc237
|
[
"BSD-3-Clause"
] |
permissive
|
pezy/python_test
|
ceb35a8a63ca8ebe26ffa5c72ace664718c7b328
|
b019a0d9f267b5071c37fc85c9acaf27e9146625
|
refs/heads/master
| 2021-01-19T01:09:20.820202 | 2016-07-30T08:35:15 | 2016-07-30T08:35:15 | 18,096,404 | 0 | 2 | null | null | null | null |
WINDOWS-1252
|
Python
| false | false | 334 |
py
|
# -*- coding: utf-8 -*-
print "Hello, World!"
print "Hello Again"
print "I like typing this"
print "This is fun"
print 'Yet! Printing.'
print "I'd much rather you 'not'."
print 'I "said" do not touch this.'
print "ÖÐÎÄ£¡"
# this is comment
# you can print several items on one line by separating them with ','
print "test just one line,","also one line,","yes good!"
|
[
"[email protected]"
] | |
d7b93c89b2e2a0584c469b21d001ed6aeca48808
|
e0219f54839b1d19a2509d1320d2640c8fe9bb79
|
/zinnia/sitemaps.py
|
128a7a1fe160bfbf74fce522eadd2d57ece876dc
|
[] |
no_license
|
alsoicode/django-blog-zinnia
|
9648bd53e079e9ae1a8a0b64e5ef58821bb54cc2
|
6f015e0944ca60ea0e9cd7c1c2434666f5c544b5
|
refs/heads/master
| 2021-01-17T08:33:34.814990 | 2010-05-11T17:40:08 | 2010-05-11T17:40:08 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,762 |
py
|
"""Sitemaps for Zinnia"""
from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from tagging.models import Tag
from tagging.models import TaggedItem
from zinnia.models import Entry
from zinnia.models import Category
from zinnia.managers import entries_published
from zinnia.managers import authors_published
class EntrySitemap(Sitemap):
"""Sitemap for entries"""
priority = 0.5
changefreq = 'never'
def items(self):
return Entry.published.all()
def lastmod(self, obj):
return obj.last_update
class CategorySitemap(Sitemap):
"""Sitemap for categories"""
changefreq = 'monthly'
def cache(self, categories=[]):
len_entries = float(Entry.published.count())
self.cache_categories = {}
for cat in categories:
self.cache_categories[cat.pk] = cat.entries_published_set().count() / len_entries
def items(self):
categories = Category.objects.all()
self.cache(categories)
return categories
def lastmod(self, obj):
entries = entries_published(obj.entry_set)
if not entries:
return None
return entries[0].creation_date
def priority(self, obj):
priority = 0.5 + self.cache_categories[obj.pk]
if priority > 1.0:
priority = 1.0
return '%.1f' % priority
class AuthorSitemap(Sitemap):
"""Sitemap for authors"""
priority = 0.5
changefreq = 'monthly'
def items(self):
return authors_published()
def lastmod(self, obj):
entries = entries_published(obj.entry_set)
if not entries:
return None
return entries[0].creation_date
def location(self, obj):
return reverse('zinnia_author_detail', args=[obj.username])
class TagSitemap(Sitemap):
"""Sitemap for tags"""
changefreq = 'monthly'
def cache(self, tags=[]):
len_entries = float(Entry.published.count())
self.cache_tags = {}
for tag in tags:
entries = TaggedItem.objects.get_by_model(Entry.published.all(), tag)
self.cache_tags[tag.pk] = (entries, entries.count() / len_entries)
def items(self):
tags = Tag.objects.all()
self.cache(tags)
return tags
def lastmod(self, obj):
entries = self.cache_tags[obj.pk][0]
if not entries:
return None
return entries[0].creation_date
def priority(self, obj):
priority = 0.5 + self.cache_tags[obj.pk][1]
if priority > 1.0:
priority = 1.0
return '%.1f' % priority
def location(self, obj):
return reverse('zinnia_tagged_entry_list', args=[obj.name])
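# Worked example for the priority scaling used above (numbers illustrative):
# a tag or category matched by 30 of 100 published entries gets priority
# 0.5 + 30/100 = 0.8; one matched by every entry is capped at 1.0.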
|
[
"[email protected]"
] | |
eaa874e35cb8e52f4390453e8611fca1b9c5ce04
|
52a00bbbe9cb90e46a913e9ef5facb04d25cf8b7
|
/todolist/tasks/models.py
|
1fbec0968940b51982867f96c8a02c37a1be2c5e
|
[] |
no_license
|
agbin/todoList
|
9852378487e6d80bf6bf944f357d6b6824a4c694
|
3a5fee7d38c3f55c2f2432d5a47fc619cf2cfce0
|
refs/heads/master
| 2020-03-18T17:30:02.649979 | 2018-09-30T15:13:32 | 2018-09-30T15:13:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 824 |
py
|
from django.db import models
from colorfield.fields import ColorField
class Category(models.Model):
name = models.CharField(
verbose_name="Name",
max_length=120
)
color = ColorField(default='#FF0000')
class Meta:
verbose_name = "Category"
verbose_name_plural = "Categories"
def __str__(self):
return self.name
class Task(models.Model):
category = models.ForeignKey(
Category,
verbose_name="Category",
on_delete=models.CASCADE
)
name = models.CharField(
verbose_name="Name",
max_length=120
)
description = models.TextField(
verbose_name="Description"
)
class Meta:
verbose_name = "Task"
verbose_name_plural = "Task"
def __str__(self):
return self.name
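# Usage sketch (e.g. from `python manage.py shell`; values illustrative):
#
#   work = Category.objects.create(name="Work", color="#00FF00")
#   Task.objects.create(category=work, name="Write report",
#                       description="Quarterly summary")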
|
[
"[email protected]"
] |