filename | text
---|---|
the-stack_0_25516
|
import speech_recognition as spr
for index, name in enumerate(spr.Microphone.list_microphone_names()):
print("Microphone with name \"{1}\" found for `Microphone(device_index={0})`".format(index, name))
import pyttsx3
import pyaudio
import webbrowser
import datetime
import wikipedia
import time
import pywhatkit
import wolframalpha
import os
import cv2
import pyautogui
import requests
import bs4
import pandas
from ecapture import ecapture as ec
from pyowm import OWM
import re
import sys
import numpy as np
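# Initialise the SAPI5 text-to-speech engine and select the first installed voice.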
shihab = pyttsx3.init('sapi5')
voices = shihab.getProperty('voices')
shihab.setProperty('voice', voices[0].id)
def intro():
hour = int(datetime.datetime.now().hour)
if (hour != 0):
reply("Assalamu Alaikum ! I am SK voice assistant , I am waiting to help you!")
def reply(text):
shihab.say(text)
shihab.runAndWait()
#def Hello():
# reply("Hello ! Shihab Sir , Assalamu Alaikum")
def Greetings():
hour = int(datetime.datetime.now().hour)
if hour >= 0 and hour < 10:
greeting = 'Good morning , shihab!'
elif hour >= 10 and hour < 15:
greeting = 'Good afternoon!'
    elif hour >= 18:
        greeting = 'Good night'
else:
greeting = 'Good evening!'
reply(greeting)
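# Listen once on the default microphone and return the text recognised by Google Speech Recognition
# (the word 'shihab' is stripped from the result before it is returned).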
def order():
    global command
    command = ''  # fallback so the function still returns a string if recognition fails
listener = spr.Recognizer()
with spr.Microphone() as source:
listener.adjust_for_ambient_noise(source)
print('I am listening...')
reply('I am listening!!')
voice = listener.listen(source)
try:
#command = None # (<-- !!!)
command = listener.recognize_google(voice)
print(command)
if 'shihab' in command:
command = command.replace('shihab', '')
except Exception as e:
print("Try again , sir!")
pass
return command
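# Main dispatcher: take one spoken command and run the matching feature
# (Wikipedia/WolframAlpha lookup, YouTube playback, camera, screenshot, weather, web search, ...).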
def msk():
command = order()
if 'hello' in command:
a=command.replace('hello' , '')
reply("Hi , How are you? I am waiting for your order")
time.sleep(3)
elif 'about you' in command:
        reply('Assalamu Alaikum. I am SK voice assistant and I was developed by Md. Jafril Alam Shihab. He is a student of CSE at Khulna University of Engineering and Technology. My features are awesome!')
time.sleep(2)
elif ('my friend') in command:
reply("What is the name of your friend")
name1 = order()
reply('welcome and best wishes Mr ' + name1)
time.sleep(3)
elif 'SK' in command:
#a=command.replace('hello' , '')
reply("Lots of love for you!!")
time.sleep(2)
elif 'infopedia' in command:
reply('Sir , Ask me any question.')
ask = order()
app_id = "EVEEQL-J3RQPHTQ8W"
client = wolframalpha.Client('EVEEQL-J3RQPHTQ8W')
result = client.query(ask)
answer = next(result.results).text
print(answer)
reply(answer)
time.sleep(3)
elif 'time' in command:
a = command.replace('time','')
strTime = datetime.datetime.now().strftime("%H:%M:%S")
reply(f"the time is {strTime}")
time.sleep(3)
    # 'play movie' must be checked before the plain 'play' branch, otherwise it can never match
    elif 'play movie' in command:
        movie = command.replace('play movie', '')
        reply('playing ' + movie)
        pywhatkit.playonyt(movie)
        time.sleep(3)
    elif 'play' in command:
        song = command.replace('play', '')
        reply('playing ' + song)
        pywhatkit.playonyt(song)
        time.sleep(3)
elif 'map' in command:
content = command.replace('map', '')
url = 'https://www.google.com/maps/place/' + content + '/&'
webbrowser.get().open(url)
reply(content)
time.sleep(3)
elif 'shut up' in command:
command.replace('shut up','')
reply("Good bye , Md. Jafril Alam shihab ! You are welcomed ")
exit()
elif 'search' in command:
        content = command.replace('search', '')
reply('Searching your content , wait' + content)
url = 'https://www.google.com/search?q=' + content
webbrowser.get().open(url)
reply("Your result has found!")
time.sleep(3)
elif 'open browser' in command:
command.replace('open browser', '')
reply('Opening our browser ')
webbrowser.open('https://www.google.com.bd/')
webbrowser.get('mozilla')
time.sleep(3)
elif 'camera' in command:
print("Sir ,Camera is opening!")
reply("Sir ,Camera is opening!")
reply("Sir ,What is the file name?")
name = order()
img = cv2.VideoCapture(0)
image = img.read()[1]
#display = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imshow("Camera",image)
if cv2.waitKey(0) == ord('s'):
cv2.imwrite(f"{name}.jpg",image)
cv2.destroyAllWindows()
time.sleep(3)
elif 'videocamera' in command:
print("Sir ,video Camera is opening!")
reply("Sir ,video Camera is opening!")
#reply("Sir , say name of the file ")
#name = order()
img = cv2.VideoCapture(0)
while (True):
ret , frame = img.read()
shot = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('frame', shot)
if cv2.waitKey(1) == ord('s'):
#cv2.imwrite(f"{name}.png",img)
break
img.release()
cv2.destroyAllWindows()
time.sleep(3)
elif 'screenshot' in command:
reply("And the file name would be...")
name = order()
p=pyautogui.screenshot(f"desktop{name}.png")
reply("The screenshot has captured!!")
#ss.save(r"C:\Users\asus\Pictures\Saved Pictures\{name}.png")
#os.startfile(r"C:\Users\asus\PycharmProjects\SK_assistant\venv\{name}")
time.sleep(3)
elif 'weather of' in command:
city = command.replace('weather of', '')
owm = OWM(API_key='170f3e42dec9a08a4ea288e533ca533c')
obs = owm.weather_at_place(city)
w = obs.get_weather()
w_status = w.get_status()
w_t = w.get_temperature(unit='celsius')
w_wi = w.get_wind()
w_humidity = w.get_humidity()
w_cld = w.get_clouds()
print('Weather of %s is %s . Temperature is %0.4f . Wind speed is %0.2f and humidity is %0.2f percent and cloud is %d' % (city, w_status, w_t['temp'], w_wi['speed'], w_humidity,w_cld))
reply('Weather of %s is %s . Temperature is %0.4f . Wind speed is %0.2f and humidity is %0.2f percent and cloud is %d' % (city, w_status, w_t['temp'], w_wi['speed'], w_humidity,w_cld))
time.sleep(3)
    elif 'wikipedia' in command or any(phrase in command for phrase in (
            'when did', 'when do', 'when does', 'what is', 'what do', 'what does',
            'what did', 'what are', 'who is', 'who are', 'who does', 'who did',
            'where is', 'where are', 'where do', 'where does', 'where did')):
        reply('Searching Wikipedia...')
        # strip the question words so only the topic is passed to Wikipedia
        output = command
        for phrase in ('wikipedia', 'what is', 'what are', 'what do', 'what did', 'what does',
                       'who is', 'who are', 'who does', 'who did', 'where is', 'where are',
                       'where do', 'where does', 'where did', 'when do', 'when does', 'when did'):
            output = output.replace(phrase, '')
        search_results = wikipedia.summary(output, 10)
        print(search_results)
        reply(search_results)
time.sleep(3)
else:
        reply('Oops !! I could not find it, searching the web instead')
pywhatkit.search(command)
#Hello()
intro()
Greetings()
while True:
msk()
time.sleep(10)
|
the-stack_0_25517
|
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
import binascii
import copy
import os
import random
import re
import time
from datetime import datetime
from datetime import timedelta
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms
from cryptography.hazmat.primitives.ciphers import Cipher
from cryptography.hazmat.primitives.ciphers import modes
from svtplay_dl.error import ServiceError
from svtplay_dl.error import UIException
from svtplay_dl.fetcher import VideoRetriever
from svtplay_dl.subtitle import subtitle
from svtplay_dl.utils.http import get_full_url
from svtplay_dl.utils.output import ETA
from svtplay_dl.utils.output import output
from svtplay_dl.utils.output import progress_stream
from svtplay_dl.utils.output import progressbar
class HLSException(UIException):
def __init__(self, url, message):
self.url = url
super().__init__(message)
class LiveHLSException(HLSException):
def __init__(self, url):
super().__init__(url, "This is a live HLS stream, and they are not supported.")
def hlsparse(config, res, url, **kwargs):
streams = {}
if not res:
return streams
if res.status_code > 400:
streams[0] = ServiceError(f"Can't read HLS playlist. {res.status_code}")
return streams
m3u8 = M3U8(res.text)
keycookie = kwargs.pop("keycookie", None)
authorization = kwargs.pop("authorization", None)
httpobject = kwargs.pop("httpobject", None)
output = kwargs.pop("output", None)
channels = kwargs.pop("channels", None)
codec = kwargs.pop("codec", "h264")
media = {}
subtitles = {}
segments = None
if m3u8.master_playlist:
for i in m3u8.master_playlist:
audio_url = None
vcodec = None
chans = None
if i["TAG"] == "EXT-X-MEDIA":
if "AUTOSELECT" in i and (i["AUTOSELECT"].upper() == "YES"):
if i["TYPE"] and i["TYPE"] != "SUBTITLES":
if "URI" in i:
if segments is None:
segments = True
if i["GROUP-ID"] not in media:
media[i["GROUP-ID"]] = []
if "CHANNELS" in i:
if i["CHANNELS"] == "6":
chans = "51"
media[i["GROUP-ID"]].append([i["URI"], chans])
else:
segments = False
if i["TYPE"] == "SUBTITLES":
if "URI" in i:
if i["GROUP-ID"] not in subtitles:
subtitles[i["GROUP-ID"]] = []
item = [i["URI"], i["LANGUAGE"]]
if item not in subtitles[i["GROUP-ID"]]:
subtitles[i["GROUP-ID"]].append(item)
continue
elif i["TAG"] == "EXT-X-STREAM-INF":
if "AVERAGE-BANDWIDTH" in i:
bit_rate = float(i["AVERAGE-BANDWIDTH"]) / 1000
else:
bit_rate = float(i["BANDWIDTH"]) / 1000
if "CODECS" in i:
if i["CODECS"][:3] == "hvc":
vcodec = "hevc"
if i["CODECS"][:3] == "avc":
vcodec = "h264"
if "AUDIO" in i and (i["AUDIO"] in media):
chans = media[i["AUDIO"]][0][1]
audio_url = get_full_url(media[i["AUDIO"]][0][0], url)
urls = get_full_url(i["URI"], url)
else:
continue # Needs to be changed to utilise other tags.
chans = chans if audio_url else channels
codec = vcodec if vcodec else codec
streams[int(bit_rate)] = HLS(
copy.copy(config),
urls,
bit_rate,
cookies=res.cookies,
keycookie=keycookie,
authorization=authorization,
audio=audio_url,
output=output,
segments=bool(segments),
channels=chans,
codec=codec,
**kwargs,
)
if subtitles and httpobject:
for sub in list(subtitles.keys()):
for n in subtitles[sub]:
m3u8s = M3U8(httpobject.request("get", get_full_url(n[0], url), cookies=res.cookies).text)
if "cmore" in url:
subtype = "wrstsegment" # this have been seen in tv4play
else:
subtype = "wrst"
streams[int(random.randint(1, 40))] = subtitle(
copy.copy(config),
subtype,
get_full_url(m3u8s.media_segment[0]["URI"], url),
subfix=n[1],
output=copy.copy(output),
m3u8=m3u8s,
)
elif m3u8.media_segment:
config.set("segments", False)
streams[0] = HLS(
copy.copy(config),
url,
0,
cookies=res.cookies,
keycookie=keycookie,
authorization=authorization,
output=output,
segments=False,
)
else:
streams[0] = ServiceError("Can't find HLS playlist in m3u8 file.")
return streams
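# Retriever that downloads the HLS media segments (decrypting AES-128 content when needed)
# and writes them to a .ts file, optionally fetching a separate audio playlist.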
class HLS(VideoRetriever):
@property
def name(self):
return "hls"
def download(self):
self.output_extention = "ts"
if self.segments:
if self.audio and not self.config.get("only_video"):
self._download(self.audio, file_name=(copy.copy(self.output), "audio.ts"))
if not self.config.get("only_audio"):
self._download(self.url, file_name=(self.output, "ts"))
else:
# Ignore audio
self.audio = None
self._download(self.url, file_name=(self.output, "ts"))
def _download(self, url, file_name):
cookies = self.kwargs.get("cookies", None)
start_time = time.time()
m3u8 = M3U8(self.http.request("get", url, cookies=cookies).text)
key = None
def random_iv():
return os.urandom(16)
file_d = output(file_name[0], self.config, file_name[1])
if file_d is None:
return
if "EXT-X-MAP" in m3u8.media_segment[0]:
entry = {"URI": get_full_url(m3u8.media_segment[0]["EXT-X-MAP"]["URI"], url), "EXTINF": {"duration": 0}}
if "EXT-X-KEY" in m3u8.media_segment[0]:
entry["EXT-X-KEY"] = {"URI": m3u8.media_segment[0]["EXT-X-KEY"]["URI"]}
m3u8.media_segment.insert(0, entry)
hls_time_stamp = self.kwargs.pop("hls_time_stamp", False)
decryptor = None
size_media = len(m3u8.media_segment)
eta = ETA(size_media)
total_duration = 0
duration = 0
max_duration = 0
for index, i in enumerate(m3u8.media_segment):
if "duration" in i["EXTINF"]:
duration = i["EXTINF"]["duration"]
max_duration = max(max_duration, duration)
total_duration += duration
item = get_full_url(i["URI"], url)
if not self.config.get("silent"):
if self.config.get("live"):
progressbar(size_media, index + 1, "".join(["DU: ", str(timedelta(seconds=int(total_duration)))]))
else:
eta.increment()
progressbar(size_media, index + 1, "".join(["ETA: ", str(eta)]))
data = self.http.request("get", item, cookies=cookies)
if data.status_code == 404:
break
data = data.content
if m3u8.encrypted:
headers = {}
if self.keycookie:
keycookies = self.keycookie
else:
keycookies = cookies
if self.authorization:
headers["authorization"] = self.authorization
# Update key/decryptor
if "EXT-X-KEY" in i:
keyurl = get_full_url(i["EXT-X-KEY"]["URI"], url)
if keyurl and keyurl[:4] == "skd:":
raise HLSException(keyurl, "Can't decrypt beacuse of DRM")
key = self.http.request("get", keyurl, cookies=keycookies, headers=headers).content
iv = binascii.unhexlify(i["EXT-X-KEY"]["IV"][2:].zfill(32)) if "IV" in i["EXT-X-KEY"] else random_iv()
backend = default_backend()
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=backend)
decryptor = cipher.decryptor()
                # In some cases the playlist says it is encrypted but the files are not.
                # This happens on svtplay 5.1ch streams, where the data starts with ID3..
                # The other prefixes are headers for mpeg-ts files; the third byte is 10 or 11..
if data[:3] != b"ID3" and data[:3] != b"\x47\x40\x11" and data[:3] != b"\x47\x40\x10" and data[4:12] != b"ftypisom":
if decryptor:
data = decryptor.update(data)
else:
raise ValueError("No decryptor found for encrypted hls steam.")
file_d.write(data)
if self.config.get("capture_time") > 0 and total_duration >= self.config.get("capture_time") * 60:
break
if (size_media == (index + 1)) and self.config.get("live"):
sleep_int = (start_time + max_duration * 2) - time.time()
if sleep_int > 0:
time.sleep(sleep_int)
size_media_old = size_media
while size_media_old == size_media:
start_time = time.time()
if hls_time_stamp:
end_time_stamp = (datetime.utcnow() - timedelta(minutes=1, seconds=max_duration * 2)).replace(microsecond=0)
start_time_stamp = end_time_stamp - timedelta(minutes=1)
base_url = url.split(".m3u8")[0]
url = f"{base_url}.m3u8?in={start_time_stamp.isoformat()}&out={end_time_stamp.isoformat()}?"
new_m3u8 = M3U8(self.http.request("get", url, cookies=cookies).text)
for n_m3u in new_m3u8.media_segment:
if not any(d["URI"] == n_m3u["URI"] for d in m3u8.media_segment):
m3u8.media_segment.append(n_m3u)
size_media = len(m3u8.media_segment)
if size_media_old == size_media:
time.sleep(max_duration)
file_d.close()
if not self.config.get("silent"):
progress_stream.write("\n")
self.finished = True
class M3U8:
# Created for hls version <=7
# https://tools.ietf.org/html/rfc8216
MEDIA_SEGMENT_TAGS = ("EXTINF", "EXT-X-BYTERANGE", "EXT-X-DISCONTINUITY", "EXT-X-KEY", "EXT-X-MAP", "EXT-X-PROGRAM-DATE-TIME", "EXT-X-DATERANGE")
MEDIA_PLAYLIST_TAGS = (
"EXT-X-TARGETDURATION",
"EXT-X-MEDIA-SEQUENCE",
"EXT-X-DISCONTINUITY-SEQUENCE",
"EXT-X-ENDLIST",
"EXT-X-PLAYLIST-TYPE",
"EXT-X-I-FRAMES-ONLY",
)
MASTER_PLAYLIST_TAGS = ("EXT-X-MEDIA", "EXT-X-STREAM-INF", "EXT-X-I-FRAME-STREAM-INF", "EXT-X-SESSION-DATA", "EXT-X-SESSION-KEY")
MEDIA_OR_MASTER_PLAYLIST_TAGS = ("EXT-X-INDEPENDENT-SEGMENTS", "EXT-X-START")
TAG_TYPES = {"MEDIA_SEGMENT": 0, "MEDIA_PLAYLIST": 1, "MASTER_PLAYLIST": 2}
def __init__(self, data):
self.version = None
self.media_segment = []
self.media_playlist = {}
self.master_playlist = []
self.encrypted = False
self.independent_segments = False
self.parse_m3u(data)
def __str__(self):
return "Version: {}\nMedia Segment: {}\nMedia Playlist: {}\nMaster Playlist: {}\nEncrypted: {}\tIndependent_segments: {}".format(
self.version,
self.media_segment,
self.media_playlist,
self.master_playlist,
self.encrypted,
self.independent_segments,
)
def parse_m3u(self, data):
if not data.startswith("#EXTM3U"):
raise ValueError("Does not appear to be an 'EXTM3U' file.")
data = data.replace("\r\n", "\n")
lines = data.split("\n")[1:]
last_tag_type = None
tag_type = None
media_segment_info = {}
for index, l in enumerate(lines):
if not l:
continue
elif l.startswith("#EXT"):
info = {}
tag, attr = _get_tag_attribute(l)
if tag == "EXT-X-VERSION":
self.version = int(attr)
# 4.3.2. Media Segment Tags
elif tag in M3U8.MEDIA_SEGMENT_TAGS:
tag_type = M3U8.TAG_TYPES["MEDIA_SEGMENT"]
# 4.3.2.1. EXTINF
if tag == "EXTINF":
if "," in attr:
dur, title = attr.split(",", 1)
else:
dur = attr
title = None
info["duration"] = float(dur)
info["title"] = title
# 4.3.2.2. EXT-X-BYTERANGE
elif tag == "EXT-X-BYTERANGE":
if "@" in attr:
n, o = attr.split("@", 1)
info["n"], info["o"] = (int(n), int(o))
else:
info["n"] = int(attr)
info["o"] = 0
# 4.3.2.3. EXT-X-DISCONTINUITY
elif tag == "EXT-X-DISCONTINUITY":
pass
# 4.3.2.4. EXT-X-KEY
elif tag == "EXT-X-KEY":
self.encrypted = True
info = _get_tuple_attribute(attr)
# 4.3.2.5. EXT-X-MAP
elif tag == "EXT-X-MAP":
info = _get_tuple_attribute(attr)
# 4.3.2.6. EXT-X-PROGRAM-DATE-TIME"
elif tag == "EXT-X-PROGRAM-DATE-TIME":
info = attr
# 4.3.2.7. EXT-X-DATERANGE
elif tag == "EXT-X-DATERANGE":
info = _get_tuple_attribute(attr)
media_segment_info[tag] = info
# 4.3.3. Media Playlist Tags
elif tag in M3U8.MEDIA_PLAYLIST_TAGS:
tag_type = M3U8.TAG_TYPES["MEDIA_PLAYLIST"]
# 4.3.3.1. EXT-X-TARGETDURATION
if tag == "EXT-X-TARGETDURATION":
info = int(attr)
# 4.3.3.2. EXT-X-MEDIA-SEQUENCE
elif tag == "EXT-X-MEDIA-SEQUENCE":
info = int(attr)
# 4.3.3.3. EXT-X-DISCONTINUITY-SEQUENCE
elif tag == "EXT-X-DISCONTINUITY-SEQUENCE":
info = int(attr)
# 4.3.3.4. EXT-X-ENDLIST
elif tag == "EXT-X-ENDLIST":
break
# 4.3.3.5. EXT-X-PLAYLIST-TYPE
elif tag == "EXT-X-PLAYLIST-TYPE":
info = attr
# 4.3.3.6. EXT-X-I-FRAMES-ONLY
elif tag == "EXT-X-I-FRAMES-ONLY":
pass
self.media_playlist[tag] = info
# 4.3.4. Master Playlist Tags
elif tag in M3U8.MASTER_PLAYLIST_TAGS:
tag_type = M3U8.TAG_TYPES["MASTER_PLAYLIST"]
# 4.3.4.1. EXT-X-MEDIA
if tag == "EXT-X-MEDIA":
info = _get_tuple_attribute(attr)
# 4.3.4.2. EXT-X-STREAM-INF
elif tag == "EXT-X-STREAM-INF":
info = _get_tuple_attribute(attr)
if "BANDWIDTH" not in info:
raise ValueError("Can't find 'BANDWIDTH' in 'EXT-X-STREAM-INF'")
info["URI"] = lines[index + 1]
# 4.3.4.3. EXT-X-I-FRAME-STREAM-INF
elif tag == "EXT-X-I-FRAME-STREAM-INF":
info = _get_tuple_attribute(attr)
# 4.3.4.4. EXT-X-SESSION-DATA
elif tag == "EXT-X-SESSION-DATA":
info = _get_tuple_attribute(attr)
# 4.3.4.5. EXT-X-SESSION-KEY
elif tag == "EXT-X-SESSION-KEY":
self.encrypted = True
info = _get_tuple_attribute(attr)
info["TAG"] = tag
self.master_playlist.append(info)
# 4.3.5. Media or Master Playlist Tags
elif tag in M3U8.MEDIA_OR_MASTER_PLAYLIST_TAGS:
tag_type = M3U8.TAG_TYPES["MEDIA_PLAYLIST"]
# 4.3.5.1. EXT-X-INDEPENDENT-SEGMENTS
if tag == "EXT-X-INDEPENDENT-SEGMENTS":
self.independent_segments = True
# 4.3.5.2. EXT-X-START
elif tag == "EXT-X-START":
info = _get_tuple_attribute(attr)
self.media_playlist[tag] = info
# Unused tags
else:
pass
# This is a comment
elif l.startswith("#"):
pass
# This must be a url/uri
else:
tag_type = None
if last_tag_type is M3U8.TAG_TYPES["MEDIA_SEGMENT"]:
media_segment_info["URI"] = l
self.media_segment.append(media_segment_info)
media_segment_info = {}
last_tag_type = tag_type
if self.media_segment and self.master_playlist:
raise ValueError("This 'M3U8' file contains data for both 'Media Segment' and 'Master Playlist'. This is not allowed.")
def _get_tag_attribute(line):
line = line[1:]
try:
search_line = re.search(r"^([A-Z\-]*):(.*)", line)
return search_line.group(1), search_line.group(2)
except Exception:
return line, None
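# Parse a comma-separated ATTRIBUTE=VALUE list (commas inside quotes are preserved) into a dict.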
def _get_tuple_attribute(attribute):
attr_tuple = {}
for art_l in re.split(""",(?=(?:[^'"]|'[^']*'|"[^"]*")*$)""", attribute):
if art_l:
name, value = art_l.split("=", 1)
name = name.strip()
# Checks for attribute name
if not re.match(r"^[A-Z0-9\-]*$", name):
raise ValueError("Not a valid attribute name.")
# Remove extra quotes of string
if value.startswith('"') and value.endswith('"'):
value = value[1:-1]
attr_tuple[name] = value
return attr_tuple
|
the-stack_0_25518
|
from copy import deepcopy
import json
import pytest
import time
import threading
import requests
from tests.conftest import (
ARXIV_URL,
PDF_EXTRACTOR_URL,
API_KEY,
CAT,
MAX_RESULTS,
TIME_STEP,
MAX_CONCURRENT_REQUEST_THREADS
)
def test_constructor_success(arxiv_parser):
assert arxiv_parser
assert arxiv_parser._arxiv_url == ARXIV_URL
assert arxiv_parser._pdf_extractor_url == PDF_EXTRACTOR_URL
assert arxiv_parser._api_key == API_KEY
assert arxiv_parser._cat == CAT
assert arxiv_parser._max_results == MAX_RESULTS
assert arxiv_parser._time_step == TIME_STEP
assert arxiv_parser._max_concurrent_request_threads == MAX_CONCURRENT_REQUEST_THREADS
    assert arxiv_parser._run_thread is None
    assert arxiv_parser._conc_req_num_obs_thread is None
    assert arxiv_parser._stopping is None
assert arxiv_parser._request_threads_list == []
assert type(arxiv_parser._fetch_lock) == type(threading.Lock())
assert type(arxiv_parser._pdfs_data_lock) == type(threading.Lock())
assert type(arxiv_parser._thread_being_modified_lock) == type(threading.Lock())
assert type(
arxiv_parser._run_new_request_thread_permission
) == type(arxiv_parser.RunNewRequestThreadPermission())
def test__fetch_atom_feed_from_arxiv_api(arxiv_parser, feed, mocker):
# Success
mocker.patch('src.core.arxiv_parser.request.urlopen', return_value = feed)
assert arxiv_parser._fetch_atom_feed_from_arxiv_api() == feed
def test__extract_pdf_metadatas_from_atom_feed(arxiv_parser, feed, pdf_metadatas_reference):
# Success
pdf_metadatas = arxiv_parser._extract_pdf_metadatas_from_atom_feed(feed)
assert pdf_metadatas == pdf_metadatas_reference
# Empty atom feed
pdf_metadatas = arxiv_parser._extract_pdf_metadatas_from_atom_feed(b'svdfvdfv')
assert pdf_metadatas == []
def test__fetch_new_pdf_data(arxiv_parser, feed, pdf_metadatas_reference, mocker):
# Success
mocker.patch(
'src.core.arxiv_parser.ArXivParser._fetch_atom_feed_from_arxiv_api',
side_effect = [feed, None]
)
expected_feed_extraction_results = [pdf_metadatas_reference[0], pdf_metadatas_reference[1]]
mocker.patch(
'src.core.arxiv_parser.ArXivParser._extract_pdf_metadatas_from_atom_feed',
side_effect = [expected_feed_extraction_results, None]
)
expected_extraction_results = deepcopy(expected_feed_extraction_results)
for i in range(len(expected_extraction_results)):
expected_extraction_results[i]['references'] = []
mocker.patch(
'src.core.arxiv_parser.ArXivParser._process_metadatas_batch'
)
mocked_push_to_task_queue = mocker.patch(
'src.core.arxiv_parser.ArXivParser._push_to_task_queue',
)
arxiv_parser._fetch_new_pdf_data()
mocked_push_to_task_queue.assert_called_once()
def test__request_extraction(
arxiv_parser,
pdf_metadatas_reference,
pdf_extractor_full_response_reference,
mocker
):
# Success
post_mocker = mocker.patch(
'src.core.arxiv_parser.requests.post',
return_value = pdf_extractor_full_response_reference
)
authenticator = requests.auth.HTTPBasicAuth('api_key', arxiv_parser._api_key)
mocker.patch(
'src.core.arxiv_parser.requests.auth.HTTPBasicAuth',
return_value = authenticator
)
pdfs_data = []
arxiv_parser._request_extraction(pdf_metadatas_reference[0], pdfs_data)
post_mocker.assert_called_once_with(
arxiv_parser._pdf_extractor_url,
data = json.dumps(pdf_metadatas_reference[0], indent=4, sort_keys=True),
headers={'Content-Type':'application/json'},
auth=authenticator,
timeout=120
)
def test__push_to_task_queue():
assert True
def test__run(arxiv_parser, mocker):
def side_effect():
time.sleep(0.1)
fetch_mocker = mocker.patch(
'src.core.arxiv_parser.ArXivParser._fetch_new_pdf_data',
side_effect = side_effect
)
arxiv_parser._time_step = 0
run_thread = threading.Thread(target=arxiv_parser._run, args=())
run_thread.start()
time.sleep(0.3)
arxiv_parser._stopping = True
assert fetch_mocker.call_count >= 1
run_thread.join(0.5)
assert not run_thread.is_alive()
def test_start_stop_cron(arxiv_parser, mocker):
mocker.patch('src.core.arxiv_parser.ArXivParser._fetch_new_pdf_data')
arxiv_parser._time_step = 0.1
arxiv_parser.start_cron()
assert arxiv_parser._run_thread.is_alive()
assert arxiv_parser._conc_req_num_obs_thread.is_alive()
arxiv_parser.stop_cron()
assert not arxiv_parser._run_thread
assert not arxiv_parser._conc_req_num_obs_thread
assert not arxiv_parser._stopping
|
the-stack_0_25519
|
# -*- coding: utf-8 -*-
from __future__ import with_statement, print_function, absolute_import
import json
import requests
from requests_oauthlib import OAuth1
from trello.board import Board
from trello.card import Card
from trello.trellolist import List
from trello.organization import Organization
from trello.member import Member
from trello.webhook import WebHook
from trello.exceptions import *
from trello.label import Label
from trello.star import Star
try:
# PyOpenSSL works around some issues in python ssl modules
# In particular in python < 2.7.9 and python < 3.2
# It is not a hard requirement, so it's not listed in requirements.txt
# More info https://urllib3.readthedocs.org/en/latest/security.html#insecureplatformwarning
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except:
pass
class TrelloClient(object):
""" Base class for Trello API access """
def __init__(self, api_key, api_secret=None, token=None, token_secret=None, http_service=requests):
"""
Constructor
:api_key: API key generated at https://trello.com/1/appKey/generate
:api_secret: the secret component of api_key
:token: OAuth token generated by the user in
trello.util.create_oauth_token
:token_secret: the OAuth client secret for the given OAuth token
"""
# client key and secret for oauth1 session
if token or token_secret:
self.oauth = OAuth1(client_key=api_key, client_secret=api_secret,
resource_owner_key=token, resource_owner_secret=token_secret)
else:
self.oauth = None
self.public_only = token is None
self.api_key = api_key
self.api_secret = api_secret
self.resource_owner_key = token
self.resource_owner_secret = token_secret
self.http_service = http_service
def info_for_all_boards(self, actions):
"""
Use this if you want to retrieve info for all your boards in one swoop
"""
if self.public_only:
return None
else:
json_obj = self.fetch_json(
'/members/me/boards/all',
query_params={'actions': actions})
self.all_info = json_obj
def logout(self):
"""Log out of Trello."""
# TODO: This function.
raise NotImplementedError()
def list_boards(self, board_filter="all"):
"""
Returns all boards for your Trello user
:return: a list of Python objects representing the Trello boards.
:rtype: list of Board
Each board has the following noteworthy attributes:
- id: the board's identifier
- name: Name of the board
- desc: Description of the board (optional - may be missing from the
returned JSON)
- closed: Boolean representing whether this board is closed or not
- url: URL to the board
"""
json_obj = self.fetch_json('/members/me/boards/?filter=%s' % board_filter)
return [Board.from_json(self, json_obj=obj) for obj in json_obj]
def list_organizations(self):
"""
Returns all organizations for your Trello user
:return: a list of Python objects representing the Trello organizations.
:rtype: list of Organization
Each organization has the following noteworthy attributes:
- id: the organization's identifier
- name: Name of the organization
- desc: Description of the organization (optional - may be missing from the
returned JSON)
- closed: Boolean representing whether this organization is closed or not
- url: URL to the organization
"""
json_obj = self.fetch_json('members/me/organizations')
return [Organization.from_json(self, obj) for obj in json_obj]
def get_organization(self, organization_id):
"""Get organization
:rtype: Organization
"""
obj = self.fetch_json('/organizations/' + organization_id)
return Organization.from_json(self, obj)
def get_board(self, board_id):
"""Get board
:rtype: Board
"""
obj = self.fetch_json('/boards/' + board_id)
return Board.from_json(self, json_obj=obj)
def add_board(self, board_name, source_board=None, organization_id=None, permission_level='private',
default_lists=True):
"""Create board
:param board_name: Name of the board to create
:param source_board: Optional Board to copy
:param permission_level: Permission level, defaults to private
:rtype: Board
"""
post_args = {'name': board_name, 'prefs_permissionLevel': permission_level}
if source_board is not None:
post_args['idBoardSource'] = source_board.id
if organization_id is not None:
post_args['idOrganization'] = organization_id
if not default_lists:
post_args['defaultLists'] = False
obj = self.fetch_json('/boards', http_method='POST',
post_args=post_args)
return Board.from_json(self, json_obj=obj)
def get_member(self, member_id):
"""Get member
:rtype: Member
"""
return Member(self, member_id).fetch()
def get_card(self, card_id):
"""Get card
:rtype: Card
"""
card_json = self.fetch_json('/cards/' + card_id)
list_json = self.fetch_json('/lists/' + card_json['idList'])
board = self.get_board(card_json['idBoard'])
return Card.from_json(List.from_json(board, list_json), card_json)
def get_list(self, list_id):
"""Get list
:rtype: List
"""
list_json = self.fetch_json('/lists/' + list_id)
board = self.get_board(list_json['idBoard'])
return List.from_json(board, list_json)
def get_label(self, label_id, board_id):
"""Get Label
Requires the parent board id the label is on
:rtype: Label
"""
board = self.get_board(board_id)
label_json = self.fetch_json('/labels/' + label_id)
return Label.from_json(board, label_json)
def fetch_json(
self,
uri_path,
http_method='GET',
headers=None,
query_params=None,
post_args=None,
files=None):
""" Fetch some JSON from Trello """
# explicit values here to avoid mutable default values
if headers is None:
headers = {}
if query_params is None:
query_params = {}
if post_args is None:
post_args = {}
# if files specified, we don't want any data
data = None
if files is None:
data = json.dumps(post_args)
# set content type and accept headers to handle JSON
if http_method in ("POST", "PUT", "DELETE") and not files:
headers['Content-Type'] = 'application/json; charset=utf-8'
headers['Accept'] = 'application/json'
# construct the full URL without query parameters
if uri_path[0] == '/':
uri_path = uri_path[1:]
url = 'https://api.trello.com/1/%s' % uri_path
if self.oauth is None:
query_params['key'] = self.api_key
query_params['token'] = self.api_secret
# perform the HTTP requests, if possible uses OAuth authentication
response = self.http_service.request(http_method, url, params=query_params,
headers=headers, data=data,
auth=self.oauth, files=files)
if response.status_code == 401:
raise Unauthorized("%s at %s" % (response.text, url), response)
if response.status_code != 200:
raise ResourceUnavailable("%s at %s" % (response.text, url), response)
return response.json()
def list_hooks(self, token=None):
"""
Returns a list of all hooks associated with a specific token. If you don't pass in a token,
it tries to use the token associated with the TrelloClient object (if it exists)
"""
token = token or self.resource_owner_key
if token is None:
raise TokenError("You need to pass an auth token in to list hooks.")
else:
url = "/tokens/%s/webhooks" % token
return self._existing_hook_objs(self.fetch_json(url), token)
def _existing_hook_objs(self, hooks, token):
"""
Given a list of hook dicts passed from list_hooks, creates
the hook objects
"""
all_hooks = []
for hook in hooks:
new_hook = WebHook(self, token, hook['id'], hook['description'],
hook['idModel'],
hook['callbackURL'], hook['active'])
all_hooks.append(new_hook)
return all_hooks
def create_hook(self, callback_url, id_model, desc=None, token=None):
"""
Creates a new webhook. Returns the WebHook object created.
There seems to be some sort of bug that makes you unable to create a
hook using httplib2, so I'm using urllib2 for that instead.
"""
token = token or self.resource_owner_key
if token is None:
raise TokenError("You need to pass an auth token in to create a hook.")
url = "https://trello.com/1/tokens/%s/webhooks/" % token
data = {'callbackURL': callback_url, 'idModel': id_model,
'description': desc}
response = self.http_service.post(url, data=data, auth=self.oauth)
if response.status_code == 200:
hook_id = response.json()['id']
return WebHook(self, token, hook_id, desc, id_model, callback_url, True)
else:
return False
def search(self, query, partial_match=False, models=[],
board_ids=[], org_ids=[], card_ids=[]):
"""
Search trello given a query string.
:param str query: A query string up to 16K characters
:param bool partial_match: True means that trello will look for
content that starts with any of the words in your query.
:param list models: Comma-separated list of types of objects to search.
This can be 'actions', 'boards', 'cards', 'members',
or 'organizations'. The default is 'all' models.
:param list board_ids: Comma-separated list of boards to limit search
:param org_ids: Comma-separated list of organizations to limit search
:param card_ids: Comma-separated list of cards to limit search
:return: All objects matching the search criterial. These can
be Cards, Boards, Organizations, and Members. The attributes
of the objects in the results are minimal; the user must call
the fetch method on the resulting objects to get a full set
of attributes populated.
:rtype list:
"""
query_params = {'query': query}
if partial_match:
query_params['partial'] = 'true'
# Limit search to one or more object types
if models:
query_params['modelTypes'] = models
# Limit search to a particular subset of objects
if board_ids:
query_params['idBoards'] = board_ids
if org_ids:
query_params['idOrganizations'] = org_ids
if card_ids:
query_params['idCards'] = card_ids
# Request result fields required to instantiate class objects
query_params['board_fields'] = ['name,url,desc,closed']
query_params['member_fields'] = ['fullName,initials,username']
query_params['organization_fields'] = ['name,url,desc']
json_obj = self.fetch_json('/search', query_params=query_params)
if not json_obj:
return []
results = []
board_cache = {}
for board_json in json_obj.get('boards', []):
# Cache board objects
if board_json['id'] not in board_cache:
board_cache[board_json['id']] = Board.from_json(
self, json_obj=board_json)
results.append(board_cache[board_json['id']])
for card_json in json_obj.get('cards', []):
# Cache board objects
if card_json['idBoard'] not in board_cache:
board_cache[card_json['idBoard']] = Board(
self, card_json['idBoard'])
# Fetch the board attributes as the Board object created
# from the card initially result lacks a URL and name.
# This Board will be stored in Card.parent
board_cache[card_json['idBoard']].fetch()
results.append(Card.from_json(board_cache[card_json['idBoard']],
card_json))
for member_json in json_obj.get('members', []):
results.append(Member.from_json(self, member_json))
for org_json in json_obj.get('organizations', []):
org = Organization.from_json(self, org_json)
results.append(org)
return results
def list_stars(self):
"""
Returns all boardStars for your Trello user
:return: a list of Python objects representing the Trello board stars.
:rtype: list of Board Stars
Each board has the following noteworthy attributes:
- id: the board star's identifier
- idBoard: ID of starred board
- pos: position of the board star
"""
json_obj = self.fetch_json('/members/me/boardStars')
return [Star.from_json(json_obj=obj) for obj in json_obj]
def add_star(self, board_id, position="bottom"):
"""Create a star
        :param board_id: Id of the board to star
:param position: Optional position of the board star
:rtype: Star
"""
post_args = {'idBoard': board_id, 'pos': position}
obj = self.fetch_json('members/me/boardStars', http_method='POST',
post_args=post_args)
return Star.from_json(json_obj=obj)
def delete_star(self, star):
"""Deletes a star
        :param star: the board star to delete
:rtype: Star
"""
self.fetch_json('members/me/boardStars/{}'.format(star.id), http_method='DELETE')
return star
|
the-stack_0_25522
|
from panda3d.core import *
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from direct.showbase import PythonUtil
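# Client-side distributed object: forwards WelcomeValley zone id requests to the server
# and routes the response back through a callback context.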
class WelcomeValleyManager(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('WelcomeValleyManager')
neverDisable = 1
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
def generate(self):
        if base.cr.welcomeValleyManager is not None:
base.cr.welcomeValleyManager.delete()
base.cr.welcomeValleyManager = self
DistributedObject.DistributedObject.generate(self)
return
def disable(self):
self.ignore(ToontownGlobals.SynchronizeHotkey)
base.cr.welcomeValleyManager = None
DistributedObject.DistributedObject.disable(self)
return
def delete(self):
self.ignore(ToontownGlobals.SynchronizeHotkey)
base.cr.welcomeValleyManager = None
DistributedObject.DistributedObject.delete(self)
return
def requestZoneId(self, origZoneId, callback):
context = self.getCallbackContext(callback)
self.sendUpdate('requestZoneIdMessage', [origZoneId, context])
def requestZoneIdResponse(self, zoneId, context):
self.doCallbackContext(context, [zoneId])
|
the-stack_0_25523
|
""""Contains views to register, login reset password and logout user"""
from flask import Blueprint, request, jsonify
from flask_jwt_extended import jwt_required, get_jwt_identity
from app.models import Business, Review
from app.utils import (
require_json, check_missing_field, remove_more_spaces)
from app.base_view import BaseView
rev = Blueprint('rev', __name__,
url_prefix='/api/v1/businesses/<int:business_id>/reviews')
class ReviewManipulation(BaseView):
"""Method to manipulate business endpoints"""
@jwt_required
@require_json
def post(self, business_id):
"""Endpoint to save the data to the database"""
data = request.get_json()
description = data.get('description')
rating = data.get('rating')
current_user = get_jwt_identity()
data = dict(description=description, rating=rating)
if check_missing_field(**data):
return jsonify(check_missing_field(**data)), 422
description = remove_more_spaces(description)
        business = Business.query.filter_by(id=business_id).first()
        if not business:
            response = {'message': f'Business with id {business_id} does not exist'}
            return jsonify(response), 404
if business.user_id == current_user:
response = {'message': 'The operation is forbidden for' +
' own business'}
return jsonify(response), 403
review = Review(description, rating, business_id, current_user)
review.save()
response = {'message': 'Review for business with id' +
f' {business_id} created'}
return jsonify(response), 201
review_view = ReviewManipulation.as_view('reviews')
rev.add_url_rule('', view_func=review_view, methods=['POST'])
|
the-stack_0_25525
|
import random
import torch
from PIL import Image
from glob import glob
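# Dataset that pairs each Places2 image with a randomly picked mask;
# __getitem__ returns (masked image, mask, ground-truth image).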
class Places2(torch.utils.data.Dataset):
def __init__(self, img_root, mask_root, img_transform, mask_transform,
split='train'):
super(Places2, self).__init__()
self.img_transform = img_transform
self.mask_transform = mask_transform
# use about 8M images in the challenge dataset
if split == 'train':
self.paths = glob('{:s}/data_large/**/*.jpg'.format(img_root),
recursive=True)
else:
#self.paths = glob('C:/Users/yatha/OneDrive/Desktop/Github_clones/pytorch-inpainting-with-partial-conv/data/data_large/*.jpg')
self.paths = glob('{:s}/{:s}_large/*'.format(img_root, split))
#self.mask_paths = glob('C:/Users/yatha/OneDrive/Desktop/Github_clones/pytorch-inpainting-with-partial-conv/data/mask_root/*.jpg')
self.mask_paths = glob('{:s}/*.jpg'.format(mask_root))
self.N_mask = len(self.mask_paths)
def __getitem__(self, index):
gt_img = Image.open(self.paths[index])
gt_img = self.img_transform(gt_img.convert('RGB'))
mask = Image.open(self.mask_paths[random.randint(0, self.N_mask - 1)])
mask = self.mask_transform(mask.convert('RGB'))
return gt_img * mask, mask, gt_img
def __len__(self):
return len(self.paths)
|
the-stack_0_25526
|
import sys
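# Punctuation characters that are replaced by spaces before the text is split into words.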
DELIMITERS = ". , ; : ? $ @ ^ < > # % ` ! * - = ( ) [ ] { } / \" '".split()
def load_text(filename):
"""
Load lines from a plain-text file and return these as a list, with
trailing newlines stripped.
"""
with open(filename, "r", encoding="utf-8") as input_fd:
lines = input_fd.read().splitlines()
return lines
def save_word_counts(filename, counts):
"""
Save a list of [word, count, percentage] lists to a file, in the form
"word count percentage", one tuple per line.
"""
with open(filename, 'w', encoding="utf-8") as output:
for count in counts:
output.write("%s\n" % " ".join(str(c) for c in count))
def load_word_counts(filename):
"""
Load a list of (word, count, percentage) tuples from a file where each
line is of the form "word count percentage". Lines starting with # are
ignored.
"""
counts = []
with open(filename, "r", encoding="utf-8") as input_fd:
for line in input_fd:
if not line.startswith("#"):
fields = line.split()
counts.append((fields[0], int(fields[1]), float(fields[2])))
return counts
def update_word_counts(line, counts):
"""
Given a string, parse the string and update a dictionary of word
counts (mapping words to counts of their frequencies). DELIMITERS are
removed before the string is parsed. The function is case-insensitive
and words in the dictionary are in lower-case.
"""
for purge in DELIMITERS:
line = line.replace(purge, " ")
words = line.split()
for word in words:
word = word.lower().strip()
if word in counts:
counts[word] += 1
else:
counts[word] = 1
def calculate_word_counts(lines):
"""
Given a list of strings, parse each string and create a dictionary of
word counts (mapping words to counts of their frequencies). DELIMITERS
are removed before the string is parsed. The function is
case-insensitive and words in the dictionary are in lower-case.
"""
counts = {}
for line in lines:
update_word_counts(line, counts)
return counts
def word_count_dict_to_tuples(counts, decrease=True):
"""
Given a dictionary of word counts (mapping words to counts of their
frequencies), convert this into an ordered list of tuples (word,
    count). The list is ordered by decreasing count when decrease is True
    (the default), otherwise by increasing count.
"""
return sorted(list(counts.items()), key=lambda key_value: key_value[1],
reverse=decrease)
def filter_word_counts(counts, min_length=1):
"""
Given a list of (word, count) tuples, create a new list with only
    those tuples whose word is at least min_length characters long.
"""
stripped = []
for (word, count) in counts:
if len(word) >= min_length:
stripped.append((word, count))
return stripped
def calculate_percentages(counts):
"""
Given a list of (word, count) tuples, create a new list (word, count,
percentage) where percentage is the percentage number of occurrences
of this word compared to the total number of words.
"""
total = 0
for count in counts:
total += count[1]
tuples = [(word, count, (float(count) / total) * 100.0)
for (word, count) in counts]
return tuples
def word_count(input_file, output_file, min_length=1):
"""
Load a file, calculate the frequencies of each word in the file and
save in a new file the words, counts and percentages of the total in
descending order. Only words whose length is >= min_length are
included.
"""
lines = load_text(input_file)
counts = calculate_word_counts(lines)
sorted_counts = word_count_dict_to_tuples(counts)
sorted_counts = filter_word_counts(sorted_counts, min_length)
percentage_counts = calculate_percentages(sorted_counts)
save_word_counts(output_file, percentage_counts)
if __name__ == '__main__':
input_file = sys.argv[1]
output_file = sys.argv[2]
min_length = 1
if len(sys.argv) > 3:
min_length = int(sys.argv[3])
word_count(input_file, output_file, min_length)
|
the-stack_0_25527
|
# Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import setuptools
import sys
from setuptools.command.install import install
# Import Pipenv. We support multiple versions.
try:
from pipenv.project import Project
try:
# Pipenv 2022.4.8
from pipenv.utils.dependencies import convert_deps_to_pip
except:
# Older Pipenv
from pipenv.utils import convert_deps_to_pip
except:
exit_msg = (
"pipenv is required to package Streamlit. Please install pipenv and try again."
)
sys.exit(exit_msg)
VERSION = "1.10.0" # PEP-440
NAME = "streamlit"
DESCRIPTION = "The fastest way to build data apps in Python"
LONG_DESCRIPTION = (
"Streamlit's open-source app framework is the easiest way "
"for data scientists and machine learning engineers to "
"create beautiful, performant apps in only a few hours! "
"All in pure Python. All for free."
)
pipfile = Project(chdir=False).parsed_pipfile
packages = pipfile["packages"].copy()
requirements = convert_deps_to_pip(packages, r=False)
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = "verify that the git tag matches our version"
def run(self):
tag = os.getenv("CIRCLE_TAG")
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
setuptools.setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url="https://streamlit.io",
project_urls={
"Source": "https://github.com/streamlit/streamlit",
},
author="Streamlit Inc",
author_email="[email protected]",
python_requires=">=3.7",
license="Apache 2",
# PEP 561: https://mypy.readthedocs.io/en/stable/installed_packages.html
package_data={"streamlit": ["py.typed", "hello/**/*.py"]},
packages=setuptools.find_packages(exclude=["tests", "tests.*"]),
# Requirements
install_requires=requirements,
zip_safe=False, # install source files not egg
include_package_data=True, # copy html and friends
entry_points={"console_scripts": ["streamlit = streamlit.cli:main"]},
# For Windows so that streamlit * commands work ie.
# - streamlit version
# - streamlit hello
scripts=["bin/streamlit.cmd"],
cmdclass={
"verify": VerifyVersionCommand,
},
)
|
the-stack_0_25529
|
#!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Utility methods, for compatibility between Python version
:author: Thomas Calmant
:copyright: Copyright 2020, Thomas Calmant
:license: Apache License 2.0
:version: 0.4.1
..
Copyright 2020 Thomas Calmant
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
# ------------------------------------------------------------------------------
# Module version
__version_info__ = (0, 4, 1)
__version__ = ".".join(str(x) for x in __version_info__)
# Documentation strings format
__docformat__ = "restructuredtext en"
# ------------------------------------------------------------------------------
if sys.version_info[0] < 3:
# Python 2
# pylint: disable=E1101
import types
try:
STRING_TYPES = (
types.StringType,
types.UnicodeType
)
except NameError:
# Python built without unicode support
STRING_TYPES = (types.StringType,)
NUMERIC_TYPES = (
types.IntType,
types.LongType,
types.FloatType
)
def to_bytes(string):
"""
Converts the given string into bytes
"""
# pylint: disable=E0602
if type(string) is unicode:
return str(string)
return string
def from_bytes(data):
"""
Converts the given bytes into a string
"""
if type(data) is str:
return data
return str(data)
else:
# Python 3
# pylint: disable=E1101
STRING_TYPES = (
bytes,
str
)
NUMERIC_TYPES = (
int,
float
)
def to_bytes(string):
"""
Converts the given string into bytes
"""
if type(string) is bytes:
return string
return bytes(string, "UTF-8")
def from_bytes(data):
"""
Converts the given bytes into a string
"""
if type(data) is str:
return data
return str(data, "UTF-8")
# ------------------------------------------------------------------------------
# Enumerations
try:
import enum
def is_enum(obj):
"""
Checks if an object is from an enumeration class
:param obj: Object to test
:return: True if the object is an enumeration item
"""
return isinstance(obj, enum.Enum)
except ImportError:
# Pre-Python 3.4
def is_enum(_):
"""
Before Python 3.4, enumerations didn't exist.
:param _: Object to test
:return: Always False
"""
return False
# ------------------------------------------------------------------------------
# Common
DictType = dict
ListType = list
TupleType = tuple
ITERABLE_TYPES = (
list,
set, frozenset,
tuple
)
VALUE_TYPES = (
bool,
type(None)
)
PRIMITIVE_TYPES = STRING_TYPES + NUMERIC_TYPES + VALUE_TYPES
|
the-stack_0_25530
|
import glob
import cv2
import pickle
import numpy as np
struct = {}
info = np.loadtxt('./list.csv', dtype=str, delimiter=',')
for item in info[1:]:
if not item[0] in struct:
struct[item[0]] = []
struct[item[0]].append((int(item[1]), int(item[2]), item[3]))
blk_list = [
'0f8deb9380bed80d9c93c28b146f3b71',
'0d0f07bc631a890372f7a920fad9edd4'
]
imgs = {}
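# Paint a filled disc of radius r at (x, y) whose brightness falls off as a Gaussian of width sigma.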
def draw(m, x, y, sigma, r):
src = 255
for i in range(0, r):
for alpha in range(0, 256):
dx = 1.*i*np.cos(alpha/128.*np.pi)
dy = 1.*i*np.sin(alpha/128.*np.pi)
dx = int(dx)
dy = int(dy)
try:
m[x+dx,y+dy] = src*np.exp(-1./2*(i**2)/(sigma**2))
except IndexError:
continue
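# For every annotated frame (except the blacklisted ones): load the two merged images,
# render the first annotated point as a Gaussian blob, and attach a binary label
# (0 for 'noise'/'ghost'/'pity', 1 otherwise), then pickle the whole collection.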
for fn in struct:
if fn in blk_list:
continue
img1 = cv2.imread('./merge/%s_b.jpg' % (fn),-1)
img2 = cv2.imread('./merge/%s_c.jpg' % (fn),-1)
imgx = np.zeros_like(img1)
x = struct[fn][0][0]
try:
y = struct[fn][0][1]
except:
print(struct[fn])
raise
draw(imgx, y, x, 2, 8)
label = struct[fn][0][2]
rl = 1
if label in ['noise','ghost','pity']:
rl = 0
imgs[fn] = [
img1, img2, imgx, rl
]
with open('merged_data_2.pk','wb') as f:
pickle.dump(imgs, f)
import IPython
IPython.embed()
|
the-stack_0_25534
|
import logging
from abc import abstractmethod
from mpi4py import MPI
from ..communication.gRPC.grpc_comm_manager import GRPCCommManager
from ..communication.message import Message
from ..communication.mpi.com_manager import MpiCommunicationManager
from ..communication.mqtt.mqtt_comm_manager import MqttCommManager
from ..communication.observer import Observer
from ..communication.trpc.trpc_comm_manager import TRPCCommManager
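# Abstract client-side communication manager: picks an MPI/MQTT/gRPC/TRPC backend and
# dispatches incoming messages to the handlers registered by subclasses.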
class ClientManager(Observer):
def __init__(self, args, comm=None, rank=0, size=0):
self.args = args
self.size = size
self.rank = rank
self.backend = args.communication_method
if args.communication_method == "MPI":
self.com_manager = MpiCommunicationManager(comm, rank, size, node_type="client")
elif args.communication_method == "MQTT":
self.com_manager = MqttCommManager(
args.communication_host, args.communication_port, client_id=rank, client_num=size - 1)
elif args.communication_method == "GRPC":
self.com_manager = GRPCCommManager(
args.communication_host, args.communication_port + rank, ip_config_path=args.grpc_ipconfig_path,
client_id=rank, client_num=size - 1
)
elif args.communication_method == "TRPC":
self.com_manager = TRPCCommManager(args.trpc_master_config_path, process_id=rank, world_size=size)
else:
self.com_manager = MpiCommunicationManager(comm, rank, size, node_type="client")
self.com_manager.add_observer(self)
self.message_handler_dict = dict()
def run(self):
self.register_message_receive_handlers()
self.com_manager.handle_receive_message()
def get_sender_id(self):
return self.rank
def receive_message(self, msg_type, msg_params) -> None:
# logging.info("receive_message. rank_id = %d, msg_type = %s. msg_params = %s" % (
# self.rank, str(msg_type), str(msg_params.get_content())))
handler_callback_func = self.message_handler_dict[msg_type]
handler_callback_func(msg_params)
def send_message(self, message):
msg = Message()
msg.add(Message.MSG_ARG_KEY_TYPE, message.get_type())
msg.add(Message.MSG_ARG_KEY_SENDER, message.get_sender_id())
msg.add(Message.MSG_ARG_KEY_RECEIVER, message.get_receiver_id())
for key, value in message.get_params().items():
# logging.info("%s == %s" % (key, value))
msg.add(key, value)
self.com_manager.send_message(msg)
@abstractmethod
def register_message_receive_handlers(self) -> None:
pass
def register_message_receive_handler(self, msg_type, handler_callback_func):
self.message_handler_dict[msg_type] = handler_callback_func
def finish(self):
logging.info("__finish server")
if self.backend == "MPI":
MPI.COMM_WORLD.Abort()
elif self.backend == "MQTT":
self.com_manager.stop_receive_message()
elif self.backend == "GRPC":
self.com_manager.stop_receive_message()
elif self.backend == "TRPC":
self.com_manager.stop_receive_message()
|
the-stack_0_25535
|
# -*- coding: utf-8 -*-
import io
import demjson
import pandas as pd
import requests
from zvdata.api import df_to_db, init_entities
from zvdata.recorder import Recorder
from zvt.api.common import china_stock_code_to_id
from zvt.domain import StockIndex
from zvt.utils.time_utils import to_pd_timestamp
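# Recorder that fetches Chinese stock index lists (CSI/SSE, SZSE, CNI) and their
# constituent stocks and persists them into the StockIndex schema.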
class ChinaIndexListSpider(Recorder):
data_schema = StockIndex
def __init__(self, batch_size=10, force_update=False, sleeping_time=2.0, provider='exchange') -> None:
self.provider = provider
super(ChinaIndexListSpider, self).__init__(batch_size, force_update, sleeping_time)
def run(self):
        # SSE and CSI indices
self.fetch_csi_index()
        # SZSE indices
self.fetch_szse_index()
        # CNI indices
self.fetch_cni_index()
def fetch_csi_index(self) -> None:
"""
        Fetch the list of SSE and CSI indices
"""
url = 'http://www.csindex.com.cn/zh-CN/indices/index' \
'?page={}&page_size={}&data_type=json&class_1=1&class_2=2&class_7=7&class_10=10'
index_list = []
page = 1
page_size = 50
while True:
query_url = url.format(page, page_size)
response = requests.get(query_url)
response_dict = demjson.decode(response.text)
response_index_list = response_dict.get('list', [])
if len(response_index_list) == 0:
break
index_list.extend(response_index_list)
self.logger.info(f'上证、中证指数第 {page} 页抓取完成...')
page += 1
self.sleep()
df = pd.DataFrame(index_list)
df = df[['base_date', 'base_point', 'index_code', 'indx_sname', 'online_date', 'class_eseries']]
df.columns = ['timestamp', 'base_point', 'code', 'name', 'list_date', 'class_eseries']
df['category'] = df['class_eseries'].apply(lambda x: x.split(' ')[0].lower())
df = df.drop('class_eseries', axis=1)
df = df.loc[df['code'].str.contains(r'^\d{6}$')]
self.persist_index(df)
self.logger.info('上证、中证指数列表抓取完成...')
        # Fetch the constituents of the SSE and CSI indices
self.fetch_csi_index_component(df)
self.logger.info('上证、中证指数成分股抓取完成...')
def fetch_csi_index_component(self, df: pd.DataFrame):
"""
        Fetch the constituent stocks of the SSE and CSI indices
"""
query_url = 'http://www.csindex.com.cn/uploads/file/autofile/cons/{}cons.xls'
for _, index in df.iterrows():
index_code = index['code']
url = query_url.format(index_code)
try:
response = requests.get(url)
response.raise_for_status()
except requests.HTTPError as error:
self.logger.error(f'{index["name"]} - {index_code} 成分股抓取错误 ({error})')
continue
response_df = pd.read_excel(io.BytesIO(response.content))
index_id = f'index_cn_{index_code}'
response_df = response_df[['成分券代码Constituent Code']].rename(columns={'成分券代码Constituent Code': 'stock_code'})
response_df['id'] = response_df['stock_code'].apply(
lambda x: f'{index_id}_{china_stock_code_to_id(str(x))}')
response_df['entity_id'] = response_df['id']
response_df['stock_id'] = response_df['stock_code'].apply(lambda x: china_stock_code_to_id(str(x)))
response_df['index_id'] = index_id
response_df.drop('stock_code', axis=1, inplace=True)
df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)
self.logger.info(f'{index["name"]} - {index_code} 成分股抓取完成...')
self.sleep()
def fetch_szse_index(self) -> None:
"""
        Fetch the list of SZSE indices
"""
url = 'http://www.szse.cn/api/report/ShowReport?SHOWTYPE=xlsx&CATALOGID=1812_zs&TABKEY=tab1'
response = requests.get(url)
df = pd.read_excel(io.BytesIO(response.content), dtype='str')
df.columns = ['code', 'name', 'timestamp', 'base_point', 'list_date']
df['category'] = 'szse'
df = df.loc[df['code'].str.contains(r'^\d{6}$')]
self.persist_index(df)
self.logger.info('深证指数列表抓取完成...')
        # Fetch the constituents of the SZSE indices
self.fetch_szse_index_component(df)
self.logger.info('深证指数成分股抓取完成...')
def fetch_szse_index_component(self, df: pd.DataFrame):
"""
        Fetch the constituent stocks of the SZSE indices
"""
query_url = 'http://www.szse.cn/api/report/ShowReport?SHOWTYPE=xlsx&CATALOGID=1747_zs&TABKEY=tab1&ZSDM={}'
for _, index in df.iterrows():
index_code = index['code']
url = query_url.format(index_code)
response = requests.get(url)
response_df = pd.read_excel(io.BytesIO(response.content), dtype='str')
index_id = f'index_cn_{index_code}'
response_df = response_df[['证券代码']]
response_df['id'] = response_df['证券代码'].apply(lambda x: f'{index_id}_{china_stock_code_to_id(str(x))}')
response_df['entity_id'] = response_df['id']
response_df['stock_id'] = response_df['证券代码'].apply(lambda x: china_stock_code_to_id(str(x)))
response_df['index_id'] = index_id
response_df.drop('证券代码', axis=1, inplace=True)
df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)
self.logger.info(f'{index["name"]} - {index_code} 成分股抓取完成...')
self.sleep()
def fetch_cni_index(self) -> None:
"""
        Fetch the list of CNI indices
"""
url = 'http://www.cnindex.com.cn/zstx/jcxl/'
response = requests.get(url)
response.encoding = 'utf-8'
dfs = pd.read_html(response.text)
        # tables after the 9th contain non-stock indices
dfs = dfs[1:9]
result_df = pd.DataFrame()
for df in dfs:
header = df.iloc[0]
df = df[1:]
df.columns = header
            df = df.astype('str')
result_df = pd.concat([result_df, df])
result_df = result_df.drop('样本股数量', axis=1)
result_df.columns = ['name', 'code', 'timestamp', 'base_point', 'list_date']
result_df['timestamp'] = result_df['timestamp'].apply(lambda x: x.replace('-', ''))
result_df['list_date'] = result_df['list_date'].apply(lambda x: x.replace('-', ''))
result_df['category'] = 'csi'
result_df = result_df.loc[result_df['code'].str.contains(r'^\d{6}$')]
self.persist_index(result_df)
self.logger.info('国证指数列表抓取完成...')
        # fetch CNI index constituents
self.fetch_cni_index_component(result_df)
self.logger.info('国证指数成分股抓取完成...')
def fetch_cni_index_component(self, df: pd.DataFrame):
"""
        Fetch the constituents of CNI indices
"""
query_url = 'http://www.cnindex.com.cn/docs/yb_{}.xls'
for _, index in df.iterrows():
index_code = index['code']
url = query_url.format(index_code)
try:
response = requests.get(url)
response.raise_for_status()
except requests.HTTPError as error:
self.logger.error(f'{index["name"]} - {index_code} 成分股抓取错误 ({error})')
continue
response_df = pd.read_excel(io.BytesIO(response.content), dtype='str')
index_id = f'index_cn_{index_code}'
try:
response_df = response_df[['样本股代码']]
except KeyError:
response_df = response_df[['证券代码']]
response_df.columns = ['stock_code']
response_df['id'] = response_df['stock_code'].apply(
lambda x: f'{index_id}_{china_stock_code_to_id(str(x))}')
response_df['entity_id'] = response_df['id']
response_df['stock_id'] = response_df['stock_code'].apply(lambda x: china_stock_code_to_id(str(x)))
response_df['index_id'] = index_id
response_df.drop('stock_code', axis=1, inplace=True)
df_to_db(data_schema=self.data_schema, df=response_df, provider=self.provider)
self.logger.info(f'{index["name"]} - {index_code} 成分股抓取完成...')
self.sleep()
def persist_index(self, df) -> None:
df['timestamp'] = df['timestamp'].apply(lambda x: to_pd_timestamp(x))
df['list_date'] = df['list_date'].apply(lambda x: to_pd_timestamp(x))
df['id'] = df['code'].apply(lambda code: f'index_cn_{code}')
df['entity_id'] = df['id']
df['exchange'] = 'cn'
df['entity_type'] = 'index'
df['is_delisted'] = False
df = df.dropna(axis=0, how='any')
df = df.drop_duplicates(subset='id', keep='last')
init_entities(df, entity_type='index', provider=self.provider)
if __name__ == '__main__':
spider = ChinaIndexListSpider(provider='exchange')
spider.run()
|
the-stack_0_25536
|
"""Allow to set up simple automation rules via the config file."""
import importlib
import logging
from typing import Any, Awaitable, Callable, List, Optional, Set
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_DEVICE_ID,
CONF_ENTITY_ID,
CONF_ID,
CONF_PLATFORM,
CONF_ZONE,
EVENT_AUTOMATION_TRIGGERED,
EVENT_HOMEASSISTANT_START,
SERVICE_RELOAD,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.core import Context, CoreState, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import condition, extract_domain_configs, script
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.service import async_register_admin_service
from homeassistant.helpers.typing import TemplateVarsType
from homeassistant.loader import bind_hass
from homeassistant.util.dt import parse_datetime, utcnow
# mypy: allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs, no-warn-return-any
DOMAIN = "automation"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
GROUP_NAME_ALL_AUTOMATIONS = "all automations"
CONF_ALIAS = "alias"
CONF_DESCRIPTION = "description"
CONF_HIDE_ENTITY = "hide_entity"
CONF_CONDITION = "condition"
CONF_ACTION = "action"
CONF_TRIGGER = "trigger"
CONF_CONDITION_TYPE = "condition_type"
CONF_INITIAL_STATE = "initial_state"
CONF_SKIP_CONDITION = "skip_condition"
CONDITION_USE_TRIGGER_VALUES = "use_trigger_values"
CONDITION_TYPE_AND = "and"
CONDITION_TYPE_OR = "or"
DEFAULT_CONDITION_TYPE = CONDITION_TYPE_AND
DEFAULT_HIDE_ENTITY = False
DEFAULT_INITIAL_STATE = True
ATTR_LAST_TRIGGERED = "last_triggered"
ATTR_VARIABLES = "variables"
SERVICE_TRIGGER = "trigger"
_LOGGER = logging.getLogger(__name__)
AutomationActionType = Callable[[HomeAssistant, TemplateVarsType], Awaitable[None]]
def _platform_validator(config):
"""Validate it is a valid platform."""
try:
platform = importlib.import_module(
".{}".format(config[CONF_PLATFORM]), __name__
)
except ImportError:
raise vol.Invalid("Invalid platform specified") from None
return platform.TRIGGER_SCHEMA(config)
_TRIGGER_SCHEMA = vol.All(
cv.ensure_list,
[
vol.All(
vol.Schema({vol.Required(CONF_PLATFORM): str}, extra=vol.ALLOW_EXTRA),
_platform_validator,
)
],
)
_CONDITION_SCHEMA = vol.All(cv.ensure_list, [cv.CONDITION_SCHEMA])
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HIDE_ENTITY, invalidation_version="0.107"),
vol.Schema(
{
# str on purpose
CONF_ID: str,
CONF_ALIAS: cv.string,
vol.Optional(CONF_DESCRIPTION): cv.string,
vol.Optional(CONF_INITIAL_STATE): cv.boolean,
vol.Optional(CONF_HIDE_ENTITY, default=DEFAULT_HIDE_ENTITY): cv.boolean,
vol.Required(CONF_TRIGGER): _TRIGGER_SCHEMA,
vol.Optional(CONF_CONDITION): _CONDITION_SCHEMA,
vol.Required(CONF_ACTION): cv.SCRIPT_SCHEMA,
}
),
)
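# Illustrative only, not part of the original component: a minimal YAML automation
# that should satisfy PLATFORM_SCHEMA above (the entity ids are assumed placeholders).
#
#   automation:
#     - alias: "Turn on hallway light at sunset"
#       trigger:
#         - platform: sun
#           event: sunset
#       condition:
#         - condition: state
#           entity_id: light.hallway
#           state: "off"
#       action:
#         - service: light.turn_on
#           entity_id: light.hallway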
@bind_hass
def is_on(hass, entity_id):
"""
Return true if specified automation entity_id is on.
Async friendly.
"""
return hass.states.is_state(entity_id, STATE_ON)
@callback
def automations_with_entity(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all automations that reference the entity."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
results = []
for automation_entity in component.entities:
if entity_id in automation_entity.referenced_entities:
results.append(automation_entity.entity_id)
return results
@callback
def entities_in_automation(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all entities in a scene."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
automation_entity = component.get_entity(entity_id)
if automation_entity is None:
return []
return list(automation_entity.referenced_entities)
@callback
def automations_with_device(hass: HomeAssistant, device_id: str) -> List[str]:
"""Return all automations that reference the device."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
results = []
for automation_entity in component.entities:
if device_id in automation_entity.referenced_devices:
results.append(automation_entity.entity_id)
return results
@callback
def devices_in_automation(hass: HomeAssistant, entity_id: str) -> List[str]:
"""Return all devices in a scene."""
if DOMAIN not in hass.data:
return []
component = hass.data[DOMAIN]
automation_entity = component.get_entity(entity_id)
if automation_entity is None:
return []
return list(automation_entity.referenced_devices)
async def async_setup(hass, config):
"""Set up the automation."""
hass.data[DOMAIN] = component = EntityComponent(_LOGGER, DOMAIN, hass)
await _async_process_config(hass, config, component)
async def trigger_service_handler(entity, service_call):
"""Handle automation triggers."""
await entity.async_trigger(
service_call.data[ATTR_VARIABLES],
skip_condition=service_call.data[CONF_SKIP_CONDITION],
context=service_call.context,
)
component.async_register_entity_service(
SERVICE_TRIGGER,
{
vol.Optional(ATTR_VARIABLES, default={}): dict,
vol.Optional(CONF_SKIP_CONDITION, default=True): bool,
},
trigger_service_handler,
)
component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")
component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on")
component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
async def reload_service_handler(service_call):
"""Remove all automations and load new ones from config."""
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
async_register_admin_service(
hass, DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({}),
)
return True
class AutomationEntity(ToggleEntity, RestoreEntity):
"""Entity to show status of entity."""
def __init__(
self,
automation_id,
name,
trigger_config,
cond_func,
action_script,
hidden,
initial_state,
):
"""Initialize an automation entity."""
self._id = automation_id
self._name = name
self._trigger_config = trigger_config
self._async_detach_triggers = None
self._cond_func = cond_func
self.action_script = action_script
self._last_triggered = None
self._hidden = hidden
self._initial_state = initial_state
self._is_enabled = False
self._referenced_entities: Optional[Set[str]] = None
self._referenced_devices: Optional[Set[str]] = None
@property
def name(self):
"""Name of the automation."""
return self._name
@property
def unique_id(self):
"""Return unique ID."""
return self._id
@property
def should_poll(self):
"""No polling needed for automation entities."""
return False
@property
def state_attributes(self):
"""Return the entity state attributes."""
return {ATTR_LAST_TRIGGERED: self._last_triggered}
@property
def hidden(self) -> bool:
"""Return True if the automation entity should be hidden from UIs."""
return self._hidden
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._async_detach_triggers is not None or self._is_enabled
@property
def referenced_devices(self):
"""Return a set of referenced devices."""
if self._referenced_devices is not None:
return self._referenced_devices
referenced = self.action_script.referenced_devices
if self._cond_func is not None:
for conf in self._cond_func.config:
referenced |= condition.async_extract_devices(conf)
for conf in self._trigger_config:
device = _trigger_extract_device(conf)
if device is not None:
referenced.add(device)
self._referenced_devices = referenced
return referenced
@property
def referenced_entities(self):
"""Return a set of referenced entities."""
if self._referenced_entities is not None:
return self._referenced_entities
referenced = self.action_script.referenced_entities
if self._cond_func is not None:
for conf in self._cond_func.config:
referenced |= condition.async_extract_entities(conf)
for conf in self._trigger_config:
for entity_id in _trigger_extract_entities(conf):
referenced.add(entity_id)
self._referenced_entities = referenced
return referenced
async def async_added_to_hass(self) -> None:
"""Startup with initial state or previous state."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
enable_automation = state.state == STATE_ON
last_triggered = state.attributes.get("last_triggered")
if last_triggered is not None:
self._last_triggered = parse_datetime(last_triggered)
_LOGGER.debug(
"Loaded automation %s with state %s from state "
" storage last state %s",
self.entity_id,
enable_automation,
state,
)
else:
enable_automation = DEFAULT_INITIAL_STATE
_LOGGER.debug(
"Automation %s not in state storage, state %s from default is used.",
self.entity_id,
enable_automation,
)
if self._initial_state is not None:
enable_automation = self._initial_state
_LOGGER.debug(
"Automation %s initial state %s overridden from "
"config initial_state",
self.entity_id,
enable_automation,
)
if enable_automation:
await self.async_enable()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on and update the state."""
await self.async_enable()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
await self.async_disable()
async def async_trigger(self, variables, skip_condition=False, context=None):
"""Trigger automation.
This method is a coroutine.
"""
if (
not skip_condition
and self._cond_func is not None
and not self._cond_func(variables)
):
return
# Create a new context referring to the old context.
parent_id = None if context is None else context.id
trigger_context = Context(parent_id=parent_id)
self.async_set_context(trigger_context)
self.hass.bus.async_fire(
EVENT_AUTOMATION_TRIGGERED,
{ATTR_NAME: self._name, ATTR_ENTITY_ID: self.entity_id},
context=trigger_context,
)
_LOGGER.info("Executing %s", self._name)
try:
await self.action_script.async_run(variables, trigger_context)
except Exception as err: # pylint: disable=broad-except
self.action_script.async_log_exception(
_LOGGER, f"Error while executing automation {self.entity_id}", err
)
self._last_triggered = utcnow()
await self.async_update_ha_state()
async def async_will_remove_from_hass(self):
"""Remove listeners when removing automation from Home Assistant."""
await super().async_will_remove_from_hass()
await self.async_disable()
async def async_enable(self):
"""Enable this automation entity.
This method is a coroutine.
"""
if self._is_enabled:
return
self._is_enabled = True
# HomeAssistant is starting up
if self.hass.state != CoreState.not_running:
self._async_detach_triggers = await self._async_attach_triggers()
self.async_write_ha_state()
return
async def async_enable_automation(event):
"""Start automation on startup."""
# Don't do anything if no longer enabled or already attached
if not self._is_enabled or self._async_detach_triggers is not None:
return
self._async_detach_triggers = await self._async_attach_triggers()
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, async_enable_automation
)
self.async_write_ha_state()
async def async_disable(self):
"""Disable the automation entity."""
if not self._is_enabled:
return
self._is_enabled = False
if self._async_detach_triggers is not None:
self._async_detach_triggers()
self._async_detach_triggers = None
self.async_write_ha_state()
async def _async_attach_triggers(self):
"""Set up the triggers."""
removes = []
info = {"name": self._name}
for conf in self._trigger_config:
platform = importlib.import_module(
".{}".format(conf[CONF_PLATFORM]), __name__
)
remove = await platform.async_attach_trigger(
self.hass, conf, self.async_trigger, info
)
if not remove:
_LOGGER.error("Error setting up trigger %s", self._name)
continue
_LOGGER.info("Initialized trigger %s", self._name)
removes.append(remove)
if not removes:
return None
@callback
def remove_triggers():
"""Remove attached triggers."""
for remove in removes:
remove()
return remove_triggers
@property
def device_state_attributes(self):
"""Return automation attributes."""
if self._id is None:
return None
return {CONF_ID: self._id}
async def _async_process_config(hass, config, component):
"""Process config and add automations.
This method is a coroutine.
"""
entities = []
for config_key in extract_domain_configs(config, DOMAIN):
conf = config[config_key]
for list_no, config_block in enumerate(conf):
automation_id = config_block.get(CONF_ID)
name = config_block.get(CONF_ALIAS) or f"{config_key} {list_no}"
hidden = config_block[CONF_HIDE_ENTITY]
initial_state = config_block.get(CONF_INITIAL_STATE)
action_script = script.Script(hass, config_block.get(CONF_ACTION, {}), name)
if CONF_CONDITION in config_block:
cond_func = await _async_process_if(hass, config, config_block)
if cond_func is None:
continue
else:
cond_func = None
entity = AutomationEntity(
automation_id,
name,
config_block[CONF_TRIGGER],
cond_func,
action_script,
hidden,
initial_state,
)
entities.append(entity)
if entities:
await component.async_add_entities(entities)
async def _async_process_if(hass, config, p_config):
"""Process if checks."""
if_configs = p_config[CONF_CONDITION]
checks = []
for if_config in if_configs:
try:
checks.append(await condition.async_from_config(hass, if_config, False))
except HomeAssistantError as ex:
_LOGGER.warning("Invalid condition: %s", ex)
return None
def if_action(variables=None):
"""AND all conditions."""
return all(check(hass, variables) for check in checks)
if_action.config = if_configs
return if_action
@callback
def _trigger_extract_device(trigger_conf: dict) -> Optional[str]:
"""Extract devices from a trigger config."""
if trigger_conf[CONF_PLATFORM] != "device":
return None
return trigger_conf[CONF_DEVICE_ID]
@callback
def _trigger_extract_entities(trigger_conf: dict) -> List[str]:
"""Extract entities from a trigger config."""
if trigger_conf[CONF_PLATFORM] in ("state", "numeric_state"):
return trigger_conf[CONF_ENTITY_ID]
if trigger_conf[CONF_PLATFORM] == "zone":
return trigger_conf[CONF_ENTITY_ID] + [trigger_conf[CONF_ZONE]]
if trigger_conf[CONF_PLATFORM] == "geo_location":
return [trigger_conf[CONF_ZONE]]
if trigger_conf[CONF_PLATFORM] == "sun":
return ["sun.sun"]
return []
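# Illustrative only (assumed entity/zone ids, not part of the original component):
#     _trigger_extract_entities({"platform": "zone",
#                                "entity_id": ["device_tracker.phone"],
#                                "zone": "zone.home"})
#     # -> ["device_tracker.phone", "zone.home"]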
|
the-stack_0_25537
|
import math
import numpy as np
def load_codebook(net, path, max_conv_bits, max_fc_bits):
conv_layer_num = 0
fc_layer_num = 0
fin = open(path, 'rb')
for name, x in net.named_parameters():
if name.endswith('mask'):
continue
if name.startswith('conv'):
conv_layer_num += 1
elif name.startswith('fc'):
fc_layer_num += 1
nz_num = np.fromfile(fin, dtype=np.uint32, count=conv_layer_num + fc_layer_num)
conv_diff_num = sum(nz_num[:conv_layer_num])
conv_diff = np.fromfile(fin, dtype=np.uint8, count=conv_diff_num)
fc_merge_num = math.floor((sum(nz_num[conv_layer_num:]) + 1) / 2)
fc_merge_diff = np.fromfile(fin, dtype=np.uint8, count=fc_merge_num)
# print(nz_num)
# print(len(conv_diff), conv_diff[-10:])
# print(len(fc_merge_diff), fc_merge_diff[-10:])
# [ 304 11 5353 1 400000 500 5000 10]
# 5669 [ 0 2 0 1 1 1 0 9 8 44]
# 202755 [0 0 0 0 0 0 0 0 0 0]
# Split 8 bits index to 4 bits index
fc_diff = []
for i in range(len(fc_merge_diff)):
fc_diff.append(int(fc_merge_diff[i] / max_fc_bits)) # first 4 bits
fc_diff.append(fc_merge_diff[i] % max_fc_bits) # last 4 bits
fc_num_sum = nz_num[conv_layer_num:].sum()
if fc_num_sum % 2 != 0:
fc_diff = fc_diff[:fc_num_sum]
fc_diff = np.asarray(fc_diff, dtype=np.uint8)
conv_codebook_index = np.fromfile(fin, dtype=np.uint8, count=conv_diff_num)
fc_codebook_index_merge = np.fromfile(fin, dtype=np.uint8, count=fc_merge_num)
codebook_value_num = int(max_conv_bits * (conv_layer_num / 2) + (2 ** max_fc_bits) * (fc_layer_num / 2))
codebook_value = np.fromfile(fin, dtype=np.float32, count=codebook_value_num)
# print(len(conv_codebook_index), conv_codebook_index[-10:])
# print(len(fc_codebook_index_merge), fc_codebook_index_merge[-10:])
# print(len(codebook_value), codebook_value[-10:])
# 5669 [ 2 228 211 229 76 152 23 116 111 25]
# 202755 [200 66 71 152 140 171 86 151 87 197]
# 544 [-0.11808116 -0.06328904 0.1446653 0.05191407 -0.03960273 -0.0174285
# -0.0174285 0.00504891 0.22879101 0.05191407]
# Split 8 bits index to 4 bits index
fc_codebook_index = []
for i in range(len(fc_codebook_index_merge)):
fc_codebook_index.append(int(fc_codebook_index_merge[i] / max_fc_bits)) # first 4 bits
fc_codebook_index.append(fc_codebook_index_merge[i] % max_fc_bits) # last 4 bits
if fc_num_sum % 2 != 0:
fc_codebook_index = fc_codebook_index[:fc_num_sum]
fc_codebook_index = np.asarray(fc_codebook_index, dtype=np.uint8)
print("if_error_more_15", (fc_codebook_index > 15).sum())
print("if_error_less_0", (fc_codebook_index < 0).sum())
print("fc_diff", (fc_codebook_index).sum())
# print(nz_num)
# print(len(conv_diff), conv_diff[-10:])
# print(len(fc_diff), fc_diff[-10:])
# print(len(conv_codebook_index), conv_codebook_index[-10:])
# print(len(fc_codebook_index), fc_codebook_index[-10:])
# print(len(codebook_value), codebook_value[-10:])
# [ 304 11 5353 1 400000 500 5000 10]
# 5669 [ 0 2 0 1 1 1 0 9 8 44]
# 405510 [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# 5669 [ 2 228 211 229 76 152 23 116 111 25]
# 405510 [10 11 5 6 9 7 5 7 12 5]
# 544 [-0.11808116 -0.06328904 0.1446653 0.05191407 -0.03960273 -0.0174285
# -0.0174285 0.00504891 0.22879101 0.05191407]
return conv_layer_num, nz_num, conv_diff, fc_diff, conv_codebook_index, fc_codebook_index, codebook_value
def codebook_to_init(net, conv_layer_length, nz_num, conv_diff, fc_diff, conv_codebook_index, fc_codebook_index,
codebook_value, max_conv_bits, max_fc_bits):
state_dict = net.state_dict()
conv_layer_index = 0
fc_layer_index = 0
codebook_value_index = 0
layer_codebook_value = []
for i, (key, value) in enumerate(state_dict.items()):
shape = value.shape
value = value.view(-1)
value.zero_()
if i < conv_layer_length:
layer_diff = conv_diff[conv_layer_index:conv_layer_index + nz_num[i]]
layer_codebook_index = conv_codebook_index[conv_layer_index:conv_layer_index + nz_num[i]]
if not key.endswith('bias'):
layer_codebook_value = codebook_value[codebook_value_index:codebook_value_index + max_conv_bits]
codebook_value_index += max_conv_bits
conv_layer_index += nz_num[i]
else:
layer_diff = fc_diff[fc_layer_index:fc_layer_index + nz_num[i]]
layer_codebook_index = fc_codebook_index[fc_layer_index:fc_layer_index + nz_num[i]]
if not key.endswith('bias'):
layer_codebook_value = codebook_value[codebook_value_index:codebook_value_index + max_fc_bits]
codebook_value_index += max_fc_bits
fc_layer_index += nz_num[i]
dense_index = 0
sparse_index = 0
while sparse_index < len(layer_diff):
dense_index += layer_diff[sparse_index]
value[dense_index] = float(layer_codebook_value[layer_codebook_index[sparse_index]])
sparse_index += 1
dense_index += 1
        value = value.reshape(shape)  # bookkeeping only: the in-place writes above already updated the underlying tensor
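# Illustrative pipeline (the file name and bit widths below are assumptions, not from the original):
#     conv_layer_num, nz_num, conv_diff, fc_diff, conv_idx, fc_idx, values = \
#         load_codebook(net, 'encodings/codebook.bin', max_conv_bits=256, max_fc_bits=16)
#     codebook_to_init(net, conv_layer_num, nz_num, conv_diff, fc_diff,
#                      conv_idx, fc_idx, values, max_conv_bits=256, max_fc_bits=16)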
|
the-stack_0_25540
|
# coding=utf-8
from __future__ import absolute_import, print_function
import sys
from six import string_types
__author__ = 'Tyler Butler <[email protected]>'
def add_to_path_if_needed(path):
"""
Adds *path* to the system path if it is not already on it.
Prevents unnecessary 'pollution' of the path with a bunch of redundant entries.
"""
if path not in sys.path:
sys.path.append(path)
def dict_to_querystring(dictionary):
"""Converts a dict to a querystring suitable to be appended to a URL."""
s = u""
for d in dictionary.keys():
s = unicode.format(u"{0}{1}={2}&", s, d, dictionary[d])
return s[:-1]
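# Illustrative usage (assumed values, not part of the original module):
#     dict_to_querystring({'page': 2, 'q': 'books'})   # -> u'page=2&q=books'
# (key order follows the dict's iteration order; values are not URL-encoded)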
def get_class(class_string):
"""Given a string representing a path to a class, instantiates that class."""
parts = class_string.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
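# Illustrative usage (stdlib class chosen for the example, not part of the original module):
#     get_class('collections.OrderedDict') is collections.OrderedDict   # -> True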
def get_class_string(obj):
if isinstance(obj, string_types):
return obj
mod = obj.__module__
cls = getattr(obj, '__name__', obj.__class__.__name__)
return '.'.join((mod, cls))
|
the-stack_0_25541
|
"""Download Files to your local server
Syntax:
.download
.download url | file.name to download files from a Public Link"""
import asyncio
import math
import os
import time
from datetime import datetime
import aiohttp
from pySmartDL import SmartDL
from telethon import events
from telethon.tl.types import DocumentAttributeVideo
from uniborg.util import admin_cmd, humanbytes, progress, time_formatter
from sample_config import Config
@borg.on(admin_cmd(pattern="download ?(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
mone = await event.edit("Processing ...")
input_str = event.pattern_match.group(1)
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
if event.reply_to_msg_id:
start = datetime.now()
reply_message = await event.get_reply_message()
try:
c_time = time.time()
downloaded_file_name = await borg.download_media(
reply_message,
Config.TMP_DOWNLOAD_DIRECTORY,
progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
progress(d, t, mone, c_time, "trying to download")
)
)
except Exception as e: # pylint:disable=C0103,W0703
await mone.edit(str(e))
else:
end = datetime.now()
ms = (end - start).seconds
await mone.edit("Downloaded to `{}` in {} seconds.".format(downloaded_file_name, ms))
elif input_str:
start = datetime.now()
url = input_str
file_name = os.path.basename(url)
to_download_directory = Config.TMP_DOWNLOAD_DIRECTORY
if "|" in input_str:
url, file_name = input_str.split("|")
url = url.strip()
file_name = file_name.strip()
downloaded_file_name = os.path.join(to_download_directory, file_name)
downloader = SmartDL(url, downloaded_file_name, progress_bar=False)
downloader.start(blocking=False)
display_message = ""
c_time = time.time()
while not downloader.isFinished():
total_length = downloader.filesize if downloader.filesize else None
downloaded = downloader.get_dl_size()
now = time.time()
diff = now - c_time
percentage = downloader.get_progress() * 100
speed = downloader.get_speed()
elapsed_time = round(diff) * 1000
progress_str = "[{0}{1}]\nProgress: {2}%".format(
''.join(["█" for i in range(math.floor(percentage / 5))]),
''.join(["░" for i in range(20 - math.floor(percentage / 5))]),
round(percentage, 2))
estimated_total_time = downloader.get_eta(human=True)
try:
current_message = f"trying to download\n"
current_message += f"URL: {url}\n"
current_message += f"File Name: {file_name}\n"
current_message += f"{progress_str}\n"
current_message += f"{humanbytes(downloaded)} of {humanbytes(total_length)}\n"
current_message += f"ETA: {estimated_total_time}"
if round(diff % 10.00) == 0 and current_message != display_message:
await mone.edit(current_message)
display_message = current_message
except Exception as e:
logger.info(str(e))
end = datetime.now()
ms = (end - start).seconds
if os.path.exists(downloaded_file_name):
await mone.edit("Downloaded to `{}` in {} seconds.".format(downloaded_file_name, ms))
else:
await mone.edit("Incorrect URL\n {}".format(input_str))
else:
await mone.edit("Reply to a message to download to my local server.")
async def download_coroutine(session, url, file_name, event, start):
CHUNK_SIZE = 2341
downloaded = 0
display_message = ""
async with session.get(url) as response:
total_length = int(response.headers["Content-Length"])
content_type = response.headers["Content-Type"]
if "text" in content_type and total_length < 500:
return await response.release()
await event.edit("""Initiating Download
URL: {}
File Name: {}
File Size: {}""".format(url, file_name, humanbytes(total_length)))
with open(file_name, "wb") as f_handle:
while True:
chunk = await response.content.read(CHUNK_SIZE)
if not chunk:
break
f_handle.write(chunk)
downloaded += CHUNK_SIZE
now = time.time()
diff = now - start
if round(diff % 5.00) == 0 or downloaded == total_length:
percentage = downloaded * 100 / total_length
speed = downloaded / diff
elapsed_time = round(diff) * 1000
time_to_completion = round(
(total_length - downloaded) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
try:
current_message = """**Download Status**
URL: {}
File Name: {}
File Size: {}
Downloaded: {}
ETA: {}""".format(
url,
file_name,
humanbytes(total_length),
humanbytes(downloaded),
time_formatter(estimated_total_time)
)
if current_message != display_message:
await event.edit(current_message)
display_message = current_message
except Exception as e:
logger.info(str(e))
pass
return await response.release()
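# Illustrative usage (assumed URL and file name, not part of the original plugin):
#     async with aiohttp.ClientSession() as session:
#         await download_coroutine(session, "https://example.com/file.bin",
#                                  "file.bin", event, time.time())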
|
the-stack_0_25542
|
from random import randint
computador = randint(0, 10)
print('Sou seu computador.. acabei de pensar em um número entre 0 e 10.')
print('será que você consegue adivinhar qual foi? ')
acertou = False
palpites = 0
while not acertou:
jogador = int(input('Qual Seu palpite? '))
palpites += 1
if jogador == computador:
acertou = True
else:
if jogador < computador:
print('Mais... Tente mais uma vez.')
elif jogador > computador:
print('Menos... Tente mais uma vez.')
print('Acertou com {} tentativas. Parabéns'.format(palpites))
|
the-stack_0_25545
|
import json
__author__ = 'Joe'
def export(field_desc):
"""
:param field_desc:
:type field_desc: bitcoder.bitcoder.Encoded
:return:
"""
output = {}
    for name, field in field_desc.fields().items():
        output[name] = {
            # assuming the mask should span the whole field: `length` one-bits shifted up to `start`
            'mask': ((1 << field.length) - 1) << field.start,
'start': field.start,
}
output = {field_desc.__class__.__name__: output}
return json.dumps(output, sort_keys=True, indent=2)
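# Illustrative output (assumed field layout, not part of the original module): for a
# descriptor class `Header` with a field `flags` of length 3 starting at bit 2,
# export(Header()) would describe the field as:
#     {"Header": {"flags": {"mask": 28, "start": 2}}}   # 28 == 0b11100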
|
the-stack_0_25550
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
assert len(sys.argv) == 4, sys.argv
(in_file, c_file, h_file) = sys.argv[1:]
def write_file(filename, contents):
open(filename, 'wb').write(contents)
write_file(c_file, open(in_file, 'rb').read())
write_file(h_file, '#define NAME "%s"\n' % in_file)
sys.exit(0)
|
the-stack_0_25551
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""preprocess"""
import os
import argparse
from PIL import Image
import numpy as np
parser = argparse.ArgumentParser(description="Preprocess")
parser.add_argument("--dataset_path", type=str, default="/cache/data", help="dataset path.")
parser.add_argument("--dataset_type", type=str, default="Set5", help="dataset type.")
parser.add_argument("--save_path", type=str, default="/cache/data", help="save lr dataset path.")
parser.add_argument("--scale", type=int, default="2", help="scale.")
args = parser.parse_args()
MAX_HR_SIZE = 2040
def padding(img, target_shape):
h, w = target_shape[0], target_shape[1]
img_h, img_w, _ = img.shape
dh, dw = h - img_h, w - img_w
if dh < 0 or dw < 0:
raise RuntimeError(f"target_shape is bigger than img.shape, {target_shape} > {img.shape}")
if dh != 0 or dw != 0:
img = np.pad(img, ((0, int(dh)), (0, int(dw)), (0, 0)), "constant")
return img
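# Illustrative check (assumed shapes, not part of the original script):
#     lr = np.zeros((1000, 980, 3), dtype=np.uint8)
#     padding(lr, (1020, 1020)).shape   # -> (1020, 1020, 3), zeros appended at bottom/right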
def run_pre_process(dataset_path, dataset_type, scale, save_path):
"""run pre process"""
lr_path = os.path.join(dataset_path, dataset_type, "LR_bicubic/X" + str(scale))
files = os.listdir(lr_path)
for file in files:
lr = Image.open(os.path.join(lr_path, file))
lr = lr.convert('RGB')
lr = np.array(lr)
target_shape = [MAX_HR_SIZE / scale, MAX_HR_SIZE / scale]
img = padding(lr, target_shape)
save_lr_path = os.path.join(save_path, file)
os.makedirs(save_path, exist_ok=True)
Image.fromarray(img).save(save_lr_path)
if __name__ == "__main__":
run_pre_process(args.dataset_path, args.dataset_type, args.scale, args.save_path)
|
the-stack_0_25552
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import logging
import os
import sys
from six import iteritems
import psutil
from prometheus_client import Gauge
from twisted.application import service
from twisted.internet import defer, reactor
from twisted.web.resource import EncodingResourceWrapper, NoResource
from twisted.web.server import GzipEncoderFactory
from twisted.web.static import File
import synapse
import synapse.config.logger
from synapse import events
from synapse.api.urls import (
CONTENT_REPO_PREFIX,
FEDERATION_PREFIX,
LEGACY_MEDIA_PREFIX,
MEDIA_PREFIX,
SERVER_KEY_V2_PREFIX,
STATIC_PREFIX,
WEB_CLIENT_PREFIX,
)
from synapse.app import _base
from synapse.app._base import listen_ssl, listen_tcp, quit_with_error
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.crypto import context_factory
from synapse.federation.transport.server import TransportLayerServer
from synapse.http.additional_resource import AdditionalResource
from synapse.http.server import RootRedirect
from synapse.http.site import SynapseSite
from synapse.metrics import RegistryProxy
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.metrics.resource import METRICS_PREFIX, MetricsResource
from synapse.module_api import ModuleApi
from synapse.python_dependencies import check_requirements
from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory
from synapse.rest import ClientRestResource
from synapse.rest.key.v2 import KeyApiV2Resource
from synapse.rest.media.v0.content_repository import ContentRepoResource
from synapse.server import HomeServer
from synapse.storage import DataStore, are_all_users_on_domain
from synapse.storage.engines import IncorrectDatabaseSetup, create_engine
from synapse.storage.prepare_database import UpgradeDatabaseException, prepare_database
from synapse.util.caches import CACHE_SIZE_FACTOR
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.module_loader import load_module
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
logger = logging.getLogger("synapse.app.homeserver")
def gz_wrap(r):
return EncodingResourceWrapper(r, [GzipEncoderFactory()])
class SynapseHomeServer(HomeServer):
DATASTORE_CLASS = DataStore
def _listener_http(self, config, listener_config):
port = listener_config["port"]
bind_addresses = listener_config["bind_addresses"]
tls = listener_config.get("tls", False)
site_tag = listener_config.get("tag", port)
if tls and config.no_tls:
return
resources = {}
for res in listener_config["resources"]:
for name in res["names"]:
resources.update(self._configure_named_resource(
name, res.get("compress", False),
))
additional_resources = listener_config.get("additional_resources", {})
logger.debug("Configuring additional resources: %r",
additional_resources)
module_api = ModuleApi(self, self.get_auth_handler())
for path, resmodule in additional_resources.items():
handler_cls, config = load_module(resmodule)
handler = handler_cls(config, module_api)
resources[path] = AdditionalResource(self, handler.handle_request)
#if WEB_CLIENT_PREFIX in resources:
# root_resource = RootRedirect(WEB_CLIENT_PREFIX)
#else:
# root_resource = NoResource()
root_resource = NoResource()
root_resource = create_resource_tree(resources, root_resource)
if tls:
listen_ssl(
bind_addresses,
port,
SynapseSite(
"synapse.access.https.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
),
self.tls_server_context_factory,
)
else:
listen_tcp(
bind_addresses,
port,
SynapseSite(
"synapse.access.http.%s" % (site_tag,),
site_tag,
listener_config,
root_resource,
self.version_string,
)
)
logger.info("Synapse now listening on port %d", port)
def _configure_named_resource(self, name, compress=False):
"""Build a resource map for a named resource
Args:
name (str): named resource: one of "client", "federation", etc
compress (bool): whether to enable gzip compression for this
resource
Returns:
dict[str, Resource]: map from path to HTTP resource
"""
resources = {}
if name == "client":
client_resource = ClientRestResource(self)
if compress:
client_resource = gz_wrap(client_resource)
resources.update({
"/_matrix/client/api/v1": client_resource,
"/_matrix/client/r0": client_resource,
"/_matrix/client/unstable": client_resource,
"/_matrix/client/v2_alpha": client_resource,
"/_matrix/client/versions": client_resource,
})
if name == "consent":
from synapse.rest.consent.consent_resource import ConsentResource
consent_resource = ConsentResource(self)
if compress:
consent_resource = gz_wrap(consent_resource)
resources.update({
"/_matrix/consent": consent_resource,
})
if name == "federation":
resources.update({
FEDERATION_PREFIX: TransportLayerServer(self),
})
if name in ["static", "client"]:
resources.update({
STATIC_PREFIX: File(
os.path.join(os.path.dirname(synapse.__file__), "static")
),
})
if name in ["media", "federation", "client"]:
if self.get_config().enable_media_repo:
media_repo = self.get_media_repository_resource()
resources.update({
MEDIA_PREFIX: media_repo,
LEGACY_MEDIA_PREFIX: media_repo,
CONTENT_REPO_PREFIX: ContentRepoResource(
self, self.config.uploads_path
),
})
elif name == "media":
raise ConfigError(
"'media' resource conflicts with enable_media_repo=False",
)
if name in ["keys", "federation"]:
resources[SERVER_KEY_V2_PREFIX] = KeyApiV2Resource(self)
if name == "metrics" and self.get_config().enable_metrics:
resources[METRICS_PREFIX] = MetricsResource(RegistryProxy)
if name == "replication":
resources[REPLICATION_PREFIX] = ReplicationRestResource(self)
return resources
def start_listening(self):
config = self.get_config()
for listener in config.listeners:
if listener["type"] == "http":
self._listener_http(config, listener)
elif listener["type"] == "manhole":
listen_tcp(
listener["bind_addresses"],
listener["port"],
manhole(
username="matrix",
password="rabbithole",
globals={"hs": self},
)
)
elif listener["type"] == "replication":
bind_addresses = listener["bind_addresses"]
for address in bind_addresses:
factory = ReplicationStreamProtocolFactory(self)
server_listener = reactor.listenTCP(
listener["port"], factory, interface=address
)
reactor.addSystemEventTrigger(
"before", "shutdown", server_listener.stopListening,
)
elif listener["type"] == "metrics":
if not self.get_config().enable_metrics:
logger.warn(("Metrics listener configured, but "
"enable_metrics is not True!"))
else:
_base.listen_metrics(listener["bind_addresses"],
listener["port"])
else:
logger.warn("Unrecognized listener type: %s", listener["type"])
def run_startup_checks(self, db_conn, database_engine):
all_users_native = are_all_users_on_domain(
db_conn.cursor(), database_engine, self.hostname
)
if not all_users_native:
quit_with_error(
"Found users in database not native to %s!\n"
"You cannot changed a synapse server_name after it's been configured"
% (self.hostname,)
)
try:
database_engine.check_database(db_conn.cursor())
except IncorrectDatabaseSetup as e:
quit_with_error(str(e))
# Gauges to expose monthly active user control metrics
current_mau_gauge = Gauge("synapse_admin_mau:current", "Current MAU")
max_mau_gauge = Gauge("synapse_admin_mau:max", "MAU Limit")
registered_reserved_users_mau_gauge = Gauge(
"synapse_admin_mau:registered_reserved_users",
"Registered users with reserved threepids"
)
def setup(config_options):
"""
Args:
        config_options: The options passed to Synapse. Usually
`sys.argv[1:]`.
Returns:
HomeServer
"""
try:
config = HomeServerConfig.load_or_generate_config(
"Synapse Homeserver",
config_options,
)
except ConfigError as e:
sys.stderr.write("\n" + str(e) + "\n")
sys.exit(1)
if not config:
# If a config isn't returned, and an exception isn't raised, we're just
# generating config files and shouldn't try to continue.
sys.exit(0)
synapse.config.logger.setup_logging(config, use_worker_options=False)
# check any extra requirements we have now we have a config
check_requirements(config)
events.USE_FROZEN_DICTS = config.use_frozen_dicts
tls_server_context_factory = context_factory.ServerContextFactory(config)
tls_client_options_factory = context_factory.ClientTLSOptionsFactory(config)
database_engine = create_engine(config.database_config)
config.database_config["args"]["cp_openfun"] = database_engine.on_new_connection
hs = SynapseHomeServer(
config.server_name,
db_config=config.database_config,
tls_server_context_factory=tls_server_context_factory,
tls_client_options_factory=tls_client_options_factory,
config=config,
version_string="Synapse/" + get_version_string(synapse),
database_engine=database_engine,
)
logger.info("Preparing database: %s...", config.database_config['name'])
try:
with hs.get_db_conn(run_new_connection=False) as db_conn:
prepare_database(db_conn, database_engine, config=config)
database_engine.on_new_connection(db_conn)
hs.run_startup_checks(db_conn, database_engine)
db_conn.commit()
except UpgradeDatabaseException:
sys.stderr.write(
"\nFailed to upgrade database.\n"
"Have you checked for version specific instructions in"
" UPGRADES.rst?\n"
)
sys.exit(1)
logger.info("Database prepared in %s.", config.database_config['name'])
hs.setup()
hs.start_listening()
def start():
hs.get_pusherpool().start()
hs.get_datastore().start_profiling()
hs.get_datastore().start_doing_background_updates()
reactor.callWhenRunning(start)
return hs
class SynapseService(service.Service):
"""A twisted Service class that will start synapse. Used to run synapse
via twistd and a .tac.
"""
def __init__(self, config):
self.config = config
def startService(self):
hs = setup(self.config)
change_resource_limit(hs.config.soft_file_limit)
if hs.config.gc_thresholds:
gc.set_threshold(*hs.config.gc_thresholds)
def stopService(self):
return self._port.stopListening()
def run(hs):
PROFILE_SYNAPSE = False
if PROFILE_SYNAPSE:
def profile(func):
from cProfile import Profile
from threading import current_thread
def profiled(*args, **kargs):
profile = Profile()
profile.enable()
func(*args, **kargs)
profile.disable()
ident = current_thread().ident
profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
hs.hostname, func.__name__, ident
))
return profiled
from twisted.python.threadpool import ThreadPool
ThreadPool._worker = profile(ThreadPool._worker)
reactor.run = profile(reactor.run)
clock = hs.get_clock()
start_time = clock.time()
stats = {}
# Contains the list of processes we will be monitoring
# currently either 0 or 1
stats_process = []
def start_phone_stats_home():
return run_as_background_process("phone_stats_home", phone_stats_home)
@defer.inlineCallbacks
def phone_stats_home():
logger.info("Gathering stats for reporting")
now = int(hs.get_clock().time())
uptime = int(now - start_time)
if uptime < 0:
uptime = 0
stats["homeserver"] = hs.config.server_name
stats["timestamp"] = now
stats["uptime_seconds"] = uptime
version = sys.version_info
stats["python_version"] = "{}.{}.{}".format(
version.major, version.minor, version.micro
)
stats["total_users"] = yield hs.get_datastore().count_all_users()
total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
stats["total_nonbridged_users"] = total_nonbridged_users
daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
for name, count in iteritems(daily_user_type_results):
stats["daily_user_type_" + name] = count
room_count = yield hs.get_datastore().get_room_count()
stats["total_room_count"] = room_count
stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
r30_results = yield hs.get_datastore().count_r30_users()
for name, count in iteritems(r30_results):
stats["r30_users_" + name] = count
daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
stats["daily_sent_messages"] = daily_sent_messages
stats["cache_factor"] = CACHE_SIZE_FACTOR
stats["event_cache_size"] = hs.config.event_cache_size
if len(stats_process) > 0:
stats["memory_rss"] = 0
stats["cpu_average"] = 0
for process in stats_process:
stats["memory_rss"] += process.memory_info().rss
stats["cpu_average"] += int(process.cpu_percent(interval=None))
logger.info("Reporting stats to matrix.org: %s" % (stats,))
try:
yield hs.get_simple_http_client().put_json(
"https://matrix.org/report-usage-stats/push",
stats
)
except Exception as e:
logger.warn("Error reporting stats: %s", e)
def performance_stats_init():
try:
process = psutil.Process()
# Ensure we can fetch both, and make the initial request for cpu_percent
# so the next request will use this as the initial point.
process.memory_info().rss
process.cpu_percent(interval=None)
logger.info("report_stats can use psutil")
stats_process.append(process)
except (AttributeError):
logger.warning(
"Unable to read memory/cpu stats. Disabling reporting."
)
def generate_user_daily_visit_stats():
return run_as_background_process(
"generate_user_daily_visits",
hs.get_datastore().generate_user_daily_visits,
)
# Rather than update on per session basis, batch up the requests.
# If you increase the loop period, the accuracy of user_daily_visits
# table will decrease
clock.looping_call(generate_user_daily_visit_stats, 5 * 60 * 1000)
# monthly active user limiting functionality
def reap_monthly_active_users():
return run_as_background_process(
"reap_monthly_active_users",
hs.get_datastore().reap_monthly_active_users,
)
clock.looping_call(reap_monthly_active_users, 1000 * 60 * 60)
reap_monthly_active_users()
@defer.inlineCallbacks
def generate_monthly_active_users():
current_mau_count = 0
reserved_count = 0
store = hs.get_datastore()
if hs.config.limit_usage_by_mau:
current_mau_count = yield store.get_monthly_active_count()
reserved_count = yield store.get_registered_reserved_users_count()
current_mau_gauge.set(float(current_mau_count))
registered_reserved_users_mau_gauge.set(float(reserved_count))
max_mau_gauge.set(float(hs.config.max_mau_value))
def start_generate_monthly_active_users():
return run_as_background_process(
"generate_monthly_active_users",
generate_monthly_active_users,
)
start_generate_monthly_active_users()
if hs.config.limit_usage_by_mau:
clock.looping_call(start_generate_monthly_active_users, 5 * 60 * 1000)
# End of monthly active user settings
if hs.config.report_stats:
logger.info("Scheduling stats reporting for 3 hour intervals")
clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000)
# We need to defer this init for the cases that we daemonize
# otherwise the process ID we get is that of the non-daemon process
clock.call_later(0, performance_stats_init)
# We wait 5 minutes to send the first set of stats as the server can
# be quite busy the first few minutes
clock.call_later(5 * 60, start_phone_stats_home)
if hs.config.daemonize and hs.config.print_pidfile:
print(hs.config.pid_file)
_base.start_reactor(
"synapse-homeserver",
hs.config.soft_file_limit,
hs.config.gc_thresholds,
hs.config.pid_file,
hs.config.daemonize,
hs.config.cpu_affinity,
logger,
)
def main():
with LoggingContext("main"):
# check base requirements
check_requirements()
hs = setup(sys.argv[1:])
run(hs)
if __name__ == '__main__':
main()
|
the-stack_0_25556
|
import cv2
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
import json
import torch
import pycocotools.mask as mask_util
import numpy as np
from detectron2.structures import Instances, Boxes
def json_to_d2(pred_dict, device):
"""
    Client side helper function to rebuild Detectron2 outputs from an already-decoded JSON dict
"""
# pred_dict = json.loads(predictions)
for k, v in pred_dict.items():
if k=="pred_boxes":
boxes_to_tensor = torch.FloatTensor(v).to(device)
pred_dict[k] = Boxes(boxes_to_tensor)
if k=="scores":
pred_dict[k] = torch.Tensor(v).to(device)
if k=="pred_classes":
pred_dict[k] = torch.Tensor(v).to(device).to(torch.uint8)
height, width = pred_dict['image_size']
del pred_dict['image_size']
inst = Instances((height, width,), **pred_dict)
return {'instances':inst}
def d2_to_json(predictions):
"""
Server side helper function to serialize the d2 detections into JSON for API passing
"""
instances = predictions["instances"]
output = {}
# Iterate over fields in Instances
for k,v in instances.get_fields().items():
if k in ["scores", "pred_classes"]:
output[k] = v.tolist()
if k=="pred_boxes":
output[k] = v.tensor.tolist()
output['image_size'] = instances.image_size
output = json.dumps(output)
return output
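# Illustrative round trip (assumed config/image paths, not part of the original module):
#     predictor = DefaultPredictor(cfg)                       # cfg assumed to be a configured get_cfg()
#     payload = d2_to_json(predictor(cv2.imread("input.jpg")))
#     outputs = json_to_d2(json.loads(payload), torch.device("cpu"))
#     outputs["instances"].pred_boxes, outputs["instances"].scores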
|
the-stack_0_25557
|
# Copyright (c) 2016-2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import opentracing.tracer
from jaeger_client import Config, ConstSampler, ProbabilisticSampler, RateLimitingSampler
from jaeger_client import constants
from jaeger_client.config import DEFAULT_THROTTLER_PORT
from jaeger_client.metrics import MetricsFactory
from jaeger_client.reporter import NullReporter
from tests.test_utils import MockSampler
class ConfigTests(unittest.TestCase):
def test_enabled(self):
c = Config({'enabled': True}, service_name='x')
assert c.enabled
c = Config({'enabled': False}, service_name='x')
assert not c.enabled
def test_reporter_batch_size(self):
c = Config({'reporter_batch_size': 12345}, service_name='x')
assert c.reporter_batch_size == 12345
c = Config({}, service_name='x')
assert c.reporter_batch_size == 10
def test_tags(self):
os.environ['JAEGER_TAGS'] = 'a=b,c=d'
c = Config({'tags': {'e': 'f'}}, service_name='x')
assert c.tags == {'a': 'b', 'c': 'd', 'e': 'f'}
c.create_tracer(NullReporter(), ConstSampler(True))
def test_no_sampler(self):
c = Config({}, service_name='x')
assert c.sampler is None
def test_const_sampler(self):
c = Config({'sampler': {'type': 'const', 'param': True}},
service_name='x')
assert type(c.sampler) is ConstSampler
assert c.sampler.decision
c = Config({'sampler': {'type': 'const', 'param': False}},
service_name='x')
assert type(c.sampler) is ConstSampler
assert not c.sampler.decision
def test_probabilistic_sampler(self):
with self.assertRaises(Exception):
cfg = {'sampler': {'type': 'probabilistic', 'param': 'xx'}}
Config(cfg, service_name='x').sampler
c = Config({'sampler': {'type': 'probabilistic', 'param': 0.5}},
service_name='x')
assert type(c.sampler) is ProbabilisticSampler
assert c.sampler.rate == 0.5
def test_rate_limiting_sampler(self):
with self.assertRaises(Exception):
cfg = {'sampler': {'type': 'rate_limiting', 'param': 'xx'}}
Config(cfg, service_name='x').sampler
c = Config({'sampler': {'type': 'rate_limiting', 'param': 1234}},
service_name='x')
assert type(c.sampler) is RateLimitingSampler
assert c.sampler.traces_per_second == 1234
def test_bad_sampler(self):
c = Config({'sampler': {'type': 'bad-sampler'}}, service_name='x')
with self.assertRaises(ValueError):
c.sampler.is_sampled(0)
def test_object_sampler_sampler(self):
sampler = MockSampler()
c = Config({'sampler': sampler}, service_name='x')
assert c.sampler is sampler
def test_agent_reporting_host(self):
c = Config({}, service_name='x')
assert c.local_agent_reporting_host == 'localhost'
c = Config({'local_agent': {'reporting_host': 'jaeger.local'}}, service_name='x')
assert c.local_agent_reporting_host == 'jaeger.local'
os.environ['JAEGER_AGENT_HOST'] = 'jaeger-env.local'
c = Config({}, service_name='x')
assert c.local_agent_reporting_host == 'jaeger-env.local'
def test_max_tag_value_length(self):
c = Config({}, service_name='x')
assert c.max_tag_value_length == constants.MAX_TAG_VALUE_LENGTH
c = Config({'max_tag_value_length': 333}, service_name='x')
assert c.max_tag_value_length == 333
t = c.create_tracer(NullReporter(), ConstSampler(True))
assert t.max_tag_value_length == 333
def test_max_traceback_length(self):
c = Config({}, service_name='x')
assert c.max_traceback_length == constants.MAX_TRACEBACK_LENGTH
c = Config({'max_traceback_length': 333}, service_name='x')
assert c.max_traceback_length == 333
t = c.create_tracer(NullReporter(), ConstSampler(True))
assert t.max_traceback_length == 333
def test_propagation(self):
c = Config({}, service_name='x')
assert c.propagation == {}
c = Config({'propagation': 'b3'}, service_name='x')
assert len(c.propagation) == 1
def test_throttler(self):
c = Config({
'throttler': {}
}, service_name='x')
assert not c.throttler_group()
assert c.throttler_port == DEFAULT_THROTTLER_PORT
assert c.throttler_refresh_interval == constants.DEFAULT_THROTTLER_REFRESH_INTERVAL
c = Config({
'throttler': {
'port': '1234',
'refresh_interval': '10'
}
}, service_name='x')
assert c.throttler_group()
assert c.throttler_port == 1234
assert c.throttler_refresh_interval == 10
c = Config({}, service_name='x')
assert c.throttler_group() is None
assert c.throttler_port is None
assert c.throttler_refresh_interval is None
def test_for_unexpected_config_entries(self):
with self.assertRaises(Exception):
Config({'unexpected': 'value'}, validate=True)
def test_reporter_queue_size_valid(self):
config = Config({'reporter_queue_size': 100}, service_name='x', validate=True)
assert config.reporter_queue_size == 100
def test_missing_service_name(self):
with self.assertRaises(ValueError):
Config({})
def test_disable_metrics(self):
config = Config({'metrics': False}, service_name='x')
assert isinstance(config._metrics_factory, MetricsFactory)
def test_initialize_tracer(self):
c = Config({}, service_name='x')
tracer = c.initialize_tracer()
assert opentracing.global_tracer() == tracer
def test_initialize_tracer_twice(self):
c = Config({}, service_name='x')
tracer = c.initialize_tracer()
tracer = c.initialize_tracer()
assert tracer is None
def test_default_local_agent_reporting_port(self):
c = Config({}, service_name='x')
assert c.local_agent_reporting_port == 6831
assert c.local_agent_enabled is True
def test_generate_128bit_trace_id(self):
c = Config({}, service_name='x')
assert c.generate_128bit_trace_id is False
c = Config({'generate_128bit_trace_id': True}, service_name='x')
assert c.generate_128bit_trace_id is True
os.environ['JAEGER_TRACEID_128BIT'] = 'true'
c = Config({'generate_128bit_trace_id': False}, service_name='x')
assert c.generate_128bit_trace_id is False
c = Config({}, service_name='x')
assert c.generate_128bit_trace_id is True
os.environ.pop('JAEGER_TRACEID_128BIT')
assert os.getenv('JAEGER_TRACEID_128BIT', None) is None
|
the-stack_0_25558
|
#!/usr/bin/env python3
import sys
def set_path(path: str):
try:
sys.path.index(path)
except ValueError:
sys.path.insert(0, path)
from matplotlib.backends.backend_agg import FigureCanvasAgg
from utils import render, datautils, arguments, pfnet_loss
from gibson2.utils.assets_utils import get_scene_path
from gibson2.envs.igibson_env import iGibsonEnv
import matplotlib.pyplot as plt
import tensorflow as tf
from PIL import Image
import pybullet as p
import numpy as np
import cv2
import gym
import os
# set programatically the path to 'pfnet' directory (alternately can also set PYTHONPATH)
set_path('/media/suresh/research/awesome-robotics/active-slam/catkin_ws/src/sim-environment/src/tensorflow/pfnet')
# set_path('/home/guttikon/awesome_robotics/sim-environment/src/tensorflow/pfnet')
import pfnet
class LocalizeGibsonEnv(iGibsonEnv):
def __init__(self, params):
self.params = params
# create pf model
self.pfnet_model = pfnet.pfnet_model(self.params)
# load model from checkpoint file
if self.params.pfnet_load:
self.pfnet_model.load_weights(self.params.pfnet_load)
print("=====> Loaded pf model from " + params.pfnet_load)
super(LocalizeGibsonEnv, self).__init__(config_file=self.params.config_filename,
scene_id=None,
mode=self.params.mode,
action_timestep=1/10.0,
physics_timestep=1/240.0,
device_idx=self.params.gpu_num,
render_to_tensor=False,
automatic_reset=False)
output_size = 18 + np.prod((56, 56, 3))
# override
self.observation_space = gym.spaces.Box(
low=-np.inf, high=np.inf,
shape=(output_size, ),
dtype=np.float32)
self.task.termination_conditions[0].max_collisions_allowed = self.params.max_step
self.task.termination_conditions[1].max_step = self.params.max_step
print("=====> iGibsonEnv initialized")
if self.params.use_plot:
# code related to displaying results in matplotlib
self.fig = plt.figure(figsize=(7, 7))
self.plt_ax = None
self.env_plts = {
'map_plt': None,
'robot_gt_plt': {
'position_plt': None,
'heading_plt': None,
},
'robot_est_plt': {
'position_plt': None,
'heading_plt': None,
'particles_plt': None,
},
'step_txt_plt': None,
}
            # HACK: FigureCanvasAgg and plt.ion() do not work together
if self.params.store_plot:
self.canvas = FigureCanvasAgg(self.fig)
else:
plt.ion()
plt.show()
def load_miscellaneous_variables(self):
"""
        Load miscellaneous variables for bookkeeping
"""
super(LocalizeGibsonEnv, self).load_miscellaneous_variables()
self.obstacle_map = None
self.pfnet_state = None
self.floor_map = None
self.robot_obs = None
self.robot_pose = None
self.plt_images = []
def reset_variables(self):
"""
Reset bookkeeping variables for the next new episode
"""
super(LocalizeGibsonEnv, self).reset_variables()
self.obstacle_map = None
self.pfnet_state = None
self.floor_map = None
self.robot_obs = None
self.robot_pose = None
self.plt_images = []
def step(self, action):
trajlen = 1
batch_size = self.params.batch_size
num_particles = self.params.num_particles
old_obs = self.robot_obs
floor_map = self.floor_map[0]
old_pfnet_state = self.pfnet_state
old_pose = self.robot_pose[0].numpy()
# perform env step
state, reward, done, info = super(LocalizeGibsonEnv, self).step(action)
# process new env observation
rgb = datautils.process_raw_image(state['rgb'])
robot_state = self.robots[0].calc_state()
custom_state = np.concatenate([robot_state, np.reshape(rgb, [-1])], 0)
# process new robot state
new_pose = self.get_robot_pose(robot_state, floor_map.shape)
# calculate actual odometry b/w old pose and new pose
assert list(old_pose.shape) == [3] and list(new_pose.shape) == [3]
odom = datautils.calc_odometry(old_pose, new_pose)
new_obs = tf.expand_dims(
tf.convert_to_tensor(rgb, dtype=tf.float32)
, axis=0)
odom = tf.expand_dims(
tf.convert_to_tensor(odom, dtype=tf.float32)
, axis=0)
new_pose = tf.expand_dims(
tf.convert_to_tensor(new_pose, dtype=tf.float32)
, axis=0)
odometry = tf.expand_dims(odom, axis=1)
observation = tf.expand_dims(old_obs, axis=1)
# sanity check
assert list(odometry.shape) == [batch_size, trajlen, 3]
assert list(observation.shape) == [batch_size, trajlen, 56, 56, 3]
assert list(old_pfnet_state[0].shape) == [batch_size, num_particles, 3]
assert list(old_pfnet_state[1].shape) == [batch_size, num_particles]
input = [observation, odometry]
model_input = (input, old_pfnet_state)
# if stateful: reset RNN s.t. initial_state is set to initial particles and weights
        # if non-stateful: pass the state explicitly every step
if self.params.stateful:
self.pfnet_model.layers[-1].reset_states(old_pfnet_state) # RNN layer
# forward pass
output, new_pfnet_state = self.pfnet_model(model_input, training=False)
# compute loss
particles, particle_weights = output # before transition update
true_pose = tf.expand_dims(self.robot_pose, axis=1)
assert list(true_pose.shape) == [batch_size, trajlen, 3]
assert list(particles.shape) == [batch_size, trajlen, num_particles, 3]
assert list(particle_weights.shape) == [batch_size, trajlen, num_particles]
loss_dict = pfnet_loss.compute_loss(particles, particle_weights, true_pose, self.params.map_pixel_in_meters)
        reward = reward - tf.squeeze(loss_dict['coords']).numpy()  # penalize the reward by the pose coordinate loss
self.pfnet_state = new_pfnet_state
self.robot_pose = new_pose
self.robot_obs = new_obs
return custom_state, reward, done, info
def reset(self):
batch_size = self.params.batch_size
map_size = self.params.global_map_size
num_particles = self.params.num_particles
particles_cov = self.params.init_particles_cov
particles_distr = self.params.init_particles_distr
if self.params.use_plot:
#clear subplots
plt.clf()
self.plt_ax = self.fig.add_subplot(111)
self.env_plts = {
'map_plt': None,
'robot_gt_plt': {
'position_plt': None,
'heading_plt': None,
},
'robot_est_plt': {
'position_plt': None,
'heading_plt': None,
'particles_plt': None,
},
'step_txt_plt': None,
}
self.store_results()
# perform env reset
state = super(LocalizeGibsonEnv, self).reset()
# process new env observation
rgb = datautils.process_raw_image(state['rgb'])
robot_state = self.robots[0].calc_state()
custom_state = np.concatenate([robot_state, np.reshape(rgb, [-1])], 0)
# process new env map
floor_map = self.get_floor_map()
obstacle_map = self.get_obstacle_map()
# process new robot state
true_pose = self.get_robot_pose(robot_state, floor_map.shape)
obs = tf.expand_dims(
tf.convert_to_tensor(rgb, dtype=tf.float32)
, axis=0)
true_pose = tf.expand_dims(
tf.convert_to_tensor(true_pose, dtype=tf.float32)
, axis=0)
floor_map = tf.expand_dims(
tf.convert_to_tensor(floor_map, dtype=tf.float32)
, axis=0)
obstacle_map = tf.expand_dims(
tf.convert_to_tensor(obstacle_map, dtype=tf.float32)
, axis=0)
init_particles = tf.convert_to_tensor(
self.get_random_particles(
num_particles,
particles_distr,
true_pose.numpy(),
floor_map,
particles_cov)
, dtype=tf.float32)
init_particle_weights = tf.constant(
np.log(1.0/float(num_particles)),
shape=(batch_size, num_particles),
dtype=tf.float32)
# sanity check
assert list(true_pose.shape) == [batch_size, 3]
assert list(obs.shape) == [batch_size, 56, 56, 3]
assert list(init_particles.shape) == [batch_size, num_particles, 3]
assert list(init_particle_weights.shape) == [batch_size, num_particles]
assert list(floor_map.shape) == [batch_size, map_size[0], map_size[1], map_size[2]]
assert list(obstacle_map.shape) == [batch_size, map_size[0], map_size[1], map_size[2]]
self.pfnet_state = [init_particles, init_particle_weights, obstacle_map]
self.obstacle_map = obstacle_map
self.floor_map = floor_map
self.robot_pose = true_pose
self.robot_obs = obs
return custom_state
def get_robot_pose(self, robot_state, floor_map_shape):
robot_pos = robot_state[0:3] # [x, y, z]
robot_orn = robot_state[3:6] # [r, p, y]
# transform from co-ordinate space to pixel space
robot_pos_xy = datautils.transform_pose(robot_pos[:2], floor_map_shape, self.scene.trav_map_resolution**2) # [x, y]
robot_pose = np.array([robot_pos_xy[0], robot_pos_xy[1], robot_orn[2]]) # [x, y, theta]
return robot_pose
def get_est_pose(self, particles, lin_weights):
batch_size = self.params.batch_size
num_particles = self.params.num_particles
assert list(particles.shape) == [batch_size, num_particles, 3]
assert list(lin_weights.shape) == [batch_size, num_particles]
est_pose = tf.math.reduce_sum(tf.math.multiply(
particles[:, :, :], lin_weights[:, :, None]
), axis=1)
assert list(est_pose.shape) == [batch_size, 3]
# normalize between [-pi, +pi]
part_x, part_y, part_th = tf.unstack(est_pose, axis=-1, num=3) # (k, 3)
part_th = tf.math.floormod(part_th + np.pi, 2*np.pi) - np.pi
est_pose = tf.stack([part_x, part_y, part_th], axis=-1)
return est_pose
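    # Worked example (illustrative numbers, not from a real run): with two
    # particles per batch at poses (10, 10, 0) and (20, 10, 0) and softmaxed
    # weights (0.75, 0.25), the weighted sum above yields (12.5, 10.0, 0.0);
    # the floormod step then wraps the heading angle back into [-pi, +pi).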
def get_obstacle_map(self):
"""
Get the scene obstacle map
"""
obstacle_map = np.array(Image.open(
os.path.join(get_scene_path(self.config.get('scene_id')),
f'floor_{self.task.floor_num}.png')
))
# process image for training
obstacle_map = datautils.process_floor_map(obstacle_map)
return obstacle_map
def get_floor_map(self):
"""
Get the scene floor map (traversability map + obstacle map)
:return ndarray: floor map of current scene (H, W, 1)
"""
obstacle_map = np.array(Image.open(
os.path.join(get_scene_path(self.config.get('scene_id')),
f'floor_{self.task.floor_num}.png')
))
trav_map = np.array(Image.open(
os.path.join(get_scene_path(self.config.get('scene_id')),
f'floor_trav_{self.task.floor_num}.png')
))
trav_map[obstacle_map == 0] = 0
        trav_map_erosion = self.config.get('trav_map_erosion', 2)
trav_map = cv2.erode(trav_map, np.ones((trav_map_erosion, trav_map_erosion)))
trav_map[trav_map < 255] = 0
# process image for training
floor_map = datautils.process_floor_map(trav_map)
return floor_map
def get_random_particles(self, num_particles, particles_distr, robot_pose, scene_map, particles_cov):
"""
Sample random particles based on the scene
:param particles_distr: string type of distribution, possible value: [gaussian, uniform]
:param robot_pose: ndarray indicating the robot pose ([batch_size], 3) in pixel space
            if None, random particle poses are sampled using a uniform distribution
otherwise, sampled using gaussian distribution around the robot_pose
:param particles_cov: for tracking Gaussian covariance matrix (3, 3)
:param num_particles: integer indicating the number of random particles per batch
:return ndarray: random particle poses (batch_size, num_particles, 3) in pixel space
"""
assert list(robot_pose.shape) == [1, 3]
assert list(particles_cov.shape) == [3, 3]
particles = []
batches = robot_pose.shape[0]
if particles_distr == 'uniform':
# iterate per batch_size
for b_idx in range(batches):
sample_i = 0
b_particles = []
# get bounding box for more efficient sampling
# rmin, rmax, cmin, cmax = self.bounding_box(self.floor_map)
rmin, rmax, cmin, cmax = self.bounding_box(scene_map, robot_pose[b_idx], lmt=100)
while sample_i < num_particles:
particle = np.random.uniform(low=(cmin, rmin, 0.0), high=(cmax, rmax, 2.0*np.pi), size=(3, ))
# reject if mask is zero
if not scene_map[int(np.rint(particle[1])), int(np.rint(particle[0]))]:
continue
b_particles.append(particle)
sample_i = sample_i + 1
particles.append(b_particles)
elif particles_distr == 'gaussian':
# iterate per batch_size
for b_idx in range(batches):
# sample offset from the Gaussian
center = np.random.multivariate_normal(mean=robot_pose[b_idx], cov=particles_cov)
# sample particles from the Gaussian, centered around the offset
particles.append(np.random.multivariate_normal(mean=center, cov=particles_cov, size=num_particles))
else:
raise ValueError
particles = np.stack(particles) # [batch_size, num_particles, 3]
return particles
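    # Minimal call sketch (values are illustrative; robot_pose is in pixel
    # space and particles_cov is a 3x3 covariance, as documented above):
    #   particles = self.get_random_particles(
    #       num_particles=500,
    #       particles_distr='gaussian',
    #       robot_pose=np.array([[120.0, 80.0, 0.0]]),
    #       scene_map=self.floor_map,
    #       particles_cov=np.diag([10.0, 10.0, 0.1]))
    #   # -> particles.shape == (1, 500, 3)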
def bounding_box(self, img, robot_pose=None, lmt=100):
"""
Bounding box of non-zeros in an array.
:param img: numpy array
:param robot_pose: numpy array of robot pose
:param lmt: integer representing width/length of bounding box
:return (int, int, int, int): bounding box indices top_row, bottom_row, left_column, right_column
"""
rows = np.any(img, axis=1)
cols = np.any(img, axis=0)
rmin, rmax = np.where(rows)[0][[0, -1]]
cmin, cmax = np.where(cols)[0][[0, -1]]
if robot_pose is not None:
            # further constrain the bounding box
x, y, _ = robot_pose
rmin = np.rint(y-lmt) if (y-lmt) > rmin else rmin
rmax = np.rint(y+lmt) if (y+lmt) < rmax else rmax
cmin = np.rint(x-lmt) if (x-lmt) > cmin else cmin
cmax = np.rint(x+lmt) if (x+lmt) < cmax else cmax
return rmin, rmax, cmin, cmax
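    # Worked example (hypothetical map): if the non-zero extent of img spans
    # rows 0..999 and cols 0..999, and robot_pose is (x=400, y=300) with
    # lmt=100, the box is clipped to rows [200, 400] and cols [300, 500],
    # which keeps uniform particle sampling close to the robot.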
def render(self, mode='human'):
"""
Render plots
"""
# super(LocalizeGibsonEnv, self).render(mode)
if self.params.use_plot:
# environment map
floor_map = self.floor_map[0].numpy()
map_plt = self.env_plts['map_plt']
map_plt = render.draw_floor_map(floor_map, self.plt_ax, map_plt)
self.env_plts['map_plt'] = map_plt
# ground truth robot pose and heading
color = '#7B241C'
robot_pose = self.robot_pose[0].numpy()
position_plt = self.env_plts['robot_gt_plt']['position_plt']
heading_plt = self.env_plts['robot_gt_plt']['heading_plt']
position_plt, heading_plt = render.draw_robot_pose(
robot_pose,
color,
floor_map.shape,
self.plt_ax,
position_plt,
heading_plt)
self.env_plts['robot_gt_plt']['position_plt'] = position_plt
self.env_plts['robot_gt_plt']['heading_plt'] = heading_plt
particles, particle_weights, _ = self.pfnet_state # after transition update
lin_weights = tf.nn.softmax(particle_weights, axis=-1)
# estimated robot pose and heading
color = '#515A5A'
est_pose = self.get_est_pose(particles, lin_weights)[0].numpy() + 10
position_plt = self.env_plts['robot_est_plt']['position_plt']
heading_plt = self.env_plts['robot_est_plt']['heading_plt']
position_plt, heading_plt = render.draw_robot_pose(
est_pose,
color,
floor_map.shape,
self.plt_ax,
position_plt,
heading_plt)
self.env_plts['robot_est_plt']['position_plt'] = position_plt
self.env_plts['robot_est_plt']['heading_plt'] = heading_plt
# particles color coded using weights
particles_plt = self.env_plts['robot_est_plt']['particles_plt']
particles_plt = render.draw_particles_pose(
particles[0].numpy(),
lin_weights[0].numpy(),
floor_map.shape,
particles_plt)
self.env_plts['robot_est_plt']['particles_plt'] = particles_plt
# episode info
step_txt_plt = self.env_plts['step_txt_plt']
step_txt_plt = render.draw_text(
f'episode: {self.current_episode}, step: {self.current_step}',
'#7B241C', self.plt_ax, step_txt_plt)
self.env_plts['step_txt_plt'] = step_txt_plt
self.plt_ax.legend([self.env_plts['robot_gt_plt']['position_plt'],
self.env_plts['robot_est_plt']['position_plt']],
["gt_pose", "est_pose"], loc='upper left')
if self.params.store_plot:
self.canvas.draw()
plt_img = np.array(self.canvas.renderer._renderer)
plt_img = cv2.cvtColor(plt_img, cv2.COLOR_RGB2BGR)
self.plt_images.append(plt_img)
else:
plt.draw()
plt.pause(0.00000000001)
def close(self):
"""
environment close()
"""
super(LocalizeGibsonEnv, self).close()
if self.params.use_plot:
if self.params.store_plot:
self.store_results()
else:
# to prevent plot from closing after environment is closed
plt.ioff()
plt.show()
print("=====> iGibsonEnv closed")
def store_results(self):
if len(self.plt_images) > 0:
fps = 30
            frameSize = (self.plt_images[0].shape[1], self.plt_images[0].shape[0])  # cv2.VideoWriter expects (width, height)
out = cv2.VideoWriter(
self.params.out_folder + f'episode_run_{self.current_episode}.avi',
cv2.VideoWriter_fourcc(*'XVID'),
fps, frameSize)
for img in self.plt_images:
out.write(img)
out.release()
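# A minimal usage sketch for this environment (commented out because it needs
# iGibson assets and a pfnet checkpoint; `arguments.parse_args()` is assumed to
# return the params namespace used throughout this class):
# if __name__ == '__main__':
#     params = arguments.parse_args()
#     env = LocalizeGibsonEnv(params)
#     obs = env.reset()
#     for _ in range(10):
#         obs, reward, done, info = env.step(env.action_space.sample())
#         env.render(mode='human')
#         if done:
#             break
#     env.close()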
|
the-stack_0_25560
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import contextlib
import datetime
from neutron_lib import context as neutron_context
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from ovs.stream import Stream
from ovsdbapp.backend.ovs_idl import connection
from ovsdbapp.backend.ovs_idl import event as row_event
from ovsdbapp.backend.ovs_idl import idlutils
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import exceptions
from neutron.common.ovn import hash_ring_manager
from neutron.common.ovn import utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
from neutron.db import ovn_hash_ring_db
from neutron.plugins.ml2.drivers.ovn.agent import neutron_agent as n_agent
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class BaseEvent(row_event.RowEvent):
table = None
events = tuple()
def __init__(self):
self.event_name = self.__class__.__name__
super(BaseEvent, self).__init__(self.events, self.table, None)
@abc.abstractmethod
def match_fn(self, event, row, old=None):
"""Define match criteria other than table/event"""
def matches(self, event, row, old=None):
if row._table.name != self.table or event not in self.events:
return False
if not self.match_fn(event, row, old):
return False
LOG.debug("%s : Matched %s, %s, %s %s", self.event_name, self.table,
event, self.conditions, self.old_conditions)
return True
class ChassisEvent(row_event.RowEvent):
"""Chassis create update delete event."""
def __init__(self, driver):
self.driver = driver
self.l3_plugin = directory.get_plugin(constants.L3)
table = 'Chassis'
events = (self.ROW_CREATE, self.ROW_UPDATE, self.ROW_DELETE)
super(ChassisEvent, self).__init__(events, table, None)
self.event_name = 'ChassisEvent'
def handle_ha_chassis_group_changes(self, event, row, old):
"""Handle HA Chassis Group changes.
This method handles the inclusion and removal of Chassis to/from
the default HA Chassis Group.
"""
if not self.driver._ovn_client.is_external_ports_supported():
return
is_gw_chassis = utils.is_gateway_chassis(row)
# If the Chassis being created is not a gateway, ignore it
if not is_gw_chassis and event == self.ROW_CREATE:
return
if event == self.ROW_UPDATE:
is_old_gw = utils.is_gateway_chassis(old)
if is_gw_chassis and is_old_gw:
return
elif not is_gw_chassis and is_old_gw:
# Chassis is not a gateway anymore, treat it as deletion
event = self.ROW_DELETE
elif is_gw_chassis and not is_old_gw:
# Chassis is now a gateway, treat it as creation
event = self.ROW_CREATE
if event == self.ROW_CREATE:
default_group = self.driver.nb_ovn.ha_chassis_group_get(
ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME).execute(
check_error=True)
            # Find the lowest priority number currently in the group
            # and add the new chassis as the new lowest
min_priority = min(
[ch.priority for ch in default_group.ha_chassis],
default=ovn_const.HA_CHASSIS_GROUP_HIGHEST_PRIORITY)
self.driver.nb_ovn.ha_chassis_group_add_chassis(
ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, row.name,
priority=min_priority - 1).execute(check_error=True)
elif event == self.ROW_DELETE:
self.driver.nb_ovn.ha_chassis_group_del_chassis(
ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME,
row.name, if_exists=True).execute(check_error=True)
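    # Illustrative example (assuming HA_CHASSIS_GROUP_HIGHEST_PRIORITY is
    # 32767): if the default group already holds chassis with priorities
    # 32767 and 32766, a newly added gateway chassis is inserted with
    # priority 32765, i.e. always one below the current lowest.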
def match_fn(self, event, row, old):
if event != self.ROW_UPDATE:
return True
        # NOTE(lucasgomes): If the external_ids column wasn't updated
        # (meaning the Chassis "gateway" status didn't change), just return
if not hasattr(old, 'external_ids') and event == self.ROW_UPDATE:
return
if (old.external_ids.get('ovn-bridge-mappings') !=
row.external_ids.get('ovn-bridge-mappings')):
return True
f = utils.is_gateway_chassis
return f(old) != f(row)
def run(self, event, row, old):
host = row.hostname
phy_nets = []
if event != self.ROW_DELETE:
bridge_mappings = row.external_ids.get('ovn-bridge-mappings', '')
mapping_dict = helpers.parse_mappings(bridge_mappings.split(','),
unique_values=False)
phy_nets = list(mapping_dict)
self.driver.update_segment_host_mapping(host, phy_nets)
if utils.is_ovn_l3(self.l3_plugin):
# If chassis lost physnet or has been
# deleted we can limit the scope and
# reschedule only ports from this chassis.
# In other cases we need to reschedule all gw ports.
kwargs = {'event_from_chassis': None}
if event == self.ROW_DELETE:
kwargs['event_from_chassis'] = row.name
elif event == self.ROW_UPDATE:
old_mappings = old.external_ids.get('ovn-bridge-mappings',
set()) or set()
new_mappings = row.external_ids.get('ovn-bridge-mappings',
set()) or set()
if old_mappings:
old_mappings = set(old_mappings.split(','))
if new_mappings:
new_mappings = set(new_mappings.split(','))
mappings_removed = old_mappings - new_mappings
mappings_added = new_mappings - old_mappings
if mappings_removed and not mappings_added:
# Mapping has been only removed. So we can
# limit scope of rescheduling only to impacted
# gateway chassis.
kwargs['event_from_chassis'] = row.name
self.l3_plugin.schedule_unhosted_gateways(**kwargs)
self.handle_ha_chassis_group_changes(event, row, old)
class PortBindingChassisUpdateEvent(row_event.RowEvent):
"""Event for matching a port moving chassis
If the LSP is up and the Port_Binding chassis has just changed,
there is a good chance the host died without cleaning up the chassis
    column on the Port_Binding. The port never goes down, so the driver
    is never notified via the LogicalSwitchPortUpdateUpEvent, which only
    monitors for transitions from DOWN to UP.
"""
def __init__(self, driver):
self.driver = driver
table = 'Port_Binding'
events = (self.ROW_UPDATE,)
super(PortBindingChassisUpdateEvent, self).__init__(
events, table, None)
self.event_name = self.__class__.__name__
def match_fn(self, event, row, old=None):
# NOTE(twilson) ROW_UPDATE events always pass old, but chassis will
# only be set if chassis has changed
old_chassis = getattr(old, 'chassis', None)
if not (row.chassis and old_chassis) or row.chassis == old_chassis:
return False
if row.type == ovn_const.OVN_CHASSIS_REDIRECT:
return False
try:
lsp = self.driver.nb_ovn.lookup('Logical_Switch_Port',
row.logical_port)
except idlutils.RowNotFound:
LOG.warning("Logical Switch Port %(port)s not found for "
"Port_Binding %(binding)s",
{'port': row.logical_port, 'binding': row.uuid})
return False
return bool(lsp.up)
def run(self, event, row, old=None):
self.driver.set_port_status_up(row.logical_port)
class ChassisAgentEvent(BaseEvent):
GLOBAL = True
# NOTE (twilson) Do not run new transactions out of a GLOBAL Event since
# it will be running on every single process, and you almost certainly
# don't want to insert/update/delete something a bajillion times.
def __init__(self, driver):
self.driver = driver
super().__init__()
@property
def table(self):
# It probably doesn't matter, but since agent_chassis_table changes
# in post_fork_initialize(), resolve this at runtime
return self.driver.agent_chassis_table
@table.setter
def table(self, value):
pass
class ChassisAgentDownEvent(ChassisAgentEvent):
events = (BaseEvent.ROW_DELETE,)
def run(self, event, row, old):
for agent in n_agent.AgentCache().agents_by_chassis_private(row):
agent.set_down = True
def match_fn(self, event, row, old=None):
return True
class ChassisAgentDeleteEvent(ChassisAgentEvent):
events = (BaseEvent.ROW_UPDATE,)
table = 'SB_Global'
def match_fn(self, event, row, old=None):
try:
return (old.external_ids.get('delete_agent') !=
row.external_ids['delete_agent'])
except (AttributeError, KeyError):
return False
def run(self, event, row, old):
del n_agent.AgentCache()[row.external_ids['delete_agent']]
class ChassisAgentWriteEvent(ChassisAgentEvent):
events = (BaseEvent.ROW_CREATE, BaseEvent.ROW_UPDATE)
def match_fn(self, event, row, old=None):
# On updates to Chassis_Private because the Chassis has been deleted,
# don't update the AgentCache. We use chassis_private.chassis to return
# data about the agent.
return event == self.ROW_CREATE or (
getattr(old, 'nb_cfg', False) and not
(self.table == 'Chassis_Private' and not row.chassis))
def run(self, event, row, old):
n_agent.AgentCache().update(ovn_const.OVN_CONTROLLER_AGENT, row,
clear_down=event == self.ROW_CREATE)
class ChassisMetadataAgentWriteEvent(ChassisAgentEvent):
events = (BaseEvent.ROW_CREATE, BaseEvent.ROW_UPDATE)
@staticmethod
def _metadata_nb_cfg(row):
return int(
row.external_ids.get(ovn_const.OVN_AGENT_METADATA_SB_CFG_KEY, -1))
@staticmethod
def agent_id(row):
return row.external_ids.get(ovn_const.OVN_AGENT_METADATA_ID_KEY)
def match_fn(self, event, row, old=None):
if not self.agent_id(row):
# Don't create a cached object with an agent_id of 'None'
return False
if event == self.ROW_CREATE:
return True
try:
# On updates to Chassis_Private because the Chassis has been
# deleted, don't update the AgentCache. We use
# chassis_private.chassis to return data about the agent.
if self.table == 'Chassis_Private' and not row.chassis:
return False
return self._metadata_nb_cfg(row) != self._metadata_nb_cfg(old)
except (AttributeError, KeyError):
return False
def run(self, event, row, old):
n_agent.AgentCache().update(ovn_const.OVN_METADATA_AGENT, row,
clear_down=True)
class PortBindingChassisEvent(row_event.RowEvent):
"""Port_Binding update event - set chassis for chassisredirect port.
When a chassisredirect port is updated with chassis, this event get
generated. We will update corresponding router's gateway port with
the chassis's host_id. Later, users can check router's gateway port
host_id to find the location of primary HA router.
"""
def __init__(self, driver):
self.driver = driver
self.l3_plugin = directory.get_plugin(constants.L3)
table = 'Port_Binding'
events = (self.ROW_UPDATE,)
super(PortBindingChassisEvent, self).__init__(
events, table, (('type', '=', ovn_const.OVN_CHASSIS_REDIRECT),))
self.event_name = 'PortBindingChassisEvent'
def run(self, event, row, old):
if not utils.is_ovn_l3(self.l3_plugin):
return
router = host = None
chassis = getattr(row, 'chassis', None)
if chassis:
router = row.datapath.external_ids.get('name', '').replace(
'neutron-', '')
host = chassis[0].hostname
LOG.info("Router %(router)s is bound to host %(host)s",
{'router': router, 'host': host})
self.l3_plugin.update_router_gateway_port_bindings(
router, host)
class LogicalSwitchPortCreateUpEvent(row_event.RowEvent):
"""Row create event - Logical_Switch_Port 'up' = True.
On connection, we get a dump of all ports, so if there is a neutron
port that is down that has since been activated, we'll catch it here.
This event will not be generated for new ports getting created.
"""
def __init__(self, driver):
self.driver = driver
table = 'Logical_Switch_Port'
events = (self.ROW_CREATE,)
super(LogicalSwitchPortCreateUpEvent, self).__init__(
events, table, (('up', '=', True),))
self.event_name = 'LogicalSwitchPortCreateUpEvent'
def run(self, event, row, old):
self.driver.set_port_status_up(row.name)
class LogicalSwitchPortCreateDownEvent(row_event.RowEvent):
"""Row create event - Logical_Switch_Port 'up' = False
On connection, we get a dump of all ports, so if there is a neutron
port that is up that has since been deactivated, we'll catch it here.
This event will not be generated for new ports getting created.
"""
def __init__(self, driver):
self.driver = driver
table = 'Logical_Switch_Port'
events = (self.ROW_CREATE,)
super(LogicalSwitchPortCreateDownEvent, self).__init__(
events, table, (('up', '=', False),))
self.event_name = 'LogicalSwitchPortCreateDownEvent'
def run(self, event, row, old):
self.driver.set_port_status_down(row.name)
class LogicalSwitchPortUpdateUpEvent(row_event.RowEvent):
"""Row update event - Logical_Switch_Port 'up' going from False to True
This happens when the VM goes up.
New value of Logical_Switch_Port 'up' will be True and the old value will
be False.
"""
def __init__(self, driver):
self.driver = driver
table = 'Logical_Switch_Port'
events = (self.ROW_UPDATE,)
super(LogicalSwitchPortUpdateUpEvent, self).__init__(
events, table, (('up', '=', True),),
old_conditions=(('up', '=', False),))
self.event_name = 'LogicalSwitchPortUpdateUpEvent'
def run(self, event, row, old):
self.driver.set_port_status_up(row.name)
class LogicalSwitchPortUpdateDownEvent(row_event.RowEvent):
"""Row update event - Logical_Switch_Port 'up' going from True to False
This happens when the VM goes down.
New value of Logical_Switch_Port 'up' will be False and the old value will
be True.
"""
def __init__(self, driver):
self.driver = driver
table = 'Logical_Switch_Port'
events = (self.ROW_UPDATE,)
super(LogicalSwitchPortUpdateDownEvent, self).__init__(
events, table, (('up', '=', False),),
old_conditions=(('up', '=', True),))
self.event_name = 'LogicalSwitchPortUpdateDownEvent'
def run(self, event, row, old):
self.driver.set_port_status_down(row.name)
class FIPAddDeleteEvent(row_event.RowEvent):
"""Row event - NAT 'dnat_and_snat' entry added or deleted
This happens when a FIP is created or removed.
"""
def __init__(self, driver):
self.driver = driver
table = 'NAT'
events = (self.ROW_CREATE, self.ROW_DELETE)
super(FIPAddDeleteEvent, self).__init__(
events, table, (('type', '=', 'dnat_and_snat'),))
self.event_name = 'FIPAddDeleteEvent'
def run(self, event, row, old):
# When a FIP is added or deleted, we will delete all entries in the
# MAC_Binding table of SB OVSDB corresponding to that IP Address.
# TODO(dalvarez): Remove this workaround once fixed in core OVN:
# https://mail.openvswitch.org/pipermail/ovs-discuss/2018-October/047604.html
self.driver.delete_mac_binding_entries(row.external_ip)
class NeutronPgDropPortGroupCreated(row_event.WaitEvent):
"""WaitEvent for neutron_pg_drop Create event."""
def __init__(self):
table = 'Port_Group'
events = (self.ROW_CREATE,)
conditions = (('name', '=', ovn_const.OVN_DROP_PORT_GROUP_NAME),)
super(NeutronPgDropPortGroupCreated, self).__init__(
events, table, conditions)
self.event_name = 'PortGroupCreated'
class OvnDbNotifyHandler(row_event.RowEventHandler):
def __init__(self, driver):
self.driver = driver
super(OvnDbNotifyHandler, self).__init__()
try:
self._lock = self._RowEventHandler__lock
self._watched_events = self._RowEventHandler__watched_events
except AttributeError:
pass
def notify(self, event, row, updates=None, global_=False):
row = idlutils.frozen_row(row)
matching = self.matching_events(event, row, updates, global_)
for match in matching:
self.notifications.put((match, event, row, updates))
def matching_events(self, event, row, updates, global_=False):
with self._lock:
return tuple(t for t in self._watched_events
if getattr(t, 'GLOBAL', False) == global_ and
self.match(t, event, row, updates))
class Ml2OvnIdlBase(connection.OvsdbIdl):
def __init__(self, remote, schema, probe_interval=(), **kwargs):
if probe_interval == (): # None is a valid value to pass
probe_interval = ovn_conf.get_ovn_ovsdb_probe_interval()
super(Ml2OvnIdlBase, self).__init__(
remote, schema, probe_interval=probe_interval, **kwargs)
class BaseOvnIdl(Ml2OvnIdlBase):
def __init__(self, remote, schema):
self.notify_handler = row_event.RowEventHandler()
super(BaseOvnIdl, self).__init__(remote, schema)
@classmethod
def from_server(cls, connection_string, helper):
helper.register_all()
return cls(connection_string, helper)
def notify(self, event, row, updates=None):
self.notify_handler.notify(event, row, updates)
class BaseOvnSbIdl(Ml2OvnIdlBase):
def __init__(self, remote, schema):
self.notify_handler = row_event.RowEventHandler()
super().__init__(remote, schema)
@classmethod
def from_server(cls, connection_string, helper):
helper.register_table('Chassis')
helper.register_table('Encap')
helper.register_table('Port_Binding')
helper.register_table('Datapath_Binding')
return cls(connection_string, helper)
class OvnIdl(BaseOvnIdl):
def __init__(self, driver, remote, schema):
super(OvnIdl, self).__init__(remote, schema)
self.driver = driver
self.notify_handler = OvnDbNotifyHandler(driver)
# ovsdb lock name to acquire.
# This event lock is used to handle the notify events sent by idl.Idl
# idl.Idl will call notify function for the "update" rpc method it
# receives from the ovsdb-server.
# This event lock is required for the following reasons
# - If there are multiple neutron servers running, OvnWorkers of
# these neutron servers would receive the notify events from
# idl.Idl
#
# - we do not want all the neutron servers to handle these events
#
# - only the neutron server which has the lock will handle the
# notify events.
#
# - In case the neutron server which owns this lock goes down,
# ovsdb server would assign the lock to one of the other neutron
# servers.
self.event_lock_name = "neutron_ovn_event_lock"
def notify(self, event, row, updates=None):
# Do not handle the notification if the event lock is requested,
# but not granted by the ovsdb-server.
if self.is_lock_contended:
return
self.notify_handler.notify(event, row, updates)
@abc.abstractmethod
def post_connect(self):
"""Should be called after the idl has been initialized"""
class OvnIdlDistributedLock(BaseOvnIdl):
def __init__(self, driver, remote, schema):
super(OvnIdlDistributedLock, self).__init__(remote, schema)
self.driver = driver
self.notify_handler = OvnDbNotifyHandler(driver)
self._node_uuid = self.driver.node_uuid
self._hash_ring = hash_ring_manager.HashRingManager(
self.driver.hash_ring_group)
self._last_touch = None
# This is a map of tables that may be new after OVN database is updated
self._tables_to_register = {
'OVN_Southbound': ['Chassis_Private'],
}
def handle_db_schema_changes(self, event, row):
if (event == row_event.RowEvent.ROW_CREATE and
row._table.name == 'Database'):
try:
tables = self._tables_to_register[row.name]
except KeyError:
return
self.update_tables(tables, row.schema[0])
if 'Chassis_Private' == self.driver.agent_chassis_table:
if 'Chassis_Private' not in self.tables:
self.driver.agent_chassis_table = 'Chassis'
else:
if 'Chassis_Private' in self.tables:
self.driver.agent_chassis_table = 'Chassis_Private'
def notify(self, event, row, updates=None):
self.handle_db_schema_changes(event, row)
self.notify_handler.notify(event, row, updates, global_=True)
try:
target_node = self._hash_ring.get_node(str(row.uuid))
except exceptions.HashRingIsEmpty as e:
LOG.error('HashRing is empty, error: %s', e)
return
if target_node != self._node_uuid:
return
# If the worker hasn't been health checked by the maintenance
# thread (see bug #1834498), indicate that it's alive here
time_now = timeutils.utcnow()
touch_timeout = time_now - datetime.timedelta(
seconds=ovn_const.HASH_RING_TOUCH_INTERVAL)
if not self._last_touch or touch_timeout >= self._last_touch:
# NOTE(lucasagomes): Guard the db operation with an exception
# handler. If heartbeating fails for whatever reason, log
# the error and continue with processing the event
try:
ctx = neutron_context.get_admin_context()
ovn_hash_ring_db.touch_node(ctx, self._node_uuid)
self._last_touch = time_now
except Exception:
LOG.exception('Hash Ring node %s failed to heartbeat',
self._node_uuid)
LOG.debug('Hash Ring: Node %(node)s (host: %(hostname)s) '
'handling event "%(event)s" for row %(row)s '
'(table: %(table)s)',
{'node': self._node_uuid, 'hostname': CONF.host,
'event': event, 'row': row.uuid, 'table': row._table.name})
self.notify_handler.notify(event, row, updates)
@abc.abstractmethod
def post_connect(self):
"""Should be called after the idl has been initialized"""
class OvnNbIdl(OvnIdlDistributedLock):
def __init__(self, driver, remote, schema):
super(OvnNbIdl, self).__init__(driver, remote, schema)
self._lsp_update_up_event = LogicalSwitchPortUpdateUpEvent(driver)
self._lsp_update_down_event = LogicalSwitchPortUpdateDownEvent(driver)
self._lsp_create_up_event = LogicalSwitchPortCreateUpEvent(driver)
self._lsp_create_down_event = LogicalSwitchPortCreateDownEvent(driver)
self._fip_create_delete_event = FIPAddDeleteEvent(driver)
self.notify_handler.watch_events([self._lsp_create_up_event,
self._lsp_create_down_event,
self._lsp_update_up_event,
self._lsp_update_down_event,
self._fip_create_delete_event])
@classmethod
def from_server(cls, connection_string, helper, driver):
helper.register_all()
return cls(driver, connection_string, helper)
def unwatch_logical_switch_port_create_events(self):
"""Unwatch the logical switch port create events.
When the ovs idl client connects to the ovsdb-server, it gets
a dump of all logical switch ports as events and we need to process
them at start up.
After the startup, there is no need to watch these events.
So unwatch these events.
"""
self.notify_handler.unwatch_events([self._lsp_create_up_event,
self._lsp_create_down_event])
self._lsp_create_up_event = None
self._lsp_create_down_event = None
def post_connect(self):
self.unwatch_logical_switch_port_create_events()
class OvnSbIdl(OvnIdlDistributedLock):
def __init__(self, driver, remote, schema):
super(OvnSbIdl, self).__init__(driver, remote, schema)
self.notify_handler.watch_events([
ChassisAgentDeleteEvent(self.driver),
ChassisAgentDownEvent(self.driver),
ChassisAgentWriteEvent(self.driver),
ChassisMetadataAgentWriteEvent(self.driver)])
@classmethod
def from_server(cls, connection_string, helper, driver):
if 'Chassis_Private' in helper.schema_json['tables']:
helper.register_table('Chassis_Private')
if 'FDB' in helper.schema_json['tables']:
helper.register_table('FDB')
helper.register_table('Chassis')
helper.register_table('Encap')
helper.register_table('Port_Binding')
helper.register_table('Datapath_Binding')
helper.register_table('MAC_Binding')
helper.register_columns('SB_Global', ['external_ids'])
return cls(driver, connection_string, helper)
def post_connect(self):
"""Watch Chassis events.
When the ovs idl client connects to the ovsdb-server, it gets
a dump of all Chassis create event. We don't need to process them
because there will be sync up at startup. After that, we will watch
the events to make notify work.
"""
self._chassis_event = ChassisEvent(self.driver)
self._portbinding_event = PortBindingChassisEvent(self.driver)
self.notify_handler.watch_events(
[self._chassis_event, self._portbinding_event,
PortBindingChassisUpdateEvent(self.driver)])
class OvnInitPGNbIdl(OvnIdl):
"""Very limited OVN NB IDL.
This IDL is intended to be used only in initialization phase with short
living DB connections.
"""
tables = ['Port_Group', 'Logical_Switch_Port', 'ACL']
def __init__(self, driver, remote, schema):
super(OvnInitPGNbIdl, self).__init__(driver, remote, schema)
self.cond_change(
'Port_Group',
[['name', '==', ovn_const.OVN_DROP_PORT_GROUP_NAME]])
self.neutron_pg_drop_event = NeutronPgDropPortGroupCreated()
self.notify_handler.watch_event(self.neutron_pg_drop_event)
@classmethod
def from_server(cls, connection_string, helper, driver, pg_only=False):
if pg_only:
helper.register_table('Port_Group')
else:
for table in cls.tables:
helper.register_table(table)
return cls(driver, connection_string, helper)
@contextlib.contextmanager
def short_living_ovsdb_api(api_class, idl):
"""Context manager for short living connections to the database.
:param api_class: Class implementing the database calls
(e.g. from the impl_idl module)
:param idl: An instance of IDL class (e.g. instance of OvnNbIdl)
"""
conn = connection.Connection(
idl, timeout=ovn_conf.get_ovn_ovsdb_timeout())
api = api_class(conn)
try:
yield api
finally:
api.ovsdb_connection.stop()
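# Hypothetical usage sketch for the context manager above (the api class and
# connection string are placeholders, not values defined in this module):
#   idl = OvnNbIdl.from_server('tcp:127.0.0.1:6641', helper, driver)
#   with short_living_ovsdb_api(impl_idl_ovn.OvsdbNbOvnIdl, idl) as api:
#       api.db_list_rows('Logical_Switch').execute(check_error=True)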
def _check_and_set_ssl_files(schema_name):
if schema_name == 'OVN_Southbound':
priv_key_file = ovn_conf.get_ovn_sb_private_key()
cert_file = ovn_conf.get_ovn_sb_certificate()
ca_cert_file = ovn_conf.get_ovn_sb_ca_cert()
else:
priv_key_file = ovn_conf.get_ovn_nb_private_key()
cert_file = ovn_conf.get_ovn_nb_certificate()
ca_cert_file = ovn_conf.get_ovn_nb_ca_cert()
if priv_key_file:
Stream.ssl_set_private_key_file(priv_key_file)
if cert_file:
Stream.ssl_set_certificate_file(cert_file)
if ca_cert_file:
Stream.ssl_set_ca_cert_file(ca_cert_file)
|
the-stack_0_25562
|
#!/usr/bin/env python3
# Dependencies
from lxml import html, etree
import requests
import pandas as pd
import datetime as dt
class _GetDataFromURL(object):
"""This "private" class does all the heavy lifting of fetching data from the
URL provided, and then returns data to the main `rightmove_data` class
instance. The reason for this is so that all the validation and web-scraping
is done when an instance is created, and afterwards the data is accessible
quickly via methods on the `rightmove_data` instance."""
def __init__(self, url):
"""Initialize an instance of the scraper by passing a URL from the
results of a property search on www.rightmove.co.uk."""
self.url = url
self.first_page = self.make_request(self.url)
self.validate_url()
self.get_results = self.__get_results
def validate_url(self):
"""Basic validation that the URL at least starts in the right format and
returns status code 200."""
real_url = "{}://www.rightmove.co.uk/{}/find.html?"
protocols = ["http", "https"]
types = ["property-to-rent", "property-for-sale", "new-homes-for-sale"]
left_urls = [real_url.format(p, t) for p in protocols for t in types]
conditions = [self.url.startswith(u) for u in left_urls]
conditions.append(self.first_page[1] == 200)
if not any(conditions):
raise ValueError("Invalid rightmove URL:\n\n\t{}".format(self.url))
@property
def rent_or_sale(self):
"""Tag to determine if the search is for properties for rent or sale.
        Required because the Xpaths are different for the target elements."""
if "/property-for-sale/" in self.url \
or "/new-homes-for-sale/" in self.url:
return "sale"
elif "/property-to-rent/" in self.url:
return "rent"
else:
raise ValueError("Invalid rightmove URL:\n\n\t{}".format(self.url))
@property
def results_count(self):
"""Returns an integer of the total number of listings as displayed on
the first page of results. Note that not all listings are available to
scrape because rightmove limits the number of accessible pages."""
tree = html.fromstring(self.first_page[0])
xpath = """//span[@class="searchHeader-resultCount"]/text()"""
return int(tree.xpath(xpath)[0].replace(",", ""))
@property
def page_count(self):
"""Returns the number of result pages returned by the search URL. There
are 24 results per page. Note that the website limits results to a
maximum of 42 accessible pages."""
page_count = self.results_count // 24
if self.results_count % 24 > 0: page_count += 1
# Rightmove will return a maximum of 42 results pages, hence:
if page_count > 42: page_count = 42
return page_count
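    # Worked example: a search reporting 100 listings gives 100 // 24 = 4 full
    # pages plus a remainder, so page_count == 5; a search reporting 2000
    # listings would need 84 pages but is capped at the 42 rightmove exposes.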
@staticmethod
def make_request(url):
r = requests.get(url)
# Minimise the amount returned to reduce overheads:
return r.content, r.status_code
def get_page(self, request_content):
"""Method to scrape data from a single page of search results. Used
iteratively by the `get_results` method to scrape data from every page
returned by the search."""
# Process the html:
tree = html.fromstring(request_content)
# Set xpath for price:
if self.rent_or_sale == "rent":
xp_prices = """//span[@class="propertyCard-priceValue"]/text()"""
elif self.rent_or_sale == "sale":
xp_prices = """//div[@class="propertyCard-priceValue"]/text()"""
# Set xpaths for listing title, property address, URL, and agent URL:
xp_titles = """//div[@class="propertyCard-details"]\
//a[@class="propertyCard-link"]\
//h2[@class="propertyCard-title"]/text()"""
xp_addresses = """//address[@class="propertyCard-address"]//span/text()"""
xp_weblinks = """//div[@class="propertyCard-details"]\
//a[@class="propertyCard-link"]/@href"""
xp_agent_urls = """//div[@class="propertyCard-contactsItem"]\
//div[@class="propertyCard-branchLogo"]\
//a[@class="propertyCard-branchLogo-link"]/@href"""
# Create data lists from xpaths:
price_pcm = tree.xpath(xp_prices)
titles = tree.xpath(xp_titles)
addresses = tree.xpath(xp_addresses)
base = "http://www.rightmove.co.uk"
weblinks = ["{}{}".format(base, tree.xpath(xp_weblinks)[w]) \
for w in range(len(tree.xpath(xp_weblinks)))]
agent_urls = ["{}{}".format(base, tree.xpath(xp_agent_urls)[a]) \
for a in range(len(tree.xpath(xp_agent_urls)))]
# Store the data in a Pandas DataFrame:
data = [price_pcm, titles, addresses, weblinks, agent_urls]
temp_df = pd.DataFrame(data)
temp_df = temp_df.transpose()
temp_df.columns = ["price", "type", "address", "url", "agent_url"]
# Drop empty rows which come from placeholders in the html:
temp_df = temp_df[temp_df["address"].notnull()]
return temp_df
@property
def __get_results(self):
"""Pandas DataFrame with all results returned by the search."""
# Create DataFrame of the first page (which has already been requested):
results = self.get_page(self.first_page[0])
# Iterate through the rest of the pages scraping results:
if self.page_count > 1:
            for p in range(1, self.page_count):  # the first page (index 0) has already been requested
# Create the URL of the specific results page:
p_url = "{}&index={}".format(str(self.url), str((p * 24)))
# Make the request:
rc = self.make_request(p_url)
# Requests to scrape lots of pages eventually get status 400, so:
if rc[1] != 200: break
# Create a temporary dataframe of page results:
temp_df = self.get_page(rc[0])
# Concatenate the temporary dataframe with the full dataframe:
frames = [results, temp_df]
results = pd.concat(frames)
# Reset the index:
results.reset_index(inplace=True, drop=True)
# Convert price column to numeric type:
results["price"].replace(regex=True, inplace=True, to_replace=r"\D", value=r"")
results["price"] = pd.to_numeric(results["price"])
# Extract postcodes to a separate column:
pat = r"\b([A-Za-z][A-Za-z]?[0-9][0-9]?[A-Za-z]?)\b"
results["postcode"] = results["address"].str.extract(pat, expand=True)
# Extract number of bedrooms from "type" to a separate column:
pat = r"\b([\d][\d]?)\b"
results["number_bedrooms"] = results.type.str.extract(pat, expand=True)
results.loc[results["type"].str.contains("studio", case=False), "number_bedrooms"] = 0
# Clean up annoying white spaces and newlines in "type" column:
for row in range(len(results)):
type_str = results.loc[row, "type"]
clean_str = type_str.strip("\n").strip()
results.loc[row, "type"] = clean_str
# Add column with datetime when the search was run (i.e. now):
now = dt.datetime.today()
results["search_date"] = now
return results
class rightmove_data(object):
"""The `rightmove_data` web scraper collects structured data on properties
returned by a search performed on www.rightmove.co.uk
An instance of the class created with a rightmove URL provides attributes to
easily access data from the search results, the most useful being
`get_results`, which returns all results as a Pandas DataFrame object.
"""
def __init__(self, url):
"""Initialize the scraper with a URL from the results of a property
search performed on www.rightmove.co.uk"""
self.__request_object = _GetDataFromURL(url)
self.__url = url
@property
def url(self):
return self.__url
@property
def get_results(self):
"""Pandas DataFrame of all results returned by the search."""
return self.__request_object.get_results
@property
def results_count(self):
"""Total number of results returned by `get_results`. Note that the
rightmove website may state a much higher number of results; this is
because they artificially restrict the number of results pages that can
be accessed to 42."""
return len(self.get_results)
@property
def average_price(self):
"""Average price of all results returned by `get_results` (ignoring
results which don't list a price)."""
total = self.get_results["price"].dropna().sum()
return int(total / self.results_count)
def summary(self, by="number_bedrooms"):
"""Pandas DataFrame summarising the the results by mean price and count.
By default grouped by the `number_bedrooms` column but will accept any
column name from `get_results` as a grouper."""
df = self.get_results.dropna(axis=0, subset=["price"])
groupers = {"price":["count", "mean"]}
df = df.groupby(df[by]).agg(groupers).astype(int)
df.columns = df.columns.get_level_values(1)
df.reset_index(inplace=True)
if "number_bedrooms" in df.columns:
df["number_bedrooms"] = df["number_bedrooms"].astype(int)
df.sort_values(by=["number_bedrooms"], inplace=True)
else:
df.sort_values(by=["count"], inplace=True, ascending=False)
return df.reset_index(drop=True)
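# Minimal usage sketch: the search URL below is illustrative only and should be
# replaced with the URL of a real rightmove.co.uk results page before running.
if __name__ == "__main__":
    example_url = ("https://www.rightmove.co.uk/property-to-rent/find.html?"
                   "searchType=RENT&locationIdentifier=REGION%5E87490")
    rm = rightmove_data(example_url)
    print("results scraped:", rm.results_count)
    print("average price:", rm.average_price)
    print(rm.summary(by="number_bedrooms"))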
|
the-stack_0_25563
|
import numpy as np
import torch
from numpy import linalg as la, random as rnd
import pymanopt
from pymanopt.manifolds import Sphere
from pymanopt.solvers import TrustRegions
def dominant_eigenvector(A):
"""
Returns the dominant eigenvector of the symmetric matrix A.
Note: For the same A, this should yield the same as the dominant invariant
subspace example with p = 1.
"""
m, n = A.shape
assert m == n, "matrix must be square"
    assert np.allclose(A, A.T), "matrix must be symmetric"  # element-wise check; summing A - A.T is always ~0
manifold = Sphere(n)
solver = TrustRegions()
@pymanopt.function.PyTorch
def cost(x):
return -x.matmul(torch.from_numpy(A).matmul(x))
problem = pymanopt.Problem(manifold, cost)
xopt = solver.solve(problem)
return xopt.squeeze()
if __name__ == "__main__":
# Generate random problem data.
n = 128
A = rnd.randn(n, n)
A = 0.5 * (A + A.T)
# Calculate the actual solution by a conventional eigenvalue decomposition.
w, v = la.eig(A)
x = v[:, np.argmax(w)]
# Solve the problem with pymanopt.
xopt = dominant_eigenvector(A)
# Make sure both vectors have the same direction. Both are valid
# eigenvectors, of course, but for comparison we need to get rid of the
# ambiguity.
if np.sign(x[0]) != np.sign(xopt[0]):
xopt = -xopt
# Print information about the solution.
print('')
print("l2-norm of x: %f" % la.norm(x))
print("l2-norm of xopt: %f" % la.norm(xopt))
print("solution found: %s" % np.allclose(x, xopt, rtol=1e-3))
print("l2-error: %f" % la.norm(x - xopt))
|
the-stack_0_25564
|
# ---------------------------------------------------------------------
# fm.reportalarmdetail application
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import datetime
from io import BytesIO
from zipfile import ZipFile, ZIP_DEFLATED
from tempfile import TemporaryFile
# Third-party modules
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound
# NOC modules
from noc.lib.app.extapplication import ExtApplication, view
from noc.sa.interfaces.base import StringParameter, IntParameter, ObjectIdParameter
from noc.sa.models.useraccess import UserAccess
from noc.sa.models.administrativedomain import AdministrativeDomain
from noc.core.translation import ugettext as _
from noc.lib.app.reportdatasources.base import ReportDataSource
from noc.lib.app.reportdatasources.loader import loader
class ReportAlarmDetailApplication(ExtApplication):
menu = _("Reports") + "|" + _("Alarm Detail")
title = _("Alarm Detail")
SEGMENT_PATH_DEPTH = 7
CONTAINER_PATH_DEPTH = 7
@view(
"^download/$",
method=["GET"],
access="launch",
api=True,
validate={
"from_date": StringParameter(required=False),
"to_date": StringParameter(required=False),
"min_duration": IntParameter(required=False),
"max_duration": IntParameter(required=False),
"min_objects": IntParameter(required=False),
"min_subscribers": IntParameter(required=False),
"source": StringParameter(
default="both", choices=["active", "both", "archive", "long_archive"]
),
"segment": ObjectIdParameter(required=False),
"administrative_domain": IntParameter(required=False),
"resource_group": ObjectIdParameter(required=False),
"ex_resource_group": StringParameter(required=False),
"alarm_class": ObjectIdParameter(required=False),
"subscribers": StringParameter(required=False),
"ids": StringParameter(required=False),
"columns": StringParameter(required=False),
"o_format": StringParameter(choices=["csv", "csv_zip", "xlsx"]),
},
)
def api_report(
self,
request,
o_format,
from_date=None,
to_date=None,
min_duration=0,
max_duration=0,
min_objects=0,
min_subscribers=0,
segment=None,
administrative_domain=None,
resource_group=None,
ex_resource_group=None,
columns=None,
source="both",
alarm_class=None,
subscribers=None,
ids=None,
enable_autowidth=False,
):
filters = []
ads = []
if administrative_domain:
ads = AdministrativeDomain.get_nested_ids(administrative_domain)
if not request.user.is_superuser:
user_ads = UserAccess.get_domains(request.user)
if administrative_domain and ads:
if administrative_domain not in user_ads:
ads = list(set(ads) & set(user_ads))
if not ads:
return HttpResponse(
"<html><body>Permission denied: Invalid Administrative Domain</html></body>"
)
else:
ads = user_ads
if ids:
ids = ids.split()
fd = datetime.datetime.now()
td = None
elif from_date:
fd = datetime.datetime.strptime(from_date, "%d.%m.%Y")
td = datetime.datetime.strptime(to_date, "%d.%m.%Y") + datetime.timedelta(days=1)
else:
return HttpResponseBadRequest(_("One param - FROM_DATE or IDS required"))
for name, values in [
("min_duration", min_duration),
("max_duration", max_duration),
("min_objects", min_objects),
("min_subscribers", min_subscribers),
("segment", segment),
("adm_path", ads),
("resource_group", resource_group),
("ex_resource_group", ex_resource_group),
("alarm_class", alarm_class),
("subscribers", subscribers),
("source", source),
]:
if not values:
continue
if values and isinstance(values, list):
filters += [{"name": name, "values": values}]
elif values:
filters += [{"name": name, "values": [values]}]
if source in ["long_archive"]:
report_ds = "reportdsalarmsbiarchive"
o_format = "csv_zip"
if td - fd > datetime.timedelta(days=390):
return HttpResponseBadRequest(
_(
"Report more than 1 year not allowed. If nedeed - request it from Administrator"
)
)
else:
report_ds = "reportdsalarms"
report: ReportDataSource = loader[report_ds]
if not report:
return HttpResponseNotFound(_(f"Report DataSource {report_ds} Not found"))
data = report(
fields=columns.split(","),
objectids=ids or [],
allobjectids=False,
start=fd,
end=td,
filters=filters,
)
# filename = f'{report_name}_detail_report_{datetime.datetime.now().strftime("%Y%m%d")}'
filename = "alarms.csv"
if o_format == "csv":
response = HttpResponse(data.report_csv(), content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="%s"' % filename
return response
elif o_format == "csv_zip":
response = BytesIO()
f = TemporaryFile(mode="w+b")
f.write(data.report_csv())
f.seek(0)
with ZipFile(response, "w", compression=ZIP_DEFLATED) as zf:
zf.writestr(filename, f.read())
zf.filename = "%s.zip" % filename
response.seek(0)
response = HttpResponse(response.getvalue(), content_type="application/zip")
response["Content-Disposition"] = 'attachment; filename="%s.zip"' % filename
return response
elif o_format == "xlsx":
response = HttpResponse(data.report_xlsx(), content_type="application/vnd.ms-excel")
response["Content-Disposition"] = 'attachment; filename="alarms.xlsx"'
response.close()
return response
|
the-stack_0_25565
|
#!/usr/bin/env python
# Copyright 2021 Takenori Yoshimura
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import re
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--skip-header", action="store_true", help="if true, skip first line"
)
parser.add_argument("text", type=str, help="text to be cleaned")
args = parser.parse_args()
with open(args.text, "r", encoding="utf-8") as fid:
if args.skip_header:
fid.readline()
for line in fid.readlines():
id, content = line.split(",")
content = re.sub("(.*?)", "", content.rstrip())
content = re.sub("「(.*?)」", "\\1", content)
print("%s %s" % (id, content))
|
the-stack_0_25567
|
# Copyright (c) 2013 Ondrej Kipila <ok100 at openmailbox dot org>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING file for more details.
"""Metadata-related code."""
import os
import random
from threading import Lock
import plyr
import lyvi
class Metadata:
"""A class which holds metadata for the currently playing song."""
artist = None
album = None
title = None
file = None
_lyrics = None
_artistbio = None
_guitartabs = None
_backdrops = None
_cover = None
@property
def lyrics(self):
return self._lyrics
@lyrics.setter
def lyrics(self, value):
"""Update ui and save the lyrics."""
self._lyrics = value
lyvi.ui.update()
if lyvi.ui.autoscroll:
lyvi.ui.autoscroll.reset()
if lyvi.config['save_lyrics']:
self.save('lyrics', lyvi.config['save_lyrics'])
@property
def artistbio(self):
return self._artistbio
@artistbio.setter
def artistbio(self, value):
"""Update UI."""
self._artistbio = value
lyvi.ui.update()
@property
def guitartabs(self):
return self._guitartabs
@guitartabs.setter
def guitartabs(self, value):
"""Update UI."""
self._guitartabs = value
lyvi.ui.update()
@property
def backdrops(self):
return self._backdrops
@backdrops.setter
def backdrops(self, value):
"""Update background."""
self._backdrops = value
if lyvi.bg:
lyvi.bg.update()
@property
def cover(self):
return self._cover
@cover.setter
def cover(self, value):
"""Update background and save the cover."""
self._cover = value
if lyvi.bg:
lyvi.bg.update()
if lyvi.config['save_cover']:
self.save('cover', lyvi.config['save_cover_filename'])
def __init__(self):
"""Initialize the class."""
cache_dir = os.path.join(os.environ['HOME'], '.local/share/lyvi')
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self.cache = plyr.Database(cache_dir)
self.lock = Lock()
def set_tags(self):
"""Set all tag properties to the actual values."""
self.artist = lyvi.player.artist
self.title = lyvi.player.title
self.album = lyvi.player.album
self.file = lyvi.player.file
def reset_tags(self):
"""Set all tag and metadata properties to None."""
self.artist = self.title = self.album = self.file = None
self.lyrics = self.artistbio = self.guitartabs = None
self.backdrops = self.cover = None
def delete(self, type, artist, title, album):
"""Delete metadata from the cache.
Keyword arguments:
type -- type of the metadata
artist -- artist tag
title -- title tag
album -- album tag
"""
if artist and title and album:
self.cache.delete(plyr.Query(get_type=type, artist=artist, title=title, album=album))
def save(self, type, file):
"""Save the given metadata type.
Keyword arguments:
type -- type of the metadata
file -- path to the file metadata will be saved to
Some special substrings can be used in the filename:
<filename> -- name of the current song without extension
<songdir> -- directory containing the current song
<artist> -- artist of the current song
<title> -- title of the current song
<album> -- album of the current song
"""
data = getattr(self, type)
if self.file and data and data != 'Searching...':
for k, v in {
'<filename>': os.path.splitext(os.path.basename(self.file))[0],
'<songdir>': os.path.dirname(self.file),
'<artist>': self.artist,
'<title>': self.title,
'<album>': self.album
}.items():
file = file.replace(k, v)
if not os.path.exists(os.path.dirname(file)):
os.makedirs(os.path.dirname(file))
if not os.path.exists(file):
mode = 'wb' if isinstance(data, bytes) else 'w'
with open(file, mode) as f:
f.write(data)
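    # Example of the substitution described above (paths are hypothetical):
    # with save_lyrics set to '<songdir>/<filename>.lyrics' and the current
    # song at '/music/Artist/01 Song.mp3', the lyrics are written to
    # '/music/Artist/01 Song.lyrics'.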
def _query(self, type, normalize=True, number=1):
"""Return a list containing results from glyr.Query,
or None if some tags are missing.
Keyword arguments:
type -- type of the metadata
normalize -- whether the search strings should be normalized by glyr
"""
try:
query = plyr.Query(
number=number,
parallel=20,
get_type=type,
artist=self.artist or '',
title=self.title or '',
album=self.album or ''
)
except AttributeError: # Missing tags?
return None
query.useragent = lyvi.USERAGENT
query.database = self.cache
if not normalize:
query.normalize = ('none', 'artist', 'album', 'title')
return query.commit()
def get(self, type):
"""Download and set the metadata for the given property.
Keyword arguments:
type -- type of the metadata
"""
if lyvi.ui.view == type:
lyvi.ui.home()
artist = self.artist
title = self.title
number = 1
if type in ('lyrics', 'artistbio', 'guitartabs'):
setattr(self, type, 'Searching...')
elif type in ('backdrops', 'cover'):
setattr(self, type, None)
if type == 'backdrops':
number = 20
items = (self._query(type, number=number)
or self._query(type, number=number, normalize=False))
data = None
if items:
if type == 'backdrops':
data = random.choice(items).data
elif type == 'cover':
data = items[0].data
else:
data = items[0].data.decode()
with self.lock:
if artist == self.artist and title == self.title:
setattr(self, type, data)
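# A minimal usage sketch (not part of the original module): assuming a running lyvi
# session with a configured player, the template substrings documented in Metadata.save()
# could be exercised like this (the path template is illustrative, not a default):
#
#     metadata = Metadata()
#     metadata.set_tags()                      # copy artist/title/album/file from the player
#     metadata.lyrics = 'Some fetched lyrics'  # the setter updates the UI and saves if configured
#     metadata.save('lyrics', '<songdir>/<artist> - <title>.lyrics')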
|
the-stack_0_25568
|
#!/usr/bin/env python
#
# Simple VXI-11 commandline interface
#
# Copyright (c) 2014-2017 Michael Walle
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Description:
# Commands are sent to the VXI-11 device after every newline. If the command
# ends in '?' the response is received.
#
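# Example session (illustrative; the host address is an assumption and the exact
# command name depends on how the package installs its console script):
#
#   $ vxi11-cli 192.168.1.20 inst0
#   Enter command to send. Quit with 'q'. Read with '?'.
#   => *IDN?
#   <instrument identification string>
#   => %SLEEP 500
#   => q
#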
import time
import sys
import logging
import readline
from optparse import OptionParser
from . import __version__
from .vxi11 import Instrument, Vxi11Exception
try: input = raw_input
except NameError: pass
LOCAL_COMMANDS = {
'%SLEEP': (1, 1, lambda a: time.sleep(float(a[0])/1000)),
}
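# Each LOCAL_COMMANDS entry maps a '%'-prefixed command to a (min_args, max_args, handler)
# tuple; the handler receives the list of arguments. For example, '%SLEEP 500' pauses
# for 500 milliseconds before the next command is sent.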
def process_local_command(cmd):
args = cmd.split()
if args[0] in LOCAL_COMMANDS:
cmd_info = LOCAL_COMMANDS[args[0]]
if cmd_info[0] <= len(args[1:]) <= cmd_info[1]:
cmd_info[2](args[1:])
else:
print('Invalid number of arguments for command %s' % args[0])
else:
print('Unknown command "%s"' % cmd)
def main():
usage = 'usage: %prog [options] <host> [<name>]'
parser = OptionParser(usage=usage)
parser.add_option('-d', action='store_true', dest='debug',
help='enable debug messages')
parser.add_option('-v', action='store_true', dest='verbose',
help='be more verbose')
parser.add_option('-V', action='store_true', dest='version',
help='show version')
parser.add_option('--always-check-esr', action='store_true',
dest='check_esr',
help='Check the error status register after every command')
(options, args) = parser.parse_args()
if options.version:
print('vxi11-cli v%s' % (__version__,))
sys.exit(0)
logging.basicConfig()
if options.verbose:
logging.getLogger('vxi11').setLevel(logging.INFO)
if options.debug:
logging.getLogger('vxi11').setLevel(logging.DEBUG)
if len(args) < 1:
print(parser.format_help())
sys.exit(1)
host = args[0]
name = None
if len(args) > 1:
name = args[1]
v = Instrument(host, name)
v.open()
print("Enter command to send. Quit with 'q'. Read with '?'.")
try:
while True:
cmd = input('=> ')
if cmd == 'q':
break
if cmd.startswith('%'):
process_local_command(cmd)
continue
if len(cmd) > 0:
is_query = cmd.split(' ')[0][-1] == '?'
try:
if is_query:
if len(cmd) > 1:
v.write(cmd)
print(v.read())
else:
v.write(cmd)
if options.check_esr:
esr = int(v.ask('*ESR?').strip())
if esr != 0:
print('Warning: ESR was %d' % esr)
except Vxi11Exception:
e = sys.exc_info()[1]
print('ERROR: %s' % e)
except EOFError:
print('exiting...')
v.close()
if __name__ == '__main__':
main()
|
the-stack_0_25571
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
A meta data loader for sampling from different datasets / training tasks
A prefetch loader to speedup data loading
Modified from Nvidia Deep Learning Examples
(https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch).
"""
import random
import torch
from torch.utils.data import DataLoader
from utils.distributed import any_broadcast
class MetaLoader(object):
""" wraps multiple data loader """
def __init__(self, loaders, accum_steps=1, distributed=False):
assert isinstance(loaders, dict)
self.name2loader = {}
self.name2iter = {}
self.sampling_pools = []
for n, l in loaders.items():
if isinstance(l, tuple):
l, r = l
elif isinstance(l, DataLoader):
r = 1
else:
raise ValueError()
self.name2loader[n] = l
self.name2iter[n] = iter(l)
self.sampling_pools.extend([n]*r)
self.accum_steps = accum_steps
self.distributed = distributed
self.step = 0
def __iter__(self):
""" this iterator will run indefinitely """
task = self.sampling_pools[0]
while True:
if self.step % self.accum_steps == 0:
task = random.choice(self.sampling_pools)
if self.distributed:
                    # make sure all processes are training on the same task
task = any_broadcast(task, 0)
self.step += 1
iter_ = self.name2iter[task]
try:
batch = next(iter_)
except StopIteration:
iter_ = iter(self.name2loader[task])
batch = next(iter_)
self.name2iter[task] = iter_
yield task, batch
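# A minimal usage sketch (an assumption, not part of the original training code): the
# `loaders` dict maps a task name to either a DataLoader or a (DataLoader, ratio) tuple,
# where the ratio controls how often that task is sampled.
#
#     meta_loader = MetaLoader(
#         {'mlm': (mlm_dataloader, 2),  # sampled twice as often as 'itm'
#          'itm': itm_dataloader},
#         accum_steps=4,
#     )
#     for task, batch in meta_loader:
#         ...  # dispatch `batch` to the head that handles `task`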
def move_to_cuda(batch):
if isinstance(batch, torch.Tensor):
return batch.cuda(non_blocking=True)
elif isinstance(batch, list):
new_batch = [move_to_cuda(t) for t in batch]
elif isinstance(batch, tuple):
new_batch = tuple(move_to_cuda(t) for t in batch)
elif isinstance(batch, dict):
new_batch = {n: move_to_cuda(t) for n, t in batch.items()}
else:
return batch
return new_batch
def record_cuda_stream(batch):
if isinstance(batch, torch.Tensor):
batch.record_stream(torch.cuda.current_stream())
elif isinstance(batch, list) or isinstance(batch, tuple):
for t in batch:
record_cuda_stream(t)
elif isinstance(batch, dict):
for t in batch.values():
record_cuda_stream(t)
else:
pass
class PrefetchLoader(object):
"""
overlap compute and cuda data transfer
(copied and then modified from nvidia apex)
"""
def __init__(self, loader):
self.loader = loader
self.stream = torch.cuda.Stream()
def __iter__(self):
loader_it = iter(self.loader)
self.preload(loader_it)
batch = self.next(loader_it)
while batch is not None:
yield batch
batch = self.next(loader_it)
def __len__(self):
return len(self.loader)
def preload(self, it):
try:
self.batch = next(it)
except StopIteration:
self.batch = None
return
# if record_stream() doesn't work, another option is to make sure
# device inputs are created on the main stream.
# self.next_input_gpu = torch.empty_like(self.next_input,
# device='cuda')
# self.next_target_gpu = torch.empty_like(self.next_target,
# device='cuda')
# Need to make sure the memory allocated for next_* is not still in use
# by the main stream at the time we start copying to next_*:
# self.stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream):
self.batch = move_to_cuda(self.batch)
# more code for the alternative if record_stream() doesn't work:
# copy_ will record the use of the pinned source tensor in this
# side stream.
# self.next_input_gpu.copy_(self.next_input, non_blocking=True)
# self.next_target_gpu.copy_(self.next_target, non_blocking=True)
# self.next_input = self.next_input_gpu
# self.next_target = self.next_target_gpu
def next(self, it):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is not None:
record_cuda_stream(batch)
self.preload(it)
return batch
def __getattr__(self, name):
method = self.loader.__getattribute__(name)
return method
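# A minimal usage sketch (an assumption): wrap an existing DataLoader so host-to-GPU
# copies overlap with compute on a side CUDA stream.
#
#     train_loader = PrefetchLoader(DataLoader(dataset, batch_size=32, pin_memory=True))
#     for batch in train_loader:  # batches arrive already moved to the GPU
#         ...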
|
the-stack_0_25572
|
"""findcdn is a security research and reporting tool.
findcdn determines what CDN a domain has and prints or exports the results as JSON.
EXIT STATUS
This utility exits with one of the following values:
    0   The list of domains' CDNs was successfully exported or printed to a file.
>0 An error occurred.
Usage:
findcdn file <fileIn> [options]
findcdn list <domain>... [options]
findcdn (-h | --help)
Options:
-h --help Show this message.
--version Show the current version.
-o FILE --output=FILE If specified, then the JSON output file will be
created at the specified value.
-v --verbose Includes additional print statements.
--all Includes domains with and without a CDN
in output.
-d --double Run the checks twice to increase accuracy.
-t --threads=<thread_count> Number of threads, otherwise use default.
--timeout=<timeout> Max duration in seconds to wait for a domain to
conclude processing, otherwise use default.
--user_agent=<user_agent> Set the user agent to use, otherwise
use default.
--checks=<checks> Select detection types; possible values:
cname (c), HTTP headers (h), nameservers (n),
whois data (w). Default: "chnw"
"""
# Standard Python Libraries
import datetime
import json
import os
import sys
from typing import Any, Dict, List, Optional
# Third-Party Libraries
import docopt
from schema import And, Or, Schema, SchemaError, Use
import validators
# Internal Libraries
from ._version import __version__
from .cdnEngine import run_checks
from .findcdn_err import FileWriteError, InvalidDomain, NoDomains, OutputFileExists
# Global Variables
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36"
TIMEOUT = 60 # Time in seconds
THREADS = 0 # If 0 then cdnEngine uses CPU count to set thread count
CHECKS = "chnw" # cnames, headers, nameservers, whois_data
def write_json(json_dump: str, output: str, verbose: bool, interactive: bool):
"""Write dict as JSON to output file."""
try:
with open(output, "x") as outfile:
outfile.write(json_dump)
except FileExistsError:
raise OutputFileExists(output)
except Exception as e:
raise FileWriteError(e)
def main(
domain_list: List[str],
    output_path: Optional[str] = None,
verbose: bool = False,
all_domains: bool = False,
interactive: bool = False,
double_in: bool = False,
threads: int = THREADS,
timeout: int = TIMEOUT,
user_agent: str = USER_AGENT,
checks: str = CHECKS,
) -> str:
"""Take in a list of domains and determine the CDN for each return (JSON, number of successful jobs)."""
    # Make sure the passed list actually has something in it
if len(domain_list) <= 0:
raise NoDomains("error")
# Validate domains in list
for item in domain_list:
if validators.domain(item) is not True:
raise InvalidDomain(item)
# Show the validated domains if in verbose mode
if verbose:
print("%d Domains Validated" % len(domain_list))
# Define domain dict and counter for json
domain_dict = {}
CDN_count = 0
# Check domain list
processed_list, cnt = run_checks(
domain_list,
threads,
timeout,
user_agent,
interactive,
verbose,
double_in,
checks,
)
# Parse the domain data
for domain in processed_list:
        # Track the count of domains that have CDNs
if len(domain.cdns) > 0:
CDN_count += 1
# Setup formatting for json output
if len(domain.cdns) > 0 or all_domains:
domain_dict[domain.url] = {
"IP": str(domain.ip)[1:-1],
"cdns": str(domain.cdns)[1:-1],
"cdns_by_names": str(domain.cdns_by_name)[1:-1],
}
# Create JSON from the results and return (results, successful jobs)
json_dict = {}
json_dict["date"] = datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
json_dict["cdn_count"] = str(CDN_count)
json_dict["domains"] = domain_dict # type: ignore
json_dump = json.dumps(json_dict, indent=4, sort_keys=False)
# Show the dump to stdout if verbose or interactive
if (output_path is None and interactive) or verbose:
print(json_dump)
# Export to file if file provided
if output_path is not None:
write_json(json_dump, output_path, verbose, interactive)
if interactive or verbose:
print(
"Domain processing completed.\n%d domains had CDN's out of %d."
% (CDN_count, len(domain_list))
)
if verbose:
print(f"{cnt} jobs completed!")
# Return json dump to callee
return json_dump
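# main() can also be called directly as a library function; a minimal sketch with
# illustrative domain names (not taken from this file):
#
#     result_json = main(["example.com", "example.org"], verbose=True)
#     data = json.loads(result_json)
#     print(data["cdn_count"])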
def interactive() -> None:
"""Collect command arguments and run the main program."""
# Obtain arguments from docopt
args: Dict[str, str] = docopt.docopt(__doc__, version=__version__)
# Check for None params then set default if found
if args["--user_agent"] is None:
args["--user_agent"] = USER_AGENT
if args["--threads"] is None:
args["--threads"] = THREADS
if args["--timeout"] is None:
args["--timeout"] = TIMEOUT
if args["--checks"] is None:
args["--checks"] = CHECKS
# Validate and convert arguments as needed with schema
schema: Schema = Schema(
{
"--output": Or(
None,
And(
str,
lambda filename: not os.path.isfile(filename),
error='Output file "' + str(args["--output"]) + '" already exists!',
),
),
"<fileIn>": Or(
None,
And(
str,
lambda filename: os.path.isfile(filename),
error='Input file "' + str(args["<fileIn>"]) + '" does not exist!',
),
),
"--threads": And(
Use(int),
lambda thread_count: thread_count >= 0,
error="Thread count must be a positive number",
),
"--timeout": And(
Use(int),
lambda timeout: timeout > 0,
error="The timeout duration must be a number greater than 0",
),
"--user_agent": And(
str,
error="The user agent must be a string.",
),
"--checks": And(
str,
lambda checks: set(checks) <= {'c', 'h', 'n', 'w'},
error="Checks can be the following characters: chnw"
),
"<domain>": And(list, error="Please format the domains as a list."),
str: object, # Don't care about other keys, if any
}
)
try:
validated_args: Dict[str, Any] = schema.validate(args)
except SchemaError as err:
# Exit because one or more of the arguments were invalid
print(err, file=sys.stderr)
sys.exit(1)
# Add domains to a list
domain_list = []
if validated_args["file"]:
try:
with open(validated_args["<fileIn>"]) as f:
domain_list = [line.rstrip() for line in f]
except IOError as e:
print("A file error occurred: %s" % e, file=sys.stderr)
sys.exit(1)
else:
domain_list = validated_args["<domain>"]
# Start main runner of program with supplied inputs.
try:
main(
domain_list,
validated_args["--output"],
validated_args["--verbose"],
validated_args["--all"],
True, # Launch in interactive mode.
validated_args["--double"],
validated_args["--threads"],
validated_args["--timeout"],
validated_args["--user_agent"],
validated_args["--checks"]
)
# Check for all potential exceptions
except OutputFileExists as ofe:
print(ofe.message)
sys.exit(1)
except FileWriteError as fwe:
print(fwe.message)
sys.exit(2)
except InvalidDomain as invdom:
print(invdom.message)
sys.exit(3)
except NoDomains as nd:
print(nd.message)
sys.exit(4)
|
the-stack_0_25573
|
# coding=utf-8
# Copyright Studio Ousia and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LUKE model. """
import math
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN, gelu
from ...file_utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...modeling_utils import PreTrainedModel, apply_chunking_to_forward
from ...utils import logging
from .configuration_luke import LukeConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LukeConfig"
_TOKENIZER_FOR_DOC = "LukeTokenizer"
_CHECKPOINT_FOR_DOC = "studio-ousia/luke-base"
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST = [
"studio-ousia/luke-base",
"studio-ousia/luke-large",
# See all LUKE models at https://huggingface.co/models?filter=luke
]
@dataclass
class BaseLukeModelOutputWithPooling(BaseModelOutputWithPooling):
"""
Base class for outputs of the LUKE model.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`):
Sequence of entity hidden-states at the output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token) further processed by a
Linear layer and a Tanh activation function.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of
each layer plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output
of each layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length + entity_length, sequence_length + entity_length)`. Attentions weights after the attention
softmax, used to compute the weighted average in the self-attention heads.
"""
entity_last_hidden_state: torch.FloatTensor = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class BaseLukeModelOutput(BaseModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
entity_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, entity_length, hidden_size)`):
Sequence of entity hidden-states at the output of the last layer of the model.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output
of each layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
entity_last_hidden_state: torch.FloatTensor = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class LukeMaskedLMOutput(ModelOutput):
"""
Base class for model's outputs, with potential hidden states and attentions.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
The sum of masked language modeling (MLM) loss and entity prediction loss.
mlm_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked language modeling (MLM) loss.
mep_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked entity prediction (MEP) loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
entity_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the entity prediction head (scores for each entity vocabulary token before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output
of each layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
mlm_loss: Optional[torch.FloatTensor] = None
mep_loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
entity_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class EntityClassificationOutput(ModelOutput):
"""
Outputs of entity classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of
each layer plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output
of each layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class EntityPairClassificationOutput(ModelOutput):
"""
Outputs of entity pair classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of
each layer plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output
of each layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@dataclass
class EntitySpanClassificationOutput(ModelOutput):
"""
Outputs of entity span classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification scores (before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of
each layer plus the initial embedding outputs.
entity_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape `(batch_size, entity_length, hidden_size)`. Entity hidden-states of the model at the output
of each layer plus the initial entity embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the
weighted average in the self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
entity_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
class LukeEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self,
input_ids=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
class LukeEntityEmbeddings(nn.Module):
def __init__(self, config: LukeConfig):
super().__init__()
self.config = config
self.entity_embeddings = nn.Embedding(config.entity_vocab_size, config.entity_emb_size, padding_idx=0)
if config.entity_emb_size != config.hidden_size:
self.entity_embedding_dense = nn.Linear(config.entity_emb_size, config.hidden_size, bias=False)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self, entity_ids: torch.LongTensor, position_ids: torch.LongTensor, token_type_ids: torch.LongTensor = None
):
if token_type_ids is None:
token_type_ids = torch.zeros_like(entity_ids)
entity_embeddings = self.entity_embeddings(entity_ids)
if self.config.entity_emb_size != self.config.hidden_size:
entity_embeddings = self.entity_embedding_dense(entity_embeddings)
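        # Entity mentions can span several word positions, and position_ids is padded with -1.
        # The position embeddings of the valid positions are summed and averaged below, with the
        # padded positions masked out of both the sum and the normalizer.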
position_embeddings = self.position_embeddings(position_ids.clamp(min=0))
position_embedding_mask = (position_ids != -1).type_as(position_embeddings).unsqueeze(-1)
position_embeddings = position_embeddings * position_embedding_mask
position_embeddings = torch.sum(position_embeddings, dim=-2)
position_embeddings = position_embeddings / position_embedding_mask.sum(dim=-2).clamp(min=1e-7)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = entity_embeddings + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class LukeSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
f"heads {config.num_attention_heads}."
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.use_entity_aware_attention = config.use_entity_aware_attention
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
if self.use_entity_aware_attention:
self.w2e_query = nn.Linear(config.hidden_size, self.all_head_size)
self.e2w_query = nn.Linear(config.hidden_size, self.all_head_size)
self.e2e_query = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
word_hidden_states,
entity_hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
word_size = word_hidden_states.size(1)
if entity_hidden_states is None:
concat_hidden_states = word_hidden_states
else:
concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1)
key_layer = self.transpose_for_scores(self.key(concat_hidden_states))
value_layer = self.transpose_for_scores(self.value(concat_hidden_states))
if self.use_entity_aware_attention and entity_hidden_states is not None:
# compute query vectors using word-word (w2w), word-entity (w2e), entity-word (e2w), entity-entity (e2e)
# query layers
w2w_query_layer = self.transpose_for_scores(self.query(word_hidden_states))
w2e_query_layer = self.transpose_for_scores(self.w2e_query(word_hidden_states))
e2w_query_layer = self.transpose_for_scores(self.e2w_query(entity_hidden_states))
e2e_query_layer = self.transpose_for_scores(self.e2e_query(entity_hidden_states))
# compute w2w, w2e, e2w, and e2e key vectors used with the query vectors computed above
w2w_key_layer = key_layer[:, :, :word_size, :]
e2w_key_layer = key_layer[:, :, :word_size, :]
w2e_key_layer = key_layer[:, :, word_size:, :]
e2e_key_layer = key_layer[:, :, word_size:, :]
# compute attention scores based on the dot product between the query and key vectors
w2w_attention_scores = torch.matmul(w2w_query_layer, w2w_key_layer.transpose(-1, -2))
w2e_attention_scores = torch.matmul(w2e_query_layer, w2e_key_layer.transpose(-1, -2))
e2w_attention_scores = torch.matmul(e2w_query_layer, e2w_key_layer.transpose(-1, -2))
e2e_attention_scores = torch.matmul(e2e_query_layer, e2e_key_layer.transpose(-1, -2))
# combine attention scores to create the final attention score matrix
word_attention_scores = torch.cat([w2w_attention_scores, w2e_attention_scores], dim=3)
entity_attention_scores = torch.cat([e2w_attention_scores, e2e_attention_scores], dim=3)
attention_scores = torch.cat([word_attention_scores, entity_attention_scores], dim=2)
else:
query_layer = self.transpose_for_scores(self.query(concat_hidden_states))
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the LukeModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
output_word_hidden_states = context_layer[:, :word_size, :]
if entity_hidden_states is None:
output_entity_hidden_states = None
else:
output_entity_hidden_states = context_layer[:, word_size:, :]
if output_attentions:
outputs = (output_word_hidden_states, output_entity_hidden_states, attention_probs)
else:
outputs = (output_word_hidden_states, output_entity_hidden_states)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
class LukeSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LukeAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LukeSelfAttention(config)
self.output = LukeSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
raise NotImplementedError("LUKE does not support the pruning of attention heads")
def forward(
self,
word_hidden_states,
entity_hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
word_size = word_hidden_states.size(1)
self_outputs = self.self(
word_hidden_states,
entity_hidden_states,
attention_mask,
head_mask,
output_attentions,
)
if entity_hidden_states is None:
concat_self_outputs = self_outputs[0]
concat_hidden_states = word_hidden_states
else:
concat_self_outputs = torch.cat(self_outputs[:2], dim=1)
concat_hidden_states = torch.cat([word_hidden_states, entity_hidden_states], dim=1)
attention_output = self.output(concat_self_outputs, concat_hidden_states)
word_attention_output = attention_output[:, :word_size, :]
if entity_hidden_states is None:
entity_attention_output = None
else:
entity_attention_output = attention_output[:, word_size:, :]
# add attentions if we output them
outputs = (word_attention_output, entity_attention_output) + self_outputs[2:]
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class LukeIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput
class LukeOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LukeLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LukeAttention(config)
self.intermediate = LukeIntermediate(config)
self.output = LukeOutput(config)
def forward(
self,
word_hidden_states,
entity_hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
):
word_size = word_hidden_states.size(1)
self_attention_outputs = self.attention(
word_hidden_states,
entity_hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
)
if entity_hidden_states is None:
concat_attention_output = self_attention_outputs[0]
else:
concat_attention_output = torch.cat(self_attention_outputs[:2], dim=1)
outputs = self_attention_outputs[2:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, concat_attention_output
)
word_layer_output = layer_output[:, :word_size, :]
if entity_hidden_states is None:
entity_layer_output = None
else:
entity_layer_output = layer_output[:, word_size:, :]
outputs = (word_layer_output, entity_layer_output) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class LukeEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LukeLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
word_hidden_states,
entity_hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_word_hidden_states = () if output_hidden_states else None
all_entity_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_word_hidden_states = all_word_hidden_states + (word_hidden_states,)
all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
if self.gradient_checkpointing and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
word_hidden_states,
entity_hidden_states,
attention_mask,
layer_head_mask,
)
else:
layer_outputs = layer_module(
word_hidden_states,
entity_hidden_states,
attention_mask,
layer_head_mask,
output_attentions,
)
word_hidden_states = layer_outputs[0]
if entity_hidden_states is not None:
entity_hidden_states = layer_outputs[1]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[2],)
if output_hidden_states:
all_word_hidden_states = all_word_hidden_states + (word_hidden_states,)
all_entity_hidden_states = all_entity_hidden_states + (entity_hidden_states,)
if not return_dict:
return tuple(
v
for v in [
word_hidden_states,
all_word_hidden_states,
all_self_attentions,
entity_hidden_states,
all_entity_hidden_states,
]
if v is not None
)
return BaseLukeModelOutput(
last_hidden_state=word_hidden_states,
hidden_states=all_word_hidden_states,
attentions=all_self_attentions,
entity_last_hidden_state=entity_hidden_states,
entity_hidden_states=all_entity_hidden_states,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
class LukePooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class EntityPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.entity_emb_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class EntityPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.transform = EntityPredictionHeadTransform(config)
self.decoder = nn.Linear(config.entity_emb_size, config.entity_vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.entity_vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class LukePreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LukeConfig
base_model_prefix = "luke"
supports_gradient_checkpointing = True
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
if module.embedding_dim == 1: # embedding for bias parameters
module.weight.data.zero_()
else:
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, LukeEncoder):
module.gradient_checkpointing = value
LUKE_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic
    methods the library implements for all its models (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config ([`LukeConfig`]): Model configuration class with all the parameters of the
model. Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model
weights.
"""
LUKE_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`LukeTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
entity_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`):
Indices of entity tokens in the entity vocabulary.
Indices can be obtained using [`LukeTokenizer`]. See
[`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for
details.
entity_attention_mask (`torch.FloatTensor` of shape `(batch_size, entity_length)`, *optional*):
Mask to avoid performing attention on padding entity token indices. Mask values selected in `[0, 1]`:
- 1 for entity tokens that are **not masked**,
- 0 for entity tokens that are **masked**.
entity_token_type_ids (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
Segment token indices to indicate first and second portions of the entity token inputs. Indices are
selected in `[0, 1]`:
- 0 corresponds to a *portion A* entity token,
- 1 corresponds to a *portion B* entity token.
entity_position_ids (`torch.LongTensor` of shape `(batch_size, entity_length, max_mention_length)`, *optional*):
Indices of positions of each input entity in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare LUKE model transformer outputting raw hidden-states for both word tokens and entities without any specific head on top.",
LUKE_START_DOCSTRING,
)
class LukeModel(LukePreTrainedModel):
_keys_to_ignore_on_load_missing = [r"position_ids"]
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = LukeEmbeddings(config)
self.entity_embeddings = LukeEntityEmbeddings(config)
self.encoder = LukeEncoder(config)
self.pooler = LukePooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def get_entity_embeddings(self):
return self.entity_embeddings.entity_embeddings
def set_entity_embeddings(self, value):
self.entity_embeddings.entity_embeddings = value
def _prune_heads(self, heads_to_prune):
raise NotImplementedError("LUKE does not support the pruning of attention heads")
@add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BaseLukeModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
entity_ids=None,
entity_attention_mask=None,
entity_token_type_ids=None,
entity_position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Examples:
```python
>>> from transformers import LukeTokenizer, LukeModel
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
>>> model = LukeModel.from_pretrained("studio-ousia/luke-base")
# Compute the contextualized entity representation corresponding to the entity mention "Beyoncé"
>>> text = "Beyoncé lives in Los Angeles."
>>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé"
>>> encoding = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
>>> outputs = model(**encoding)
>>> word_last_hidden_state = outputs.last_hidden_state
>>> entity_last_hidden_state = outputs.entity_last_hidden_state
# Input Wikipedia entities to obtain enriched contextualized representations of word tokens
>>> text = "Beyoncé lives in Los Angeles."
>>> entities = ["Beyoncé", "Los Angeles"] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
>>> entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
>>> encoding = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
>>> outputs = model(**encoding)
>>> word_last_hidden_state = outputs.last_hidden_state
>>> entity_last_hidden_state = outputs.entity_last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if entity_ids is not None:
entity_seq_length = entity_ids.size(1)
if entity_attention_mask is None:
entity_attention_mask = torch.ones((batch_size, entity_seq_length), device=device)
if entity_token_type_ids is None:
entity_token_type_ids = torch.zeros((batch_size, entity_seq_length), dtype=torch.long, device=device)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
# First, compute word embeddings
word_embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
# Second, compute extended attention mask
extended_attention_mask = self.get_extended_attention_mask(attention_mask, entity_attention_mask)
# Third, compute entity embeddings and concatenate with word embeddings
if entity_ids is None:
entity_embedding_output = None
else:
entity_embedding_output = self.entity_embeddings(entity_ids, entity_position_ids, entity_token_type_ids)
# Fourth, send embeddings through the model
encoder_outputs = self.encoder(
word_embedding_output,
entity_embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# Fifth, get the output. LukeModel outputs the same as BertModel, namely sequence_output of shape (batch_size, seq_len, hidden_size)
sequence_output = encoder_outputs[0]
        # Sixth, we compute the pooled_output based on the sequence_output
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseLukeModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
entity_last_hidden_state=encoder_outputs.entity_last_hidden_state,
entity_hidden_states=encoder_outputs.entity_hidden_states,
)
def get_extended_attention_mask(
self, word_attention_mask: torch.LongTensor, entity_attention_mask: Optional[torch.LongTensor]
):
"""
        Makes a broadcastable attention mask so that masked tokens are ignored.
Arguments:
word_attention_mask (`torch.LongTensor`):
Attention mask for word tokens with ones indicating tokens to attend to, zeros for tokens to ignore.
entity_attention_mask (`torch.LongTensor`, *optional*):
Attention mask for entity tokens with ones indicating tokens to attend to, zeros for tokens to ignore.
Returns:
            `torch.Tensor` The extended attention mask, with the same dtype as `attention_mask.dtype`.
"""
attention_mask = word_attention_mask
if entity_attention_mask is not None:
attention_mask = torch.cat([attention_mask, entity_attention_mask], dim=-1)
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(f"Wrong shape for attention_mask (shape {attention_mask.shape})")
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
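# Illustrative sketch (not part of the original model code): how a 2-D word +
# entity attention mask becomes the additive extended mask built by the method
# above. Shapes and the -10000.0 masking value mirror get_extended_attention_mask;
# float32 is assumed for the dtype.
def _extended_attention_mask_sketch():
    word_mask = torch.ones(2, 5, dtype=torch.long)      # batch of 2, 5 word tokens each
    entity_mask = torch.ones(2, 3, dtype=torch.long)    # 3 entity tokens each
    entity_mask[1, 2] = 0                                # mask one entity in the second sample
    mask = torch.cat([word_mask, entity_mask], dim=-1)   # (2, 8)
    extended = mask[:, None, None, :].to(dtype=torch.float32)
    extended = (1.0 - extended) * -10000.0               # 0.0 where attended, -10000.0 where masked
    return extended                                      # shape (2, 1, 1, 8)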
def create_position_ids_from_input_ids(input_ids, padding_idx):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids (torch.Tensor): Input token ids, where padding tokens equal `padding_idx`.
padding_idx (int): The index of the padding token.
Returns: torch.Tensor of position ids with the same shape as `input_ids`
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask)) * mask
return incremental_indices.long() + padding_idx
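# Illustrative sketch: padding-aware position ids as computed by the function
# above. Non-padding positions count up from padding_idx + 1; padding tokens keep
# padding_idx. The input values are made up.
def _position_ids_sketch():
    input_ids = torch.tensor([[5, 6, 7, 1, 1]])  # 1 is assumed to be the padding id
    return create_position_ids_from_input_ids(input_ids, padding_idx=1)
    # expected: tensor([[2, 3, 4, 1, 1]])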
# Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead
class LukeLMHead(nn.Module):
"""Roberta Head for masked language modeling."""
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, features, **kwargs):
x = self.dense(features)
x = gelu(x)
x = self.layer_norm(x)
# project back to size of vocabulary with bias
x = self.decoder(x)
return x
def _tie_weights(self):
# To tie those two weights if they get disconnected (on TPU or when the bias is resized)
self.bias = self.decoder.bias
@add_start_docstrings(
"""
The LUKE model with a language modeling head and entity prediction head on top for masked language modeling and
masked entity prediction.
""",
LUKE_START_DOCSTRING,
)
class LukeForMaskedLM(LukePreTrainedModel):
_keys_to_ignore_on_save = [
r"lm_head.decoder.weight",
r"lm_head.decoder.bias",
r"entity_predictions.decoder.weight",
]
_keys_to_ignore_on_load_missing = [
r"position_ids",
r"lm_head.decoder.weight",
r"lm_head.decoder.bias",
r"entity_predictions.decoder.weight",
]
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.lm_head = LukeLMHead(config)
self.entity_predictions = EntityPredictionHead(config)
self.loss_fn = nn.CrossEntropyLoss(ignore_index=-1)
# Initialize weights and apply final processing
self.post_init()
def tie_weights(self):
super().tie_weights()
self._tie_or_clone_weights(self.entity_predictions.decoder, self.luke.entity_embeddings.entity_embeddings)
def get_output_embeddings(self):
return self.lm_head.decoder
def set_output_embeddings(self, new_embeddings):
self.lm_head.decoder = new_embeddings
@add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=LukeMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
entity_ids=None,
entity_attention_mask=None,
entity_token_type_ids=None,
entity_position_ids=None,
labels=None,
entity_labels=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
entity_labels (`torch.LongTensor` of shape `(batch_size, entity_length)`, *optional*):
Labels for computing the masked entity prediction loss. Indices should be in `[-100, 0, ..., config.entity_vocab_size]`. Entities with indices set to `-100` are ignored
(masked), the loss is only computed for the entities with labels in `[0, ..., config.entity_vocab_size]`
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
loss = None
mlm_loss = None
logits = self.lm_head(outputs.last_hidden_state)
if labels is not None:
mlm_loss = self.loss_fn(logits.view(-1, self.config.vocab_size), labels.view(-1))
if loss is None:
loss = mlm_loss
mep_loss = None
entity_logits = self.entity_predictions(outputs.entity_last_hidden_state)
if entity_labels is not None:
mep_loss = self.loss_fn(entity_logits.view(-1, self.config.entity_vocab_size), entity_labels.view(-1))
if loss is None:
loss = mep_loss
else:
loss = loss + mep_loss
if not return_dict:
output = (logits, entity_logits, outputs.hidden_states, outputs.entity_hidden_states, outputs.attentions)
if mlm_loss is not None and mep_loss is not None:
return (loss, mlm_loss, mep_loss) + output
elif mlm_loss is not None:
return (loss, mlm_loss) + output
elif mep_loss is not None:
return (loss, mep_loss) + output
else:
return output
return LukeMaskedLMOutput(
loss=loss,
mlm_loss=mlm_loss,
mep_loss=mep_loss,
logits=logits,
entity_logits=entity_logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
The LUKE model with a classification head on top (a linear layer on top of the hidden state of the first entity
token) for entity classification tasks, such as Open Entity.
""",
LUKE_START_DOCSTRING,
)
class LukeForEntityClassification(LukePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.num_labels = config.num_labels
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=EntityClassificationOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
entity_ids=None,
entity_attention_mask=None,
entity_token_type_ids=None,
entity_position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss
is used for the single-label classification. In this case, labels should contain the indices that should be
in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary
cross entropy loss is used for the multi-label classification. In this case, labels should only contain
`[0, 1]`, where 0 and 1 indicate false and true, respectively.
Returns:
Examples:
```python
>>> from transformers import LukeTokenizer, LukeForEntityClassification
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
>>> model = LukeForEntityClassification.from_pretrained("studio-ousia/luke-large-finetuned-open-entity")
>>> text = "Beyoncé lives in Los Angeles."
>>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé"
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: person
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
feature_vector = outputs.entity_last_hidden_state[:, 0, :]
feature_vector = self.dropout(feature_vector)
logits = self.classifier(feature_vector)
loss = None
if labels is not None:
# When `labels` has a single dimension, cross entropy is used as the loss function;
# binary cross entropy with logits is used otherwise.
if labels.ndim == 1:
loss = nn.functional.cross_entropy(logits, labels)
else:
loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
if not return_dict:
output = (
logits,
outputs.hidden_states,
outputs.entity_hidden_states,
outputs.attentions,
)
return ((loss,) + output) if loss is not None else output
return EntityClassificationOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
The LUKE model with a classification head on top (a linear layer on top of the hidden states of the two entity
tokens) for entity pair classification tasks, such as TACRED.
""",
LUKE_START_DOCSTRING,
)
class LukeForEntityPairClassification(LukePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.num_labels = config.num_labels
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size * 2, config.num_labels, False)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=EntityPairClassificationOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
entity_ids=None,
entity_attention_mask=None,
entity_token_type_ids=None,
entity_position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)` or `(batch_size, num_labels)`, *optional*):
Labels for computing the classification loss. If the shape is `(batch_size,)`, the cross entropy loss
is used for the single-label classification. In this case, labels should contain the indices that should be
in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, num_labels)`, the binary
cross entropy loss is used for the multi-label classification. In this case, labels should only contain
`[0, 1]`, where 0 and 1 indicate false and true, respectively.
Returns:
Examples:
```python
>>> from transformers import LukeTokenizer, LukeForEntityPairClassification
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
>>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
>>> text = "Beyoncé lives in Los Angeles."
>>> entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: per:cities_of_residence
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
feature_vector = torch.cat(
[outputs.entity_last_hidden_state[:, 0, :], outputs.entity_last_hidden_state[:, 1, :]], dim=1
)
feature_vector = self.dropout(feature_vector)
logits = self.classifier(feature_vector)
loss = None
if labels is not None:
# When `labels` has a single dimension, cross entropy is used as the loss function;
# binary cross entropy with logits is used otherwise.
if labels.ndim == 1:
loss = nn.functional.cross_entropy(logits, labels)
else:
loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
if not return_dict:
output = (
logits,
outputs.hidden_states,
outputs.entity_hidden_states,
outputs.attentions,
)
return ((loss,) + output) if loss is not None else output
return EntityPairClassificationOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
The LUKE model with a span classification head on top (a linear layer on top of the hidden states output) for tasks
such as named entity recognition.
""",
LUKE_START_DOCSTRING,
)
class LukeForEntitySpanClassification(LukePreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.luke = LukeModel(config)
self.num_labels = config.num_labels
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(LUKE_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=EntitySpanClassificationOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
entity_ids=None,
entity_attention_mask=None,
entity_token_type_ids=None,
entity_position_ids=None,
entity_start_positions=None,
entity_end_positions=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
entity_start_positions (`torch.LongTensor`):
The start positions of entities in the word token sequence.
entity_end_positions (`torch.LongTensor`):
The end positions of entities in the word token sequence.
labels (`torch.LongTensor` of shape `(batch_size, entity_length)` or `(batch_size, entity_length, num_labels)`, *optional*):
Labels for computing the classification loss. If the shape is `(batch_size, entity_length)`, the cross
entropy loss is used for the single-label classification. In this case, labels should contain the indices
that should be in `[0, ..., config.num_labels - 1]`. If the shape is `(batch_size, entity_length, num_labels)`, the binary cross entropy loss is used for the multi-label classification. In this case,
labels should only contain `[0, 1]`, where 0 and 1 indicate false and true, respectively.
Returns:
Examples:
```python
>>> from transformers import LukeTokenizer, LukeForEntitySpanClassification
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
>>> model = LukeForEntitySpanClassification.from_pretrained("studio-ousia/luke-large-finetuned-conll-2003")
>>> text = "Beyoncé lives in Los Angeles"
# List all possible entity spans in the text
>>> word_start_positions = [0, 8, 14, 17, 21] # character-based start positions of word tokens
>>> word_end_positions = [7, 13, 16, 20, 28] # character-based end positions of word tokens
>>> entity_spans = []
>>> for i, start_pos in enumerate(word_start_positions):
... for end_pos in word_end_positions[i:]:
... entity_spans.append((start_pos, end_pos))
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> predicted_class_indices = logits.argmax(-1).squeeze().tolist()
>>> for span, predicted_class_idx in zip(entity_spans, predicted_class_indices):
... if predicted_class_idx != 0:
... print(text[span[0]:span[1]], model.config.id2label[predicted_class_idx])
Beyoncé PER
Los Angeles LOC
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.luke(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
entity_ids=entity_ids,
entity_attention_mask=entity_attention_mask,
entity_token_type_ids=entity_token_type_ids,
entity_position_ids=entity_position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
hidden_size = outputs.last_hidden_state.size(-1)
entity_start_positions = entity_start_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
start_states = torch.gather(outputs.last_hidden_state, -2, entity_start_positions)
entity_end_positions = entity_end_positions.unsqueeze(-1).expand(-1, -1, hidden_size)
end_states = torch.gather(outputs.last_hidden_state, -2, entity_end_positions)
feature_vector = torch.cat([start_states, end_states, outputs.entity_last_hidden_state], dim=2)
feature_vector = self.dropout(feature_vector)
logits = self.classifier(feature_vector)
loss = None
if labels is not None:
# When `labels` has two dimensions, cross entropy is used as the loss function;
# binary cross entropy with logits is used otherwise.
if labels.ndim == 2:
loss = nn.functional.cross_entropy(logits.view(-1, self.num_labels), labels.view(-1))
else:
loss = nn.functional.binary_cross_entropy_with_logits(logits.view(-1), labels.view(-1).type_as(logits))
if not return_dict:
output = (
logits,
outputs.hidden_states,
outputs.entity_hidden_states,
outputs.attentions,
)
return ((loss,) + output) if loss is not None else output
return EntitySpanClassificationOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
entity_hidden_states=outputs.entity_hidden_states,
attentions=outputs.attentions,
)
|
the-stack_0_25574
|
import unicodedata
import os
import time
import json
import re
import sys
import ctypes
import copy
import numpy as np
import socket
import subprocess
import paramiko
import platform
import decouple
from datetime import datetime
from pylabnet.network.core.generic_server import GenericServer
import pyqtgraph as pg
import pyqtgraph.exporters
#import netifaces as ni
def str_to_float(in_val):
"""Convert human-readable exponential form to float.
:param in_val: (str) input string of the following formats:
'float_number' --> float_number
'float_number + white_space + exp_prefix + unit_string'
--> float_number * 10**exp_value
Supported exp prefixes: ['T', 'G', 'M', 'k', '', 'm', 'u', 'n', 'p']
Warning: format 'just exp_prefix without unit_string' is not
supported: if only one symbol is given after 'float_number',
it will be interpreted as unit and exponent will be set to 10**0.
Examples: '1.2 us' --> 1.2e-6
'-4.5 mV' --> -4.5e-3
'10.1 GHz' --> 1.01e10
'1.56 s' --> 1.56
'1.56 m' --> 1.56 [interpreted as 1.56 meters, not as 1.56e-3]
:return: (float) extracted value without unit
"""
if isinstance(in_val, (float, int)):
return in_val
# Split string into mantissa and exp_prefix + unit
item_list = in_val.split()
# Extract mantissa
mantissa = float(item_list[0])
# Extract exp_prefix (a single letter) if included
try:
exp_prefix_unit = item_list[1]
if len(exp_prefix_unit) > 1:
exp_prefix = item_list[1][0]
else:
exp_prefix = ''
except IndexError:
exp_prefix = ''
# Convert exp_prefix into exp_value
if exp_prefix == 'T':
exp_value = 12
elif exp_prefix == 'G':
exp_value = 9
elif exp_prefix == 'M':
exp_value = 6
elif exp_prefix == 'k':
exp_value = 3
elif exp_prefix == '':
exp_value = 0
elif exp_prefix == 'm':
exp_value = -3
elif exp_prefix == 'u':
exp_value = -6
elif exp_prefix == 'n':
exp_value = -9
elif exp_prefix == 'p':
exp_value = -12
else:
# The case of multi-letter unit without prefix: '1.5 Hz'
# the first letter 'H' is not an exp prefix
exp_value = 0
return mantissa * (10 ** exp_value)
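# Illustrative sketch of str_to_float usage; the expected values follow the
# docstring examples above and use tolerant comparisons for floating point.
def _str_to_float_examples():
    assert abs(str_to_float('1.2 us') - 1.2e-6) < 1e-12
    assert abs(str_to_float('-4.5 mV') - (-4.5e-3)) < 1e-9
    assert abs(str_to_float('10.1 GHz') - 1.01e10) < 1.0
    assert str_to_float('1.56 m') == 1.56  # 'm' is read as a unit here, not a milli prefix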
def pwr_to_float(in_val):
# FIXME: implement
# if isinstance(in_val, float):
# return in_val
# #
# # Determine whether the power is given in Volts or dBm
# #
# # Split string into mantissa and exp_prefix + unit
# item_list = in_val.split()
#
# # Extract exp_prefix (a single letter) if included
# try:
# exp_prefix_unit = item_list[1]
#
# if len(exp_prefix_unit) > 1:
# exp_prefix = item_list[1][0]
# else:
# exp_prefix = ''
# except IndexError:
# exp_prefix = ''
return str_to_float(in_val=in_val)
def slugify(value, allow_unicode=False):
"""
Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
Remove characters that aren't alphanumerics, underscores, or hyphens.
Convert to lowercase. Also strip leading and trailing whitespace.
From Django 2.2
"""
value = str(value)
if allow_unicode:
value = unicodedata.normalize('NFKC', value)
else:
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
value = re.sub(r'[^\w\s-]', '', value.lower()).strip()
return re.sub(r'[-\s]+', '-', value)
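# Illustrative sketch of slugify behaviour; the expected outputs are assumptions
# based on the Django implementation referenced in the docstring.
def _slugify_examples():
    assert slugify('My Data File (final).txt') == 'my-data-file-finaltxt'
    assert slugify('  Äpfel & Birnen  ') == 'apfel-birnen'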
def get_dated_subdirectory_filepath(directory, filename=None):
'''Creates directory structure folder_path/YEAR/MONTH/DAY/filename
:folder_path: Upper level directory
:filename: Name of file. Will be slugified. If None just returns directory
Return:
:filepath: Path to file in newly created structure.
'''
# Create subdirectory structure: YEAR/MONTH/DAY
dated_path = os.path.join(directory, time.strftime('%Y'), time.strftime('%m'), time.strftime('%d'))
# create folders if they don't exist yet
os.makedirs(dated_path, exist_ok=True)
# Define full file path
if filename is None:
filepath = dated_path
else:
filepath = os.path.join(dated_path, f'{slugify(filename)}')
return filepath
def dict_to_str(dic, separate='\n'):
""" Converts a dictionary to a nicely formatted string
:param dic: (dict) to convert
:param separate: (str) string to use to separate dictionary elements
:return: (str) of dictionary content
"""
dict_str = ''
for key, value in dic.items():
dict_str += '{}: {}{}'.format(key, value, separate)
return dict_str.rstrip()
def remove_spaces(st):
""" Removes spaces from a string
:param st: (str) input string with spaces
:return: (str) string without any spaces
"""
return st.replace(' ', '')
def parse_args():
""" Parses command line arguments into dictionary format, assuming keywords of the form --kw for each argument"""
arg_index = 1
arg_dict = {}
while arg_index < len(sys.argv) - 1:
arg_dict[sys.argv[arg_index][2:]] = sys.argv[arg_index + 1]
arg_index += 2
return arg_dict
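# Illustrative sketch of parse_args for a command line such as
#   python launch.py --logip 192.168.0.1 --logport 1234
# All values come back as strings; the script name and keywords are made up.
def _parse_args_sketch():
    saved_argv = sys.argv
    sys.argv = ['launch.py', '--logip', '192.168.0.1', '--logport', '1234']
    try:
        return parse_args()  # {'logip': '192.168.0.1', 'logport': '1234'}
    finally:
        sys.argv = saved_argv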
def unpack_launcher(**kwargs):
""" Unpacks the launcher kwargs for easy use in launcher method definition within script modules.
Copy paste the following implementation at the top of script.launch() method:
logger, loghost, logport, clients, guis, params = unpack_launcher(**kwargs)
:param kwargs: (dict) contains all keyword arguments required for launching a script from launcher module
e.g.: dict(logger=log, loghost='localhost', clients=[client1, client2], guis=[gui_client1, gui_client2]),
logport=1234, params=experimental_parameters_container)
Note that experimental parameters should go in "params" and can be customized to contain all other
script specific stuff
:return: (tuple) logger, loghost, logport, clients, guis, params
"""
logger = kwargs['logger']
loghost = kwargs['loghost']
clients = kwargs['clients']
guis = kwargs['guis']
logport = kwargs['logport']
params = kwargs['params']
return logger, loghost, logport, clients, guis, params
def show_console():
""" Shows the active console.
Useful for processes where console is typically hidden but user input is suddenly required
"""
operating_system = get_os()
if operating_system == 'Windows':
ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 9)
def hide_console():
""" Hides the active console.
Useful for processes where the console is not needed (instead, there is a GUI to use)
"""
operating_system = get_os()
if operating_system == 'Windows':
ctypes.windll.user32.ShowWindow(ctypes.windll.kernel32.GetConsoleWindow(), 0)
def create_server(service, logger=None, host='localhost'):
""" Attempts to create a server with randomly chosen port numbers
:param service: service from which to launch a server
:param logger: instance of LogClient for logging
:param host: (optional) IP address of host
:return: (tuple) instance of server created, port number (int) created on
"""
timeout = 0
while timeout < 1000:
try:
port = np.random.randint(1024, 49151)
server = GenericServer(
host=host,
port=port,
service=service
)
timeout = 9999
except ConnectionRefusedError:
msg_str = f'Failed to create update server with port {port}'
if logger is None:
print(msg_str)
else:
logger.warn(f'Failed to create update server with port {port}')
timeout += 1
return server, port
def setup_full_service(service_class, module, logger=None, host='localhost'):
""" Creates a Service and a server, adds info to logger and starts server
:param service_class: Service class to instantiate (not the instance itself)
:param module: module to assign to service
:param logger: instance of LogClient
:param host: (str) hostname
"""
service = service_class()
service.assign_module(module)
server, port = create_server(service, logger=logger, host=host)
logger.update_data(data=dict(port=port))
server.start()
def value_to_bitval(value, bits=8, min=0, max=1):
""" Converts a value to a bits-bit number for range min to max
:param value: (float) value to convert
:param bits: (int) number of bits of resolution
:param min: (float) minimum of range
:param max: (float) maximum of range
:return: (int) value in bits-bit (e.g. 8-bit from 0 to 2^8-1)
"""
# Convert value to scale of 0 to 1
scaled_value = (value - min) / (max - min)
return int(scaled_value * (2**bits - 1))
def bitval_to_value(bitval, bits=8, min=0, max=1):
""" Converts a bits-bit number into its physical value for range from min to max
:param bitval: (int) value in bits-bit (e.g. 8-bit from 0 to 2^8-1)
:param bits: (int) number of bits of resolution
:param min: (float) minimum of range
:param max: (float) maximum of range
:return: (float) physical value
"""
# Convert value to scale of 0 to 1
scaled_value = bitval / (2**bits - 1)
return scaled_value * (max - min) + min
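# Illustrative round trip between a physical value and its bit value; the 8-bit
# resolution and the 0-1 range are arbitrary example choices.
def _bitval_round_trip_example():
    raw = value_to_bitval(0.5, bits=8, min=0, max=1)   # 127
    back = bitval_to_value(raw, bits=8, min=0, max=1)  # ~0.498 (quantization error)
    return raw, back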
def generate_widgets(widget_dict):
""" Generates a list of widget names based on a supplied dictionary
Assumes widgets follow a naming convention of "widget_base_name_i" where i is the index
(this function is helpful when one has many widgets with the same base_name)
:param widget_dict: (dict) containing widget base names as keys, and number of instances as
values
"""
widgets = ()
for widget_name, instances in widget_dict.items():
widgets = widgets + ([f'{widget_name}_{instance+1}' for instance in range(instances)],)
return widgets
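# Illustrative sketch of generate_widgets following the naming convention above;
# the widget base names are made up.
def _generate_widgets_example():
    return generate_widgets(dict(curve=2, label=1))
    # (['curve_1', 'curve_2'], ['label_1'])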
def generate_filepath(filename=None, directory=None, date_dir=False):
""" Generates filepath for saving.
:param directory: (str) directory to save to
:param filename: (str) name of file to save
:param date_dir: (bool) whether or not to use date sub-directory
"""
if directory is None:
directory = os.getcwd()
if filename is None:
filename = str(datetime.now().strftime('%H_%M_%S'))
else:
filename += str(datetime.now().strftime('_%H_%M_%S'))
if date_dir:
filepath = get_dated_subdirectory_filepath(directory, filename)
else:
filepath = os.path.join(directory, filename)
return filepath
def generic_save(data, filename=None, directory=None, date_dir=False):
""" Saves data as txt file
:param directory: (str) directory to save to
:param filename: (str) name of file to save
:param date_dir: (bool) whether or not to use date sub-directory
"""
filepath = generate_filepath(filename, directory, date_dir)
if not filepath.endswith('.txt'):
filepath += '.txt'
try:
np.savetxt(filepath, data)
except OSError:
os.mkdir(directory)
np.savetxt(filepath, data)
except ValueError:
# TODO: Potentially incorporate with logger and except hook
pass
def save_metadata(log, filename=None, directory=None, date_dir=False):
""" Saves metadata stored in the logger
:param log: (LogClient)
:param directory: (str) directory to save to
:param filename: (str) name of file to save
:param date_dir: (bool) whether or not to use date sub-directory
"""
filepath = generate_filepath(f'{filename}_metadata', directory, date_dir)
if not filepath.endswith('.json'):
filepath += '.json'
try:
with open(filepath, 'w') as outfile:
json.dump(log.get_metadata(), outfile, indent=4)
except TypeError:
log.warn('Did not save metadata')
except OSError:
log.warn('Did not save metadata')
def plotly_figure_save(plotly_figure, filename=None, directory=None, date_dir=False):
""" Saves plotly_figure as png
:param directory: (str) directory to save to
:param filename: (str) name of file to save
:param date_dir: (bool) whether or not to use date sub-directory
"""
filepath = generate_filepath(filename, directory, date_dir)
plotly_figure.write_image(f'{filepath}.png')
def pyqtgraph_save(widget, filename=None, directory=None, date_dir=False):
""" Saves pyqtgraph figure to png
:param directory: (str) directory to save to
:param filename: (str) name of file to save
:param date_dir: (bool) whether or not to use date sub-directory
"""
filepath = generate_filepath(filename, directory, date_dir) + '.png'
exporter = pg.exporters.ImageExporter(widget)
exporter.export(filepath)
def load_config(config_filename, folder_root=None, logger=None):
""" Load configuration data stored in JSON format
:config_filename: (str) Name of config. file, without the .json ending
:folder_root: (str) Name of folder where the config files are stored. If None,
use pylabnet/config
:logger: (object) Instance of logger.
Returns data as a Python dictionary, or None if the file could not be found.
"""
filepath = get_config_filepath(config_filename, folder_root)
try:
# Opening JSON file
f = open(filepath)
# returns JSON object as
# a dictionary
data = json.load(f)
try:
logger.info(f'Successfully loaded settings from {config_filename}.json.')
# Don't raise an error if logger doesn't exist
except AttributeError:
pass
except FileNotFoundError:
data = None
try:
logger.error(f'Settings file {filepath} not found.')
except AttributeError:
raise
return data
def get_config_directory():
""" Returns the config directory """
return os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..',
'configs'
))
def load_device_config(device, config, logger=None):
""" Returns the device config directory
:param device: (str) name of the device folder
:param config: (str) name of the specific device config file
:param logger: instance of LogHandler
"""
filepath = os.path.join(get_config_directory(), 'devices', device, f'{config}.json')
try:
f = open(filepath)
# returns JSON object as
# a dictionary
data = json.load(f)
try:
logger.info(f'Successfully loaded settings from {config}.json.')
# Don't raise an error if logger doesn't exist
except AttributeError:
pass
except FileNotFoundError:
data = None
try:
logger.error(f'Settings file {filepath} not found.')
except AttributeError:
raise
return data
def load_script_config(script, config, logger=None):
""" Returns the script config directory
:param script: (str) name of the script folder
:param config: (str) name of the specific script config file
:param logger: instance of LogHandler
"""
filepath = os.path.join(get_config_directory(), 'scripts', script, f'{config}.json')
try:
f = open(filepath)
# returns JSON object as
# a dictionary
data = json.load(f)
try:
logger.info(f'Successfully loaded settings from {config}.json.')
# Don't raise an error if logger doesn't exist
except AttributeError:
pass
except FileNotFoundError:
data = None
try:
logger.error(f'Settings file {filepath} not found.')
except AttributeError:
raise
return data
def get_config_filepath(config_filename, folder_root=None):
""" Gets the config filepath
:param config_filename: (str) name of configuration file to save.
Can be an existing config file with other configuration parameters
:folder_root: (str) Name of folder where the config files are stored. If None,
use pylabnet/config
"""
if folder_root is None:
filepath = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'configs',
f'{config_filename}.json'
)
)
else:
filepath = os.path.join(folder_root, f'{config_filename}.json')
return filepath
def get_gui_widgets(gui, **kwargs):
""" Returns the GUI widget objects specified in kwargs
:param gui: (Window) main window gui object containing other widgets
:param kwargs: keyword arguments with argument name being the name
of the widget (str, widget_name) and argument value an integer specifying the
number of copies of that widget
For more than 1 widget copy, assumes the name is assigned as
widget_name_1, widget_name_2, etc.
:return: (dict) dictionary with keywords as widget name and values
as either individual widgets or list of widgets in case of multiple
similarly named widgets
"""
widgets = dict()
for widget_name, widget_number in kwargs.items():
# Check if it is multiple named widgets
if widget_number > 1:
widget_list = []
for widget_index in range(widget_number):
widget_list.append(getattr(
gui,
f'{widget_name}_{widget_index+1}'
))
widgets[widget_name] = widget_list
else:
widgets[widget_name] = getattr(gui, widget_name)
return widgets
def get_legend_from_graphics_view(legend_widget: pg.GraphicsView):
""" Configures and returns a legend widget given a GraphicsView
:param legend_widget: instance of GraphicsView object
:return: pg.LegendItem
"""
legend = pg.LegendItem()
view_box = pg.ViewBox()
legend_widget.setCentralWidget(view_box)
legend.setParentItem(view_box)
legend.anchor((0, 0), (0, 0))
return legend
def add_to_legend(legend: pg.LegendItem, curve: pg.PlotItem, curve_name):
""" Adds a curve to a legend
:param legend: pg.LegendItem to add to
:param curve: pg.PlotItem containing the relevant curve
:param curve_name: (str) name of curve
"""
legend.addItem(
curve,
' - ' + curve_name
)
def fill_2dlist(list_in):
""" Turns a 2D list of irregular dimensions into a 2D numpy array
Assuming only last dimension of list is incomplete
:param list_in: input list
:return: (numpy.ndarray) 2D array with missing elements of the last row padded with that row's first element
"""
list_manipulate = copy.deepcopy(list_in)
if len(list_in) > 1:
list_manipulate[-1] += [list_manipulate[-1][0]] * (len(list_manipulate[0]) - len(list_manipulate[-1]))
return np.array(list_manipulate)
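# Illustrative sketch of fill_2dlist: the incomplete last row is padded with its
# own first element so that the result is rectangular.
def _fill_2dlist_example():
    return fill_2dlist([[1, 2, 3], [4, 5]])
    # array([[1, 2, 3],
    #        [4, 5, 4]])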
def find_keys(input_dict, key_name):
"""Returns value of dictionary if key_name is either the key of the dict (normal dictionary lookup),
or an element of a key that is a tuple or list.
:input_dict: Dictionary to search.
:key_name: Key to lookup.
"""
found = []
for k, v in input_dict.items():
if type(k) in [list, tuple, dict] and key_name in k:
found.append(v)
elif key_name == k:
found.append(v)
return found
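# Illustrative sketch of find_keys with a tuple key, mimicking the client
# dictionaries passed around by the launchers; the keys and values are made up.
def _find_keys_example():
    clients = {('nidaqmx', 'dev1'): 'client_a', 'logger': 'client_b'}
    return find_keys(clients, 'nidaqmx')  # ['client_a']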
def find_client(clients, settings, client_type, client_config=None, logger=None):
""" Finds a particular client from client dictionary passed from launcher
:param clients: (dict) client dictionary
:param settings: (dict) configuration dictionary for script
:param client_type: (str) type of server (e.g. nidaqmx)
:param client_config: (str, optional) name of config file for specific device
only needed if multiple devices of the same type are used in this script
:param logger: (LogHandler)
"""
found_clients = find_keys(clients, client_type)
num_clients = len(found_clients)
# If no matched clients, log an error
if num_clients == 0:
logger.error(f'Client {client_type} could not be found')
# If > 1 matched clients, try to use the device config file to match
elif num_clients > 1:
if client_config is not None:
device_settings = load_device_config(client_type, client_config, logger)
# Search through clients using device IDs
found_clients = find_keys(clients, device_settings['device_id'])
# If single client, return, otherwise log error
num_clients = len(found_clients)
if num_clients == 1:
return found_clients[0]
elif num_clients == 0:
logger.error(f'Client ID {device_settings["device_id"]} not found')
else:
logger.error(f'Multiple clients with client ID {device_settings["device_id"]} found')
# If only 1 matched client, just return
else:
return found_clients[0]
def launch_device_server(server, dev_config, log_ip, log_port, server_port, debug=False, logger=None):
""" Launches a new device server
:param server: (str) name of the server. Should be the directory in which the
relevant server config file is located, and should have a corresponding
launch file server.py in pylabnet.launchers.servers
:param dev_config: (str) name of the config file for the server, which specifies
the device_id and also any SSH info
:param log_ip: (str) logger IP address
:param log_port: (int) logger port number
:param server_port: (int) port number of server to use
:param debug: (bool) whether or not to debug the server launching
:param logger: (LogHandler)
"""
# First load device config into dict
config_dict = load_device_config(server, dev_config)
if 'disabled' in config_dict and config_dict['disabled'] == 'True':
msg_str = f'Device {server} launching is disabled'
if logger is None:
print(msg_str)
else:
logger.error(msg_str)
return
# Check if we should SSH in
if 'ssh_config' in config_dict:
ssh = True
# Load SSH parameters
ssh_params = config_dict['ssh_config']
hostname = ssh_params['hostname']
host_ip = ssh_params['ip']
# SSH in
ssh = paramiko.SSHClient()
ssh.load_system_host_keys()
try:
ssh.connect(host_ip, username=hostname, password=decouple.config('LOCALHOST_PW'))
msg_str = f'Successfully connected via SSH to {hostname}@{host_ip}'
if logger is None:
print(msg_str)
else:
logger.info(msg_str)
except TimeoutError:
msg_str = f'Failed to setup SSH connection to {hostname}@{host_ip}'
if logger is None:
print(msg_str)
else:
logger.error(msg_str)
# Set command arguments
python_path = ssh_params['python_path']
launch_path = ssh_params['script_path']
start = ""
# Kill processes if required
if 'kill_all' in ssh_params and ssh_params['kill_all'] == "True":
msg_str = f'Killing all python processes on {hostname}@{host_ip}'
if logger is None:
print(msg_str)
else:
logger.warn(msg_str)
ssh.exec_command('taskkill /F /IM python.exe /T')
else:
ssh = False
start = f'start "{server}_server, '
start += time.strftime("%Y-%m-%d, %H:%M:%S", time.gmtime())
start += '" '
host_ip = get_ip()
python_path = sys.executable
launch_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'launchers',
'pylabnet_server.py'
)
if server_port is None:
server_port = np.random.randint(1024, 49151)
# Build command()
operating_system = get_os()
if operating_system == 'Windows':
cmd = f'{start}"{python_path}" "{launch_path}" '
elif operating_system == 'Linux':
cmd = f'{python_path} {launch_path} '
else:
raise UnsupportedOSException
cmd += f'--logip {log_ip} --logport {log_port} '
cmd += f'--serverport {server_port} --server {server} '
cmd += f'--device_id "{config_dict["device_id"]}" '
cmd += f'--config {dev_config} --debug {debug}'
if len(cmd) > 8191:
if logger is not None:
logger.error('Cmd too long! Server will not instantiate!')
return
else:
if logger is not None:
logger.info("Cmd len: " + str(len(cmd)))
if ssh:
msg_str = f'Executing command on {hostname}:\n{cmd}'
if logger is None:
print(msg_str)
else:
logger.info(msg_str)
ssh.exec_command(cmd)
else:
subprocess.Popen(cmd, shell=True)
return host_ip, server_port
def launch_script(script, config, log_ip, log_port, debug_flag, server_debug_flag, num_clients, client_cmd, logger=None):
""" Launches a script
:param script: (str) name of the script. Should be the directory in which the
relevant script config file is located
:param config: (str) name of the config file for the script, which specifies
the device server info and script launching directory (and also script
parameters, if desired)
:param log_ip: (str) logger IP address
:param log_port: (int) logger port number
:param debug_flag: (bool) whether or not to debug the script/launcher
:param server_debug_flag: (bool) whether or not to debug on the
server-launching level
:param num_clients: (int) total number of clients to the log server
:param client_cmd: (str) a series of commandline arguments specifying the
client dictionary details for passing to the launcher
"""
launch_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'launchers',
'launcher.py'
)
# Build command
operating_system = get_os()
if operating_system == 'Windows':
cmd = f'start "{script}_server, '
cmd += time.strftime("%Y-%m-%d, %H:%M:%S", time.gmtime())
cmd += f'" "{sys.executable}" "{launch_path}" '
elif operating_system == 'Linux':
cmd = f'{sys.executable} {launch_path} '
else:
raise UnsupportedOSException
cmd += f'--logip {log_ip} --logport {log_port} '
cmd += f'--script {script} --num_clients {num_clients} '
cmd += f'--config {config} --debug {debug_flag} '
cmd += f'--server_debug {server_debug_flag}'
cmd += client_cmd
if len(cmd) > 8191:
if logger is not None:
logger.error('Cmd too long! Server will not instantiate!')
return
else:
if logger is not None:
logger.info("Cmd len: " + str(len(cmd)))
subprocess.Popen(cmd, shell=True)
def get_ip():
""" Returns a primary IP address
On Linux, the network interface is read from the 'network_interface' key of the
network config file (it can be found by running ifconfig); on Windows, candidate
addresses are filtered using the 'subnet' prefix from the network configuration file.
"""
operating_system = get_os()
if operating_system == 'Linux':
import netifaces as ni
if operating_system == 'Windows':
# Retrieve subnet from config dict
try:
subnet = load_config('network_configuration')['subnet']
except:
subnet = '140'
ip_list = socket.gethostbyname_ex(socket.gethostname())[2]
if len(ip_list) == 1:
return ip_list[0]
else:
filtered_ip = [ip for ip in ip_list if ip.startswith(subnet)]
if len(filtered_ip) == 0:
return ip_list[0]
else:
return filtered_ip[0]
elif operating_system == 'Linux':
try:
network_interface = load_config('network_config')['network_interface']
except AttributeError:
return socket.gethostbyname(socket.gethostname())
import netifaces as ni
ip = ni.ifaddresses(network_interface)[ni.AF_INET][0]['addr']
return ip
def HDAWG_to_breakout_box(pin):
if pin < 8 or (pin < 24 and pin >= 16):
print("these pins are not mapped to the dio breakout box")
return None
else:
if int(np.floor(pin / 4)) == 2:
board = 0
if int(np.floor(pin / 4)) == 3:
board = 1
if int(np.floor(pin / 4)) == 6:
board = 2
if int(np.floor(pin / 4)) == 7:
board = 3
channel = np.mod(pin, 4)
return board, channel
def breakout_box_to_HDAWG(board, channel):
if board > 3 or channel > 3:
print("non-existent board or channel for dio breakout box")
return None
else:
if board == 0:
pin = 8
if board == 1:
pin = 12
if board == 2:
pin = 24
if board == 3:
pin = 28
pin = pin + channel
return pin
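# Illustrative consistency check between the two DIO mapping helpers above:
# pin 26, for example, maps to board 2, channel 2 and back again.
def _dio_mapping_example():
    board, channel = HDAWG_to_breakout_box(26)
    return board, channel, breakout_box_to_HDAWG(board, channel)  # (2, 2, 26)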
def get_os():
"""Read out operating system"""
pf = platform.system()
if pf == 'Linux':
operating_system = 'Linux'
elif pf == 'Windows':
operating_system = 'Windows'
elif pf == "Darwin":
operating_system = 'mac_os'
else:
operating_system = pf
return operating_system
def set_graph_background(widget):
""" Sets the background color for pyqtgraph related widgets to pylabnet style
:param widget: base graph or legend widget
"""
try:
widget.getViewBox().setBackgroundColor('#19232D')
# In case this widget does not have a parent viewBox
except AttributeError:
pass
try:
widget.setBackground('#19232D')
# In case this widget does not support setBackground
except AttributeError:
pass
class UnsupportedOSException(Exception):
"""Raised when the operating system is not supported."""
|
the-stack_0_25575
|
#
# File that determines what each URL points to. This uses _Python_ regular
# expressions, not Perl's.
#
# See:
# http://diveintopython.org/regular_expressions/street_addresses.html#re.matching.2.3
#
from django.conf.urls import url
from .views import (
topic,
list_commands,
list_topics,
list_recipes,
display_org,
command_help,
lore_categories,
)
urlpatterns = [
url(r"^recipes/", list_recipes, name="list_recipes"),
url(r"^org/(?P<object_id>[\w\s]+)/$", display_org, name="display_org"),
url(r"^commands/(?P<cmd_key>[\+@\_\w\s]+)/$", command_help, name="command_help"),
url(r"^commands/$", list_commands, name="list_commands"),
url(r"^(?P<object_key>[\w\s]+)/$", topic, name="topic"),
url(r"^$", list_topics, name="list_topics"),
url(r"^lore/(?P<object_id>[\w\s]+)/$", lore_categories, name="lore"),
]
|
the-stack_0_25576
|
# Copyright 2018 Martin Haesemeyer. All rights reserved.
#
# Licensed under the MIT license
"""
Script for quantitative correspondence between zebrafish and neural network activity clusters
"""
import numpy as np
from scipy.signal import convolve
import matplotlib.pyplot as pl
import matplotlib as mpl
import seaborn as sns
import h5py
from typing import Dict
import pickle
import os
from global_defs import GlobalDefs
import analysis as a
import core as c
from mo_types import MoTypes
from Figure4 import mpath
class RegionResults:
"""
This is an exact copy from analyzeSensMotor.py of ImagingAnalysis
"""
def __init__(self, name, activities, membership, regressors, original_labels, region_indices):
self.name = name
self.region_acts = activities
self.region_mem = membership
self.regressors = regressors
self.regs_clust_labels = original_labels
self.region_indices = region_indices
self.full_averages = None
# file definitions
base_path = "./model_data/Adam_1e-4/sepInput_mixTrain/"
paths_512 = [f+'/' for f in os.listdir(base_path) if "_3m512_" in f]
def create_corr_mat(network, zfish, times, start_time, end_time=None):
corr_mat = np.full((network.shape[1], zfish.shape[1]), np.nan)
if end_time is None or end_time < start_time:
valid = times >= start_time
else:
valid = np.logical_and(times <= end_time, times >= start_time)
for n in range(corr_mat.shape[0]):
net_act = network[valid, n]
for z in range(corr_mat.shape[1]):
zf_reg = zfish[valid, z]
corr_mat[n, z] = np.corrcoef(net_act, zf_reg)[0, 1]
return corr_mat
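# Illustrative sketch of create_corr_mat on random data: correlations are only
# computed over samples whose time stamps fall inside [start_time, end_time].
# The array sizes are arbitrary examples.
def _create_corr_mat_example():
    times = np.linspace(0, 165, 500)
    network = np.random.randn(500, 8)  # 8 network cluster centroids
    zfish = np.random.randn(500, 4)    # 4 zebrafish regressors
    return create_corr_mat(network, zfish, times, start_time=60, end_time=105)  # shape (8, 4)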
def greedy_max_clust(corr_mat, threshold, col_names):
"""
Tries to find best correlated row above threshold for each column giving preference to making a match
for each column even if this requires picking a worse match in another column
:param corr_mat: The pairwise correlations
:param threshold: The minimal correlation to consider
:param col_names: The names of the columns
:return: Dictionary with rows as keys and matched column names as values
"""
col_matches = np.full(corr_mat.shape[1], -2)
work_mat = corr_mat.copy()
work_mat[corr_mat < threshold] = 0
first_run = True
while np.any(col_matches == -2):
for col in range(corr_mat.shape[1]):
if col_matches[col] > -2:
continue
if np.all(work_mat[:, col] == 0):
# no possible assignment - mark as completed but un-assigned
col_matches[col] = -1
continue
if np.sum(work_mat[:, col] > 0) == 1:
# if this is the only choice, assign and mark that row as used
col_matches[col] = np.argmax(work_mat[:, col])
work_mat[col_matches[col], :] = 0
continue
if not first_run:
col_matches[col] = np.argmax(work_mat[:, col])
work_mat[col_matches[col], :] = 0
# indicate that all "loners" have already been assigned
first_run = False
# recode column matches into row matches
row_matches = np.full(corr_mat.shape[0], -1)
for ix, cm in enumerate(col_matches):
if cm < 0:
continue
row_matches[cm] = ix
return {ix: col_names[row_matches[ix]] if row_matches[ix] != -1 else ix for ix in range(corr_mat.shape[0])}
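# Illustrative sketch of greedy_max_clust on a small hand-made correlation
# matrix; rows are network clusters, columns the named zebrafish cell types.
def _greedy_max_clust_example():
    corr = np.array([[0.9, 0.1],
                     [0.7, 0.8]])
    return greedy_max_clust(corr, 0.6, ["Fast ON", "Slow ON"])
    # {0: 'Fast ON', 1: 'Slow ON'}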
if __name__ == "__main__":
save_folder = "./DataFigures/ZF_ANN_Correspondence/"
if not os.path.exists(save_folder):
os.makedirs(save_folder)
sns.reset_orig()
mpl.rcParams['pdf.fonttype'] = 42
mo = MoTypes(False)
std = c.GradientData.load_standards("gd_training_data.hdf5")
ana = a.Analyzer(mo, std, "sim_store.hdf5", "activity_store.hdf5")
# load zebrafish region results and create Rh56 regressor matrix for FastON, SlowON, FastOFF, SlowOFF
result_labels = ["Rh6"]
region_results = {} # type: Dict[str, RegionResults]
analysis_file = h5py.File('regiondata.hdf5', 'r')
for rl in result_labels:
region_results[rl] = pickle.loads(np.array(analysis_file[rl]))
analysis_file.close()
rh_56_calcium = region_results["Rh6"].regressors[:, :-1]
# the names of these regressors according to Haesemeyer et al., 2018
reg_names = ["Fast ON", "Slow ON", "Fast OFF", "Slow OFF"]
# load and interpolate temperature stimulus
dfile = h5py.File("stimFile.hdf5", 'r')
tsin = np.array(dfile['sine_L_H_temp'])
x = np.arange(tsin.size) # stored at 20 Hz !
xinterp = np.linspace(0, tsin.size, tsin.size * GlobalDefs.frame_rate // 20)
temperature = np.interp(xinterp, x, tsin)
dfile.close()
# get activity data
all_cells = []
all_ids = []
for i, p in enumerate(paths_512):
cell_res, ids = ana.temperature_activity(mpath(base_path, p), temperature, i)
all_cells.append(cell_res)
all_ids.append(ids)
all_cells = np.hstack(all_cells)
all_ids = np.hstack(all_ids)
# convolve activity with nuclear gcamp calcium kernel
tau_on = 1.4 # seconds
tau_on *= GlobalDefs.frame_rate # in frames
tau_off = 2 # seconds
tau_off *= GlobalDefs.frame_rate # in frames
kframes = np.arange(10 * GlobalDefs.frame_rate) # 10 s long kernel
kernel = 2 ** (-kframes / tau_off) * (1 - 2 ** (-kframes / tau_on))
kernel = kernel / kernel.sum()
# convolve with our kernel
for i in range(all_cells.shape[1]):
all_cells[:, i] = convolve(all_cells[:, i], kernel, mode='full')[:all_cells.shape[0]]
# load activity clusters from file or create if necessary
clust_ids = a.cluster_activity(8, all_cells, "cluster_info.hdf5")[0]
# create ANN cluster centroid matrix
ann_cluster_centroids = np.zeros((all_cells.shape[0]//3, 8))
for i in range(8):
centroid = np.mean(all_cells[:, clust_ids == i], 1)
ann_cluster_centroids[:, i] = a.trial_average(centroid[:, None], 3).ravel()
# interpolate fish calcium data to network time base
ca_time = np.linspace(0, 165, rh_56_calcium.shape[0])
net_time = np.linspace(0, 165, ann_cluster_centroids.shape[0])
zf_cluster_centroids = np.zeros((net_time.size, rh_56_calcium.shape[1]))
for i in range(rh_56_calcium.shape[1]):
zf_cluster_centroids[:, i] = np.interp(net_time, ca_time, rh_56_calcium[:, i])
# perform all pairwise correlations between the network and zebrafish units during sine stimulus phase
cm_sine = create_corr_mat(ann_cluster_centroids, zf_cluster_centroids, net_time, 60, 105)
assignment = greedy_max_clust(cm_sine, 0.6, reg_names)
assign_labels = [assignment[k] for k in range(cm_sine.shape[0])]
# plot correlation matrix
fig, ax = pl.subplots()
sns.heatmap(cm_sine, vmin=-1, vmax=1, center=0, annot=True, xticklabels=reg_names, yticklabels=assign_labels, ax=ax,
cmap="RdBu_r")
ax.set_xlabel("Zebrafish cell types")
ax.set_ylabel("ANN clusters")
fig.tight_layout()
fig.savefig(save_folder + "ZFish_ANN_Correspondence.pdf", format="pdf")
|
the-stack_0_25578
|
import os
import numpy as np
import time
import torch
from utilities import pad_truncate_sequence
def move_data_to_device(x, device):
if 'float' in str(x.dtype):
x = torch.Tensor(x)
elif 'int' in str(x.dtype):
x = torch.LongTensor(x)
else:
return x
return x.to(device)
def append_to_dict(dict, key, value):
if key in dict.keys():
dict[key].append(value)
else:
dict[key] = [value]
def forward(model, x, batch_size):
"""Forward data to model in mini-batch.
Args:
model: object
x: (N, segment_samples)
batch_size: int
Returns:
output_dict: dict, e.g. {
'frame_output': (segments_num, frames_num, classes_num),
'onset_output': (segments_num, frames_num, classes_num),
...}
"""
output_dict = {}
device = next(model.parameters()).device
pointer = 0
total_segments = int(np.ceil(len(x) / batch_size))
while True:
print('Segment {} / {}'.format(pointer // batch_size, total_segments))
if pointer >= len(x):
break
batch_waveform = move_data_to_device(x[pointer : pointer + batch_size], device)
pointer += batch_size
with torch.no_grad():
model.eval()
batch_output_dict = model(batch_waveform)
for key in batch_output_dict.keys():
append_to_dict(output_dict, key, batch_output_dict[key].data.cpu().numpy())
for key in output_dict.keys():
output_dict[key] = np.concatenate(output_dict[key], axis=0)
return output_dict
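# Illustrative sketch of batched inference with the forward helper above; the
# model object and the segment length are placeholders, not defined in this module.
def _forward_example(model):
    segments = np.random.randn(10, 16000).astype(np.float32)  # 10 waveform segments
    return forward(model, segments, batch_size=4)
    # dict of per-key outputs stacked along axis 0, e.g. shape (10, frames_num, classes_num)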
|
the-stack_0_25579
|
# imports
# import splinter, beautifulsoup and chrome driver
from splinter import Browser
from bs4 import BeautifulSoup as soup
import pandas as pd
import datetime as dt
from webdriver_manager.chrome import ChromeDriverManager
# scrape all function
def scrape_all():
#set up splinter
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
#goal is to return json with all the data, so it can be loaded into MongoDB
#get info from the news page
news_title, news_p = scrape_news(browser)
# build dictionary using info from scrapes
marsData = {
"newsTitle": news_title,
"newsParagraph": news_p,
"featuredImage": scrape_feature_img(browser),
"facts": scrape_facts_page(browser),
"hemispheres": scrape_hemispheres(browser),
"lastUpdated": dt.datetime.now()
}
# stop webdriver
browser.quit()
#display output
return marsData
# scrape mars news page
def scrape_news(browser):
# go to Mars Nasa news site
url = 'https://redplanetscience.com/'
browser.visit(url)
# Optional delay for loading the page
browser.is_element_present_by_css('div.list_text', wait_time=1)
# convert browser html into soup object
html = browser.html
news_soup = soup(html, 'html.parser')
slide_elem = news_soup.select_one('div.list_text')
# grab title
news_title = slide_elem.find('div', class_='content_title').get_text()
# grab paragraph
news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
#return title and news paragraph
return news_title, news_p
# scrape through featured image page
def scrape_feature_img(browser):
#visit url
url = 'https://spaceimages-mars.com/'
browser.visit(url)
# find full image button
full_image_link = browser.find_by_tag('button')[1]
full_image_link.click()
#parse resulting html with soup
html = browser.html
image_soup = soup(html, 'html.parser')
#find image url
img_url_rel = image_soup.find('img', class_='fancybox-image').get('src')
#use base url to create absolute url
img_url = f'https://spaceimages-mars.com/{img_url_rel}'
#return image url
return img_url
# scrape through facts page
def scrape_facts_page(browser):
url = 'https://galaxyfacts-mars.com/'
# read the fact tables directly with pandas
tables = pd.read_html(url)
table = tables[1]
# convert the dataframe back into an html table string and return it
facts = table.to_html()
return facts
# scrape through hemispheres pages
def scrape_hemispheres(browser):
#base url
url = 'https://marshemispheres.com/'
browser.visit(url)
hemisphere_image_urls = []
#set up loop
for i in range(4):
#loops through each page
#hemisphere info dictionary
hemisphereInfo = {}
# We have to find the elements on each loop to avoid a stale element exception
browser.find_by_css('a.product-item img')[i].click()
# Next, we find the Sample image anchor tag and extract the href
sample = browser.links.find_by_text('Sample').first
hemisphereInfo["img_url"] = sample['href']
# Get Hemisphere title
hemisphereInfo['title'] = browser.find_by_css('h2.title').text
# Append hemisphere object to list
hemisphere_image_urls.append(hemisphereInfo)
# Finally, we navigate backwards
browser.back()
#return hemisphere urls with titles
return hemisphere_image_urls
# set up as flask app
if __name__ == "__main__":
print(scrape_all())
|
the-stack_0_25582
|
"""
==========================================
ORB feature detector and binary descriptor
==========================================
This example demonstrates the ORB feature detection and binary description
algorithm. It uses an oriented FAST detection method and the rotated BRIEF
descriptors.
Unlike BRIEF, ORB is comparatively scale- and rotation-invariant while still
employing the very efficient Hamming distance metric for matching. As such, it
is preferred for real-time applications.
"""
from skimage import data
from skimage import transform as tf
from skimage.feature import (match_descriptors, corner_harris,
corner_peaks, ORB, plot_matches)
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
img1 = rgb2gray(data.lena())
img2 = tf.rotate(img1, 180)
tform = tf.AffineTransform(scale=(1.3, 1.1), rotation=0.5,
translation=(0, -200))
img3 = tf.warp(img1, tform)
descriptor_extractor = ORB(n_keypoints=200)
descriptor_extractor.detect_and_extract(img1)
keypoints1 = descriptor_extractor.keypoints
descriptors1 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img2)
keypoints2 = descriptor_extractor.keypoints
descriptors2 = descriptor_extractor.descriptors
descriptor_extractor.detect_and_extract(img3)
keypoints3 = descriptor_extractor.keypoints
descriptors3 = descriptor_extractor.descriptors
matches12 = match_descriptors(descriptors1, descriptors2, cross_check=True)
matches13 = match_descriptors(descriptors1, descriptors3, cross_check=True)
fig, ax = plt.subplots(nrows=2, ncols=1)
plt.gray()
plot_matches(ax[0], img1, img2, keypoints1, keypoints2, matches12)
ax[0].axis('off')
plot_matches(ax[1], img1, img3, keypoints1, keypoints3, matches13)
ax[1].axis('off')
plt.show()
|
the-stack_0_25583
|
import torch
import matplotlib.pyplot as plt
import numpy as np
from src.utils import bmtm, bmtv, bmmt, bbmv
from src.lie_algebra import SO3
class BaseNet(torch.nn.Module):
def __init__(self, in_dim, out_dim, c0, dropout, ks, ds, momentum):
super().__init__()
self.in_dim = in_dim
self.out_dim = out_dim
# channel dimension
c1 = 2*c0
c2 = 2*c1
c3 = 2*c2
# kernel dimension (odd number)
k0 = ks[0]
k1 = ks[1]
k2 = ks[2]
k3 = ks[3]
# dilation dimension
d0 = ds[0]
d1 = ds[1]
d2 = ds[2]
# padding
p0 = (k0-1) + d0*(k1-1) + d0*d1*(k2-1) + d0*d1*d2*(k3-1)  # total left padding to compensate for the receptive field of the stacked dilated convolutions (e.g. 510 for the default kernel and dilation sizes)
# nets
self.cnn = torch.nn.Sequential(
torch.nn.ReplicationPad1d((p0, 0)), # padding at start
torch.nn.Conv1d(in_dim, c0, k0, dilation=1),
torch.nn.BatchNorm1d(c0, momentum=momentum),
torch.nn.GELU(),
torch.nn.Dropout(dropout),
torch.nn.Conv1d(c0, c1, k1, dilation=d0),
torch.nn.BatchNorm1d(c1, momentum=momentum),
torch.nn.GELU(),
torch.nn.Dropout(dropout),
torch.nn.Conv1d(c1, c2, k2, dilation=d0*d1),
torch.nn.BatchNorm1d(c2, momentum=momentum),
torch.nn.GELU(),
torch.nn.Dropout(dropout),
torch.nn.Conv1d(c2, c3, k3, dilation=d0*d1*d2),
torch.nn.BatchNorm1d(c3, momentum=momentum),
torch.nn.GELU(),
torch.nn.Dropout(dropout),
torch.nn.Conv1d(c3, out_dim, 1, dilation=1),
torch.nn.ReplicationPad1d((0, 0)), # no padding at end
)
# for normalizing inputs
self.mean_u = torch.nn.Parameter(torch.zeros(in_dim),
requires_grad=False)
self.std_u = torch.nn.Parameter(torch.ones(in_dim),
requires_grad=False)
def forward(self, us):
u = self.norm(us).transpose(1, 2)
y = self.cnn(u)
return y
def norm(self, us):
return (us-self.mean_u)/self.std_u
def set_normalized_factors(self, mean_u, std_u):
self.mean_u = torch.nn.Parameter(mean_u.cuda(), requires_grad=False)
self.std_u = torch.nn.Parameter(std_u.cuda(), requires_grad=False)
class GyroNet(BaseNet):
def __init__(self, in_dim, out_dim, c0, dropout, ks, ds, momentum,
gyro_std):
super().__init__(in_dim, out_dim, c0, dropout, ks, ds, momentum)
gyro_std = torch.Tensor(gyro_std)
self.gyro_std = torch.nn.Parameter(gyro_std, requires_grad=False)
gyro_Rot = 0.05*torch.randn(3, 3).cuda()
self.gyro_Rot = torch.nn.Parameter(gyro_Rot)  # learnable correction matrix C, registered as a parameter so it is optimized
self.Id3 = torch.eye(3).cuda()
def forward(self, us):
ys = super().forward(us)
Rots = (self.Id3 + self.gyro_Rot).expand(us.shape[0], us.shape[1], 3, 3)  # identity plus the learned correction, broadcast to every sample
Rot_us = bbmv(Rots, us[:, :, :3])
return self.gyro_std*ys.transpose(1, 2) + Rot_us  # corrected gyro (formula 5): scaled network output added to the rotated measurements
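# --- Illustration (not part of the original module) --------------------------
# A minimal, self-contained sketch of the batched rotation correction used in
# GyroNet.forward: every 3-vector gyro sample is multiplied by the same learned
# 3x3 matrix (Id3 + gyro_Rot) before the scaled network output is added. The
# einsum below is assumed to mirror what bbmv() computes; shapes and values are
# illustrative only, and the sketch runs on CPU.
if __name__ == "__main__":
    B, T = 2, 5                                       # batch size, sequence length
    us = torch.randn(B, T, 3)                         # raw gyro measurements
    C = torch.eye(3) + 0.05 * torch.randn(3, 3)       # stand-in for Id3 + gyro_Rot
    Rots = C.expand(B, T, 3, 3)                       # same correction for every sample
    Rot_us = torch.einsum('btij,btj->bti', Rots, us)  # batched matrix-vector product
    print(Rot_us.shape)                               # torch.Size([2, 5, 3])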
|
the-stack_0_25589
|
# Training
from datetime import datetime
import os
import pickle
import numpy as np
import config
import torch
from torch import optim,nn
from process.dataloader_classify import train_loader,train_data
from process.dataloader_classify import test_loader,test_data
from model.C3D_denseNeXt import DenseNet
from tools.common_tools import show_confMat,plot_line,ModelTrainer
# training cannot be finished in a single run, so saved model files are loaded and training proceeds in stages
if __name__ == '__main__':
# ===========================
mat_list_ = []
loss_rec = {"train": [], "valid": []}
acc_rec = {"train": [], "valid": []}
with open(r'D:\modelling/files/acc_classify_.json', 'rb') as file1:
acc_rec = pickle.load(file1)
with open(r'D:\modelling/files/loss_classify_.json', 'rb') as file2:
loss_rec = pickle.load(file2)
with open(r'D:\modelling/files/mat_classify_.json', 'rb') as file3:
mat_list_ = pickle.load(file3)
# ============================
model = DenseNet(n_input_channels=1, num_init_features=64,
growth_rate=32,
block_config=(3, 6, 12, 8), num_classes=4).to(config.device)
# the Adam optimizer with momentum eats GPU memory, and DenseNet is already memory-hungry
# oscillation-like behaviour was observed during the first few training runs
optimizer = optim.SGD(params=model.parameters(), lr=config.lr, momentum=0.9, weight_decay=1e-4) # choose the optimizer
# optimizer = optim.Adam(model.parameters(), lr=config.lr,weight_decay=1e-4)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, gamma=0.1, milestones=config.milestones)
model.load_state_dict(torch.load('D:\modelling/checkpoint/model23.pkl'))
optimizer.load_state_dict(torch.load('D:\modelling/checkpoint/optimizer23.pkl'))
# todo: during training we do not necessarily load the best checkpoint, but testing must load the best one
# todo: the (dict) lists we load back in may contain duplicate entries
# todo: a few epochs were skipped in between (one epoch first, six at noon, then two in the afternoon that effectively wasted noon's epochs 5-7); they correspond to entries 4-6 in the lists and can simply be deleted later
# todo: accuracy used to swing up and down a lot before, probably because the learning rate was set too high
# todo: a larger learning rate usually only makes sense when the batch size is large
# 1702 777 1150 855 are the class weights designed after balancing the ratios (0.38 + 0.17 + 0.26 + 0.19)
# to increase recall of the red-alert class, increase the last weight.
loss_function = torch.nn.CrossEntropyLoss(weight=torch.from_numpy(np.array([0.19,0.17,0.26,0.19 * 2])).float())# class weights are added here
loss_function.to(config.device)
modelName = 'DenseNet' # Options: C3D or R2Plus1D or R3D
# print("Device being used:", config.device)
# TODO: what needs changing on every run is the loaded model, plus the training epoch range
best_acc = max(acc_rec["valid"])
best_epoch = 0
# configure hyperparameters
num_classes = 4
MAX_EPOCH = 300 # see section 4.2 Training of the reference paper
BATCH_SIZE = 64 # see section 4.2 Training of the reference paper
log_interval = 1
val_interval = 1
start_epoch = -1
class_names = ("none","yellow","orange","red")
now_time = datetime.now()
time_str = datetime.strftime(now_time, '%m-%d_%H-%M')
log_dir = os.path.join(config.current_file, "results", time_str)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# print(log_dir)
for epoch in range(24, 30):
# start from however many epochs have already been completed; the next one here would be 7
# training: (data_loader, model, loss_f, optimizer, epoch_id, device, max_epoch)
loss_train, acc_train, mat_train = ModelTrainer.train(train_loader, model, loss_function, optimizer, epoch, config.device, MAX_EPOCH)
loss_valid, acc_valid, mat_valid = ModelTrainer.valid(test_loader, model, loss_function, config.device)
# statistics
mat_list_.append((mat_train,mat_valid))
# collect statistics for plotting
print("Epoch[{:0>3}/{:0>3}] Train Acc: {:.2%} Valid Acc:{:.2%} Train loss:{:.4f} Valid loss:{:.4f} LR:{}".format(
epoch + 1, MAX_EPOCH, acc_train, acc_valid, loss_train, loss_valid, optimizer.param_groups[0]["lr"]))
# check what the current learning rate is
# optimizer.param_groups[0]: a dict with 6 entries: ['amsgrad', 'params', 'lr', 'betas', 'weight_decay', 'eps']
scheduler.step() # update the learning rate
# plotting
loss_rec["train"].append(loss_train), loss_rec["valid"].append(loss_valid)
acc_rec["train"].append(acc_train), acc_rec["valid"].append(acc_valid)
# show_confMat(mat_train, class_names, "train", log_dir, verbose=epoch == MAX_EPOCH-1)
# show_confMat(mat_valid, class_names, "valid", log_dir, verbose=epoch == MAX_EPOCH-1)
#
# plt_x = np.arange(1, epoch+2)
# plot_line(plt_x, loss_rec["train"], plt_x, loss_rec["valid"], mode="loss", out_dir=log_dir)
# plot_line(plt_x, acc_rec["train"], plt_x, acc_rec["valid"], mode="acc", out_dir=log_dir)
if (epoch % 1 == 0) or (best_acc < max(acc_rec["valid"])):
best_acc = max(acc_rec["valid"])
checkpoint = {"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"epoch": epoch,
"best_acc": best_acc}
path_checkpoint = os.path.join(log_dir, "checkpoint_best{}.pkl".format(epoch))
torch.save(checkpoint, path_checkpoint)
torch.save(model.state_dict(), "D:\modelling/checkpoint/model{}.pkl".format(epoch))
torch.save(optimizer.state_dict(),"D:\modelling/checkpoint/optimizer{}.pkl".format(epoch))
print(" done ~~~~ {}, best acc: {} in :{} epochs. ".format(datetime.strftime(datetime.now(), '%m-%d_%H-%M'),
best_acc, best_epoch))
now_time = datetime.now()
time_str = datetime.strftime(now_time, '%m-%d_%H-%M')
print(time_str)
# how to run this code:
# 1: do not set too many epochs at once; train only a few rounds
# 2: after training, save the loss and acc records
# 3: before the next training round, load the previously saved loss/acc records and the pkl checkpoint files
with open(r'D:\modelling/files/acc_classify_.json','wb') as file1:
pickle.dump(acc_rec,file=file1) # 'wb' overwrites the previous file
with open(r'D:\modelling/files/loss_classify_.json', 'wb') as file2:
pickle.dump(loss_rec, file=file2)
with open(r'D:\modelling/files/mat_classify_.json', 'wb') as file3:
pickle.dump(mat_list_, file=file3)
# todo: build a mapping dict to express the label correspondence
# {'label_1': 0, 'label_2': 1, 'label_3': 2, 'label_4': 3}
# ##########################################################
# the 10-class model we chose earlier already has well-trained parameters; how can they be migrated to the new classification model?
# keep our convolutional-layer parameters unchanged here; the linear layers changed substantially, so hopefully the model becomes easier to train
# model_dict = model.state_dict() # here 'model' is the newly created model, whose parameters are only the Kaiming initialization
# pretrained_dict = torch.load('D://modelling//checkpoint_classify//previeus//model32.pkl')
# # load the parameters of the previous 10-class model; they differ from the new model, so only the conv parameters are kept
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in ['conv1.weight', 'conv1.bias', 'conv2.weight', 'conv2.bias', 'conv3.weight', 'conv3.bias', 'conv4a.weight', 'conv4a.bias', 'conv4b.weight', 'conv4b.bias', 'conv5a.weight', 'conv5a.bias', 'conv5b.weight', 'conv5b.bias', 'conv6a.weight', 'conv6a.bias', 'conv6b.weight', 'conv6b.bias']}
# # this step does the filtering
# model_dict.update(pretrained_dict) # update the conv-layer parameters of the new model
# model.load_state_dict(model_dict)
# remove this block after running it once, since better checkpoints get saved afterwards
# ##########################################################
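# --- Illustration (not part of the original training script) -----------------
# A generic, self-contained sketch of the partial weight-transfer pattern
# described in the commented block above: keep only the pretrained entries
# whose names and shapes also exist in the new model, then load the merged
# state dict. The function name and its arguments are illustrative assumptions.
def transfer_matching_weights(new_model, pretrained_state_dict):
    model_dict = new_model.state_dict()
    # keep pretrained tensors whose key and shape match the new model
    filtered = {k: v for k, v in pretrained_state_dict.items()
                if k in model_dict and v.shape == model_dict[k].shape}
    model_dict.update(filtered)  # overwrite only the matching entries
    new_model.load_state_dict(model_dict)
    return new_model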
|
the-stack_0_25590
|
from enum import Enum, auto
class Colors:
"""Hex Color values"""
Error = 0xE20000
ClemsonOrange = 0xF56600
class DesignatedChannels(Enum):
"""Enum that defines possible designated channels for the bot to use"""
message_log = auto()
moderation_log = auto()
error_log = auto()
startup_log = auto()
user_join_log = auto()
class DiscordLimits:
MessageLength = 1900
|
the-stack_0_25591
|
"""yatube URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.flatpages import views
from django.conf.urls.static import static
from django.conf.urls import handler404, handler500
from . import settings
urlpatterns = [
path("about/", include("django.contrib.flatpages.urls")),
path("auth/", include("users.urls")),
path("auth/", include("django.contrib.auth.urls")),
path("admin/", admin.site.urls),
path('about-us/', views.flatpage, {'url': '/about-us/'}, name='about'),
path('terms/', views.flatpage, {'url': '/terms/'}, name='terms'),
path('about-author/', views.flatpage, {'url': '/about-author/'}, name='about-author'),
path('about-spec/', views.flatpage, {'url': '/about-spec/'}, name='about-spec'),
path("", include("posts.urls")),
]
handler404 = "posts.views.page_not_found" # noqa
handler500 = "posts.views.server_error" # noqa
if settings.DEBUG:
import debug_toolbar
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += (path("__debug__/", include(debug_toolbar.urls)),)
|
the-stack_0_25594
|
import json
import uuid
from django.db.models import Q
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes, action
from django.http import Http404
from rest_framework.exceptions import PermissionDenied, ValidationError
from rest_framework.filters import SearchFilter
from rest_framework.generics import get_object_or_404
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.viewsets import ModelViewSet
from rest_framework_csv import renderers
from profiles.models import Organization, Membership
from tasks.models import Task
from api.serializers.submissions import SubmissionCreationSerializer, SubmissionSerializer, SubmissionFilesSerializer
from competitions.models import Submission, Phase, CompetitionParticipant
from leaderboards.strategies import put_on_leaderboard_by_submission_rule
from leaderboards.models import SubmissionScore, Column, Leaderboard
class SubmissionViewSet(ModelViewSet):
queryset = Submission.objects.all().order_by('-pk')
permission_classes = []
filter_backends = (DjangoFilterBackend, SearchFilter)
filter_fields = ('phase__competition', 'phase', 'status')
search_fields = ('data__data_file', 'description', 'name', 'owner__username')
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES + [renderers.CSVRenderer]
def check_object_permissions(self, request, obj):
if self.action in ['submission_leaderboard_connection']:
if obj.is_specific_task_re_run:
raise PermissionDenied("Cannot add task-specific submission re-runs to leaderboards.")
return
if self.request and self.request.method in ('POST', 'PUT', 'PATCH'):
not_bot_user = self.request.user.is_authenticated and not self.request.user.is_bot
if self.action in ['update_fact_sheet', 're_run_submission']:
# get_queryset will stop us from re-running something we're not supposed to
pass
elif not self.request.user.is_authenticated or not_bot_user:
try:
if request.data.get('secret') is None or uuid.UUID(request.data.get('secret')) != obj.secret:
raise PermissionDenied("Submission secrets do not match")
except TypeError:
raise ValidationError(f"Secret: ({request.data.get('secret')}) not a valid UUID")
def get_serializer_class(self):
if self.request and self.request.method in ('POST', 'PUT', 'PATCH'):
return SubmissionCreationSerializer
else:
return SubmissionSerializer
def get_queryset(self):
# On GETs lets optimize the query to reduce DB calls
qs = super().get_queryset()
if self.request.method == 'GET':
if not self.request.user.is_authenticated:
return Submission.objects.none()
if not self.request.user.is_superuser and not self.request.user.is_staff and not self.request.user.is_bot:
# if you're the creator of the submission or a collaborator on the competition
qs = qs.filter(
Q(owner=self.request.user) |
Q(phase__competition__created_by=self.request.user) |
Q(phase__competition__collaborators__in=[self.request.user.pk])
)
qs = qs.select_related(
'phase',
'phase__competition',
'participant',
'participant__user',
'owner',
'data',
).prefetch_related(
'children',
'scores',
'scores__column',
'task',
)
elif self.action in ['delete_many', 're_run_many_submissions']:
try:
pks = list(self.request.data)
except TypeError as err:
raise ValidationError(f'Error {err}')
qs = qs.filter(pk__in=pks)
if not self.request.user.is_superuser and not self.request.user.is_staff:
if qs.filter(
Q(owner=self.request.user) |
Q(phase__competition__created_by=self.request.user) |
Q(phase__competition__collaborators__in=[self.request.user.pk])
) is not qs:
ValidationError("Request Contained Submissions you don't have authorization for")
if self.action in ['re_run_many_submissions']:
print(f'debug {qs}')
print(f'debug {qs.first().status}')
qs = qs.filter(status__in=[Submission.FINISHED, Submission.FAILED, Submission.CANCELLED])
print(f'debug {qs}')
return qs
def create(self, request, *args, **kwargs):
if 'organization' in request.data and request.data['organization'] is not None:
organization = get_object_or_404(Organization, pk=request.data['organization'])
try:
membership = organization.membership_set.get(user=request.user)
except Membership.DoesNotExist:
raise ValidationError('You must be a part of an organization to submit for them')
if membership.group not in Membership.PARTICIPANT_GROUP:
raise ValidationError('You do not have participant permissions for this group')
return super(SubmissionViewSet, self).create(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
submission = self.get_object()
if request.user != submission.owner and not self.has_admin_permission(request.user, submission):
raise PermissionDenied("Cannot interact with submission you did not make")
self.perform_destroy(submission)
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=False, methods=('DELETE',))
def delete_many(self, request, *args, **kwargs):
qs = self.get_queryset()
if not qs:
return Response({'Submission search returned empty'}, status=status.HTTP_404_NOT_FOUND)
qs.delete()
return Response({})
def get_renderer_context(self):
"""We override this to pass some context to the CSV renderer"""
context = super().get_renderer_context()
# The CSV renderer will only include these fields in context["header"]
# Human names for the fields
context["labels"] = {
'owner': 'Owner',
'created_when': 'Created When',
'status': 'Status',
'phase_name': 'Phase',
}
context["header"] = [k for k in context["labels"].keys()]
return context
def has_admin_permission(self, user, submission):
competition = submission.phase.competition
return user.is_authenticated and (user.is_superuser or user in competition.all_organizers or user.is_bot)
@action(detail=True, methods=('POST', 'DELETE'))
def submission_leaderboard_connection(self, request, pk):
submission = self.get_object()
phase = submission.phase
if not (request.user.is_superuser or request.user == submission.owner):
if not phase.competition.collaborators.filter(pk=request.user.pk).exists():
raise Http404
if submission.phase.leaderboard.submission_rule in Leaderboard.AUTO_SUBMISSION_RULES and not request.user.is_superuser:
raise ValidationError("Users are not allowed to edit the leaderboard on this Competition")
if request.method == 'POST':
# Removing any existing submissions on leaderboard unless multiples are allowed
if submission.phase.leaderboard.submission_rule != Leaderboard.ADD_DELETE_MULTIPLE:
Submission.objects.filter(phase=phase, owner=submission.owner).update(leaderboard=None)
leaderboard = phase.leaderboard
if submission.has_children:
Submission.objects.filter(parent=submission).update(leaderboard=leaderboard)
else:
submission.leaderboard = leaderboard
submission.save()
if request.method == 'DELETE':
if submission.phase.leaderboard.submission_rule not in [Leaderboard.ADD_DELETE, Leaderboard.ADD_DELETE_MULTIPLE]:
raise ValidationError("You are not allowed to remove a submission on this phase")
submission.leaderboard = None
submission.save()
Submission.objects.filter(parent=submission).update(leaderboard=None)
return Response({})
@action(detail=True, methods=('GET',))
def cancel_submission(self, request, pk):
submission = self.get_object()
if not self.has_admin_permission(request.user, submission):
if submission.owner != request.user:
raise PermissionDenied('You do not have permission to cancel submissions')
for child in submission.children.all():
child.cancel()
canceled = submission.cancel()
return Response({'canceled': canceled})
@action(detail=True, methods=('POST',))
def re_run_submission(self, request, pk):
submission = self.get_object()
task_key = request.query_params.get('task_key')
if not self.has_admin_permission(request.user, submission):
raise PermissionDenied('You do not have permission to re-run submissions')
# We want to avoid re-running a submission that isn't finished yet, because the tasks associated
# with the submission and maybe other important details have not been finalized yet. I.e. if you
# rapidly click the "re-run submission" button, a submission may not have been processed by a
# site worker and be in a funky state (race condition) -- this should resolve that
if submission.status not in (Submission.FINISHED, Submission.FAILED, Submission.CANCELLED):
raise PermissionDenied('Cannot request a re-run on a submission that has not finished processing.')
# Rerun submission on different task. Will flag submission with is_specific_task_re_run=True
if task_key:
rerun_kwargs = {
'task': get_object_or_404(Task, key=task_key),
}
else:
rerun_kwargs = {}
new_sub = submission.re_run(**rerun_kwargs)
return Response({'id': new_sub.id})
@action(detail=False, methods=('POST',))
def re_run_many_submissions(self, request):
qs = self.get_queryset()
for submission in qs:
submission.re_run()
return Response({})
@action(detail=True, methods=('GET',))
def get_details(self, request, pk):
submission = super().get_object()
if submission.phase.hide_output:
if not self.has_admin_permission(self.request.user, submission):
raise PermissionDenied("Cannot access submission details while phase marked to hide output.")
data = SubmissionFilesSerializer(submission, context=self.get_serializer_context()).data
return Response(data)
@action(detail=True, methods=('GET',))
def toggle_public(self, request, pk):
submission = super().get_object()
if not self.has_admin_permission(request.user, submission):
raise PermissionDenied('You do not have permission to publish this submission')
is_public = not submission.is_public
submission.data.is_public = is_public
submission.data.save(send=False)
submission.is_public = is_public
submission.save()
return Response({})
@action(detail=True, methods=('PATCH',))
def update_fact_sheet(self, request, pk):
if not isinstance(request.data, dict):
if isinstance(request.data, str):
try:
request_data = json.loads(request.data)
except ValueError:
raise ValidationError('Invalid JSON')
else:
request_data = request.data
request_submission = super().get_object()
top_level_submission = request_submission.parent or request_submission
# Validate fact_sheet using serializer
data = self.get_serializer(top_level_submission).data
data['fact_sheet_answers'] = request.data
serializer = self.get_serializer(data=data, instance=top_level_submission)
serializer.is_valid(raise_exception=True)
# Use Queryset to update Submissions
Submission.objects.filter(Q(parent=top_level_submission) | Q(id=top_level_submission.id)).update(fact_sheet_answers=request_data)
return Response({})
@api_view(['POST'])
@permission_classes((AllowAny,)) # permissions are checked via the submission secret
def upload_submission_scores(request, submission_pk):
submission = get_object_or_404(Submission, pk=submission_pk)
submission_rule = submission.phase.leaderboard.submission_rule
try:
if uuid.UUID(request.data.get("secret")) != submission.secret:
raise PermissionDenied("Submission secrets do not match")
except TypeError:
raise ValidationError("Secret not a valid UUID")
if "scores" not in request.data:
raise ValidationError("'scores' required.")
competition_columns = submission.phase.leaderboard.columns.values_list('key', flat=True)
for column_key, score in request.data.get("scores").items():
if column_key not in competition_columns:
continue
score = SubmissionScore.objects.create(
score=score,
column=Column.objects.get(leaderboard=submission.phase.leaderboard, key=column_key)
)
submission.scores.add(score)
if submission.parent:
submission.parent.scores.add(score)
submission.parent.calculate_scores()
else:
submission.calculate_scores()
put_on_leaderboard_by_submission_rule(request, submission_pk, submission_rule)
return Response()
@api_view(('GET',))
def can_make_submission(request, phase_id):
phase = get_object_or_404(Phase, id=phase_id)
user_is_approved = phase.competition.participants.filter(
user=request.user,
status=CompetitionParticipant.APPROVED
).exists()
if request.user.is_bot and phase.competition.allow_robot_submissions and not user_is_approved:
CompetitionParticipant.objects.create(
user=request.user,
competition=phase.competition,
status=CompetitionParticipant.APPROVED
)
user_is_approved = True
if user_is_approved:
can_make_submission, reason_why_not = phase.can_user_make_submissions(request.user)
else:
can_make_submission, reason_why_not = False, "User not approved to participate in this competition"
return Response({
"can": can_make_submission,
"reason": reason_why_not,
})
|
the-stack_0_25595
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Provides support utility for rescaling data."""
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
from iris.cube import Cube
from numpy import ndarray
def rescale(
data: ndarray,
data_range: Optional[Union[Tuple[float, float], List[float]]] = None,
scale_range: Union[Tuple[float, float], List[float]] = (0.0, 1.0),
clip: bool = False,
) -> ndarray:
"""
Rescale data array so that data_min => scale_min
and data_max => scale max.
All adjustments are linear
Args:
data:
Source values
data_range:
List containing two floats
Lowest and highest source value to rescale.
Default value of None is converted to [min(data), max(data)]
scale_range:
List containing two floats
Lowest and highest value after rescaling.
Defaults to (0., 1.)
clip:
If True, points where data were outside the scaling range
will be set to the scale min or max appropriately.
Default is False which continues the scaling beyond min and
max.
Returns:
Output array of scaled data. Has same shape as data.
"""
data_min = np.min(data) if data_range is None else data_range[0]
data_max = np.max(data) if data_range is None else data_range[1]
scale_min = scale_range[0]
scale_max = scale_range[1]
# Range check
if data_min == data_max:
raise ValueError(
"Cannot rescale a zero input range ({} -> {})".format(data_min, data_max)
)
if scale_min == scale_max:
raise ValueError(
"Cannot rescale a zero output range ({} -> {})".format(scale_min, scale_max)
)
result = (
(data - data_min) * (scale_max - scale_min) / (data_max - data_min)
) + scale_min
if clip:
result = np.clip(result, scale_min, scale_max)
return result
def apply_double_scaling(
data_cube: Cube,
scaled_cube: Cube,
data_vals: Tuple[float, float, float],
scaling_vals: Tuple[float, float, float],
combine_function: Callable[[ndarray, ndarray], ndarray] = np.minimum,
) -> ndarray:
"""
From data_cube, an array of limiting values is created based on a linear
rescaling from three data_vals to three scaling_vals.
The three values refer to a lower-bound, a mid-point and an upper-bound.
This rescaled data_cube is combined with scaled_cube to produce an array
containing either the higher or lower value as needed.
Args:
data_cube:
Data from which to create a rescaled data array
scaled_cube:
Data already in the rescaled frame of reference which will be
combined with the rescaled data_cube using the combine_function.
data_vals:
Lower, mid and upper points to rescale data_cube from
scaling_vals:
Lower, mid and upper points to rescale data_cube to
combine_function:
Function that takes two arrays of the same shape and returns
one array of the same shape.
Expected to be numpy.minimum (default) or numpy.maximum.
Returns:
Output data from data_cube after rescaling and combining with
scaled_cube.
This array will have the same dimensions as scaled_cube.
"""
# Where data are below the specified mid-point (data_vals[1]):
# Set rescaled_data to be a rescaled value between the first and mid-point
# Elsewhere
# Set rescaled_data to be a rescaled value between the mid- and last point
rescaled_data = np.where(
data_cube.data < data_vals[1],
rescale(
data_cube.data,
data_range=(data_vals[0], data_vals[1]),
scale_range=(scaling_vals[0], scaling_vals[1]),
clip=True,
),
rescale(
data_cube.data,
data_range=(data_vals[1], data_vals[2]),
scale_range=(scaling_vals[1], scaling_vals[2]),
clip=True,
),
)
# Ensure scaled_cube is no larger or smaller than the rescaled_data:
return combine_function(scaled_cube.data, rescaled_data)
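# --- Illustration (not part of the original module) --------------------------
# A minimal usage sketch of rescale(): map values from the source range
# [0, 10] onto [0, 1], clipping anything that falls outside the source range.
# The numbers below are illustrative only.
if __name__ == "__main__":
    values = np.array([-2.0, 0.0, 5.0, 10.0, 12.0])
    scaled = rescale(values, data_range=(0.0, 10.0), scale_range=(0.0, 1.0), clip=True)
    print(scaled)  # approximately [0. 0. 0.5 1. 1.]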
|
the-stack_0_25596
|
# -*- Python -*-
# This file is licensed under a pytorch-style license
# See frontends/pytorch/LICENSE for license information.
import torch
import torch_mlir
# RUN: %PYTHON %s | npcomp-opt | FileCheck %s
mb = torch_mlir.ModuleBuilder()
N = 3
Cin = 16
Cout = 4
w = 10
h = 10
model = torch.nn.Conv2d(Cin, Cout, (3,3))
ref_model = torch.nn.Conv2d(Cin, Cout, (3,3))
ref_model.weight.data = model.weight.clone()
ref_model.bias.data = model.bias.clone()
softmax = torch.nn.LogSoftmax(dim=1)
loss = torch.nn.NLLLoss()
tensor = torch.randn(N, Cin, h, w)
with mb.capture_function("conv2d_fwd", [tensor]) as f:
result = model(tensor)
f.returns([result])
# Generated with mlir/utils/generate-test-checks.py
# This is very deterministic and a change test is appropriate.
# CHECK-LABEL: func @conv2d_fwd(
# CHECK-SAME: %[[VAL_0:.*]]: !numpy.ndarray<[3,16,10,10]:f32>) -> !numpy.ndarray<[3,4,8,8]:f32> {
# CHECK: %[[VAL_1:.*]] = constant opaque<"", "0xDEADBEEF"> : tensor<4x16x3x3xf32>
# CHECK: %[[VAL_2:.*]] = constant opaque<"", "0xDEADBEEF"> : tensor<4xf32>
# CHECK: %[[VAL_3:.*]] = constant 1 : i64
# CHECK: %[[VAL_4:.*]] = constant 1 : i64
# CHECK: %[[VAL_5:.*]] = basicpy.build_list %[[VAL_3]], %[[VAL_4]] : (i64, i64) -> !basicpy.ListType
# CHECK: %[[VAL_6:.*]] = constant 0 : i64
# CHECK: %[[VAL_7:.*]] = constant 0 : i64
# CHECK: %[[VAL_8:.*]] = basicpy.build_list %[[VAL_6]], %[[VAL_7]] : (i64, i64) -> !basicpy.ListType
# CHECK: %[[VAL_9:.*]] = constant 1 : i64
# CHECK: %[[VAL_10:.*]] = constant 1 : i64
# CHECK: %[[VAL_11:.*]] = basicpy.build_list %[[VAL_9]], %[[VAL_10]] : (i64, i64) -> !basicpy.ListType
# CHECK: %[[VAL_12:.*]] = constant false
# CHECK: %[[VAL_13:.*]] = constant 0 : i64
# CHECK: %[[VAL_14:.*]] = constant 0 : i64
# CHECK: %[[VAL_15:.*]] = basicpy.build_list %[[VAL_13]], %[[VAL_14]] : (i64, i64) -> !basicpy.ListType
# CHECK: %[[VAL_16:.*]] = constant 1 : i64
# CHECK: %[[VAL_17:.*]] = numpy.create_array_from_tensor %[[VAL_1]] : (tensor<4x16x3x3xf32>) -> !numpy.ndarray<[4,16,3,3]:f32>
# CHECK: %[[VAL_18:.*]] = numpy.create_array_from_tensor %[[VAL_2]] : (tensor<4xf32>) -> !numpy.ndarray<[4]:f32>
# CHECK: %[[VAL_19:.*]] = torch.kernel_call "aten::convolution" %[[VAL_0]], %[[VAL_17]], %[[VAL_18]], %[[VAL_5]], %[[VAL_8]], %[[VAL_11]], %[[VAL_12]], %[[VAL_15]], %[[VAL_16]] : (!numpy.ndarray<[3,16,10,10]:f32>, !numpy.ndarray<[4,16,3,3]:f32>, !numpy.ndarray<[4]:f32>, !basicpy.ListType, !basicpy.ListType, !basicpy.ListType, i1, !basicpy.ListType, i64) -> !numpy.ndarray<[3,4,8,8]:f32>
# CHECK: return %[[VAL_19]] : !numpy.ndarray<[3,4,8,8]:f32>
# CHECK: }
mb.module.operation.print(large_elements_limit=2)
#print(mb.module)
|
the-stack_0_25598
|
# Webhooks for external integrations.
import json
import os
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
MESSAGE_TEMPLATE = (
'Author: {}\n'
'Build status: {} {}\n'
'Details: [build log]({})\n'
'Comment: {}'
)
@api_key_only_webhook_view('Gocd')
@has_request_variables
def api_gocd_webhook(request: HttpRequest, user_profile: UserProfile,
payload: Dict[str, Any]=REQ(argument_type='body'),
) -> HttpResponse:
modifications = payload['build_cause']['material_revisions'][0]['modifications'][0]
result = payload['stages'][0]['result']
material = payload['build_cause']['material_revisions'][0]['material']
if result == "Passed":
emoji = ':thumbs_up:'
elif result == "Failed":
emoji = ':thumbs_down:'
build_details_file = os.path.join(os.path.dirname(__file__), 'fixtures/build_details.json')
with open(build_details_file) as f:
contents = json.load(f)
build_link = contents["build_details"]["_links"]["pipeline"]["href"]
body = MESSAGE_TEMPLATE.format(
modifications['user_name'],
result,
emoji,
build_link,
modifications['comment'],
)
branch = material['description'].split(",")
topic = branch[0].split(" ")[1]
check_send_webhook_message(request, user_profile, topic, body)
return json_success()
|
the-stack_0_25602
|
from datetime import datetime, timedelta
from provider.utils import content_size_match
URL = "http://burgerking.hu/offers"
IMG_URL = "http://burgerking.hu/sites/burgerking.hu/files/HetkozNapiBKmenu_Mindentermek_lista_1000x550px.jpg"
burgerking_menu = {
0: "Whopper",
1: "Big King",
2: "Western Whopper",
3: "Whopper",
4: "Deluxe csirkemell"
}
def getMenu(today):
day = today.weekday()
IMG_SIZE = "181621"
if content_size_match(IMG_URL, IMG_SIZE) and day < 5:
menu = f"Akciós napi menü: {burgerking_menu[day]}"
else:
menu = ''
return menu
menu = {
'name': 'Burger King',
'id': 'bk',
'url': URL,
'get': getMenu,
'ttl': timedelta(hours=12),
'cards': ['szep', 'erzs']
}
|
the-stack_0_25605
|
import pytest
from hyperopt import STATUS_OK
from hyperas.distributions import choice, uniform
import six.moves.cPickle as pickle
from elephas.hyperparam import HyperParamModel
pytest.mark.usefixtures("spark_context")
def data():
from keras.datasets import mnist
from keras.utils import np_utils
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
nb_classes = 10
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([256, 512, 1024])}}))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['acc'])
model.fit(X_train, Y_train,
batch_size={{choice([64, 128])}},
epochs=1,
verbose=2,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model.to_yaml(),
'weights': pickle.dumps(model.get_weights())}
def test_hyper_param_model(spark_context):
hyperparam_model = HyperParamModel(spark_context)
hyperparam_model.minimize(model=model, data=data, max_evals=1)
|
the-stack_0_25607
|
import glob
import re
import os.path as osp
from ..utils.data import BaseImageDataset
class SYSU(BaseImageDataset):
"""
SYSU person re-identification dataset loader.
The directory layout follows the same convention as the other loaders in
this package: separate train/query/gallery folders containing images whose
names include 'pid_<person id>_s<camera id>'.
"""
dataset_dir = 'sysu_reid'
def __init__(self, root='/home/haoluo/data', verbose=True, **kwargs):
super(SYSU, self).__init__()
self.dataset_dir = osp.join(root, self.dataset_dir)
self.train_dir = osp.join(self.dataset_dir, 'train_raw')
self.query_dir = osp.join(self.dataset_dir, 'query_raw')
self.gallery_dir = osp.join(self.dataset_dir, 'test_raw')
self._check_before_run()
train = self._process_dir(self.train_dir, relabel=True)
query = self._process_dir(self.query_dir, relabel=False)
gallery = self._process_dir(self.gallery_dir, relabel=False)
if verbose:
print("=> SYSU loaded")
self.print_dataset_statistics(train, query, gallery)
self.train = train
self.query = query
self.gallery = gallery
self.num_train_pids, self.num_train_imgs, self.num_train_cams = self.get_imagedata_info(self.train)
self.num_query_pids, self.num_query_imgs, self.num_query_cams = self.get_imagedata_info(self.query)
self.num_gallery_pids, self.num_gallery_imgs, self.num_gallery_cams = self.get_imagedata_info(self.gallery)
def _check_before_run(self):
"""Check if all files are available before going deeper"""
if not osp.exists(self.dataset_dir):
raise RuntimeError("'{}' is not available".format(self.dataset_dir))
if not osp.exists(self.train_dir):
raise RuntimeError("'{}' is not available".format(self.train_dir))
if not osp.exists(self.query_dir):
raise RuntimeError("'{}' is not available".format(self.query_dir))
if not osp.exists(self.gallery_dir):
raise RuntimeError("'{}' is not available".format(self.gallery_dir))
def _process_dir(self, dir_path, relabel=False):
img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
pattern = re.compile(r'pid_([-\d]+)_s([-\d]+)')
pid_container = set()
for img_path in img_paths:
pid, _ = map(int, pattern.search(img_path).groups())
if pid == -1:
continue # junk images are just ignored
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
dataset = []
for img_path in img_paths:
pid, camid = map(int, pattern.search(img_path).groups())
if pid == -1:
continue # junk images are just ignored
assert 0 <= pid <= 5531 # pid == 0 means background
if relabel:
pid = pid2label[pid]
dataset.append((img_path, pid, camid))
return dataset
|
the-stack_0_25608
|
"""
This file is very long and growing, but it was decided to not split it yet, as
it's still manageable (2020-03-17, ~1.1k LoC). See gh-31989
Instead of splitting it was decided to define sections here:
- Configuration / Settings
- Autouse fixtures
- Common arguments
- Missing values & co.
- Classes
- Indices
- Series'
- DataFrames
- Operators & Operations
- Data sets/files
- Time zones
- Dtypes
- Misc
"""
from collections import abc
from datetime import (
date,
datetime,
time,
timedelta,
timezone,
)
from decimal import Decimal
import operator
import os
from dateutil.tz import (
tzlocal,
tzutc,
)
import hypothesis
from hypothesis import strategies as st
import numpy as np
import pytest
from pytz import (
FixedOffset,
utc,
)
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import (
DatetimeTZDtype,
IntervalDtype,
)
import pandas as pd
from pandas import (
DataFrame,
Interval,
Period,
Series,
Timedelta,
Timestamp,
)
import pandas._testing as tm
from pandas.core import ops
from pandas.core.indexes.api import (
Index,
MultiIndex,
)
try:
import pyarrow as pa
except ImportError:
has_pyarrow = False
else:
del pa
has_pyarrow = True
zoneinfo = None
if pd.compat.PY39:
# Import "zoneinfo" could not be resolved (reportMissingImports)
import zoneinfo # type: ignore[no-redef]
# Until https://github.com/numpy/numpy/issues/19078 is sorted out, just suppress
suppress_npdev_promotion_warning = pytest.mark.filterwarnings(
"ignore:Promotion of numbers and bools:FutureWarning"
)
# ----------------------------------------------------------------
# Configuration / Settings
# ----------------------------------------------------------------
# pytest
def pytest_addoption(parser):
parser.addoption("--skip-slow", action="store_true", help="skip slow tests")
parser.addoption("--skip-network", action="store_true", help="skip network tests")
parser.addoption("--skip-db", action="store_true", help="skip db tests")
parser.addoption(
"--run-high-memory", action="store_true", help="run high memory tests"
)
parser.addoption("--only-slow", action="store_true", help="run only slow tests")
parser.addoption(
"--strict-data-files",
action="store_true",
help="Fail if a test is skipped for missing data file.",
)
def ignore_doctest_warning(item: pytest.Item, path: str, message: str) -> None:
"""Ignore doctest warning.
Parameters
----------
item : pytest.Item
pytest test item.
path : str
Module path to Python object, e.g. "pandas.core.frame.DataFrame.append". A
warning will be filtered when item.name ends with in given path. So it is
sufficient to specify e.g. "DataFrame.append".
message : str
Message to be filtered.
"""
if item.name.endswith(path):
item.add_marker(pytest.mark.filterwarnings(f"ignore:{message}"))
def pytest_collection_modifyitems(items, config):
skip_slow = config.getoption("--skip-slow")
only_slow = config.getoption("--only-slow")
skip_network = config.getoption("--skip-network")
skip_db = config.getoption("--skip-db")
marks = [
(pytest.mark.slow, "slow", skip_slow, "--skip-slow"),
(pytest.mark.network, "network", skip_network, "--network"),
(pytest.mark.db, "db", skip_db, "--skip-db"),
]
# Warnings from doctests that can be ignored; place reason in comment above.
# Each entry specifies (path, message) - see the ignore_doctest_warning function
ignored_doctest_warnings = [
# Deprecations where the docstring will emit a warning
("DataFrame.append", "The frame.append method is deprecated"),
("Series.append", "The series.append method is deprecated"),
("dtypes.common.is_categorical", "is_categorical is deprecated"),
("Categorical.replace", "Categorical.replace is deprecated"),
("dtypes.common.is_extension_type", "'is_extension_type' is deprecated"),
("Index.is_mixed", "Index.is_mixed is deprecated"),
("MultiIndex._is_lexsorted", "MultiIndex.is_lexsorted is deprecated"),
# Docstring divides by zero to show behavior difference
("missing.mask_zero_div_zero", "divide by zero encountered"),
# Docstring demonstrates the call raises a warning
("_validators.validate_axis_style_args", "Use named arguments"),
]
for item in items:
if config.getoption("--doctest-modules") or config.getoption(
"--doctest-cython", default=False
):
# autouse=True for the add_doctest_imports can lead to expensive teardowns
# since doctest_namespace is a session fixture
item.add_marker(pytest.mark.usefixtures("add_doctest_imports"))
for path, message in ignored_doctest_warnings:
ignore_doctest_warning(item, path, message)
# mark all tests in the pandas/tests/frame directory with "arraymanager"
if "/frame/" in item.nodeid:
item.add_marker(pytest.mark.arraymanager)
item.add_marker(suppress_npdev_promotion_warning)
for (mark, kwd, skip_if_found, arg_name) in marks:
if kwd in item.keywords:
# If we're skipping, no need to actually add the marker or look for
# other markers
if skip_if_found:
item.add_marker(pytest.mark.skip(f"skipping due to {arg_name}"))
break
item.add_marker(mark)
if only_slow and "slow" not in item.keywords:
item.add_marker(pytest.mark.skip("skipping due to --only-slow"))
# Hypothesis
hypothesis.settings.register_profile(
"ci",
# Hypothesis timing checks are tuned for scalars by default, so we bump
# them from 200ms to 500ms per test case as the global default. If this
# is too short for a specific test, (a) try to make it faster, and (b)
# if it really is slow add `@settings(deadline=...)` with a working value,
# or `deadline=None` to entirely disable timeouts for that test.
# 2022-02-09: Changed deadline from 500 -> None. Deadline leads to
# non-actionable, flaky CI failures (# GH 24641, 44969, 45118, 44969)
deadline=None,
suppress_health_check=(hypothesis.HealthCheck.too_slow,),
)
hypothesis.settings.load_profile("ci")
# Registering these strategies makes them globally available via st.from_type,
# which is use for offsets in tests/tseries/offsets/test_offsets_properties.py
for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans())
)
for name in "YearBegin YearEnd BYearBegin BYearEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls,
st.builds(
cls,
n=st.integers(-5, 5),
normalize=st.booleans(),
month=st.integers(min_value=1, max_value=12),
),
)
for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls,
st.builds(
cls,
n=st.integers(-24, 24),
normalize=st.booleans(),
startingMonth=st.integers(min_value=1, max_value=12),
),
)
@pytest.fixture
def add_doctest_imports(doctest_namespace):
"""
Make `np` and `pd` names available for doctests.
"""
doctest_namespace["np"] = np
doctest_namespace["pd"] = pd
# ----------------------------------------------------------------
# Autouse fixtures
# ----------------------------------------------------------------
@pytest.fixture(autouse=True)
def configure_tests():
"""
Configure settings for all tests and test modules.
"""
pd.set_option("chained_assignment", "raise")
# ----------------------------------------------------------------
# Common arguments
# ----------------------------------------------------------------
@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis={repr(x)}")
def axis(request):
"""
Fixture for returning the axis numbers of a DataFrame.
"""
return request.param
axis_frame = axis
@pytest.fixture(params=[1, "columns"], ids=lambda x: f"axis={repr(x)}")
def axis_1(request):
"""
Fixture for returning aliases of axis 1 of a DataFrame.
"""
return request.param
@pytest.fixture(params=[True, False, None])
def observed(request):
"""
Pass in the observed keyword to groupby for [True, False]
This indicates whether categoricals should return values for
values which are not in the grouper [False / None], or only values which
appear in the grouper [True]. [None] is supported for future compatibility
if we decide to change the default (and would need to warn if this
parameter is not passed).
"""
return request.param
@pytest.fixture(params=[True, False, None])
def ordered(request):
"""
Boolean 'ordered' parameter for Categorical.
"""
return request.param
@pytest.fixture(params=["first", "last", False])
def keep(request):
"""
Valid values for the 'keep' parameter used in
.duplicated or .drop_duplicates
"""
return request.param
@pytest.fixture(params=["both", "neither", "left", "right"])
def inclusive_endpoints_fixture(request):
"""
Fixture for trying all interval 'inclusive' parameters.
"""
return request.param
@pytest.fixture(params=["left", "right", "both", "neither"])
def closed(request):
"""
Fixture for trying all interval closed parameters.
"""
return request.param
@pytest.fixture(params=["left", "right", "both", "neither"])
def other_closed(request):
"""
Secondary closed fixture to allow parametrizing over all pairs of closed.
"""
return request.param
@pytest.fixture(
params=[
None,
"gzip",
"bz2",
"zip",
"xz",
"tar",
pytest.param("zstd", marks=td.skip_if_no("zstandard")),
]
)
def compression(request):
"""
Fixture for trying common compression types in compression tests.
"""
return request.param
@pytest.fixture(
params=[
"gzip",
"bz2",
"zip",
"xz",
"tar",
pytest.param("zstd", marks=td.skip_if_no("zstandard")),
]
)
def compression_only(request):
"""
Fixture for trying common compression types in compression tests excluding
uncompressed case.
"""
return request.param
@pytest.fixture(params=[True, False])
def writable(request):
"""
Fixture that an array is writable.
"""
return request.param
@pytest.fixture(params=["inner", "outer", "left", "right"])
def join_type(request):
"""
Fixture for trying all types of join operations.
"""
return request.param
@pytest.fixture(params=["nlargest", "nsmallest"])
def nselect_method(request):
"""
Fixture for trying all nselect methods.
"""
return request.param
# ----------------------------------------------------------------
# Missing values & co.
# ----------------------------------------------------------------
@pytest.fixture(params=tm.NULL_OBJECTS, ids=lambda x: type(x).__name__)
def nulls_fixture(request):
"""
Fixture for each null type in pandas.
"""
return request.param
nulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture
@pytest.fixture(params=[None, np.nan, pd.NaT])
def unique_nulls_fixture(request):
"""
Fixture for each null type in pandas, each null type exactly once.
"""
return request.param
# Generate cartesian product of unique_nulls_fixture:
unique_nulls_fixture2 = unique_nulls_fixture
@pytest.fixture(params=tm.NP_NAT_OBJECTS, ids=lambda x: type(x).__name__)
def np_nat_fixture(request):
"""
Fixture for each NaT type in numpy.
"""
return request.param
# Generate cartesian product of np_nat_fixture:
np_nat_fixture2 = np_nat_fixture
# ----------------------------------------------------------------
# Classes
# ----------------------------------------------------------------
@pytest.fixture(params=[DataFrame, Series])
def frame_or_series(request):
"""
Fixture to parametrize over DataFrame and Series.
"""
return request.param
# error: List item 0 has incompatible type "Type[Index]"; expected "Type[IndexOpsMixin]"
@pytest.fixture(
params=[Index, Series], ids=["index", "series"] # type: ignore[list-item]
)
def index_or_series(request):
"""
Fixture to parametrize over Index and Series, made necessary by a mypy
bug, giving an error:
List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]"
See GH#29725
"""
return request.param
# Generate cartesian product of index_or_series fixture:
index_or_series2 = index_or_series
@pytest.fixture(params=[Index, Series, pd.array], ids=["index", "series", "array"])
def index_or_series_or_array(request):
"""
Fixture to parametrize over Index, Series, and ExtensionArray
"""
return request.param
@pytest.fixture(params=[Index, Series, DataFrame, pd.array], ids=lambda x: x.__name__)
def box_with_array(request):
"""
Fixture to test behavior for Index, Series, DataFrame, and pandas Array
classes
"""
return request.param
box_with_array2 = box_with_array
@pytest.fixture
def dict_subclass():
"""
Fixture for a dictionary subclass.
"""
class TestSubDict(dict):
def __init__(self, *args, **kwargs) -> None:
dict.__init__(self, *args, **kwargs)
return TestSubDict
@pytest.fixture
def non_dict_mapping_subclass():
"""
Fixture for a non-mapping dictionary subclass.
"""
class TestNonDictMapping(abc.Mapping):
def __init__(self, underlying_dict) -> None:
self._data = underlying_dict
def __getitem__(self, key):
return self._data.__getitem__(key)
def __iter__(self):
return self._data.__iter__()
def __len__(self):
return self._data.__len__()
return TestNonDictMapping
# ----------------------------------------------------------------
# Indices
# ----------------------------------------------------------------
@pytest.fixture
def multiindex_year_month_day_dataframe_random_data():
"""
DataFrame with 3 level MultiIndex (year, month, day) covering
first 100 business days from 2000-01-01 with random data
"""
tdf = tm.makeTimeDataFrame(100)
ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
# use Int64Index, to make sure things work
ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels])
ymd.index.set_names(["year", "month", "day"], inplace=True)
return ymd
@pytest.fixture
def lexsorted_two_level_string_multiindex():
"""
2-level MultiIndex, lexsorted, with string names.
"""
return MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
@pytest.fixture
def multiindex_dataframe_random_data(lexsorted_two_level_string_multiindex):
"""DataFrame with 2 level MultiIndex with random data"""
index = lexsorted_two_level_string_multiindex
return DataFrame(
np.random.randn(10, 3), index=index, columns=Index(["A", "B", "C"], name="exp")
)
def _create_multiindex():
"""
MultiIndex used to test the general functionality of this object
"""
# See Also: tests.multi.conftest.idx
major_axis = Index(["foo", "bar", "baz", "qux"])
minor_axis = Index(["one", "two"])
major_codes = np.array([0, 0, 1, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 0, 1])
index_names = ["first", "second"]
return MultiIndex(
levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes],
names=index_names,
verify_integrity=False,
)
def _create_mi_with_dt64tz_level():
"""
MultiIndex with a level that is a tzaware DatetimeIndex.
"""
# GH#8367 round trip with pickle
return MultiIndex.from_product(
[[1, 2], ["a", "b"], pd.date_range("20130101", periods=3, tz="US/Eastern")],
names=["one", "two", "three"],
)
indices_dict = {
"string": tm.makeStringIndex(100),
"datetime": tm.makeDateIndex(100),
"datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"),
"period": tm.makePeriodIndex(100),
"timedelta": tm.makeTimedeltaIndex(100),
"int": tm.makeIntIndex(100),
"uint": tm.makeUIntIndex(100),
"range": tm.makeRangeIndex(100),
"float": tm.makeFloatIndex(100),
"complex64": tm.makeFloatIndex(100).astype("complex64"),
"complex128": tm.makeFloatIndex(100).astype("complex128"),
"num_int64": tm.makeNumericIndex(100, dtype="int64"),
"num_int32": tm.makeNumericIndex(100, dtype="int32"),
"num_int16": tm.makeNumericIndex(100, dtype="int16"),
"num_int8": tm.makeNumericIndex(100, dtype="int8"),
"num_uint64": tm.makeNumericIndex(100, dtype="uint64"),
"num_uint32": tm.makeNumericIndex(100, dtype="uint32"),
"num_uint16": tm.makeNumericIndex(100, dtype="uint16"),
"num_uint8": tm.makeNumericIndex(100, dtype="uint8"),
"num_float64": tm.makeNumericIndex(100, dtype="float64"),
"num_float32": tm.makeNumericIndex(100, dtype="float32"),
"bool-object": tm.makeBoolIndex(10).astype(object),
"bool-dtype": Index(np.random.randn(10) < 0),
"categorical": tm.makeCategoricalIndex(100),
"interval": tm.makeIntervalIndex(100),
"empty": Index([]),
"tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
"mi-with-dt64tz-level": _create_mi_with_dt64tz_level(),
"multi": _create_multiindex(),
"repeats": Index([0, 0, 1, 1, 2, 2]),
"nullable_int": Index(np.arange(100), dtype="Int64"),
"nullable_uint": Index(np.arange(100), dtype="UInt16"),
"nullable_float": Index(np.arange(100), dtype="Float32"),
"nullable_bool": Index(np.arange(100).astype(bool), dtype="boolean"),
"string-python": Index(pd.array(tm.makeStringIndex(100), dtype="string[python]")),
}
if has_pyarrow:
idx = Index(pd.array(tm.makeStringIndex(100), dtype="string[pyarrow]"))
indices_dict["string-pyarrow"] = idx
@pytest.fixture(params=indices_dict.keys())
def index(request):
"""
Fixture for many "simple" kinds of indices.
These indices are unlikely to cover corner cases, e.g.
- no names
- no NaTs/NaNs
- no values near implementation bounds
- ...
"""
# copy to avoid mutation, e.g. setting .name
return indices_dict[request.param].copy()
# Needed to generate cartesian product of indices
index_fixture2 = index
@pytest.fixture(
params=[
key for key in indices_dict if not isinstance(indices_dict[key], MultiIndex)
]
)
def index_flat(request):
"""
index fixture, but excluding MultiIndex cases.
"""
key = request.param
return indices_dict[key].copy()
# Alias so we can test with cartesian product of index_flat
index_flat2 = index_flat
@pytest.fixture(
params=[
key
for key in indices_dict
if not isinstance(indices_dict[key], MultiIndex) and indices_dict[key].is_unique
]
)
def index_flat_unique(request):
"""
index_flat with uniqueness requirement.
"""
key = request.param
return indices_dict[key].copy()
@pytest.fixture(
params=[
key
for key in indices_dict
if not (
key in ["int", "uint", "range", "empty", "repeats", "bool-dtype"]
or key.startswith("num_")
)
and not isinstance(indices_dict[key], MultiIndex)
]
)
def index_with_missing(request):
"""
Fixture for indices with missing values.
Integer-dtype and empty cases are excluded because they cannot hold missing
values.
MultiIndex is excluded because isna() is not defined for MultiIndex.
"""
# GH 35538. Use deep copy to avoid illusive bug on np-dev
# GHA pipeline that writes into indices_dict despite copy
ind = indices_dict[request.param].copy(deep=True)
vals = ind.values
if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
# For setting missing values in the top level of MultiIndex
vals = ind.tolist()
vals[0] = (None,) + vals[0][1:]
vals[-1] = (None,) + vals[-1][1:]
return MultiIndex.from_tuples(vals)
else:
vals[0] = None
vals[-1] = None
return type(ind)(vals)
# ----------------------------------------------------------------
# Series'
# ----------------------------------------------------------------
@pytest.fixture
def string_series():
"""
Fixture for Series of floats with Index of unique strings
"""
s = tm.makeStringSeries()
s.name = "series"
return s
@pytest.fixture
def object_series():
"""
Fixture for Series of dtype object with Index of unique strings
"""
s = tm.makeObjectSeries()
s.name = "objects"
return s
@pytest.fixture
def datetime_series():
"""
Fixture for Series of floats with DatetimeIndex
"""
s = tm.makeTimeSeries()
s.name = "ts"
return s
def _create_series(index):
"""Helper for the _series dict"""
size = len(index)
data = np.random.randn(size)
return Series(data, index=index, name="a")
_series = {
f"series-with-{index_id}-index": _create_series(index)
for index_id, index in indices_dict.items()
}
@pytest.fixture
def series_with_simple_index(index):
"""
Fixture for tests on series with changing types of indices.
"""
return _create_series(index)
@pytest.fixture
def series_with_multilevel_index():
"""
Fixture with a Series with a 2-level MultiIndex.
"""
arrays = [
["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = zip(*arrays)
index = MultiIndex.from_tuples(tuples)
data = np.random.randn(8)
ser = Series(data, index=index)
ser[3] = np.NaN
return ser
_narrow_series = {
f"{dtype.__name__}-series": tm.make_rand_series(name="a", dtype=dtype)
for dtype in tm.NARROW_NP_DTYPES
}
_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}
@pytest.fixture(params=_index_or_series_objs.keys())
def index_or_series_obj(request):
"""
    Fixture for tests on indexes, series and series with a narrow dtype.
    Copies to avoid mutation, e.g. setting .name.
"""
return _index_or_series_objs[request.param].copy(deep=True)
# ----------------------------------------------------------------
# DataFrames
# ----------------------------------------------------------------
@pytest.fixture
def int_frame():
"""
Fixture for DataFrame of ints with index of unique strings
Columns are ['A', 'B', 'C', 'D']
A B C D
vpBeWjM651 1 0 1 0
5JyxmrP1En -1 0 0 0
qEDaoD49U2 -1 1 0 0
m66TkTfsFe 0 0 0 0
EHPaNzEUFm -1 0 -1 0
fpRJCevQhi 2 0 0 0
OlQvnmfi3Q 0 0 -2 0
... .. .. .. ..
uB1FPlz4uP 0 0 0 1
EcSe6yNzCU 0 0 -1 0
L50VudaiI8 -1 1 -2 0
y3bpw4nwIp 0 -1 0 0
H0RdLLwrCT 1 1 0 0
rY82K0vMwm 0 0 0 0
1OPIUjnkjk 2 0 0 0
[30 rows x 4 columns]
"""
return DataFrame(tm.getSeriesData()).astype("int64")
@pytest.fixture
def datetime_frame():
"""
Fixture for DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']
A B C D
2000-01-03 -1.122153 0.468535 0.122226 1.693711
2000-01-04 0.189378 0.486100 0.007864 -1.216052
2000-01-05 0.041401 -0.835752 -0.035279 -0.414357
2000-01-06 0.430050 0.894352 0.090719 0.036939
2000-01-07 -0.620982 -0.668211 -0.706153 1.466335
2000-01-10 -0.752633 0.328434 -0.815325 0.699674
2000-01-11 -2.236969 0.615737 -0.829076 -1.196106
... ... ... ... ...
2000-02-03 1.642618 -0.579288 0.046005 1.385249
2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351
2000-02-07 -2.656149 -0.601387 1.410148 0.444150
2000-02-08 -1.201881 -1.289040 0.772992 -1.445300
2000-02-09 1.377373 0.398619 1.008453 -0.928207
2000-02-10 0.473194 -0.636677 0.984058 0.511519
2000-02-11 -0.965556 0.408313 -1.312844 -0.381948
[30 rows x 4 columns]
"""
return DataFrame(tm.getTimeSeriesData())
@pytest.fixture
def float_frame():
"""
Fixture for DataFrame of floats with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
A B C D
P7GACiRnxd -0.465578 -0.361863 0.886172 -0.053465
qZKh6afn8n -0.466693 -0.373773 0.266873 1.673901
tkp0r6Qble 0.148691 -0.059051 0.174817 1.598433
wP70WOCtv8 0.133045 -0.581994 -0.992240 0.261651
M2AeYQMnCz -1.207959 -0.185775 0.588206 0.563938
QEPzyGDYDo -0.381843 -0.758281 0.502575 -0.565053
r78Jwns6dn -0.653707 0.883127 0.682199 0.206159
... ... ... ... ...
IHEGx9NO0T -0.277360 0.113021 -1.018314 0.196316
lPMj8K27FA -1.313667 -0.604776 -1.305618 -0.863999
qa66YMWQa5 1.110525 0.475310 -0.747865 0.032121
yOa0ATsmcE -0.431457 0.067094 0.096567 -0.264962
65znX3uRNG 1.528446 0.160416 -0.109635 -0.032987
eCOBvKqf3e 0.235281 1.622222 0.781255 0.392871
xSucinXxuV -1.263557 0.252799 -0.552247 0.400426
[30 rows x 4 columns]
"""
return DataFrame(tm.getSeriesData())
@pytest.fixture
def mixed_type_frame():
"""
Fixture for DataFrame of float/int/string columns with RangeIndex
Columns are ['a', 'b', 'c', 'float32', 'int32'].
"""
return DataFrame(
{
"a": 1.0,
"b": 2,
"c": "foo",
"float32": np.array([1.0] * 10, dtype="float32"),
"int32": np.array([1] * 10, dtype="int32"),
},
index=np.arange(10),
)
@pytest.fixture
def rand_series_with_duplicate_datetimeindex():
"""
Fixture for Series with a DatetimeIndex that has duplicates.
"""
dates = [
datetime(2000, 1, 2),
datetime(2000, 1, 2),
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 3),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 4),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
return Series(np.random.randn(len(dates)), index=dates)
# ----------------------------------------------------------------
# Scalars
# ----------------------------------------------------------------
@pytest.fixture(
params=[
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(Interval(left=0.1, right=0.5), IntervalDtype("float64", "right")),
(Period("2012-01", freq="M"), "period[M]"),
(Period("2012-02-01", freq="D"), "period[D]"),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
(Timedelta(seconds=500), "timedelta64[ns]"),
]
)
def ea_scalar_and_dtype(request):
return request.param
# ----------------------------------------------------------------
# Operators & Operations
# ----------------------------------------------------------------
_all_arithmetic_operators = [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__mod__",
"__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations.
"""
return request.param
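# Illustrative sketch only: the fixture yields dunder-method names, so a test
# typically resolves the method with getattr on the object under test. The
# example below is hypothetical and not collected by pytest.
def _example_arithmetic_dunder_usage(all_arithmetic_operators):
    ser = Series([1.0, 2.0, 3.0])
    method = getattr(ser, all_arithmetic_operators)
    result = method(2)
    assert isinstance(result, Series)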
@pytest.fixture(
params=[
operator.add,
ops.radd,
operator.sub,
ops.rsub,
operator.mul,
ops.rmul,
operator.truediv,
ops.rtruediv,
operator.floordiv,
ops.rfloordiv,
operator.mod,
ops.rmod,
operator.pow,
ops.rpow,
operator.eq,
operator.ne,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.and_,
ops.rand_,
operator.xor,
ops.rxor,
operator.or_,
ops.ror_,
]
)
def all_binary_operators(request):
"""
Fixture for operator and roperator arithmetic, comparison, and logical ops.
"""
return request.param
@pytest.fixture(
params=[
operator.add,
ops.radd,
operator.sub,
ops.rsub,
operator.mul,
ops.rmul,
operator.truediv,
ops.rtruediv,
operator.floordiv,
ops.rfloordiv,
operator.mod,
ops.rmod,
operator.pow,
ops.rpow,
]
)
def all_arithmetic_functions(request):
"""
Fixture for operator and roperator arithmetic functions.
Notes
-----
    Unlike ``all_arithmetic_operators``, which yields dunder-method names,
    this fixture yields the callable operator/roperator functions themselves.
"""
return request.param
_all_numeric_reductions = [
"sum",
"max",
"min",
"mean",
"prod",
"std",
"var",
"median",
"kurt",
"skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names.
"""
return request.param
_all_boolean_reductions = ["all", "any"]
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
"""
Fixture for boolean reduction names.
"""
return request.param
_all_reductions = _all_numeric_reductions + _all_boolean_reductions
@pytest.fixture(params=_all_reductions)
def all_reductions(request):
"""
Fixture for all (boolean + numeric) reduction names.
"""
return request.param
@pytest.fixture(
params=[
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.lt,
operator.le,
]
)
def comparison_op(request):
"""
Fixture for operator module comparison functions.
"""
return request.param
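# Illustrative sketch only: comparison_op is a plain function from the
# operator module, so it can be applied directly to pandas objects. The
# example below is hypothetical and not collected by pytest.
def _example_comparison_op_usage(comparison_op):
    ser = Series([1, 2, 3])
    result = comparison_op(ser, 2)
    assert result.dtype == bool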
@pytest.fixture(params=["__le__", "__lt__", "__ge__", "__gt__"])
def compare_operators_no_eq_ne(request):
"""
Fixture for dunder names for compare operations except == and !=
* >=
* >
* <
* <=
"""
return request.param
@pytest.fixture(
params=["__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__"]
)
def all_logical_operators(request):
"""
Fixture for dunder names for common logical operations
* |
* &
* ^
"""
return request.param
# ----------------------------------------------------------------
# Data sets/files
# ----------------------------------------------------------------
@pytest.fixture
def strict_data_files(pytestconfig):
"""
Returns the configuration for the test setting `--strict-data-files`.
"""
return pytestconfig.getoption("--strict-data-files")
@pytest.fixture
def datapath(strict_data_files):
"""
Get the path to a data file.
Parameters
----------
path : str
Path to the file, relative to ``pandas/tests/``
Returns
-------
path including ``pandas/tests``.
Raises
------
ValueError
If the path doesn't exist and the --strict-data-files option is set.
"""
BASE_PATH = os.path.join(os.path.dirname(__file__), "tests")
def deco(*args):
path = os.path.join(BASE_PATH, *args)
if not os.path.exists(path):
if strict_data_files:
raise ValueError(
f"Could not find file {path} and --strict-data-files is set."
)
else:
pytest.skip(f"Could not find {path}.")
return path
return deco
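# Illustrative sketch only: `datapath` returns a callable that joins path
# components relative to pandas/tests, e.g. the same CSV the `iris` fixture
# below loads. The example is not collected by pytest.
def _example_datapath_usage(datapath):
    path = datapath("io", "data", "csv", "iris.csv")
    assert path.endswith("iris.csv")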
@pytest.fixture
def iris(datapath):
"""
The iris dataset as a DataFrame.
"""
return pd.read_csv(datapath("io", "data", "csv", "iris.csv"))
# ----------------------------------------------------------------
# Time zones
# ----------------------------------------------------------------
TIMEZONES = [
None,
"UTC",
"US/Eastern",
"Asia/Tokyo",
"dateutil/US/Pacific",
"dateutil/Asia/Singapore",
"+01:15",
"-02:15",
"UTC+01:15",
"UTC-02:15",
tzutc(),
tzlocal(),
FixedOffset(300),
FixedOffset(0),
FixedOffset(-300),
timezone.utc,
timezone(timedelta(hours=1)),
timezone(timedelta(hours=-1), name="foo"),
]
if zoneinfo is not None:
TIMEZONES.extend([zoneinfo.ZoneInfo("US/Pacific"), zoneinfo.ZoneInfo("UTC")])
TIMEZONE_IDS = [repr(i) for i in TIMEZONES]
@td.parametrize_fixture_doc(str(TIMEZONE_IDS))
@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS)
def tz_naive_fixture(request):
"""
Fixture for trying timezones including default (None): {0}
"""
return request.param
@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:]))
@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:])
def tz_aware_fixture(request):
"""
Fixture for trying explicit timezones: {0}
"""
return request.param
# Generate cartesian product of tz_aware_fixture:
tz_aware_fixture2 = tz_aware_fixture
_UTCS = ["utc", "dateutil/UTC", utc, tzutc(), timezone.utc]
if zoneinfo is not None:
_UTCS.append(zoneinfo.ZoneInfo("UTC"))
@pytest.fixture(params=_UTCS)
def utc_fixture(request):
"""
Fixture to provide variants of UTC timezone strings and tzinfo objects.
"""
return request.param
utc_fixture2 = utc_fixture
# ----------------------------------------------------------------
# Dtypes
# ----------------------------------------------------------------
@pytest.fixture(params=tm.STRING_DTYPES)
def string_dtype(request):
"""
Parametrized fixture for string dtypes.
* str
* 'str'
* 'U'
"""
return request.param
@pytest.fixture(
params=[
"string[python]",
pytest.param(
"string[pyarrow]", marks=td.skip_if_no("pyarrow", min_version="1.0.0")
),
]
)
def nullable_string_dtype(request):
"""
Parametrized fixture for string dtypes.
* 'string[python]'
* 'string[pyarrow]'
"""
return request.param
@pytest.fixture(
params=[
"python",
pytest.param("pyarrow", marks=td.skip_if_no("pyarrow", min_version="1.0.0")),
]
)
def string_storage(request):
"""
Parametrized fixture for pd.options.mode.string_storage.
* 'python'
* 'pyarrow'
"""
return request.param
# Alias so we can test with cartesian product of string_storage
string_storage2 = string_storage
@pytest.fixture(params=tm.BYTES_DTYPES)
def bytes_dtype(request):
"""
Parametrized fixture for bytes dtypes.
* bytes
* 'bytes'
"""
return request.param
@pytest.fixture(params=tm.OBJECT_DTYPES)
def object_dtype(request):
"""
Parametrized fixture for object dtypes.
* object
* 'object'
"""
return request.param
@pytest.fixture(
params=[
"object",
"string[python]",
pytest.param(
"string[pyarrow]", marks=td.skip_if_no("pyarrow", min_version="1.0.0")
),
]
)
def any_string_dtype(request):
"""
Parametrized fixture for string dtypes.
* 'object'
* 'string[python]'
* 'string[pyarrow]'
"""
return request.param
@pytest.fixture(params=tm.DATETIME64_DTYPES)
def datetime64_dtype(request):
"""
Parametrized fixture for datetime64 dtypes.
* 'datetime64[ns]'
* 'M8[ns]'
"""
return request.param
@pytest.fixture(params=tm.TIMEDELTA64_DTYPES)
def timedelta64_dtype(request):
"""
Parametrized fixture for timedelta64 dtypes.
* 'timedelta64[ns]'
* 'm8[ns]'
"""
return request.param
@pytest.fixture
def fixed_now_ts():
"""
    Fixture that emits a fixed, deterministic Timestamp (in place of Timestamp.now()).
"""
return Timestamp(
year=2021, month=1, day=1, hour=12, minute=4, second=13, microsecond=22
)
@pytest.fixture(params=tm.FLOAT_NUMPY_DTYPES)
def float_numpy_dtype(request):
"""
Parameterized fixture for float dtypes.
* float
* 'float32'
* 'float64'
"""
return request.param
@pytest.fixture(params=tm.FLOAT_EA_DTYPES)
def float_ea_dtype(request):
"""
Parameterized fixture for float dtypes.
* 'Float32'
* 'Float64'
"""
return request.param
@pytest.fixture(params=tm.FLOAT_NUMPY_DTYPES + tm.FLOAT_EA_DTYPES)
def any_float_dtype(request):
"""
Parameterized fixture for float dtypes.
* float
* 'float32'
* 'float64'
* 'Float32'
* 'Float64'
"""
return request.param
@pytest.fixture(params=tm.COMPLEX_DTYPES)
def complex_dtype(request):
"""
Parameterized fixture for complex dtypes.
* complex
* 'complex64'
* 'complex128'
"""
return request.param
@pytest.fixture(params=tm.SIGNED_INT_NUMPY_DTYPES)
def any_signed_int_numpy_dtype(request):
"""
Parameterized fixture for signed integer dtypes.
* int
* 'int8'
* 'int16'
* 'int32'
* 'int64'
"""
return request.param
@pytest.fixture(params=tm.UNSIGNED_INT_NUMPY_DTYPES)
def any_unsigned_int_numpy_dtype(request):
"""
Parameterized fixture for unsigned integer dtypes.
* 'uint8'
* 'uint16'
* 'uint32'
* 'uint64'
"""
return request.param
@pytest.fixture(params=tm.ALL_INT_NUMPY_DTYPES)
def any_int_numpy_dtype(request):
"""
Parameterized fixture for any integer dtype.
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
"""
return request.param
@pytest.fixture(params=tm.ALL_INT_EA_DTYPES)
def any_int_ea_dtype(request):
"""
Parameterized fixture for any nullable integer dtype.
* 'UInt8'
* 'Int8'
* 'UInt16'
* 'Int16'
* 'UInt32'
* 'Int32'
* 'UInt64'
* 'Int64'
"""
return request.param
@pytest.fixture(params=tm.ALL_INT_NUMPY_DTYPES + tm.ALL_INT_EA_DTYPES)
def any_int_dtype(request):
"""
    Parameterized fixture for any integer dtype, numpy or nullable.
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
* 'UInt8'
* 'Int8'
* 'UInt16'
* 'Int16'
* 'UInt32'
* 'Int32'
* 'UInt64'
* 'Int64'
"""
return request.param
@pytest.fixture(params=tm.ALL_INT_EA_DTYPES + tm.FLOAT_EA_DTYPES)
def any_numeric_ea_dtype(request):
"""
    Parameterized fixture for any nullable integer dtype and any
    nullable float dtype.
* 'UInt8'
* 'Int8'
* 'UInt16'
* 'Int16'
* 'UInt32'
* 'Int32'
* 'UInt64'
* 'Int64'
* 'Float32'
* 'Float64'
"""
return request.param
@pytest.fixture(params=tm.SIGNED_INT_EA_DTYPES)
def any_signed_int_ea_dtype(request):
"""
Parameterized fixture for any signed nullable integer dtype.
* 'Int8'
* 'Int16'
* 'Int32'
* 'Int64'
"""
return request.param
@pytest.fixture(params=tm.ALL_REAL_NUMPY_DTYPES)
def any_real_numpy_dtype(request):
"""
Parameterized fixture for any (purely) real numeric dtype.
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
* float
* 'float32'
* 'float64'
"""
return request.param
@pytest.fixture(params=tm.ALL_NUMPY_DTYPES)
def any_numpy_dtype(request):
"""
Parameterized fixture for all numpy dtypes.
* bool
* 'bool'
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
* float
* 'float32'
* 'float64'
* complex
* 'complex64'
* 'complex128'
* str
* 'str'
* 'U'
* bytes
* 'bytes'
* 'datetime64[ns]'
* 'M8[ns]'
* 'timedelta64[ns]'
* 'm8[ns]'
* object
* 'object'
"""
return request.param
# categoricals are handled separately
_any_skipna_inferred_dtype = [
("string", ["a", np.nan, "c"]),
("string", ["a", pd.NA, "c"]),
("mixed", ["a", pd.NaT, "c"]), # pd.NaT not considered valid by is_string_array
("bytes", [b"a", np.nan, b"c"]),
("empty", [np.nan, np.nan, np.nan]),
("empty", []),
("mixed-integer", ["a", np.nan, 2]),
("mixed", ["a", np.nan, 2.0]),
("floating", [1.0, np.nan, 2.0]),
("integer", [1, np.nan, 2]),
("mixed-integer-float", [1, np.nan, 2.0]),
("decimal", [Decimal(1), np.nan, Decimal(2)]),
("boolean", [True, np.nan, False]),
("boolean", [True, pd.NA, False]),
("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]),
("datetime", [Timestamp("20130101"), np.nan, Timestamp("20180101")]),
("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),
# The following two dtypes are commented out due to GH 23554
# ('complex', [1 + 1j, np.nan, 2 + 2j]),
# ('timedelta64', [np.timedelta64(1, 'D'),
# np.nan, np.timedelta64(2, 'D')]),
("timedelta", [timedelta(1), np.nan, timedelta(2)]),
("time", [time(1), np.nan, time(2)]),
("period", [Period(2013), pd.NaT, Period(2018)]),
("interval", [Interval(0, 1), np.nan, Interval(0, 2)]),
]
ids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id
@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)
def any_skipna_inferred_dtype(request):
"""
Fixture for all inferred dtypes from _libs.lib.infer_dtype
The covered (inferred) types are:
* 'string'
* 'empty'
* 'bytes'
* 'mixed'
* 'mixed-integer'
* 'mixed-integer-float'
* 'floating'
* 'integer'
* 'decimal'
* 'boolean'
* 'datetime64'
* 'datetime'
* 'date'
* 'timedelta'
* 'time'
* 'period'
* 'interval'
Returns
-------
inferred_dtype : str
The string for the inferred dtype from _libs.lib.infer_dtype
values : np.ndarray
An array of object dtype that will be inferred to have
`inferred_dtype`
Examples
--------
>>> import pandas._libs.lib as lib
>>>
>>> def test_something(any_skipna_inferred_dtype):
... inferred_dtype, values = any_skipna_inferred_dtype
... # will pass
... assert lib.infer_dtype(values, skipna=True) == inferred_dtype
"""
inferred_dtype, values = request.param
values = np.array(values, dtype=object) # object dtype to avoid casting
# correctness of inference tested in tests/dtypes/test_inference.py
return inferred_dtype, values
# ----------------------------------------------------------------
# Misc
# ----------------------------------------------------------------
@pytest.fixture
def ip():
"""
Get an instance of IPython.InteractiveShell.
Will raise a skip if IPython is not installed.
"""
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.interactiveshell import InteractiveShell
# GH#35711 make sure sqlite history file handle is not leaked
from traitlets.config import Config # isort:skip
c = Config()
c.HistoryManager.hist_file = ":memory:"
return InteractiveShell(config=c)
@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"])
def spmatrix(request):
"""
Yields scipy sparse matrix classes.
"""
from scipy import sparse
return getattr(sparse, request.param + "_matrix")
@pytest.fixture(
params=[
getattr(pd.offsets, o)
for o in pd.offsets.__all__
if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)
]
)
def tick_classes(request):
"""
Fixture for Tick based datetime offsets available for a time series.
"""
return request.param
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Simple fixture for testing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
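# Illustrative sketch only: the key is forwarded to sorting methods, so None
# (no key) and the identity function should yield the same ordering. The
# example below is hypothetical and not collected by pytest.
def _example_sort_by_key_usage(sort_by_key):
    ser = Series([3, 1, 2])
    result = ser.sort_values(key=sort_by_key)
    assert list(result) == [1, 2, 3]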
@pytest.fixture()
def fsspectest():
pytest.importorskip("fsspec")
from fsspec import register_implementation
from fsspec.implementations.memory import MemoryFileSystem
from fsspec.registry import _registry as registry
class TestMemoryFS(MemoryFileSystem):
protocol = "testmem"
test = [None]
def __init__(self, **kwargs) -> None:
self.test[0] = kwargs.pop("test", None)
super().__init__(**kwargs)
register_implementation("testmem", TestMemoryFS, clobber=True)
yield TestMemoryFS()
registry.pop("testmem", None)
TestMemoryFS.test[0] = None
TestMemoryFS.store.clear()
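# Illustrative sketch only: the fixture registers a "testmem://" filesystem
# whose constructor records the `test` storage option, so a test can check
# that storage_options reach the filesystem layer. The file name below is
# hypothetical and the example is not collected by pytest.
def _example_fsspectest_usage(fsspectest):
    df = DataFrame({"a": [0]})
    df.to_csv("testmem://mockfile.csv", storage_options={"test": "csv_write"})
    assert fsspectest.test[0] == "csv_write"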
@pytest.fixture(
params=[
("foo", None, None),
("Egon", "Venkman", None),
("NCC1701D", "NCC1701D", "NCC1701D"),
# possibly-matching NAs
(np.nan, np.nan, np.nan),
(np.nan, pd.NaT, None),
(np.nan, pd.NA, None),
(pd.NA, pd.NA, pd.NA),
]
)
def names(request):
"""
A 3-tuple of names, the first two for operands, the last for a result.
"""
return request.param
@pytest.fixture(params=[tm.setitem, tm.loc, tm.iloc])
def indexer_sli(request):
"""
Parametrize over __setitem__, loc.__setitem__, iloc.__setitem__
"""
return request.param
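# Illustrative sketch only: each indexer fixture yields a small helper from
# pandas._testing that returns either the object itself (plain __setitem__)
# or its .loc/.iloc accessor, so one test body exercises several setitem
# paths. The example is not collected by pytest.
def _example_indexer_sli_usage(indexer_sli):
    ser = Series([1, 2, 3])
    indexer_sli(ser)[0] = 10
    assert ser[0] == 10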
@pytest.fixture(params=[tm.loc, tm.iloc])
def indexer_li(request):
"""
Parametrize over loc.__getitem__, iloc.__getitem__
"""
return request.param
@pytest.fixture(params=[tm.setitem, tm.iloc])
def indexer_si(request):
"""
Parametrize over __setitem__, iloc.__setitem__
"""
return request.param
@pytest.fixture(params=[tm.setitem, tm.loc])
def indexer_sl(request):
"""
Parametrize over __setitem__, loc.__setitem__
"""
return request.param
@pytest.fixture(params=[tm.at, tm.loc])
def indexer_al(request):
"""
Parametrize over at.__setitem__, loc.__setitem__
"""
return request.param
@pytest.fixture(params=[tm.iat, tm.iloc])
def indexer_ial(request):
"""
Parametrize over iat.__setitem__, iloc.__setitem__
"""
return request.param
@pytest.fixture
def using_array_manager():
"""
Fixture to check if the array manager is being used.
"""
return pd.options.mode.data_manager == "array"
|
the-stack_0_25609
|
import requests
from requests.adapters import HTTPAdapter
from bs4 import BeautifulSoup
import sys
se = requests.Session()  # persistent session used for the simulated login
requests.adapters.DEFAULT_RETRIES = 15
se.mount('http://', HTTPAdapter(max_retries=3))  # retry on connection failures
se.mount('https://', HTTPAdapter(max_retries=3))
class Pixiv(object):
def __init__(self):
self.base_url = 'https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index'
self.login_url = 'https://accounts.pixiv.net/api/login?lang=zh'
self.search_url = 'https://www.pixiv.net/search.php'
self.main_url = 'https://www.pixiv.net'
self.target_url = 'https://www.pixiv.net/member_illust.php?mode=medium&illust_id='
self.headers = {
'Referer': 'https://accounts.pixiv.net/login?lang=zh&source=pc&view_type=page&ref=wwwtop_accounts_index',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
' AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
}
        self.pixiv_id = '[email protected]'  # [email protected]
        self.password = 'yjw3616807'  # knxy0616
        self.post_key = []
        self.return_to = 'https://www.pixiv.net/'
        self.load_path = './id_pic/'  # directory where downloaded images are stored
def login(self):
post_key_xml = se.get(self.base_url, headers=self.headers).text
post_key_soup = BeautifulSoup(post_key_xml, 'lxml')
self.post_key = post_key_soup.find('input')['value']
        # build the login request body
data = {
'pixiv_id': self.pixiv_id,
'password': self.password,
'post_key': self.post_key,
'return_to': self.return_to
}
se.post(self.login_url, data=data, headers=self.headers)
def download(self):
        pixiv_id = sys.argv[1]
        # fetch the artwork page and extract the original-image URL
        temp_url = self.target_url + pixiv_id
        temp_clear = se.get(temp_url, headers=self.headers)
        clear_soup = BeautifulSoup(temp_clear.text, features="html.parser")
        # name = self.validateTitle(title[i].text)  # image title (unused)
        page = clear_soup.prettify()
        op = page.find('"original":"')
        ed = page.find('},"tags')
        original_url = page[op + 12:ed - 1]
        adapt_url = original_url.replace('\\/', '/')  # un-escape the JSON "\/" sequences
        img = se.get(adapt_url, headers=self.headers)
        with open(self.load_path + pixiv_id + '.jpg', 'wb') as f:  # binary mode: raw image bytes
            f.write(img.content)  # save the image
if __name__ == '__main__':
pixiv = Pixiv()
pixiv.login()
pixiv.download()
print("System Exit")
|
the-stack_0_25610
|
import sys
import numpy as np
sys.path.append('..')
import utils
def test_morph_denoise():
    # structuring-element sizes plus deliberately unused extra keys
    params = dict(
        size_open=3,
        size_close=5,
        not_used_1=50,
        not_used_50="some_string",
    )
np.random.seed(0)
img = np.random.random((50, 50, 1))
img = (img > 0.5).astype(float)
img = utils.morph_denoise(img, **params)
assert img is not None
|
the-stack_0_25611
|
#!/usr/bin/env python
from __future__ import print_function
import socket
import sys
import time
import string
import random
import signal
import sys
import os
import re
INTERVAL = 1000  # interval between probes and reply timeout, unit: ms
LEN = 64         # payload length, unit: bytes
IP = ""
PORT = 0
MAX_TEST_COUNT = 1
count = 0
count_of_received = 0
rtt_sum = 0.0
rtt_min = 99999999.0
rtt_max = 0.0
fail_count = 0
def signal_handler(signal, frame):
if count!=0 and count_of_received!=0:
print('')
print('--- ping statistics ---')
if count!=0:
print('%d packets transmitted, %d received, %.2f%% packet loss'%(count,count_of_received, (count-count_of_received)*100.0/count))
if count_of_received!=0:
print('rtt min/avg/max = %.2f/%.2f/%.2f ms'%(rtt_min,rtt_sum/count_of_received,rtt_max))
os._exit(0)
def random_string(length):
return ''.join(random.choice(string.ascii_letters+ string.digits ) for m in range(length))
def is_domain(domain):
domain_regex = re.compile(
r'(?:[A-Z0-9_](?:[A-Z0-9-_]{0,247}[A-Z0-9])?\.)+(?:[A-Z]{2,6}|[A-Z0-9-]{2,}(?<!-))\Z', re.IGNORECASE)
return True if domain_regex.match(domain) else False
if len(sys.argv) != 3 and len(sys.argv)!=4 :
print(""" usage:""")
print(""" this_program <dest_ip> <dest_port>""")
print(""" this_program <dest_ip> <dest_port> "<options>" """)
print()
print(""" options:""")
print(""" LEN the length of payload, unit:byte""")
print(""" INTERVAL the seconds waited between sending each packet, as well as the timeout for reply packet, unit: ms""")
print()
print(" examples:")
print(" ./udpping.py 44.55.66.77 4000")
print(' ./udpping.py 44.55.66.77 4000 "LEN=400;INTERVAL=2000;MAX_TEST_COUNT=1"')
print(" ./udpping.py fe80::5400:ff:aabb:ccdd 4000")
print()
exit()
IP=sys.argv[1]
PORT=int(sys.argv[2])
is_ipv6=0
if is_domain(IP):
IP = socket.gethostbyname(IP)
if IP.find(":")!=-1:
is_ipv6=1
if len(sys.argv)==4:
exec(sys.argv[3])
if LEN<5:
print("LEN must be >=5")
exit()
if INTERVAL<50:
print("INTERVAL must be >=50")
exit()
signal.signal(signal.SIGINT, signal_handler)
if not is_ipv6:
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
else:
sock = socket.socket(socket.AF_INET6,socket.SOCK_DGRAM)
print("UDPping %s via port %d with %d bytes of payload"% (IP,PORT,LEN))
sys.stdout.flush()
while True:
if count >= MAX_TEST_COUNT:
break
payload= random_string(LEN)
sock.sendto(payload.encode(), (IP, PORT))
time_of_send=time.time()
deadline = time.time() + INTERVAL/1000.0
received=0
rtt=0.0
while True:
timeout=deadline - time.time()
if timeout <0:
break
#print "timeout=",timeout
sock.settimeout(timeout)
try:
recv_data,addr = sock.recvfrom(65536)
if recv_data== payload.encode() and addr[0]==IP and addr[1]==PORT:
rtt=((time.time()-time_of_send)*1000)
print("Reply from",IP,"seq=%d"%count, "time=%.2f"%(rtt),"ms")
sys.stdout.flush()
received=1
break
except socket.timeout:
break
            except Exception:  # ignore stray packets and transient socket errors
                pass
count+= 1
if received==1:
count_of_received+=1
rtt_sum+=rtt
rtt_max=max(rtt_max,rtt)
rtt_min=min(rtt_min,rtt)
else:
print("Request timed out")
fail_count+=1
sys.stdout.flush()
time_remaining=deadline-time.time()
if(time_remaining>0):
time.sleep(time_remaining)
if fail_count >= MAX_TEST_COUNT:
exit(1)
|
the-stack_0_25612
|
import random
from datetime import timedelta
from typing import Any, Dict, List, Mapping, Optional, Sequence, Set, Union
from unittest import mock
import ujson
from django.conf import settings
from django.core.exceptions import ValidationError
from django.http import HttpRequest, HttpResponse
from django.utils.timezone import now as timezone_now
from zerver.decorator import JsonableError
from zerver.lib import cache
from zerver.lib.actions import (
bulk_add_subscriptions,
bulk_get_subscriber_user_ids,
bulk_remove_subscriptions,
can_access_stream_user_ids,
create_stream_if_needed,
do_add_default_stream,
do_add_streams_to_default_stream_group,
do_change_default_stream_group_description,
do_change_default_stream_group_name,
do_change_stream_post_policy,
do_change_user_role,
do_create_default_stream_group,
do_create_realm,
do_deactivate_stream,
do_deactivate_user,
do_get_streams,
do_remove_default_stream,
do_remove_default_stream_group,
do_remove_streams_from_default_stream_group,
do_set_realm_property,
ensure_stream,
gather_subscriptions,
gather_subscriptions_helper,
get_average_weekly_stream_traffic,
get_default_streams_for_realm,
get_stream,
lookup_default_stream_groups,
round_to_2_significant_digits,
validate_user_access_to_subscribers_helper,
)
from zerver.lib.message import aggregate_unread_data, get_raw_unread_data
from zerver.lib.response import json_error, json_success
from zerver.lib.stream_recipient import StreamRecipientMap
from zerver.lib.stream_subscription import (
get_active_subscriptions_for_stream_id,
num_subscribers_for_stream_id,
)
from zerver.lib.streams import (
access_stream_by_id,
access_stream_by_name,
create_streams_if_needed,
filter_stream_authorization,
list_to_streams,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import (
get_subscription,
queries_captured,
reset_emails_in_zulip_realm,
tornado_redirected_to_list,
)
from zerver.lib.test_runner import slow
from zerver.models import (
DefaultStream,
DefaultStreamGroup,
Message,
Realm,
Recipient,
Stream,
Subscription,
UserMessage,
UserProfile,
active_non_guest_user_ids,
flush_per_request_caches,
get_client,
get_default_stream_groups,
get_realm,
get_user,
get_user_profile_by_id_in_realm,
)
from zerver.views.streams import compose_views
class TestMiscStuff(ZulipTestCase):
def test_empty_results(self) -> None:
# These are essentially just tests to ensure line
# coverage for codepaths that won't ever really be
# called in practice.
user_profile = self.example_user('cordelia')
result = bulk_get_subscriber_user_ids(
stream_dicts=[],
user_profile=user_profile,
sub_dict={},
stream_recipient=StreamRecipientMap(),
)
self.assertEqual(result, {})
streams = do_get_streams(
user_profile=user_profile,
include_public=False,
include_subscribed=False,
include_all_active=False,
include_default=False,
)
self.assertEqual(streams, [])
class TestCreateStreams(ZulipTestCase):
def test_creating_streams(self) -> None:
stream_names = ['new1', 'new2', 'new3']
stream_descriptions = ['des1', 'des2', 'des3']
realm = get_realm('zulip')
# Test stream creation events.
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
ensure_stream(realm, "Public stream", invite_only=False)
self.assert_length(events, 1)
self.assertEqual(events[0]['event']['type'], 'stream')
self.assertEqual(events[0]['event']['op'], 'create')
# Send public stream creation event to all active users.
self.assertEqual(events[0]['users'], active_non_guest_user_ids(realm.id))
self.assertEqual(events[0]['event']['streams'][0]['name'], "Public stream")
events = []
with tornado_redirected_to_list(events):
ensure_stream(realm, "Private stream", invite_only=True)
self.assert_length(events, 1)
self.assertEqual(events[0]['event']['type'], 'stream')
self.assertEqual(events[0]['event']['op'], 'create')
# Send private stream creation event to only realm admins.
self.assertEqual(len(events[0]['users']), 2)
self.assertTrue(self.example_user("iago").id in events[0]['users'])
self.assertTrue(self.example_user("desdemona").id in events[0]['users'])
self.assertEqual(events[0]['event']['streams'][0]['name'], "Private stream")
new_streams, existing_streams = create_streams_if_needed(
realm,
[{"name": stream_name,
"description": stream_description,
"invite_only": True,
"stream_post_policy": Stream.STREAM_POST_POLICY_ADMINS}
for (stream_name, stream_description) in zip(stream_names, stream_descriptions)])
self.assertEqual(len(new_streams), 3)
self.assertEqual(len(existing_streams), 0)
actual_stream_names = {stream.name for stream in new_streams}
self.assertEqual(actual_stream_names, set(stream_names))
actual_stream_descriptions = {stream.description for stream in new_streams}
self.assertEqual(actual_stream_descriptions, set(stream_descriptions))
for stream in new_streams:
self.assertTrue(stream.invite_only)
self.assertTrue(stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS)
new_streams, existing_streams = create_streams_if_needed(
realm,
[{"name": stream_name,
"description": stream_description,
"invite_only": True}
for (stream_name, stream_description) in zip(stream_names, stream_descriptions)])
self.assertEqual(len(new_streams), 0)
self.assertEqual(len(existing_streams), 3)
actual_stream_names = {stream.name for stream in existing_streams}
self.assertEqual(actual_stream_names, set(stream_names))
actual_stream_descriptions = {stream.description for stream in existing_streams}
self.assertEqual(actual_stream_descriptions, set(stream_descriptions))
for stream in existing_streams:
self.assertTrue(stream.invite_only)
def test_create_api_multiline_description(self) -> None:
user = self.example_user("hamlet")
realm = user.realm
self.login_user(user)
post_data = {'subscriptions': ujson.dumps([{"name": 'new_stream',
"description": "multi\nline\ndescription"}]),
'invite_only': ujson.dumps(False)}
result = self.api_post(user, "/api/v1/users/me/subscriptions", post_data,
subdomain="zulip")
self.assert_json_success(result)
stream = get_stream("new_stream", realm)
self.assertEqual(stream.description, 'multi line description')
def test_history_public_to_subscribers_on_stream_creation(self) -> None:
realm = get_realm('zulip')
stream_dicts: List[Mapping[str, Any]] = [
{
"name": "publicstream",
"description": "Public stream with public history",
},
{
"name": "privatestream",
"description": "Private stream with non-public history",
"invite_only": True,
},
{
"name": "privatewithhistory",
"description": "Private stream with public history",
"invite_only": True,
"history_public_to_subscribers": True,
},
{
"name": "publictrywithouthistory",
"description": "Public stream without public history (disallowed)",
"invite_only": False,
"history_public_to_subscribers": False,
},
]
created, existing = create_streams_if_needed(realm, stream_dicts)
self.assertEqual(len(created), 4)
self.assertEqual(len(existing), 0)
for stream in created:
if stream.name == 'publicstream':
self.assertTrue(stream.history_public_to_subscribers)
if stream.name == 'privatestream':
self.assertFalse(stream.history_public_to_subscribers)
if stream.name == 'privatewithhistory':
self.assertTrue(stream.history_public_to_subscribers)
if stream.name == 'publictrywithouthistory':
self.assertTrue(stream.history_public_to_subscribers)
def test_history_public_to_subscribers_zephyr_realm(self) -> None:
realm = get_realm('zephyr')
stream, created = create_stream_if_needed(realm, "private_stream", invite_only=True)
self.assertTrue(created)
self.assertTrue(stream.invite_only)
self.assertFalse(stream.history_public_to_subscribers)
stream, created = create_stream_if_needed(realm, "public_stream", invite_only=False)
self.assertTrue(created)
self.assertFalse(stream.invite_only)
self.assertFalse(stream.history_public_to_subscribers)
def test_auto_mark_stream_created_message_as_read_for_stream_creator(self) -> None:
# This test relies on email == delivery_email for
# convenience.
reset_emails_in_zulip_realm()
realm = Realm.objects.get(name='Zulip Dev')
iago = self.example_user('iago')
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
aaron = self.example_user('aaron')
# Establish a stream for notifications.
announce_stream = ensure_stream(realm, "announce", False, "announcements here.")
realm.notifications_stream_id = announce_stream.id
realm.save(update_fields=['notifications_stream_id'])
self.subscribe(iago, announce_stream.name)
self.subscribe(hamlet, announce_stream.name)
notification_bot = UserProfile.objects.get(full_name="Notification Bot")
self.login_user(iago)
initial_message_count = Message.objects.count()
initial_usermessage_count = UserMessage.objects.count()
data = {
"subscriptions": '[{"name":"brand new stream","description":""}]',
"history_public_to_subscribers": 'true',
"invite_only": 'false',
"announce": 'true',
"principals": ujson.dumps([iago.id, aaron.id, cordelia.id, hamlet.id]),
"stream_post_policy": '1',
}
response = self.client_post("/json/users/me/subscriptions", data)
final_message_count = Message.objects.count()
final_usermessage_count = UserMessage.objects.count()
expected_response = {
"result": "success",
"msg": "",
"subscribed": {
"[email protected]": ["brand new stream"],
"[email protected]": ["brand new stream"],
"[email protected]": ["brand new stream"],
"[email protected]": ["brand new stream"],
},
"already_subscribed": {},
}
self.assertEqual(response.status_code, 200)
self.assertEqual(ujson.loads(response.content.decode()), expected_response)
# 2 messages should be created, one in announce and one in the new stream itself.
self.assertEqual(final_message_count - initial_message_count, 2)
# 4 UserMessages per subscriber: One for each of the subscribers, plus 1 for
# each user in the notifications stream.
announce_stream_subs = Subscription.objects.filter(recipient=announce_stream.recipient)
self.assertEqual(final_usermessage_count - initial_usermessage_count,
4 + announce_stream_subs.count())
def get_unread_stream_data(user: UserProfile) -> List[Dict[str, Any]]:
raw_unread_data = get_raw_unread_data(user)
aggregated_data = aggregate_unread_data(raw_unread_data)
return aggregated_data['streams']
stream_id = Stream.objects.get(name='brand new stream').id
iago_unread_messages = get_unread_stream_data(iago)
hamlet_unread_messages = get_unread_stream_data(hamlet)
# The stream creation messages should be unread for Hamlet
self.assertEqual(len(hamlet_unread_messages), 2)
# According to the code in zerver/views/streams/add_subscriptions_backend
# the notification stream message is sent first, then the new stream's message.
self.assertEqual(hamlet_unread_messages[0]['sender_ids'][0], notification_bot.id)
self.assertEqual(hamlet_unread_messages[1]['stream_id'], stream_id)
# But it should be marked as read for Iago, the stream creator.
self.assertEqual(len(iago_unread_messages), 0)
class RecipientTest(ZulipTestCase):
def test_recipient(self) -> None:
realm = get_realm('zulip')
stream = get_stream('Verona', realm)
recipient = Recipient.objects.get(
type_id=stream.id,
type=Recipient.STREAM,
)
self.assertEqual(str(recipient), f'<Recipient: Verona ({stream.id}, {Recipient.STREAM})>')
class StreamAdminTest(ZulipTestCase):
def test_make_stream_public(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.make_stream('private_stream', invite_only=True)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
params = {
'stream_name': ujson.dumps('private_stream'),
'is_private': ujson.dumps(False),
}
stream_id = get_stream('private_stream', user_profile.realm).id
result = self.client_patch(f"/json/streams/{stream_id}", params)
self.assert_json_error(result, 'Invalid stream id')
stream = self.subscribe(user_profile, 'private_stream')
self.assertFalse(stream.is_in_zephyr_realm)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
params = {
'stream_name': ujson.dumps('private_stream'),
'is_private': ujson.dumps(False),
}
result = self.client_patch(f"/json/streams/{stream_id}", params)
self.assert_json_success(result)
realm = user_profile.realm
stream = get_stream('private_stream', realm)
self.assertFalse(stream.invite_only)
self.assertTrue(stream.history_public_to_subscribers)
def test_make_stream_private(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
realm = user_profile.realm
self.make_stream('public_stream', realm=realm)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
params = {
'stream_name': ujson.dumps('public_stream'),
'is_private': ujson.dumps(True),
}
stream_id = get_stream('public_stream', realm).id
result = self.client_patch(f"/json/streams/{stream_id}", params)
self.assert_json_success(result)
stream = get_stream('public_stream', realm)
self.assertTrue(stream.invite_only)
self.assertFalse(stream.history_public_to_subscribers)
def test_make_stream_public_zephyr_mirror(self) -> None:
user_profile = self.mit_user('starnine')
self.login_user(user_profile)
realm = user_profile.realm
self.make_stream('target_stream', realm=realm, invite_only=True)
self.subscribe(user_profile, 'target_stream')
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
params = {
'stream_name': ujson.dumps('target_stream'),
'is_private': ujson.dumps(False),
}
stream_id = get_stream('target_stream', realm).id
result = self.client_patch(f"/json/streams/{stream_id}", params,
subdomain="zephyr")
self.assert_json_success(result)
stream = get_stream('target_stream', realm)
self.assertFalse(stream.invite_only)
self.assertFalse(stream.history_public_to_subscribers)
def test_make_stream_private_with_public_history(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
realm = user_profile.realm
self.make_stream('public_history_stream', realm=realm)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
params = {
'stream_name': ujson.dumps('public_history_stream'),
'is_private': ujson.dumps(True),
'history_public_to_subscribers': ujson.dumps(True),
}
stream_id = get_stream('public_history_stream', realm).id
result = self.client_patch(f"/json/streams/{stream_id}", params)
self.assert_json_success(result)
stream = get_stream('public_history_stream', realm)
self.assertTrue(stream.invite_only)
self.assertTrue(stream.history_public_to_subscribers)
def test_try_make_stream_public_with_private_history(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
realm = user_profile.realm
self.make_stream('public_stream', realm=realm)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
params = {
'stream_name': ujson.dumps('public_stream'),
'is_private': ujson.dumps(False),
'history_public_to_subscribers': ujson.dumps(False),
}
stream_id = get_stream('public_stream', realm).id
result = self.client_patch(f"/json/streams/{stream_id}", params)
self.assert_json_success(result)
stream = get_stream('public_stream', realm)
self.assertFalse(stream.invite_only)
self.assertTrue(stream.history_public_to_subscribers)
def test_deactivate_stream_backend(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
stream = self.make_stream('new_stream')
self.subscribe(user_profile, stream.name)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
result = self.client_delete(f'/json/streams/{stream.id}')
self.assert_json_success(result)
subscription_exists = get_active_subscriptions_for_stream_id(stream.id).filter(
user_profile=user_profile,
).exists()
self.assertFalse(subscription_exists)
def test_deactivate_stream_removes_default_stream(self) -> None:
stream = self.make_stream('new_stream')
do_add_default_stream(stream)
self.assertEqual(1, DefaultStream.objects.filter(stream_id=stream.id).count())
do_deactivate_stream(stream)
self.assertEqual(0, DefaultStream.objects.filter(stream_id=stream.id).count())
def test_deactivate_stream_removes_stream_from_default_stream_groups(self) -> None:
realm = get_realm('zulip')
streams_to_keep = []
for stream_name in ["stream1", "stream2"]:
stream = ensure_stream(realm, stream_name)
streams_to_keep.append(stream)
streams_to_remove = []
stream = ensure_stream(realm, "stream3")
streams_to_remove.append(stream)
all_streams = streams_to_keep + streams_to_remove
def get_streams(group: DefaultStreamGroup) -> List[Stream]:
return list(group.streams.all().order_by('name'))
group_name = "group1"
description = "This is group1"
do_create_default_stream_group(realm, group_name, description, all_streams)
default_stream_groups = get_default_stream_groups(realm)
self.assertEqual(get_streams(default_stream_groups[0]), all_streams)
do_deactivate_stream(streams_to_remove[0])
self.assertEqual(get_streams(default_stream_groups[0]), streams_to_keep)
def test_vacate_private_stream_removes_default_stream(self) -> None:
stream = self.make_stream('new_stream', invite_only=True)
self.subscribe(self.example_user("hamlet"), stream.name)
do_add_default_stream(stream)
self.assertEqual(1, DefaultStream.objects.filter(stream_id=stream.id).count())
self.unsubscribe(self.example_user("hamlet"), stream.name)
self.assertEqual(0, DefaultStream.objects.filter(stream_id=stream.id).count())
# Fetch stream again from database.
stream = Stream.objects.get(id=stream.id)
self.assertTrue(stream.deactivated)
def test_deactivate_stream_backend_requires_existing_stream(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.make_stream('new_stream')
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
result = self.client_delete('/json/streams/999999999')
self.assert_json_error(result, 'Invalid stream id')
def test_deactivate_stream_backend_requires_realm_admin(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, 'new_stream')
stream_id = get_stream('new_stream', user_profile.realm).id
result = self.client_delete(f'/json/streams/{stream_id}')
self.assert_json_error(result, 'Must be an organization administrator')
def test_private_stream_live_updates(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
self.make_stream('private_stream', invite_only=True)
self.subscribe(user_profile, 'private_stream')
self.subscribe(self.example_user("cordelia"), 'private_stream')
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
stream_id = get_stream('private_stream', user_profile.realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'description': ujson.dumps('Test description')})
self.assert_json_success(result)
# Should be just a description change event
self.assert_length(events, 1)
cordelia = self.example_user('cordelia')
prospero = self.example_user('prospero')
notified_user_ids = set(events[-1]['users'])
self.assertIn(user_profile.id, notified_user_ids)
self.assertIn(cordelia.id, notified_user_ids)
self.assertNotIn(prospero.id, notified_user_ids)
events = []
with tornado_redirected_to_list(events):
stream_id = get_stream('private_stream', user_profile.realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'new_name': ujson.dumps('whatever')})
self.assert_json_success(result)
# Should be a name event, an email address event and a notification event
self.assert_length(events, 3)
notified_user_ids = set(events[0]['users'])
self.assertIn(user_profile.id, notified_user_ids)
self.assertIn(cordelia.id, notified_user_ids)
self.assertNotIn(prospero.id, notified_user_ids)
notified_with_bot_users = events[-1]['users']
notified_with_bot_user_ids = []
notified_with_bot_user_ids.append(notified_with_bot_users[0]['id'])
notified_with_bot_user_ids.append(notified_with_bot_users[1]['id'])
self.assertIn(user_profile.id, notified_with_bot_user_ids)
self.assertIn(cordelia.id, notified_with_bot_user_ids)
self.assertNotIn(prospero.id, notified_with_bot_user_ids)
def test_rename_stream(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
realm = user_profile.realm
stream = self.subscribe(user_profile, 'stream_name1')
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
result = self.client_patch(f'/json/streams/{stream.id}',
{'new_name': ujson.dumps('stream_name1')})
self.assert_json_error(result, "Stream already has that name!")
result = self.client_patch(f'/json/streams/{stream.id}',
{'new_name': ujson.dumps('Denmark')})
self.assert_json_error(result, "Stream name 'Denmark' is already taken.")
result = self.client_patch(f'/json/streams/{stream.id}',
{'new_name': ujson.dumps('denmark ')})
self.assert_json_error(result, "Stream name 'denmark' is already taken.")
# Do a rename that is case-only--this should succeed.
result = self.client_patch(f'/json/streams/{stream.id}',
{'new_name': ujson.dumps('sTREAm_name1')})
self.assert_json_success(result)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
stream_id = get_stream('stream_name1', user_profile.realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'new_name': ujson.dumps('stream_name2')})
self.assert_json_success(result)
event = events[1]['event']
self.assertEqual(event, dict(
op='update',
type='stream',
property='name',
value='stream_name2',
stream_id=stream_id,
name='sTREAm_name1',
))
notified_user_ids = set(events[1]['users'])
self.assertRaises(Stream.DoesNotExist, get_stream, 'stream_name1', realm)
stream_name2_exists = get_stream('stream_name2', realm)
self.assertTrue(stream_name2_exists)
self.assertEqual(notified_user_ids, set(active_non_guest_user_ids(realm.id)))
self.assertIn(user_profile.id,
notified_user_ids)
self.assertIn(self.example_user('prospero').id,
notified_user_ids)
self.assertNotIn(self.example_user('polonius').id,
notified_user_ids)
        # Test case to handle a Unicode stream name change.
        # NOTE: encoding is needed when a Unicode string is passed as an argument.
with tornado_redirected_to_list(events):
stream_id = stream_name2_exists.id
result = self.client_patch(f'/json/streams/{stream_id}',
{'new_name': ujson.dumps('नया नाम'.encode())})
self.assert_json_success(result)
        # While querying, the system can handle Unicode strings.
stream_name_uni_exists = get_stream('नया नाम', realm)
self.assertTrue(stream_name_uni_exists)
        # Test case to handle changing a Unicode stream name to a newer name.
        # NOTE: a Unicode string as part of the URL is handled cleanly by the
        # client_patch call, so encoding the URL is not needed.
with tornado_redirected_to_list(events):
stream_id = stream_name_uni_exists.id
result = self.client_patch(f'/json/streams/{stream_id}',
{'new_name': ujson.dumps('नाम में क्या रक्खा हे'.encode())})
self.assert_json_success(result)
        # While querying, the system can handle Unicode strings.
self.assertRaises(Stream.DoesNotExist, get_stream, 'नया नाम', realm)
stream_name_new_uni_exists = get_stream('नाम में क्या रक्खा हे', realm)
self.assertTrue(stream_name_new_uni_exists)
        # Test case to change the name from one language to another.
with tornado_redirected_to_list(events):
stream_id = stream_name_new_uni_exists.id
result = self.client_patch(f'/json/streams/{stream_id}',
{'new_name': ujson.dumps('français'.encode())})
self.assert_json_success(result)
stream_name_fr_exists = get_stream('français', realm)
self.assertTrue(stream_name_fr_exists)
        # Test case to change the name to a mixed-language name.
with tornado_redirected_to_list(events):
stream_id = stream_name_fr_exists.id
result = self.client_patch(f'/json/streams/{stream_id}',
{'new_name': ujson.dumps('français name'.encode())})
self.assert_json_success(result)
stream_name_mixed_exists = get_stream('français name', realm)
self.assertTrue(stream_name_mixed_exists)
# Test case for notified users in private streams.
stream_private = self.make_stream('stream_private_name1', realm=user_profile.realm, invite_only=True)
self.subscribe(self.example_user('cordelia'), 'stream_private_name1')
del events[:]
with tornado_redirected_to_list(events):
stream_id = get_stream('stream_private_name1', realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'new_name': ujson.dumps('stream_private_name2')})
self.assert_json_success(result)
notified_user_ids = set(events[1]['users'])
self.assertEqual(notified_user_ids, can_access_stream_user_ids(stream_private))
self.assertIn(self.example_user('cordelia').id, notified_user_ids)
# An important corner case is that all organization admins are notified.
self.assertIn(self.example_user('iago').id, notified_user_ids)
# The current user, Hamlet was made an admin and thus should be notified too.
self.assertIn(user_profile.id, notified_user_ids)
self.assertNotIn(self.example_user('prospero').id,
notified_user_ids)
def test_rename_stream_requires_realm_admin(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.make_stream('stream_name1')
stream_id = get_stream('stream_name1', user_profile.realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'new_name': ujson.dumps('stream_name2')})
self.assert_json_error(result, 'Must be an organization administrator')
def test_notify_on_stream_rename(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.make_stream('stream_name1')
stream = self.subscribe(user_profile, 'stream_name1')
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
result = self.client_patch(f'/json/streams/{stream.id}',
{'new_name': ujson.dumps('stream_name2')})
self.assert_json_success(result)
# Inspect the notification message sent
message = self.get_last_message()
actual_stream = Stream.objects.get(id=message.recipient.type_id)
message_content = f'@_**King Hamlet|{user_profile.id}** renamed stream **stream_name1** to **stream_name2**.'
self.assertEqual(actual_stream.name, 'stream_name2')
self.assertEqual(actual_stream.realm_id, user_profile.realm_id)
self.assertEqual(message.recipient.type, Recipient.STREAM)
self.assertEqual(message.content, message_content)
self.assertEqual(message.sender.email, '[email protected]')
self.assertEqual(message.sender.realm, get_realm(settings.SYSTEM_BOT_REALM))
def test_realm_admin_can_update_unsub_private_stream(self) -> None:
iago = self.example_user('iago')
hamlet = self.example_user('hamlet')
self.login_user(iago)
result = self.common_subscribe_to_streams(iago, ["private_stream"],
dict(principals=ujson.dumps([hamlet.id])),
invite_only=True)
self.assert_json_success(result)
stream_id = get_stream('private_stream', iago.realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'new_name': ujson.dumps('new_private_stream')})
self.assert_json_success(result)
result = self.client_patch(f'/json/streams/{stream_id}',
{'new_description': ujson.dumps('new description')})
self.assert_json_success(result)
# But cannot change stream type.
result = self.client_patch(f'/json/streams/{stream_id}',
{'stream_name': ujson.dumps('private_stream'),
'is_private': ujson.dumps(True)})
self.assert_json_error(result, "Invalid stream id")
def test_change_stream_description(self) -> None:
user_profile = self.example_user('iago')
self.login_user(user_profile)
realm = user_profile.realm
self.subscribe(user_profile, 'stream_name1')
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
stream_id = get_stream('stream_name1', realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'description': ujson.dumps('Test description')})
self.assert_json_success(result)
event = events[0]['event']
self.assertEqual(event, dict(
op='update',
type='stream',
property='description',
value='Test description',
rendered_description='<p>Test description</p>',
stream_id=stream_id,
name='stream_name1',
))
notified_user_ids = set(events[0]['users'])
stream = get_stream('stream_name1', realm)
self.assertEqual(notified_user_ids, set(active_non_guest_user_ids(realm.id)))
self.assertIn(user_profile.id,
notified_user_ids)
self.assertIn(self.example_user('prospero').id,
notified_user_ids)
self.assertNotIn(self.example_user('polonius').id,
notified_user_ids)
self.assertEqual('Test description', stream.description)
result = self.client_patch(f'/json/streams/{stream_id}',
{'description': ujson.dumps('a' * 1025)})
self.assert_json_error(
result,
f"description is too long (limit: {Stream.MAX_DESCRIPTION_LENGTH} characters)",
)
result = self.client_patch(f'/json/streams/{stream_id}',
{'description': ujson.dumps('a\nmulti\nline\ndescription')})
self.assert_json_success(result)
stream = get_stream('stream_name1', realm)
self.assertEqual(stream.description, 'a multi line description')
# Verify that we don't render inline URL previews in this code path.
with self.settings(INLINE_URL_EMBED_PREVIEW=True):
result = self.client_patch(f'/json/streams/{stream_id}',
{'description': ujson.dumps('See https://zulip.com/team')})
self.assert_json_success(result)
stream = get_stream('stream_name1', realm)
self.assertEqual(
stream.rendered_description,
'<p>See <a href="https://zulip.com/team">https://zulip.com/team</a></p>',
)
def test_change_stream_description_requires_realm_admin(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, 'stream_name1')
do_change_user_role(user_profile, UserProfile.ROLE_MEMBER)
stream_id = get_stream('stream_name1', user_profile.realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'description': ujson.dumps('Test description')})
self.assert_json_error(result, 'Must be an organization administrator')
def test_change_to_stream_post_policy_admins(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, 'stream_name1')
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
stream_id = get_stream('stream_name1', user_profile.realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'is_announcement_only': ujson.dumps(True)})
self.assert_json_success(result)
stream = get_stream('stream_name1', user_profile.realm)
self.assertTrue(stream.stream_post_policy == Stream.STREAM_POST_POLICY_ADMINS)
def test_change_stream_post_policy_requires_realm_admin(self) -> None:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
self.subscribe(user_profile, 'stream_name1')
do_change_user_role(user_profile, UserProfile.ROLE_MEMBER)
do_set_realm_property(user_profile.realm, 'waiting_period_threshold', 10)
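# Helper: set the account's age, confirm is_new_member, and verify that a
# non-admin cannot change the stream post policy.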
def test_non_admin(how_old: int, is_new: bool, policy: int) -> None:
user_profile.date_joined = timezone_now() - timedelta(days=how_old)
user_profile.save()
self.assertEqual(user_profile.is_new_member, is_new)
stream_id = get_stream('stream_name1', user_profile.realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'stream_post_policy': ujson.dumps(policy)})
self.assert_json_error(result, 'Must be an organization administrator')
policies = [Stream.STREAM_POST_POLICY_ADMINS, Stream.STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS]
for policy in policies:
test_non_admin(how_old=15, is_new=False, policy=policy)
test_non_admin(how_old=5, is_new=True, policy=policy)
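# As a realm admin, the same policy changes should now succeed.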
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
for policy in policies:
stream_id = get_stream('stream_name1', user_profile.realm).id
result = self.client_patch(f'/json/streams/{stream_id}',
{'stream_post_policy': ujson.dumps(policy)})
self.assert_json_success(result)
stream = get_stream('stream_name1', user_profile.realm)
self.assertEqual(stream.stream_post_policy, policy)
def set_up_stream_for_deletion(self, stream_name: str, invite_only: bool=False,
subscribed: bool=True) -> Stream:
"""
Create a stream for deletion by an administrator.
"""
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
stream = self.make_stream(stream_name, invite_only=invite_only)
# For testing deleting streams you aren't on.
if subscribed:
self.subscribe(user_profile, stream_name)
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
return stream
def delete_stream(self, stream: Stream) -> None:
"""
Delete the stream and assess the result.
"""
active_name = stream.name
realm = stream.realm
stream_id = stream.id
# Simulate that a stream by the same name has already been
# deactivated, just to exercise our renaming logic:
ensure_stream(realm, "!DEACTIVATED:" + active_name)
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
result = self.client_delete('/json/streams/' + str(stream_id))
self.assert_json_success(result)
# We no longer send subscription events for stream deactivations.
sub_events = [e for e in events if e['event']['type'] == 'subscription']
self.assertEqual(sub_events, [])
stream_events = [e for e in events if e['event']['type'] == 'stream']
self.assertEqual(len(stream_events), 1)
event = stream_events[0]['event']
self.assertEqual(event['op'], 'delete')
self.assertEqual(event['streams'][0]['stream_id'], stream.id)
with self.assertRaises(Stream.DoesNotExist):
Stream.objects.get(realm=get_realm("zulip"), name=active_name)
# A deleted stream's name is changed, is deactivated, is invite-only,
# and has no subscribers.
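# Because a stream named "!DEACTIVATED:<name>" was created above, the rename
# on deactivation adds an extra "!" to avoid a collision.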
deactivated_stream_name = "!!DEACTIVATED:" + active_name
deactivated_stream = get_stream(deactivated_stream_name, realm)
self.assertTrue(deactivated_stream.deactivated)
self.assertTrue(deactivated_stream.invite_only)
self.assertEqual(deactivated_stream.name, deactivated_stream_name)
subscribers = self.users_subscribed_to_stream(
deactivated_stream_name, realm)
self.assertEqual(subscribers, [])
# It doesn't show up in the list of public streams anymore.
result = self.client_get("/json/streams?include_subscribed=false")
public_streams = [s["name"] for s in result.json()["streams"]]
self.assertNotIn(active_name, public_streams)
self.assertNotIn(deactivated_stream_name, public_streams)
# Even if you could guess the new name, you can't subscribe to it.
result = self.client_post(
"/json/users/me/subscriptions",
{"subscriptions": ujson.dumps([{"name": deactivated_stream_name}])})
self.assert_json_error(
result, f"Unable to access stream ({deactivated_stream_name}).")
def test_you_must_be_realm_admin(self) -> None:
"""
You must be an administrator of the stream's own realm to delete it.
"""
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
other_realm = Realm.objects.create(string_id='other')
stream = self.make_stream('other_realm_stream', realm=other_realm)
result = self.client_delete('/json/streams/' + str(stream.id))
self.assert_json_error(result, 'Must be an organization administrator')
# Even becoming a realm admin doesn't help us for an out-of-realm
# stream.
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
result = self.client_delete('/json/streams/' + str(stream.id))
self.assert_json_error(result, 'Invalid stream id')
def test_delete_public_stream(self) -> None:
"""
When an administrator deletes a public stream, that stream is not
visible to users at all anymore.
"""
stream = self.set_up_stream_for_deletion("newstream")
self.delete_stream(stream)
def test_delete_private_stream(self) -> None:
"""
Administrators can delete private streams they are on.
"""
stream = self.set_up_stream_for_deletion("newstream", invite_only=True)
self.delete_stream(stream)
def test_delete_streams_youre_not_on(self) -> None:
"""
Administrators can delete streams they aren't on, including
private streams in their realm.
"""
pub_stream = self.set_up_stream_for_deletion(
"pubstream", subscribed=False)
self.delete_stream(pub_stream)
priv_stream = self.set_up_stream_for_deletion(
"privstream", subscribed=False, invite_only=True)
self.delete_stream(priv_stream)
def attempt_unsubscribe_of_principal(self, query_count: int, target_users: List[UserProfile],
is_admin: bool=False, is_subbed: bool=True, invite_only: bool=False,
target_users_subbed: bool=True, using_legacy_emails: bool=False,
other_sub_users: Sequence[UserProfile]=[]) -> HttpResponse:
# Set up the main user, who is in most cases an admin.
if is_admin:
user_profile = self.example_user('iago')
else:
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
# Set up the stream.
stream_name = "hümbüǵ"
self.make_stream(stream_name, invite_only=invite_only)
# Set up the principal to be unsubscribed.
principals = []
for user in target_users:
if using_legacy_emails:
principals.append(user.email)
else:
principals.append(user.id)
# Subscribe the admin and/or principal as specified in the flags.
if is_subbed:
self.subscribe(user_profile, stream_name)
if target_users_subbed:
for user in target_users:
self.subscribe(user, stream_name)
for user in other_sub_users:
self.subscribe(user, stream_name)
with queries_captured() as queries:
result = self.client_delete(
"/json/users/me/subscriptions",
{"subscriptions": ujson.dumps([stream_name]),
"principals": ujson.dumps(principals)})
self.assert_length(queries, query_count)
# If the removal succeeded, then assert that the target users are no longer subscribed.
if result.status_code not in [400]:
subbed_users = self.users_subscribed_to_stream(stream_name, user_profile.realm)
for user in target_users:
self.assertNotIn(user, subbed_users)
return result
def test_cant_remove_others_from_stream(self) -> None:
"""
If you're not an admin, you can't remove other people from streams.
"""
result = self.attempt_unsubscribe_of_principal(
query_count=3, target_users=[self.example_user('cordelia')], is_admin=False, is_subbed=True,
invite_only=False, target_users_subbed=True)
self.assert_json_error(
result, "This action requires administrative rights")
def test_admin_remove_others_from_public_stream(self) -> None:
"""
If you're an admin, you can remove people from public streams, even
those you aren't on.
"""
result = self.attempt_unsubscribe_of_principal(
query_count=21, target_users=[self.example_user('cordelia')], is_admin=True, is_subbed=True,
invite_only=False, target_users_subbed=True)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 1)
self.assertEqual(len(json["not_removed"]), 0)
def test_admin_remove_multiple_users_from_stream(self) -> None:
"""
If you're an admin, you can remove multiple users from a stream.
"""
result = self.attempt_unsubscribe_of_principal(
query_count=30, target_users=[self.example_user('cordelia'), self.example_user('prospero')],
is_admin=True, is_subbed=True, invite_only=False, target_users_subbed=True)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 2)
self.assertEqual(len(json["not_removed"]), 0)
def test_admin_remove_others_from_subbed_private_stream(self) -> None:
"""
If you're an admin, you can remove other people from private streams you
are on.
"""
result = self.attempt_unsubscribe_of_principal(
query_count=21, target_users=[self.example_user('cordelia')], is_admin=True, is_subbed=True,
invite_only=True, target_users_subbed=True)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 1)
self.assertEqual(len(json["not_removed"]), 0)
def test_admin_remove_others_from_unsubbed_private_stream(self) -> None:
"""
If you're an admin, you can remove people from private
streams you aren't on.
"""
result = self.attempt_unsubscribe_of_principal(
query_count=21, target_users=[self.example_user('cordelia')], is_admin=True, is_subbed=False,
invite_only=True, target_users_subbed=True, other_sub_users=[self.example_user("othello")])
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 1)
self.assertEqual(len(json["not_removed"]), 0)
def test_cant_remove_others_from_stream_legacy_emails(self) -> None:
result = self.attempt_unsubscribe_of_principal(
query_count=3, is_admin=False, is_subbed=True, invite_only=False,
target_users=[self.example_user('cordelia')], target_users_subbed=True,
using_legacy_emails=True)
self.assert_json_error(
result, "This action requires administrative rights")
def test_admin_remove_others_from_stream_legacy_emails(self) -> None:
result = self.attempt_unsubscribe_of_principal(
query_count=21, target_users=[self.example_user('cordelia')], is_admin=True, is_subbed=True,
invite_only=False, target_users_subbed=True, using_legacy_emails=True)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 1)
self.assertEqual(len(json["not_removed"]), 0)
def test_admin_remove_multiple_users_from_stream_legacy_emails(self) -> None:
result = self.attempt_unsubscribe_of_principal(
query_count=30, target_users=[self.example_user('cordelia'), self.example_user('prospero')],
is_admin=True, is_subbed=True, invite_only=False, target_users_subbed=True, using_legacy_emails=True)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 2)
self.assertEqual(len(json["not_removed"]), 0)
def test_create_stream_policy_setting(self) -> None:
"""
When realm.create_stream_policy is Realm.POLICY_MEMBERS_ONLY, any user can create a stream.
When it is Realm.POLICY_ADMINS_ONLY, only admins can create streams.
When it is Realm.POLICY_FULL_MEMBERS_ONLY, only admins and users whose accounts
are older than the waiting period can create streams.
"""
user_profile = self.example_user('hamlet')
user_profile.date_joined = timezone_now()
user_profile.save()
self.login_user(user_profile)
do_change_user_role(user_profile, UserProfile.ROLE_MEMBER)
# Allow all members to create streams.
do_set_realm_property(user_profile.realm, 'create_stream_policy',
Realm.POLICY_MEMBERS_ONLY)
# Set waiting period to 10 days.
do_set_realm_property(user_profile.realm, 'waiting_period_threshold', 10)
# Can successfully create stream despite being less than waiting period and not an admin,
# due to create stream policy.
stream_name = ['all_members']
result = self.common_subscribe_to_streams(user_profile, stream_name)
self.assert_json_success(result)
# Allow only administrators to create streams.
do_set_realm_property(user_profile.realm, 'create_stream_policy',
Realm.POLICY_ADMINS_ONLY)
# Cannot create stream because not an admin.
stream_name = ['admins_only']
result = self.common_subscribe_to_streams(user_profile, stream_name)
self.assert_json_error(result, 'User cannot create streams.')
# Make current user an admin.
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
# Can successfully create stream as user is now an admin.
stream_name = ['admins_only']
result = self.common_subscribe_to_streams(user_profile, stream_name)
self.assert_json_success(result)
# Allow users older than the waiting period to create streams.
do_set_realm_property(user_profile.realm, 'create_stream_policy',
Realm.POLICY_FULL_MEMBERS_ONLY)
# Can successfully create stream despite being under waiting period because user is admin.
stream_name = ['waiting_period_as_admin']
result = self.common_subscribe_to_streams(user_profile, stream_name)
self.assert_json_success(result)
# Make current user no longer an admin.
do_change_user_role(user_profile, UserProfile.ROLE_MEMBER)
# Cannot create stream because user is not an admin and is not older than the waiting
# period.
stream_name = ['waiting_period']
result = self.common_subscribe_to_streams(user_profile, stream_name)
self.assert_json_error(result, 'User cannot create streams.')
# Make the user account 11 days old, which is past the 10-day waiting period.
user_profile.date_joined = timezone_now() - timedelta(days=11)
user_profile.save()
# Can successfully create stream now that account is old enough.
stream_name = ['waiting_period']
result = self.common_subscribe_to_streams(user_profile, stream_name)
self.assert_json_success(result)
def test_invite_to_stream_by_invite_period_threshold(self) -> None:
"""
Non-admin users whose account age is greater than or equal to the waiting
period threshold should be able to invite others to a stream.
"""
hamlet_user = self.example_user('hamlet')
hamlet_user.date_joined = timezone_now()
hamlet_user.save()
cordelia_user = self.example_user('cordelia')
cordelia_user.date_joined = timezone_now()
cordelia_user.save()
do_set_realm_property(hamlet_user.realm, 'invite_to_stream_policy',
Realm.POLICY_FULL_MEMBERS_ONLY)
cordelia_user_id = cordelia_user.id
self.login_user(hamlet_user)
do_change_user_role(hamlet_user, UserProfile.ROLE_REALM_ADMINISTRATOR)
# Hamlet creates a stream as an admin.
stream_name = ['waitingperiodtest']
result = self.common_subscribe_to_streams(hamlet_user, stream_name)
self.assert_json_success(result)
# Members can only invite users to the stream once their own account is at least ten days old.
do_change_user_role(hamlet_user, UserProfile.ROLE_MEMBER)
do_set_realm_property(hamlet_user.realm, 'waiting_period_threshold', 10)
# Attempt and fail to invite Cordelia to the stream.
result = self.common_subscribe_to_streams(hamlet_user, stream_name, {"principals": ujson.dumps([cordelia_user_id])})
self.assert_json_error(result,
"Your account is too new to modify other users' subscriptions.")
# With a waiting period of zero, any member can invite users.
do_set_realm_property(hamlet_user.realm, 'waiting_period_threshold', 0)
# Attempt and succeed to invite Cordelia to the stream.
result = self.common_subscribe_to_streams(hamlet_user, stream_name, {"principals": ujson.dumps([cordelia_user_id])})
self.assert_json_success(result)
# Set the threshold to 20 days.
do_set_realm_property(hamlet_user.realm, 'waiting_period_threshold', 20)
# Make Hamlet's account 21 days old, past the new threshold.
hamlet_user.date_joined = timezone_now() - timedelta(days=21)
hamlet_user.save()
# Unsubscribe Cordelia.
self.unsubscribe(cordelia_user, stream_name[0])
# Attempt and succeed to invite Cordelia to the stream again.
result = self.common_subscribe_to_streams(hamlet_user, stream_name, {"principals": ujson.dumps([cordelia_user_id])})
self.assert_json_success(result)
def test_remove_already_not_subbed(self) -> None:
"""
Trying to unsubscribe someone who already isn't subscribed to a stream
fails gracefully.
"""
result = self.attempt_unsubscribe_of_principal(
query_count=11, target_users=[self.example_user('cordelia')], is_admin=True, is_subbed=False,
invite_only=False, target_users_subbed=False)
json = self.assert_json_success(result)
self.assertEqual(len(json["removed"]), 0)
self.assertEqual(len(json["not_removed"]), 1)
def test_remove_invalid_user(self) -> None:
"""
Trying to unsubscribe an invalid user from a stream fails gracefully.
"""
admin = self.example_user('iago')
self.login_user(admin)
self.assertTrue(admin.is_realm_admin)
stream_name = "hümbüǵ"
self.make_stream(stream_name)
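# 99 is not the user id of any existing user, so this principal is invalid.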
result = self.client_delete("/json/users/me/subscriptions",
{"subscriptions": ujson.dumps([stream_name]),
"principals": ujson.dumps([99])})
self.assert_json_error(
result,
"User not authorized to execute queries on behalf of '99'",
status_code=403)
class DefaultStreamTest(ZulipTestCase):
def get_default_stream_names(self, realm: Realm) -> Set[str]:
streams = get_default_streams_for_realm(realm.id)
stream_names = [s.name for s in streams]
return set(stream_names)
def test_add_and_remove_default_stream(self) -> None:
realm = get_realm("zulip")
stream = ensure_stream(realm, "Added Stream")
orig_stream_names = self.get_default_stream_names(realm)
do_add_default_stream(stream)
new_stream_names = self.get_default_stream_names(realm)
added_stream_names = new_stream_names - orig_stream_names
self.assertEqual(added_stream_names, {'Added Stream'})
# idempotency: a second call to do_add_default_stream should be a noop
do_add_default_stream(stream)
self.assertEqual(self.get_default_stream_names(realm), new_stream_names)
# start removing
do_remove_default_stream(stream)
self.assertEqual(self.get_default_stream_names(realm), orig_stream_names)
# idempotency: a second call to do_remove_default_stream should be a noop
do_remove_default_stream(stream)
self.assertEqual(self.get_default_stream_names(realm), orig_stream_names)
def test_api_calls(self) -> None:
user_profile = self.example_user('hamlet')
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
self.login_user(user_profile)
stream_name = 'stream ADDED via api'
stream = ensure_stream(user_profile.realm, stream_name)
result = self.client_post('/json/default_streams', dict(stream_id=stream.id))
self.assert_json_success(result)
self.assertTrue(stream_name in self.get_default_stream_names(user_profile.realm))
# Verify that the new default stream shows up in /json/streams.
self.subscribe(user_profile, stream_name)
payload = dict(
include_public='true',
include_default='true',
)
result = self.client_get('/json/streams', payload)
self.assert_json_success(result)
streams = result.json()['streams']
default_streams = {
stream['name']
for stream in streams
if stream['is_default']
}
self.assertEqual(default_streams, {stream_name})
other_streams = {
stream['name']
for stream in streams
if not stream['is_default']
}
self.assertTrue(len(other_streams) > 0)
# and remove it
result = self.client_delete('/json/default_streams', dict(stream_id=stream.id))
self.assert_json_success(result)
self.assertFalse(stream_name in self.get_default_stream_names(user_profile.realm))
# Test admin can't add unsubscribed private stream
stream_name = "private_stream"
stream = self.make_stream(stream_name, invite_only=True)
self.subscribe(self.example_user('iago'), stream_name)
result = self.client_post('/json/default_streams', dict(stream_id=stream.id))
self.assert_json_error(result, "Invalid stream id")
self.subscribe(user_profile, stream_name)
result = self.client_post('/json/default_streams', dict(stream_id=stream.id))
self.assert_json_success(result)
self.assertTrue(stream_name in self.get_default_stream_names(user_profile.realm))
# Test admin can remove unsubscribed private stream
self.unsubscribe(user_profile, stream_name)
result = self.client_delete('/json/default_streams', dict(stream_id=stream.id))
self.assert_json_success(result)
self.assertFalse(stream_name in self.get_default_stream_names(user_profile.realm))
class DefaultStreamGroupTest(ZulipTestCase):
def test_create_update_and_remove_default_stream_group(self) -> None:
realm = get_realm("zulip")
# Test creating new default stream group
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 0)
streams = []
for stream_name in ["stream1", "stream2", "stream3"]:
stream = ensure_stream(realm, stream_name)
streams.append(stream)
def get_streams(group: DefaultStreamGroup) -> List[Stream]:
return list(group.streams.all().order_by('name'))
group_name = "group1"
description = "This is group1"
do_create_default_stream_group(realm, group_name, description, streams)
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 1)
self.assertEqual(default_stream_groups[0].name, group_name)
self.assertEqual(default_stream_groups[0].description, description)
self.assertEqual(get_streams(default_stream_groups[0]), streams)
# Test adding streams to existing default stream group
group = lookup_default_stream_groups(["group1"], realm)[0]
new_stream_names = ["stream4", "stream5"]
new_streams = []
for new_stream_name in new_stream_names:
new_stream = ensure_stream(realm, new_stream_name)
new_streams.append(new_stream)
streams.append(new_stream)
do_add_streams_to_default_stream_group(realm, group, new_streams)
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 1)
self.assertEqual(default_stream_groups[0].name, group_name)
self.assertEqual(get_streams(default_stream_groups[0]), streams)
# Test removing streams from existing default stream group
do_remove_streams_from_default_stream_group(realm, group, new_streams)
remaining_streams = streams[0:3]
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 1)
self.assertEqual(default_stream_groups[0].name, group_name)
self.assertEqual(get_streams(default_stream_groups[0]), remaining_streams)
# Test changing default stream group description
new_description = "group1 new description"
do_change_default_stream_group_description(realm, group, new_description)
default_stream_groups = get_default_stream_groups(realm)
self.assertEqual(default_stream_groups[0].description, new_description)
self.assert_length(default_stream_groups, 1)
# Test changing default stream group name
new_group_name = "new group1"
do_change_default_stream_group_name(realm, group, new_group_name)
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 1)
self.assertEqual(default_stream_groups[0].name, new_group_name)
self.assertEqual(get_streams(default_stream_groups[0]), remaining_streams)
# Test removing default stream group
do_remove_default_stream_group(realm, group)
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 0)
# Test creating a default stream group which contains a default stream
do_add_default_stream(remaining_streams[0])
with self.assertRaisesRegex(
JsonableError, "'stream1' is a default stream and cannot be added to 'new group1'"):
do_create_default_stream_group(realm, new_group_name, "This is group1", remaining_streams)
def test_api_calls(self) -> None:
self.login('hamlet')
user_profile = self.example_user('hamlet')
realm = user_profile.realm
do_change_user_role(user_profile, UserProfile.ROLE_REALM_ADMINISTRATOR)
# Test creating new default stream group
stream_names = ["stream1", "stream2", "stream3"]
group_name = "group1"
description = "This is group1"
streams = []
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 0)
for stream_name in stream_names:
stream = ensure_stream(realm, stream_name)
streams.append(stream)
result = self.client_post('/json/default_stream_groups/create',
{"group_name": group_name, "description": description,
"stream_names": ujson.dumps(stream_names)})
self.assert_json_success(result)
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 1)
self.assertEqual(default_stream_groups[0].name, group_name)
self.assertEqual(default_stream_groups[0].description, description)
self.assertEqual(list(default_stream_groups[0].streams.all().order_by("id")), streams)
# Creating another group with the same name should fail.
result = self.client_post('/json/default_stream_groups/create',
{"group_name": group_name, "description": description,
"stream_names": ujson.dumps(stream_names)})
self.assert_json_error(result, "Default stream group 'group1' already exists")
# Test adding streams to existing default stream group
group_id = default_stream_groups[0].id
new_stream_names = ["stream4", "stream5"]
new_streams = []
for new_stream_name in new_stream_names:
new_stream = ensure_stream(realm, new_stream_name)
new_streams.append(new_stream)
streams.append(new_stream)
result = self.client_patch(f"/json/default_stream_groups/{group_id}/streams",
{"stream_names": ujson.dumps(new_stream_names)})
self.assert_json_error(result, "Missing 'op' argument")
result = self.client_patch(f"/json/default_stream_groups/{group_id}/streams",
{"op": "invalid", "stream_names": ujson.dumps(new_stream_names)})
self.assert_json_error(result, 'Invalid value for "op". Specify one of "add" or "remove".')
result = self.client_patch("/json/default_stream_groups/12345/streams",
{"op": "add", "stream_names": ujson.dumps(new_stream_names)})
self.assert_json_error(result, "Default stream group with id '12345' does not exist.")
result = self.client_patch(f"/json/default_stream_groups/{group_id}/streams", {"op": "add"})
self.assert_json_error(result, "Missing 'stream_names' argument")
do_add_default_stream(new_streams[0])
result = self.client_patch(f"/json/default_stream_groups/{group_id}/streams",
{"op": "add", "stream_names": ujson.dumps(new_stream_names)})
self.assert_json_error(result, "'stream4' is a default stream and cannot be added to 'group1'")
do_remove_default_stream(new_streams[0])
result = self.client_patch(f"/json/default_stream_groups/{group_id}/streams",
{"op": "add", "stream_names": ujson.dumps(new_stream_names)})
self.assert_json_success(result)
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 1)
self.assertEqual(default_stream_groups[0].name, group_name)
self.assertEqual(list(default_stream_groups[0].streams.all().order_by('name')), streams)
result = self.client_patch(f"/json/default_stream_groups/{group_id}/streams",
{"op": "add", "stream_names": ujson.dumps(new_stream_names)})
self.assert_json_error(result,
"Stream 'stream4' is already present in default stream group 'group1'")
# Test removing streams from default stream group
result = self.client_patch("/json/default_stream_groups/12345/streams",
{"op": "remove", "stream_names": ujson.dumps(new_stream_names)})
self.assert_json_error(result, "Default stream group with id '12345' does not exist.")
result = self.client_patch(f"/json/default_stream_groups/{group_id}/streams",
{"op": "remove", "stream_names": ujson.dumps(["random stream name"])})
self.assert_json_error(result, "Invalid stream name 'random stream name'")
streams.remove(new_streams[0])
result = self.client_patch(f"/json/default_stream_groups/{group_id}/streams",
{"op": "remove", "stream_names": ujson.dumps([new_stream_names[0]])})
self.assert_json_success(result)
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 1)
self.assertEqual(default_stream_groups[0].name, group_name)
self.assertEqual(list(default_stream_groups[0].streams.all().order_by('name')), streams)
result = self.client_patch(f"/json/default_stream_groups/{group_id}/streams",
{"op": "remove", "stream_names": ujson.dumps(new_stream_names)})
self.assert_json_error(result, "Stream 'stream4' is not present in default stream group 'group1'")
# Test changing description of default stream group
new_description = "new group1 description"
result = self.client_patch(f"/json/default_stream_groups/{group_id}",
{"group_name": group_name, "op": "change"})
self.assert_json_error(result, 'You must pass "new_description" or "new_group_name".')
result = self.client_patch("/json/default_stream_groups/12345",
{"op": "change", "new_description": ujson.dumps(new_description)})
self.assert_json_error(result, "Default stream group with id '12345' does not exist.")
result = self.client_patch(f"/json/default_stream_groups/{group_id}",
{"group_name": group_name,
"op": "change",
"new_description": ujson.dumps(new_description)})
self.assert_json_success(result)
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 1)
self.assertEqual(default_stream_groups[0].name, group_name)
self.assertEqual(default_stream_groups[0].description, new_description)
# Test changing name of default stream group
new_group_name = "new group1"
do_create_default_stream_group(realm, "group2", "", [])
result = self.client_patch(f"/json/default_stream_groups/{group_id}",
{"op": "change", "new_group_name": ujson.dumps("group2")})
self.assert_json_error(result, "Default stream group 'group2' already exists")
new_group = lookup_default_stream_groups(["group2"], realm)[0]
do_remove_default_stream_group(realm, new_group)
result = self.client_patch(f"/json/default_stream_groups/{group_id}",
{"op": "change", "new_group_name": ujson.dumps(group_name)})
self.assert_json_error(result, "This default stream group is already named 'group1'")
result = self.client_patch(f"/json/default_stream_groups/{group_id}",
{"op": "change", "new_group_name": ujson.dumps(new_group_name)})
self.assert_json_success(result)
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 1)
self.assertEqual(default_stream_groups[0].name, new_group_name)
self.assertEqual(default_stream_groups[0].description, new_description)
# Test deleting a default stream group
result = self.client_delete(f'/json/default_stream_groups/{group_id}')
self.assert_json_success(result)
default_stream_groups = get_default_stream_groups(realm)
self.assert_length(default_stream_groups, 0)
result = self.client_delete(f'/json/default_stream_groups/{group_id}')
self.assert_json_error(result, f"Default stream group with id '{group_id}' does not exist.")
def test_invalid_default_stream_group_name(self) -> None:
self.login('iago')
user_profile = self.example_user('iago')
realm = user_profile.realm
stream_names = ["stream1", "stream2", "stream3"]
description = "This is group1"
streams = []
for stream_name in stream_names:
stream = ensure_stream(realm, stream_name)
streams.append(stream)
result = self.client_post('/json/default_stream_groups/create',
{"group_name": "", "description": description,
"stream_names": ujson.dumps(stream_names)})
self.assert_json_error(result, "Invalid default stream group name ''")
result = self.client_post('/json/default_stream_groups/create',
{"group_name": 'x'*100, "description": description,
"stream_names": ujson.dumps(stream_names)})
self.assert_json_error(result, "Default stream group name too long (limit: {} characters)"
.format(DefaultStreamGroup.MAX_NAME_LENGTH))
result = self.client_post('/json/default_stream_groups/create',
{"group_name": "abc\000", "description": description,
"stream_names": ujson.dumps(stream_names)})
self.assert_json_error(result, "Default stream group name 'abc\000' contains NULL (0x00) characters.")
# Also test that lookup_default_stream_groups raises an
# error if we pass it a bad name. This function is used
# during registration, but it's a bit heavy to do a full
# test of that.
with self.assertRaisesRegex(JsonableError, 'Invalid default stream group invalid-name'):
lookup_default_stream_groups(['invalid-name'], realm)
class SubscriptionPropertiesTest(ZulipTestCase):
def test_set_stream_color(self) -> None:
"""
A POST request to /api/v1/users/me/subscriptions/properties with stream_id and
color data sets the stream color, and for that stream only. Also, make sure that
any invalid hex color code is rejected.
"""
test_user = self.example_user('hamlet')
self.login_user(test_user)
old_subs, _ = gather_subscriptions(test_user)
sub = old_subs[0]
stream_id = sub['stream_id']
new_color = "#ffffff" # TODO: ensure that this is different from old_color
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": "color",
"stream_id": stream_id,
"value": "#ffffff"}])})
self.assert_json_success(result)
new_subs = gather_subscriptions(test_user)[0]
found_sub = None
for sub in new_subs:
if sub['stream_id'] == stream_id:
found_sub = sub
break
assert(found_sub is not None)
self.assertEqual(found_sub['color'], new_color)
new_subs.remove(found_sub)
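# Remove the recolored subscription from both lists; everything else should be unchanged.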
for sub in old_subs:
if sub['stream_id'] == stream_id:
found_sub = sub
break
old_subs.remove(found_sub)
self.assertEqual(old_subs, new_subs)
invalid_color = "3ffrff"
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": "color",
"stream_id": stream_id,
"value": invalid_color}])})
self.assert_json_error(result, "color is not a valid hex color code")
def test_set_color_missing_stream_id(self) -> None:
"""
Updating the color property requires a `stream_id` key.
"""
test_user = self.example_user('hamlet')
self.login_user(test_user)
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": "color",
"value": "#ffffff"}])})
self.assert_json_error(
result, "stream_id key is missing from subscription_data[0]")
def test_set_color_unsubscribed_stream_id(self) -> None:
"""
Updating the color property requires a subscribed stream.
"""
test_user = self.example_user("hamlet")
self.login_user(test_user)
subscribed, unsubscribed, never_subscribed = gather_subscriptions_helper(test_user)
not_subbed = unsubscribed + never_subscribed
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": "color",
"stream_id": not_subbed[0]["stream_id"],
"value": "#ffffff"}])})
self.assert_json_error(
result, "Not subscribed to stream id {}".format(not_subbed[0]["stream_id"]))
def test_set_color_missing_color(self) -> None:
"""
Updating the color property requires a color.
"""
test_user = self.example_user('hamlet')
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": "color",
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_error(
result, "value key is missing from subscription_data[0]")
def test_set_stream_wildcard_mentions_notify(self) -> None:
"""
A POST request to /api/v1/users/me/subscriptions/properties with wildcard_mentions_notify
sets the property.
"""
test_user = self.example_user('hamlet')
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
sub = subs[0]
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": "wildcard_mentions_notify",
"stream_id": sub["stream_id"],
"value": True}])})
self.assert_json_success(result)
updated_sub = get_subscription(sub['name'], test_user)
self.assertIsNotNone(updated_sub)
self.assertEqual(updated_sub.wildcard_mentions_notify, True)
def test_set_pin_to_top(self) -> None:
"""
A POST request to /api/v1/users/me/subscriptions/properties with stream_id and
pin_to_top data pins the stream.
"""
user = self.example_user('hamlet')
self.login_user(user)
old_subs, _ = gather_subscriptions(user)
sub = old_subs[0]
stream_id = sub['stream_id']
new_pin_to_top = not sub['pin_to_top']
result = self.api_post(user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": "pin_to_top",
"stream_id": stream_id,
"value": new_pin_to_top}])})
self.assert_json_success(result)
updated_sub = get_subscription(sub['name'], user)
self.assertIsNotNone(updated_sub)
self.assertEqual(updated_sub.pin_to_top, new_pin_to_top)
def test_change_is_muted(self) -> None:
test_user = self.example_user('hamlet')
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
sub = Subscription.objects.get(recipient__type=Recipient.STREAM,
recipient__type_id=subs[0]["stream_id"],
user_profile=test_user)
self.assertEqual(sub.is_muted, False)
events: List[Mapping[str, Any]] = []
property_name = "is_muted"
with tornado_redirected_to_list(events):
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": property_name,
"value": True,
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_success(result)
self.assert_length(events, 1)
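# The notification uses the legacy 'in_home_view' property, whose value is the inverse of is_muted.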
self.assertEqual(events[0]['event']['property'], 'in_home_view')
self.assertEqual(events[0]['event']['value'], False)
sub = Subscription.objects.get(recipient__type=Recipient.STREAM,
recipient__type_id=subs[0]["stream_id"],
user_profile=test_user)
self.assertEqual(sub.is_muted, True)
events = []
legacy_property_name = 'in_home_view'
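# Setting the legacy in_home_view property to True is equivalent to setting is_muted to False.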
with tornado_redirected_to_list(events):
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": legacy_property_name,
"value": True,
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_success(result)
self.assert_length(events, 1)
self.assertEqual(events[0]['event']['property'], 'in_home_view')
self.assertEqual(events[0]['event']['value'], True)
self.assert_json_success(result)
sub = Subscription.objects.get(recipient__type=Recipient.STREAM,
recipient__type_id=subs[0]["stream_id"],
user_profile=test_user)
self.assertEqual(sub.is_muted, False)
events = []
with tornado_redirected_to_list(events):
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": legacy_property_name,
"value": False,
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_success(result)
self.assert_length(events, 1)
self.assertEqual(events[0]['event']['property'], 'in_home_view')
self.assertEqual(events[0]['event']['value'], False)
sub = Subscription.objects.get(recipient__type=Recipient.STREAM,
recipient__type_id=subs[0]["stream_id"],
user_profile=test_user)
self.assertEqual(sub.is_muted, True)
def test_set_subscription_property_incorrect(self) -> None:
"""
Trying to set a property incorrectly returns a JSON error.
"""
test_user = self.example_user('hamlet')
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
property_name = "is_muted"
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": property_name,
"value": "bad",
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_error(result,
f'{property_name} is not a boolean')
property_name = "in_home_view"
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": property_name,
"value": "bad",
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_error(result,
f'{property_name} is not a boolean')
property_name = "desktop_notifications"
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": property_name,
"value": "bad",
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_error(result,
f'{property_name} is not a boolean')
property_name = "audible_notifications"
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": property_name,
"value": "bad",
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_error(result,
f'{property_name} is not a boolean')
property_name = "push_notifications"
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": property_name,
"value": "bad",
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_error(result,
f'{property_name} is not a boolean')
property_name = "email_notifications"
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": property_name,
"value": "bad",
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_error(result,
f'{property_name} is not a boolean')
property_name = "wildcard_mentions_notify"
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": property_name,
"value": "bad",
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_error(result,
f"{property_name} is not a boolean")
property_name = "color"
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": property_name,
"value": False,
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_error(result,
f'{property_name} is not a string')
def test_json_subscription_property_invalid_stream(self) -> None:
test_user = self.example_user("hamlet")
self.login_user(test_user)
stream_id = 1000
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": "is_muted",
"stream_id": stream_id,
"value": False}])})
self.assert_json_error(result, "Invalid stream id")
def test_set_invalid_property(self) -> None:
"""
Trying to set an invalid property returns a JSON error.
"""
test_user = self.example_user('hamlet')
self.login_user(test_user)
subs = gather_subscriptions(test_user)[0]
result = self.api_post(test_user, "/api/v1/users/me/subscriptions/properties",
{"subscription_data": ujson.dumps([{"property": "bad",
"value": "bad",
"stream_id": subs[0]["stream_id"]}])})
self.assert_json_error(result,
"Unknown subscription property: bad")
class SubscriptionRestApiTest(ZulipTestCase):
def test_basic_add_delete(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
# add
request = {
'add': ujson.dumps([{'name': 'my_test_stream_1'}]),
}
result = self.api_patch(user, "/api/v1/users/me/subscriptions", request)
self.assert_json_success(result)
streams = self.get_streams(user)
self.assertTrue('my_test_stream_1' in streams)
# now delete the same stream
request = {
'delete': ujson.dumps(['my_test_stream_1']),
}
result = self.api_patch(user, "/api/v1/users/me/subscriptions", request)
self.assert_json_success(result)
streams = self.get_streams(user)
self.assertTrue('my_test_stream_1' not in streams)
def test_add_with_color(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
# add with a color attribute
request = {
'add': ujson.dumps([{'name': 'my_test_stream_2', 'color': '#afafaf'}]),
}
result = self.api_patch(user, "/api/v1/users/me/subscriptions", request)
self.assert_json_success(result)
# incorrect color format
request = {
'subscriptions': ujson.dumps([{'name': 'my_test_stream_3', 'color': '#0g0g0g'}]),
}
result = self.api_post(user, "/api/v1/users/me/subscriptions", request)
self.assert_json_error(result, 'subscriptions[0]["color"] is not a valid hex color code')
def test_api_valid_property(self) -> None:
"""
Setting a valid property returns a success response.
"""
user = self.example_user('hamlet')
self.login_user(user)
subs = gather_subscriptions(user)[0]
result = self.api_patch(user, "/api/v1/users/me/subscriptions/{}".format(subs[0]["stream_id"]),
{'property': 'color', 'value': '#c2c2c2'})
self.assert_json_success(result)
def test_api_invalid_property(self) -> None:
"""
Trying to set an invalid property returns a JSON error.
"""
user = self.example_user('hamlet')
self.login_user(user)
subs = gather_subscriptions(user)[0]
result = self.api_patch(user, "/api/v1/users/me/subscriptions/{}".format(subs[0]["stream_id"]),
{'property': 'invalid', 'value': 'somevalue'})
self.assert_json_error(result,
"Unknown subscription property: invalid")
def test_api_invalid_stream_id(self) -> None:
"""
Trying to set an invalid stream id returns a JSON error.
"""
user = self.example_user("hamlet")
self.login_user(user)
result = self.api_patch(user, "/api/v1/users/me/subscriptions/121",
{'property': 'is_muted', 'value': 'somevalue'})
self.assert_json_error(result,
"Invalid stream id")
def test_bad_add_parameters(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
def check_for_error(val: Any, expected_message: str) -> None:
request = {
'add': ujson.dumps(val),
}
result = self.api_patch(user, "/api/v1/users/me/subscriptions", request)
self.assert_json_error(result, expected_message)
check_for_error(['foo'], 'add[0] is not a dict')
check_for_error([{'bogus': 'foo'}], 'name key is missing from add[0]')
check_for_error([{'name': {}}], 'add[0]["name"] is not a string')
def test_bad_principals(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
request = {
'add': ujson.dumps([{'name': 'my_new_stream'}]),
'principals': ujson.dumps([{}]),
}
result = self.api_patch(user, "/api/v1/users/me/subscriptions", request)
self.assert_json_error(result, 'principals is not an allowed_type')
def test_bad_delete_parameters(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
request = {
'delete': ujson.dumps([{'name': 'my_test_stream_1'}]),
}
result = self.api_patch(user, "/api/v1/users/me/subscriptions", request)
self.assert_json_error(result, "delete[0] is not a string")
def test_add_or_delete_not_specified(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
result = self.api_patch(user, "/api/v1/users/me/subscriptions", {})
self.assert_json_error(result,
'Nothing to do. Specify at least one of "add" or "delete".')
def test_patch_enforces_valid_stream_name_check(self) -> None:
"""
The only way to force an error is with an empty string.
"""
user = self.example_user('hamlet')
self.login_user(user)
invalid_stream_name = ""
request = {
'delete': ujson.dumps([invalid_stream_name]),
}
result = self.api_patch(user, "/api/v1/users/me/subscriptions", request)
self.assert_json_error(result,
f"Invalid stream name '{invalid_stream_name}'")
def test_stream_name_too_long(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
long_stream_name = "a" * 61
request = {
'delete': ujson.dumps([long_stream_name]),
}
result = self.api_patch(user, "/api/v1/users/me/subscriptions", request)
self.assert_json_error(result,
"Stream name too long (limit: 60 characters).")
def test_stream_name_contains_null(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
stream_name = "abc\000"
request = {
'delete': ujson.dumps([stream_name]),
}
result = self.api_patch(user, "/api/v1/users/me/subscriptions", request)
self.assert_json_error(result,
f"Stream name '{stream_name}' contains NULL (0x00) characters.")
def test_compose_views_rollback(self) -> None:
'''
The compose_views function is used under the hood by
update_subscriptions_backend. It's a pretty simple method in terms of
control flow, but it uses a Django rollback, which may make it brittle
when we upgrade Django. We test the function's rollback logic
here with a simple scenario to avoid false positives related to
subscription complications.
'''
user_profile = self.example_user('hamlet')
user_profile.full_name = 'Hamlet'
user_profile.save()
def method1(req: HttpRequest, user_profile: UserProfile) -> HttpResponse:
user_profile.full_name = 'Should not be committed'
user_profile.save()
return json_success()
def method2(req: HttpRequest, user_profile: UserProfile) -> HttpResponse:
return json_error('random failure')
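# method2's failure should roll back the change method1 made to full_name.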
with self.assertRaises(JsonableError):
compose_views(None, user_profile, [(method1, {}), (method2, {})])
user_profile = self.example_user('hamlet')
self.assertEqual(user_profile.full_name, 'Hamlet')
class SubscriptionAPITest(ZulipTestCase):
def setUp(self) -> None:
"""
All tests will be logged in as hamlet. Also save various useful values
as attributes that tests can access.
"""
super().setUp()
self.user_profile = self.example_user('hamlet')
self.test_email = self.user_profile.email
self.test_user = self.user_profile
self.login_user(self.user_profile)
self.test_realm = self.user_profile.realm
self.streams = self.get_streams(self.user_profile)
def make_random_stream_names(self, existing_stream_names: List[str]) -> List[str]:
"""
Helper function to make up random stream names. It takes
existing_stream_names and randomly appends a digit to the end of each,
skipping any result that already names a stream in the realm.
"""
random_streams = []
all_stream_names = [stream.name for stream in Stream.objects.filter(realm=self.test_realm)]
for stream in existing_stream_names:
random_stream = stream + str(random.randint(0, 9))
if random_stream not in all_stream_names:
random_streams.append(random_stream)
return random_streams
def test_successful_subscriptions_list(self) -> None:
"""
Calling /api/v1/users/me/subscriptions should successfully return your subscriptions.
"""
result = self.api_get(self.test_user, "/api/v1/users/me/subscriptions")
self.assert_json_success(result)
json = result.json()
self.assertIn("subscriptions", json)
for stream in json['subscriptions']:
self.assertIsInstance(stream['name'], str)
self.assertIsInstance(stream['color'], str)
self.assertIsInstance(stream['invite_only'], bool)
# check that the stream name corresponds to an actual
# stream; will throw Stream.DoesNotExist if it doesn't
get_stream(stream['name'], self.test_realm)
list_streams = [stream['name'] for stream in json["subscriptions"]]
# also check that this matches the list of your subscriptions
self.assertEqual(sorted(list_streams), sorted(self.streams))
def helper_check_subs_before_and_after_add(self, subscriptions: List[str],
other_params: Dict[str, Any],
subscribed: List[str],
already_subscribed: List[str],
email: str, new_subs: List[str],
realm: Realm,
invite_only: bool=False) -> None:
"""
Check result of adding subscriptions.
You can add subscriptions for yourself or possibly many
principals, which is why e-mails map to subscriptions in the
result.
The result json is of the form
{"msg": "",
"result": "success",
"already_subscribed": {self.example_email("iago"): ["Venice", "Verona"]},
"subscribed": {self.example_email("iago"): ["Venice8"]}}
"""
result = self.common_subscribe_to_streams(self.test_user, subscriptions,
other_params, invite_only=invite_only)
self.assert_json_success(result)
json = result.json()
self.assertEqual(sorted(subscribed), sorted(json["subscribed"][email]))
self.assertEqual(sorted(already_subscribed), sorted(json["already_subscribed"][email]))
user = get_user(email, realm)
new_streams = self.get_streams(user)
self.assertEqual(sorted(new_streams), sorted(new_subs))
def test_successful_subscriptions_add(self) -> None:
"""
Calling POST /json/users/me/subscriptions should successfully add
streams, and should determine which are new subscriptions vs
which were already subscribed. We add 2 new streams to the
list of subscriptions and confirm the right number of events
are generated.
"""
self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage
add_streams = ["Verona2", "Denmark5"]
self.assertNotEqual(len(add_streams), 0) # necessary for full test coverage
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
self.helper_check_subs_before_and_after_add(self.streams + add_streams, {},
add_streams, self.streams, self.test_email,
self.streams + add_streams, self.test_realm)
self.assert_length(events, 8)
def test_successful_subscriptions_add_with_announce(self) -> None:
"""
Calling POST /json/users/me/subscriptions should successfully add
streams, and should determine which are new subscriptions vs
which were already subscribed. We add 2 new streams to the
list of subscriptions and confirm the right number of events
are generated.
"""
self.assertNotEqual(len(self.streams), 0)
add_streams = ["Verona2", "Denmark5"]
self.assertNotEqual(len(add_streams), 0)
events: List[Mapping[str, Any]] = []
other_params = {
'announce': 'true',
}
notifications_stream = get_stream(self.streams[0], self.test_realm)
self.test_realm.notifications_stream_id = notifications_stream.id
self.test_realm.save()
# Delete the UserProfile from the cache so the realm change will be
# picked up
cache.cache_delete(cache.user_profile_by_email_cache_key(self.test_email))
with tornado_redirected_to_list(events):
self.helper_check_subs_before_and_after_add(self.streams + add_streams, other_params,
add_streams, self.streams, self.test_email,
self.streams + add_streams, self.test_realm)
self.assertEqual(len(events), 9)
def test_successful_subscriptions_notifies_pm(self) -> None:
"""
Calling POST /json/users/me/subscriptions should notify when a new stream is created.
"""
invitee = self.example_user("iago")
current_stream = self.get_streams(invitee)[0]
invite_streams = self.make_random_stream_names([current_stream])[:1]
result = self.common_subscribe_to_streams(
invitee,
invite_streams,
extra_post_data={
'announce': 'true',
'principals': ujson.dumps([self.user_profile.id]),
},
)
self.assert_json_success(result)
def test_successful_subscriptions_notifies_stream(self) -> None:
"""
Calling POST /json/users/me/subscriptions should notify when a new stream is created.
"""
invitee = self.example_user("iago")
invitee_full_name = 'Iago'
current_stream = self.get_streams(invitee)[0]
invite_streams = self.make_random_stream_names([current_stream])[:1]
notifications_stream = get_stream(current_stream, self.test_realm)
self.test_realm.notifications_stream_id = notifications_stream.id
self.test_realm.save()
# Delete the UserProfile from the cache so the realm change will be
# picked up
cache.cache_delete(cache.user_profile_by_email_cache_key(invitee.email))
result = self.common_subscribe_to_streams(
invitee,
invite_streams,
extra_post_data=dict(
announce='true',
principals= ujson.dumps([self.user_profile.id]),
),
)
self.assert_json_success(result)
msg = self.get_second_to_last_message()
self.assertEqual(msg.recipient.type, Recipient.STREAM)
self.assertEqual(msg.sender_id, self.notification_bot().id)
expected_msg = f"@_**{invitee_full_name}|{invitee.id}** created a new stream #**{invite_streams[0]}**."
self.assertEqual(msg.content, expected_msg)
def test_successful_cross_realm_notification(self) -> None:
"""
Calling POST /json/users/me/subscriptions in a new realm
should notify with a proper new stream link
"""
realm = do_create_realm("testrealm", "Test Realm")
notifications_stream = Stream.objects.get(name='general', realm=realm)
realm.notifications_stream = notifications_stream
realm.save()
invite_streams = ["cross_stream"]
user = self.example_user('AARON')
user.realm = realm
user.save()
# Delete the UserProfile from the cache so the realm change will be
# picked up
cache.cache_delete(cache.user_profile_by_email_cache_key(user.email))
result = self.common_subscribe_to_streams(
user,
invite_streams,
extra_post_data=dict(
announce='true',
),
subdomain="testrealm",
)
self.assert_json_success(result)
msg = self.get_second_to_last_message()
self.assertEqual(msg.recipient.type, Recipient.STREAM)
self.assertEqual(msg.sender_id, self.notification_bot().id)
stream_id = Stream.objects.latest('id').id
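# The announced stream is the most recently created one, so its id appears in the rendered narrow link.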
expected_rendered_msg = f'<p><span class="user-mention silent" data-user-id="{user.id}">{user.full_name}</span> created a new stream <a class="stream" data-stream-id="{stream_id}" href="/#narrow/stream/{stream_id}-{invite_streams[0]}">#{invite_streams[0]}</a>.</p>'
self.assertEqual(msg.rendered_content, expected_rendered_msg)
def test_successful_subscriptions_notifies_with_escaping(self) -> None:
"""
Calling POST /json/users/me/subscriptions should notify when a new stream is created.
"""
invitee_full_name = 'Iago'
invitee = self.example_user('iago')
current_stream = self.get_streams(invitee)[0]
notifications_stream = get_stream(current_stream, self.test_realm)
self.test_realm.notifications_stream_id = notifications_stream.id
self.test_realm.save()
invite_streams = ['strange ) \\ test']
result = self.common_subscribe_to_streams(
invitee,
invite_streams,
extra_post_data={
'announce': 'true',
'principals': ujson.dumps([self.user_profile.id]),
},
)
self.assert_json_success(result)
msg = self.get_second_to_last_message()
self.assertEqual(msg.sender_id, self.notification_bot().id)
expected_msg = f"@_**{invitee_full_name}|{invitee.id}** created a new stream #**{invite_streams[0]}**."
self.assertEqual(msg.content, expected_msg)
def test_non_ascii_stream_subscription(self) -> None:
"""
Subscribing to a stream name with non-ASCII characters succeeds.
"""
self.helper_check_subs_before_and_after_add(self.streams + ["hümbüǵ"], {},
["hümbüǵ"], self.streams, self.test_email,
self.streams + ["hümbüǵ"], self.test_realm)
def test_subscriptions_add_too_long(self) -> None:
"""
Calling POST /json/users/me/subscriptions on a stream whose name is >60
characters should return a JSON error.
"""
# character limit is 60 characters
long_stream_name = "a" * 61
result = self.common_subscribe_to_streams(self.test_user, [long_stream_name])
self.assert_json_error(result,
"Stream name too long (limit: 60 characters).")
def test_subscriptions_add_stream_with_null(self) -> None:
"""
Calling POST /json/users/me/subscriptions on a stream whose name contains
null characters should return a JSON error.
"""
stream_name = "abc\000"
result = self.common_subscribe_to_streams(self.test_user, [stream_name])
self.assert_json_error(result,
f"Stream name '{stream_name}' contains NULL (0x00) characters.")
def test_user_settings_for_adding_streams(self) -> None:
with mock.patch('zerver.models.UserProfile.can_create_streams', return_value=False):
result = self.common_subscribe_to_streams(self.test_user, ['stream1'])
self.assert_json_error(result, 'User cannot create streams.')
with mock.patch('zerver.models.UserProfile.can_create_streams', return_value=True):
result = self.common_subscribe_to_streams(self.test_user, ['stream2'])
self.assert_json_success(result)
# User should still be able to subscribe to an existing stream
with mock.patch('zerver.models.UserProfile.can_create_streams', return_value=False):
result = self.common_subscribe_to_streams(self.test_user, ['stream2'])
self.assert_json_success(result)
def test_can_create_streams(self) -> None:
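        # Walk UserProfile.can_create_streams() through each value of
        # Realm.create_stream_policy for admins, members, guests, and "full
        # members" (members older than the realm's waiting_period_threshold).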
othello = self.example_user('othello')
othello.role = UserProfile.ROLE_REALM_ADMINISTRATOR
self.assertTrue(othello.can_create_streams())
othello.role = UserProfile.ROLE_MEMBER
othello.realm.create_stream_policy = Realm.POLICY_ADMINS_ONLY
self.assertFalse(othello.can_create_streams())
othello.realm.create_stream_policy = Realm.POLICY_MEMBERS_ONLY
othello.role = UserProfile.ROLE_GUEST
self.assertFalse(othello.can_create_streams())
othello.role = UserProfile.ROLE_MEMBER
othello.realm.waiting_period_threshold = 1000
othello.realm.create_stream_policy = Realm.POLICY_FULL_MEMBERS_ONLY
othello.date_joined = timezone_now() - timedelta(days=(othello.realm.waiting_period_threshold - 1))
self.assertFalse(othello.can_create_streams())
othello.date_joined = timezone_now() - timedelta(days=(othello.realm.waiting_period_threshold + 1))
self.assertTrue(othello.can_create_streams())
def test_user_settings_for_subscribing_other_users(self) -> None:
"""
You can't subscribe other people to streams if you are a guest or your account is not old
enough.
"""
user_profile = self.example_user("cordelia")
invitee_user_id = user_profile.id
realm = user_profile.realm
do_set_realm_property(realm, "create_stream_policy", Realm.POLICY_MEMBERS_ONLY)
do_set_realm_property(realm, "invite_to_stream_policy",
Realm.POLICY_ADMINS_ONLY)
result = self.common_subscribe_to_streams(
self.test_user, ['stream1'], {"principals": ujson.dumps([invitee_user_id])})
self.assert_json_error(
result, "Only administrators can modify other users' subscriptions.")
do_set_realm_property(realm, "invite_to_stream_policy",
Realm.POLICY_MEMBERS_ONLY)
result = self.common_subscribe_to_streams(
self.test_user, ['stream2'], {"principals": ujson.dumps([
self.test_user.id, invitee_user_id])})
self.assert_json_success(result)
self.unsubscribe(user_profile, "stream2")
do_set_realm_property(realm, "invite_to_stream_policy",
Realm.POLICY_FULL_MEMBERS_ONLY)
do_set_realm_property(realm, "waiting_period_threshold", 100000)
result = self.common_subscribe_to_streams(
self.test_user, ['stream2'], {"principals": ujson.dumps([invitee_user_id])})
self.assert_json_error(
result, "Your account is too new to modify other users' subscriptions.")
do_set_realm_property(realm, "waiting_period_threshold", 0)
result = self.common_subscribe_to_streams(
self.test_user, ['stream2'], {"principals": ujson.dumps([invitee_user_id])})
self.assert_json_success(result)
def test_can_subscribe_other_users(self) -> None:
"""
You can't subscribe other people to streams if you are a guest or your account is not old
enough.
"""
othello = self.example_user('othello')
do_change_user_role(othello, UserProfile.ROLE_REALM_ADMINISTRATOR)
self.assertTrue(othello.can_subscribe_other_users())
do_change_user_role(othello, UserProfile.ROLE_MEMBER)
do_change_user_role(othello, UserProfile.ROLE_GUEST)
self.assertFalse(othello.can_subscribe_other_users())
do_change_user_role(othello, UserProfile.ROLE_MEMBER)
do_set_realm_property(othello.realm, "waiting_period_threshold", 1000)
do_set_realm_property(othello.realm, "invite_to_stream_policy",
Realm.POLICY_FULL_MEMBERS_ONLY)
othello.date_joined = timezone_now() - timedelta(days=(othello.realm.waiting_period_threshold - 1))
self.assertFalse(othello.can_subscribe_other_users())
othello.date_joined = timezone_now() - timedelta(days=(othello.realm.waiting_period_threshold + 1))
self.assertTrue(othello.can_subscribe_other_users())
def test_subscriptions_add_invalid_stream(self) -> None:
"""
Calling POST /json/users/me/subscriptions on a stream whose name is invalid (as
defined by valid_stream_name in zerver/views.py) should return a JSON
error.
"""
# currently, the only invalid name is the empty string
invalid_stream_name = ""
result = self.common_subscribe_to_streams(self.test_user, [invalid_stream_name])
self.assert_json_error(result,
f"Invalid stream name '{invalid_stream_name}'")
def assert_adding_subscriptions_for_principal(self, invitee_data: Union[str, int], invitee_realm: Realm,
streams: List[str], invite_only: bool=False) -> None:
"""
Calling POST /json/users/me/subscriptions on behalf of another principal (for
whom you have permission to add subscriptions) should successfully add
those subscriptions and send a message to the subscribee notifying
them.
"""
if isinstance(invitee_data, str):
other_profile = get_user(invitee_data, invitee_realm)
else:
other_profile = get_user_profile_by_id_in_realm(invitee_data, invitee_realm)
current_streams = self.get_streams(other_profile)
self.assertIsInstance(other_profile, UserProfile)
self.assertNotEqual(len(current_streams), 0) # necessary for full test coverage
self.assertNotEqual(len(streams), 0) # necessary for full test coverage
streams_to_sub = streams[:1] # just add one, to make the message easier to check
streams_to_sub.extend(current_streams)
self.helper_check_subs_before_and_after_add(streams_to_sub,
{"principals": ujson.dumps([invitee_data])}, streams[:1],
current_streams, other_profile.email, streams_to_sub,
invitee_realm, invite_only=invite_only)
# verify that a welcome message was sent to the stream
msg = self.get_last_message()
self.assertEqual(msg.recipient.type, msg.recipient.STREAM)
self.assertEqual(msg.topic_name(), 'stream events')
self.assertEqual(msg.sender.email, settings.NOTIFICATION_BOT)
self.assertIn('Stream created by @_**', msg.content)
def test_multi_user_subscription(self) -> None:
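        # Subscribe two other users to a new stream, then ourselves, then a
        # fourth user, checking the emitted subscription/peer_add events and
        # the captured database query counts at each step.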
user1 = self.example_user("cordelia")
user2 = self.example_user("iago")
realm = get_realm("zulip")
streams_to_sub = ['multi_user_stream']
events: List[Mapping[str, Any]] = []
flush_per_request_caches()
with tornado_redirected_to_list(events):
with queries_captured() as queries:
self.common_subscribe_to_streams(
self.test_user,
streams_to_sub,
dict(principals=ujson.dumps([user1.id, user2.id])),
)
self.assert_length(queries, 39)
self.assert_length(events, 7)
for ev in [x for x in events if x['event']['type'] not in ('message', 'stream')]:
if ev['event']['op'] == 'add':
self.assertEqual(
set(ev['event']['subscriptions'][0]['subscribers']),
{user1.id, user2.id},
)
else:
# Check "peer_add" events for streams users were
# never subscribed to, in order for the neversubscribed
# structure to stay up-to-date.
self.assertEqual(ev['event']['op'], 'peer_add')
stream = get_stream('multi_user_stream', realm)
self.assertEqual(num_subscribers_for_stream_id(stream.id), 2)
# Now add ourselves
events = []
with tornado_redirected_to_list(events):
with queries_captured() as queries:
self.common_subscribe_to_streams(
self.test_user,
streams_to_sub,
dict(principals=ujson.dumps([self.test_user.id])),
)
self.assert_length(queries, 14)
self.assert_length(events, 2)
add_event, add_peer_event = events
self.assertEqual(add_event['event']['type'], 'subscription')
self.assertEqual(add_event['event']['op'], 'add')
self.assertEqual(add_event['users'], [get_user(self.test_email, self.test_realm).id])
self.assertEqual(
set(add_event['event']['subscriptions'][0]['subscribers']),
{user1.id, user2.id, self.test_user.id},
)
self.assertNotIn(self.example_user('polonius').id, add_peer_event['users'])
self.assertEqual(len(add_peer_event['users']), 11)
self.assertEqual(add_peer_event['event']['type'], 'subscription')
self.assertEqual(add_peer_event['event']['op'], 'peer_add')
self.assertEqual(add_peer_event['event']['user_id'], self.user_profile.id)
stream = get_stream('multi_user_stream', realm)
self.assertEqual(num_subscribers_for_stream_id(stream.id), 3)
# Finally, add othello.
events = []
user_profile = self.example_user('othello')
email3 = user_profile.email
user3 = user_profile
realm3 = user_profile.realm
stream = get_stream('multi_user_stream', realm)
with tornado_redirected_to_list(events):
bulk_add_subscriptions([stream], [user_profile])
self.assert_length(events, 2)
add_event, add_peer_event = events
self.assertEqual(add_event['event']['type'], 'subscription')
self.assertEqual(add_event['event']['op'], 'add')
self.assertEqual(add_event['users'], [get_user(email3, realm3).id])
self.assertEqual(
set(add_event['event']['subscriptions'][0]['subscribers']),
{user1.id, user2.id, user3.id, self.test_user.id},
)
# We don't send a peer_add event to othello
self.assertNotIn(user_profile.id, add_peer_event['users'])
self.assertNotIn(self.example_user('polonius').id, add_peer_event['users'])
self.assertEqual(len(add_peer_event['users']), 11)
self.assertEqual(add_peer_event['event']['type'], 'subscription')
self.assertEqual(add_peer_event['event']['op'], 'peer_add')
self.assertEqual(add_peer_event['event']['user_id'], user_profile.id)
def test_private_stream_subscription(self) -> None:
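        # Subscribing an additional user to a private stream should emit a
        # stream create event, a subscription add event, and a peer_add event;
        # the second half checks the same flow when the new subscriber is a
        # realm admin.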
realm = get_realm("zulip")
# Create a private stream with Hamlet subscribed
stream_name = "private"
stream = ensure_stream(realm, stream_name, invite_only=True)
existing_user_profile = self.example_user('hamlet')
bulk_add_subscriptions([stream], [existing_user_profile])
# Now subscribe Cordelia to the stream, capturing events
user_profile = self.example_user('cordelia')
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
bulk_add_subscriptions([stream], [user_profile])
self.assert_length(events, 3)
create_event, add_event, add_peer_event = events
self.assertEqual(create_event['event']['type'], 'stream')
self.assertEqual(create_event['event']['op'], 'create')
self.assertEqual(create_event['users'], [user_profile.id])
self.assertEqual(create_event['event']['streams'][0]['name'], stream_name)
self.assertEqual(add_event['event']['type'], 'subscription')
self.assertEqual(add_event['event']['op'], 'add')
self.assertEqual(add_event['users'], [user_profile.id])
self.assertEqual(
set(add_event['event']['subscriptions'][0]['subscribers']),
{user_profile.id, existing_user_profile.id},
)
        # We don't send a peer_add event to the newly subscribed user (Cordelia),
        # but we do send a peer_add event to all realm admins.
self.assertNotIn(user_profile.id, add_peer_event['users'])
self.assertEqual(len(add_peer_event['users']), 3)
self.assertEqual(add_peer_event['event']['type'], 'subscription')
self.assertEqual(add_peer_event['event']['op'], 'peer_add')
self.assertEqual(add_peer_event['event']['user_id'], user_profile.id)
        # Do not send a stream creation event to realm admin users, even if the
        # realm admin is subscribed to the stream, because realm admins already
        # received the private stream creation event when the stream was created.
new_stream = ensure_stream(realm, "private stream", invite_only=True)
events = []
with tornado_redirected_to_list(events):
bulk_add_subscriptions([new_stream], [self.example_user("iago")])
self.assert_length(events, 3)
create_event, add_event, add_peer_event = events
self.assertEqual(create_event['event']['type'], 'stream')
self.assertEqual(create_event['event']['op'], 'create')
self.assertEqual(create_event['users'], [])
self.assertEqual(add_event['event']['type'], 'subscription')
self.assertEqual(add_event['event']['op'], 'add')
self.assertEqual(add_event['users'], [self.example_user("iago").id])
self.assertEqual(len(add_peer_event['users']), 1)
self.assertEqual(add_peer_event['event']['type'], 'subscription')
self.assertEqual(add_peer_event['event']['op'], 'peer_add')
self.assertEqual(add_peer_event['event']['user_id'], self.example_user("iago").id)
def test_subscribe_to_stream_post_policy_admins_stream(self) -> None:
"""
Members can subscribe to streams where only admins can post
"""
member = self.example_user("AARON")
stream = self.make_stream('stream1')
do_change_stream_post_policy(stream, Stream.STREAM_POST_POLICY_ADMINS)
result = self.common_subscribe_to_streams(member, ["stream1"])
self.assert_json_success(result)
json = result.json()
self.assertEqual(json["subscribed"], {member.email: ["stream1"]})
self.assertEqual(json["already_subscribed"], {})
def test_subscribe_to_stream_post_policy_restrict_new_members_stream(self) -> None:
"""
        New members can subscribe to streams where they cannot post
"""
new_member_email = self.nonreg_email('test')
self.register(new_member_email, "test")
new_member = self.nonreg_user('test')
do_set_realm_property(new_member.realm, 'waiting_period_threshold', 10)
self.assertTrue(new_member.is_new_member)
stream = self.make_stream('stream1')
do_change_stream_post_policy(stream, Stream.STREAM_POST_POLICY_RESTRICT_NEW_MEMBERS)
result = self.common_subscribe_to_streams(new_member, ["stream1"])
self.assert_json_success(result)
json = result.json()
self.assertEqual(json["subscribed"], {new_member.email: ["stream1"]})
self.assertEqual(json["already_subscribed"], {})
def test_guest_user_subscribe(self) -> None:
"""Guest users cannot subscribe themselves to anything"""
guest_user = self.example_user("polonius")
result = self.common_subscribe_to_streams(guest_user, ["Denmark"])
self.assert_json_error(result, "Not allowed for guest users")
# Verify the internal checks also block guest users.
stream = get_stream("Denmark", guest_user.realm)
self.assertEqual(filter_stream_authorization(guest_user, [stream]),
([], [stream]))
# Test UserProfile.can_create_streams for guest users.
streams_raw = [{
'invite_only': False,
'history_public_to_subscribers': None,
'name': 'new_stream',
'stream_post_policy': Stream.STREAM_POST_POLICY_EVERYONE,
}]
with self.assertRaisesRegex(JsonableError, "User cannot create streams."):
list_to_streams(streams_raw, guest_user)
stream = self.make_stream('private_stream', invite_only=True)
result = self.common_subscribe_to_streams(guest_user, ["private_stream"])
self.assert_json_error(result, "Not allowed for guest users")
self.assertEqual(filter_stream_authorization(guest_user, [stream]),
([], [stream]))
def test_users_getting_add_peer_event(self) -> None:
"""
        Check that the correct set of users receives the add_peer_event
"""
streams_to_sub = ['multi_user_stream']
othello = self.example_user('othello')
cordelia = self.example_user('cordelia')
iago = self.example_user('iago')
orig_user_ids_to_subscribe = [self.test_user.id, othello.id]
self.common_subscribe_to_streams(
self.test_user,
streams_to_sub,
dict(principals=ujson.dumps(orig_user_ids_to_subscribe)))
new_user_ids_to_subscribe = [iago.id, cordelia.id]
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
self.common_subscribe_to_streams(
self.test_user,
streams_to_sub,
dict(principals=ujson.dumps(new_user_ids_to_subscribe)),
)
add_peer_events = [events[2], events[3]]
for add_peer_event in add_peer_events:
self.assertEqual(add_peer_event['event']['type'], 'subscription')
self.assertEqual(add_peer_event['event']['op'], 'peer_add')
event_sent_to_ids = add_peer_event['users']
for user_id in new_user_ids_to_subscribe:
                # Make sure newly subscribed users are not in the
                # peer_add event recipient list
self.assertNotIn(user_id, event_sent_to_ids)
for old_user in orig_user_ids_to_subscribe:
                # Check that the originally subscribed users are in the peer_add event recipient list.
self.assertIn(old_user, event_sent_to_ids)
def test_users_getting_remove_peer_event(self) -> None:
"""
        Check that the correct set of users receives the remove_peer_event
"""
user1 = self.example_user("othello")
user2 = self.example_user("cordelia")
user3 = self.example_user("hamlet")
user4 = self.example_user("iago")
user5 = self.example_user("AARON")
stream1 = self.make_stream('stream1')
stream2 = self.make_stream('stream2')
private = self.make_stream('private_stream', invite_only=True)
self.subscribe(user1, 'stream1')
self.subscribe(user2, 'stream1')
self.subscribe(user3, 'stream1')
self.subscribe(user2, 'stream2')
self.subscribe(user1, 'private_stream')
self.subscribe(user2, 'private_stream')
self.subscribe(user3, 'private_stream')
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
bulk_remove_subscriptions(
[user1, user2],
[stream1, stream2, private],
get_client("website"),
)
peer_events = [e for e in events
if e['event'].get('op') == 'peer_remove']
notifications = set()
for event in peer_events:
for user_id in event['users']:
for stream_name in event['event']['subscriptions']:
removed_user_id = event['event']['user_id']
notifications.add((user_id, removed_user_id, stream_name))
# POSITIVE CASES FIRST
self.assertIn((user3.id, user1.id, 'stream1'), notifications)
self.assertIn((user4.id, user1.id, 'stream1'), notifications)
self.assertIn((user3.id, user2.id, 'stream1'), notifications)
self.assertIn((user4.id, user2.id, 'stream1'), notifications)
self.assertIn((user1.id, user2.id, 'stream2'), notifications)
self.assertIn((user3.id, user2.id, 'stream2'), notifications)
self.assertIn((user4.id, user2.id, 'stream2'), notifications)
self.assertIn((user3.id, user1.id, 'private_stream'), notifications)
self.assertIn((user3.id, user2.id, 'private_stream'), notifications)
self.assertIn((user4.id, user1.id, 'private_stream'), notifications)
self.assertIn((user4.id, user2.id, 'private_stream'), notifications)
# NEGATIVE
# don't be notified if you are being removed yourself
self.assertNotIn((user1.id, user1.id, 'stream1'), notifications)
        # don't send false notifications for folks that weren't actually
        # subscribed in the first place
self.assertNotIn((user3.id, user1.id, 'stream2'), notifications)
# don't send notifications for random people
self.assertNotIn((user3.id, user4.id, 'stream2'), notifications)
# don't send notifications to unsubscribed non realm admin users for private streams
self.assertNotIn((user5.id, user1.id, 'private_stream'), notifications)
def test_bulk_subscribe_MIT(self) -> None:
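        # Zephyr-mirror (MIT) realms should not generate tornado subscription
        # events when subscribing or unsubscribing in bulk, and the query
        # counts should stay small.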
mit_user = self.mit_user('starnine')
realm = get_realm("zephyr")
stream_names = [f"stream_{i}" for i in range(40)]
streams = [
self.make_stream(stream_name, realm=realm)
for stream_name in stream_names]
for stream in streams:
stream.is_in_zephyr_realm = True
stream.save()
events: List[Mapping[str, Any]] = []
with tornado_redirected_to_list(events):
with queries_captured() as queries:
self.common_subscribe_to_streams(
mit_user,
stream_names,
dict(principals=ujson.dumps([mit_user.id])),
subdomain="zephyr",
)
# Make sure Zephyr mirroring realms such as MIT do not get
# any tornado subscription events
self.assert_length(events, 0)
self.assert_length(queries, 5)
events = []
with tornado_redirected_to_list(events):
bulk_remove_subscriptions(
users=[mit_user],
streams=streams,
acting_client=get_client('website'),
)
self.assert_length(events, 0)
def test_bulk_subscribe_many(self) -> None:
# Create a whole bunch of streams
streams = [f"stream_{i}" for i in range(20)]
for stream_name in streams:
self.make_stream(stream_name)
with queries_captured() as queries:
self.common_subscribe_to_streams(
self.test_user,
streams,
dict(principals=ujson.dumps([self.test_user.id])),
)
# Make sure we don't make O(streams) queries
self.assert_length(queries, 16)
def test_subscriptions_add_for_principal(self) -> None:
"""
You can subscribe other people to streams.
"""
invitee = self.example_user("iago")
current_streams = self.get_streams(invitee)
invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(invitee.id, invitee.realm, invite_streams)
def test_subscriptions_add_for_principal_legacy_emails(self) -> None:
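        # Same as test_subscriptions_add_for_principal, but identifying the
        # principal by email (the legacy format) instead of by user ID.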
invitee = self.example_user("iago")
current_streams = self.get_streams(invitee)
invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(invitee.email, invitee.realm, invite_streams)
def test_subscriptions_add_for_principal_deactivated(self) -> None:
"""
You can't subscribe deactivated people to streams.
"""
target_profile = self.example_user("cordelia")
post_data = dict(
principals=ujson.dumps([target_profile.id]),
)
result = self.common_subscribe_to_streams(self.test_user, "Verona", post_data)
self.assert_json_success(result)
do_deactivate_user(target_profile)
result = self.common_subscribe_to_streams(self.test_user, "Denmark", post_data)
self.assert_json_error(
result,
f"User not authorized to execute queries on behalf of '{target_profile.id}'",
status_code=403)
def test_subscriptions_add_for_principal_invite_only(self) -> None:
"""
You can subscribe other people to invite only streams.
"""
invitee = self.example_user("iago")
current_streams = self.get_streams(invitee)
invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(invitee.id, invitee.realm, invite_streams,
invite_only=True)
def test_non_ascii_subscription_for_principal(self) -> None:
"""
        You can subscribe other people to streams even if the stream names
        contain non-ASCII characters.
"""
iago = self.example_user('iago')
self.assert_adding_subscriptions_for_principal(iago.id, get_realm('zulip'), ["hümbüǵ"])
def test_subscription_add_invalid_principal_legacy_emails(self) -> None:
"""
Calling subscribe on behalf of a principal that does not exist
should return a JSON error.
"""
invalid_principal = "[email protected]"
invalid_principal_realm = get_realm("zulip")
# verify that invalid_principal actually doesn't exist
with self.assertRaises(UserProfile.DoesNotExist):
get_user(invalid_principal, invalid_principal_realm)
result = self.common_subscribe_to_streams(self.test_user, self.streams,
{"principals": ujson.dumps([invalid_principal])})
self.assert_json_error(
result,
f"User not authorized to execute queries on behalf of '{invalid_principal}'",
status_code=403,
)
def test_subscription_add_invalid_principal(self) -> None:
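        # Like the legacy-email case above, but using a numeric user ID that
        # does not exist in the realm.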
invalid_principal = 999
invalid_principal_realm = get_realm("zulip")
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_id_in_realm(invalid_principal, invalid_principal_realm)
result = self.common_subscribe_to_streams(self.test_user, self.streams,
{"principals": ujson.dumps([invalid_principal])})
self.assert_json_error(
result,
f"User not authorized to execute queries on behalf of '{invalid_principal}'",
status_code=403,
)
def test_subscription_add_principal_other_realm(self) -> None:
"""
Calling subscribe on behalf of a principal in another realm
should return a JSON error.
"""
profile = self.mit_user('starnine')
principal = profile.id
        # verify that the principal exists (so the error below is due to the cross-realm request)
self.assertIsInstance(profile, UserProfile)
result = self.common_subscribe_to_streams(self.test_user, self.streams,
{"principals": ujson.dumps([principal])})
self.assert_json_error(
result,
f"User not authorized to execute queries on behalf of '{principal}'",
status_code=403,
)
def helper_check_subs_before_and_after_remove(self, subscriptions: List[str],
json_dict: Dict[str, Any],
email: str, new_subs: List[str],
realm: Realm) -> None:
"""
Check result of removing subscriptions.
Unlike adding subscriptions, you can only remove subscriptions
for yourself, so the result format is different.
{"msg": "",
"removed": ["Denmark", "Scotland", "Verona"],
"not_removed": ["Rome"], "result": "success"}
"""
result = self.client_delete("/json/users/me/subscriptions",
{"subscriptions": ujson.dumps(subscriptions)})
self.assert_json_success(result)
json = result.json()
for key, val in json_dict.items():
self.assertEqual(sorted(val), sorted(json[key])) # we don't care about the order of the items
user = get_user(email, realm)
new_streams = self.get_streams(user)
self.assertEqual(sorted(new_streams), sorted(new_subs))
def test_successful_subscriptions_remove(self) -> None:
"""
Calling DELETE /json/users/me/subscriptions should successfully remove streams,
and should determine which were removed vs which weren't subscribed to.
We cannot randomly generate stream names because the remove code
verifies whether streams exist.
"""
self.assertGreaterEqual(len(self.streams), 2)
streams_to_remove = self.streams[1:]
not_subbed = []
for stream in Stream.objects.all():
if stream.name not in self.streams:
not_subbed.append(stream.name)
random.shuffle(not_subbed)
self.assertNotEqual(len(not_subbed), 0) # necessary for full test coverage
try_to_remove = not_subbed[:3] # attempt to remove up to 3 streams not already subbed to
streams_to_remove.extend(try_to_remove)
self.helper_check_subs_before_and_after_remove(streams_to_remove,
{"removed": self.streams[1:], "not_removed": try_to_remove},
self.test_email, [self.streams[0]], self.test_realm)
def test_subscriptions_remove_fake_stream(self) -> None:
"""
Calling DELETE /json/users/me/subscriptions on a stream that doesn't exist
should return a JSON error.
"""
random_streams = self.make_random_stream_names(self.streams)
self.assertNotEqual(len(random_streams), 0) # necessary for full test coverage
streams_to_remove = random_streams[:1] # pick only one fake stream, to make checking the error message easy
result = self.client_delete("/json/users/me/subscriptions",
{"subscriptions": ujson.dumps(streams_to_remove)})
self.assert_json_error(result, f"Stream(s) ({random_streams[0]}) do not exist")
def helper_subscriptions_exists(self, stream: str, expect_success: bool, subscribed: bool) -> None:
"""
Call /json/subscriptions/exists on a stream and expect a certain result.
"""
result = self.client_post("/json/subscriptions/exists",
{"stream": stream})
json = result.json()
if expect_success:
self.assert_json_success(result)
else:
self.assertEqual(result.status_code, 404)
if subscribed:
self.assertIn("subscribed", json)
self.assertEqual(json["subscribed"], subscribed)
def test_successful_subscriptions_exists_subbed(self) -> None:
"""
        Calling /json/subscriptions/exists on a stream to which you are subbed
should return that it exists and that you are subbed.
"""
self.assertNotEqual(len(self.streams), 0) # necessary for full test coverage
self.helper_subscriptions_exists(self.streams[0], True, True)
def test_successful_subscriptions_exists_not_subbed(self) -> None:
"""
        Calling /json/subscriptions/exists on a stream to which you are not
subbed should return that it exists and that you are not subbed.
"""
all_stream_names = [stream.name for stream in Stream.objects.filter(realm=self.test_realm)]
streams_not_subbed = list(set(all_stream_names) - set(self.streams))
self.assertNotEqual(len(streams_not_subbed), 0) # necessary for full test coverage
self.helper_subscriptions_exists(streams_not_subbed[0], True, False)
def test_subscriptions_does_not_exist(self) -> None:
"""
        Calling /json/subscriptions/exists on a stream that doesn't exist should
return that it doesn't exist.
"""
random_streams = self.make_random_stream_names(self.streams)
self.assertNotEqual(len(random_streams), 0) # necessary for full test coverage
self.helper_subscriptions_exists(random_streams[0], False, False)
def test_subscriptions_exist_invalid_name(self) -> None:
"""
        Calling /json/subscriptions/exists on a stream whose name is invalid (as
defined by valid_stream_name in zerver/views.py) should return a JSON
error.
"""
# currently, the only invalid stream name is the empty string
invalid_stream_name = ""
result = self.client_post("/json/subscriptions/exists",
{"stream": invalid_stream_name})
self.assert_json_error(result, "Invalid stream name ''")
def test_existing_subscriptions_autosubscription(self) -> None:
"""
        Call /json/subscriptions/exists on an existing stream and autosubscribe to it.
"""
stream_name = "new_public_stream"
cordelia = self.example_user('cordelia')
result = self.common_subscribe_to_streams(cordelia, [stream_name],
invite_only=False)
result = self.client_post("/json/subscriptions/exists",
{"stream": stream_name, "autosubscribe": "false"})
self.assert_json_success(result)
self.assertIn("subscribed", result.json())
self.assertFalse(result.json()["subscribed"])
result = self.client_post("/json/subscriptions/exists",
{"stream": stream_name, "autosubscribe": "true"})
self.assert_json_success(result)
self.assertIn("subscribed", result.json())
self.assertTrue(result.json()["subscribed"])
def test_existing_subscriptions_autosubscription_private_stream(self) -> None:
"""Call /json/subscriptions/exist on an existing private stream with
autosubscribe should fail.
"""
stream_name = "Saxony"
cordelia = self.example_user('cordelia')
result = self.common_subscribe_to_streams(cordelia, [stream_name],
invite_only=True)
stream = get_stream(stream_name, self.test_realm)
result = self.client_post("/json/subscriptions/exists",
{"stream": stream_name, "autosubscribe": "true"})
# We can't see invite-only streams here
self.assert_json_error(result, "Invalid stream name 'Saxony'", status_code=404)
# Importantly, we are not now subscribed
self.assertEqual(num_subscribers_for_stream_id(stream.id), 1)
# A user who is subscribed still sees the stream exists
self.login('cordelia')
result = self.client_post("/json/subscriptions/exists",
{"stream": stream_name, "autosubscribe": "false"})
self.assert_json_success(result)
self.assertIn("subscribed", result.json())
self.assertTrue(result.json()["subscribed"])
def get_subscription(self, user_profile: UserProfile, stream_name: str) -> Subscription:
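        # Helper: fetch the Subscription row linking user_profile to the named
        # stream via its stream Recipient.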
stream = get_stream(stream_name, self.test_realm)
return Subscription.objects.get(
user_profile=user_profile,
recipient__type=Recipient.STREAM,
recipient__type_id=stream.id,
)
def test_subscriptions_add_notification_default_none(self) -> None:
"""
        When creating a subscription, the desktop, push, audible, and email
        notification settings for that stream are None. A value of None means
        to use the values inherited from the global notification settings.
"""
user_profile = self.example_user('iago')
invitee_user_id = user_profile.id
invitee_realm = user_profile.realm
user_profile.enable_stream_desktop_notifications = True
user_profile.enable_stream_push_notifications = True
user_profile.enable_stream_audible_notifications = True
user_profile.enable_stream_email_notifications = True
user_profile.save()
current_stream = self.get_streams(user_profile)[0]
invite_streams = self.make_random_stream_names([current_stream])
self.assert_adding_subscriptions_for_principal(invitee_user_id, invitee_realm, invite_streams)
subscription = self.get_subscription(user_profile, invite_streams[0])
with mock.patch('zerver.models.Recipient.__str__', return_value='recip'):
self.assertEqual(str(subscription),
'<Subscription: '
f'<UserProfile: {user_profile.email} {user_profile.realm}> -> recip>')
self.assertIsNone(subscription.desktop_notifications)
self.assertIsNone(subscription.push_notifications)
self.assertIsNone(subscription.audible_notifications)
self.assertIsNone(subscription.email_notifications)
def test_mark_messages_as_unread_on_unsubscribe(self) -> None:
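        # Set up unread messages in two public streams and one private stream,
        # then check that streams a user unsubscribes from (and later rejoins)
        # no longer appear in the aggregated unread data.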
realm = get_realm("zulip")
user = self.example_user("iago")
random_user = self.example_user("hamlet")
stream1 = ensure_stream(realm, "stream1", invite_only=False)
stream2 = ensure_stream(realm, "stream2", invite_only=False)
private = ensure_stream(realm, "private_stream", invite_only=True)
self.subscribe(user, "stream1")
self.subscribe(user, "stream2")
self.subscribe(user, "private_stream")
self.subscribe(random_user, "stream1")
self.subscribe(random_user, "stream2")
self.subscribe(random_user, "private_stream")
self.send_stream_message(random_user, "stream1", "test", "test")
self.send_stream_message(random_user, "stream2", "test", "test")
self.send_stream_message(random_user, "private_stream", "test", "test")
def get_unread_stream_data() -> List[Dict[str, Any]]:
raw_unread_data = get_raw_unread_data(user)
aggregated_data = aggregate_unread_data(raw_unread_data)
return aggregated_data['streams']
result = get_unread_stream_data()
self.assert_length(result, 3)
self.assertEqual(result[0]['stream_id'], stream1.id)
self.assertEqual(result[1]['stream_id'], stream2.id)
self.assertEqual(result[2]['stream_id'], private.id)
        # Unsubscribing should mark all the messages in stream2 and private_stream as read
self.unsubscribe(user, "stream2")
self.unsubscribe(user, "private_stream")
self.subscribe(user, "stream2")
self.subscribe(user, "private_stream")
result = get_unread_stream_data()
self.assert_length(result, 1)
self.assertEqual(result[0]['stream_id'], stream1.id)
def test_gather_subscriptions_excludes_deactivated_streams(self) -> None:
"""
Check that gather_subscriptions_helper does not include deactivated streams in its
results.
"""
realm = get_realm("zulip")
admin_user = self.example_user("iago")
non_admin_user = self.example_user("cordelia")
self.login_user(admin_user)
for stream_name in ["stream1", "stream2", "stream3"]:
self.make_stream(stream_name, realm=realm, invite_only=False)
self.subscribe(admin_user, stream_name)
self.subscribe(non_admin_user, stream_name)
self.subscribe(self.example_user("othello"), stream_name)
def delete_stream(stream_name: str) -> None:
stream_id = get_stream(stream_name, realm).id
result = self.client_delete(f'/json/streams/{stream_id}')
self.assert_json_success(result)
# Deleted/deactivated stream should not be returned in the helper results
admin_before_delete = gather_subscriptions_helper(admin_user)
non_admin_before_delete = gather_subscriptions_helper(non_admin_user)
# Delete our stream
delete_stream("stream1")
# Get subs after delete
admin_after_delete = gather_subscriptions_helper(admin_user)
non_admin_after_delete = gather_subscriptions_helper(non_admin_user)
# Compare results - should be 1 stream less
self.assertTrue(
len(admin_before_delete[0]) == len(admin_after_delete[0]) + 1,
'Expected exactly 1 less stream from gather_subscriptions_helper')
self.assertTrue(
len(non_admin_before_delete[0]) == len(non_admin_after_delete[0]) + 1,
'Expected exactly 1 less stream from gather_subscriptions_helper')
def test_validate_user_access_to_subscribers_helper(self) -> None:
"""
        Ensure that validate_user_access_to_subscribers_helper properly raises
        a ValidationError when the user is missing or not in the stream's realm.
"""
user_profile = self.example_user('othello')
realm_name = 'no_othello_allowed'
realm = do_create_realm(realm_name, 'Everyone but Othello is allowed')
stream_dict = {
'name': 'publicstream',
'description': 'Public stream with public history',
'realm_id': realm.id,
}
        # For this test to work, othello can't be in the no_othello_allowed realm
self.assertNotEqual(user_profile.realm.id, realm.id, 'Expected othello user to not be in this realm.')
# This should result in missing user
with self.assertRaises(ValidationError):
validate_user_access_to_subscribers_helper(None, stream_dict, lambda: True)
# This should result in user not in realm
with self.assertRaises(ValidationError):
validate_user_access_to_subscribers_helper(user_profile, stream_dict, lambda: True)
def test_subscriptions_query_count(self) -> None:
"""
Test database query count when creating stream with api/v1/users/me/subscriptions.
"""
user1 = self.example_user("cordelia")
user2 = self.example_user("iago")
new_streams = [
'query_count_stream_1',
'query_count_stream_2',
'query_count_stream_3',
]
# Test creating a public stream when realm does not have a notification stream.
with queries_captured() as queries:
self.common_subscribe_to_streams(
self.test_user,
[new_streams[0]],
dict(principals=ujson.dumps([user1.id, user2.id])),
)
self.assert_length(queries, 39)
# Test creating private stream.
with queries_captured() as queries:
self.common_subscribe_to_streams(
self.test_user,
[new_streams[1]],
dict(principals=ujson.dumps([user1.id, user2.id])),
invite_only=True,
)
self.assert_length(queries, 39)
# Test creating a public stream with announce when realm has a notification stream.
notifications_stream = get_stream(self.streams[0], self.test_realm)
self.test_realm.notifications_stream_id = notifications_stream.id
self.test_realm.save()
with queries_captured() as queries:
self.common_subscribe_to_streams(
self.test_user,
[new_streams[2]],
dict(
announce='true',
principals=ujson.dumps([user1.id, user2.id]),
),
)
self.assert_length(queries, 50)
class GetStreamsTest(ZulipTestCase):
def test_streams_api_for_bot_owners(self) -> None:
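        # Exercise /api/v1/streams for a bot, combining the
        # include_owner_subscribed, include_public, and include_subscribed
        # flags and comparing against the owner's own subscription list.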
hamlet = self.example_user('hamlet')
test_bot = self.create_test_bot('foo', hamlet, bot_owner=hamlet)
assert test_bot is not None
realm = get_realm('zulip')
self.login_user(hamlet)
# Check it correctly lists the bot owner's subs with
# include_owner_subscribed=true
result = self.api_get(
test_bot,
"/api/v1/streams?include_owner_subscribed=true&include_public=false&include_subscribed=false")
owner_subs = self.api_get(hamlet, "/api/v1/users/me/subscriptions")
self.assert_json_success(result)
json = result.json()
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
self.assert_json_success(owner_subs)
owner_subs_json = ujson.loads(owner_subs.content)
self.assertEqual(sorted([s["name"] for s in json["streams"]]),
sorted([s["name"] for s in owner_subs_json["subscriptions"]]))
# Check it correctly lists the bot owner's subs and the
# bot's subs
self.subscribe(test_bot, 'Scotland')
result = self.api_get(
test_bot,
"/api/v1/streams?include_owner_subscribed=true&include_public=false&include_subscribed=true",
)
self.assert_json_success(result)
json = result.json()
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
actual = sorted([s["name"] for s in json["streams"]])
expected = [s["name"] for s in owner_subs_json["subscriptions"]]
expected.append('Scotland')
expected.sort()
self.assertEqual(actual, expected)
# Check it correctly lists the bot owner's subs + all public streams
self.make_stream('private_stream', realm=realm, invite_only=True)
self.subscribe(test_bot, 'private_stream')
result = self.api_get(
test_bot,
"/api/v1/streams?include_owner_subscribed=true&include_public=true&include_subscribed=false",
)
self.assert_json_success(result)
json = result.json()
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
actual = sorted([s["name"] for s in json["streams"]])
expected = [s["name"] for s in owner_subs_json["subscriptions"]]
expected.extend(['Rome', 'Venice', 'Scotland'])
expected.sort()
self.assertEqual(actual, expected)
# Check it correctly lists the bot owner's subs + all public streams +
# the bot's subs
result = self.api_get(
test_bot,
"/api/v1/streams?include_owner_subscribed=true&include_public=true&include_subscribed=true",
)
self.assert_json_success(result)
json = result.json()
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
actual = sorted([s["name"] for s in json["streams"]])
expected = [s["name"] for s in owner_subs_json["subscriptions"]]
expected.extend(['Rome', 'Venice', 'Scotland', 'private_stream'])
expected.sort()
self.assertEqual(actual, expected)
def test_all_active_streams_api(self) -> None:
url = '/api/v1/streams?include_all_active=true'
# Check non-superuser can't use include_all_active
normal_user = self.example_user('cordelia')
result = self.api_get(normal_user, url)
self.assertEqual(result.status_code, 400)
# Even realm admin users can't see all
# active streams (without additional privileges).
admin_user = self.example_user('iago')
self.assertTrue(admin_user.is_realm_admin)
result = self.api_get(admin_user, url)
self.assertEqual(result.status_code, 400)
'''
HAPPY PATH:
We can get all active streams ONLY if we are
an API "super user". We typically create
api-super-user accounts for things like
Zephyr/Jabber mirror API users, but here
we just "knight" Hamlet for testing expediency.
'''
super_user = self.example_user('hamlet')
super_user.is_api_super_user = True
super_user.save()
result = self.api_get(super_user, url)
self.assert_json_success(result)
json = result.json()
self.assertIn('streams', json)
self.assertIsInstance(json['streams'], list)
stream_names = {s['name'] for s in json['streams']}
self.assertEqual(
stream_names,
{'Venice', 'Denmark', 'Scotland', 'Verona', 'Rome'},
)
def test_public_streams_api(self) -> None:
"""
Ensure that the query we use to get public streams successfully returns
a list of streams
"""
user = self.example_user('hamlet')
realm = get_realm('zulip')
self.login_user(user)
# Check it correctly lists the user's subs with include_public=false
result = self.api_get(user, "/api/v1/streams?include_public=false")
result2 = self.api_get(user, "/api/v1/users/me/subscriptions")
self.assert_json_success(result)
json = result.json()
self.assertIn("streams", json)
self.assertIsInstance(json["streams"], list)
self.assert_json_success(result2)
json2 = ujson.loads(result2.content)
self.assertEqual(sorted([s["name"] for s in json["streams"]]),
sorted([s["name"] for s in json2["subscriptions"]]))
# Check it correctly lists all public streams with include_subscribed=false
result = self.api_get(user, "/api/v1/streams?include_public=true&include_subscribed=false")
self.assert_json_success(result)
json = result.json()
all_streams = [stream.name for stream in
Stream.objects.filter(realm=realm)]
self.assertEqual(sorted(s["name"] for s in json["streams"]),
sorted(all_streams))
class StreamIdTest(ZulipTestCase):
def test_get_stream_id(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
stream = gather_subscriptions(user)[0][0]
result = self.client_get("/json/get_stream_id?stream={}".format(stream['name']))
self.assert_json_success(result)
self.assertEqual(result.json()['stream_id'], stream['stream_id'])
def test_get_stream_id_wrong_name(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
result = self.client_get("/json/get_stream_id?stream=wrongname")
self.assert_json_error(result, "Invalid stream name 'wrongname'")
class InviteOnlyStreamTest(ZulipTestCase):
def test_must_be_subbed_to_send(self) -> None:
"""
If you try to send a message to an invite-only stream to which
you aren't subscribed, you'll get a 400.
"""
user = self.example_user('hamlet')
self.login_user(user)
# Create Saxony as an invite-only stream.
self.assert_json_success(
self.common_subscribe_to_streams(user, ["Saxony"],
invite_only=True))
cordelia = self.example_user("cordelia")
with self.assertRaises(JsonableError):
self.send_stream_message(cordelia, "Saxony")
def test_list_respects_invite_only_bit(self) -> None:
"""
Make sure that /api/v1/users/me/subscriptions properly returns
the invite-only bit for streams that are invite-only
"""
user = self.example_user('hamlet')
self.login_user(user)
result1 = self.common_subscribe_to_streams(user, ["Saxony"], invite_only=True)
self.assert_json_success(result1)
result2 = self.common_subscribe_to_streams(user, ["Normandy"], invite_only=False)
self.assert_json_success(result2)
result = self.api_get(user, "/api/v1/users/me/subscriptions")
self.assert_json_success(result)
self.assertIn("subscriptions", result.json())
for sub in result.json()["subscriptions"]:
if sub['name'] == "Normandy":
self.assertEqual(sub['invite_only'], False, "Normandy was mistakenly marked private")
if sub['name'] == "Saxony":
self.assertEqual(sub['invite_only'], True, "Saxony was not properly marked private")
@slow("lots of queries")
def test_inviteonly(self) -> None:
# Creating an invite-only stream is allowed
hamlet = self.example_user('hamlet')
othello = self.example_user('othello')
stream_name = "Saxony"
result = self.common_subscribe_to_streams(hamlet, [stream_name], invite_only=True)
self.assert_json_success(result)
json = result.json()
self.assertEqual(json["subscribed"], {hamlet.email: [stream_name]})
self.assertEqual(json["already_subscribed"], {})
# Subscribing oneself to an invite-only stream is not allowed
self.login_user(othello)
result = self.common_subscribe_to_streams(othello, [stream_name])
self.assert_json_error(result, 'Unable to access stream (Saxony).')
# authorization_errors_fatal=False works
self.login_user(othello)
result = self.common_subscribe_to_streams(othello, [stream_name],
extra_post_data={'authorization_errors_fatal': ujson.dumps(False)})
self.assert_json_success(result)
json = result.json()
self.assertEqual(json["unauthorized"], [stream_name])
self.assertEqual(json["subscribed"], {})
self.assertEqual(json["already_subscribed"], {})
# Inviting another user to an invite-only stream is allowed
self.login_user(hamlet)
result = self.common_subscribe_to_streams(
hamlet, [stream_name],
extra_post_data={'principals': ujson.dumps([othello.id])})
self.assert_json_success(result)
json = result.json()
self.assertEqual(json["subscribed"], {othello.email: [stream_name]})
self.assertEqual(json["already_subscribed"], {})
# Make sure both users are subscribed to this stream
stream_id = get_stream(stream_name, hamlet.realm).id
result = self.api_get(hamlet, f"/api/v1/streams/{stream_id}/members")
self.assert_json_success(result)
json = result.json()
self.assertTrue(othello.email in json['subscribers'])
self.assertTrue(hamlet.email in json['subscribers'])
class GetSubscribersTest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.user_profile = self.example_user('hamlet')
self.login_user(self.user_profile)
def assert_user_got_subscription_notification(self, expected_msg: str) -> None:
# verify that the user was sent a message informing them about the subscription
msg = self.get_last_message()
self.assertEqual(msg.recipient.type, msg.recipient.PERSONAL)
self.assertEqual(msg.sender_id, self.notification_bot().id)
def non_ws(s: str) -> str:
return s.replace('\n', '').replace(' ', '')
self.assertEqual(non_ws(msg.content), non_ws(expected_msg))
def check_well_formed_result(self, result: Dict[str, Any], stream_name: str, realm: Realm) -> None:
"""
A successful call to get_subscribers returns the list of subscribers in
the form:
{"msg": "",
"result": "success",
"subscribers": [self.example_email("hamlet"), self.example_email("prospero")]}
"""
self.assertIn("subscribers", result)
self.assertIsInstance(result["subscribers"], list)
true_subscribers = [user_profile.email for user_profile in self.users_subscribed_to_stream(
stream_name, realm)]
self.assertEqual(sorted(result["subscribers"]), sorted(true_subscribers))
def make_subscriber_request(self, stream_id: int, user: Optional[UserProfile]=None) -> HttpResponse:
if user is None:
user = self.user_profile
return self.api_get(user, f"/api/v1/streams/{stream_id}/members")
def make_successful_subscriber_request(self, stream_name: str) -> None:
stream_id = get_stream(stream_name, self.user_profile.realm).id
result = self.make_subscriber_request(stream_id)
self.assert_json_success(result)
self.check_well_formed_result(result.json(),
stream_name, self.user_profile.realm)
def test_subscriber(self) -> None:
"""
get_subscribers returns the list of subscribers.
"""
stream_name = gather_subscriptions(self.user_profile)[0][0]['name']
self.make_successful_subscriber_request(stream_name)
@slow("common_subscribe_to_streams is slow")
def test_gather_subscriptions(self) -> None:
"""
        gather_subscriptions returns correct results with a bounded number of
        database queries. (We also use this test to verify the subscription
        notifications sent to users who get subscribed to streams.)
"""
streams = [f"stream_{i}" for i in range(10)]
for stream_name in streams:
self.make_stream(stream_name)
users_to_subscribe = [
self.user_profile.id,
self.example_user("othello").id,
self.example_user("cordelia").id,
]
ret = self.common_subscribe_to_streams(
self.user_profile,
streams,
dict(principals=ujson.dumps(users_to_subscribe)))
self.assert_json_success(ret)
msg = '''
@**King Hamlet** subscribed you to the following streams:
* #**stream_0**
* #**stream_1**
* #**stream_2**
* #**stream_3**
* #**stream_4**
* #**stream_5**
* #**stream_6**
* #**stream_7**
* #**stream_8**
* #**stream_9**
'''
self.assert_user_got_subscription_notification(msg)
# Subscribe ourself first.
ret = self.common_subscribe_to_streams(
self.user_profile,
["stream_invite_only_1"],
dict(principals=ujson.dumps([self.user_profile.id])),
invite_only=True)
self.assert_json_success(ret)
# Now add in other users, and this should trigger messages
# to notify the user.
ret = self.common_subscribe_to_streams(
self.user_profile,
["stream_invite_only_1"],
dict(principals=ujson.dumps(users_to_subscribe)),
invite_only=True)
self.assert_json_success(ret)
msg = '''
@**King Hamlet** subscribed you to the stream #**stream_invite_only_1**.
'''
self.assert_user_got_subscription_notification(msg)
with queries_captured() as queries:
subscribed_streams, _ = gather_subscriptions(
self.user_profile, include_subscribers=True)
self.assertTrue(len(subscribed_streams) >= 11)
for sub in subscribed_streams:
if not sub["name"].startswith("stream_"):
continue
self.assertTrue(len(sub["subscribers"]) == len(users_to_subscribe))
self.assert_length(queries, 6)
@slow("common_subscribe_to_streams is slow")
def test_never_subscribed_streams(self) -> None:
"""
        Check that never_subscribed streams are fetched correctly and do not include invite_only streams.
"""
realm = get_realm("zulip")
users_to_subscribe = [
self.example_user("othello").id,
self.example_user("cordelia").id,
]
public_streams = [
'test_stream_public_1',
'test_stream_public_2',
'test_stream_public_3',
'test_stream_public_4',
'test_stream_public_5',
]
private_streams = [
'test_stream_invite_only_1',
'test_stream_invite_only_2',
]
def create_public_streams() -> None:
for stream_name in public_streams:
self.make_stream(stream_name, realm=realm)
ret = self.common_subscribe_to_streams(
self.user_profile,
public_streams,
dict(principals=ujson.dumps(users_to_subscribe)),
)
self.assert_json_success(ret)
create_public_streams()
def create_private_streams() -> None:
ret = self.common_subscribe_to_streams(
self.user_profile,
private_streams,
dict(principals=ujson.dumps(users_to_subscribe)),
invite_only=True,
)
self.assert_json_success(ret)
create_private_streams()
def get_never_subscribed() -> List[Dict[str, Any]]:
with queries_captured() as queries:
sub_data = gather_subscriptions_helper(self.user_profile)
never_subscribed = sub_data[2]
self.assert_length(queries, 5)
# Ignore old streams.
never_subscribed = [
dct for dct in never_subscribed
if dct['name'].startswith('test_')
]
return never_subscribed
never_subscribed = get_never_subscribed()
# Invite only stream should not be there in never_subscribed streams
self.assertEqual(len(never_subscribed), len(public_streams))
for stream_dict in never_subscribed:
name = stream_dict['name']
self.assertFalse('invite_only' in name)
self.assertTrue(len(stream_dict["subscribers"]) == len(users_to_subscribe))
        # Private stream subscriber data is sent to all realm admins.
def test_admin_case() -> None:
self.user_profile.role = UserProfile.ROLE_REALM_ADMINISTRATOR
# Test realm admins can get never subscribed private stream's subscribers.
never_subscribed = get_never_subscribed()
self.assertEqual(
len(never_subscribed),
len(public_streams) + len(private_streams),
)
for stream_dict in never_subscribed:
self.assertTrue(len(stream_dict["subscribers"]) == len(users_to_subscribe))
test_admin_case()
def test_gather_subscribed_streams_for_guest_user(self) -> None:
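        # Guests should only receive subscriber data for streams they are
        # actually subscribed to; check the subscribed, unsubscribed, and
        # never-subscribed buckets separately.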
guest_user = self.example_user("polonius")
stream_name_sub = "public_stream_1"
self.make_stream(stream_name_sub, realm=get_realm("zulip"))
self.subscribe(guest_user, stream_name_sub)
stream_name_unsub = "public_stream_2"
self.make_stream(stream_name_unsub, realm=get_realm("zulip"))
self.subscribe(guest_user, stream_name_unsub)
self.unsubscribe(guest_user, stream_name_unsub)
stream_name_never_sub = "public_stream_3"
self.make_stream(stream_name_never_sub, realm=get_realm("zulip"))
normal_user = self.example_user("aaron")
self.subscribe(normal_user, stream_name_sub)
self.subscribe(normal_user, stream_name_unsub)
self.subscribe(normal_user, stream_name_unsub)
subs, unsubs, neversubs = gather_subscriptions_helper(guest_user)
# Guest users get info about subscribed public stream's subscribers
expected_stream_exists = False
for sub in subs:
if sub["name"] == stream_name_sub:
expected_stream_exists = True
self.assertEqual(len(sub["subscribers"]), 2)
self.assertTrue(expected_stream_exists)
# Guest users don't get info about unsubscribed public stream's subscribers
expected_stream_exists = False
for unsub in unsubs:
if unsub["name"] == stream_name_unsub:
expected_stream_exists = True
self.assertNotIn("subscribers", unsub)
self.assertTrue(expected_stream_exists)
        # Guest users don't get data about never-subscribed public streams
self.assertEqual(len(neversubs), 0)
def test_previously_subscribed_private_streams(self) -> None:
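        # After unsubscribing from a private stream, only realm admins should
        # still see its subscriber list in the "unsubscribed" data.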
admin_user = self.example_user("iago")
non_admin_user = self.example_user("cordelia")
stream_name = "private_stream"
self.make_stream(stream_name, realm=get_realm("zulip"), invite_only=True)
self.subscribe(admin_user, stream_name)
self.subscribe(non_admin_user, stream_name)
self.subscribe(self.example_user("othello"), stream_name)
self.unsubscribe(admin_user, stream_name)
self.unsubscribe(non_admin_user, stream_name)
# Test admin user gets previously subscribed private stream's subscribers.
sub_data = gather_subscriptions_helper(admin_user)
unsubscribed_streams = sub_data[1]
self.assertEqual(len(unsubscribed_streams), 1)
self.assertEqual(len(unsubscribed_streams[0]["subscribers"]), 1)
# Test non admin users cannot get previously subscribed private stream's subscribers.
sub_data = gather_subscriptions_helper(non_admin_user)
unsubscribed_streams = sub_data[1]
self.assertEqual(len(unsubscribed_streams), 1)
self.assertFalse('subscribers' in unsubscribed_streams[0])
def test_gather_subscriptions_mit(self) -> None:
"""
        gather_subscriptions returns correct results with a bounded number of database queries
"""
# Subscribe only ourself because invites are disabled on mit.edu
mit_user_profile = self.mit_user('starnine')
user_id = mit_user_profile.id
users_to_subscribe = [user_id, self.mit_user("espuser").id]
for email in users_to_subscribe:
stream = self.subscribe(mit_user_profile, "mit_stream")
self.assertTrue(stream.is_in_zephyr_realm)
ret = self.common_subscribe_to_streams(
mit_user_profile,
["mit_invite_only"],
dict(principals=ujson.dumps(users_to_subscribe)),
invite_only=True,
subdomain="zephyr")
self.assert_json_success(ret)
with queries_captured() as queries:
subscribed_streams, _ = gather_subscriptions(
mit_user_profile, include_subscribers=True)
self.assertTrue(len(subscribed_streams) >= 2)
for sub in subscribed_streams:
if not sub["name"].startswith("mit_"):
raise AssertionError("Unexpected stream!")
if sub["name"] == "mit_invite_only":
self.assertTrue(len(sub["subscribers"]) == len(users_to_subscribe))
else:
self.assertTrue(len(sub["subscribers"]) == 0)
self.assert_length(queries, 6)
def test_nonsubscriber(self) -> None:
"""
Even a non-subscriber to a public stream can query a stream's membership
with get_subscribers.
"""
# Create a stream for which Hamlet is the only subscriber.
stream_name = "Saxony"
self.common_subscribe_to_streams(self.user_profile, [stream_name])
other_user = self.example_user("othello")
# Fetch the subscriber list as a non-member.
self.login_user(other_user)
self.make_successful_subscriber_request(stream_name)
def test_subscriber_private_stream(self) -> None:
"""
A subscriber to a private stream can query that stream's membership.
"""
stream_name = "Saxony"
self.common_subscribe_to_streams(self.user_profile, [stream_name],
invite_only=True)
self.make_successful_subscriber_request(stream_name)
stream_id = get_stream(stream_name, self.user_profile.realm).id
# Verify another user can't get the data.
self.login('cordelia')
result = self.client_get(f"/json/streams/{stream_id}/members")
self.assert_json_error(result, 'Invalid stream id')
# But an organization administrator can
self.login('iago')
result = self.client_get(f"/json/streams/{stream_id}/members")
self.assert_json_success(result)
def test_json_get_subscribers_stream_not_exist(self) -> None:
"""
        json_get_subscribers returns a JSON error when the requested stream does not exist.
"""
stream_id = 99999999
result = self.client_get(f"/json/streams/{stream_id}/members")
self.assert_json_error(result, 'Invalid stream id')
def test_json_get_subscribers(self) -> None:
"""
json_get_subscribers in zerver/views/streams.py
also returns the list of subscribers for a stream, when requested.
"""
stream_name = gather_subscriptions(self.user_profile)[0][0]['name']
stream_id = get_stream(stream_name, self.user_profile.realm).id
expected_subscribers = gather_subscriptions(
self.user_profile, include_subscribers=True)[0][0]['subscribers']
result = self.client_get(f"/json/streams/{stream_id}/members")
self.assert_json_success(result)
result_dict = result.json()
self.assertIn('subscribers', result_dict)
self.assertIsInstance(result_dict['subscribers'], list)
subscribers: List[str] = []
for subscriber in result_dict['subscribers']:
self.assertIsInstance(subscriber, str)
subscribers.append(subscriber)
self.assertEqual(set(subscribers), set(expected_subscribers))
def test_nonsubscriber_private_stream(self) -> None:
"""
A non-subscriber who is not a realm admin can't query a private stream's membership.
But an unsubscribed realm admin can.
"""
# Create a private stream for which Hamlet is the only subscriber.
stream_name = "NewStream"
self.common_subscribe_to_streams(self.user_profile, [stream_name],
invite_only=True)
user_profile = self.example_user('othello')
# Try to fetch the subscriber list as a non-member & non-realm-admin-user.
stream_id = get_stream(stream_name, user_profile.realm).id
result = self.make_subscriber_request(stream_id, user=user_profile)
self.assert_json_error(result, "Invalid stream id")
# Try to fetch the subscriber list as a non-member & realm-admin-user.
self.login('iago')
self.make_successful_subscriber_request(stream_name)
class AccessStreamTest(ZulipTestCase):
def test_access_stream(self) -> None:
"""
A comprehensive security test for the access_stream_by_* API functions.
"""
# Create a private stream for which Hamlet is the only subscriber.
hamlet = self.example_user('hamlet')
stream_name = "new_private_stream"
self.login_user(hamlet)
self.common_subscribe_to_streams(hamlet, [stream_name],
invite_only=True)
stream = get_stream(stream_name, hamlet.realm)
othello = self.example_user('othello')
# Nobody can access a stream that doesn't exist
with self.assertRaisesRegex(JsonableError, "Invalid stream id"):
access_stream_by_id(hamlet, 501232)
with self.assertRaisesRegex(JsonableError, "Invalid stream name 'invalid stream'"):
access_stream_by_name(hamlet, "invalid stream")
# Hamlet can access the private stream
(stream_ret, rec_ret, sub_ret) = access_stream_by_id(hamlet, stream.id)
self.assertEqual(stream.id, stream_ret.id)
assert sub_ret is not None
self.assertEqual(sub_ret.recipient, rec_ret)
self.assertEqual(sub_ret.recipient.type_id, stream.id)
(stream_ret2, rec_ret2, sub_ret2) = access_stream_by_name(hamlet, stream.name)
self.assertEqual(stream_ret.id, stream_ret2.id)
self.assertEqual(sub_ret, sub_ret2)
self.assertEqual(rec_ret, rec_ret2)
# Othello cannot access the private stream
with self.assertRaisesRegex(JsonableError, "Invalid stream id"):
access_stream_by_id(othello, stream.id)
with self.assertRaisesRegex(JsonableError, "Invalid stream name 'new_private_stream'"):
access_stream_by_name(othello, stream.name)
# Both Othello and Hamlet can access a public stream that only
# Hamlet is subscribed to in this realm
public_stream_name = "public_stream"
self.common_subscribe_to_streams(hamlet, [public_stream_name],
invite_only=False)
public_stream = get_stream(public_stream_name, hamlet.realm)
access_stream_by_id(othello, public_stream.id)
access_stream_by_name(othello, public_stream.name)
access_stream_by_id(hamlet, public_stream.id)
access_stream_by_name(hamlet, public_stream.name)
# Nobody can access a public stream in another realm
mit_realm = get_realm("zephyr")
mit_stream = ensure_stream(mit_realm, "mit_stream", invite_only=False)
sipbtest = self.mit_user("sipbtest")
with self.assertRaisesRegex(JsonableError, "Invalid stream id"):
access_stream_by_id(hamlet, mit_stream.id)
with self.assertRaisesRegex(JsonableError, "Invalid stream name 'mit_stream'"):
access_stream_by_name(hamlet, mit_stream.name)
with self.assertRaisesRegex(JsonableError, "Invalid stream id"):
access_stream_by_id(sipbtest, stream.id)
with self.assertRaisesRegex(JsonableError, "Invalid stream name 'new_private_stream'"):
access_stream_by_name(sipbtest, stream.name)
# MIT realm users cannot access even public streams in their realm
with self.assertRaisesRegex(JsonableError, "Invalid stream id"):
access_stream_by_id(sipbtest, mit_stream.id)
with self.assertRaisesRegex(JsonableError, "Invalid stream name 'mit_stream'"):
access_stream_by_name(sipbtest, mit_stream.name)
# But they can access streams they are subscribed to
self.common_subscribe_to_streams(sipbtest, [mit_stream.name], subdomain="zephyr")
access_stream_by_id(sipbtest, mit_stream.id)
access_stream_by_name(sipbtest, mit_stream.name)
def test_stream_access_by_guest(self) -> None:
guest_user_profile = self.example_user('polonius')
self.login_user(guest_user_profile)
stream_name = "public_stream_1"
stream = self.make_stream(stream_name, guest_user_profile.realm, invite_only=False)
# Guest users don't have access to unsubscribed public streams
with self.assertRaisesRegex(JsonableError, "Invalid stream id"):
access_stream_by_id(guest_user_profile, stream.id)
# Guest users have access to subscribed public streams
self.subscribe(guest_user_profile, stream_name)
(stream_ret, rec_ret, sub_ret) = access_stream_by_id(guest_user_profile, stream.id)
assert sub_ret is not None
self.assertEqual(stream.id, stream_ret.id)
self.assertEqual(sub_ret.recipient, rec_ret)
self.assertEqual(sub_ret.recipient.type_id, stream.id)
stream_name = "private_stream_1"
stream = self.make_stream(stream_name, guest_user_profile.realm, invite_only=True)
# Obviously, a guest user doesn't have access to unsubscribed private streams either
with self.assertRaisesRegex(JsonableError, "Invalid stream id"):
access_stream_by_id(guest_user_profile, stream.id)
# Guest users have access to subscribed private streams
self.subscribe(guest_user_profile, stream_name)
(stream_ret, rec_ret, sub_ret) = access_stream_by_id(guest_user_profile, stream.id)
assert sub_ret is not None
self.assertEqual(stream.id, stream_ret.id)
self.assertEqual(sub_ret.recipient, rec_ret)
self.assertEqual(sub_ret.recipient.type_id, stream.id)
class StreamTrafficTest(ZulipTestCase):
def test_average_weekly_stream_traffic_calculation(self) -> None:
# No traffic data for the stream
self.assertEqual(
get_average_weekly_stream_traffic(42, timezone_now() - timedelta(days=300), {1: 4003}), 0)
# Use high numbers here to make it more likely to catch small errors in the
# denominators of the calculations. That said, we don't want to go over 100,
# since then the two-significant-digit rounding gets applied (see the sketch
# after this test class).
# old stream
self.assertEqual(
get_average_weekly_stream_traffic(42, timezone_now() - timedelta(days=300), {42: 98*4+3}), 98)
# stream between 7 and 27 days old
self.assertEqual(
get_average_weekly_stream_traffic(42, timezone_now() - timedelta(days=10), {42: (98*10+9) // 7}), 98)
# stream less than 7 days old
self.assertEqual(
get_average_weekly_stream_traffic(42, timezone_now() - timedelta(days=5), {42: 100}), None)
# average traffic between 0 and 1
self.assertEqual(
get_average_weekly_stream_traffic(42, timezone_now() - timedelta(days=300), {42: 1}), 1)
def test_round_to_2_significant_digits(self) -> None:
self.assertEqual(120, round_to_2_significant_digits(116))
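# A minimal, self-contained sketch (not part of the Zulip codebase) of the
# behaviour the two tests above assert: averaging recent traffic per week and
# rounding to two significant digits once the average reaches 100. The helper
# names below are invented for illustration only.
def _sketch_round_to_2_significant_digits(number: int) -> int:
    # Values under 100 already have at most two significant digits.
    if number < 100:
        return number
    # round() with a negative ndigits zeroes the trailing digits, e.g. 116 -> 120.
    return int(round(number, 2 - len(str(number))))
def _sketch_average_weekly_traffic(message_count: int, weeks: int) -> int:
    # Floor division matches the "old stream" case above: (98 * 4 + 3) // 4 == 98.
    return _sketch_round_to_2_significant_digits(message_count // weeks)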
|
the-stack_0_25614
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import gc
import numpy as np
import os
import threading
import weakref
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.autograph.core import ag_ctx
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework import type_spec
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.gradients # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.util import compat
class ResourceTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBuildGraph(self):
with self.cached_session():
pt = test_ops.stub_resource_handle_op(container="a", shared_name="b")
test_ops.resource_create_op(pt).run()
@test_util.run_deprecated_v1
def testInitialize(self):
with self.cached_session():
handle = test_ops.stub_resource_handle_op(container="a", shared_name="b")
resources.register_resource(
handle=handle,
create_op=test_ops.resource_create_op(handle),
is_initialized_op=test_ops.resource_initialized_op(handle))
self.assertEquals(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 1)
resources.initialize_resources(resources.shared_resources()).run()
self.assertEquals(
len(
resources.report_uninitialized_resources(
resources.shared_resources()).eval()), 0)
class TensorAndShapeTest(test_util.TensorFlowTestCase):
def testShape(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertEqual(tensor_shape.unknown_shape(), t.get_shape())
t.set_shape([1, 2, 3])
self.assertEqual([1, 2, 3], t.get_shape())
def testIterable(self):
if not context.executing_eagerly():
self.skipTest("Eager-mode test")
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
with self.assertRaisesRegexp(TypeError, "Cannot iterate"):
next(iter(t))
def testIterableGraph(self):
if context.executing_eagerly():
self.skipTest("Graph-mode test")
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
with self.assertRaisesRegexp(TypeError, "iterating.*not allowed in Graph"):
next(iter(t))
with self.assertRaisesRegexp(
TypeError, "iterating.*AutoGraph did not convert"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
next(iter(t))
with self.assertRaisesRegexp(
TypeError, "iterating.*AutoGraph is disabled"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
next(iter(t))
def testImplicitBool(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.bool])
t = op.outputs[0]
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*not allowed in Graph"):
bool(t)
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*AutoGraph did not convert"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.ENABLED):
bool(t)
with self.assertRaisesRegexp(
TypeError, "using.*as a.*bool.*AutoGraph is disabled"):
with ag_ctx.ControlStatusCtx(ag_ctx.Status.DISABLED):
bool(t)
def testAddShape(self):
with self.cached_session():
a = array_ops.zeros([2, 3])
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual([2, 3], c.shape)
@test_util.run_deprecated_v1
def testUnknownDim(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
b = array_ops.placeholder(dtype=dtypes.float32, shape=[2, None, 3])
c = a + b
self.assertEqual([2, None, 3], c.shape.as_list())
@test_util.run_deprecated_v1
def testUnknownShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=None)
b = array_ops.ones([1, 3])
c = a + b
self.assertEqual(tensor_shape.unknown_shape(), c.shape)
@test_util.run_deprecated_v1
def testScalarShape(self):
with self.cached_session():
a = array_ops.placeholder(dtype=dtypes.float32, shape=[])
b = array_ops.ones([])
c = a + b
self.assertEqual(tensor_shape.TensorShape([]), c.shape)
@test_util.run_deprecated_v1
def testShapeFunctionError(self):
with self.cached_session():
a = array_ops.ones([1, 2, 3])
b = array_ops.ones([4, 5, 6])
with self.assertRaisesRegexp(
ValueError, r"Dimensions must be equal, but are 2 and 5 for .*add"
r".*Add(V2)?.* with input shapes: \[1,2,3\], \[4,5,6\]."):
_ = a + b
def testNumpyArray(self):
with ops.Graph().as_default():
x = array_ops.ones((3, 4), name="test_ones")
with self.assertRaisesRegexp(NotImplementedError,
r"Cannot convert a symbolic.+test_ones"):
np.array(x)
with self.assertRaisesRegexp(TypeError, "not well defined.+test_ones"):
len(x)
# EagerTensors should still behave as numpy arrays.
with context.eager_mode():
x = array_ops.ones((3, 4))
self.assertAllEqual(x, np.ones((3, 4)))
self.assertAllEqual(np.array(x), np.ones((3, 4)))
self.assertEqual(len(x), 3)
def testRef(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x1.experimental_ref())
self.assertEqual(x2.experimental_ref(), x2.experimental_ref())
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
self.assertEqual(y.experimental_ref(), y.experimental_ref())
self.assertEqual(z.experimental_ref(), z.experimental_ref())
self.assertEqual(w.experimental_ref(), w.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), y.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), z.experimental_ref())
self.assertNotEqual(x1.experimental_ref(), w.experimental_ref())
self.assertNotEqual(y.experimental_ref(), z.experimental_ref())
self.assertNotEqual(y.experimental_ref(), w.experimental_ref())
self.assertNotEqual(z.experimental_ref(), w.experimental_ref())
def testRefDeref(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertIs(x1, x1.experimental_ref().deref())
self.assertIs(x2, x2.experimental_ref().deref())
self.assertIs(x1, x2.experimental_ref().deref())
self.assertIs(x2, x1.experimental_ref().deref())
self.assertIs(y, y.experimental_ref().deref())
self.assertIs(z, z.experimental_ref().deref())
self.assertIsNot(x1, y.experimental_ref().deref())
self.assertIsNot(x1, z.experimental_ref().deref())
self.assertIsNot(x1, w.experimental_ref().deref())
self.assertIsNot(y, z.experimental_ref().deref())
self.assertIsNot(y, w.experimental_ref().deref())
self.assertIsNot(z, w.experimental_ref().deref())
def testRefInSet(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
tensor_set = {
x1.experimental_ref(),
x2.experimental_ref(),
y.experimental_ref(),
z.experimental_ref(),
w.experimental_ref(),
}
self.assertEqual(len(tensor_set), 4)
self.assertIn(x1.experimental_ref(), tensor_set)
self.assertIn(x2.experimental_ref(), tensor_set)
self.assertIn(y.experimental_ref(), tensor_set)
self.assertIn(z.experimental_ref(), tensor_set)
self.assertIn(w.experimental_ref(), tensor_set)
def testRefInDict(self):
x1 = constant_op.constant(3)
x2 = x1
y = constant_op.constant(3)
z = constant_op.constant([6, 10])
w = variables.Variable(5)
self.assertEqual(x1.experimental_ref(), x2.experimental_ref())
tensor_dict = {
x1.experimental_ref(): "x1",
y.experimental_ref(): "y",
z.experimental_ref(): "z",
w.experimental_ref(): "w",
}
self.assertEqual(len(tensor_dict), 4)
# Overwriting x1
tensor_dict[x2.experimental_ref()] = "x2"
self.assertEqual(len(tensor_dict), 4)
self.assertEqual(tensor_dict[x1.experimental_ref()], "x2")
self.assertEqual(tensor_dict[x2.experimental_ref()], "x2")
self.assertEqual(tensor_dict[y.experimental_ref()], "y")
self.assertEqual(tensor_dict[z.experimental_ref()], "z")
self.assertEqual(tensor_dict[w.experimental_ref()], "w")
def testTensorRefStrong(self):
x = constant_op.constant(1.)
x_ref = x.experimental_ref()
del x
self.assertIsNotNone(x_ref.deref())
def testVariableRefStrong(self):
x = variables.Variable(1.)
x_ref = x.experimental_ref()
del x
self.assertIsNotNone(x_ref.deref())
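# A short illustrative sketch (not part of the original test file) of the
# pattern the ref tests above exercise: because Tensor equality is element-wise,
# Tensors generally can't be used directly as dict or set keys, but
# .experimental_ref() yields a hashable reference that can be dereferenced later.
def _sketch_count_by_ref(tensors):
    counts = {}
    for t in tensors:
        ref = t.experimental_ref()
        counts[ref] = counts.get(ref, 0) + 1
    # Dereference only for reporting; the dict keys themselves stay as refs.
    return [(ref.deref(), n) for ref, n in counts.items()]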
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesTest(test_util.TensorFlowTestCase):
def testToTensor(self):
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = ops.IndexedSlices(values, indices)
with self.assertRaises(ValueError):
tensor = ops.convert_to_tensor(x, name="tensor")
self.assertEqual(tensor_shape.TensorShape(None), x.shape)
dense_shape = constant_op.constant([3, 2])
y = ops.IndexedSlices(values, indices, dense_shape)
tensor = ops.convert_to_tensor(y, name="tensor")
self.assertAllEqual(tensor.shape, y.shape)
self.assertAllEqual(self.evaluate(tensor), [[2, 3], [0, 0], [5, 7]])
@test_util.run_gpu_only
def testEagerCopy(self):
with context.eager_mode():
var = variables.Variable([[0.0], [0.0], [0.0], [0.0]], name="tensor")
with backprop.GradientTape() as tape:
a = array_ops.gather(array_ops.gather(var, [0, 1]), [0, 1])
b = array_ops.gather(array_ops.gather(var, [2, 3]), [0, 1])
r = special_math_ops.einsum("ij,ij->i", a, b)
g = tape.gradient(r, [var])[0]
values = g.values if isinstance(g, ops.IndexedSlices) else g
self.assertAllEqual(values.get_shape(), [4, 1])
def testNegation(self):
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = -ops.IndexedSlices(values, indices)
self.assertAllEqual(x.values, [[-2, -3], [-5, -7]])
self.assertAllEqual(x.indices, [0, 2])
def testScalarMul(self):
values = constant_op.constant([2, 3, 5, 7], shape=[2, 2])
indices = constant_op.constant([0, 2])
x = math_ops.scalar_mul(-2, ops.IndexedSlices(values, indices))
self.assertAllEqual(x.values, [[-4, -6], [-10, -14]])
self.assertAllEqual(x.indices, [0, 2])
@test_util.run_all_in_graph_and_eager_modes
class IndexedSlicesSpecTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
def assertAllTensorsEqual(self, list1, list2):
self.assertLen(list1, len(list2))
for (t1, t2) in zip(list1, list2):
self.assertAllEqual(t1, t2)
def testConstruction(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1._shape.rank, None)
self.assertEqual(spec1._values_dtype, dtypes.float32)
self.assertEqual(spec1._indices_dtype, dtypes.int64)
self.assertEqual(spec1._dense_shape_dtype, None)
self.assertEqual(spec1._indices_shape.as_list(), [None])
spec2 = indexed_slices.IndexedSlicesSpec([None, None], dtypes.string,
dtypes.int32, dtypes.int64, [10])
self.assertEqual(spec2._shape.as_list(), [None, None])
self.assertEqual(spec2._values_dtype, dtypes.string)
self.assertEqual(spec2._indices_dtype, dtypes.int32)
self.assertEqual(spec2._dense_shape_dtype, dtypes.int64)
self.assertEqual(spec2._indices_shape.as_list(), [10])
def testValueType(self):
spec1 = indexed_slices.IndexedSlicesSpec()
self.assertEqual(spec1.value_type, ops.IndexedSlices)
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(shape=[5, None, None]),
(tensor_shape.TensorShape([5, None, None]), dtypes.float32,
dtypes.int64, None, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.int32, dense_shape_dtype=dtypes.int64),
(tensor_shape.TensorShape(None), dtypes.int32, dtypes.int64,
dtypes.int64, tensor_shape.TensorShape([None]))),
(indexed_slices.IndexedSlicesSpec(indices_shape=[100]),
(tensor_shape.TensorShape(None), dtypes.float32, dtypes.int64, None,
tensor_shape.TensorShape([100]))),
]) # pyformat: disable
def testSerialize(self, spec, expected):
serialization = spec._serialize()
# TensorShape has an unconventional definition of equality, so we can't use
# assertEqual directly here. But repr() is deterministic and lossless for
# the expected values, so we can use that instead.
self.assertEqual(repr(serialization), repr(expected))
@parameterized.parameters([
(indexed_slices.IndexedSlicesSpec(dtype=dtypes.string), (
tensor_spec.TensorSpec(None, dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64),
)),
(indexed_slices.IndexedSlicesSpec(
dtype=dtypes.string, dense_shape_dtype=dtypes.int32), (
tensor_spec.TensorSpec(None, dtypes.string),
tensor_spec.TensorSpec([None], dtypes.int64),
tensor_spec.TensorSpec([None], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32), (
tensor_spec.TensorSpec([None, 10, 15], dtypes.float32),
tensor_spec.TensorSpec([None], dtypes.int64),
tensor_spec.TensorSpec([3], dtypes.int32),
)),
(indexed_slices.IndexedSlicesSpec(
shape=[5, 10, 15], dense_shape_dtype=dtypes.int32,
indices_shape=[20]), (
tensor_spec.TensorSpec([20, 10, 15], dtypes.float32),
tensor_spec.TensorSpec([20], dtypes.int64),
tensor_spec.TensorSpec([3], dtypes.int32),
)),
])
def testComponentSpecs(self, spec, expected):
self.assertEqual(spec._component_specs, expected)
@parameterized.parameters([
{
"spec": indexed_slices.IndexedSlicesSpec(),
"values": [3.0, 5.0],
"indices": [5, 10]
},
{
"spec":
indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32),
"values": [3.0, 5.0],
"indices": [5, 10],
"dense_shape": [100]
},
])
def testToFromComponents(self, spec, indices, values, dense_shape=None):
x = ops.IndexedSlices(indices, values, dense_shape)
actual_components = spec._to_components(x)
if dense_shape is None:
self.assertAllTensorsEqual(actual_components, [indices, values])
else:
self.assertAllTensorsEqual(actual_components,
[indices, values, dense_shape])
st_reconstructed = spec._from_components(actual_components)
self.assertAllEqual(x.indices, st_reconstructed.indices)
self.assertAllEqual(x.values, st_reconstructed.values)
if dense_shape is None:
self.assertIs(st_reconstructed.dense_shape, None)
else:
self.assertAllEqual(x.dense_shape, st_reconstructed.dense_shape)
@test_util.run_v1_only("IndexedSlicesValue is deprecated in v2")
def testFromNumpyComponents(self):
indices = np.array([3, 8])
values = np.array([1.0, 9.0])
dense_shape = np.array([100])
spec1 = indexed_slices.IndexedSlicesSpec(dense_shape_dtype=dtypes.int32)
st1 = spec1._from_components((values, indices, dense_shape))
self.assertIsInstance(st1, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st1.indices, indices)
self.assertAllEqual(st1.values, values)
self.assertAllEqual(st1.dense_shape, dense_shape)
spec2 = indexed_slices.IndexedSlicesSpec()
st2 = spec2._from_components((values, indices))
self.assertIsInstance(st2, indexed_slices.IndexedSlicesValue)
self.assertAllEqual(st2.indices, indices)
self.assertAllEqual(st2.values, values)
self.assertIs(st2.dense_shape, None)
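# A brief illustrative sketch (not part of the original file) of the component
# round trip the spec tests above exercise: an IndexedSlices decomposes into its
# values/indices tensors (plus dense_shape when present) and can be rebuilt from
# those components.
def _sketch_indexed_slices_roundtrip(values, indices, dense_shape=None):
    x = ops.IndexedSlices(values, indices, dense_shape)
    if dense_shape is None:
        components = (x.values, x.indices)
    else:
        components = (x.values, x.indices, x.dense_shape)
    # Reconstructing from the components yields an equivalent IndexedSlices.
    return ops.IndexedSlices(*components)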
class NodeDefConstructorTest(test_util.TensorFlowTestCase):
def testNoArgs(self):
nodedef = ops._NodeDef("None", "bar")
self.assertProtoEquals("op: 'None' name: 'bar'", nodedef)
def _apply_op(g, *args, **kwargs):
op = g.create_op(*args, **kwargs)
if len(op.outputs) == 1:
return op.outputs[0]
else:
return op.outputs
class OperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoInputs(self):
op = test_ops.float_output_string_output(name="myop").a.op
self.assertEqual(2, len(op.values()))
self.assertEqual(0, len(op.inputs))
self.assertEqual("myop", op.name)
float_t, label_str_t = op.values()
self.assertEqual(dtypes.float32, float_t.dtype)
self.assertEqual(op, float_t.op)
self.assertEqual(0, float_t._value_index)
self.assertEqual(0, len(float_t.consumers()))
self.assertEqual("myop", float_t._as_node_def_input())
self.assertEqual(dtypes.string, label_str_t.dtype)
self.assertEqual(op, label_str_t.op)
self.assertEqual(1, label_str_t._value_index)
self.assertEqual(0, len(label_str_t.consumers()))
self.assertEqual("myop:1", label_str_t._as_node_def_input())
self.assertProtoEquals("op:'FloatOutputStringOutput' name:'myop'",
op.node_def)
@test_util.run_deprecated_v1
def testNoOutputs(self):
op1 = test_ops.float_output(name="myop1").op
float_t, = op1.values()
op2 = test_ops.float_input(float_t, name="myop2")
self.assertEqual(0, len(op2.values()))
self.assertEqual(1, len(op2.inputs))
self.assertIs(float_t, op2.inputs[0])
self.assertEqual(1, len(float_t.consumers()))
self.assertEqual(op2, float_t.consumers()[0])
self.assertProtoEquals("op:'FloatOutput' name:'myop1'", op1.node_def)
self.assertProtoEquals("op:'FloatInput' name:'myop2' input:'myop1'",
op2.node_def)
@test_util.run_deprecated_v1
def testInputsAndOutputs(self):
op1 = test_ops.float_output(name="myop1").op
self.assertEqual(1, len(op1.values()))
float1_t, = op1.values()
op2 = test_ops.float_output_string_output(name="myop2").a.op
self.assertEqual(2, len(op2.values()))
float2_t, label2_str_t = op2.values()
# Note that we consume label2_str_t twice here.
op3 = test_ops.foo2(float1_t, label2_str_t, label2_str_t, name="myop3").d.op
self.assertEqual(2, len(op3.values()))
self.assertEqual(1, len(float1_t.consumers()))
self.assertEqual(op3, float1_t.consumers()[0])
self.assertEqual(0, len(float2_t.consumers()))
self.assertEqual(2, len(label2_str_t.consumers()))
self.assertEqual(op3, label2_str_t.consumers()[0])
self.assertEqual(op3, label2_str_t.consumers()[1])
self.assertProtoEquals("""
op:'Foo2' name:'myop3'
input:'myop1' input:'myop2:1' input:'myop2:1'
""", op3.node_def)
def testDeviceObject(self):
op = ops.Operation(ops._NodeDef("None", "myop"), ops.Graph(), [], [])
op._set_device("/job:goo/device:GPU:0")
self.assertProtoEquals(
"op:'None' name:'myop' device:'/job:goo/device:GPU:0' ", op.node_def)
op = ops.Operation(ops._NodeDef("None", "op2"), ops.Graph(), [], [])
op._set_device(
pydev.DeviceSpec(
job="muu", device_type="CPU", device_index=0))
self.assertProtoEquals(
"op:'None' name:'op2' device:'/job:muu/device:CPU:0'", op.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = ops.Operation(
ops._NodeDef("RefOutputFloatOutput", "op1"), g, [],
[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
self.assertEquals([], list(op1.inputs))
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = ops.Operation(
ops._NodeDef("RefInputFloatInput", "op2"),
g, [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32])
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
self.assertEquals([ref_t, nonref_t], list(op2.inputs))
op3 = ops.Operation(
ops._NodeDef("TwoFloatInputs", "op3"), g, [ref_t, nonref_t], [])
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testInvalidNames(self):
g = ops.Graph()
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", ""), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "_invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "-invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "/invalid"), g)
with self.assertRaises(ValueError):
ops.Operation(ops._NodeDef("op", "invalid:0"), g)
@test_util.run_deprecated_v1
def testNoShapeFunction(self):
op = test_ops.a()
self.assertEqual(tensor_shape.unknown_shape(), op.get_shape())
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedArray(self):
values = [[2], [3], [5], [7]]
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
def testShapeTuple(self):
with self.cached_session():
c = constant_op.constant(1)
self.assertEqual(c._shape_tuple(), ()) # pylint: disable=protected-access
def testConvertToTensorEager(self):
with context.eager_mode():
t = constant_op.constant(1)
self.assertTrue(isinstance(t, ops.EagerTensor))
converted = ops.convert_to_tensor(t)
self.assertTrue(isinstance(converted, ops.EagerTensor))
converted = ops.convert_to_tensor(1)
self.assertTrue(isinstance(converted, ops.EagerTensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTuple(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(ops.convert_to_tensor(values)))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTensors(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(
[constant_op.constant(row) for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
tensor = ops.convert_to_tensor(
[[constant_op.constant(v) for v in row] for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedMix(self):
values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(((2,), (3,), (5,), (7,)), self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorPreferred(self):
values = [2, 3, 5, 7]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.float32)
self.assertEqual(dtypes.float32, tensor.dtype)
# Convert empty tensor to anything.
values = []
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.int64, tensor.dtype)
# Converting to the preferred dtype would be a type error, so the value
# falls back to float32 instead.
values = [1.23]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.float32, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToInvalidTensorType(self):
with self.assertRaises(TypeError):
# Forcing an invalid dtype should fail with a type error.
values = [1.23]
ops.convert_to_tensor(values, dtype=dtypes.int64)
@test_util.run_in_graph_and_eager_modes
def testConvertToLongLongTensorType(self):
tensor = ops.convert_to_tensor(
# Get a numpy array of dtype NPY_LONGLONG
np.prod(constant_op.constant([1])._shape_tuple()))
self.assertEqual(dtypes.int64, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorFromInvalidTensor(self):
tensor = constant_op.constant(42.0, dtype=dtypes.float32)
with self.assertRaises(ValueError):
ops.convert_to_tensor(tensor, dtype=dtypes.int32)
@test_util.run_deprecated_v1
def testNoConvert(self):
# Operation cannot be converted to Tensor.
op = control_flow_ops.no_op()
with self.assertRaisesRegexp(TypeError,
r"Can't convert Operation '.*' to Tensor"):
ops.convert_to_tensor(op)
def testStr(self):
node_def = ops._NodeDef("None", "op1")
op = ops.Operation(node_def, ops.Graph(), [], [dtypes.float32])
self.assertEqual(str(node_def), str(op))
def testRepr(self):
op = ops.Operation(
ops._NodeDef("None", "op1"), ops.Graph(), [], [dtypes.float32])
self.assertEqual("<tf.Operation 'op1' type=None>", repr(op))
@test_util.run_deprecated_v1
def testGetAttr(self):
op = test_ops.default_attrs()
self.assertEqual(op.get_attr("string_val"), b"abc")
self.assertEqual(op.get_attr("string_list_val"), [b"abc", b""])
self.assertEqual(op.get_attr("int_val"), 123)
self.assertEqual(op.get_attr("int_list_val"), [1, 2, 3])
self.assertEqual(op.get_attr("float_val"), 10.0)
self.assertEqual(op.get_attr("float_list_val"), [10.0])
self.assertEqual(op.get_attr("bool_val"), True)
self.assertEqual(op.get_attr("bool_list_val"), [True, False])
self.assertEqual(op.get_attr("shape_val"),
tensor_shape.as_shape([2, 1]).as_proto())
self.assertEqual(op.get_attr("shape_list_val"),
[tensor_shape.as_shape([]).as_proto(),
tensor_shape.as_shape([1]).as_proto()])
self.assertEqual(op.get_attr("tensor_val"),
tensor_util.make_tensor_proto(1, dtypes.int32))
self.assertEqual(op.get_attr("tensor_list_val"),
[tensor_util.make_tensor_proto(1, dtypes.int32)])
type_val = op.get_attr("type_val")
# First check that type_val is a DType, because the assertEqual below would
# pass either way, since DType overrides __eq__.
self.assertIsInstance(type_val, dtypes.DType)
self.assertEqual(type_val, dtypes.int32)
type_list_val = op.get_attr("type_list_val")
self.assertTrue(all(isinstance(x, dtypes.DType) for x in type_list_val))
self.assertEqual(type_list_val, [dtypes.int32, dtypes.float32])
@function.Defun(dtypes.float32, func_name="MyFunc")
def func(x):
return x
op = test_ops.func_attr(func)
self.assertEqual(op.get_attr("f"),
attr_value_pb2.NameAttrList(name="MyFunc"))
# Try fetching missing attr
with self.assertRaisesRegexp(
ValueError, "Operation 'FuncAttr' has no attr named 'FakeAttr'."):
op.get_attr("FakeAttr")
# TODO(b/65162920): remove this test when users who are directly mutating the
# node_def have been updated to proper usage.
@test_util.run_deprecated_v1
def testSetAttr(self):
op = test_ops.int_attr().op
op._set_attr("foo", attr_value_pb2.AttrValue(i=2))
# TODO(skyewm): add node_def check
self.assertEqual(op.get_attr("foo"), 2)
# TODO(nolivia): test all error cases
def testAddControlInput(self):
with ops.Graph().as_default():
x = constant_op.constant(1).op
y = constant_op.constant(2).op
z = constant_op.constant(3).op
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_inputs([x, y, y]) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x, y])
self.assertEqual(x._control_outputs, [z])
@test_util.run_deprecated_v1
def testRemoveAllControlInputs(self):
a = constant_op.constant(1)
with ops.control_dependencies([a]):
b = constant_op.constant(2)
c = constant_op.constant(3)
d = constant_op.constant(4)
e = constant_op.constant(5)
with ops.control_dependencies([a, c]):
f = d + e
self.assertEqual(a.op.control_inputs, [])
self.assertEqual(b.op.control_inputs, [a.op])
self.assertEqual(f.op.control_inputs, [a.op, c.op])
a.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(a.op.control_inputs, [])
b.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(b.op.control_inputs, [])
f.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(f.op.control_inputs, [])
self.assertEqual(list(f.op.inputs), [d, e])
@test_util.run_deprecated_v1
def testControlInputCycle(self):
graph = ops.Graph()
with graph.as_default():
z = constant_op.constant(0)
x = constant_op.constant(1)
y = constant_op.constant(2)
y.op._add_control_input(z.op) # pylint: disable=protected-access
y.op._add_control_input(x.op) # pylint: disable=protected-access
x.op._add_control_input(y.op) # pylint: disable=protected-access
with self.session(graph=graph) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Graph is invalid, contains a cycle with 2 nodes"):
self.evaluate(x)
def testUpdateInput(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = x + y
z.op._update_input(0, y) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [y, y])
self.assertEquals(x.consumers(), [])
self.assertEquals(y.consumers(), [z.op, z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 4)
z.op._update_input(0, x) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [x, y])
self.assertEquals(x.consumers(), [z.op])
self.assertEquals(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 3)
z.op._update_input(1, y) # pylint: disable=protected-access
self.assertEquals(list(z.op.inputs), [x, y])
self.assertEquals(x.consumers(), [z.op])
self.assertEquals(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEquals(self.evaluate(z), 3)
def testUpdateInputGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
z = y * 2
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
z.op._update_input(0, x) # pylint: disable=protected-access
def testUpdateInputTypeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(0)
x = constant_op.constant("")
y = constant_op.constant(1)
z = y + w
z.op._update_input(0, x) # pylint: disable=protected-access
with session.Session(graph=g) as sess:
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
"Input 0 of node add was passed string from Const_1:0 incompatible "
"with expected int32"):
self.evaluate(z)
def testUpdateInputShapeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(2, shape=[3, 1])
x = constant_op.constant(0, shape=[3, 1])
y = constant_op.constant(1, shape=[2, 2])
z = w + x
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Cannot update edge, incompatible shapes: \[2,2\] and \[3,1\]"):
z.op._update_input(0, y) # pylint: disable=protected-access
def testUpdateInputOutOfRange(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
with self.assertRaisesRegexp(
errors.OutOfRangeError,
r"Cannot update edge. Input index \[1\] is greater than the number of "
r"total inputs \[0\]."
):
x.op._update_input(1, x) # pylint: disable=protected-access
@test_util.enable_control_flow_v2
@test_util.run_v1_only("b/120545219")
def testAddWhileInput(self):
@eager_function.defun
def test():
output = control_flow_ops.while_loop(lambda x: x < 3, lambda x: x + 1,
[1])
while_op = output.op
self.assertEqual(while_op.type, "StatelessWhile")
orig_num_inputs = len(while_op.inputs)
# Make sure we can handle the while op having a control input.
while_op._add_control_input(constant_op.constant(0).op)
new_input1 = constant_op.constant(1.0)
new_input2 = constant_op.constant(True)
# Clear output shapes to bypass shape checking.
while_op._set_shape_list_attr("output_shapes", [])
while_op._set_type_list_attr("T", [t.dtype for t in while_op.inputs] +
[new_input1.dtype, new_input2.dtype])
while_op._add_while_inputs([new_input1, new_input2])
# Can't add an edge beyond what's specified by "T"
with self.assertRaises(errors.OutOfRangeError):
while_op._add_while_inputs([new_input2])
self.assertEqual(len(while_op.inputs), orig_num_inputs + 2) # pylint: disable=g-deprecated-assert
test()
@test_util.run_deprecated_v1
def testOpDef(self):
x = constant_op.constant(0)
y = constant_op.constant(1)
z = x + y
self.assertEqual(x.op.op_def.name, "Const")
self.assertEqual(len(x.op.op_def.input_arg), 0)
self.assertEqual(len(x.op.op_def.output_arg), 1)
self.assertRegexpMatches(z.op.op_def.name, "Add(V2)?")
self.assertEqual(len(z.op.op_def.input_arg), 2)
self.assertEqual(len(z.op.op_def.output_arg), 1)
def testInputFromDifferentGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
with self.assertRaisesRegexp(ValueError, "must be from the same graph"):
y * x # pylint: disable=pointless-statement
def testInputsAreImmutable(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
op = test_ops.int_input_int_output(x, name="myop").op
with self.assertRaisesRegexp(
AttributeError, "'tuple' object has no attribute 'append'"):
op.inputs.append(None)
class CreateOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
op1 = g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
with g.device("/device:GPU:0"):
op2 = g.create_op(
"FloatOutputStringOutput", [], [dtypes.float32, dtypes.string], None,
name="myop2")
op3 = g.create_op(
"Foo3",
[list(op1.values())[0], list(op2.values())[1], list(op2.values())[0]],
[dtypes.float32, dtypes.int32],
None,
name="myop3")
self.assertDeviceEqual(None, op1.device)
self.assertDeviceEqual("/device:GPU:0", op2.device)
self.assertDeviceEqual(None, op3.device)
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", op1.node_def)
self.assertProtoEquals(
"name:'myop2' op:'FloatOutputStringOutput' device:'/device:GPU:0'",
op2.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo3'",
op3.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = g.create_op(
"RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = g.create_op(
"RefInputFloatInput", [ref_t, nonref_t], [],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
op3 = g.create_op("TwoFloatInputs", [ref_t, nonref_t], [], name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testFinalized(self):
g = ops.Graph()
g.finalize()
with self.assertRaises(RuntimeError):
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# Test unfinalize.
g._unsafe_unfinalize()
g.create_op("FloatOutput", [], [dtypes.float32], None, name="myop1")
# NOTE(skyewm): these cases test the private Graph._create_op_from_tf_operation
# method. Arguably we should only test the public APIs that depend on this
# method. However, this logic is complex and tricky, and it can be difficult to
# ascertain if we have adequate coverage (e.g. a graph may run successfully if
# the control flow context isn't set properly, but a more complicated use case
# that might not be obvious to test will fail). Thus we instead explicitly test
# the low-level behavior.
class CreateOpFromTFOperationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c_op = ops._create_c_op(
g, ops._NodeDef("IntInputIntOutput", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "IntInputIntOutput")
self.assertEqual(len(op.outputs), 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.unknown_shape())
self.assertEqual(list(op.inputs), [x])
self.assertEqual(op.control_inputs, [])
self.assertEqual(op.graph, g)
self.assertEqual(x.consumers(), [op])
self.assertIsNotNone(op.traceback)
self.assertEqual(g.get_operation_by_name("myop"), op)
self.assertEqual(g.get_tensor_by_name("myop:0"), op.outputs[0])
def testShape(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
c_op = ops._create_c_op(g, ops._NodeDef("Identity", "myop"), [x], [])
op = g._create_op_from_tf_operation(c_op)
self.assertEqual(op.name, "myop")
self.assertEqual(op.type, "Identity")
self.assertEqual(len(op.outputs), 1)
self.assertEqual(op.outputs[0].shape, tensor_shape.TensorShape([2, 3]))
def testUniqueName(self):
g = ops.Graph()
with g.as_default():
c_op = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop"), [], [])
c_op2 = ops._create_c_op(g, ops._NodeDef("IntOutput", "myop_1"), [], [])
op = g._create_op_from_tf_operation(c_op)
op2 = g._create_op_from_tf_operation(c_op2)
# Create ops with the same names as the two ops above. We expect the new
# names to be uniquified.
op3 = test_ops.int_output(name="myop").op
op4 = test_ops.int_output(name="myop_1").op
self.assertEqual(op.name, "myop")
self.assertEqual(op2.name, "myop_1")
self.assertEqual(op3.name, "myop_2")
self.assertEqual(op4.name, "myop_1_1")
@test_util.run_v1_only("b/120545219")
def testCond(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def true_fn():
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "cond/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return x
control_flow_ops.cond(x < 10, true_fn, lambda: x)
op = g.get_operation_by_name("cond/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "cond/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Switch")
self.assertEqual(op_input.inputs[0], x)
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"cond/cond_text")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoop(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
self.assertEqual(op.name, "myloop/myop")
self.assertEqual(op.type, "IntInput")
self.assertEqual(op.outputs, [])
op_input = op.inputs[0].op
self.assertEqual(op_input.type, "Enter")
self.assertEqual(list(op_input.inputs), [x])
self.assertEqual(op.graph, g)
# pylint: disable=protected-access
self.assertIsNotNone(op._get_control_flow_context())
self.assertEqual(op._get_control_flow_context().name,
"myloop/while_context")
# pylint: enable=protected-access
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithInternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
def body(i):
c = constant_op.constant(1.0, name="c")
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
c = g.get_operation_by_name("myloop/c")
self.assertIsNotNone(c)
# Internal control dep is preserved
self.assertEqual(op.control_inputs, [c])
@test_util.run_v1_only("b/120545219")
def testWhileLoopWithExternalControlDep(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
c = constant_op.constant(1.0)
def body(i):
ops._create_c_op(ops.get_default_graph(),
ops._NodeDef("IntInput", "myloop/myop"), [x], [])
with ops.control_dependencies([c]):
new_ops = g._add_new_tf_operations()
self.assertEqual(len(new_ops), 1)
return i
control_flow_ops.while_loop(lambda i: i < 10, body, [0], name="myloop")
op = g.get_operation_by_name("myloop/myop")
self.assertIsNotNone(op)
# External control dep is removed and replaced with internal control dep
self.assertNotEqual(op.control_inputs[0], c.op)
self.assertIsNotNone(op.control_inputs[0]._get_control_flow_context())
class ApplyOpTest(test_util.TensorFlowTestCase):
def testNodeDefArgs(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
with g.device("/device:GPU:0"):
t2 = _apply_op(
g, "TwoIntOutputs", [], [dtypes.int32, dtypes.int32], name="myop2")
t3 = _apply_op(
g,
"Foo1", [t1, t2[1], t2[0]], [dtypes.float32, dtypes.int32],
name="myop3")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, list))
self.assertTrue(isinstance(t3, list))
self.assertTrue(isinstance(t3[0], ops.Tensor))
self.assertEqual("myop1", t1._as_node_def_input())
self.assertEqual("myop2", t2[0]._as_node_def_input())
self.assertEqual("myop2:1", t2[1]._as_node_def_input())
self.assertEqual("myop3", t3[0]._as_node_def_input())
# Validate that we got the right ops as well
self.assertProtoEquals("name:'myop1' op:'FloatOutput'", t1.op.node_def)
self.assertProtoEquals(
"name:'myop2' op:'TwoIntOutputs' device:'/device:GPU:0'",
t2[0].op.node_def)
self.assertProtoEquals(
"name:'myop3' input:'myop1' input:'myop2:1' input:'myop2' op:'Foo1'",
t3[0].op.node_def)
def testReferenceInput(self):
g = ops.Graph()
ref_t, nonref_t = _apply_op(
g, "RefOutputFloatOutput", [], [dtypes.float32_ref, dtypes.float32],
name="op1")
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'",
ref_t.op.node_def)
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
out_2 = _apply_op(
g,
"RefInputFloatInputIntOutput", [ref_t, nonref_t], [dtypes.int32],
input_types=[dtypes.float32_ref, dtypes.float32],
name="op2")
self.assertProtoEquals(
"op:'RefInputFloatInputIntOutput' name:'op2' input:'op1' input:'op1:1'",
out_2.op.node_def)
out_3 = _apply_op(
g, "TwoFloatInputsIntOutput", [ref_t, nonref_t], [dtypes.int32],
name="op3")
self.assertProtoEquals(
"op:'TwoFloatInputsIntOutput' name:'op3' input:'op1' input:'op1:1'",
out_3.op.node_def)
class NameStackTest(test_util.TensorFlowTestCase):
def testBasics(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_2", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_2", g.unique_name("foo"))
self.assertEqual("foo_1_1", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_1", g.unique_name("foo_1"))
self.assertEqual("foo_1_2", g.unique_name("foo_1", mark_as_used=False))
self.assertEqual("foo_1_2", g.unique_name("foo_1"))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2", mark_as_used=False))
self.assertEqual("foo_1_2_1", g.unique_name("foo_1_2"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo", g.unique_name("foo"))
self.assertEqual("bar/foo_1", g.unique_name("foo", mark_as_used=False))
self.assertEqual("bar/foo_1", g.unique_name("foo"))
with g.name_scope(None):
self.assertEqual("foo_3", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_3", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz/foo_1", g.unique_name("foo"))
with g.name_scope("baz"):
self.assertEqual(
"bar/baz_1/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo", g.unique_name("foo"))
self.assertEqual(
"bar/baz_1/foo_1", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar/baz_1/foo_1", g.unique_name("foo"))
with g.name_scope("quux"):
self.assertEqual("quux/foo", g.unique_name("foo", mark_as_used=False))
self.assertEqual("quux/foo", g.unique_name("foo"))
with g.name_scope("bar"):
with g.name_scope("baz"):
self.assertEqual(
"bar_1/baz/foo", g.unique_name(
"foo", mark_as_used=False))
self.assertEqual("bar_1/baz/foo", g.unique_name("foo"))
self.assertEqual("foo_4", g.unique_name("foo", mark_as_used=False))
self.assertEqual("foo_4", g.unique_name("foo"))
self.assertEqual("bar_2", g.unique_name("bar", mark_as_used=False))
self.assertEqual("bar_2", g.unique_name("bar"))
@test_util.run_deprecated_v1
def testNameAndVariableScope(self):
with self.cached_session() as sess:
with sess.graph.name_scope("l0"):
with variable_scope.variable_scope("l1"):
with sess.graph.name_scope("l1") as scope:
self.assertEqual("l0/l1/l1/", scope)
self.assertEqual(
"l0/l1/l1/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l1/foo", sess.graph.unique_name("foo"))
with sess.graph.name_scope("l2") as scope:
self.assertEqual("l0/l1/l2/", scope)
self.assertEqual(
"l0/l1/l2/foo",
sess.graph.unique_name(
"foo", mark_as_used=False))
self.assertEqual("l0/l1/l2/foo", sess.graph.unique_name("foo"))
def testOutOfOrderUniqueName(self):
g = ops.Graph()
self.assertEqual("foo_2", g.unique_name("foo_2"))
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("foo_1", g.unique_name("foo"))
self.assertEqual("foo_3", g.unique_name("foo"))
def testUniqueNameCaseInsensitivity(self):
g = ops.Graph()
self.assertEqual("foo", g.unique_name("foo"))
self.assertEqual("Foo_1", g.unique_name("Foo"))
with g.name_scope("bar"):
self.assertEqual("bar/foo", g.unique_name("foo"))
with g.name_scope("Bar"):
self.assertEqual("Bar_1/foo", g.unique_name("foo"))
def testInvalidNameRaisesError(self):
g = ops.Graph()
with g.name_scope(""): # Should not raise
pass
with g.name_scope("foo/"): # Should not raise
with g.name_scope("_bar"): # Should not raise
pass
with self.assertRaises(ValueError):
with g.name_scope("foo:0"):
pass
with self.assertRaises(ValueError):
with g.name_scope("_bar"):
pass
class NameTest(test_util.TensorFlowTestCase):
def testGenerateName(self):
g = ops.Graph()
op0 = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
self.assertEqual("TwoFloatOutputs", op0.name)
self.assertEqual("TwoFloatOutputs:0", op0.outputs[0].name)
self.assertEqual("TwoFloatOutputs:1", op0.outputs[1].name)
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput", op1.name)
self.assertEqual("FloatOutput:0", op1.outputs[0].name)
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertEqual("FloatOutput_1", op2.name)
self.assertEqual("FloatOutput_1:0", op2.outputs[0].name)
op3 = g.create_op("FloatOutput", [], [dtypes.float32], name="my_op")
self.assertEqual("my_op", op3.name)
self.assertEqual("my_op:0", op3.outputs[0].name)
def testNameScope(self):
g = ops.Graph()
with g.name_scope("foo") as foo:
self.assertEqual("foo/", foo)
with g.name_scope("foo2") as foo2:
self.assertEqual("foo/foo2/", foo2)
with g.name_scope(None) as empty1:
self.assertEqual("", empty1)
with g.name_scope("foo3") as foo3:
self.assertEqual("foo3/", foo3)
with g.name_scope("") as empty2:
self.assertEqual("", empty2)
self.assertEqual("FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar") as scope:
self.assertEqual("bar/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual("bar/FloatOutput_1",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
# If you use the value from "with .. as", that values is used as-is.
self.assertEqual(
"bar", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
with g.name_scope("baz") as scope:
with g.name_scope("quux"):
self.assertEqual("baz/quux/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
# If you use the value from the enclosing "with .. as", nothing is pushed.
with g.name_scope(scope):
self.assertEqual("baz/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
self.assertEqual(
"baz", g.create_op(
"FloatOutput", [], [dtypes.float32], name=scope).name)
self.assertEqual(
"trailing",
g.create_op(
"FloatOutput", [], [dtypes.float32], name="trailing/").name)
with g.name_scope("bar"):
self.assertEqual("bar_1/FloatOutput",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
with g.name_scope("bar/"):
self.assertEqual("bar/FloatOutput_2",
g.create_op("FloatOutput", [], [dtypes.float32]).name)
class DeviceTest(test_util.TensorFlowTestCase):
def testNoDevice(self):
g = ops.Graph()
op = g.create_op("FloatOutput", [], [dtypes.float32])
self.assertDeviceEqual(None, op.device)
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput" }
""", gd)
def testEagerBackingDevice(self):
with context.eager_mode():
with ops.device("/device:CPU:0"):
t = constant_op.constant(1.0)
self.assertRegexpMatches(t.device, "/device:CPU:0")
self.assertRegexpMatches(t.backing_device, "/device:CPU:0")
def testDevicePartialString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testDeviceFull(self):
g = ops.Graph()
with g.device(
pydev.DeviceSpec(
job="worker", replica=2, task=0, device_type="CPU",
device_index=3)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/task:0/device:CPU:3" }
""", gd)
def testNesting(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingString(self):
g = ops.Graph()
with g.device("/job:worker/replica:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:3/task:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:3/task:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testNestingOverrideGpuCpu(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker/replica:2/device:GPU:2"):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:2/device:GPU:2" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNestingWithMergeDeviceFunction(self):
g = ops.Graph()
with g.device(pydev.merge_device("/device:GPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:worker")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/device:CPU:0")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device(None)):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStrings(self):
g = ops.Graph()
with g.device("/device:GPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:worker"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:0"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(""):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/device:GPU:0" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/device:CPU:0" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps/device:CPU:0" }
""", gd)
def testNestingWithDeviceStringWildcard(self):
g = ops.Graph()
with g.device("/device:GPU:7"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:GPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:*"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/device:CPU:5"):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/device:GPU:7" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/device:CPU:*" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/device:CPU:5" }
""", gd)
def testNestingErrorGraph(self):
g = ops.Graph()
scope = g.device("/device:GPU:8")
scope.__enter__()
with g.device("/device:GPU:9"):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNestingErrorEager(self):
with context.eager_mode():
scope = ops.device("/device:CPU:0")
scope.__enter__()
with ops.device(None):
with self.assertRaises(RuntimeError):
scope.__exit__(None, None, None)
def testNoneClearsDefault(self):
g = ops.Graph()
with g.device("/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def testNoneIgnoresOuterDeviceFunction(self):
g = ops.Graph()
with g.device(lambda op: "/job:worker/replica:2/device:CPU:1"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None):
g.create_op("FloatOutput", [], [dtypes.float32])
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
node { name: "FloatOutput_1" op: "FloatOutput" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2/device:CPU:1" }
""", gd)
def _overwritingDeviceFunction(self, unused_op):
# This device function unconditionally overwrites the device of ops.
#
# NOTE(mrry): Writing device functions like this is not
# recommended. Instead, in most cases you should use
# `pydev.merge_device("/job:ps")` or simply `"/job:ps"` as the
# argument to `tf.device()` and the device component will be merged in.
return "/job:overwrite"
def testOverwritingBehavior(self):
g = ops.Graph()
with g.device(self._overwritingDeviceFunction):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device("/job:ps"): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(pydev.merge_device("/job:ps")): # Will be overwritten.
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device("/job:ps"):
g.create_op("FloatOutput", [], [dtypes.float32])
with g.device(None): # Disables overwriting device function
with g.device(pydev.merge_device("/job:ps")):
g.create_op("FloatOutput", [], [dtypes.float32])
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:overwrite" }
node { name: "FloatOutput_3" op: "FloatOutput"
device: "/job:ps" }
node { name: "FloatOutput_4" op: "FloatOutput"
device: "/job:ps" }
""", gd)
class MultithreadedGraphStateTest(test_util.TensorFlowTestCase):
class TestThread(threading.Thread):
def __init__(self, graph, replica_id):
super(MultithreadedGraphStateTest.TestThread, self).__init__()
self._graph = graph
self._replica_id = replica_id
      # This thread sets this event after it has mutated the graph. The caller
      # can wait for that.
self.has_mutated_graph = threading.Event()
      # This thread waits until it should continue. The caller can set this
      # event.
self.should_continue = threading.Event()
def run(self):
# Mutate a graph's stack, then set `has_mutated_graph`, then wait for
# `should_continue`, then add an op to the graph affected by the graph's
# stack.
raise NotImplementedError("must be implemented in descendants")
def testDeviceFunctionStack(self):
class DeviceSettingThread(self.TestThread):
def run(self):
with g.device("/job:worker/replica:{}".format(self._replica_id)):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
    # If `switch_to_thread_local` isn't called, then device placement of the
    # ops below is not deterministic.
g.switch_to_thread_local()
threads = [DeviceSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
""", gd)
def testColocateWith(self):
class ColocatingThread(self.TestThread):
def __init__(self, graph, replica_id, op_to_colocate_with):
super(ColocatingThread, self).__init__(graph, replica_id)
self._op_to_colocate_with = op_to_colocate_with
def run(self):
with g.colocate_with(self._op_to_colocate_with):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
ops_to_colocate_with = []
for i in range(3):
with g.device("/job:worker/replica:{}".format(i)):
ops_to_colocate_with.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local` isn't called, then `device` and `attr`
    # values for the ops below are not deterministic.
g.switch_to_thread_local()
threads = [
ColocatingThread(g, i, ops_to_colocate_with[i]) for i in range(3)
]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput"
device: "/job:worker/replica:0" }
node { name: "ColocateWithMe_1" op: "FloatOutput"
device: "/job:worker/replica:1" }
node { name: "ColocateWithMe_2" op: "FloatOutput"
device: "/job:worker/replica:2" }
node { name: "FloatOutput_0" op: "FloatOutput"
device: "/job:worker/replica:0"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_0"}}}}
node { name: "FloatOutput_1" op: "FloatOutput"
device: "/job:worker/replica:1"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_1"}}}}
node { name: "FloatOutput_2" op: "FloatOutput"
device: "/job:worker/replica:2"
attr { key: "_class"
value { list {
s: "loc:@ColocateWithMe_2"}}}}
""", gd)
def testControlDependencies(self):
class DependingThread(self.TestThread):
def __init__(self, graph, replica_id, dependency_op):
super(DependingThread, self).__init__(graph, replica_id)
self._dependency_op = dependency_op
def run(self):
with g.control_dependencies([self._dependency_op]):
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="FloatOutput_{}".format(self._replica_id))
g = ops.Graph()
dependency_ops = []
for i in range(3):
dependency_ops.append(
g.create_op(
"FloatOutput", [], [dtypes.float32],
name="ColocateWithMe_{}".format(i)))
    # If `switch_to_thread_local` isn't called, then `input` values for the
    # ops below are not deterministic.
g.switch_to_thread_local()
threads = [DependingThread(g, i, dependency_ops[i]) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
gd = g.as_graph_def()
self.assertProtoEqualsVersion("""
node { name: "ColocateWithMe_0" op: "FloatOutput" }
node { name: "ColocateWithMe_1" op: "FloatOutput" }
node { name: "ColocateWithMe_2" op: "FloatOutput" }
node { name: "FloatOutput_0" op: "FloatOutput"
input: "^ColocateWithMe_0" }
node { name: "FloatOutput_1" op: "FloatOutput"
input: "^ColocateWithMe_1" }
node { name: "FloatOutput_2" op: "FloatOutput"
input: "^ColocateWithMe_2" }
""", gd)
def testNameStack(self):
class NameSettingThread(self.TestThread):
def run(self):
with g.name_scope("foo"):
op1 = g.create_op("FloatOutput", [], [dtypes.float32])
self.has_mutated_graph.set()
self.should_continue.wait()
self.should_continue.clear()
op2 = g.create_op("FloatOutput", [], [dtypes.float32])
self.result = (op1, op2)
g = ops.Graph()
threads = [NameSettingThread(g, i) for i in range(3)]
for t in threads:
t.start()
t.has_mutated_graph.wait()
t.has_mutated_graph.clear()
for t in threads:
t.should_continue.set()
t.join()
suffixes = ["", "_1", "_2"]
for t, s in zip(threads, suffixes):
self.assertEquals("foo" + s + "/FloatOutput", t.result[0].name)
self.assertEquals("foo" + s + "/FloatOutput_1", t.result[1].name)
class ObjectWithName(object):
def __init__(self, name):
self._name = name
@property
def name(self):
return self._name
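# CollectionTest covers Graph collections: add_to_collection/add_to_collections
# with various container types, scoped retrieval via get_collection(name, scope),
# and the copy-vs-reference distinction between get_collection() and
# get_collection_ref().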
class CollectionTest(test_util.TensorFlowTestCase):
def test_get_collections(self):
g = ops.Graph()
self.assertSequenceEqual(g.collections, [])
g.add_to_collection("key", 12)
g.add_to_collection("key", 15)
self.assertSequenceEqual(g.collections, ["key"])
g.add_to_collection("other", "foo")
self.assertSequenceEqual(sorted(g.collections), ["key", "other"])
self.assertSequenceEqual(
sorted(g.get_all_collection_keys()), ["key", "other"])
def test_add_to_collection(self):
g = ops.Graph()
g.add_to_collection("key", 12)
g.add_to_collection("other", "foo")
g.add_to_collection("key", 34)
    # Note that only blank1 is returned by the scoped
    # get_collection("blah", ...) calls below.
g.add_to_collection("blah", 27)
blank1 = ObjectWithName("prefix/foo")
g.add_to_collection("blah", blank1)
blank2 = ObjectWithName("junk/foo")
g.add_to_collection("blah", blank2)
self.assertEqual([12, 34], g.get_collection("key"))
self.assertEqual([], g.get_collection("nothing"))
self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
self.assertEqual([blank1], g.get_collection("blah", "prefix"))
self.assertEqual([blank1], g.get_collection("blah", ".*x"))
# Make sure that get_collection() returns a first-level
# copy of the collection, while get_collection_ref() returns
# the original list.
other_collection_snapshot = g.get_collection("other")
other_collection_ref = g.get_collection_ref("other")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo"], other_collection_ref)
g.add_to_collection("other", "bar")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo", "bar"], other_collection_ref)
self.assertEqual(["foo", "bar"], g.get_collection("other"))
self.assertTrue(other_collection_ref is g.get_collection_ref("other"))
# Verify that getting an empty collection ref returns a modifiable list.
empty_coll_ref = g.get_collection_ref("empty")
self.assertEqual([], empty_coll_ref)
empty_coll = g.get_collection("empty")
self.assertEqual([], empty_coll)
self.assertFalse(empty_coll is empty_coll_ref)
empty_coll_ref2 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref2 is empty_coll_ref)
# Add to the collection.
empty_coll_ref.append("something")
self.assertEqual(["something"], empty_coll_ref)
self.assertEqual(["something"], empty_coll_ref2)
self.assertEqual([], empty_coll)
self.assertEqual(["something"], g.get_collection("empty"))
empty_coll_ref3 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref3 is empty_coll_ref)
def test_add_to_collections_uniquify(self):
g = ops.Graph()
g.add_to_collections([1, 2, 1], "key")
# Make sure "key" is not added twice
self.assertEqual(["key"], g.get_collection(1))
def test_add_to_collections_from_list(self):
g = ops.Graph()
g.add_to_collections(["abc", "123"], "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_tuple(self):
g = ops.Graph()
g.add_to_collections(("abc", "123"), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_generator(self):
g = ops.Graph()
def generator():
yield "abc"
yield "123"
g.add_to_collections(generator(), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_set(self):
g = ops.Graph()
g.add_to_collections(set(["abc", "123"]), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_string(self):
g = ops.Graph()
g.add_to_collections("abc", "key")
self.assertEqual(["key"], g.get_collection("abc"))
def test_default_graph(self):
with ops.Graph().as_default():
ops.add_to_collection("key", 90)
ops.add_to_collection("key", 100)
# Collections are ordered.
self.assertEqual([90, 100], ops.get_collection("key"))
def test_defun(self):
with context.eager_mode():
@eager_function.defun
def defun():
ops.add_to_collection("int", 1)
ops.add_to_collection("tensor", constant_op.constant(2))
@eager_function.defun
def inner_defun():
self.assertEqual(ops.get_collection("int"), [1])
three = ops.get_collection("tensor")[0] + ops.get_collection("int")[0]
ops.add_to_collection("int", 2)
self.assertEqual(ops.get_collection("int"), [1, 2])
ops.add_to_collection("foo", "bar")
self.assertEqual(ops.get_collection("foo"), ["bar"])
return three
self.assertEqual(ops.get_collection("int"), [1])
three = inner_defun()
self.assertEqual(ops.get_collection("int"), [1])
self.assertEqual(ops.get_collection("foo"), [])
return three
three = defun()
self.assertEqual(three.numpy(), 3)
ops.NotDifferentiable("FloatOutput")
@ops.RegisterGradient("CopyOp")
def _CopyGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
class RegistrationTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testRegisterGradients(self):
x = test_ops.float_output()
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyGrad, fn)
def testOverrideGradients(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "copy_override"}):
y = test_ops.copy_op(x)
fn = ops.get_gradient_function(y.op)
self.assertEqual(_CopyOverrideGrad, fn)
def testNonExistentOverride(self):
g = ops.Graph()
with g.as_default():
x = test_ops.float_output()
with g.gradient_override_map({"CopyOp": "unknown_override"}):
y = test_ops.copy_op(x)
with self.assertRaisesRegexp(LookupError, "unknown_override"):
ops.get_gradient_function(y.op)
class ComparisonTest(test_util.TensorFlowTestCase):
def testMembershipAllowed(self):
g = ops.Graph()
t1 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop1")
t2 = _apply_op(g, "FloatOutput", [], [dtypes.float32], name="myop2")
self.assertTrue(isinstance(t1, ops.Tensor))
self.assertTrue(isinstance(t2, ops.Tensor))
self.assertTrue(t1 in [t1])
self.assertTrue(t1 not in [t2])
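# ControlDependenciesTest checks how control_dependencies() scopes compose:
# nested scopes accumulate control inputs, passing None clears them, and an op
# that already consumes a dependency as a data input gets no redundant control
# edge.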
class ControlDependenciesTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
g = ops.Graph()
with g.as_default():
# Creating unregistered ops with _apply_op() doesn't work with the C API
# TODO(skyewm): address this more consistently. Possible solutions are
# to use registered ops in all tests, create a way to register ops in
# Python tests, or conditionally disable the op registration check in
# the C API.
a = constant_op.constant(1.0)
b = constant_op.constant(1.0)
with g.control_dependencies([a]):
c = constant_op.constant(1.0)
d = array_ops.identity(b)
e = array_ops.identity(c)
self.assertEqual(c.op.control_inputs, [a.op])
self.assertEqual(d.op.control_inputs, [a.op])
      # e's data input c already carries the control dependency on a, so e
      # itself gets no control input.
self.assertEqual(e.op.control_inputs, [])
@test_util.run_in_graph_and_eager_modes
def testEager(self):
def future():
future.calls += 1
return constant_op.constant(2.0)
future.calls = 0
if context.executing_eagerly():
a = constant_op.constant(1.0)
b = future
with ops.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(future.calls, 1)
else:
g = ops.Graph()
with g.as_default():
a = constant_op.constant(1.0)
b = future()
with g.control_dependencies([a, b]):
c = constant_op.constant(3.0)
self.assertEqual(c.op.control_inputs, [a.op, b.op])
self.assertEqual(future.calls, 1)
def testBasicWithConversion(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
class ConvertibleObj(object):
def _as_graph_element(self):
return a
with g.control_dependencies([ConvertibleObj()]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(c.op.control_inputs, [a.op])
def testNested(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1, a_2, a_3, a_4]):
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
b_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_1.op, a_2.op, a_3.op, a_4.op],
b_1.op.control_inputs)
self.assertItemsEqual(b_1.op.control_inputs, b_2.op.control_inputs)
def testClear(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with g.control_dependencies(None):
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies(None):
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testComplex(self):
g = ops.Graph()
# Usage pattern:
# * Nodes a_i are constants defined at the outermost scope, and are used
# as control inputs for the ith nested scope.
# * Nodes b_i are defined as Mul(a_3, a_4) at each scope.
# * Nodes c_i are defined as Mul(a_1, b_1) at each scope.
# * Nodes d_i are defined as Mul(b_i, c_i) at each scope.
# * Nodes e_i are defined as Mul(e_i-1, e_i-1) at each scope i > 1.
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
b_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_1 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_1, c_1],
[dtypes.float32])
e_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_2]):
b_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_2, c_2],
[dtypes.float32])
e_2 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_1, e_1],
[dtypes.float32])
with g.control_dependencies([a_3]):
b_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_3, c_3],
[dtypes.float32])
e_3 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_2, e_2],
[dtypes.float32])
with g.control_dependencies([a_4]):
b_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_3, a_4],
[dtypes.float32])
c_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [a_1, b_1],
[dtypes.float32])
d_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [b_4, c_4],
[dtypes.float32])
e_4 = _apply_op(g, "TwoFloatInputsFloatOutput", [e_3, e_3],
[dtypes.float32])
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_2.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_3.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_4.op.control_inputs)
self.assertItemsEqual([], c_1.op.control_inputs)
self.assertItemsEqual([a_2.op], c_2.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op], c_3.op.control_inputs)
self.assertItemsEqual([a_2.op, a_3.op, a_4.op], c_4.op.control_inputs)
self.assertItemsEqual([], d_1.op.control_inputs)
self.assertItemsEqual([], d_2.op.control_inputs)
self.assertItemsEqual([], d_3.op.control_inputs)
self.assertItemsEqual([], d_4.op.control_inputs)
self.assertItemsEqual([a_1.op], e_1.op.control_inputs)
self.assertItemsEqual([a_2.op], e_2.op.control_inputs)
self.assertItemsEqual([a_3.op], e_3.op.control_inputs)
self.assertItemsEqual([a_4.op], e_4.op.control_inputs)
def testRepeatedDependency(self):
g = ops.Graph()
a = g.create_op("TwoFloatOutputs", [], [dtypes.float32, dtypes.float32])
a_0, a_1 = a.outputs
with g.control_dependencies([a_0]):
b = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a_1]):
c = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [a])
self.assertEqual(c.op.control_inputs, [a])
def testNoControlDependencyWithDataDependency(self):
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.control_dependencies([a]):
b = _apply_op(g, "Identity", [a], [dtypes.float32])
self.assertEqual(b.op.control_inputs, [])
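# OpScopeTest exercises ops.name_scope() directly: how scope names nest, how
# empty, None, and trailing-slash names are handled, and how the default_name
# and values arguments are validated.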
class OpScopeTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testNames(self):
with ops.name_scope("foo", skip_on_eager=False) as foo:
self.assertEqual("foo/", foo)
with ops.name_scope("foo2", skip_on_eager=False) as foo2:
self.assertEqual("foo/foo2/", foo2)
with ops.name_scope(None, skip_on_eager=False) as empty1:
self.assertEqual("", empty1)
with ops.name_scope("foo3", skip_on_eager=False) as foo3:
self.assertEqual("foo3/", foo3)
with ops.name_scope("", skip_on_eager=False) as empty2:
self.assertEqual("", empty2)
with ops.name_scope("foo/", skip_on_eager=False) as outer_foo:
self.assertEqual("foo/", outer_foo)
with ops.name_scope("", skip_on_eager=False) as empty3:
self.assertEqual("", empty3)
with ops.name_scope("foo4", skip_on_eager=False) as foo4:
self.assertEqual("foo/foo4/", foo4)
with ops.name_scope("foo5//", skip_on_eager=False) as foo5:
self.assertEqual("foo5//", foo5)
with ops.name_scope("foo6", skip_on_eager=False) as foo6:
self.assertEqual("foo5//foo6/", foo6)
with ops.name_scope("/", skip_on_eager=False) as foo7:
self.assertEqual("/", foo7)
with ops.name_scope("//", skip_on_eager=False) as foo8:
self.assertEqual("//", foo8)
with ops.name_scope("a//b/c", skip_on_eager=False) as foo9:
self.assertEqual("foo/a//b/c/", foo9)
with ops.name_scope("a//b/c", skip_on_eager=False) as foo10:
self.assertEqual("a//b/c/", foo10)
@test_util.run_in_graph_and_eager_modes
def testEagerDefaultScopeName(self):
with ops.name_scope(None, "default", skip_on_eager=False) as scope:
self.assertEqual(scope, "default/")
with ops.name_scope(None, "default2", skip_on_eager=False) as scope2:
self.assertEqual(scope2, "default/default2/")
@test_util.run_in_graph_and_eager_modes
def testNameScopeV2IsReEntrant(self):
foo = ops.name_scope_v2("foo")
bar = ops.name_scope_v2("bar")
with foo as scope_name:
self.assertEqual("foo/", scope_name)
with foo as scope_name:
self.assertEqual("foo/foo/", scope_name)
with bar as scope_name:
self.assertEqual("foo/bar/", scope_name)
with foo as scope_name:
self.assertEqual("foo/bar/foo/", scope_name)
with bar as scope_name:
self.assertEqual("bar/", scope_name)
@test_util.run_deprecated_v1
def testNoScopeName(self):
g0 = ops.Graph()
values = [
g0.create_op("A", [], [dtypes.float32]),
g0.create_op("B", [], [dtypes.float32])
]
with self.assertRaises(ValueError):
with ops.name_scope(None, values=values):
pass
with self.assertRaises(ValueError):
with ops.name_scope(None, None, values):
pass
@test_util.run_deprecated_v1
def testEmptyScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
with ops.name_scope("", values=[a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope("", "my_default_scope", [a, b]) as scope:
self.assertEqual("", scope)
self.assertEqual(g0, ops.get_default_graph())
@test_util.run_deprecated_v1
def testDefaultScopeName(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
scope_name = "my_scope"
default_scope_name = "my_default_scope"
with ops.name_scope(scope_name, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with ops.name_scope(None, default_scope_name, [a, b]) as scope:
self.assertEqual("%s/" % default_scope_name, scope)
self.assertEqual(g0, ops.get_default_graph())
with self.assertRaises(TypeError):
with ops.name_scope(scope_name, [a, b]):
pass
def _testGraphElements(self, graph_elements):
scope_name = "my_scope"
with ops.name_scope(scope_name, values=graph_elements) as scope:
self.assertEqual("%s/" % scope_name, scope)
self.assertEqual(graph_elements[0].graph, ops.get_default_graph())
g1 = ops.Graph()
a = g1.create_op("A", [], [dtypes.float32])
with self.assertRaises(ValueError):
with ops.name_scope(scope_name, values=graph_elements + [a]):
pass
@test_util.run_deprecated_v1
def testTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, b])
@test_util.run_deprecated_v1
def testSparseTensor(self):
g0 = ops.Graph()
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
sparse = sparse_tensor.SparseTensor(
_apply_op(g0, "Int64Output", [], [dtypes.int64]),
_apply_op(g0, "FloatOutput", [], [dtypes.float32]),
_apply_op(g0, "Int64Output", [], [dtypes.int64]))
self._testGraphElements([a, sparse, b])
@test_util.run_deprecated_v1
def testVariable(self):
g0 = ops.Graph()
with g0.as_default():
variable = variables.Variable([1.0])
a = g0.create_op("A", [], [dtypes.float32])
b = g0.create_op("B", [], [dtypes.float32])
self._testGraphElements([a, variable, b])
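# InitScopeTest covers ops.init_scope(), which lifts ops out of graphs that are
# building functions into the outermost non-function context (or the eager
# context), clearing control dependencies while preserving devices and name
# scopes.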
class InitScopeTest(test_util.TensorFlowTestCase):
def testClearsControlDependencies(self):
g = ops.Graph()
a_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
a_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with g.as_default():
with g.control_dependencies([a_1]):
with g.control_dependencies([a_2]):
with ops.init_scope():
with g.control_dependencies([a_3]):
with g.control_dependencies([a_4]):
# deps [a_3, a_4]
b_3_4 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps = [a_3]
b_3 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to None
b_none = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1, a_2]
b_1_2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
# deps back to [a_1]
b_1 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
with ops.init_scope():
# deps are None again
b_none2 = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertItemsEqual([a_3.op, a_4.op], b_3_4.op.control_inputs)
self.assertItemsEqual([a_3.op], b_3.op.control_inputs)
self.assertItemsEqual([], b_none.op.control_inputs)
self.assertItemsEqual([a_1.op, a_2.op], b_1_2.op.control_inputs)
self.assertItemsEqual([a_1.op], b_1.op.control_inputs)
self.assertItemsEqual([], b_none2.op.control_inputs)
def testLiftsOpsFromFunctions(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with g2.as_default():
with ops.init_scope():
_ = constant_op.constant(1.0)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
def testPreservesDevices(self):
g0 = ops.Graph()
with g0.as_default(), ops.device("CPU:0"):
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
with g1.as_default():
with ops.device("GPU:0"):
with ops.init_scope():
# init_scope should preserve device set under `g1`.
on_gpu = constant_op.constant(1.0)
self.assertEqual(on_gpu.device, "/device:GPU:0")
still_on_gpu = constant_op.constant(1.0)
self.assertEqual(still_on_gpu.device, "/device:GPU:0")
blank = constant_op.constant(1.0)
self.assertEqual(blank.device, "")
with ops.init_scope():
now_on_cpu = constant_op.constant(1.0)
self.assertEqual(now_on_cpu.device, "/device:CPU:0")
on_cpu = constant_op.constant(1.0)
self.assertEqual(on_cpu.device, "/device:CPU:0")
def testComposes(self):
g0 = ops.Graph()
g1 = ops.Graph()
g1._building_function = True # pylint: disable=protected-access
g2 = ops.Graph()
g2._building_function = True # pylint: disable=protected-access
g3 = ops.Graph()
g3._building_function = False # pylint: disable=protected-access
with g0.as_default():
with g1.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 1)
with g2.as_default():
with ops.init_scope():
# This op should be lifted into g0.
_ = constant_op.constant(1.0)
self.assertIs(g0, ops.get_default_graph())
with g3.as_default():
with ops.init_scope():
# This op should be lifted into g3, because g3 is not building a
# function.
_ = constant_op.constant(1.0)
self.assertIs(g3, ops.get_default_graph())
self.assertEqual(len(g3.get_operations()), 1)
self.assertEqual(len(g2.get_operations()), 0)
self.assertEqual(len(g1.get_operations()), 0)
self.assertEqual(len(g0.get_operations()), 2)
def testEscapesToEagerContext(self):
g = ops.Graph()
g._building_function = True # pylint: disable=protected-access
with context.eager_mode():
with context.graph_mode():
with g.as_default():
with ops.init_scope():
# Because g is building a function, init_scope should
# escape out to the eager context.
self.assertTrue(context.executing_eagerly())
# g should be reinstated as the default graph, and the
# graph context should be re-entered.
self.assertIs(g, ops.get_default_graph())
self.assertFalse(context.executing_eagerly())
def testStaysInEagerWhenOnlyEagerContextActive(self):
with context.eager_mode():
with ops.init_scope():
self.assertTrue(context.eager_mode())
self.assertTrue(context.eager_mode())
def testEscapesDefunWhenInEagerMode(self):
def function_with_variables():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(3)
return self.v.assign_add(1)
with context.eager_mode():
# Each invocation of function_with_variables recreates a variable.
self.assertEqual(4, int(function_with_variables()))
self.assertEqual(4, int(function_with_variables()))
compiled = eager_function.defun(function_with_variables)
# The init_scope in function_with_variables lifts the variable out
# of the graph function constructed by defun; hence,
# compiled now appears to be stateful.
self.assertEqual(4, int(compiled()))
self.assertEqual(5, int(compiled()))
def testEscapesDefunWhenInGraphMode(self):
def function_with_variables(name):
with ops.init_scope():
_ = variable_scope.get_variable(name, shape=(1,))
g = ops.Graph()
with g.as_default():
with self.cached_session():
# First ensure that graphs that are not building functions are
# not escaped.
function_with_variables("foo")
with self.assertRaisesRegexp(ValueError,
r"Variable foo already exists.*"):
# This will fail because reuse is not set to True.
function_with_variables("foo")
compiled = eager_function.defun(function_with_variables)
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
# The second call to `compiled` should not create variables: the
# init_scope has lifted the variable creation code out of the defun.
compiled("bar")
self.assertEqual(
len(ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)), 2)
def testEscapesNestedDefun(self):
def inner_function():
with ops.init_scope():
self.v = resource_variable_ops.ResourceVariable(1)
return self.v.assign_add(2)
def outer_function(inner=None):
with ops.init_scope():
self.v0 = resource_variable_ops.ResourceVariable(0)
return self.v0.assign_add(1) + inner()
with context.eager_mode():
# Each invocation of outer_function recreates variables.
self.assertEqual(4, int(outer_function(inner=inner_function)))
self.assertEqual(4, int(outer_function(inner=inner_function)))
compiled_inner = eager_function.defun(inner_function)
compiled_outer = eager_function.defun(outer_function)
# The init_scope lifts variables out of the graph functions
# constructed by defun; hence, compiled_outer should now appear to be
# stateful.
self.assertEqual(4, int(compiled_outer(inner=compiled_inner)))
self.assertEqual(7, int(compiled_outer(inner=compiled_inner)))
@test_util.run_v1_only("b/120545219")
def testFallsBackToGlobalGraphWhenAllGraphsAreBuildingFunctions(self):
with context.graph_mode():
ops.reset_default_graph()
# This doesn't push anything onto the graph stack, but it does
# set the stack's global graph.
global_graph = ops.get_default_graph()
fn_graph = ops.Graph()
# pylint: disable=protected-access
fn_graph._building_function = True
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with fn_graph.as_default():
self.assertEqual(len(ops._default_graph_stack.stack), 1)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 1)
dummy = constant_op.constant(1.0)
self.assertEqual(len(ops._default_graph_stack.stack), 1)
# Note that the global graph is _not_ on the graph stack.
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# Ensure that `dummy` was added to the global graph.
self.assertEqual(global_graph, dummy.graph)
# pylint: enable=protected-access
def testInstallsDefaultGraphWhenGraphStackIsEmptyInGraphMode(self):
with context.graph_mode():
# pylint: disable=protected-access
self.assertEqual(len(ops._default_graph_stack.stack), 0)
with ops.init_scope():
self.assertGreater(len(ops._default_graph_stack.stack), 0)
self.assertEqual(len(ops._default_graph_stack.stack), 0)
# pylint: enable=protected-access
def testPreservesNameScopeInGraphConstruction(self):
with ops.Graph().as_default():
function_graph = ops.Graph()
with function_graph.as_default():
with ops.name_scope("inner", skip_on_eager=False), ops.init_scope():
self.assertEqual(ops.get_name_scope(), "inner")
self.assertEqual(ops.get_name_scope(), "")
def testEnteringGraphFromEagerIsSticky(self):
with context.eager_mode():
g = ops.Graph()
with g.as_default():
with ops.init_scope():
self.assertFalse(context.executing_eagerly())
self.assertEqual(g, ops.get_default_graph())
def testMixGraphEager(self):
with context.eager_mode():
c = constant_op.constant(1.0)
with ops.Graph().as_default():
with self.assertRaisesRegexp(
RuntimeError, "Attempting to capture an EagerTensor"):
math_ops.add(c, c)
c2 = constant_op.constant(2.0)
with self.assertRaisesRegexp(
TypeError, "Graph tensors"):
math_ops.add(c2, c2)
def testPreservesNameScopeInEagerExecution(self):
with context.eager_mode():
def foo():
with ops.name_scope("inner", skip_on_eager=False), ops.init_scope():
if context.executing_eagerly():
# A trailing slash is always appended when eager execution is
# enabled.
self.assertEqual(context.context().scope_name, "inner/")
else:
self.assertEqual(ops.get_name_scope(), "inner")
foo()
self.assertEqual(ops.get_name_scope(), "")
foo_compiled = eager_function.defun(foo)
foo_compiled()
self.assertEqual(ops.get_name_scope(), "")
def testExecutingEagerlyOutsideFunctions(self):
@def_function.function
def f():
return ops.executing_eagerly_outside_functions()
with context.graph_mode():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
        # Need self.evaluate for these because the functions return tensors.
self.assertFalse(self.evaluate(f()))
with context.eager_mode():
self.assertTrue(ops.executing_eagerly_outside_functions())
self.assertTrue(f())
with ops.Graph().as_default():
self.assertFalse(ops.executing_eagerly_outside_functions())
with session.Session():
self.assertFalse(self.evaluate(f()))
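# GraphTest covers default-graph management (as_default(), reset_default_graph),
# the prevent_feeding/prevent_fetching flags, graph element conversion, and
# garbage collection of graphs involved in reference cycles.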
class GraphTest(test_util.TensorFlowTestCase):
def setUp(self):
ops.reset_default_graph()
def _AssertDefault(self, expected):
self.assertIs(expected, ops.get_default_graph())
def testResetDefaultGraphNesting(self):
g0 = ops.Graph()
with self.assertRaises(AssertionError):
with g0.as_default():
ops.reset_default_graph()
def testGraphContextManagerCancelsEager(self):
with context.eager_mode():
with ops.Graph().as_default():
self.assertFalse(context.executing_eagerly())
def testGraphContextManager(self):
g0 = ops.Graph()
with g0.as_default() as g1:
self.assertIs(g0, g1)
def testDefaultGraph(self):
orig = ops.get_default_graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
g0 = ops.Graph()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
context_manager_0 = g0.as_default()
self.assertFalse(ops.has_default_graph())
self._AssertDefault(orig)
with context_manager_0 as g0:
self._AssertDefault(g0)
with ops.Graph().as_default() as g1:
self.assertTrue(ops.has_default_graph())
self._AssertDefault(g1)
self._AssertDefault(g0)
self._AssertDefault(orig)
self.assertFalse(ops.has_default_graph())
def testPreventFeeding(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_feedable(a))
g.prevent_feeding(a)
self.assertFalse(g.is_feedable(a))
@test_util.run_deprecated_v1
def testPreventFetching(self):
g = ops.Graph()
a = constant_op.constant(2.0)
self.assertTrue(g.is_fetchable(a))
g.prevent_fetching(a.op)
self.assertFalse(g.is_fetchable(a))
def testAsGraphElementConversions(self):
class ConvertibleObj(object):
def _as_graph_element(self):
return "FloatOutput:0"
class NonConvertibleObj(object):
pass
g = ops.Graph()
a = _apply_op(g, "FloatOutput", [], [dtypes.float32])
self.assertEqual(a, g.as_graph_element(ConvertibleObj()))
with self.assertRaises(TypeError):
g.as_graph_element(NonConvertibleObj())
# Regression test against creating custom __del__ functions in classes
# involved in cyclic references, e.g. Graph and Operation. (Python won't gc
# cycles that require calling a __del__ method, because the __del__ method can
# theoretically increase the object's refcount to "save" it from gc, and any
  # already-deleted objects in the cycle would have to be restored.)
def testGarbageCollected(self):
# Create a graph we can delete and a weak reference to monitor if it's gc'd
g = ops.Graph()
g_ref = weakref.ref(g)
# Create some ops
with g.as_default():
a = constant_op.constant(2.0)
b = constant_op.constant(3.0)
c = math_ops.add(a, b)
# Create a session we can delete
with session.Session(graph=g) as sess:
self.evaluate(c)
# Delete all references and trigger gc
del g
del a
del b
del c
del sess
gc.collect()
self.assertIsNone(g_ref())
def testRunnableAfterInvalidShape(self):
with ops.Graph().as_default():
with self.assertRaises(ValueError):
math_ops.add([1, 2], [1, 2, 3])
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
def testRunnableAfterInvalidShapeWithKernelLabelMap(self):
g = ops.Graph()
with g.as_default():
with g._kernel_label_map({"KernelLabelRequired": "overload_1"}):
with self.assertRaises(ValueError):
test_ops.kernel_label_required(1)
a = constant_op.constant(1)
with session.Session() as sess:
self.evaluate(a)
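# AttrScopeTest and KernelLabelTest below use the private Graph._attr_scope()
# and Graph._kernel_label_map() context managers to override node attrs and
# kernel labels within a lexical scope.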
class AttrScopeTest(test_util.TensorFlowTestCase):
def _get_test_attrs(self):
x = control_flow_ops.no_op()
try:
a = compat.as_text(x.get_attr("_A"))
except ValueError:
a = None
try:
b = compat.as_text(x.get_attr("_B"))
except ValueError:
b = None
return (a, b)
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual((None, None), self._get_test_attrs())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
a1 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("foo"))
}):
a2 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": None,
"_B": attr_value_pb2.AttrValue(s=compat.as_bytes("bar"))
}):
a3 = self._get_test_attrs()
with sess.graph._attr_scope({
"_A": attr_value_pb2.AttrValue(s=compat.as_bytes("baz"))
}):
a4 = self._get_test_attrs()
a5 = self._get_test_attrs()
a6 = self._get_test_attrs()
a7 = self._get_test_attrs()
self.assertAllEqual((None, None), a1)
self.assertAllEqual(("foo", None), a2)
self.assertAllEqual((None, "bar"), a3)
self.assertAllEqual(("baz", "bar"), a4)
self.assertAllEqual((None, "bar"), a5)
self.assertAllEqual(("foo", None), a6)
self.assertAllEqual((None, None), a7)
class KernelLabelTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testNoLabel(self):
with self.cached_session():
self.assertAllEqual(b"My label is: default",
test_ops.kernel_label().eval())
@test_util.run_deprecated_v1
def testLabelMap(self):
with self.cached_session() as sess:
default_1 = test_ops.kernel_label()
# pylint: disable=protected-access
with sess.graph._kernel_label_map({"KernelLabel": "overload_1"}):
overload_1_1 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": "overload_2"}):
overload_2 = test_ops.kernel_label()
with sess.graph._kernel_label_map({"KernelLabel": ""}):
default_2 = test_ops.kernel_label()
overload_1_2 = test_ops.kernel_label()
# pylint: enable=protected-access
default_3 = test_ops.kernel_label()
self.assertAllEqual(b"My label is: default", self.evaluate(default_1))
self.assertAllEqual(b"My label is: default", self.evaluate(default_2))
self.assertAllEqual(b"My label is: default", self.evaluate(default_3))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_1))
self.assertAllEqual(b"My label is: overload_1",
self.evaluate(overload_1_2))
self.assertAllEqual(b"My label is: overload_2", self.evaluate(overload_2))
class AsGraphDefTest(test_util.TensorFlowTestCase):
def testGraphDefVersion(self):
"""Test that the graphdef version is plumbed through to kernels."""
with ops.Graph().as_default() as g:
version = g.graph_def_versions.producer
with self.session(graph=g):
v = test_ops.graph_def_version().eval()
self.assertEqual(version, v)
def testAddShapes(self):
with ops.Graph().as_default() as g:
t1, t2, t3, t4, t5 = _apply_op(g, "FiveFloatOutputs", [],
[dtypes.float32] * 5)
t1.set_shape(None)
t2.set_shape([])
t3.set_shape([None])
t4.set_shape([43, 37])
t5.set_shape([43, None])
b = constant_op.constant(1.0) # pylint: disable=unused-variable
gd = g.as_graph_def(add_shapes=True)
self.assertProtoEqualsVersion("""
node { name: "FiveFloatOutputs" op: "FiveFloatOutputs"
attr {
key: "_output_shapes"
value {
list {
shape { unknown_rank: true }
shape { }
shape { dim { size: -1 } }
shape { dim { size: 43 } dim { size: 37 } }
shape { dim { size: 43 } dim { size: -1 } }
}
}
}
}
node { name: "Const" op: "Const"
attr {
key: "_output_shapes"
value {
list {
shape { }
}
}
}
attr {
key: "dtype"
value { type: DT_FLOAT }
}
attr {
key: "value"
value {
tensor {
dtype: DT_FLOAT
tensor_shape { }
float_val: 1.0 } } } }
""", gd)
@ops.RegisterStatistics("a", "flops")
def _calc_a_forward_flops(unused_graph, unused_node):
return ops.OpStats("flops", 20)
class StatisticsTest(test_util.TensorFlowTestCase):
def testRegisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("a", "an_a")
flops = ops.get_stats_for_node_def(graph, node, "flops")
self.assertEqual(20, flops.value)
missing_stat = ops.get_stats_for_node_def(graph, node, "missing_stat")
self.assertEqual(None, missing_stat.value)
def testUnregisteredNode(self):
graph = ops.Graph()
node = ops._NodeDef("b", "a_b")
weight_params = ops.get_stats_for_node_def(graph, node, "weight_params")
self.assertEqual(None, weight_params.value)
def testAccumulateStatistics(self):
flops_total = ops.OpStats("flops")
self.assertEqual(None, flops_total.value)
second_flops = ops.OpStats("flops", 3)
flops_total += second_flops
self.assertEqual(3, flops_total.value)
class DeviceStackTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasicDeviceAssignmentMetadata(self):
def device_func(unused_op):
return "/cpu:*"
const_zero = constant_op.constant([0.0], name="zero")
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.device("/cpu:0"):
const_two = constant_op.constant([2.0], name="two")
with ops.device(device_func):
const_three = constant_op.constant(3.0, name="three")
self.assertEqual(0, len(const_zero.op._device_assignments))
one_list = const_one.op._device_assignments
self.assertEqual(1, len(one_list))
self.assertEqual("/cpu", one_list[0].obj)
self.assertEqual("ops_test.py", os.path.basename(one_list[0].filename))
two_list = const_two.op._device_assignments
self.assertEqual(2, len(two_list))
devices = [t.obj for t in two_list]
self.assertEqual(set(["/cpu", "/cpu:0"]), set(devices))
three_list = const_three.op._device_assignments
self.assertEqual(1, len(three_list))
func_description = three_list[0].obj
expected_regex = r"device_func<.*ops_test.py, [0-9]+"
self.assertRegexpMatches(func_description, expected_regex)
@test_util.run_deprecated_v1
def testDeviceAssignmentMetadataForGraphDeviceAndTfDeviceFunctions(self):
with ops.device("/cpu"):
const_one = constant_op.constant([1.0], name="one")
with ops.get_default_graph().device("/cpu"):
const_two = constant_op.constant([2.0], name="two")
one_metadata = const_one.op._device_assignments[0]
two_metadata = const_two.op._device_assignments[0]
# Verify both types of device assignment return the right stack info.
self.assertRegexpMatches("ops_test.py",
os.path.basename(one_metadata.filename))
self.assertEqual(one_metadata.filename, two_metadata.filename)
self.assertEqual(one_metadata.lineno + 2, two_metadata.lineno)
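# ColocationGroupTest verifies ops.colocate_with(): colocation groups are
# recorded as "loc:@<name>" _class attrs, override enclosing device scopes, and
# can be nested or reset with ignore_existing=True.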
class ColocationGroupTest(test_util.TensorFlowTestCase):
@test_util.run_deprecated_v1
def testBasic(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], a.op.colocation_groups())
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
with self.assertRaises(ValueError):
c.op.get_attr("_class")
@test_util.run_deprecated_v1
def testBasicColocationMetadata(self):
const_two = constant_op.constant([2.0], name="two")
with ops.colocate_with(const_two.op):
const_three = constant_op.constant(3.0, name="three")
locations_dict = const_three.op._colocation_dict
self.assertIn("two", locations_dict)
metadata = locations_dict["two"]
self.assertIsNone(metadata.obj)
# Check that this test's filename is recorded as the file containing the
# colocation statement.
self.assertEqual("ops_test.py", os.path.basename(metadata.filename))
@test_util.run_deprecated_v1
def testColocationDeviceInteraction(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
# 'b' is created in the scope of /cpu:0, but it is
# colocated with 'a', which is on '/device:GPU:0'. colocate_with
# overrides devices because it is a stronger constraint.
b = constant_op.constant(3.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual(a.op.device, b.op.device)
@test_util.run_deprecated_v1
def testColocationCanonicalization(self):
with ops.device("/device:GPU:0"):
_ = constant_op.constant(2.0)
with ops.device(lambda op: "/device:GPU:0"):
b = constant_op.constant(3.0)
with ops.get_default_graph().colocate_with(b):
with ops.device("/device:GPU:0"):
c = constant_op.constant(4.0)
# A's device will be /device:GPU:0
# B's device will be /device:GPU:0
# C's device will be /device:GPU:0 because it
# inherits B's device name, after canonicalizing the names.
self.assertEqual(b.op.device, c.op.device)
@test_util.run_deprecated_v1
def testLocationOverrides(self):
with ops.device("/cpu:0"):
with ops.device("/device:GPU:0"):
a = constant_op.constant([2.0], name="a")
# Note that this colocation is "redundant", since we are
# within the scope of "/device:GPU:0". However, we would like to
# preserve in the GraphDef that these two ops should be
# colocated in a portable way.
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
c = constant_op.constant(4.0)
d = constant_op.constant(5.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual("/device:GPU:0", a.op.device)
self.assertEqual(a.op.device, b.op.device)
# Test that device function stack is restored.
self.assertEqual("/device:GPU:0", c.op.device)
self.assertEqual("/device:CPU:0", d.op.device)
@test_util.run_deprecated_v1
def testNestedColocateWith(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0)
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@a"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testMultiColocationGroups(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@a", b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocationIgnoreStack(self):
a = constant_op.constant([2.0], name="a")
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(a.op):
with ops.colocate_with(b.op, ignore_existing=True):
c = constant_op.constant(4.0)
self.assertEqual(set([b"loc:@b"]), set(c.op.colocation_groups()))
@test_util.run_deprecated_v1
def testColocateWithReset(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(None, ignore_existing=True):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
self.assertEqual([b"loc:@c"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateWithInitialNoneThenNested(self):
a = constant_op.constant([2.0], name="a")
with ops.colocate_with(a.op):
with ops.colocate_with(None, ignore_existing=True):
b = constant_op.constant(3.0, name="b")
with ops.colocate_with(b.op):
c = constant_op.constant(4.0, name="c")
self.assertEqual([b"loc:@b"], b.op.colocation_groups())
self.assertEqual([b"loc:@b"], c.op.colocation_groups())
@test_util.run_deprecated_v1
def testColocateVariables(self):
a = variables.Variable([2.0], name="a")
with ops.colocate_with(a.op):
b = variables.Variable([3.0], name="b")
self.assertEqual([b"loc:@a"], b.op.colocation_groups())
def testColocateWithVariableInFunction(self):
v = variables.Variable(1.)
@def_function.function
def f():
with ops.colocate_with(v):
return array_ops.ones([], name="output")
f()
graph_def = f.get_concrete_function().graph.as_graph_def()
wrap_function.function_from_graph_def(graph_def, [], ["output"])
class DeprecatedTest(test_util.TensorFlowTestCase):
def testSuccess(self):
with ops.Graph().as_default() as g:
test_util.set_producer_version(g, 7)
old = test_ops.old()
with self.session(graph=g):
old.run()
def _error(self):
return ((r"Op Old is not available in GraphDef version %d\. "
r"It has been removed in version 8\. For reasons\.") %
versions.GRAPH_DEF_VERSION)
def testGraphConstructionFail(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(NotImplementedError, self._error()):
test_ops.old()
class DenseTensorLikeTypeTest(test_util.TensorFlowTestCase):
def testSuccess(self):
op = ops.Operation(
ops._NodeDef("FloatOutput", "myop"), ops.Graph(), [], [dtypes.float32])
t = op.outputs[0]
self.assertTrue(ops.is_dense_tensor_like(t))
v = variables.Variable([17])
self.assertTrue(ops.is_dense_tensor_like(v))
class BadClassNoName(object):
pass
class BadClassBadName(object):
def name(self):
pass
class BadClassNoDtype(object):
@property
def name(self):
pass
class BadClassBadDtype(object):
@property
def name(self):
pass
def dtype(self):
pass
def testBadClass(self):
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoName)
with self.assertRaisesRegexp(TypeError, "`name`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadName)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassNoDtype)
with self.assertRaisesRegexp(TypeError, "`dtype`"):
ops.register_dense_tensor_like_type(
DenseTensorLikeTypeTest.BadClassBadDtype)
class NameScopeTest(test_util.TensorFlowTestCase):
def testStripAndPrependScope(self):
strs = [
"hidden1/hidden1/weights", # Same prefix. Should strip.
"hidden1///hidden1/weights", # Extra "/". Should strip.
"^hidden1/hidden1/weights", # Same prefix. Should strip.
"loc:@hidden1/hidden1/weights", # Same prefix. Should strip.
"hhidden1/hidden1/weights", # Different prefix. Should keep.
"hidden1"
] # Not a prefix. Should keep.
expected_striped = [
"hidden1/weights", "hidden1/weights", "^hidden1/weights",
"loc:@hidden1/weights", "hhidden1/hidden1/weights", "hidden1"
]
expected_prepended = [
"hidden2/hidden1/weights", "hidden2/hidden1/weights",
"^hidden2/hidden1/weights", "loc:@hidden2/hidden1/weights",
"hidden2/hhidden1/hidden1/weights", "hidden2/hidden1"
]
name_scope_to_strip = "hidden1"
name_scope_to_add = "hidden2"
for es, ep, s in zip(expected_striped, expected_prepended, strs):
striped = ops.strip_name_scope(s, name_scope_to_strip)
self.assertEqual(es, striped)
self.assertEqual(ep, ops.prepend_name_scope(striped, name_scope_to_add))
def testGetNameScope(self):
with ops.Graph().as_default() as g:
with ops.name_scope("scope1"):
with ops.name_scope("scope2"):
with ops.name_scope("scope3"):
self.assertEqual("scope1/scope2/scope3", g.get_name_scope())
self.assertEqual("scope1/scope2", g.get_name_scope())
self.assertEqual("scope1", g.get_name_scope())
self.assertEqual("", g.get_name_scope())
def testTwoGraphs(self):
def f():
g1 = ops.Graph()
g2 = ops.Graph()
with g1.as_default():
with g2.as_default():
with ops.name_scope("_"):
pass
self.assertRaisesRegexp(ValueError, "'_' is not a valid scope name", f)
class EnableEagerExecutionTest(test_util.TensorFlowTestCase):
@test_util.run_v1_only("b/120545219")
def testBadArgumentsToEnableEagerExecution(self):
with self.assertRaisesRegexp(TypeError, "config must be a tf.ConfigProto"):
ops.enable_eager_execution(context.DEVICE_PLACEMENT_SILENT)
with self.assertRaisesRegexp(ValueError, "device_policy must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, c)
with self.assertRaisesRegexp(ValueError, "execution_mode must be one of"):
c = config_pb2.ConfigProto()
ops.enable_eager_execution(c, execution_mode=c)
class _TupleTensor(composite_tensor.CompositeTensor):
"""`Tensor`-like `tuple`-like for custom `Tensor` conversion masquerading."""
def __init__(self, components):
super(_TupleTensor, self).__init__()
self._components = tuple(ops.convert_to_tensor(c) for c in components)
@property
def _type_spec(self):
return _TupleTensorSpec(type_spec.from_value(c) for c in self._components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
class _TupleTensorSpec(type_spec.TypeSpec):
def __init__(self, specs):
self._specs = specs
value_type = property(lambda self: _TupleTensor)
_component_specs = property(lambda self: self._specs)
def _to_components(self, value):
return value._components
def _from_components(self, components):
return _TupleTensor(*components)
def _serialize(self):
return (self._specs,)
class _MyTuple(object):
"""Pretend user-side class for `ConvertToCompositeTensorTest ."""
def __init__(self, components):
super(_MyTuple, self).__init__()
self._components = tuple(components)
def __getitem__(self, key):
return self._components[key]
def __len__(self):
return len(self._components)
def __iter__(self):
return iter(self._components)
ops.register_tensor_conversion_function(
_MyTuple, conversion_func=lambda x, *_, **__: _TupleTensor(x))
class CustomConvertToCompositeTensorTest(test_util.TensorFlowTestCase):
def testCompositeTensorConversion(self):
"""Tests that a user can register a CompositeTensor converter."""
x = _MyTuple((1, [2., 3.], [[4, 5], [6, 7]]))
y = ops.convert_to_tensor_or_composite(x)
self.assertFalse(tensor_util.is_tensor(y))
self.assertIsInstance(y, _TupleTensor)
self.assertLen(y, len(x))
for x_, y_ in zip(x, y):
self.assertIsInstance(y_, ops.Tensor)
self.assertTrue(tensor_util.is_tensor(y_))
self.assertAllEqual(x_, tensor_util.constant_value(y_))
if __name__ == "__main__":
googletest.main()
|
the-stack_0_25618
|
"""
1 - Chamando uma variavel como uma funcao
def fala_oi():
print("oi")
variavel = fala_oi
print(variavel()) #chamando uma variavel que é uma func"""
"""#2 - Chamando uma funcao que está dentro de outra funcao
def master():
def slave():
return "oi"
return slave #tenho que chamar dentro da funcao mae
variavel = master()
print(variavel())"""
"""# exemplo 03 - funcao decoradora
def master(funcao):
def slave():
print("Agora estou decorada")
funcao()
return slave
@master # ou posso usar: fala_oi = master(fala_oi) ou
def fala_oi():
print("oi")
fala_oi() # Esta é por si só uma func DECORADORA"""
"""# Exemplo 04 - Decoradores com funcoes com argumentos
def master(funcao):
def slave(*args,**kargs): #inicialmente slave nao leva argumento, por isso quando decoro outra func com master, da erro.
print("Agora estou decorada")
funcao(*args,**kargs)
return slave
@master
def outra_func(msg):
print(msg)
outra_func("Amigo estou aqui")"""
# Example 05 - Measuring how fast a function executes
from time import time
from time import sleep
def velocidade(funcao):
def interna(*args,**kwargs):
start_time = time()
resultado = funcao(*args, **kwargs)
end_time = time()
tempo_gasto = (end_time - start_time) * 1000
print(f"A função {funcao.__name__} levou {tempo_gasto:.2f} ms para ser executada")
return resultado
return interna
@velocidade
def demora():
for i in range(5):
sleep(1)
demora()
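# Extension sketch (my addition, not part of the original exercise): functools.wraps
# copies the wrapped function's __name__ and docstring onto the inner wrapper, so the
# timing report keeps printing the real function name after decoration.
from functools import wraps

def velocidade_wraps(funcao):
    @wraps(funcao)
    def interna(*args, **kwargs):
        start_time = time()
        resultado = funcao(*args, **kwargs)
        tempo_gasto = (time() - start_time) * 1000
        print(f"The function {funcao.__name__} took {tempo_gasto:.2f} ms to execute")
        return resultado
    return interna

@velocidade_wraps
def rapida():
    sleep(0.2)

rapida()
print(rapida.__name__)  # prints 'rapida' thanks to functools.wraps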
|
the-stack_0_25621
|
import functools
from typing import Any, Dict, List, Tuple
import torch.nn as nn
from ..blocks import SwinBlock
from ..classifiers import LinearClassifier
from ..downsamples import SwinDownsample
from ..heads import SwinHead
from ..junctions import AddJunction
from ..loaders import load_swin_parameters
from ..modules import LayerNorm2d
from ..operations import SwinOperation
from ..stems import SwinPatchStem
def clone_params(params, **kwargs):
new_params = params.copy()
new_params.update(kwargs)
return new_params
def make_swin_layers(
depths: List[int],
channels: int,
feature_size: int,
window_size: int,
attn_heads: int,
) -> List[Tuple[int, int, Dict[str, Any]]]:
layers = []
attn_params = {'operation_type': 'attn', 'window_size': window_size}
mlp_params = {'operation_type': 'mlp', 'window_size': 0,
'shift_size': 0, 'attn_heads': 0}
for i, depth in enumerate(depths):
attn_params['feature_size'] = feature_size
attn_params['attn_heads'] = attn_heads
mlp_params['feature_size'] = feature_size
for d in range(depth):
stride = 2 if i != 0 and d == 0 else 1
if d % 2 == 1 and feature_size > window_size:
attn_params['shift_size'] = window_size // 2
else:
attn_params['shift_size'] = 0
layers.append((channels, stride, attn_params.copy()))
layers.append((channels, 1, mlp_params.copy()))
channels *= 2
feature_size //= 2
attn_heads *= 2
return layers
imagenet_params = dict(
stem=SwinPatchStem,
block=SwinBlock,
operation=SwinOperation,
downsample=SwinDownsample,
junction=AddJunction,
head=SwinHead,
classifier=LinearClassifier,
normalization=functools.partial(nn.LayerNorm, eps=1e-5),
activation=lambda *args, **kwargs: nn.GELU(),
gate_normalization=functools.partial(LayerNorm2d, eps=1e-5),
gate_activation=lambda *args, **kwargs: nn.GELU(),
)
imagenet_models = {
'SwinTinyPatch4-224': clone_params(
imagenet_params,
layers=make_swin_layers(
depths=[2, 2, 6, 2], channels=96,
feature_size=56, window_size=7, attn_heads=3),
stem_channels=96, head_channels=768,
patch_size=4, mlp_ratio=4.0,
timm_name='swin_tiny_patch4_window7_224',
timm_loader=load_swin_parameters),
'SwinSmallPatch4-224': clone_params(
imagenet_params,
layers=make_swin_layers(
depths=[2, 2, 18, 2], channels=96,
feature_size=56, window_size=7, attn_heads=3),
stem_channels=96, head_channels=768,
patch_size=4, mlp_ratio=4.0,
timm_name='swin_small_patch4_window7_224',
timm_loader=load_swin_parameters),
'SwinBasePatch4-224': clone_params(
imagenet_params,
layers=make_swin_layers(
depths=[2, 2, 18, 2], channels=128,
feature_size=56, window_size=7, attn_heads=4),
stem_channels=128, head_channels=1024,
patch_size=4, mlp_ratio=4.0,
timm_name='swin_base_patch4_window7_224',
timm_loader=load_swin_parameters),
'SwinBasePatch4-384': clone_params(
imagenet_params,
layers=make_swin_layers(
depths=[2, 2, 18, 2], channels=128,
feature_size=96, window_size=12, attn_heads=4),
stem_channels=128, head_channels=1024,
patch_size=4, mlp_ratio=4.0,
timm_name='swin_base_patch4_window12_384',
timm_loader=load_swin_parameters),
'SwinLargePatch4-224': clone_params(
imagenet_params,
layers=make_swin_layers(
depths=[2, 2, 18, 2], channels=192,
feature_size=56, window_size=7, attn_heads=6),
stem_channels=192, head_channels=1536,
patch_size=4, mlp_ratio=4.0,
timm_name='swin_large_patch4_window7_224',
timm_loader=load_swin_parameters),
'SwinLargePatch4-384': clone_params(
imagenet_params,
layers=make_swin_layers(
depths=[2, 2, 18, 2], channels=192,
feature_size=96, window_size=12, attn_heads=6),
stem_channels=192, head_channels=1536,
patch_size=4, mlp_ratio=4.0,
timm_name='swin_large_patch4_window12_384',
timm_loader=load_swin_parameters),
}
imagenet_models.update({
'SwinBasePatch4-224-22k': clone_params(
imagenet_models['SwinBasePatch4-224'],
timm_name='swin_base_patch4_window7_224_in22k',
),
'SwinBasePatch4-384-22k': clone_params(
imagenet_models['SwinBasePatch4-384'],
timm_name='swin_base_patch4_window12_384_in22k',
),
'SwinLargePatch4-224-22k': clone_params(
imagenet_models['SwinLargePatch4-224'],
timm_name='swin_large_patch4_window7_224_in22k',
),
'SwinLargePatch4-384-22k': clone_params(
imagenet_models['SwinLargePatch4-384'],
timm_name='swin_large_patch4_window12_384_in22k',
),
})
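# Illustrative sketch (my addition, not part of this module): inspect the layer
# specification generated for the Swin-Tiny configuration. Guarded so importing the
# module is unaffected; each entry produced by make_swin_layers is
# (channels, stride, params).
if __name__ == '__main__':
    _tiny_layers = make_swin_layers(
        depths=[2, 2, 6, 2], channels=96,
        feature_size=56, window_size=7, attn_heads=3)
    # Each transformer block contributes two entries: a window-attention operation
    # followed by an MLP operation; the first block of every stage after the first
    # uses stride 2 to downsample, and odd-indexed attention blocks shift the window.
    for _channels, _stride, _params in _tiny_layers[:4]:
        print(_channels, _stride, _params['operation_type'], _params.get('shift_size'))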
|
the-stack_0_25622
|
import difflib
import json
import posixpath
import sys
import threading
import unittest
from collections import Counter
from contextlib import contextmanager
from copy import copy
from difflib import get_close_matches
from functools import wraps
from unittest.util import safe_repr
from urllib.parse import (
parse_qsl, unquote, urlencode, urljoin, urlparse, urlsplit, urlunparse,
)
from urllib.request import url2pathname
from django.apps import apps
from django.conf import settings
from django.core import mail
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.core.files import locks
from django.core.handlers.wsgi import WSGIHandler, get_path_info
from django.core.management import call_command
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal
from django.core.servers.basehttp import ThreadedWSGIServer, WSGIRequestHandler
from django.db import DEFAULT_DB_ALIAS, connection, connections, transaction
from django.forms.fields import CharField
from django.http import QueryDict
from django.http.request import split_domain_port, validate_host
from django.test.client import Client
from django.test.html import HTMLParseError, parse_html
from django.test.signals import setting_changed, template_rendered
from django.test.utils import (
CaptureQueriesContext, ContextList, compare_xml, modify_settings,
override_settings,
)
from django.utils.decorators import classproperty
from django.views.static import serve
__all__ = ('TestCase', 'TransactionTestCase',
'SimpleTestCase', 'skipIfDBFeature', 'skipUnlessDBFeature')
def to_list(value):
"""
Put value into a list if it's not already one. Return an empty list if
value is None.
"""
if value is None:
value = []
elif not isinstance(value, list):
value = [value]
return value
def assert_and_parse_html(self, html, user_msg, msg):
try:
dom = parse_html(html)
except HTMLParseError as e:
standardMsg = '%s\n%s' % (msg, e)
self.fail(self._formatMessage(user_msg, standardMsg))
return dom
class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection):
self.test_case = test_case
self.num = num
super().__init__(connection)
def __exit__(self, exc_type, exc_value, traceback):
super().__exit__(exc_type, exc_value, traceback)
if exc_type is not None:
return
executed = len(self)
self.test_case.assertEqual(
executed, self.num,
"%d queries executed, %d expected\nCaptured queries were:\n%s" % (
executed, self.num,
'\n'.join(
'%d. %s' % (i, query['sql']) for i, query in enumerate(self.captured_queries, start=1)
)
)
)
class _AssertTemplateUsedContext:
def __init__(self, test_case, template_name):
self.test_case = test_case
self.template_name = template_name
self.rendered_templates = []
self.rendered_template_names = []
self.context = ContextList()
def on_template_render(self, sender, signal, template, context, **kwargs):
self.rendered_templates.append(template)
self.rendered_template_names.append(template.name)
self.context.append(copy(context))
def test(self):
return self.template_name in self.rendered_template_names
def message(self):
return '%s was not rendered.' % self.template_name
def __enter__(self):
template_rendered.connect(self.on_template_render)
return self
def __exit__(self, exc_type, exc_value, traceback):
template_rendered.disconnect(self.on_template_render)
if exc_type is not None:
return
if not self.test():
message = self.message()
if self.rendered_templates:
message += ' Following templates were rendered: %s' % (
', '.join(self.rendered_template_names)
)
else:
message += ' No template was rendered.'
self.test_case.fail(message)
class _AssertTemplateNotUsedContext(_AssertTemplateUsedContext):
def test(self):
return self.template_name not in self.rendered_template_names
def message(self):
return '%s was rendered.' % self.template_name
class _DatabaseFailure:
def __init__(self, wrapped, message):
self.wrapped = wrapped
self.message = message
def __call__(self):
raise AssertionError(self.message)
class SimpleTestCase(unittest.TestCase):
# The class we'll use for the test client self.client.
# Can be overridden in derived classes.
client_class = Client
_overridden_settings = None
_modified_settings = None
databases = set()
_disallowed_database_msg = (
'Database %(operation)s to %(alias)r are not allowed in SimpleTestCase '
'subclasses. Either subclass TestCase or TransactionTestCase to ensure '
'proper test isolation or add %(alias)r to %(test)s.databases to silence '
'this failure.'
)
_disallowed_connection_methods = [
('connect', 'connections'),
('temporary_connection', 'connections'),
('cursor', 'queries'),
('chunked_cursor', 'queries'),
]
@classmethod
def setUpClass(cls):
super().setUpClass()
if cls._overridden_settings:
cls._cls_overridden_context = override_settings(**cls._overridden_settings)
cls._cls_overridden_context.enable()
if cls._modified_settings:
cls._cls_modified_context = modify_settings(cls._modified_settings)
cls._cls_modified_context.enable()
cls._add_databases_failures()
@classmethod
def _validate_databases(cls):
if cls.databases == '__all__':
return frozenset(connections)
for alias in cls.databases:
if alias not in connections:
message = '%s.%s.databases refers to %r which is not defined in settings.DATABASES.' % (
cls.__module__,
cls.__qualname__,
alias,
)
close_matches = get_close_matches(alias, list(connections))
if close_matches:
message += ' Did you mean %r?' % close_matches[0]
raise ImproperlyConfigured(message)
return frozenset(cls.databases)
@classmethod
def _add_databases_failures(cls):
cls.databases = cls._validate_databases()
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, operation in cls._disallowed_connection_methods:
message = cls._disallowed_database_msg % {
'test': '%s.%s' % (cls.__module__, cls.__qualname__),
'alias': alias,
'operation': operation,
}
method = getattr(connection, name)
setattr(connection, name, _DatabaseFailure(method, message))
@classmethod
def _remove_databases_failures(cls):
for alias in connections:
if alias in cls.databases:
continue
connection = connections[alias]
for name, _ in cls._disallowed_connection_methods:
method = getattr(connection, name)
setattr(connection, name, method.wrapped)
@classmethod
def tearDownClass(cls):
cls._remove_databases_failures()
if hasattr(cls, '_cls_modified_context'):
cls._cls_modified_context.disable()
delattr(cls, '_cls_modified_context')
if hasattr(cls, '_cls_overridden_context'):
cls._cls_overridden_context.disable()
delattr(cls, '_cls_overridden_context')
super().tearDownClass()
def __call__(self, result=None):
"""
Wrapper around default __call__ method to perform common Django test
set up. This means that user-defined Test Cases aren't required to
include a call to super().setUp().
"""
testMethod = getattr(self, self._testMethodName)
skipped = (
getattr(self.__class__, "__unittest_skip__", False) or
getattr(testMethod, "__unittest_skip__", False)
)
if not skipped:
try:
self._pre_setup()
except Exception:
result.addError(self, sys.exc_info())
return
super().__call__(result)
if not skipped:
try:
self._post_teardown()
except Exception:
result.addError(self, sys.exc_info())
return
def _pre_setup(self):
"""
Perform pre-test setup:
* Create a test client.
* Clear the mail test outbox.
"""
self.client = self.client_class()
mail.outbox = []
def _post_teardown(self):
"""Perform post-test things."""
pass
def settings(self, **kwargs):
"""
A context manager that temporarily sets a setting and reverts to the
original value when exiting the context.
"""
return override_settings(**kwargs)
def modify_settings(self, **kwargs):
"""
        A context manager that temporarily applies changes to a list setting and
reverts back to the original value when exiting the context.
"""
return modify_settings(**kwargs)
def assertRedirects(self, response, expected_url, status_code=302,
target_status_code=200, msg_prefix='',
fetch_redirect_response=True):
"""
Assert that a response redirected to a specific URL and that the
redirect URL can be loaded.
Won't work for external links since it uses the test client to do a
request (use fetch_redirect_response=False to check such links without
fetching them).
"""
if msg_prefix:
msg_prefix += ": "
if hasattr(response, 'redirect_chain'):
# The request was a followed redirect
self.assertTrue(
response.redirect_chain,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
self.assertEqual(
response.redirect_chain[0][1], status_code,
msg_prefix + "Initial response didn't redirect as expected: Response code was %d (expected %d)"
% (response.redirect_chain[0][1], status_code)
)
url, status_code = response.redirect_chain[-1]
scheme, netloc, path, query, fragment = urlsplit(url)
self.assertEqual(
response.status_code, target_status_code,
msg_prefix + "Response didn't redirect as expected: Final Response code was %d (expected %d)"
% (response.status_code, target_status_code)
)
else:
# Not a followed redirect
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Response didn't redirect as expected: Response code was %d (expected %d)"
% (response.status_code, status_code)
)
url = response.url
scheme, netloc, path, query, fragment = urlsplit(url)
# Prepend the request path to handle relative path redirects.
if not path.startswith('/'):
url = urljoin(response.request['PATH_INFO'], url)
path = urljoin(response.request['PATH_INFO'], path)
if fetch_redirect_response:
# netloc might be empty, or in cases where Django tests the
# HTTP scheme, the convention is for netloc to be 'testserver'.
# Trust both as "internal" URLs here.
domain, port = split_domain_port(netloc)
if domain and not validate_host(domain, settings.ALLOWED_HOSTS):
raise ValueError(
"The test client is unable to fetch remote URLs (got %s). "
"If the host is served by Django, add '%s' to ALLOWED_HOSTS. "
"Otherwise, use assertRedirects(..., fetch_redirect_response=False)."
% (url, domain)
)
redirect_response = response.client.get(path, QueryDict(query), secure=(scheme == 'https'))
# Get the redirection page, using the same client that was used
# to obtain the original response.
self.assertEqual(
redirect_response.status_code, target_status_code,
msg_prefix + "Couldn't retrieve redirection page '%s': response code was %d (expected %d)"
% (path, redirect_response.status_code, target_status_code)
)
self.assertURLEqual(
url, expected_url,
msg_prefix + "Response redirected to '%s', expected '%s'" % (url, expected_url)
)
def assertURLEqual(self, url1, url2, msg_prefix=''):
"""
Assert that two URLs are the same, ignoring the order of query string
parameters except for parameters with the same name.
For example, /path/?x=1&y=2 is equal to /path/?y=2&x=1, but
/path/?a=1&a=2 isn't equal to /path/?a=2&a=1.
"""
def normalize(url):
"""Sort the URL's query string parameters."""
url = str(url) # Coerce reverse_lazy() URLs.
scheme, netloc, path, params, query, fragment = urlparse(url)
query_parts = sorted(parse_qsl(query))
return urlunparse((scheme, netloc, path, params, urlencode(query_parts), fragment))
self.assertEqual(
normalize(url1), normalize(url2),
msg_prefix + "Expected '%s' to equal '%s'." % (url1, url2)
)
def _assert_contains(self, response, text, status_code, msg_prefix, html):
# If the response supports deferred rendering and hasn't been rendered
# yet, then ensure that it does get rendered before proceeding further.
if hasattr(response, 'render') and callable(response.render) and not response.is_rendered:
response.render()
if msg_prefix:
msg_prefix += ": "
self.assertEqual(
response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code)
)
if response.streaming:
content = b''.join(response.streaming_content)
else:
content = response.content
if not isinstance(text, bytes) or html:
text = str(text)
content = content.decode(response.charset)
text_repr = "'%s'" % text
else:
text_repr = repr(text)
if html:
content = assert_and_parse_html(self, content, None, "Response's content is not valid HTML:")
text = assert_and_parse_html(self, text, None, "Second argument is not valid HTML:")
real_count = content.count(text)
return (text_repr, real_count, msg_prefix)
def assertContains(self, response, text, count=None, status_code=200, msg_prefix='', html=False):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
``text`` occurs ``count`` times in the content of the response.
If ``count`` is None, the count doesn't matter - the assertion is true
if the text occurs at least once in the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of %s in response (expected %d)" % (real_count, text_repr, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find %s in response" % text_repr)
def assertNotContains(self, response, text, status_code=200, msg_prefix='', html=False):
"""
Assert that a response indicates that some content was retrieved
        successfully (i.e., the HTTP status code was as expected) and that
        ``text`` doesn't occur in the content of the response.
"""
text_repr, real_count, msg_prefix = self._assert_contains(
response, text, status_code, msg_prefix, html)
self.assertEqual(real_count, 0, msg_prefix + "Response should not contain %s" % text_repr)
def assertFormError(self, response, form, field, errors, msg_prefix=''):
"""
Assert that a form used to render the response has a specific field
error.
"""
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + "Response did not use any contexts to render the response")
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_form = False
for i, context in enumerate(contexts):
if form not in context:
continue
found_form = True
for err in errors:
if field:
if field in context[form].errors:
field_errors = context[form].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on form '%s' in"
" context %d does not contain the error '%s'"
" (actual errors: %s)" %
(field, form, i, err, repr(field_errors))
)
elif field in context[form].fields:
self.fail(
msg_prefix + "The field '%s' on form '%s' in context %d contains no errors" %
(field, form, i)
)
else:
self.fail(
msg_prefix + "The form '%s' in context %d does not contain the field '%s'" %
(form, i, field)
)
else:
non_field_errors = context[form].non_field_errors()
self.assertTrue(
err in non_field_errors,
msg_prefix + "The form '%s' in context %d does not"
" contain the non-field error '%s'"
" (actual errors: %s)" %
(form, i, err, non_field_errors or 'none')
)
if not found_form:
self.fail(msg_prefix + "The form '%s' was not used to render the response" % form)
def assertFormsetError(self, response, formset, form_index, field, errors,
msg_prefix=''):
"""
Assert that a formset used to render the response has a specific error.
For field errors, specify the ``form_index`` and the ``field``.
For non-field errors, specify the ``form_index`` and the ``field`` as
None.
For non-form errors, specify ``form_index`` as None and the ``field``
as None.
"""
# Add punctuation to msg_prefix
if msg_prefix:
msg_prefix += ": "
# Put context(s) into a list to simplify processing.
contexts = to_list(response.context)
if not contexts:
self.fail(msg_prefix + 'Response did not use any contexts to '
'render the response')
# Put error(s) into a list to simplify processing.
errors = to_list(errors)
# Search all contexts for the error.
found_formset = False
for i, context in enumerate(contexts):
if formset not in context:
continue
found_formset = True
for err in errors:
if field is not None:
if field in context[formset].forms[form_index].errors:
field_errors = context[formset].forms[form_index].errors[field]
self.assertTrue(
err in field_errors,
msg_prefix + "The field '%s' on formset '%s', "
"form %d in context %d does not contain the "
"error '%s' (actual errors: %s)" %
(field, formset, form_index, i, err, repr(field_errors))
)
elif field in context[formset].forms[form_index].fields:
self.fail(
msg_prefix + "The field '%s' on formset '%s', form %d in context %d contains no errors"
% (field, formset, form_index, i)
)
else:
self.fail(
msg_prefix + "The formset '%s', form %d in context %d does not contain the field '%s'"
% (formset, form_index, i, field)
)
elif form_index is not None:
non_field_errors = context[formset].forms[form_index].non_field_errors()
self.assertFalse(
not non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain any non-field errors." % (formset, form_index, i)
)
self.assertTrue(
err in non_field_errors,
msg_prefix + "The formset '%s', form %d in context %d "
"does not contain the non-field error '%s' (actual errors: %s)"
% (formset, form_index, i, err, repr(non_field_errors))
)
else:
non_form_errors = context[formset].non_form_errors()
self.assertFalse(
not non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain any non-form errors." % (formset, i)
)
self.assertTrue(
err in non_form_errors,
msg_prefix + "The formset '%s' in context %d does not "
"contain the non-form error '%s' (actual errors: %s)"
% (formset, i, err, repr(non_form_errors))
)
if not found_formset:
self.fail(msg_prefix + "The formset '%s' was not used to render the response" % formset)
def _assert_template_used(self, response, template_name, msg_prefix):
if response is None and template_name is None:
raise TypeError('response and/or template_name argument must be provided')
if msg_prefix:
msg_prefix += ": "
if template_name is not None and response is not None and not hasattr(response, 'templates'):
raise ValueError(
"assertTemplateUsed() and assertTemplateNotUsed() are only "
"usable on responses fetched using the Django test Client."
)
if not hasattr(response, 'templates') or (response is None and template_name):
if response:
template_name = response
response = None
# use this template with context manager
return template_name, None, msg_prefix
template_names = [t.name for t in response.templates if t.name is not None]
return None, template_names, msg_prefix
def assertTemplateUsed(self, response=None, template_name=None, msg_prefix='', count=None):
"""
Assert that the template with the provided name was used in rendering
the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix)
if context_mgr_template:
# Use assertTemplateUsed as context manager.
return _AssertTemplateUsedContext(self, context_mgr_template)
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(
template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s"
% (template_name, ', '.join(template_names))
)
if count is not None:
self.assertEqual(
template_names.count(template_name), count,
msg_prefix + "Template '%s' was expected to be rendered %d "
"time(s) but was actually rendered %d time(s)."
% (template_name, count, template_names.count(template_name))
)
def assertTemplateNotUsed(self, response=None, template_name=None, msg_prefix=''):
"""
Assert that the template with the provided name was NOT used in
rendering the response. Also usable as context manager.
"""
context_mgr_template, template_names, msg_prefix = self._assert_template_used(
response, template_name, msg_prefix
)
if context_mgr_template:
# Use assertTemplateNotUsed as context manager.
return _AssertTemplateNotUsedContext(self, context_mgr_template)
self.assertFalse(
template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering the response" % template_name
)
@contextmanager
def _assert_raises_or_warns_cm(self, func, cm_attr, expected_exception, expected_message):
with func(expected_exception) as cm:
yield cm
self.assertIn(expected_message, str(getattr(cm, cm_attr)))
def _assertFooMessage(self, func, cm_attr, expected_exception, expected_message, *args, **kwargs):
callable_obj = None
if args:
callable_obj, *args = args
cm = self._assert_raises_or_warns_cm(func, cm_attr, expected_exception, expected_message)
# Assertion used in context manager fashion.
if callable_obj is None:
return cm
# Assertion was passed a callable.
with cm:
callable_obj(*args, **kwargs)
def assertRaisesMessage(self, expected_exception, expected_message, *args, **kwargs):
"""
Assert that expected_message is found in the message of a raised
exception.
Args:
expected_exception: Exception class expected to be raised.
expected_message: expected error message string value.
args: Function to be called and extra positional args.
kwargs: Extra kwargs.
"""
return self._assertFooMessage(
self.assertRaises, 'exception', expected_exception, expected_message,
*args, **kwargs
)
def assertWarnsMessage(self, expected_warning, expected_message, *args, **kwargs):
"""
Same as assertRaisesMessage but for assertWarns() instead of
assertRaises().
"""
return self._assertFooMessage(
self.assertWarns, 'warning', expected_warning, expected_message,
*args, **kwargs
)
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=''):
"""
Assert that a form field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in empty_values
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **{**field_kwargs, 'required': False})
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [required.error_messages['required']]
for e in required.empty_values:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length': 2, 'max_length': 20})
self.assertIsInstance(fieldclass(*field_args, **field_kwargs), fieldclass)
def assertHTMLEqual(self, html1, html2, msg=None):
"""
Assert that two HTML snippets are semantically the same.
Whitespace in most cases is ignored, and attribute ordering is not
significant. The arguments must be valid HTML.
"""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 != dom2:
standardMsg = '%s != %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
diff = ('\n' + '\n'.join(difflib.ndiff(
str(dom1).splitlines(), str(dom2).splitlines(),
)))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertHTMLNotEqual(self, html1, html2, msg=None):
"""Assert that two HTML snippets are not semantically equivalent."""
dom1 = assert_and_parse_html(self, html1, msg, 'First argument is not valid HTML:')
dom2 = assert_and_parse_html(self, html2, msg, 'Second argument is not valid HTML:')
if dom1 == dom2:
standardMsg = '%s == %s' % (
safe_repr(dom1, True), safe_repr(dom2, True))
self.fail(self._formatMessage(msg, standardMsg))
def assertInHTML(self, needle, haystack, count=None, msg_prefix=''):
needle = assert_and_parse_html(self, needle, None, 'First argument is not valid HTML:')
haystack = assert_and_parse_html(self, haystack, None, 'Second argument is not valid HTML:')
real_count = haystack.count(needle)
if count is not None:
self.assertEqual(
real_count, count,
msg_prefix + "Found %d instances of '%s' in response (expected %d)" % (real_count, needle, count)
)
else:
self.assertTrue(real_count != 0, msg_prefix + "Couldn't find '%s' in response" % needle)
def assertJSONEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are equal.
        Usual JSON non-significant whitespace rules apply, as the heavy lifting
        is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except ValueError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertEqual(data, expected_data, msg=msg)
def assertJSONNotEqual(self, raw, expected_data, msg=None):
"""
Assert that the JSON fragments raw and expected_data are not equal.
        Usual JSON non-significant whitespace rules apply, as the heavy lifting
        is delegated to the json library.
"""
try:
data = json.loads(raw)
except json.JSONDecodeError:
self.fail("First argument is not valid JSON: %r" % raw)
if isinstance(expected_data, str):
try:
expected_data = json.loads(expected_data)
except json.JSONDecodeError:
self.fail("Second argument is not valid JSON: %r" % expected_data)
self.assertNotEqual(data, expected_data, msg=msg)
def assertXMLEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are semantically the same.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if not result:
standardMsg = '%s != %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
diff = ('\n' + '\n'.join(
difflib.ndiff(xml1.splitlines(), xml2.splitlines())
))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
def assertXMLNotEqual(self, xml1, xml2, msg=None):
"""
Assert that two XML snippets are not semantically equivalent.
Whitespace in most cases is ignored and attribute ordering is not
significant. The arguments must be valid XML.
"""
try:
result = compare_xml(xml1, xml2)
except Exception as e:
standardMsg = 'First or second argument is not valid XML\n%s' % e
self.fail(self._formatMessage(msg, standardMsg))
else:
if result:
standardMsg = '%s == %s' % (safe_repr(xml1, True), safe_repr(xml2, True))
self.fail(self._formatMessage(msg, standardMsg))
class TransactionTestCase(SimpleTestCase):
# Subclasses can ask for resetting of auto increment sequence before each
# test case
reset_sequences = False
# Subclasses can enable only a subset of apps for faster tests
available_apps = None
# Subclasses can define fixtures which will be automatically installed.
fixtures = None
databases = {DEFAULT_DB_ALIAS}
_disallowed_database_msg = (
'Database %(operation)s to %(alias)r are not allowed in this test. '
'Add %(alias)r to %(test)s.databases to ensure proper test isolation '
'and silence this failure.'
)
# If transactions aren't available, Django will serialize the database
# contents into a fixture during setup and flush and reload them
# during teardown (as flush does not restore data from migrations).
# This can be slow; this flag allows enabling on a per-case basis.
serialized_rollback = False
def _pre_setup(self):
"""
Perform pre-test setup:
* If the class has an 'available_apps' attribute, restrict the app
registry to these applications, then fire the post_migrate signal --
it must run with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, install those fixtures.
"""
super()._pre_setup()
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=self.available_apps,
enter=True,
)
for db_name in self._databases_names(include_mirrors=False):
emit_post_migrate_signal(verbosity=0, interactive=False, db=db_name)
try:
self._fixture_setup()
except Exception:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(
sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False,
)
raise
# Clear the queries_log so that it's less likely to overflow (a single
# test probably won't execute 9K queries). If queries_log overflows,
# then assertNumQueries() doesn't work.
for db_name in self._databases_names(include_mirrors=False):
connections[db_name].queries_log.clear()
@classmethod
def _databases_names(cls, include_mirrors=True):
# Only consider allowed database aliases, including mirrors or not.
return [
alias for alias in connections
if alias in cls.databases and (
include_mirrors or not connections[alias].settings_dict['TEST']['MIRROR']
)
]
def _reset_sequences(self, db_name):
conn = connections[db_name]
if conn.features.supports_sequence_reset:
sql_list = conn.ops.sequence_reset_by_name_sql(
no_style(), conn.introspection.sequence_list())
if sql_list:
with transaction.atomic(using=db_name):
with conn.cursor() as cursor:
for sql in sql_list:
cursor.execute(sql)
def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False):
# Reset sequences
if self.reset_sequences:
self._reset_sequences(db_name)
# Provide replica initial data from migrated apps, if needed.
if self.serialized_rollback and hasattr(connections[db_name], "_test_serialized_contents"):
if self.available_apps is not None:
apps.unset_available_apps()
connections[db_name].creation.deserialize_db_from_string(
connections[db_name]._test_serialized_contents
)
if self.available_apps is not None:
apps.set_available_apps(self.available_apps)
if self.fixtures:
# We have to use this slightly awkward syntax due to the fact
# that we're using *args and **kwargs together.
call_command('loaddata', *self.fixtures,
**{'verbosity': 0, 'database': db_name})
def _should_reload_connections(self):
return True
def _post_teardown(self):
"""
Perform post-test things:
* Flush the contents of the database to leave a clean slate. If the
class has an 'available_apps' attribute, don't fire post_migrate.
* Force-close the connection so the next test gets a clean cursor.
"""
try:
self._fixture_teardown()
super()._post_teardown()
if self._should_reload_connections():
# Some DB cursors include SQL statements as part of cursor
# creation. If you have a test that does a rollback, the effect
# of these statements is lost, which can affect the operation of
# tests (e.g., losing a timezone setting causing objects to be
# created with the wrong time). To make sure this doesn't
# happen, get a clean connection at the start of every test.
for conn in connections.all():
conn.close()
finally:
if self.available_apps is not None:
apps.unset_available_apps()
setting_changed.send(sender=settings._wrapped.__class__,
setting='INSTALLED_APPS',
value=settings.INSTALLED_APPS,
enter=False)
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
# Flush the database
inhibit_post_migrate = (
self.available_apps is not None or
( # Inhibit the post_migrate signal when using serialized
# rollback to avoid trying to recreate the serialized data.
self.serialized_rollback and
hasattr(connections[db_name], '_test_serialized_contents')
)
)
call_command('flush', verbosity=0, interactive=False,
database=db_name, reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_migrate=inhibit_post_migrate)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True, msg=None):
items = map(transform, qs)
if not ordered:
return self.assertEqual(Counter(items), Counter(values), msg=msg)
values = list(values)
        # For example, qs.iterator() could be passed as qs, but it does not
        # have an 'ordered' attribute.
if len(values) > 1 and hasattr(qs, 'ordered') and not qs.ordered:
raise ValueError("Trying to compare non-ordered queryset "
"against more than one ordered values")
return self.assertEqual(list(items), values, msg=msg)
def assertNumQueries(self, num, func=None, *args, using=DEFAULT_DB_ALIAS, **kwargs):
conn = connections[using]
context = _AssertNumQueriesContext(self, num, conn)
if func is None:
return context
with context:
func(*args, **kwargs)
def connections_support_transactions(aliases=None):
"""
Return whether or not all (or specified) connections support
transactions.
"""
conns = connections.all() if aliases is None else (connections[alias] for alias in aliases)
return all(conn.features.supports_transactions for conn in conns)
class TestCase(TransactionTestCase):
"""
Similar to TransactionTestCase, but use `transaction.atomic()` to achieve
test isolation.
In most situations, TestCase should be preferred to TransactionTestCase as
it allows faster execution. However, there are some situations where using
TransactionTestCase might be necessary (e.g. testing some transactional
behavior).
On database backends with no transaction support, TestCase behaves as
TransactionTestCase.
"""
@classmethod
def _enter_atomics(cls):
"""Open atomic blocks for multiple databases."""
atomics = {}
for db_name in cls._databases_names():
atomics[db_name] = transaction.atomic(using=db_name)
atomics[db_name].__enter__()
return atomics
@classmethod
def _rollback_atomics(cls, atomics):
"""Rollback atomic blocks opened by the previous method."""
for db_name in reversed(cls._databases_names()):
transaction.set_rollback(True, using=db_name)
atomics[db_name].__exit__(None, None, None)
@classmethod
def _databases_support_transactions(cls):
return connections_support_transactions(cls.databases)
@classmethod
def setUpClass(cls):
super().setUpClass()
if not cls._databases_support_transactions():
return
cls.cls_atomics = cls._enter_atomics()
if cls.fixtures:
for db_name in cls._databases_names(include_mirrors=False):
try:
call_command('loaddata', *cls.fixtures, **{'verbosity': 0, 'database': db_name})
except Exception:
cls._rollback_atomics(cls.cls_atomics)
cls._remove_databases_failures()
raise
try:
cls.setUpTestData()
except Exception:
cls._rollback_atomics(cls.cls_atomics)
cls._remove_databases_failures()
raise
@classmethod
def tearDownClass(cls):
if cls._databases_support_transactions():
cls._rollback_atomics(cls.cls_atomics)
for conn in connections.all():
conn.close()
super().tearDownClass()
@classmethod
def setUpTestData(cls):
"""Load initial data for the TestCase."""
pass
def _should_reload_connections(self):
if self._databases_support_transactions():
return False
return super()._should_reload_connections()
def _fixture_setup(self):
if not self._databases_support_transactions():
# If the backend does not support transactions, we should reload
# class data before each test
self.setUpTestData()
return super()._fixture_setup()
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = self._enter_atomics()
def _fixture_teardown(self):
if not self._databases_support_transactions():
return super()._fixture_teardown()
try:
for db_name in reversed(self._databases_names()):
if self._should_check_constraints(connections[db_name]):
connections[db_name].check_constraints()
finally:
self._rollback_atomics(self.atomics)
def _should_check_constraints(self, connection):
return (
connection.features.can_defer_constraint_checks and
not connection.needs_rollback and connection.is_usable()
)
class CheckCondition:
"""Descriptor class for deferred condition checking."""
def __init__(self, *conditions):
self.conditions = conditions
def add_condition(self, condition, reason):
return self.__class__(*self.conditions, (condition, reason))
def __get__(self, instance, cls=None):
# Trigger access for all bases.
if any(getattr(base, '__unittest_skip__', False) for base in cls.__bases__):
return True
for condition, reason in self.conditions:
if condition():
# Override this descriptor's value and set the skip reason.
cls.__unittest_skip__ = True
cls.__unittest_skip_why__ = reason
return True
return False
def _deferredSkip(condition, reason, name):
def decorator(test_func):
nonlocal condition
if not (isinstance(test_func, type) and
issubclass(test_func, unittest.TestCase)):
@wraps(test_func)
def skip_wrapper(*args, **kwargs):
if (args and isinstance(args[0], unittest.TestCase) and
connection.alias not in getattr(args[0], 'databases', {})):
raise ValueError(
"%s cannot be used on %s as %s doesn't allow queries "
"against the %r database." % (
name,
args[0],
args[0].__class__.__qualname__,
connection.alias,
)
)
if condition():
raise unittest.SkipTest(reason)
return test_func(*args, **kwargs)
test_item = skip_wrapper
else:
# Assume a class is decorated
test_item = test_func
databases = getattr(test_item, 'databases', None)
if not databases or connection.alias not in databases:
# Defer raising to allow importing test class's module.
def condition():
raise ValueError(
"%s cannot be used on %s as it doesn't allow queries "
"against the '%s' database." % (
name, test_item, connection.alias,
)
)
# Retrieve the possibly existing value from the class's dict to
# avoid triggering the descriptor.
skip = test_func.__dict__.get('__unittest_skip__')
if isinstance(skip, CheckCondition):
test_item.__unittest_skip__ = skip.add_condition(condition, reason)
elif skip is not True:
test_item.__unittest_skip__ = CheckCondition((condition, reason))
return test_item
return decorator
def skipIfDBFeature(*features):
"""Skip a test if a database has at least one of the named features."""
return _deferredSkip(
lambda: any(getattr(connection.features, feature, False) for feature in features),
"Database has feature(s) %s" % ", ".join(features),
'skipIfDBFeature',
)
def skipUnlessDBFeature(*features):
"""Skip a test unless a database has all the named features."""
return _deferredSkip(
lambda: not all(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support feature(s): %s" % ", ".join(features),
'skipUnlessDBFeature',
)
def skipUnlessAnyDBFeature(*features):
"""Skip a test unless a database has any of the named features."""
return _deferredSkip(
lambda: not any(getattr(connection.features, feature, False) for feature in features),
"Database doesn't support any of the feature(s): %s" % ", ".join(features),
'skipUnlessAnyDBFeature',
)
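# Usage sketch (my addition, not part of Django): these decorators can be applied to
# individual test methods or to whole TestCase classes, e.g.
#
#     @skipUnlessDBFeature('supports_transactions')
#     def test_rollback_behavior(self):
#         ...
#
# The condition is wrapped by _deferredSkip/CheckCondition, so it is evaluated lazily
# when the test is about to run rather than at import time.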
class QuietWSGIRequestHandler(WSGIRequestHandler):
"""
A WSGIRequestHandler that doesn't log to standard output any of the
requests received, so as to not clutter the test result output.
"""
def log_message(*args):
pass
class FSFilesHandler(WSGIHandler):
"""
WSGI middleware that intercepts calls to a directory, as defined by one of
the *_ROOT settings, and serves those files, publishing them under *_URL.
"""
def __init__(self, application):
self.application = application
self.base_url = urlparse(self.get_base_url())
super().__init__()
def _should_handle(self, path):
"""
Check if the path should be handled. Ignore the path if:
* the host is provided as part of the base_url
* the request's path isn't under the media path (or equal)
"""
return path.startswith(self.base_url[2]) and not self.base_url[1]
def file_path(self, url):
"""Return the relative path to the file on disk for the given URL."""
relative_url = url[len(self.base_url[2]):]
return url2pathname(relative_url)
def get_response(self, request):
from django.http import Http404
if self._should_handle(request.path):
try:
return self.serve(request)
except Http404:
pass
return super().get_response(request)
def serve(self, request):
os_rel_path = self.file_path(request.path)
os_rel_path = posixpath.normpath(unquote(os_rel_path))
# Emulate behavior of django.contrib.staticfiles.views.serve() when it
# invokes staticfiles' finders functionality.
# TODO: Modify if/when that internal API is refactored
final_rel_path = os_rel_path.replace('\\', '/').lstrip('/')
return serve(request, final_rel_path, document_root=self.get_base_dir())
def __call__(self, environ, start_response):
if not self._should_handle(get_path_info(environ)):
return self.application(environ, start_response)
return super().__call__(environ, start_response)
class _StaticFilesHandler(FSFilesHandler):
"""
Handler for serving static files. A private class that is meant to be used
solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.STATIC_ROOT
def get_base_url(self):
return settings.STATIC_URL
class _MediaFilesHandler(FSFilesHandler):
"""
Handler for serving the media files. A private class that is meant to be
used solely as a convenience by LiveServerThread.
"""
def get_base_dir(self):
return settings.MEDIA_ROOT
def get_base_url(self):
return settings.MEDIA_URL
class LiveServerThread(threading.Thread):
"""Thread for running a live http server while the tests are running."""
def __init__(self, host, static_handler, connections_override=None, port=0):
self.host = host
self.port = port
self.is_ready = threading.Event()
self.error = None
self.static_handler = static_handler
self.connections_override = connections_override
super().__init__()
def run(self):
"""
Set up the live server and databases, and then loop over handling
HTTP requests.
"""
if self.connections_override:
# Override this thread's database connections with the ones
# provided by the main thread.
for alias, conn in self.connections_override.items():
connections[alias] = conn
try:
# Create the handler for serving static and media files
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
self.httpd = self._create_server()
# If binding to port zero, assign the port allocated by the OS.
if self.port == 0:
self.port = self.httpd.server_address[1]
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
finally:
connections.close_all()
def _create_server(self):
return ThreadedWSGIServer((self.host, self.port), QuietWSGIRequestHandler, allow_reuse_address=False)
def terminate(self):
if hasattr(self, 'httpd'):
# Stop the WSGI server
self.httpd.shutdown()
self.httpd.server_close()
self.join()
class LiveServerTestCase(TransactionTestCase):
"""
Do basically the same as TransactionTestCase but also launch a live HTTP
server in a separate thread so that the tests may use another testing
framework, such as Selenium for example, instead of the built-in dummy
client.
It inherits from TransactionTestCase instead of TestCase because the
    threads don't share the same transactions (unless using in-memory sqlite)
and each thread needs to commit all their transactions so that the other
thread can see the changes.
"""
host = 'localhost'
port = 0
server_thread_class = LiveServerThread
static_handler = _StaticFilesHandler
@classproperty
def live_server_url(cls):
return 'http://%s:%s' % (cls.host, cls.server_thread.port)
@classproperty
def allowed_host(cls):
return cls.host
@classmethod
def setUpClass(cls):
super().setUpClass()
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if conn.vendor == 'sqlite' and conn.is_in_memory_db():
# Explicitly enable thread-shareability for this connection
conn.inc_thread_sharing()
connections_override[conn.alias] = conn
cls._live_server_modified_settings = modify_settings(
ALLOWED_HOSTS={'append': cls.allowed_host},
)
cls._live_server_modified_settings.enable()
cls.server_thread = cls._create_server_thread(connections_override)
cls.server_thread.daemon = True
cls.server_thread.start()
# Wait for the live server to be ready
cls.server_thread.is_ready.wait()
if cls.server_thread.error:
# Clean up behind ourselves, since tearDownClass won't get called in
# case of errors.
cls._tearDownClassInternal()
raise cls.server_thread.error
@classmethod
def _create_server_thread(cls, connections_override):
return cls.server_thread_class(
cls.host,
cls.static_handler,
connections_override=connections_override,
port=cls.port,
)
@classmethod
def _tearDownClassInternal(cls):
# There may not be a 'server_thread' attribute if setUpClass() for some
# reasons has raised an exception.
if hasattr(cls, 'server_thread'):
# Terminate the live server's thread
cls.server_thread.terminate()
# Restore sqlite in-memory database connections' non-shareability.
for conn in cls.server_thread.connections_override.values():
conn.dec_thread_sharing()
@classmethod
def tearDownClass(cls):
cls._tearDownClassInternal()
cls._live_server_modified_settings.disable()
super().tearDownClass()
class SerializeMixin:
"""
Enforce serialization of TestCases that share a common resource.
Define a common 'lockfile' for each set of TestCases to serialize. This
file must exist on the filesystem.
Place it early in the MRO in order to isolate setUpClass()/tearDownClass().
"""
lockfile = None
@classmethod
def setUpClass(cls):
if cls.lockfile is None:
raise ValueError(
"{}.lockfile isn't set. Set it to a unique value "
"in the base class.".format(cls.__name__))
cls._lockfile = open(cls.lockfile)
locks.lock(cls._lockfile, locks.LOCK_EX)
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._lockfile.close()
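# Illustrative sketch, not part of the original module: test case classes that share a
# resource are serialized by pointing `lockfile` at the same existing file; using the
# module's own __file__ as the lock target is an assumption made for the example.
class ExampleSerializedTests(SerializeMixin, TransactionTestCase):
    lockfile = __file__
    def test_uses_shared_resource(self):
        pass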
|
the-stack_0_25624
|
from pythonwarrior.units.base import UnitBase
class Archer(UnitBase):
def __init__(self):
super(Archer, self).__init__()
self.max_health = 7
self.add_abilities('shoot_', 'look')
@property
def shoot_power(self):
return 3
@property
def character(self):
return 'a'
def play_turn(self, turn):
for direction in ['forward', 'left', 'right']:
for space in turn.look(direction):
if space.is_player():
turn.shoot_(direction)
return
elif not space.is_empty():
break
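# Illustrative note, not part of the original class: play_turn() scans forward, left and
# right in that order, shoots the first visible player unit, and stops scanning a
# direction as soon as a non-empty, non-player space blocks the line of sight.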
|
the-stack_0_25626
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, [email protected]
#
#
from .basehandler import BaseHandler
from ppmessage.db.models import OrgGroup
from ppmessage.db.models import AppInfo
from ppmessage.db.models import FileInfo
from ppmessage.db.models import DeviceUser
from ppmessage.db.models import ConversationInfo
from ppmessage.db.models import ConversationUserData
from ppmessage.core.redis import redis_hash_to_dict
from ppmessage.api.error import API_ERR
from ppmessage.core.constant import API_LEVEL
from ppmessage.core.constant import REDIS_AMD_KEY
from ppmessage.core.constant import CONVERSATION_TYPE
from ppmessage.core.constant import CONVERSATION_STATUS
import copy
import uuid
import json
import time
import hashlib
import logging
import datetime
class PPComCreateConversationHandler(BaseHandler):
"""
If member_list has length 1 and the conversation has already been created,
return the existing conversation.
If group_uuid != None and the conversation has already been created,
return the existing conversation.
"""
def _return(self, _conversation_uuid, _request):
_redis = self.application.redis
_app_uuid = _request.get("app_uuid")
_user_uuid = _request.get("user_uuid")
_conversation = redis_hash_to_dict(_redis, ConversationInfo, _conversation_uuid)
if _conversation == None:
self.setErrorCode(API_ERR.NO_CONVERSATION)
return
_r = self.getReturnData()
_r.update(_conversation)
_key = ConversationUserData.__tablename__ + ".app_uuid." + _app_uuid + \
".user_uuid." + _user_uuid + ".conversation_uuid." + _conversation_uuid
_data_uuid = _redis.get(_key)
if _data_uuid != None:
_key = ConversationUserData.__tablename__ + ".uuid." + _data_uuid
logging.info(_redis.hgetall(_key))
_data = _redis.hmget(_key, ["conversation_name", "conversation_icon"])
logging.info("---------%s--------" % str(_data))
_r["conversation_name"] = _data[0]
_r["conversation_icon"] = _data[1]
logging.info(_r)
return
def _create(self, _member_uuid, _request):
_app_uuid = _request.get("app_uuid")
_user_uuid = _request.get("user_uuid")
_redis = self.application.redis
_key = DeviceUser.__tablename__ + ".uuid." + _user_uuid
_portal_user_name = _redis.hget(_key, "user_fullname")
_portal_user_icon = _redis.hget(_key, "user_icon")
_key = DeviceUser.__tablename__ + ".uuid." + _member_uuid
_member_user_name = _redis.hget(_key, "user_fullname")
_member_user_icon = _redis.hget(_key, "user_icon")
_conversation_uuid = str(uuid.uuid1())
_values = {
"uuid": _conversation_uuid,
"app_uuid": _app_uuid,
"user_uuid": _user_uuid,
"assigned_uuid": _member_uuid,
"conversation_type": CONVERSATION_TYPE.P2S,
"status": CONVERSATION_STATUS.NEW,
}
# create it
_row = ConversationInfo(**_values)
_row.async_add(_redis)
_row.create_redis_keys(_redis)
_row = ConversationUserData(uuid=str(uuid.uuid1()),
app_uuid=_app_uuid,
user_uuid=_user_uuid,
conversation_uuid=_conversation_uuid,
conversation_type=CONVERSATION_TYPE.P2S,
conversation_name=_member_user_name,
conversation_icon=_member_user_icon,
conversation_status=CONVERSATION_STATUS.NEW)
_row.async_add(_redis)
_row.create_redis_keys(_redis)
_row = ConversationUserData(uuid=str(uuid.uuid1()),
app_uuid=_app_uuid,
user_uuid=_member_uuid,
conversation_uuid=_conversation_uuid,
conversation_type=CONVERSATION_TYPE.P2S,
conversation_name=_portal_user_name,
conversation_icon=_portal_user_icon,
conversation_status=CONVERSATION_STATUS.NEW)
_row.async_add(_redis)
_row.create_redis_keys(_redis)
self._return(_conversation_uuid, _request)
return
def _existed(self, _request):
_app_uuid = _request.get("app_uuid")
_user_uuid = _request.get("user_uuid")
_member_list = _request.get("member_list")
_redis = self.application.redis
if _member_list != None and isinstance(_member_list, list) == True and len(_member_list) == 1:
_assigned_uuid = _member_list[0]
if _assigned_uuid == None:
return False
_key = ConversationInfo.__tablename__ + ".app_uuid." + _app_uuid + \
".user_uuid." + _user_uuid + ".assigned_uuid." + _assigned_uuid
_conversation_uuid = _redis.get(_key)
if _conversation_uuid != None:
_key = ConversationUserData.__tablename__ + ".conversation_uuid." + _conversation_uuid
_count = _redis.scard(_key)
if _count == 2:
self._return(_conversation_uuid, _request)
_r = self.getReturnData()
return True
return False
if _member_list == None:
_key = ConversationUserData.__tablename__ + ".app_uuid." + _app_uuid + ".user_uuid." + _user_uuid
_conversations = _redis.smembers(_key)
if len(_conversations) == 0:
return False
_pi = _redis.pipeline()
for _conversation_uuid in _conversations:
_key = ConversationInfo.__tablename__ + ".uuid." + _conversation_uuid
_pi.hget(_key, "updatetime")
_updatetime = _pi.execute()
_unsorted = zip(_conversations, _updatetime)
_sorted = sorted(_unsorted, lambda x,y: cmp(x[1], y[1]), reverse=True)
self._return(_sorted[0][0], _request)
return True
return False
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPCOM)
return
def _Task(self):
super(PPComCreateConversationHandler, self)._Task()
_request = json.loads(self.request.body)
_app_uuid = _request.get("app_uuid")
_user_uuid = _request.get("user_uuid")
_member_list = _request.get("member_list")
if _app_uuid == None or _user_uuid == None:
self.setErrorCode(API_ERR.NO_PARA)
return
if self._existed(_request):
return
# assume ppcom only want to talk with only one
if _member_list != None and isinstance(_member_list, list) == True and len(_member_list) == 1:
self._create(_member_list[0], _request)
return
_value = {"app_uuid": _app_uuid, "user_uuid": _user_uuid}
_value = json.dumps(_value)
_hash = hashlib.sha1(_value)
_key = REDIS_AMD_KEY + ".amd_hash." + _hash
self.application.redis.set(_key, _value)
_key = REDIS_AMD_KEY
self.application.redis.rpush(_key, _hash)
_key = REDIS_AMD_KEY + ".app_uuid." + _app_uuid
self.application.redis.sadd(_key, _hash)
return
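# Illustrative sketch, not part of the original handler: the JSON body _Task() expects,
# based on the fields it reads; the UUID values below are made up.
#
# {
#     "app_uuid": "5b6e...-uuid-of-app",
#     "user_uuid": "9c1d...-uuid-of-portal-user",
#     "member_list": ["a7f2...-uuid-of-member"]
# }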
|
the-stack_0_25627
|
import json
import unittest
from unittest.mock import patch, call, Mock, MagicMock, mock_open
from botocore.exceptions import ClientError
from datetime import datetime
from workteam.src import workteam
from common import _utils
from . import test_utils
required_args = [
'--region', 'us-west-2',
'--team_name', 'test-team',
'--description', 'fake team'
]
class WorkTeamTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
parser = workteam.create_parser()
cls.parser = parser
def test_create_parser(self):
self.assertIsNotNone(self.parser)
def test_main(self):
# Mock out all of utils except parser
workteam._utils = MagicMock()
workteam._utils.add_default_client_arguments = _utils.add_default_client_arguments
# Set some static returns
workteam._utils.create_workteam.return_value = 'arn:aws:sagemaker:us-east-1:999999999999:work-team'
with patch('builtins.open', mock_open()) as file_open:
workteam.main(required_args)
# Check if correct requests were created and triggered
workteam._utils.create_workteam.assert_called()
# Check the file outputs
file_open.assert_has_calls([
call('/tmp/workteam_arn.txt', 'w')
])
file_open().write.assert_has_calls([
call('arn:aws:sagemaker:us-east-1:999999999999:work-team')
])
def test_workteam(self):
mock_client = MagicMock()
mock_args = self.parser.parse_args(required_args)
response = _utils.create_workteam(mock_client, vars(mock_args))
mock_client.create_workteam.assert_called_once_with(
Description='fake team',
MemberDefinitions=[{'CognitoMemberDefinition': {'UserPool': '', 'UserGroup': '', 'ClientId': ''}}], Tags=[],
WorkteamName='test-team'
)
def test_sagemaker_exception_in_workteam(self):
mock_client = MagicMock()
mock_exception = ClientError({"Error": {"Message": "SageMaker broke"}}, "workteam")
mock_client.create_workteam.side_effect = mock_exception
mock_args = self.parser.parse_args(required_args)
with self.assertRaises(Exception):
_utils.create_workteam(mock_client, vars(mock_args))
def test_get_workteam_output_from_job(self):
mock_client = MagicMock()
mock_client.create_workteam.return_value = {"WorkteamArn": "fake-arn"}
self.assertEqual(_utils.create_workteam(mock_client, vars(self.parser.parse_args(required_args))), 'fake-arn')
def test_pass_most_arguments(self):
arguments = required_args + ['--sns_topic', 'fake-topic', '--tags', '{"fake_key": "fake_value"}']
response = _utils.create_workteam_request(vars(self.parser.parse_args(arguments)))
self.assertEqual(response, {'WorkteamName': 'test-team',
'MemberDefinitions': [{'CognitoMemberDefinition': {'UserPool': '', 'UserGroup': '', 'ClientId': ''}}],
'Description': 'fake team',
'NotificationConfiguration' : {'NotificationTopicArn': 'fake-topic'},
'Tags': [{'Key': 'fake_key', 'Value': 'fake_value'}]
})
|
the-stack_0_25628
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowTranscodingsTemplateResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'total': 'int',
'domain': 'str',
'templates': 'list[AppQualityInfo]'
}
attribute_map = {
'total': 'total',
'domain': 'domain',
'templates': 'templates'
}
def __init__(self, total=None, domain=None, templates=None):
"""ShowTranscodingsTemplateResponse - a model defined in huaweicloud sdk"""
super(ShowTranscodingsTemplateResponse, self).__init__()
self._total = None
self._domain = None
self._templates = None
self.discriminator = None
if total is not None:
self.total = total
if domain is not None:
self.domain = domain
if templates is not None:
self.templates = templates
@property
def total(self):
"""Gets the total of this ShowTranscodingsTemplateResponse.
Total number of elements in the query result
:return: The total of this ShowTranscodingsTemplateResponse.
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ShowTranscodingsTemplateResponse.
Total number of elements in the query result
:param total: The total of this ShowTranscodingsTemplateResponse.
:type: int
"""
self._total = total
@property
def domain(self):
"""Gets the domain of this ShowTranscodingsTemplateResponse.
Playback domain name
:return: The domain of this ShowTranscodingsTemplateResponse.
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this ShowTranscodingsTemplateResponse.
Playback domain name
:param domain: The domain of this ShowTranscodingsTemplateResponse.
:type: str
"""
self._domain = domain
@property
def templates(self):
"""Gets the templates of this ShowTranscodingsTemplateResponse.
Transcoding templates
:return: The templates of this ShowTranscodingsTemplateResponse.
:rtype: list[AppQualityInfo]
"""
return self._templates
@templates.setter
def templates(self, templates):
"""Sets the templates of this ShowTranscodingsTemplateResponse.
Transcoding templates
:param templates: The templates of this ShowTranscodingsTemplateResponse.
:type: list[AppQualityInfo]
"""
self._templates = templates
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowTranscodingsTemplateResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
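# Illustrative sketch, not part of the generated SDK code: constructing a response object
# by hand and serializing it to a dict; the field values below are made up.
def _example_response_to_dict():
    response = ShowTranscodingsTemplateResponse(
        total=1,
        domain='play.example.com',
        templates=None,  # would normally be a list of AppQualityInfo objects
    )
    return response.to_dict()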
|
the-stack_0_25629
|
import torch
import random
import torch.autograd as autograd
import torch.nn as nn
import os
import numpy as np
import pandas as pd
from utils.conf import args, set_random_seed
from models.Set2Seq2Choice import Set2Seq2Choice
from preprocesses.DataIterator import ChooseDataset, FruitSeqDataset
from preprocesses.Voc import Voc
from analysis.training_sim_check import sim_check
from models.Losses import seq_cross_entropy_loss
from train_models.train_set2seq2choice import get_batches4sim_check, eval_model
from train_models.train_set2seq2seq_3phases import knowledge_generation_phase
def game_play_phase(
model, train_set, dev_set, sim_chk_inset, sim_chk_batchset,
m_optimiser, s_optimiser, l_optimiser,
clip=args.clip, generation_idx=0, alpha=0.1
):
max_dev_acc = 0.
print_loss = 0.
print_acc = 0.
training_losses = []
training_acc = []
training_in_spkh_sim = []
training_in_msg_sim = []
training_in_lish_sim = []
training_spk_lis_sim = []
eval_acc = []
num_play_iter = args.num_play_iter+1 # if not generation_idx == args.num_generation else args.num_play_iter*3+1
accumulated_acc = 0.
for iter in range(1, num_play_iter):
for data_batch in train_set:
acc, loss = train_epoch(
model,
data_batch,
m_optimiser,
s_optimiser,
l_optimiser
)
print_loss += loss
print_acc += acc
break_flag = False
if iter % args.print_freq == 0:
print_loss_avg = print_loss / (args.print_freq * len(train_set))
print_acc_avg = print_acc / (args.print_freq * len(train_set))
print("Generation: {}; Iteration: {}; Percent complete: {:.1f}%; Avg loss: {:.4f}; Avg acc: {:.4f};".format(
generation_idx, iter, iter / args.num_play_iter * 100, print_loss_avg, print_acc_avg))
training_acc.append(print_acc_avg)
training_losses.append(print_loss_avg)
accumulated_acc = accumulated_acc * (1 - alpha) + print_acc_avg * alpha
if accumulated_acc > args.early_stop:
break_flag = True
print_acc = 0.
print_loss = 0.
if iter % args.eval_freq == 0 or break_flag:
dev_acc, dev_loss = eval_model(model, dev_set)
if dev_acc > max_dev_acc:
max_dev_acc = dev_acc
eval_acc.append(dev_acc)
print("Generation: {}; [EVAL]Iteration: {}; Loss: {:.4f}; Avg Acc: {:.4f}; Best Acc: {:.4f}".format(
generation_idx, iter, dev_loss, dev_acc, max_dev_acc))
if iter % args.sim_chk_freq == 0 or break_flag:
in_spk_sim, in_msg_sim, in_lis_sim, spk_lis_sim = sim_check(
model, sim_chk_inset, sim_chk_batchset
)
training_in_spkh_sim.append(in_spk_sim)
training_in_msg_sim.append(in_msg_sim)
training_in_lish_sim.append(in_lis_sim)
training_spk_lis_sim.append(spk_lis_sim)
print('Generation: {}; [SIM]Iteration: {}; In-SpkHidden Sim: {:.4f}; In-Msg Sim: {:.4f}; In-LisHidden Sim: {:.4f}'.format(
generation_idx, iter, in_spk_sim, in_msg_sim, in_lis_sim))
if break_flag:
break
return training_losses, training_acc, training_in_spkh_sim, training_in_msg_sim, training_in_lish_sim, training_spk_lis_sim, eval_acc
def listener_warming_up_phase(
model, train_set, dev_set, m_optimiser, s_optimiser, l_optimiser,
clip=args.clip, generation_idx=0
):
print_loss = 0.
print_acc = 0.
model.speaker.eval()
for iter in range(1, args.num_lwarmup_iter+1):
for data_batch in train_set:
acc, loss = train_epoch(
model,
data_batch,
m_optimiser,
s_optimiser,
l_optimiser
)
print_loss += loss
print_acc += acc
if iter % args.print_freq == 0:
print_loss_avg = print_loss / (args.print_freq * len(train_set))
print_acc_avg = print_acc / (args.print_freq * len(train_set))
print("Generation: {}; Warming Up Iteration: {}; Percent complete: {:.1f}%; Avg loss: {:.4f}; Avg acc: {:.4f};".format(
generation_idx, iter, iter / args.num_lwarmup_iter * 100, print_loss_avg, print_acc_avg
))
print_loss = 0.
print_acc = 0.
model.speaker.train()
def _speaker_learn_(model, data_batch, target, tgt_mask):
input_var = data_batch['correct']['input']
input_mask = data_batch['correct']['input_mask']
message, msg_logits, _ = model.speaker(input_var, input_mask)
loss_max_len = min(message.shape[0], target.shape[0])
loss, _, _, _, tok_acc, seq_acc\
= seq_cross_entropy_loss(msg_logits, target, tgt_mask, loss_max_len)
return loss, tok_acc, seq_acc
def speaker_learning_phase(model, m_optimizer, s_optimizer, input_set, target_set, tgt_mask_set,
generation_idx=0, clip=args.clip):
assert len(input_set) == len(target_set)
assert len(target_set) == len(tgt_mask_set)
print_loss = 0.
print_seq_acc = 0.
print_tok_acc = 0.
for iter in range(1, args.num_spklearn_iter+1):
for idx, data_batch in enumerate(input_set):
m_optimizer.zero_grad()
s_optimizer.zero_grad()
loss, tok_acc, seq_acc = \
_speaker_learn_(model, data_batch, target_set[idx], tgt_mask_set[idx])
loss.mean().backward()
nn.utils.clip_grad_norm_(model.speaker.parameters(), clip)
m_optimizer.step()
s_optimizer.step()
print_loss += loss.mean()
print_seq_acc += seq_acc
print_tok_acc += tok_acc
if iter % args.print_freq == 0:
print_loss_avg = print_loss / (args.print_freq * len(input_set))
print_seq_acc_avg = print_seq_acc / (args.print_freq * len(input_set))
print_tok_acc_avg = print_tok_acc / (args.print_freq * len(input_set))
print("Generation: {}; Speaker Learning Phase; Iteration: {}; Percent complete: {:.1f}%; Avg loss: {:.4f}; Avg seq acc: {:.4f}; Avg tok acc: {:.4f}".format(
generation_idx, iter, iter / args.num_spklearn_iter * 100, print_loss_avg, print_seq_acc_avg, print_tok_acc_avg
))
print_loss = 0.
print_seq_acc = 0.
print_tok_acc = 0.
def train_generation(
model, train_set, dev_set, learn_set, sim_chk_inset, sim_chk_batchset,
clip=args.clip, generation_idx=0
):
m_optimiser = args.optimiser(model.parameters(), lr=args.learning_rate)
s_optimiser = args.optimiser(model.speaker.decoder.parameters(),
lr=args.learning_rate * args.speaker_ratio)
l_optimiser = args.optimiser(model.listener.parameters(),
lr=args.learning_rate * args.listener_ratio)
training_losses, training_acc, training_in_spkh_sim, training_in_msg_sim, \
training_in_lish_sim, training_spk_lis_sim, eval_acc = \
game_play_phase(model, train_set, dev_set, sim_chk_inset, sim_chk_batchset, m_optimiser, s_optimiser, l_optimiser, clip, generation_idx)
if not generation_idx == args.num_generation:
random.shuffle(learn_set.databatch_set)
reproduced_msg_set, reproduced_msg_masks = \
knowledge_generation_phase(model, learn_set)
print('Generation: {}; Message Reproduction Phase Done.'.format(generation_idx))
model.reset_speaker()
print('Generation: {}; Speaker Reset Done.'.format(generation_idx))
model.reset_listener()
print('Generation: {}; Listener Reset Done.'.format(generation_idx))
m_optimiser = args.optimiser(model.parameters(), lr=args.learning_rate)
s_optimiser = args.optimiser(model.speaker.decoder.parameters(),
lr=args.learning_rate * args.speaker_ratio)
l_optimiser = args.optimiser(model.listener.parameters(),
lr=args.learning_rate * args.listener_ratio)
speaker_learning_phase(model, m_optimiser, s_optimiser, \
learn_set, reproduced_msg_set, reproduced_msg_masks, generation_idx, clip)
print('Generation: {}; Speaker Learning Phase Done.'.format(generation_idx))
listener_warming_up_phase(model, train_set, dev_set, m_optimiser, s_optimiser, l_optimiser, clip, generation_idx)
print('Generation: {}; Listener Warming Up Phase Done.'.format(generation_idx))
del reproduced_msg_set
del reproduced_msg_masks
return training_losses, training_acc, training_in_spkh_sim, training_in_msg_sim, training_in_lish_sim, eval_acc
def train_epoch(model, data_batch, m_optimiser, s_optimiser, l_optimiser, clip=args.clip):
m_optimiser.zero_grad()
s_optimiser.zero_grad()
l_optimiser.zero_grad()
# model.speaker.eval()
loss, print_loss, acc, c_correct, log_msg_prob, log_choose_prob,\
baseline, spk_entropy = model(data_batch)
if args.msg_mode == 'REINFORCE':
(c_correct.detach() * log_msg_prob + 0.05 * spk_entropy).mean().backward()
(c_correct.detach() * log_choose_prob).mean().backward()
elif args.msg_mode == 'SCST':
((c_correct.detach()-baseline.detach()) * log_msg_prob).mean().backward()
((c_correct.detach()-baseline.detach()) * log_choose_prob).mean().backward()
elif args.msg_mode == 'GUMBEL':
loss.mean().backward()
else:
raise NotImplementedError
nn.utils.clip_grad_norm_(model.parameters(), clip)
m_optimiser.step()
s_optimiser.step()
l_optimiser.step()
return acc, print_loss
def train():
print('building vocabulary...')
voc = Voc()
print('done')
print('loading data and building batches...')
train_set = ChooseDataset(voc, dataset_file_path=args.train_file)
dev_set = ChooseDataset(voc, dataset_file_path=args.dev_file)
learn_set = ChooseDataset(voc, dataset_file_path=args.train_file, batch_size=1)
print('done')
if args.param_file is not None:
print('loading saved parameters from ' + args.param_file + '...')
checkpoint = torch.load(args.param_file, map_location=args.device)
train_args = checkpoint['args']
voc = checkpoint['voc']
print('done')
print('arguments for training:')
print(train_args)
print('rebuilding model...')
model = Set2Seq2Choice(voc.num_words).to(args.device)
model.load_state_dict(checkpoint['model'])
print('\tdone')
else:
print('building model...')
model = Set2Seq2Choice(voc.num_words).to(args.device)
print('done')
print('preparing data for testing topological similarity...')
sim_chk_inset, sim_chk_batchset = get_batches4sim_check(voc, args.data_file)
print('done')
print('initialising...')
start_iteration = 1
training_losses = []
training_acc = []
training_in_spkh_sim = []
training_in_msg_sim = []
training_in_lish_sim = []
training_spk_lis_sim = []
eval_acc = []
print('done')
in_spk_sim, in_msg_sim, in_lis_sim, _ = sim_check(  # sim_check also returns the speaker-listener similarity
model, sim_chk_inset, sim_chk_batchset
)
print('[SIM]Iteration: {}; In-SpkHidden Sim: {:.4f}; In-Msg Sim: {:.4f}; In-LisHidden Sim: {:.4f}'.format(
0, in_spk_sim, in_msg_sim, in_lis_sim))
print('training...')
for iter in range(start_iteration, args.num_generation+1):
training_records = train_generation(
model, train_set, dev_set, learn_set, sim_chk_inset, sim_chk_batchset,
generation_idx=iter
)
training_losses += training_records[0]
training_acc += training_records[1]
training_in_spkh_sim += training_records[2]
training_in_msg_sim+= training_records[3]
training_in_lish_sim += training_records[4]
training_spk_lis_sim += training_records[5]
eval_acc += training_records[6]
if iter % args.save_freq == 0:
path_join = 'set2seq2choice_' + str(args.num_words) + '_' + args.msg_mode
path_join += '_hard' if not args.soft else '_soft'
directory = os.path.join(args.save_dir, path_join)
if not os.path.exists(directory):
os.makedirs(directory)
torch.save({
'iteration': iter,
'model': model.state_dict(),
'voc': voc,
'args': args,
'records': {
'training_loss': training_losses,
'training_acc': training_acc,
'training_in_spkh_sim': training_in_spkh_sim,
'training_in_msg_sim': training_in_msg_sim,
'training_in_lish_sim': training_in_lish_sim,
'training_spkh_lish_sim': training_spk_lis_sim,
'eval_acc': eval_acc,
}
}, os.path.join(directory, '{}_{:.4f}_{}.tar'.format(iter, eval_acc[-1], 'checkpoint')))
if __name__ == '__main__':
set_random_seed(1234)
with autograd.detect_anomaly():
print('with detect_anomaly')
if args.test:
# test()
raise NotImplementedError
else:
train()
|
the-stack_0_25630
|
# Copyright (c) 2016, Manito Networks, LLC
# All rights reserved.
# Array of protocol numbers and names according to IANA http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
protocol_type = {
0:{"Name":"HOPOPT"},
1:{"Name": "ICMP","Category":"ICMP"},
2:{"Name": "IGMP","Category":"Multicast"},
3:{"Name": "GGP"},
4:{"Name": "IP-in-IP","Category":"Tunnel"},
5:{"Name": "ST"},
6:{"Name": "TCP","Category":"Transport"},
7:{"Name": "CBT"},
8:{"Name": "EGP","Category":"Routing"},
9:{"Name": "IGP","Category":"Routing"},
10:{"Name": "BBN-RCC-MON"},
11:{"Name": "NVP-II"},
12:{"Name": "PUP"},
13:{"Name": "ARGUS"},
14:{"Name": "EMCON"},
15:{"Name": "XNET"},
16:{"Name": "CHAOS"},
17:{"Name": "UDP","Category":"Transport"},
18:{"Name": "MUX"},
19:{"Name": "DCN-MEAS"},
20:{"Name": "HMP"},
21:{"Name": "PRM"},
22:{"Name": "XNS-IDP"},
23:{"Name": "TRUNK-1"},
24:{"Name": "TRUNK-2"},
25:{"Name": "LEAF-1"},
26:{"Name": "LEAF-2"},
27:{"Name": "RDP"},
28:{"Name": "IRTP"},
29:{"Name": "ISO-TP4"},
30:{"Name": "NETBLT"},
31:{"Name": "MFE-NSP"},
32:{"Name": "MERIT-INP"},
33:{"Name": "DCCP"},
34:{"Name": "3PC"},
35:{"Name": "IDPR"},
36:{"Name": "XTP"},
37:{"Name": "DDP"},
38:{"Name": "IDPR-CMTP"},
39:{"Name": "TP++"},
40:{"Name": "IL"},
41:{"Name": "IPv6 6in4","Category":"Tunnel"},
42:{"Name": "SDRP","Category":"Routing"},
43:{"Name": "IPv6 Routing","Category":"IPv6"},
44:{"Name": "IPv6 Fragment","Category":"IPv6"},
45:{"Name": "IDRP"},
46:{"Name": "RSVP"},
47:{"Name": "GRE","Category":"Tunnel"},
48:{"Name": "DSR"},
49:{"Name": "BNA"},
50:{"Name": "ESP","Category":"Tunnel"},
51:{"Name": "AH","Category":"Tunnel"},
52:{"Name": "I-NLSP"},
53:{"Name": "SWIPE"},
54:{"Name": "NARP"},
55:{"Name": "IP Mobility"},
56:{"Name": "TLSP"},
57:{"Name": "SKIP"},
58:{"Name": "IPv6 ICMP","Category":"ICMP"},
59:{"Name": "IPv6 NoNxt","Category":"IPv6"},
60:{"Name": "IPv6 Options","Category":"IPv6"},
61:{"Name": "Host Internal Protocol"},
62:{"Name": "CFTP"},
63:{"Name": "Local Network"},
64:{"Name": "SAT-EXPAK"},
65:{"Name": "KRYPTOLAN"},
66:{"Name": "RVD"},
67:{"Name": "IPPC"},
68:{"Name": "Distributed File System"},
69:{"Name": "SAT-MON"},
70:{"Name": "VISA"},
71:{"Name": "IPCV"},
72:{"Name": "CPNX"},
73:{"Name": "CPHB"},
74:{"Name": "WSN"},
75:{"Name": "PVP"},
76:{"Name": "BR-SAT-MON"},
77:{"Name": "SUN-ND"},
78:{"Name": "WB-MON"},
79:{"Name": "WB-EXPAK"},
80:{"Name": "ISO-IP"},
81:{"Name": "VMTP"},
82:{"Name": "SECURE-VMTP"},
83:{"Name": "VINES"},
84:{"Name": "TTP / IPTM"},
85:{"Name": "NSFNET-IGP"},
86:{"Name": "DGP"},
87:{"Name": "TCF"},
88:{"Name": "EIGRP","Category":"Routing"},
89:{"Name": "OSPF","Category":"Routing"},
90:{"Name": "Sprite-RPC"},
91:{"Name": "LARP"},
92:{"Name": "MTP"},
93:{"Name": "AX.25"},
94:{"Name": "IPIP","Category":"Tunnel"},
95:{"Name": "MICP"},
96:{"Name": "SCC-SP"},
97:{"Name": "ETHERIP","Category":"Tunnel"},
98:{"Name": "ENCAP","Category":"Tunnel"},
99:{"Name": "Private Encryption Scheme"},
100:{"Name": "GMTP"},
101:{"Name": "IFMP"},
102:{"Name": "PNNI"},
103:{"Name": "PIM","Category":"Multicast"},
104:{"Name": "ARIS"},
105:{"Name": "SCPS"},
106:{"Name": "QNX"},
107:{"Name": "A/N"},
108:{"Name": "IPComp"},
109:{"Name": "SNP"},
110:{"Name": "Compaq-Peer"},
111:{"Name": "IPX-in-IP","Category":"Tunnel"},
112:{"Name": "VRRP","Category":"Failover"},
113:{"Name": "PGM"},
114:{"Name": "0-Hop Protocol"},
115:{"Name": "L2TP","Category":"Tunnel"},
116:{"Name": "DDX"},
117:{"Name": "IATP"},
118:{"Name": "STP"},
119:{"Name": "SRP"},
120:{"Name": "UTI"},
121:{"Name": "SMP"},
122:{"Name": "SM"},
123:{"Name": "PTP"},
124:{"Name": "ISIS","Category":"Routing"},
125:{"Name": "FIRE"},
126:{"Name": "CRTP"},
127:{"Name": "CRUDP"},
128:{"Name": "SSCOPMCE"},
129:{"Name": "IPLT"},
130:{"Name": "SPS"},
131:{"Name": "PIPE"},
132:{"Name": "SCTP"},
133:{"Name": "Fibre Channel"},
134:{"Name": "RSVP-E2E-IGNORE"},
135:{"Name": "Mobility Header"},
136:{"Name": "UDPLite"},
137:{"Name": "MPLS-in-IP"},
138:{"Name": "MANET"},
139:{"Name": "HIP"},
140:{"Name": "Shim6","Category":"IPv6"},
141:{"Name": "WESP"},
142:{"Name": "ROHC"},
253:{"Name": "Experimentation and Testing"},
254:{"Name": "Experimentation and Testing"},
255:{"Name": "Reserved"}
}
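# Illustrative helper, not part of the original module: resolving an IANA protocol number
# to its name and (optional) category, with a fallback for unassigned values.
def lookup_protocol(number):
    entry = protocol_type.get(number, {"Name": "Unknown"})
    return entry["Name"], entry.get("Category", "Other")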
|
the-stack_0_25631
|
import re
class MessageHandler:
__track_template = u'<b>{username}</b> is listening to <a href="{track_url}">{track}</a> of artist <i>{artist}</i>'
def __init__(self, logger, lastfm_client, telegram_botapi_client):
self.__logger = logger
self.__lastfm_client = lastfm_client
self.__telegram_botapi_client = telegram_botapi_client
def handle_message(self, message):
self.__logger.info('Handling message ' + str(message.message_id))
if message.text == None:
self.__logger.info('No text in message ' + str(message.message_id))
return
m = re.search(r'/recent_tracks (\S+)', message.text) or re.search(r'^What is (\S+) listening to?', message.text)
if m != None:
username = m.group(1)
tracks = self.__lastfm_client.get_recent_tracks(username)
if len(tracks) > 0:
first_track = tracks[0]
self.__logger.info('Found track ' + first_track.name + ' for user ' + username)
chat_id = message.chat.chat_id
formatted_msg = self.__track_template.format(username=username, \
track_url=first_track.url, \
track=first_track.name, \
artist=first_track.artist)
self.__telegram_botapi_client.send_message(chat_id, formatted_msg, 'HTML')
else:
self.__logger.info('Message did not match any known command')
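# Illustrative note, not part of the original class: handle_message() reacts to texts of
# the form "/recent_tracks <username>" or "What is <username> listening to?", looks up
# that user's most recent last.fm track, and replies in the originating chat using the
# HTML-formatted template above.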
|
the-stack_0_25634
|
import os
import numpy as np
import cv2
import csv
import utils
def argsProcessor():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--outputDir", help="output Directory of Data")
parser.add_argument("-i", "--inputDir", help="input Directory of data")
return parser.parse_args()
args = argsProcessor()
outputDir = args.outputDir
inputDir = args.inputDir
if (not os.path.isdir(outputDir)):
os.mkdir(outputDir)
import csv
with open(outputDir+ "/gt.csv", 'a') as csvfile:
spamwriter_1 = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for image in os.listdir(inputDir):
if image.endswith("jpg"):
if os.path.isfile(inputDir+ "/"+image+ ".csv"):
with open(inputDir+ "/"+image+ ".csv", 'r') as csvfile:
spamwriter = csv.reader(csvfile, delimiter=' ',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
img = cv2.imread(inputDir + "/" + image)
print (image)
gt= []
for row in spamwriter:
gt.append(row)
# img = cv2.circle(img, (int(float(row[0])), int(float(row[1]))), 2,(255,0,0),90)
gt =np.array(gt).astype(np.float32)
# print gt
gt = gt / (img.shape[0], img.shape[1])
gt = gt * (1080, int((1080.0 / img.shape[0] * img.shape[1])))
img = cv2.resize(img, ( int((1080.0/img.shape[0]*img.shape[1])),1080))
# for a in range(0,4):
# img = cv2.circle(img, tuple((gt[a].astype(int))), 2, (255, 0, 0), 9)
# cv2.imwrite("asda.jpg", img)
# 0/0
for angle in range(0,271,90):
img_rotate, gt_rotate = utils.rotate(img, gt,angle)
for random_crop in range(0,1):
img_list, gt_list = utils.getCorners(img_rotate, gt_rotate)
for a in range(0,4):
cv2.circle(img_list[a], tuple(gt_list[a]), 2,(255,0,0),2)
cv2.imwrite(outputDir + "/" + image + str(angle) + str(random_crop) + str(a) + ".jpg", img_list[a])
spamwriter_1.writerow(( image + str(angle) +str(random_crop) + str(a) +".jpg", tuple(gt_list[a])))
|
the-stack_0_25636
|
r"""
Solve SAT problems with Integer Linear Programming
The class defined here is a :class:`~sage.sat.solvers.satsolver.SatSolver` that
solves its instance using :class:`MixedIntegerLinearProgram`. Its performance
can be expected to be slower than when using
:class:`~sage.sat.solvers.cryptominisat.cryptominisat.CryptoMiniSat`.
"""
from satsolver import SatSolver
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
class SatLP(SatSolver):
def __init__(self, solver=None):
r"""
Initializes the instance
INPUT:
- ``solver`` -- (default: ``None``) Specify a Linear Program (LP)
solver to be used. If set to ``None``, the default one is used. For
more information on LP solvers and which default solver is used, see
the method
:meth:`solve <sage.numerical.mip.MixedIntegerLinearProgram.solve>`
of the class
:class:`MixedIntegerLinearProgram <sage.numerical.mip.MixedIntegerLinearProgram>`.
EXAMPLE::
sage: S=SAT(solver="LP"); S
an ILP-based SAT Solver
"""
SatSolver.__init__(self)
self._LP = MixedIntegerLinearProgram()
self._vars = self._LP.new_variable(binary=True)
def var(self):
"""
Return a *new* variable.
EXAMPLE::
sage: S=SAT(solver="LP"); S
an ILP-based SAT Solver
sage: S.var()
1
"""
nvars = n = self._LP.number_of_variables()
while nvars==self._LP.number_of_variables():
n += 1
self._vars[n] # creates the variable if needed
return n
def nvars(self):
"""
Return the number of variables.
EXAMPLE::
sage: S=SAT(solver="LP"); S
an ILP-based SAT Solver
sage: S.var()
1
sage: S.var()
2
sage: S.nvars()
2
"""
return self._LP.number_of_variables()
def add_clause(self, lits):
"""
Add a new clause to set of clauses.
INPUT:
- ``lits`` - a tuple of integers != 0
.. note::
If any element ``e`` in ``lits`` has ``abs(e)`` greater
than the number of variables generated so far, then new
variables are created automatically.
EXAMPLE::
sage: S=SAT(solver="LP"); S
an ILP-based SAT Solver
sage: for u,v in graphs.CycleGraph(6).edges(labels=False):
....: u,v = u+1,v+1
....: S.add_clause((u,v))
....: S.add_clause((-u,-v))
"""
if 0 in lits:
raise ValueError("0 should not appear in the clause: {}".format(lits))
p = self._LP
p.add_constraint(p.sum(self._vars[x] if x>0 else 1-self._vars[-x] for x in lits)
>=1)
def __call__(self):
"""
Solve this instance.
OUTPUT:
- If this instance is SAT: A tuple of length ``nvars()+1``
where the ``i``-th entry holds an assignment for the
``i``-th variables (the ``0``-th entry is always ``None``).
- If this instance is UNSAT: ``False``
EXAMPLE::
sage: def is_bipartite_SAT(G):
....: S=SAT(solver="LP"); S
....: for u,v in G.edges(labels=False):
....: u,v = u+1,v+1
....: S.add_clause((u,v))
....: S.add_clause((-u,-v))
....: return S
sage: S = is_bipartite_SAT(graphs.CycleGraph(6))
sage: S() # random
[None, True, False, True, False, True, False]
sage: True in S()
True
sage: S = is_bipartite_SAT(graphs.CycleGraph(7))
sage: S()
False
"""
try:
self._LP.solve()
except MIPSolverException:
return False
b = self._LP.get_values(self._vars)
n = max(b)
return [None]+[bool(b.get(i,0)) for i in range(1,n+1)]
def __repr__(self):
"""
TESTS::
sage: S=SAT(solver="LP"); S
an ILP-based SAT Solver
"""
return "an ILP-based SAT Solver"
|
the-stack_0_25637
|
import pickle
from pathlib import Path
from definitions import OUTPUT_DIR
from bs4 import BeautifulSoup
import os
class UserManual:
def __init__(self):
self.base_path = Path(OUTPUT_DIR) / "user_manual"
def extract_md(self, md_file):
with open(md_file, "r", encoding='utf-8') as file_obj:
content = file_obj.read()
content = (content.replace("#", ""))
return content
def get_files(self):
file_names = [os.path.join(path, file_name)
for path, _, file_list in os.walk(str(self.base_path))
for file_name in file_list if file_name.endswith('.md')]
return file_names
def extract_all(self):
file_names = self.get_files()
res = []
for file_path in file_names:
res.append(self.extract_md(file_path))
self.save(res)
return res
def save_2_pickle(self, path, res):
"""
将结果序列化保存到文件
:return:
"""
with open(path, "wb") as file_handler:
pickle.dump(res, file_handler, protocol=pickle.HIGHEST_PROTOCOL)
def save(self, res):
self.save_2_pickle(str(Path(OUTPUT_DIR) / "user_manual.pickle"), res)
if __name__ == '__main__':
user_manual_service = UserManual()
user_manual_service.extract_all()
|
the-stack_0_25638
|
from easydict import EasyDict as edict
# init
__C_CARPK = edict()
cfg_data = __C_CARPK
__C_CARPK.STD_SIZE = (544,960)
__C_CARPK.TRAIN_SIZE = (408,720)
__C_CARPK.DATA_PATH = '/home/zhangli/yhs/ProcessedData/ProcessedData/CARPK'
__C_CARPK.MEAN_STD = ([0.452016860247, 0.447249650955, 0.431981861591],[0.23242045939, 0.224925786257, 0.221840232611])
__C_CARPK.LABEL_FACTOR = 1
__C_CARPK.LOG_PARA = 1000.
__C_CARPK.RESUME_MODEL = ''#model path
__C_CARPK.TRAIN_BATCH_SIZE = 4 #imgs
__C_CARPK.VAL_BATCH_SIZE = 4 #
__C_CARPK.TEST_BATCH_SIZE = 4 #
|
the-stack_0_25639
|
"""Define our own locale negotiator that looks at the browser
preferences, as well as the ``_`` translation function.
"""
from pyramid.i18n import TranslationStringFactory
from webob.acceptparse import NilAccept
_ = TranslationStringFactory('brouz')
def locale_negotiator(request):
"""Return a locale name by looking at the ``Accept-Language`` HTTP
header.
"""
settings = request.registry.settings
available_languages = settings['pyramid.available_languages'].split()
header = request.accept_language
if isinstance(header, NilAccept):
# If the header is absent or empty, we get a 'NilAccept'
# object, whose 'best_match()' method returns the first item
# in 'available_languages'. This may or may not be our default
# locale name, so here we will work around this.
return None
return header.best_match(available_languages)
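# Illustrative note, not part of the original module: with
# 'pyramid.available_languages' set to "en fr de" and a browser sending
# "Accept-Language: fr-CH, fr;q=0.9, en;q=0.8", best_match() is expected to pick "fr";
# the settings value and header used here are assumptions made for the example.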
|
the-stack_0_25641
|
# Copyright 2021 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Tekton
Tekton Pipeline # noqa: E501
The version of the OpenAPI document: v0.17.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from tekton_pipeline.configuration import Configuration
class V1beta1CloudEventDelivery(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'status': 'V1beta1CloudEventDeliveryState',
'target': 'str'
}
attribute_map = {
'status': 'status',
'target': 'target'
}
def __init__(self, status=None, target=None, local_vars_configuration=None): # noqa: E501
"""V1beta1CloudEventDelivery - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._status = None
self._target = None
self.discriminator = None
if status is not None:
self.status = status
if target is not None:
self.target = target
@property
def status(self):
"""Gets the status of this V1beta1CloudEventDelivery. # noqa: E501
:return: The status of this V1beta1CloudEventDelivery. # noqa: E501
:rtype: V1beta1CloudEventDeliveryState
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1beta1CloudEventDelivery.
:param status: The status of this V1beta1CloudEventDelivery. # noqa: E501
:type: V1beta1CloudEventDeliveryState
"""
self._status = status
@property
def target(self):
"""Gets the target of this V1beta1CloudEventDelivery. # noqa: E501
Target points to an addressable # noqa: E501
:return: The target of this V1beta1CloudEventDelivery. # noqa: E501
:rtype: str
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this V1beta1CloudEventDelivery.
Target points to an addressable # noqa: E501
:param target: The target of this V1beta1CloudEventDelivery. # noqa: E501
:type: str
"""
self._target = target
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1CloudEventDelivery):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1CloudEventDelivery):
return True
return self.to_dict() != other.to_dict()
|
the-stack_0_25643
|
#!/usr/bin/python
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import csv
import os
import argparse
"""
usage:
Processes all .jpg, .png, .bmp and .gif files found in the specified directory and its subdirectories.
--PATH (path to a directory of images, or to a directory whose subdirectories contain images), e.g. Path/To/Directory/
--MODEL_PATH (path to the TensorFlow saved model)
"""
parser = argparse.ArgumentParser(description='Crystal Detection Program')
parser.add_argument('--PATH', type=str,
help='path to image directory. Recursively finds all image files in directory and sub directories') # path to image directory or containing sub directories.
parser.add_argument('--MODEL_PATH', type=str, default='./savedmodel', help='the file path to the tensorflow model ')
args = vars(parser.parse_args())
PATH = args['PATH']
model_path = args['MODEL_PATH']
crystal_images = [os.path.join(dp, f) for dp, dn, filenames in os.walk(PATH) for f in filenames if
os.path.splitext(f)[1] in ['.jpg', '.png', '.bmp', '.gif']]  # splitext() returns the extension with its leading dot
size = len(crystal_images)
def load_images(file_list):
for i in file_list:
file = open(i, 'rb')  # read raw image bytes for the image_bytes feature
yield {"image_bytes": [file.read()]}, i
iterator = load_images(crystal_images)
with open(PATH + 'results.csv', 'w') as csvfile:
Writer = csv.writer(csvfile, delimiter=' ', quotechar=' ', quoting=csv.QUOTE_MINIMAL)
predicter = tf.contrib.predictor.from_saved_model(model_path)
dic = {}
k = 0
for _ in range(size):
data, name = next(iterator)
results = predicter(data)
vals = results['scores'][0]
vals = vals * 100
print('Image path: ' + name, 'Crystal: ' + str(vals[0]), 'Other: ' + str(vals[1]),
'Precipitate: ' + str(vals[2]), 'Clear ' + str(vals[3]))
Writer.writerow(['Image path: ' + name, 'Crystal: ' + str(vals[0]), 'Other: ' + str(vals[1]),
'Precipitate: ' + str(vals[2]), 'Clear: ' + str(vals[3])])
|
the-stack_0_25644
|
import re
from ztag.annotation import Annotation
from ztag.annotation import OperatingSystem
from ztag.annotation import Type
from ztag.annotation import Manufacturer
from ztag import protocols
import ztag.test
class FtpSony(Annotation):
protocol = protocols.FTP
subprotocol = protocols.FTP.BANNER
port = None
manufact_1_re = re.compile(
"^220 Welcome to SONY Network Camera",
re.IGNORECASE
)
manufact_2_re = re.compile("^220-Sony Network Camera", re.IGNORECASE)
product_2_re = re.compile("Network Camera ([-A-Z0-9_]+)", re.IGNORECASE)
tests = {
"FtpSony_1": {
"global_metadata": {
"device_type": Type.CAMERA,
"manufacturer": Manufacturer.SONY,
"product": "SNC-EP520"
}
},
"FtpSony_2": {
"global_metadata": {
"device_type": Type.CAMERA,
"manufacturer": Manufacturer.SONY
}
},
}
def process(self, obj, meta):
banner = obj["banner"]
if self.manufact_1_re.search(banner):
meta.global_metadata.device_type = Type.CAMERA
meta.global_metadata.manufacturer = Manufacturer.SONY
return meta
if self.manufact_2_re.search(banner):
meta.global_metadata.device_type = Type.CAMERA
meta.global_metadata.manufacturer = Manufacturer.SONY
product = self.product_2_re.search(banner).group(1)
meta.global_metadata.product = product
return meta
""" Tests
"220-Sony Network Camera SNC-EP520\r\n220 \r\n"
"220 Welcome to SONY Network Camera\r\n"
"220-Sony Network Camera SNC-CH140\r\n220 \r\n"
"220 Welcome to SONY Network Camera\r\n"
"220 Welcome to SONY Network Camera\r\n"
"220-Sony Network Camera SNC-ER580\r\n220 \r\n"
"220-Sony Network Camera SNC-DH160\r\n220 \r\n"
"""
|
the-stack_0_25645
|
from unittest.mock import patch
from tests.test_rrpproxy_base import TestRRPProxyBase
class TestRRPProxyCheckContact(TestRRPProxyBase):
@patch('rrpproxy.RRPProxy.call')
def test_calls_call_correctly(self, call_mock):
response = self.proxy.check_contact('CONTACT-A')
call_mock.assert_called_once_with('CheckContact', contact='CONTACT-A')
self.assertEqual(response, call_mock.return_value)
|
the-stack_0_25647
|
import sys, random
assert sys.version_info >= (3,7), "This script requires at least Python 3.7"
import random
game_start = input("Hello! Do you want to play a guessing game? [YES/NO]: ")
if (game_start == "Yes" or game_start == "yes" or game_start == "YES"):
game_type = input("Let's play a guessing game! What kind do you want? [NUMBERS/GAME COMPANIES]: ")
if (game_type == "Numbers" or game_type == "numbers" or game_type == "NUMBERS"):
number_choice = random.randrange(1, 100)
while True:
print("I'm thinking of a number between 1 and 100...Guess what number it is!")
number_guess = input("Guess: ")
if(int(number_guess) == number_choice):
print("You got it!")
break
if(int(number_guess) > number_choice):
print("Too high.")
if(int(number_guess) < number_choice):
print("Too low.")
else:
("I don't understand your answer. Please try again.")
if (game_type == "Game Companies" or game_type == "game companies" or game_type == "Game companies" or game_type == "GAME COMPANIES"):
gamecompanyList = ["Nintendo", "Sega", "Ubisoft", "Naughty Dog", "Telltale", "Square Enix", "Konami", "Electronic Arts", "Microsoft Studios", "Sony Computer Entertainment"]
gamecompanyChoice = random.choice(gamecompanyList)
while True:
print("Out of this list, I'm thinking of a random game company...Guess what it is!")
gamecompany_guess = input(str(gamecompanyList) + "Guess: ")
if (gamecompany_guess != gamecompanyChoice):
print("Try again!")
else:
print("You got it! Congrats!")
break
else:
print("I don't understand your answer. Please try again.")
elif (game_start == "No" or game_start == "no" or game_start == "NO"):
print("You're no fun.")
else:
print("I don't understand your answer. Please try again.")
|
the-stack_0_25653
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import os.path
import copy
import spack.util.environment
class Cp2k(MakefilePackage, CudaPackage):
"""CP2K is a quantum chemistry and solid state physics software package
that can perform atomistic simulations of solid state, liquid, molecular,
periodic, material, crystal, and biological systems
"""
homepage = 'https://www.cp2k.org'
url = 'https://github.com/cp2k/cp2k/releases/download/v3.0.0/cp2k-3.0.tar.bz2'
git = 'https://github.com/cp2k/cp2k.git'
list_url = 'https://github.com/cp2k/cp2k/releases'
maintainers = ['dev-zero']
version('7.1', sha256='ccd711a09a426145440e666310dd01cc5772ab103493c4ae6a3470898cd0addb')
version('6.1', sha256='af803558e0a6b9e9d9ce8a3ab955ba32bacd179922455424e061c82c9fefa34b')
version('5.1', sha256='e23613b593354fa82e0b8410e17d94c607a0b8c6d9b5d843528403ab09904412')
version('4.1', sha256='4a3e4a101d8a35ebd80a9e9ecb02697fb8256364f1eccdbe4e5a85d31fe21343')
version('3.0', sha256='1acfacef643141045b7cbade7006f9b7538476d861eeecd9658c9e468dc61151')
version('master', branch='master', submodules="True")
variant('mpi', default=True, description='Enable MPI support')
variant('openmp', default=True, description='Enable OpenMP support')
variant('smm', default='libxsmm', values=('libxsmm', 'libsmm', 'blas'),
description='Library for small matrix multiplications')
variant('plumed', default=False, description='Enable PLUMED support')
variant('libint', default=True,
description='Use libint, required for HFX (and possibly others)')
variant('libxc', default=True,
description='Support additional functionals via libxc')
variant('pexsi', default=False,
description=('Enable the alternative PEXSI method'
' for density matrix evaluation'))
variant('elpa', default=False,
description='Enable optimised diagonalisation routines from ELPA')
variant('sirius', default=False,
description=('Enable planewave electronic structure'
' calculations via SIRIUS'))
variant('cosma', default=False, description='Use COSMA for p?gemm')
# override cuda_arch from CudaPackage since we only support one arch
# at a time and only specific ones for which we have parameter files
# for optimal kernels
variant('cuda_arch',
description='CUDA architecture',
default='none',
values=('none', '35', '37', '60', '70'),
multi=False)
variant('cuda_arch_35_k20x', default=False,
description=('CP2K (resp. DBCSR) has specific parameter sets for'
' different GPU models. Enable this when building'
' with cuda_arch=35 for a K20x instead of a K40'))
variant('cuda_fft', default=False,
description=('Use CUDA also for FFTs in the PW part of CP2K'))
variant('cuda_blas', default=False,
description=('Use CUBLAS for general matrix operations in DBCSR'))
HFX_LMAX_RANGE = range(4, 8)
variant('lmax',
description='Maximum supported angular momentum (HFX and others)',
default='5',
values=tuple(map(str, HFX_LMAX_RANGE)),
multi=False)
depends_on('python', type='build')
depends_on('blas')
depends_on('lapack')
depends_on('fftw-api@3')
# require libxsmm-1.11+ since 1.10 can leak file descriptors in Fortran
depends_on('[email protected]:~header-only', when='smm=libxsmm')
# use pkg-config (support added in libxsmm-1.10) to link to libxsmm
depends_on('pkgconfig', type='build', when='smm=libxsmm')
# ... and in CP2K 7.0+ for linking to libint2
depends_on('pkgconfig', type='build', when='[email protected]:')
depends_on('pkgconfig', type='build', when='[email protected]:')
# libint & libxc are always statically linked
depends_on('[email protected]:1.2', when='[email protected]:6.9', type='build')
for lmax in HFX_LMAX_RANGE:
# libint2 can be linked dynamically again
depends_on('[email protected]:+fortran tune=cp2k-lmax-{0}'.format(lmax),
when='[email protected]: lmax={0}'.format(lmax))
depends_on('[email protected]:3.99.0', when='+libxc@:5.5999', type='build')
depends_on('[email protected]:4.99.0', when='[email protected]:6.9', type='build')
depends_on('[email protected]:4.99.0', when='[email protected]:')
depends_on('mpi@2:', when='+mpi')
depends_on('scalapack', when='+mpi')
depends_on('cosma+scalapack', when='+cosma')
depends_on('cosma+cuda+scalapack', when='+cosma+cuda')
depends_on('[email protected]:2016.13+openmp', when='+openmp+elpa@:5.999')
depends_on('[email protected]:2017.11+openmp', when='[email protected]:6.999')
depends_on('[email protected]:+openmp', when='[email protected]:')
depends_on('[email protected]:2016.13~openmp', when='~openmp+elpa@:5.999')
depends_on('[email protected]:2017.11~openmp', when='[email protected]:6.999')
depends_on('[email protected]:~openmp', when='[email protected]:')
depends_on('plumed+shared+mpi', when='+plumed+mpi')
depends_on('plumed+shared~mpi', when='+plumed~mpi')
# while we link statically against PEXSI, its own deps may be linked in
# dynamically, therefore can't set this as pure build-type dependency.
depends_on('[email protected]:0.9.999', when='+pexsi@:4.999')
depends_on('[email protected]:', when='[email protected]:')
# only OpenMP should be consistently used, all other common things
# like ELPA, SCALAPACK are independent and Spack will ensure that
# a consistent/compat. combination is pulled in to the dependency graph.
depends_on('sirius@:6.999+fortran+vdwxc+shared+openmp', when='@:7.999+sirius+openmp')
depends_on('sirius@:6.999+fortran+vdwxc+shared~openmp', when='@:7.999+sirius~openmp')
depends_on('sirius@7:+fortran+vdwxc+shared+openmp', when='@8:+sirius+openmp')
# the bundled libcusmm uses numpy in the parameter prediction (v7+)
# which is written using Python 3
depends_on('py-numpy', when='@7:+cuda', type='build')
depends_on('[email protected]:', when='@7:+cuda', type='build')
# PEXSI, ELPA, COSMA and SIRIUS depend on MPI
conflicts('~mpi', '+pexsi')
conflicts('~mpi', '+elpa')
conflicts('~mpi', '+sirius')
conflicts('~mpi', '+cosma')
conflicts('+sirius', '@:6.999') # sirius support was introduced in 7+
conflicts('+cosma', '@:7.999') # COSMA support was introduced in 8+
conflicts('~cuda', '+cuda_fft')
conflicts('~cuda', '+cuda_blas')
# Apparently [email protected] needs an "experimental" version of libwannier.a
# which is only available contacting the developer directly. See INSTALL
# in the stage of [email protected]
depends_on('wannier90', when='@3.0+mpi', type='build')
# CP2K needs compiler specific compilation flags, e.g. optflags
conflicts('%apple-clang')
conflicts('%clang')
conflicts('%nag')
# please set variants: smm=blas by configuring packages.yaml or install
# cp2k with option smm=blas on aarch64
conflicts('smm=libxsmm', when='target=aarch64:', msg='libxsmm is not available on arm')
conflicts('^fftw~openmp', when='+openmp')
conflicts('^openblas threads=none', when='+openmp')
conflicts('^openblas threads=pthreads', when='+openmp')
conflicts('~openmp', when='@8:', msg='Building without OpenMP is not supported in CP2K 8+')
@property
def makefile_architecture(self):
return '{0.architecture}-{0.compiler.name}'.format(self.spec)
@property
def makefile_version(self):
return '{prefix}{suffix}'.format(
prefix='p' if '+mpi' in self.spec else 's',
suffix='smp' if '+openmp' in self.spec else 'opt'
)
@property
def makefile(self):
makefile_basename = '.'.join([
self.makefile_architecture, self.makefile_version
])
return os.path.join('arch', makefile_basename)
@property
def archive_files(self):
return [os.path.join(self.stage.source_path, self.makefile)]
def edit(self, spec, prefix):
pkgconf = which('pkg-config')
if '^fftw' in spec:
fftw = spec['fftw:openmp' if '+openmp' in spec else 'fftw']
fftw_header_dir = fftw.headers.directories[0]
elif '^intel-mkl' in spec:
fftw = spec['intel-mkl']
fftw_header_dir = fftw.headers.directories[0] + '/fftw'
elif '^intel-parallel-studio+mkl' in spec:
fftw = spec['intel-parallel-studio']
fftw_header_dir = '<NOTFOUND>'
for incdir in [join_path(f, 'fftw')
for f in fftw.headers.directories]:
if os.path.exists(incdir):
fftw_header_dir = incdir
break
optimization_flags = {
'gcc': [
'-O2',
'-funroll-loops',
'-ftree-vectorize',
],
'intel': ['-O2', '-pc64', '-unroll', ],
'pgi': ['-fast'],
'nvhpc': ['-fast'],
'cray': ['-O2'],
'xl': ['-O3'],
}
dflags = ['-DNDEBUG']
cppflags = [
'-D__FFTW3',
'-I{0}'.format(fftw_header_dir),
]
if '^mpi@3:' in spec:
cppflags.append('-D__MPI_VERSION=3')
elif '^mpi@2:' in spec:
cppflags.append('-D__MPI_VERSION=2')
cflags = optimization_flags[self.spec.compiler.name][:]
cxxflags = optimization_flags[self.spec.compiler.name][:]
fcflags = optimization_flags[self.spec.compiler.name][:]
nvflags = ['-O3']
ldflags = []
libs = []
gpuver = ''
if '%intel' in spec:
cflags.append('-fp-model precise')
cxxflags.append('-fp-model precise')
fcflags += [
'-fp-model precise',
'-heap-arrays 64',
'-g',
'-traceback',
]
elif '%gcc' in spec:
fcflags += [
'-ffree-form',
'-ffree-line-length-none',
'-ggdb', # make sure we get proper Fortran backtraces
]
elif '%pgi' in spec or '%nvhpc' in spec:
fcflags += ['-Mfreeform', '-Mextend']
elif '%cray' in spec:
fcflags += ['-emf', '-ffree', '-hflex_mp=strict']
elif '%xl' in spec:
fcflags += ['-qpreprocess', '-qstrict', '-q64']
ldflags += ['-Wl,--allow-multiple-definition']
if '+openmp' in spec:
cflags.append(self.compiler.openmp_flag)
cxxflags.append(self.compiler.openmp_flag)
fcflags.append(self.compiler.openmp_flag)
ldflags.append(self.compiler.openmp_flag)
nvflags.append('-Xcompiler="{0}"'.format(
self.compiler.openmp_flag))
elif '%cray' in spec: # Cray enables OpenMP by default
cflags += ['-hnoomp']
cxxflags += ['-hnoomp']
fcflags += ['-hnoomp']
ldflags += ['-hnoomp']
if '@7:' in spec: # recent versions of CP2K use C++14 CUDA code
cxxflags.append(self.compiler.cxx14_flag)
nvflags.append(self.compiler.cxx14_flag)
ldflags.append(fftw.libs.search_flags)
if '[email protected]' in spec:
ldflags.insert(0, '-Wl,--allow-multiple-definition')
if '+plumed' in self.spec:
dflags.extend(['-D__PLUMED2'])
cppflags.extend(['-D__PLUMED2'])
libs.extend([
os.path.join(self.spec['plumed'].prefix.lib,
'libplumed.{0}'.format(dso_suffix))
])
cc = spack_cc if '~mpi' in spec else spec['mpi'].mpicc
cxx = spack_cxx if '~mpi' in spec else spec['mpi'].mpicxx
fc = spack_fc if '~mpi' in spec else spec['mpi'].mpifc
# Intel
if '%intel' in spec:
cppflags.extend([
'-D__INTEL',
'-D__HAS_ISO_C_BINDING',
'-D__USE_CP2K_TRACE',
])
fcflags.extend([
'-diag-disable 8290,8291,10010,10212,11060',
'-free',
'-fpp'
])
# FFTW, LAPACK, BLAS
lapack = spec['lapack'].libs
blas = spec['blas'].libs
ldflags.append((lapack + blas).search_flags)
libs.extend([str(x) for x in (fftw.libs, lapack, blas)])
if '^intel-mkl' in spec or '^intel-parallel-studio+mkl' in spec:
cppflags += ['-D__MKL']
elif '^accelerate' in spec:
cppflags += ['-D__ACCELERATE']
if '+cosma' in spec:
# add before ScaLAPACK to override the p?gemm symbols
cosma = spec['cosma'].libs
ldflags.append(cosma.search_flags)
libs.extend(cosma)
# MPI
if '+mpi' in spec:
cppflags.extend([
'-D__parallel',
'-D__SCALAPACK'
])
scalapack = spec['scalapack'].libs
ldflags.append(scalapack.search_flags)
libs.extend(scalapack)
libs.extend(spec['mpi:cxx'].libs)
libs.extend(self.compiler.stdcxx_libs)
if 'wannier90' in spec:
cppflags.append('-D__WANNIER90')
wannier = os.path.join(
spec['wannier90'].libs.directories[0], 'libwannier.a'
)
libs.append(wannier)
if '+libint' in spec:
cppflags += ['-D__LIBINT']
if '@:6.9' in spec:
cppflags += [
'-D__LIBINT_MAX_AM=6',
'-D__LIBDERIV_MAX_AM1=5',
]
# libint-1.x.y has to be linked statically to work around
# inconsistencies in its Fortran interface definition
# (short-int vs int) which otherwise causes segfaults at
# runtime due to wrong offsets into the shared library
# symbols.
libs.extend([
os.path.join(
spec['libint'].libs.directories[0], 'libderiv.a'),
os.path.join(
spec['libint'].libs.directories[0], 'libint.a'),
])
else:
fcflags += pkgconf('--cflags', 'libint2', output=str).split()
libs += pkgconf('--libs', 'libint2', output=str).split()
if '+libxc' in spec:
cppflags += ['-D__LIBXC']
if '@:6.9' in spec:
libxc = spec['libxc:fortran,static']
cppflags += [libxc.headers.cpp_flags]
ldflags.append(libxc.libs.search_flags)
libs.append(str(libxc.libs))
else:
fcflags += pkgconf('--cflags', 'libxcf03', output=str).split()
libs += pkgconf('--libs', 'libxcf03', output=str).split()
if '+pexsi' in spec:
cppflags.append('-D__LIBPEXSI')
fcflags.append('-I' + os.path.join(
spec['pexsi'].prefix, 'fortran'))
libs.extend([
os.path.join(spec['pexsi'].libs.directories[0],
'libpexsi.a'),
os.path.join(spec['superlu-dist'].libs.directories[0],
'libsuperlu_dist.a'),
os.path.join(
spec['parmetis'].libs.directories[0],
'libparmetis.{0}'.format(dso_suffix)
),
os.path.join(
spec['metis'].libs.directories[0],
'libmetis.{0}'.format(dso_suffix)
),
])
if '+elpa' in spec:
elpa = spec['elpa']
elpa_suffix = '_openmp' if '+openmp' in elpa else ''
elpa_incdir = elpa.headers.directories[0]
fcflags += ['-I{0}'.format(os.path.join(elpa_incdir, 'modules'))]
libs.append(os.path.join(elpa.libs.directories[0],
('libelpa{elpa_suffix}.{dso_suffix}'
.format(elpa_suffix=elpa_suffix,
dso_suffix=dso_suffix))))
if spec.satisfies('@:4.999'):
if elpa.satisfies('@:2014.5.999'):
cppflags.append('-D__ELPA')
elif elpa.satisfies('@2014.6:2015.10.999'):
cppflags.append('-D__ELPA2')
else:
cppflags.append('-D__ELPA3')
else:
cppflags.append('-D__ELPA={0}{1:02d}'
.format(elpa.version[0],
int(elpa.version[1])))
fcflags += ['-I{0}'.format(os.path.join(elpa_incdir, 'elpa'))]
if spec.satisfies('+sirius'):
sirius = spec['sirius']
cppflags.append('-D__SIRIUS')
fcflags += ['-I{0}'.format(sirius.prefix.include.sirius)]
libs += list(sirius.libs)
if spec.satisfies('+cuda'):
cppflags += ['-D__ACC']
libs += ['-lcudart', '-lnvrtc', '-lcuda']
if spec.satisfies('+cuda_blas'):
cppflags += ['-D__DBCSR_ACC=2']
libs += ['-lcublas']
else:
cppflags += ['-D__DBCSR_ACC']
if spec.satisfies('+cuda_fft'):
cppflags += ['-D__PW_CUDA']
libs += ['-lcufft', '-lcublas']
cuda_arch = spec.variants['cuda_arch'].value
if cuda_arch:
gpuver = {
'35': 'K40',
'37': 'K80',
'60': 'P100',
'70': 'V100',
}[cuda_arch]
if (cuda_arch == '35'
and spec.satisfies('+cuda_arch_35_k20x')):
gpuver = 'K20X'
if 'smm=libsmm' in spec:
lib_dir = os.path.join(
'lib', self.makefile_architecture, self.makefile_version
)
mkdirp(lib_dir)
try:
copy(env['LIBSMM_PATH'], os.path.join(lib_dir, 'libsmm.a'))
except KeyError:
raise KeyError('Point environment variable LIBSMM_PATH to '
'the absolute path of the libsmm.a file')
except IOError:
raise IOError('The file LIBSMM_PATH pointed to does not '
'exist. Note that it must be an absolute path.')
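# Example (hypothetical path): the prebuilt library location has to be
# exported before the install, e.g.
#   export LIBSMM_PATH=/opt/libsmm/lib/libsmm_dnn.a
# so that the copy above can stage it as lib/<arch>/<version>/libsmm.a.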
cppflags.extend([
'-D__HAS_smm_dnn',
'-D__HAS_smm_vec',
])
libs.append('-lsmm')
elif 'smm=libxsmm' in spec:
cppflags += ['-D__LIBXSMM']
cppflags += pkgconf('--cflags-only-other', 'libxsmmf',
output=str).split()
fcflags += pkgconf('--cflags-only-I', 'libxsmmf',
output=str).split()
libs += pkgconf('--libs', 'libxsmmf', output=str).split()
dflags.extend(cppflags)
cflags.extend(cppflags)
cxxflags.extend(cppflags)
fcflags.extend(cppflags)
nvflags.extend(cppflags)
with open(self.makefile, 'w') as mkf:
if '+plumed' in spec:
mkf.write('# include Plumed.inc as recommended by '
          'PLUMED to include libraries and flags\n')
mkf.write('include {0}\n'.format(
spec['plumed'].package.plumed_inc
))
mkf.write('\n# COMPILER, LINKER, TOOLS\n\n')
mkf.write('FC = {0}\n'
'CC = {1}\n'
'CXX = {2}\n'
'LD = {3}\n'
.format(fc, cc, cxx, fc))
if '%intel' in spec:
intel_bin_dir = ancestor(self.compiler.cc)
# CPP is a commented-out command in CP2K's Intel arch files.
# This is the hack through which the CP2K developers avoid doing:
#
# ${CPP} <file>.F > <file>.f90
#
# and use `-fpp` instead
mkf.write('CPP = # {0} -P\n'.format(spack_cc))
mkf.write('AR = {0}/xiar -r\n'.format(intel_bin_dir))
else:
mkf.write('CPP = # {0} -E\n'.format(spack_cc))
mkf.write('AR = ar -r\n')
if spec.satisfies('+cuda'):
mkf.write('NVCC = {0}\n'.format(
os.path.join(spec['cuda'].prefix, 'bin', 'nvcc')))
# Write compiler flags to file
def fflags(var, lst):
return '{0} = {1}\n\n'.format(
var,
' \\\n\t'.join(lst))
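# For reference, fflags('CFLAGS', ['-O2', '-fopenmp']) produces:
#   CFLAGS = -O2 \
#       -fopenmp
# i.e. one makefile variable per call, with tab-indented continuation lines.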
mkf.write('\n# FLAGS & LIBRARIES\n')
mkf.write(fflags('DFLAGS', dflags))
mkf.write(fflags('CPPFLAGS', cppflags))
mkf.write(fflags('CFLAGS', cflags))
mkf.write(fflags('CXXFLAGS', cxxflags))
mkf.write(fflags('NVFLAGS', nvflags))
mkf.write(fflags('FCFLAGS', fcflags))
mkf.write(fflags('LDFLAGS', ldflags))
mkf.write(fflags('LIBS', libs))
if '%intel' in spec:
mkf.write(fflags('LDFLAGS_C', ldflags + ['-nofor_main']))
mkf.write('# CP2K-specific flags\n\n')
mkf.write('GPUVER = {0}\n'.format(gpuver))
mkf.write('DATA_DIR = {0}\n'.format(self.prefix.share.data))
@property
def build_directory(self):
build_dir = self.stage.source_path
if self.spec.satisfies('@:6.9999'):
# prior to version 7.1 the Makefile was located in makefiles/
build_dir = os.path.join(build_dir, 'makefiles')
return build_dir
@property
def build_targets(self):
return [
'ARCH={0}'.format(self.makefile_architecture),
'VERSION={0}'.format(self.makefile_version)
]
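# These targets end up on the make command line, e.g. (architecture
# string is illustrative):
#   make ARCH=linux-ubuntu18.04-skylake-gcc VERSION=psmp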
def build(self, spec, prefix):
# Apparently the Makefile bases its paths on PWD
# so we need to set PWD = self.build_directory
with spack.util.environment.set_env(PWD=self.build_directory):
super(Cp2k, self).build(spec, prefix)
def install(self, spec, prefix):
exe_dir = os.path.join('exe', self.makefile_architecture)
install_tree(exe_dir, self.prefix.bin)
install_tree('data', self.prefix.share.data)
def check(self):
data_dir = os.path.join(self.stage.source_path, 'data')
# CP2K < 7 still uses $PWD to detect the current working dir
# and the Makefile is in a subdir; account for both facts here:
with spack.util.environment.set_env(CP2K_DATA_DIR=data_dir,
PWD=self.build_directory):
with working_dir(self.build_directory):
make('test', *self.build_targets)
|
the-stack_0_25656
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2018, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import os
import tempfile
import re
import shutil
from anima import logger
class Archiver(object):
"""Archives a Maya scene for external use.
This utility class can flatten a Maya scene, including all of its references,
into a default Maya project folder structure, and can retrieve and reconnect
the references to the original ones when the original file is returned.
"""
default_workspace_content = """// Anima Archiver Default Project Definition
workspace -fr "translatorData" "data";
workspace -fr "offlineEdit" "scenes/edits";
workspace -fr "renderData" "renderData";
workspace -fr "scene" "scenes";
workspace -fr "3dPaintTextures" "sourceimages/3dPaintTextures";
workspace -fr "eps" "data";
workspace -fr "OBJexport" "data";
workspace -fr "mel" "scripts";
workspace -fr "furShadowMap" "renderData/fur/furShadowMap";
workspace -fr "particles" "cache/particles";
workspace -fr "audio" "sound";
workspace -fr "scripts" "scripts";
workspace -fr "sound" "sound";
workspace -fr "DXF_FBX export" "data";
workspace -fr "furFiles" "renderData/fur/furFiles";
workspace -fr "depth" "renderData/depth";
workspace -fr "autoSave" "autosave";
workspace -fr "furAttrMap" "renderData/fur/furAttrMap";
workspace -fr "diskCache" "data";
workspace -fr "fileCache" "cache/nCache";
workspace -fr "ASS Export" "data";
workspace -fr "FBX export" "data";
workspace -fr "sourceImages" "sourceimages";
workspace -fr "FBX" "data";
workspace -fr "DAE_FBX export" "data";
workspace -fr "movie" "movies";
workspace -fr "Alembic" "data";
workspace -fr "DAE_FBX" "data";
workspace -fr "iprImages" "renderData/iprImages";
workspace -fr "mayaAscii" "scenes";
workspace -fr "furImages" "renderData/fur/furImages";
workspace -fr "furEqualMap" "renderData/fur/furEqualMap";
workspace -fr "illustrator" "data";
workspace -fr "DXF_FBX" "data";
workspace -fr "mayaBinary" "scenes";
workspace -fr "move" "data";
workspace -fr "images" "images";
workspace -fr "fluidCache" "cache/nCache/fluid";
workspace -fr "clips" "clips";
workspace -fr "ASS" "data";
workspace -fr "OBJ" "data";
workspace -fr "templates" "assets";
workspace -fr "shaders" "renderData/shaders";
"""
default_project_structure = """assets
autosave
cache
cache/nCache
cache/nCache/fluid
cache/particles
clips
data
images
movies
renderData
renderData/depth
renderData/fur
renderData/fur/furAttrMap
renderData/fur/furEqualMap
renderData/fur/furFiles
renderData/fur/furImages
renderData/fur/furShadowMap
renderData/iprImages
renderData/shaders
scenes
scenes/edits
scenes/refs
scripts
sound
sourceimages
sourceimages/3dPaintTextures"""
def __init__(self, exclude_mask=None):
if exclude_mask is None:
exclude_mask = []
self.exclude_mask = exclude_mask
@classmethod
def create_default_project(cls, path, name='DefaultProject'):
"""Creates default maya project structure along with a suitable
workspace.mel file.
:param str path: The path where the default project structure will be
created.
:param str name: The name of the project directory to be created.
:return:
"""
project_path = os.path.join(path, name)
# lets create the structure
for dir_name in cls.default_project_structure.split('\n'):
dir_path = os.path.join(project_path, dir_name)
try:
os.makedirs(dir_path)
except OSError:
pass
# create the workspace.mel
workspace_mel_path = os.path.join(project_path, 'workspace.mel')
with open(workspace_mel_path, 'w+') as f:
f.writelines(cls.default_workspace_content)
return project_path
def flatten(self, path, project_name='DefaultProject'):
"""Flattens the given maya scene in to a new default project externally
that is without opening it and returns the project path.
It will also flatten all the referenced files, textures, image planes,
Arnold Scene Source and Redshift Proxy files.
:param path: The path to the file to be flattened
:return:
"""
# create a new Default Project
tempdir = tempfile.gettempdir()
from stalker import Repository
all_repos = Repository.query.all()
default_project_path = \
self.create_default_project(path=tempdir, name=project_name)
logger.debug(
'creating new default project at: %s' % default_project_path
)
ref_paths = \
self._move_file_and_fix_references(path, default_project_path)
while len(ref_paths):
ref_path = ref_paths.pop(0)
if self.exclude_mask \
and os.path.splitext(ref_path)[1] in self.exclude_mask:
logger.debug('skipping: %s' % ref_path)
continue
# fix different OS paths
for repo in all_repos:
if repo.is_in_repo(ref_path):
ref_path = repo.to_native_path(ref_path)
new_ref_paths = \
self._move_file_and_fix_references(
ref_path,
default_project_path,
scenes_folder='scenes/refs'
)
# extend ref_paths with new ones
for new_ref_path in new_ref_paths:
if new_ref_path not in ref_paths:
ref_paths.append(new_ref_path)
return default_project_path
def _move_file_and_fix_references(self, path, project_path,
scenes_folder='scenes',
refs_folder='scenes/refs'):
"""Moves the given maya file to the given project path and moves any
references of it to
:param str path: The path of the maya file
:param str project_path: The project path
:param str scenes_folder: The scenes folder to store the original maya
scene.
:param str refs_folder: The references folder to replace reference
paths with.
:return list: returns a list of paths
"""
# fix any env vars
path = os.path.expandvars(path)
original_file_name = os.path.basename(path)
logger.debug('original_file_name: %s' % original_file_name)
new_file_path = \
os.path.join(project_path, scenes_folder, original_file_name)
scenes_folder_lut = {
'.ma': 'scenes/refs',
# image files
'.jpg': 'sourceimages',
'.png': 'sourceimages',
'.tif': 'sourceimages',
'.tga': 'sourceimages',
'.exr': 'sourceimages',
'.hdr': 'sourceimages',
# RSProxy and arnold proxies
'.rs': 'sourceimages',
'.ass': 'sourceimages',
}
ref_paths = []
# skip the file if it doesn't exist
if not os.path.exists(path):
# return early
return ref_paths
# only get new ref paths for '.ma' files
if path.endswith('.ma'):
# read the data of the original file
with open(path) as f:
data = f.read()
ref_paths = self._extract_references(data)
# fix all reference paths
for ref_path in ref_paths:
ref_ext = os.path.splitext(ref_path)[-1]
data = data.replace(
ref_path,
'%s/%s' % (
scenes_folder_lut.get(ref_ext, refs_folder),
os.path.basename(ref_path)
)
)
# now write all the data back to a new temp scene
with open(new_file_path, 'w+') as f:
f.write(data)
else:
# fix for UDIM texture paths
# if the path contains 1001 or u1_v1 then find the other
# textures
# dirty patch
# move image files in to the sourceimages folder
# along with the RedshiftProxy files
file_extension = os.path.splitext(path)[1]
new_file_path = \
os.path.join(
project_path,
scenes_folder_lut.get(
file_extension,
refs_folder
),
original_file_name
)
import glob
new_file_paths = [new_file_path]
if '1001' in new_file_path or 'u1_v1' in new_file_path.lower():
# get the rest of the textures
new_file_paths = glob.glob(
new_file_path
.replace('1001', '*')
.replace('u1_v1', 'u*_v*')
.replace('U1_V1', 'U*_V*')
)
for p in new_file_paths:
    logger.debug('expanded texture path: %s' % p)
# just copy the file
for new_file_path in new_file_paths:
try:
shutil.copy(path, new_file_path)
except IOError:
pass
return ref_paths
def _extract_references(self, data):
"""returns the list of references in the given maya file
:param str data: The content of the maya scene file
:return:
"""
path_regex = r'\$REPO[\w\d\/_\.@]+'
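# The pattern matches repository-environment style paths; e.g. a
# (hypothetical) line containing '$REPO1/Assets/Char/char_rig_v003.ma'
# would yield that whole token as a reference path.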
# so we have all the data
# extract references
ref_paths = re.findall(path_regex, data)
# also check for any paths that is starting with any of the $REPO
# variable value
for k in os.environ.keys():
if k.startswith('REPO'):
# consider this as a repository path and find all of the paths
# starting with this value
repo_path = os.environ[k]
path_regex = r'\%s[\w\d\/_\.@]+' % repo_path
temp_ref_paths = re.findall(path_regex, data)
ref_paths += temp_ref_paths
new_ref_paths = []
for ref_path in ref_paths:
if os.path.splitext(ref_path)[1] not in self.exclude_mask:
new_ref_paths.append(ref_path)
ref_paths = new_ref_paths
return ref_paths
@classmethod
def _extract_local_references(cls, data):
"""returns the list of local references (references that are referenced
from scenes/refs folder) in the given maya file
:param str data: The content of the maya scene file
:return:
"""
path_regex = r'scenes/refs/[\w\d\/_\.@]+'
# so we have all the data
# extract references
ref_paths = re.findall(path_regex, data)
return ref_paths
@classmethod
def archive(cls, path):
"""Creates a zip file containing the given directory.
:param path: Path to the archived directory.
:return:
"""
import zipfile
dir_name = os.path.basename(path)
zip_path = os.path.join(tempfile.gettempdir(), '%s.zip' % dir_name)
parent_path = os.path.dirname(path) + '/'
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED,
allowZip64=True) as z:
for current_dir_path, dir_names, file_names in os.walk(path):
for dir_name in dir_names:
dir_path = os.path.join(current_dir_path, dir_name)
arch_path = dir_path[len(parent_path):]
z.write(dir_path, arch_path)
for file_name in file_names:
file_path = os.path.join(current_dir_path, file_name)
arch_path = file_path[len(parent_path):]
z.write(file_path, arch_path)
return zip_path
@classmethod
def bind_to_original(cls, path):
"""Binds all the references to the original Versions in the repository.
Given a Maya scene file, this method will find the originals of the
references in the database and replace the archived paths with them.
:param str path: The path of the maya file.
:return:
"""
# TODO: This will not fix the sound or texture files, that is anything
# other than a maya scene file.
# get all reference paths
with open(path) as f:
data = f.read()
ref_paths = cls._extract_local_references(data)
for ref_path in ref_paths:
ref_file_name = os.path.basename(ref_path)
# try to find a corresponding Stalker Version instance with it
from stalker import Version
version = Version.query\
.filter(Version.full_path.endswith(ref_file_name))\
.first()
if version:
# replace it
data = data.replace(
ref_path,
version.full_path
)
if len(ref_paths):
# save the file over itself
with open(path, 'w+') as f:
f.write(data)
|
the-stack_0_25657
|
import logging
from collections import namedtuple
from .main import special_command, RAW_QUERY
TableInfo = namedtuple("TableInfo", ['checks', 'relkind', 'hasindex',
'hasrules', 'hastriggers', 'hasoids', 'tablespace', 'reloptions', 'reloftype',
'relpersistence'])
log = logging.getLogger(__name__)
@special_command('\\l', '\\l', 'List databases.', arg_type=RAW_QUERY)
def list_databases(cur, **_):
query = 'SELECT datname FROM pg_database;'
cur.execute(query)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, cur.statusmessage)]
else:
return [(None, None, None, cur.statusmessage)]
@special_command('\\du', '\\du[+] [pattern]', 'List roles.')
def list_roles(cur, pattern, verbose):
"""
Returns (title, rows, headers, status)
"""
sql = '''SELECT r.rolname, r.rolsuper, r.rolinherit,
r.rolcreaterole, r.rolcreatedb, r.rolcanlogin,
r.rolconnlimit, r.rolvaliduntil,
ARRAY(SELECT b.rolname
FROM pg_catalog.pg_auth_members m
JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid)
WHERE m.member = r.oid) as memberof''' + (''',
pg_catalog.shobj_description(r.oid, 'pg_authid') AS description'''
if verbose else '') + """, r.rolreplication
FROM pg_catalog.pg_roles r """
params = []
if pattern:
_, schema = sql_name_pattern(pattern)
sql += 'WHERE r.rolname ~ %s'
params.append(schema)
sql = cur.mogrify(sql + " ORDER BY 1", params)
log.debug(sql)
cur.execute(sql)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, cur.statusmessage)]
@special_command('\\dn', '\\dn[+] [pattern]', 'List schemas.')
def list_schemas(cur, pattern, verbose):
"""
Returns (title, rows, headers, status)
"""
sql = '''SELECT n.nspname AS "Name",
pg_catalog.pg_get_userbyid(n.nspowner) AS "Owner"''' + (''',
pg_catalog.array_to_string(n.nspacl, E'\\n') AS "Access privileges",
pg_catalog.obj_description(n.oid, 'pg_namespace') AS "Description"''' if verbose else '') + """
FROM pg_catalog.pg_namespace n WHERE n.nspname """
params = []
if pattern:
_, schema = sql_name_pattern(pattern)
sql += '~ %s'
params.append(schema)
else:
sql += "!~ '^pg_' AND n.nspname <> 'information_schema'"
sql = cur.mogrify(sql + " ORDER BY 1", params)
log.debug(sql)
cur.execute(sql)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, cur.statusmessage)]
def list_objects(cur, pattern, verbose, relkinds):
"""
Returns (title, rows, header, status)
This method is used by list_tables, list_views, and list_indexes
relkinds is a list of strings to filter pg_class.relkind
"""
schema_pattern, table_pattern = sql_name_pattern(pattern)
if verbose:
verbose_columns = '''
,pg_catalog.pg_size_pretty(pg_catalog.pg_table_size(c.oid)) as "Size",
pg_catalog.obj_description(c.oid, 'pg_class') as "Description" '''
else:
verbose_columns = ''
sql = '''SELECT n.nspname as "Schema",
c.relname as "Name",
CASE c.relkind
WHEN 'r' THEN 'table' WHEN 'v' THEN 'view'
WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index'
WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special'
WHEN 'f' THEN 'foreign table' END
as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
''' + verbose_columns + '''
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n
ON n.oid = c.relnamespace
WHERE c.relkind = ANY(%s) '''
params = [relkinds]
if schema_pattern:
sql += ' AND n.nspname ~ %s'
params.append(schema_pattern)
else:
sql += '''
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND pg_catalog.pg_table_is_visible(c.oid) '''
if table_pattern:
sql += ' AND c.relname ~ %s'
params.append(table_pattern)
sql = cur.mogrify(sql + ' ORDER BY 1, 2', params)
log.debug(sql)
cur.execute(sql)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, cur.statusmessage)]
@special_command('\\dt', '\\dt[+] [pattern]', 'List tables.')
def list_tables(cur, pattern, verbose):
return list_objects(cur, pattern, verbose, ['r', ''])
@special_command('\\dv', '\\dv[+] [pattern]', 'List views.')
def list_views(cur, pattern, verbose):
return list_objects(cur, pattern, verbose, ['v', 's', ''])
@special_command('\\ds', '\\ds[+] [pattern]', 'List sequences.')
def list_sequences(cur, pattern, verbose):
return list_objects(cur, pattern, verbose, ['S', 's', ''])
@special_command('\\di', '\\di[+] [pattern]', 'List indexes.')
def list_indexes(cur, pattern, verbose):
return list_objects(cur, pattern, verbose, ['i', 's', ''])
@special_command('\\df', '\\df[+] [pattern]', 'List functions.')
def list_functions(cur, pattern, verbose):
if verbose:
verbose_columns = '''
,CASE
WHEN p.provolatile = 'i' THEN 'immutable'
WHEN p.provolatile = 's' THEN 'stable'
WHEN p.provolatile = 'v' THEN 'volatile'
END as "Volatility",
pg_catalog.pg_get_userbyid(p.proowner) as "Owner",
l.lanname as "Language",
p.prosrc as "Source code",
pg_catalog.obj_description(p.oid, 'pg_proc') as "Description" '''
verbose_table = ''' LEFT JOIN pg_catalog.pg_language l
ON l.oid = p.prolang'''
else:
verbose_columns = verbose_table = ''
sql = '''
SELECT n.nspname as "Schema",
p.proname as "Name",
pg_catalog.pg_get_function_result(p.oid)
as "Result data type",
pg_catalog.pg_get_function_arguments(p.oid)
as "Argument data types",
CASE
WHEN p.proisagg THEN 'agg'
WHEN p.proiswindow THEN 'window'
WHEN p.prorettype = 'pg_catalog.trigger'::pg_catalog.regtype
THEN 'trigger'
ELSE 'normal'
END as "Type" ''' + verbose_columns + '''
FROM pg_catalog.pg_proc p
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
''' + verbose_table + '''
WHERE '''
schema_pattern, func_pattern = sql_name_pattern(pattern)
params = []
if schema_pattern:
sql += ' n.nspname ~ %s '
params.append(schema_pattern)
else:
sql += ' pg_catalog.pg_function_is_visible(p.oid) '
if func_pattern:
sql += ' AND p.proname ~ %s '
params.append(func_pattern)
if not (schema_pattern or func_pattern):
sql += ''' AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema' '''
sql = cur.mogrify(sql + ' ORDER BY 1, 2, 4', params)
log.debug(sql)
cur.execute(sql)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, cur.statusmessage)]
@special_command('\\dT', '\\dT[S+] [pattern]', 'List data types')
def list_datatypes(cur, pattern, verbose):
assert True
sql = '''SELECT n.nspname as "Schema",
pg_catalog.format_type(t.oid, NULL) AS "Name", '''
if verbose:
sql += r''' t.typname AS "Internal name",
CASE
WHEN t.typrelid != 0
THEN CAST('tuple' AS pg_catalog.text)
WHEN t.typlen < 0
THEN CAST('var' AS pg_catalog.text)
ELSE CAST(t.typlen AS pg_catalog.text)
END AS "Size",
pg_catalog.array_to_string(
ARRAY(
SELECT e.enumlabel
FROM pg_catalog.pg_enum e
WHERE e.enumtypid = t.oid
ORDER BY e.enumsortorder
), E'\n') AS "Elements",
pg_catalog.array_to_string(t.typacl, E'\n')
AS "Access privileges",
pg_catalog.obj_description(t.oid, 'pg_type')
AS "Description"'''
else:
sql += ''' pg_catalog.obj_description(t.oid, 'pg_type')
as "Description" '''
sql += ''' FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n
ON n.oid = t.typnamespace
WHERE (t.typrelid = 0 OR
( SELECT c.relkind = 'c'
FROM pg_catalog.pg_class c
WHERE c.oid = t.typrelid))
AND NOT EXISTS(
SELECT 1
FROM pg_catalog.pg_type el
WHERE el.oid = t.typelem
AND el.typarray = t.oid) '''
schema_pattern, type_pattern = sql_name_pattern(pattern)
params = []
if schema_pattern:
sql += ' AND n.nspname ~ %s '
params.append(schema_pattern)
else:
sql += ' AND pg_catalog.pg_type_is_visible(t.oid) '
if type_pattern:
sql += ''' AND (t.typname ~ %s
OR pg_catalog.format_type(t.oid, NULL) ~ %s) '''
params.extend(2 * [type_pattern])
if not (schema_pattern or type_pattern):
sql += ''' AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema' '''
sql = cur.mogrify(sql + ' ORDER BY 1, 2', params)
log.debug(sql)
cur.execute(sql)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, cur.statusmessage)]
@special_command('describe', 'DESCRIBE [pattern]', '', hidden=True, case_sensitive=False)
@special_command('\\d', '\\d [pattern]', 'List or describe tables, views and sequences.')
def describe_table_details(cur, pattern, verbose):
"""
Returns (title, rows, headers, status)
"""
# This is a simple \d command. No table name to follow.
if not pattern:
sql = """SELECT n.nspname as "Schema", c.relname as "Name",
CASE c.relkind WHEN 'r' THEN 'table'
WHEN 'v' THEN 'view'
WHEN 'm' THEN 'materialized view'
WHEN 'i' THEN 'index'
WHEN 'S' THEN 'sequence'
WHEN 's' THEN 'special'
WHEN 'f' THEN 'foreign table'
END as "Type",
pg_catalog.pg_get_userbyid(c.relowner) as "Owner"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','v','m','S','f','')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY 1,2 """
log.debug(sql)
cur.execute(sql)
if cur.description:
headers = [x[0] for x in cur.description]
return [(None, cur, headers, cur.statusmessage)]
# This is a \d <tablename> command. A royal pain in the ass.
schema, relname = sql_name_pattern(pattern)
where = []
params = []
if not pattern:
where.append('pg_catalog.pg_table_is_visible(c.oid)')
if schema:
where.append('n.nspname ~ %s')
params.append(schema)
if relname:
where.append('c.relname ~ %s')
params.append(relname)
sql = """SELECT c.oid, n.nspname, c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
""" + ('WHERE ' + ' AND '.join(where) if where else '') + """
ORDER BY 2,3"""
sql = cur.mogrify(sql, params)
# Execute the sql, get the results and call describe_one_table_details on each table.
log.debug(sql)
cur.execute(sql)
if not (cur.rowcount > 0):
return [(None, None, None, 'Did not find any relation named %s.' % pattern)]
results = []
for oid, nspname, relname in cur.fetchall():
results.append(describe_one_table_details(cur, nspname, relname, oid, verbose))
return results
def describe_one_table_details(cur, schema_name, relation_name, oid, verbose):
if verbose:
suffix = """pg_catalog.array_to_string(c.reloptions || array(select
'toast.' || x from pg_catalog.unnest(tc.reloptions) x), ', ')"""
else:
suffix = "''"
sql ="""SELECT c.relchecks, c.relkind, c.relhasindex,
c.relhasrules, c.relhastriggers, c.relhasoids,
%s,
c.reltablespace,
CASE WHEN c.reloftype = 0 THEN ''
ELSE c.reloftype::pg_catalog.regtype::pg_catalog.text
END,
c.relpersistence
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_class tc ON (c.reltoastrelid = tc.oid)
WHERE c.oid = '%s'""" % (suffix, oid)
# Create a namedtuple called tableinfo and match what's in describe.c
log.debug(sql)
cur.execute(sql)
if (cur.rowcount > 0):
tableinfo = TableInfo._make(cur.fetchone())
else:
return (None, None, None, 'Did not find any relation with OID %s.' % oid)
# If it's a seq, fetch its value and store it for later.
if tableinfo.relkind == 'S':
# Do stuff here.
sql = '''SELECT * FROM "%s"."%s"''' % (schema_name, relation_name)
log.debug(sql)
cur.execute(sql)
if not (cur.rowcount > 0):
return (None, None, None, 'Something went wrong.')
seq_values = cur.fetchone()
# Get column info
sql = """SELECT a.attname, pg_catalog.format_type(a.atttypid, a.atttypmod),
(SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid) for 128)
FROM pg_catalog.pg_attrdef d WHERE d.adrelid = a.attrelid AND d.adnum =
a.attnum AND a.atthasdef), a.attnotnull, a.attnum, (SELECT c.collname
FROM pg_catalog.pg_collation c, pg_catalog.pg_type t WHERE c.oid =
a.attcollation AND t.oid = a.atttypid AND a.attcollation <>
t.typcollation) AS attcollation"""
if tableinfo.relkind == 'i':
sql += """, pg_catalog.pg_get_indexdef(a.attrelid, a.attnum, TRUE)
AS indexdef"""
else:
sql += """, NULL AS indexdef"""
if tableinfo.relkind == 'f':
sql += """, CASE WHEN attfdwoptions IS NULL THEN '' ELSE '(' ||
array_to_string(ARRAY(SELECT quote_ident(option_name) || ' '
|| quote_literal(option_value) FROM
pg_options_to_table(attfdwoptions)), ', ') || ')' END AS
attfdwoptions"""
else:
sql += """, NULL AS attfdwoptions"""
if verbose:
sql += """, a.attstorage"""
sql += """, CASE WHEN a.attstattarget=-1 THEN NULL ELSE
a.attstattarget END AS attstattarget"""
if (tableinfo.relkind == 'r' or tableinfo.relkind == 'v' or
tableinfo.relkind == 'm' or tableinfo.relkind == 'f' or
tableinfo.relkind == 'c'):
sql += """, pg_catalog.col_description(a.attrelid,
a.attnum)"""
sql += """ FROM pg_catalog.pg_attribute a WHERE a.attrelid = '%s' AND
a.attnum > 0 AND NOT a.attisdropped ORDER BY a.attnum; """ % oid
log.debug(sql)
cur.execute(sql)
res = cur.fetchall()
title = (tableinfo.relkind, schema_name, relation_name)
# Set the column names.
headers = ['Column', 'Type']
show_modifiers = False
if (tableinfo.relkind == 'r' or tableinfo.relkind == 'v' or
tableinfo.relkind == 'm' or tableinfo.relkind == 'f' or
tableinfo.relkind == 'c'):
headers.append('Modifiers')
show_modifiers = True
if (tableinfo.relkind == 'S'):
headers.append("Value")
if (tableinfo.relkind == 'i'):
headers.append("Definition")
if (tableinfo.relkind == 'f'):
headers.append("FDW Options")
if (verbose):
headers.append("Storage")
if (tableinfo.relkind == 'r' or tableinfo.relkind == 'm' or
tableinfo.relkind == 'f'):
headers.append("Stats target")
# Column comments, if the relkind supports this feature. */
if (tableinfo.relkind == 'r' or tableinfo.relkind == 'v' or
tableinfo.relkind == 'm' or
tableinfo.relkind == 'c' or tableinfo.relkind == 'f'):
headers.append("Description")
view_def = ''
# /* Check if table is a view or materialized view */
if ((tableinfo.relkind == 'v' or tableinfo.relkind == 'm') and verbose):
sql = """SELECT pg_catalog.pg_get_viewdef('%s'::pg_catalog.oid, true)""" % oid
log.debug(sql)
cur.execute(sql)
if cur.rowcount > 0:
view_def = cur.fetchone()
# Prepare the cells of the table to print.
cells = []
for i, row in enumerate(res):
cell = []
cell.append(row[0]) # Column
cell.append(row[1]) # Type
if show_modifiers:
modifier = ''
if row[5]:
modifier += ' collate %s' % row[5]
if row[3]:
modifier += ' not null'
if row[2]:
modifier += ' default %s' % row[2]
cell.append(modifier)
# Sequence
if tableinfo.relkind == 'S':
cell.append(seq_values[i])
# Index column
if tableinfo.relkind == 'i':
cell.append(row[6])
# /* FDW options for foreign table column, only for 9.2 or later */
if tableinfo.relkind == 'f':
cell.append(row[7])
if verbose:
storage = row[8]
if storage[0] == 'p':
cell.append('plain')
elif storage[0] == 'm':
cell.append('main')
elif storage[0] == 'x':
cell.append('extended')
elif storage[0] == 'e':
cell.append('external')
else:
cell.append('???')
if (tableinfo.relkind == 'r' or tableinfo.relkind == 'm' or
tableinfo.relkind == 'f'):
cell.append(row[9])
# /* Column comments, if the relkind supports this feature. */
if (tableinfo.relkind == 'r' or tableinfo.relkind == 'v' or
tableinfo.relkind == 'm' or
tableinfo.relkind == 'c' or tableinfo.relkind == 'f'):
cell.append(row[10])
cells.append(cell)
# Make Footers
status = []
if (tableinfo.relkind == 'i'):
# /* Footer information about an index */
sql = """SELECT i.indisunique, i.indisprimary, i.indisclustered,
i.indisvalid, (NOT i.indimmediate) AND EXISTS (SELECT 1 FROM
pg_catalog.pg_constraint WHERE conrelid = i.indrelid AND conindid =
i.indexrelid AND contype IN ('p','u','x') AND condeferrable) AS
condeferrable, (NOT i.indimmediate) AND EXISTS (SELECT 1 FROM
pg_catalog.pg_constraint WHERE conrelid = i.indrelid AND conindid =
i.indexrelid AND contype IN ('p','u','x') AND condeferred) AS
condeferred, a.amname, c2.relname, pg_catalog.pg_get_expr(i.indpred,
i.indrelid, true) FROM pg_catalog.pg_index i, pg_catalog.pg_class c,
pg_catalog.pg_class c2, pg_catalog.pg_am a WHERE i.indexrelid = c.oid
AND c.oid = '%s' AND c.relam = a.oid AND i.indrelid = c2.oid;""" % oid
log.debug(sql)
cur.execute(sql)
(indisunique, indisprimary, indisclustered, indisvalid,
deferrable, deferred, indamname, indtable, indpred) = cur.fetchone()
if indisprimary:
status.append("primary key, ")
elif indisunique:
status.append("unique, ")
status.append("%s, " % indamname)
#/* we assume here that index and table are in same schema */
status.append('for table "%s.%s"' % (schema_name, indtable))
if indpred:
status.append(", predicate (%s)" % indpred)
if indisclustered:
status.append(", clustered")
if not indisvalid:
status.append(", invalid")
if deferrable:
status.append(", deferrable")
if deferred:
status.append(", initially deferred")
status.append('\n')
#add_tablespace_footer(&cont, tableinfo.relkind,
#tableinfo.tablespace, true);
elif tableinfo.relkind == 'S':
# /* Footer information about a sequence */
# /* Get the column that owns this sequence */
sql = ("SELECT pg_catalog.quote_ident(nspname) || '.' ||"
"\n pg_catalog.quote_ident(relname) || '.' ||"
"\n pg_catalog.quote_ident(attname)"
"\nFROM pg_catalog.pg_class c"
"\nINNER JOIN pg_catalog.pg_depend d ON c.oid=d.refobjid"
"\nINNER JOIN pg_catalog.pg_namespace n ON n.oid=c.relnamespace"
"\nINNER JOIN pg_catalog.pg_attribute a ON ("
"\n a.attrelid=c.oid AND"
"\n a.attnum=d.refobjsubid)"
"\nWHERE d.classid='pg_catalog.pg_class'::pg_catalog.regclass"
"\n AND d.refclassid='pg_catalog.pg_class'::pg_catalog.regclass"
"\n AND d.objid=%s \n AND d.deptype='a'" % oid)
log.debug(sql)
cur.execute(sql)
result = cur.fetchone()
if result:
status.append("Owned by: %s" % result[0])
#/*
#* If we get no rows back, don't show anything (obviously). We should
#* never get more than one row back, but if we do, just ignore it and
#* don't print anything.
#*/
elif (tableinfo.relkind == 'r' or tableinfo.relkind == 'm' or
tableinfo.relkind == 'f'):
#/* Footer information about a table */
if (tableinfo.hasindex):
sql = "SELECT c2.relname, i.indisprimary, i.indisunique, i.indisclustered, "
sql += "i.indisvalid, "
sql += "pg_catalog.pg_get_indexdef(i.indexrelid, 0, true),\n "
sql += ("pg_catalog.pg_get_constraintdef(con.oid, true), "
"contype, condeferrable, condeferred")
sql += ", c2.reltablespace"
sql += ("\nFROM pg_catalog.pg_class c, pg_catalog.pg_class c2, "
"pg_catalog.pg_index i\n")
sql += " LEFT JOIN pg_catalog.pg_constraint con ON (conrelid = i.indrelid AND conindid = i.indexrelid AND contype IN ('p','u','x'))\n"
sql += ("WHERE c.oid = '%s' AND c.oid = i.indrelid AND i.indexrelid = c2.oid\n"
"ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname;") % oid
log.debug(sql)
result = cur.execute(sql)
if (cur.rowcount > 0):
status.append("Indexes:\n")
for row in cur:
#/* untranslated index name */
status.append(' "%s"' % row[0])
#/* If exclusion constraint, print the constraintdef */
if row[7] == "x":
status.append(row[6])
else:
#/* Label as primary key or unique (but not both) */
if row[1]:
status.append(" PRIMARY KEY,")
elif row[2]:
if row[7] == "u":
status.append(" UNIQUE CONSTRAINT,")
else:
status.append(" UNIQUE,")
# /* Everything after "USING" is echoed verbatim */
indexdef = row[5]
usingpos = indexdef.find(" USING ")
if (usingpos >= 0):
indexdef = indexdef[(usingpos + 7):]
status.append(" %s" % indexdef)
# /* Need these for deferrable PK/UNIQUE indexes */
if row[8]:
status.append(" DEFERRABLE")
if row[9]:
status.append(" INITIALLY DEFERRED")
# /* Add these for all cases */
if row[3]:
status.append(" CLUSTER")
if not row[4]:
status.append(" INVALID")
status.append('\n')
# printTableAddFooter(&cont, buf.data);
# /* Print tablespace of the index on the same line */
# add_tablespace_footer(&cont, 'i',
# atooid(PQgetvalue(result, i, 10)),
# false);
# /* print table (and column) check constraints */
if (tableinfo.checks):
sql = ("SELECT r.conname, "
"pg_catalog.pg_get_constraintdef(r.oid, true)\n"
"FROM pg_catalog.pg_constraint r\n"
"WHERE r.conrelid = '%s' AND r.contype = 'c'\n"
"ORDER BY 1;" % oid)
log.debug(sql)
cur.execute(sql)
if (cur.rowcount > 0):
status.append("Check constraints:\n")
for row in cur:
#/* untranslated constraint name and def */
status.append(" \"%s\" %s" % row)
status.append('\n')
#/* print foreign-key constraints (there are none if no triggers) */
if (tableinfo.hastriggers):
sql = ("SELECT conname,\n"
" pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n"
"FROM pg_catalog.pg_constraint r\n"
"WHERE r.conrelid = '%s' AND r.contype = 'f' ORDER BY 1;" %
oid)
log.debug(sql)
cur.execute(sql)
if (cur.rowcount > 0):
status.append("Foreign-key constraints:\n")
for row in cur:
#/* untranslated constraint name and def */
status.append(" \"%s\" %s\n" % row)
#/* print incoming foreign-key references (none if no triggers) */
if (tableinfo.hastriggers):
sql = ("SELECT conrelid::pg_catalog.regclass, conname,\n"
" pg_catalog.pg_get_constraintdef(c.oid, true) as condef\n"
"FROM pg_catalog.pg_constraint c\n"
"WHERE c.confrelid = '%s' AND c.contype = 'f' ORDER BY 1;" %
oid)
log.debug(sql)
cur.execute(sql)
if (cur.rowcount > 0):
status.append("Referenced by:\n")
for row in cur:
status.append(" TABLE \"%s\" CONSTRAINT \"%s\" %s\n" % row)
# /* print rules */
if (tableinfo.hasrules and tableinfo.relkind != 'm'):
sql = ("SELECT r.rulename, trim(trailing ';' from pg_catalog.pg_get_ruledef(r.oid, true)), "
"ev_enabled\n"
"FROM pg_catalog.pg_rewrite r\n"
"WHERE r.ev_class = '%s' ORDER BY 1;" %
oid)
log.debug(sql)
cur.execute(sql)
if (cur.rowcount > 0):
for category in range(4):
have_heading = False
for row in cur:
    list_rule = False
    if category == 0 and row[2] == 'O':
list_rule = True
elif category == 1 and row[2] == 'D':
list_rule = True
elif category == 2 and row[2] == 'A':
list_rule = True
elif category == 3 and row[2] == 'R':
list_rule = True
if not list_rule:
continue
if not have_heading:
if category == 0:
status.append("Rules:")
if category == 1:
status.append("Disabled rules:")
if category == 2:
status.append("Rules firing always:")
if category == 3:
status.append("Rules firing on replica only:")
have_heading = True
# /* Everything after "CREATE RULE" is echoed verbatim */
ruledef = row[1]
status.append(" %s" % ruledef)
if (view_def):
#/* Footer information about a view */
status.append("View definition:\n")
status.append("%s \n" % view_def)
#/* print rules */
if tableinfo.hasrules:
sql = ("SELECT r.rulename, trim(trailing ';' from pg_catalog.pg_get_ruledef(r.oid, true))\n"
"FROM pg_catalog.pg_rewrite r\n"
"WHERE r.ev_class = '%s' AND r.rulename != '_RETURN' ORDER BY 1;" % oid)
log.debug(sql)
cur.execute(sql)
if (cur.rowcount > 0):
status.append("Rules:\n")
for row in cur:
#/* Everything after "CREATE RULE" is echoed verbatim */
ruledef = row[1]
status.append(" %s\n" % ruledef)
#/*
# * Print triggers next, if any (but only user-defined triggers). This
# * could apply to either a table or a view.
# */
if tableinfo.hastriggers:
sql = ( "SELECT t.tgname, "
"pg_catalog.pg_get_triggerdef(t.oid, true), "
"t.tgenabled\n"
"FROM pg_catalog.pg_trigger t\n"
"WHERE t.tgrelid = '%s' AND " % oid);
sql += "NOT t.tgisinternal"
sql += "\nORDER BY 1;"
log.debug(sql)
cur.execute(sql)
if cur.rowcount > 0:
#/*
#* split the output into 4 different categories. Enabled triggers,
#* disabled triggers and the two special ALWAYS and REPLICA
#* configurations.
#*/
for category in range(4):
have_heading = False
for row in cur:
    list_trigger = False
#/*
# * Check if this trigger falls into the current category
# */
tgenabled = row[2]
if category == 0:
    if tgenabled == 'O' or tgenabled == True:
        list_trigger = True
elif category == 1:
    if tgenabled == 'D' or tgenabled == False:
        list_trigger = True
elif category == 2:
    if tgenabled == 'A':
        list_trigger = True
elif category == 3:
    if tgenabled == 'R':
        list_trigger = True
if not list_trigger:
    continue
# /* Print the category heading once */
if not have_heading:
if category == 0:
status.append("Triggers:")
elif category == 1:
status.append("Disabled triggers:")
elif category == 2:
status.append("Triggers firing always:")
elif category == 3:
status.append("Triggers firing on replica only:")
status.append('\n')
have_heading = True
#/* Everything after "TRIGGER" is echoed verbatim */
tgdef = row[1]
triggerpos = tgdef.find(" TRIGGER ")
start = triggerpos + 9 if triggerpos >= 0 else 0
status.append(" %s\n" % tgdef[start:])
#/*
#* Finish printing the footer information about a table.
#*/
if (tableinfo.relkind == 'r' or tableinfo.relkind == 'm' or
tableinfo.relkind == 'f'):
# /* print foreign server name */
if tableinfo.relkind == 'f':
#/* Footer information about foreign table */
sql = ("SELECT s.srvname,\n"
" array_to_string(ARRAY(SELECT "
" quote_ident(option_name) || ' ' || "
" quote_literal(option_value) FROM "
" pg_options_to_table(ftoptions)), ', ') "
"FROM pg_catalog.pg_foreign_table f,\n"
" pg_catalog.pg_foreign_server s\n"
"WHERE f.ftrelid = %s AND s.oid = f.ftserver;" % oid)
log.debug(sql)
cur.execute(sql)
row = cur.fetchone()
# /* Print server name */
status.append("Server: %s\n" % row[0])
# /* Print per-table FDW options, if any */
if (row[1]):
status.append("FDW Options: (%s)\n" % ftoptions)
#/* print inherited tables */
sql = ("SELECT c.oid::pg_catalog.regclass FROM pg_catalog.pg_class c, "
"pg_catalog.pg_inherits i WHERE c.oid=i.inhparent AND "
"i.inhrelid = '%s' ORDER BY inhseqno;" % oid)
log.debug(sql)
cur.execute(sql)
spacer = ''
if cur.rowcount > 0:
status.append("Inherits")
for row in cur:
status.append("%s: %s,\n" % (spacer, row))
spacer = ' ' * len('Inherits')
#/* print child tables */
sql = ("SELECT c.oid::pg_catalog.regclass FROM pg_catalog.pg_class c,"
" pg_catalog.pg_inherits i WHERE c.oid=i.inhrelid AND"
" i.inhparent = '%s' ORDER BY"
" c.oid::pg_catalog.regclass::pg_catalog.text;" % oid)
log.debug(sql)
cur.execute(sql)
if not verbose:
#/* print the number of child tables, if any */
if (cur.rowcount > 0):
status.append("Number of child tables: %d (Use \d+ to list"
"them.)\n" % cur.rowcount)
else:
spacer = ''
if (cur.rowcount > 0):
status.append('Child tables')
#/* display the list of child tables */
for row in cur:
status.append("%s: %s,\n" % (spacer, row))
spacer = ' ' * len('Child tables')
#/* Table type */
if (tableinfo.reloftype):
status.append("Typed table of type: %s\n" % tableinfo.reloftype)
#/* OIDs, if verbose and not a materialized view */
if (verbose and tableinfo.relkind != 'm'):
status.append("Has OIDs: %s\n" %
("yes" if tableinfo.hasoids else "no"))
#/* Tablespace info */
#add_tablespace_footer(&cont, tableinfo.relkind, tableinfo.tablespace,
#true);
# /* reloptions, if verbose */
if (verbose and tableinfo.reloptions):
status.append("Options: %s\n" % tableinfo.reloptions)
return (None, cells, headers, "".join(status))
def sql_name_pattern(pattern):
"""
Takes a wildcard-pattern and converts to an appropriate SQL pattern to be
used in a WHERE clause.
Returns: schema_pattern, table_pattern
>>> sql_name_pattern('foo*."b""$ar*"')
('^(foo.*)$', '^(b"\\\\$ar\\\\*)$')
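A plain schema-qualified name (no quoting or wildcards), derived from the
parsing loop below, simply splits on the dot:
>>> sql_name_pattern('myschema.mytable')
('^(myschema)$', '^(mytable)$')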
"""
inquotes = False
relname = ''
schema = None
pattern_len = len(pattern)
i = 0
while i < pattern_len:
c = pattern[i]
if c == '"':
if inquotes and i + 1 < pattern_len and pattern[i + 1] == '"':
relname += '"'
i += 1
else:
inquotes = not inquotes
elif not inquotes and c.isupper():
relname += c.lower()
elif not inquotes and c == '*':
relname += '.*'
elif not inquotes and c == '?':
relname += '.'
elif not inquotes and c == '.':
# Found schema/name separator, move current pattern to schema
schema = relname
relname = ''
else:
# Dollar is always quoted, whether inside quotes or not.
if c == '$' or inquotes and c in '|*+?()[]{}.^\\':
relname += '\\'
relname += c
i += 1
if relname:
relname = '^(' + relname + ')$'
if schema:
schema = '^(' + schema + ')$'
return schema, relname
|
the-stack_0_25658
|
# Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" All BGP constant values """
# some handy things to know
BGP_MAX_PACKET_SIZE = 4096
BGP_MARKER_SIZE = 16 # size of BGP marker
BGP_HEADER_SIZE = 19 # size of BGP header, including marker
BGP_MIN_OPEN_MSG_SIZE = 29
BGP_MIN_UPDATE_MSG_SIZE = 23
BGP_MIN_NOTIFICATION_MSG_SIZE = 21
BGP_MIN_KEEPALVE_MSG_SIZE = BGP_HEADER_SIZE
BGP_TCP_PORT = 179
BGP_ROUTE_DISTINGUISHER_SIZE = 8
# BGP message types
BGP_OPEN = 1
BGP_UPDATE = 2
BGP_NOTIFICATION = 3
BGP_KEEPALIVE = 4
BGP_ROUTE_REFRESH = 5
BGP_CAPABILITY = 6
BGP_ROUTE_REFRESH_CISCO = 0x80
BGP_SIZE_OF_PATH_ATTRIBUTE = 2
# attribute flags, from RFC1771
BGP_ATTR_FLAG_OPTIONAL = 0x80
BGP_ATTR_FLAG_TRANSITIVE = 0x40
BGP_ATTR_FLAG_PARTIAL = 0x20
BGP_ATTR_FLAG_EXTENDED_LENGTH = 0x10
# SSA flags
BGP_SSA_TRANSITIVE = 0x8000
BGP_SSA_TYPE = 0x7FFF
# SSA Types
BGP_SSA_L2TPv3 = 1
BGP_SSA_mGRE = 2
BGP_SSA_IPSec = 3
BGP_SSA_MPLS = 4
BGP_SSA_L2TPv3_IN_IPSec = 5
BGP_SSA_mGRE_IN_IPSec = 6
# AS_PATH segment types
AS_SET = 1 # RFC1771
AS_SEQUENCE = 2 # RFC1771
AS_CONFED_SET = 4 # RFC1965 has the wrong values, corrected in
AS_CONFED_SEQUENCE = 3 # draft-ietf-idr-bgp-confed-rfc1965bis-01.txt
# OPEN message Optional Parameter types
BGP_OPTION_AUTHENTICATION = 1 # RFC1771
BGP_OPTION_CAPABILITY = 2 # RFC2842
# attribute types
BGPTYPE_ORIGIN = 1 # RFC1771
BGPTYPE_AS_PATH = 2 # RFC1771
BGPTYPE_NEXT_HOP = 3 # RFC1771
BGPTYPE_MULTI_EXIT_DISC = 4 # RFC1771
BGPTYPE_LOCAL_PREF = 5 # RFC1771
BGPTYPE_ATOMIC_AGGREGATE = 6 # RFC1771
BGPTYPE_AGGREGATOR = 7 # RFC1771
BGPTYPE_COMMUNITIES = 8 # RFC1997
BGPTYPE_ORIGINATOR_ID = 9 # RFC2796
BGPTYPE_CLUSTER_LIST = 10 # RFC2796
BGPTYPE_DPA = 11 # work in progress
BGPTYPE_ADVERTISER = 12 # RFC1863
BGPTYPE_RCID_PATH = 13 # RFC1863
BGPTYPE_MP_REACH_NLRI = 14 # RFC2858
BGPTYPE_MP_UNREACH_NLRI = 15 # RFC2858
BGPTYPE_EXTENDED_COMMUNITY = 16 # Draft Ramachandra
BGPTYPE_NEW_AS_PATH = 17 # draft-ietf-idr-as4bytes
BGPTYPE_NEW_AGGREGATOR = 18 # draft-ietf-idr-as4bytes
BGPTYPE_SAFI_SPECIFIC_ATTR = 19 # draft-kapoor-nalawade-idr-bgp-ssa-00.txt
BGPTYPE_PMSI_TUNNEL = 22 # RFC 6514
BGPTYPE_TUNNEL_ENCAPS_ATTR = 23 # RFC5512
BGPTYPE_LINK_STATE = 29
BGPTYPE_LARGE_COMMUNITY = 32
BGPTYPE_ATTRIBUTE_SET = 128
# BGP Tunnel Encapsulation Attribute Tunnel Types
BGP_TUNNEL_ENCAPS_RESERVED = 0
BGP_TUNNEL_ENCAPS_L2TPV3_OVER_IP = 1
BGP_TUNNEL_ENCAPS_GRE = 2
BGP_TUNNEL_ENCAPS_TRANSMIT_TUNNEL_ENDPOINT = 3
BGP_TUNNEL_ENCAPS_IPSEC_TUNNEL_MODE = 4
BGP_TUNNEL_ENCAPS_IP_IN_IP_TUNNEL_WITH_IPSEC = 5
BGP_TUNNEL_ENCAPS_MPLS_IN_IP_TUNNEL_WITH_IPSEC = 6
BGP_TUNNEL_ENCAPS_IP_IN_IP = 7
BGP_TUNNEL_ENCAPS_VXLAN = 8
BGP_TUNNEL_ENCAPS_NVGRE = 9
BGP_TUNNEL_ENCAPS_MPLS = 10
BGP_TUNNEL_ENCAPS_MPLS_IN_GRE = 11
BGP_TUNNEL_ENCAPS_VXLAN_GRE = 12
BGP_TUNNEL_ENCAPS_MPLS_IN_UDP = 13
BGP_TUNNEL_ENCAPS_IPV6_TUNNEL = 14
BGP_TUNNEL_ENCAPS_SR_TE_POLICY_TYPE = 15
BGP_TUNNEL_ENCAPS_BARE = 16
# Segment Sub-TLV type
BGP_SRTE_SEGMENT_SUBTLV_MPLS = 1
BGP_SRTE_SEGMENT_SUBTLV_IPV6 = 2
BGP_SRTE_SEGMENT_SUBTLV_IPV4_SID = 3
BGP_SRTE_SEGMENT_SUBTLV_IPV6_SID = 4
BGP_SRTE_SEGMENT_SUBTLV_IPV4_INDEX_SID = 5
BGP_SRTE_SEGMENT_SUBTLV_IPV4_ADDR_SID = 6
BGP_SRTE_SEGMENT_SUBTLV_IPV6_INDEX_SID = 7
BGP_SRTE_SEGMENT_SUBTLV_IPV6_ADDR_SID = 8
# VPN Route Target #
BGP_EXT_COM_RT_0 = 0x0002 # Route Target,Format AS(2bytes):AN(4bytes)
BGP_EXT_COM_RT_1 = 0x0102 # Route Target,Format IPv4 address(4bytes):AN(2bytes)
BGP_EXT_COM_RT_2 = 0x0202 # Route Target,Format AS(4bytes):AN(2bytes)
# Route Origin (SOO site of Origin)
BGP_EXT_COM_RO_0 = 0x0003 # Route Origin,Format AS(2bytes):AN(4bytes)
BGP_EXT_COM_RO_1 = 0x0103 # Route Origin,Format IP address:AN(2bytes)
BGP_EXT_COM_RO_2 = 0x0203 # Route Origin,Format AS(4bytes):AN(2bytes)
# BGP Flow Spec
BGP_EXT_REDIRECT_NH = 0x0800 # redirect to ipv4/v6 nexthop
BGP_EXT_TRA_RATE = 0x8006 # traffic-rate 2-byte as#, 4-byte float
BGP_EXT_TRA_ACTION = 0x8007 # traffic-action bitmask
BGP_EXT_REDIRECT_VRF = 0x8008 # redirect 6-byte Route Target
BGP_EXT_TRA_MARK = 0x8009 # traffic-marking DSCP value
# Transitive Opaque
BGP_EXT_COM_OSPF_ROUTE_TYPE = 0x0306 # OSPF Route Type
BGP_EXT_COM_COLOR = 0x030b # Color
BGP_EXT_COM_COLOR_00 = 0x030b0000 # Color-00
BGP_EXT_COM_COLOR_01 = 0x030b4000 # Color-01
BGP_EXT_COM_COLOR_10 = 0x030b8000 # Color-10
BGP_EXT_COM_COLOR_11 = 0x030bc000 # Color-11
BGP_EXT_COM_ENCAP = 0x030c # Encapsulation (RFC 5512)
BGP_EXT_COM_DEFAULT_GATEWAY = 0x030d # Default Gateway
# BGP EVPN
BGP_EXT_COM_EVPN_MAC_MOBIL = 0x0600 # Mac Mobility
BGP_EXT_COM_EVPN_ESI_MPLS_LABEL = 0x0601 # ESI MPLS Label
BGP_EXT_COM_EVPN_ES_IMPORT = 0x0602 # ES Import
BGP_EXT_COM_EVPN_ROUTE_MAC = 0x0603 # EVPN Router MAC Extended Community
# BGP cost community
BGP_EXT_COM_COST = 0x4301
# BGP link bandwidth
BGP_EXT_COM_LINK_BW = 0x4004
# Unknown
BGP_EXT_COM_UNKNOW = 0x0000
BGP_EXT_COM_DICT = {
'redirect-vrf': 32776, # redirect 6-byte Route Target
'traffic-marking-dscp': 32777, # traffic-marking DSCP value
'traffic-rate': 32774, # traffic-rate 2-byte as#, 4-byte float
'traffic-action': 32775, # traffic-action Sample (bit 46) Terminal Action (bit 47)
'color': 779, # Color
# Color, leftmost 2 bits of reserved field = 00, CO bits = 00,
# srpolicy -> IGP
'color-00': 51052544,
# Color, leftmost 2 bits of reserved field = 01, CO bits = 01,
# srpolicy -> same afi null endpoint -> any null endpoint -> IGP
'color-01': 51068928,
# Color, leftmost 2 bits of reserved field = 10, CO bits = 10,
# srpolicy -> same afi null endpoint -> any null endpoint -> same afi endpoint -> any endpoint -> IGP
'color-10': 51085312,
# Color, leftmost 2 bits of reserved field = 11, CO bits = 11,
# treated like color-00
'color-11': 51101696,
'encapsulation': 780, # BGP_EXT_COM_ENCAP = 0x030c
'es-import': 1538, # ES Import
'router-mac': 1539 # EVPN Router MAC Extended Community
}
BGP_EXT_COM_STR_DICT = {
258: 'route-target',
2: 'route-target',
514: 'route-target',
779: 'color',
16388: 'dmzlink-bw',
259: 'route-origin',
515: 'route-origin',
3: 'route-origin',
32776: 'redirect-vrf',
2048: 'redirect-nexthop',
1537: 'esi-label',
1536: 'mac-mobility',
32777: 'traffic-marking-dscp',
32774: 'traffic-rate',
51052544: 'color-00',
51068928: 'color-01',
51085312: 'color-10',
51101696: 'color-11',
780: 'encapsulation',
1538: 'es-import',
1539: 'router-mac',
32775: 'traffic-action'
}
BGP_EXT_COM_DICT_1 = {
'esi-label': 1537, # ESI MPLS Label
'mac-mobility': 1536, # Mac Mobility
}
# route distinguisher type
BGP_ROUTE_DISTINGUISHER_TYPE_0 = 0x0000
BGP_ROUTE_DISTINGUISHER_TYPE_1 = 0x0001
BGP_ROUTE_DISTINGUISHER_TYPE_2 = 0x0002
# PMSI TUNNEL TYPE
PMSI_TUNNEL_TYPE_NO_TUNNEL = 0
PMSI_TUNNEL_TYPE_RSVP_TE_P2MP = 1
PMSI_TUNNEL_TYPE_MLDP_P2MP = 2
PMSI_TUNNEL_TYPE_PIM_SSM_TREE = 3
PMSI_TUNNEL_TYPE_PIM_SM_TREE = 4
PMSI_TUNNEL_TYPE_BIDIR_PIM_TREE = 5
PMSI_TUNNEL_TYPE_INGRESS_REPL = 6
PMSI_TUNNEL_TYPE_MLDP_MP2MP = 7
# NLRI type as define in BGP flow spec RFC
BGPNLRI_FSPEC_DST_PFIX = 1 # RFC 5575
BGPNLRI_FSPEC_SRC_PFIX = 2 # RFC 5575
BGPNLRI_FSPEC_IP_PROTO = 3 # RFC 5575
BGPNLRI_FSPEC_PORT = 4 # RFC 5575
BGPNLRI_FSPEC_DST_PORT = 5 # RFC 5575
BGPNLRI_FSPEC_SRC_PORT = 6 # RFC 5575
BGPNLRI_FSPEC_ICMP_TP = 7 # RFC 5575
BGPNLRI_FSPEC_ICMP_CD = 8 # RFC 5575
BGPNLRI_FSPEC_TCP_FLAGS = 9 # RFC 5575
BGPNLRI_FSPEC_PCK_LEN = 10 # RFC 5575
BGPNLRI_FSPEC_DSCP = 11 # RFC 5575
BGPNLRI_FSPEC_FRAGMENT = 12 # RFC 5575
# NLRI type as define in BGP flow spec RFC
BGPNLRI_IPV6_FSPEC_DST_PFIX = 1 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_SRC_PFIX = 2 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_NEXT_HEADER = 3 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_PORT = 4 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_DST_PORT = 5 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_SRC_PORT = 6 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_ICMP_TP = 7 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_ICMP_CD = 8 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_TCP_FLAGS = 9 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_PCK_LEN = 10 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_DSCP = 11 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_FRAGMENT = 12 # draft-ietf-idr-flow-spec-v6-09
BGPNLRI_IPV6_FSPEC_FLOW_LABLE = 13 # draft-ietf-idr-flow-spec-v6-09
# Sub-TLVs as defined in SR TE Policy draft
BGP_BSID_PREFERENCE_OLD_OR_NEW = 0
BGPSUB_TLV_PREFERENCE = 6
BGPSUB_TLV_REMOTEENDPOINT_NEW = 6
BGPSUB_TLV_BINDGINGSID = 7
BGPSUB_TLV_PREFERENCE_NEW = 12
BGPSUB_TLV_BINDGINGSID_NEW = 13
BGPSUB_TLV_ENLP_NEW = 14
BGPSUB_TLV_PRIORITY_NEW = 15
BGPSUB_TLV_SIDLIST = 128
BGPSUB_TLV_POLICYNAME_NEW = 129
# Sub-TLVs as defined in SR TE Policy draft and used in BGPSUB_TLV_SIDLIST
BGPSUB_TLV_WEIGHTED = 9
BGPSUB_TLV_SID = 1
# NLRI type as define in BGP EVPN
BGPNLRI_EVPN_ETHERNET_AUTO_DISCOVERY = 1
BGPNLRI_EVPN_MAC_IP_ADVERTISEMENT = 2
BGPNLRI_EVPN_INCLUSIVE_MULTICAST_ETHERNET_TAG = 3
BGPNLRI_EVPN_ETHERNET_SEGMENT = 4
BGPNLRI_EVPN_IP_ROUTE_PREFIX = 5
# ESI type as define in NLRI of BGP EVPN
ESI_BGPNLRI_EVPN_TYPE_0 = 0
ESI_BGPNLRI_EVPN_TYPE_1 = 1
ESI_BGPNLRI_EVPN_TYPE_2 = 2
ESI_BGPNLRI_EVPN_TYPE_3 = 3
ESI_BGPNLRI_EVPN_TYPE_4 = 4
ESI_BGPNLRI_EVPN_TYPE_5 = 5
# BGP message Constants
VERSION = 4
PORT = 179
HDR_LEN = 19
MAX_LEN = 4096
# BGP messages type
MSG_BGP_CLOSED = 0
MSG_OPEN = 1
MSG_UPDATE = 2
MSG_NOTIFICATION = 3
MSG_KEEPALIVE = 4
MSG_ROUTEREFRESH = 5
MSG_CISCOROUTEREFRESH = 128
# BGP Capabilities Support
SUPPORT_4AS = False
CISCO_ROUTE_REFRESH = False
NEW_ROUTE_REFRESH = False
GRACEFUL_RESTART = False
# AFI_SAFI mapping
AFI_SAFI_DICT = {
(1, 1): 'ipv4',
(2, 1): 'ipv6',
(1, 4): 'ipv4_lu',
(2, 4): 'ipv6_lu',
(1, 133): 'flowspec',
(1, 128): 'vpnv4',
(2, 128): 'vpnv6',
(25, 70): 'evpn',
(16388, 71): 'bgpls',
(1, 73): 'ipv4_srte',
(2, 133): 'ipv6_flowspec'
}
AFI_SAFI_STR_DICT = {
'ipv6': (2, 1),
'ipv4': (1, 1),
'ipv4_lu': (1, 4),
'ipv6_lu': (2, 4),
'flowspec': (1, 133),
'vpnv4': (1, 128),
'vpnv6': (2, 128),
'evpn': (25, 70),
'bgpls': (16388, 71),
'ipv4_srte': (1, 73),
'ipv6_flowspec': (2, 133)
}
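# Example lookups (values taken from the two tables above):
#   AFI_SAFI_DICT.get((1, 128))    # -> 'vpnv4'
#   AFI_SAFI_STR_DICT['flowspec']  # -> (1, 133)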
ADD_PATH_ACT_DICT = {
1: 'receive',
2: 'send',
3: 'both'
}
# BGP FSM State
ST_IDLE = 1
ST_CONNECT = 2
ST_ACTIVE = 3
ST_OPENSENT = 4
ST_OPENCONFIRM = 5
ST_ESTABLISHED = 6
# BGP Timer (seconds)
# DELAY_OPEN_TIME = 10
ROUTE_REFRESH_TIME = 10
LARGER_HOLD_TIME = 4 * 60
# CONNECT_RETRY_TIME = 30
# IDLEHOLD_TIME = 30
# HOLD_TIME = 120
stateDescr = {
ST_IDLE: "IDLE",
ST_CONNECT: "CONNECT",
ST_ACTIVE: "ACTIVE",
ST_OPENSENT: "OPENSENT",
ST_OPENCONFIRM: "OPENCONFIRM",
ST_ESTABLISHED: "ESTABLISHED"
}
# Notification error codes
ERR_MSG_HDR = 1
ERR_MSG_OPEN = 2
ERR_MSG_UPDATE = 3
ERR_HOLD_TIMER_EXPIRED = 4
ERR_FSM = 5
ERR_CEASE = 6
ERR_CAP = 7
# Notification error codes dict
NOTIFICATION_ERROR_CODES_DICT = {
ERR_MSG_HDR: "Message Header Error",
ERR_MSG_OPEN: "OPEN Message Error",
ERR_MSG_UPDATE: "UPDATE Message Error",
ERR_HOLD_TIMER_EXPIRED: "Hold Timer Expired",
ERR_FSM: "Finite State Machine Error",
ERR_CEASE: "Cease",
ERR_CAP: "CAPABILITY Message Error"
}
# Notification suberror codes for ERR_MSG_HDR
ERR_MSG_HDR_CONN_NOT_SYNC = 1
ERR_MSG_HDR_BAD_MSG_LEN = 2
ERR_MSG_HDR_BAD_MSG_TYPE = 3
# Notification suberror codes for ERR_MSG_OPEN
ERR_MSG_OPEN_UNSUP_VERSION = 1
ERR_MSG_OPEN_BAD_PEER_AS = 2
ERR_MSG_OPEN_BAD_BGP_ID = 3
ERR_MSG_OPEN_UNSUP_OPT_PARAM = 4
ERR_MSG_OPEN_UNACCPT_HOLD_TIME = 6
ERR_MSG_OPEN_UNSUP_CAPA = 7 # RFC 5492
ERR_MSG_OPEN_UNKNO = 8
# Notification suberror codes for ERR_MSG_UPDATE
ERR_MSG_UPDATE_MALFORMED_ATTR_LIST = 1
ERR_MSG_UPDATE_UNRECOGNIZED_WELLKNOWN_ATTR = 2
ERR_MSG_UPDATE_MISSING_WELLKNOWN_ATTR = 3
ERR_MSG_UPDATE_ATTR_FLAGS = 4
ERR_MSG_UPDATE_ATTR_LEN = 5
ERR_MSG_UPDATE_INVALID_ORIGIN = 6
ERR_MSG_UPDATE_INVALID_NEXTHOP = 8
ERR_MSG_UPDATE_OPTIONAL_ATTR = 9
ERR_MSG_UPDATE_INVALID_NETWORK_FIELD = 10
ERR_MSG_UPDATE_MALFORMED_ASPATH = 11
ERR_MSG_UPDATE_UNKOWN_ATTR = 12
# Notification suberror codes for ERR_HOLD_TIMER_EXPIRED
ERR_SUB_HOLD_TIMER_EXPIRED = 1
# Notification suberror codes for ERR_FSM
ERR_SUB_FSM_ERROR = 1
# Notification suberror codes for ERR_CEASE
ERR_MAXIMUM_NUMBER_OF_PREFIXES_REACHED = 1
ERR_ADMINISTRATIVE_SHUTDOWN = 2
ERR_PEER_DECONFIGURED = 3
ERR_ADMINISTRATIVE_RESET = 4
ERR_CONNECTION_RESET = 5
ERR_OTHER_CONFIGURATION_CHANGE = 6
ERR_CONNECTION_COLLISION_RESOLUTION = 7
ERR_OUT_OF_RESOURCES = 8
NOTIFICATION_SUB_ERROR_CODES_DICT = {
ERR_MSG_HDR: {
ERR_MSG_HDR_CONN_NOT_SYNC: 'Connection Not Synchronized', # 1
ERR_MSG_HDR_BAD_MSG_LEN: 'Bad Message Length', # 2
ERR_MSG_HDR_BAD_MSG_TYPE: 'Bad Message Type' # 3
},
ERR_MSG_OPEN: {
ERR_MSG_OPEN_UNSUP_VERSION: 'Unsupported Version Number',
ERR_MSG_OPEN_BAD_PEER_AS: 'Bad Peer AS',
ERR_MSG_OPEN_BAD_BGP_ID: 'Bad BGP Identifier',
ERR_MSG_OPEN_UNSUP_OPT_PARAM: 'Unsupported Optional Parameter',
ERR_MSG_OPEN_UNACCPT_HOLD_TIME: 'Unacceptable Hold Time',
ERR_MSG_OPEN_UNSUP_CAPA: 'Unsupported Capability',
ERR_MSG_OPEN_UNKNO: 'NULL',
},
ERR_MSG_UPDATE: {
ERR_MSG_UPDATE_MALFORMED_ATTR_LIST: 'Malformed Attribute List',
ERR_MSG_UPDATE_UNRECOGNIZED_WELLKNOWN_ATTR: 'Unrecognized Well-known Attribute',
ERR_MSG_UPDATE_MISSING_WELLKNOWN_ATTR: 'Missing Well-known Attribute',
ERR_MSG_UPDATE_ATTR_FLAGS: 'Attribute Flags Error',
ERR_MSG_UPDATE_ATTR_LEN: 'Attribute Length Error',
ERR_MSG_UPDATE_INVALID_ORIGIN: 'Invalid ORIGIN Attribute',
ERR_MSG_UPDATE_INVALID_NEXTHOP: 'Invalid NEXT_HOP Attribute',
ERR_MSG_UPDATE_OPTIONAL_ATTR: 'Optional Attribute Error',
ERR_MSG_UPDATE_INVALID_NETWORK_FIELD: 'Invalid Network Field',
ERR_MSG_UPDATE_MALFORMED_ASPATH: 'Malformed AS_PATH',
ERR_MSG_UPDATE_UNKOWN_ATTR: 'NULL'
},
ERR_HOLD_TIMER_EXPIRED: {
ERR_SUB_HOLD_TIMER_EXPIRED: 'Hold timer expired'
},
ERR_FSM: {
ERR_SUB_FSM_ERROR: 'FSM error'
},
ERR_CEASE: {
ERR_MAXIMUM_NUMBER_OF_PREFIXES_REACHED: 'Maximum number of prefixes reached',
ERR_ADMINISTRATIVE_SHUTDOWN: 'Administrative shutdown',
ERR_PEER_DECONFIGURED: 'Peer reconfigured',
ERR_ADMINISTRATIVE_RESET: 'Administrative reset',
ERR_CONNECTION_RESET: 'Connection reset',
ERR_OTHER_CONFIGURATION_CHANGE: 'Other configuration change',
ERR_CONNECTION_COLLISION_RESOLUTION: 'Connection collision resolution',
ERR_OUT_OF_RESOURCES: 'Out of resources'
}
}
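# Informal usage sketch: decoding a NOTIFICATION (error, suberror) pair with the
# two tables above; the pair is assumed to come from a parsed NOTIFICATION message.
#   NOTIFICATION_ERROR_CODES_DICT[ERR_MSG_OPEN]                                -> 'OPEN Message Error'
#   NOTIFICATION_SUB_ERROR_CODES_DICT[ERR_MSG_OPEN][ERR_MSG_OPEN_BAD_PEER_AS]  -> 'Bad Peer AS'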
ATTRIBUTE_ID_2_STR = {
1: 'ORIGIN',
2: 'AS_PATH',
3: 'NEXT_HOP',
4: 'MULTI_EXIT_DISC',
5: 'LOCAL_PREF',
6: 'ATOMIC_AGGREGATE',
7: 'AGGREGATOR',
8: 'COMMUNITY',
9: 'ORIGINATOR_ID',
10: 'CLUSTER_LIST',
14: 'MP_REACH_NLRI',
15: 'MP_UNREACH_NLRI',
16: 'EXTENDED_COMMUNITY',
17: 'AS4_PATH',
18: 'AS4_AGGREGATOR',
22: 'PMSI_TUNNEL'
}
ATTRIBUTE_STR_2_ID = dict([(v, k) for (k, v) in ATTRIBUTE_ID_2_STR.items()])
WELL_KNOW_COMMUNITY_INT_2_STR = {
0xFFFF0000: 'PLANNED_SHUT',
0xFFFF0001: 'ACCEPT_OWN',
0xFFFF0002: 'ROUTE_FILTER_TRANSLATED_v4',
0xFFFF0003: 'ROUTE_FILTER_v4',
0xFFFF0004: 'ROUTE_FILTER_TRANSLATED_v6',
0xFFFF0005: 'ROUTE_FILTER_v6',
0xFFFF029A: 'BLACKHOLE',
0xFFFFFF01: 'NO_EXPORT',
0xFFFFFF02: 'NO_ADVERTISE',
0xFFFFFF03: 'NO_EXPORT_SUBCONFED',
0xFFFFFF04: 'NOPEER'
}
WELL_KNOW_COMMUNITY_STR_2_INT = dict(
[(r, l) for (l, r) in WELL_KNOW_COMMUNITY_INT_2_STR.items()])
TCP_MD5SIG_MAXKEYLEN = 80
SS_PADSIZE_IPV4 = 120
TCP_MD5SIG = 14
SS_PADSIZE_IPV6 = 100
SIN6_FLOWINFO = 0
SIN6_SCOPE_ID = 0
|
the-stack_0_25659
|
from django.shortcuts import render
from .forms import AddForm
def add(request):
total = 0
if request.GET:
form = AddForm(request.GET)
if form.is_valid():
total = form.add_numbers()
else:
form = AddForm()
return render(request, 'add/add_form.html', {
'form': form,
'total': total,
})
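# A minimal sketch of the AddForm imported above, assuming it exposes two integer
# fields and an add_numbers() helper; the real form in .forms may differ.
#
#     from django import forms
#
#     class AddForm(forms.Form):
#         a = forms.IntegerField()
#         b = forms.IntegerField()
#
#         def add_numbers(self):
#             return self.cleaned_data['a'] + self.cleaned_data['b']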
|
the-stack_0_25660
|
# -*- coding: utf-8 -*-
#Password Hash Secret
secret = 'Example'
#Sign Up Page Open
signup_open = True
#Sign Up Invitation Code
invitation_code = u"Example"
#Order Name Length
order_name_len = 8
#Debug Option
debug = False
|
the-stack_0_25663
|
import datetime
# from django.contrib.auth.decorators import user_passes_test
import io
import json
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.forms.models import model_to_dict
# Create your views here.
from django.http import JsonResponse
from django.middleware.csrf import get_token
from django.shortcuts import HttpResponse, HttpResponseRedirect, render
from django.utils.decorators import method_decorator
from django.views import View
from django_filters.rest_framework import DjangoFilterBackend, OrderingFilter
from drf_yasg.utils import swagger_auto_schema
from rest_framework import generics, mixins, status, views, viewsets
from rest_framework.authentication import (BasicAuthentication,
SessionAuthentication)
from rest_framework.mixins import ListModelMixin
from rest_framework.parsers import JSONParser
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import JSONRenderer
from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer, SerializerMethodField
from rest_framework.views import APIView
from actor.models import (CURRENT_SEMESTER, CURRENT_SEMESTER_CODE,
ordered_to_dict)
from helpers.decorators import user_passes_test
from initial.models import (AttendanceSheet, Course, CourseStatus, MarkSheet,
OfferedCourses, SectionAttendance, SectionMarks,
Semester, StudentAttendance, StudentMarks,
Transcript)
from initial.serializers import (AttendanceSheetSerializer,
OfferedCoursesSerializer, TranscriptSerilazer)
from student_portal.serializers import StudentSerializerOnlyNameAndUid
from .forms import StudentForm, StudentFormValidate
from .models import FeeChallan, Student
from .serializers import StudentSerializer, StudentSerializerAllData
# Create your views here.
class UserNotLogged(View):
def get(self, request):
return JsonResponse({'message': 'Not Authenticated'}, status=401)
def check_if_student(user):
return bool(user.is_student)
class BaseStudentLoginView(APIView):
not_user_response = {'message': 'Login Required',
'condtion': False, 'status': 'failure'}
not_student_response = {'message': 'User Logged in is Not a Student',
'condtion': False, 'status': 'failure'}
@ method_decorator(user_passes_test(lambda u: u.is_authenticated, on_failure_json_response=JsonResponse(not_user_response, status=401)))
@ method_decorator(user_passes_test(check_if_student, on_failure_json_response=JsonResponse(not_student_response, status=401)))
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
class Home_json(BaseStudentLoginView):
def get(self, request):
print((request.user))
stud_obj = Student.objects.filter(uid=str(request.user))
print(stud_obj)
user_obj = request.user
dict_user = model_to_dict(user_obj)
dict_user.pop('groups', None)
dict_user.pop('password', None)
student_data = StudentSerializer(stud_obj, many=True).data
dat = {'status': 'success',
'student_data': student_data, 'user_data': dict_user, 'data': student_data}
return JsonResponse(dat)
class AttendanceView(BaseStudentLoginView):
def get(self, request, course_code):
from initial.models import AttendanceSheet, OfferedCourses
print(dir(self))
print(dir(request.user))
s = Student.objects.get(uid=request.user)
csddc = course_code + "_" + s.semester_code
at = AttendanceSheet.objects.get(
student__uid=request.user, scsddc__endswith=csddc)
from initial.serializers import AttendanceSheetSerializer
att_serialized = AttendanceSheetSerializer(at, many=True).data
        return JsonResponse({'message': 'Available Attendance', 'condition': True, 'attendance': att_serialized}, status=200)
class PostAttendanceQR(BaseStudentLoginView):
def post(self, request):
print(request.POST['qr_code'])
request_data = json.loads(request.POST['qr_code'])
if isinstance(request_data, int):
            return JsonResponse({'message': 'QR Not Scanned Properly. Please Try again',
                                 'status': 'QR Scan Error', 'condition': False}, status=400)
print(request_data)
print(request.user)
try:
att_object = StudentAttendance.objects.get(student=Student.objects.get(uid=str(
request.user)), scsddc=request_data['scsddc'], class_date=request_data['class_date'], attendance_slot=request_data['attendance_slot'], section=request_data['section'])
except StudentAttendance.DoesNotExist as err:
            return JsonResponse({'message': 'Student is not enrolled in this Class', 'condition': False, 'error': str(err)}, status=400)
if att_object.state == 'P':
return JsonResponse({'message': 'Attendance Already Marked', 'condition': True, }, status=200)
att_object.state = 'P'
att_object.save()
from initial.serializers import StudentAttendanceSerializer
data = StudentAttendanceSerializer(
att_object, context={'request': request}).data
return JsonResponse({'message': 'Attendance Marked', 'condition': True, 'attendance': data}, status=200)
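# Informal sketch of the QR payload PostAttendanceQR expects in request.POST['qr_code']
# (a JSON string); the values below are illustrative only, the keys come from the
# StudentAttendance lookup above.
#
#     {"scsddc": "A_CS101_FALL20", "class_date": "2020-10-01",
#      "attendance_slot": "1", "section": "A"}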
class TimeTableView(BaseStudentLoginView):
def get(self, request):
# import requests
# uid = str(request.user)
# city = uid[2].lower()
# rnum = uid[4:]
# year = uid[0:2]
# url = 'https://timetablenotifier.com/api/fetch.php?email="' + \
# str(city+year+rnum) + '@nu.edu.pk"'
# print(url)
# r = requests.get(url)
# data = r.json()
return JsonResponse({'data': [], 'message': 'TimeTable server is down', 'success': 0})
class RegistrationCheck(BaseStudentLoginView):
def get(self, request):
print(request.user)
from institution.models import Department, Degree
try:
s = Student.objects.get(uid=request.user)
dep = Department.objects.get(
department_name=s.department_name_enrolled)
deg = Degree.objects.get(
degree_short=s.degree_short_enrolled, offering_department=dep)
except Degree.DoesNotExist as e:
return JsonResponse({'message': 'Invalid Student. Degree Does not Exist', 'condition': True, 'error_raised': True, 'error': str(e)}, status=401)
except Department.DoesNotExist as e:
return JsonResponse({'message': 'Invalid Student. Department Does not Exist', 'condition': True, 'error_raised': True}, status=401)
if dep is None or deg is None:
return JsonResponse({'message': 'Invalid Student', 'condition': True}, status=401)
if(deg.registrations_open == True):
            return JsonResponse({'message': 'Registrations are Active', 'condition': True}, status=200)
else:
            return JsonResponse({'message': 'Registrations are NOT Active', 'condition': False}, status=200)
class RegistrationCourses(BaseStudentLoginView):
@ swagger_auto_schema()
def get(self, request):
from institution.models import Department, Degree
try:
s = Student.objects.get(uid=request.user)
if s.warning_count > 0:
                return JsonResponse({'message': 'Student in Warning. Contact Academic Office.', 'condition': False}, status=200)
from initial.models import Semester, OfferedCourses
# sem = Semester.objects.get(semester_code=CURRENT_SEMESTER_CODE)
# rg_courses = sem.regular_course_load.get(semester_season=CURRENT_SEMESTER,student_year=s.student_year)
# el_courses = sem.elective_course_load.get(semester_season=CURRENT_SEMESTER)
current_semester = Semester.objects.filter(
current_semester=True).latest()
s = OfferedCourses.objects.filter(student__uid=str(
request.user), semester_code=current_semester.semester_code)
# from rest_framework.request import Request
from initial.serializers import OfferedCoursesSerializer
offered_courses_to_student = OfferedCoursesSerializer(
s, many=True, context={'request': request}).data
from pprint import pprint
pprint(offered_courses_to_student)
except Semester.DoesNotExist as e:
            return JsonResponse({'message': 'Invalid Semester. Contact Administration.', 'condition': False, 'error_raised': True}, status=401)
except OfferedCourses.DoesNotExist as e:
return JsonResponse({'message': 'Invalid Student. Department Does not Exist', 'condition': False, 'error_raised': True}, status=401)
if offered_courses_to_student is None:
return JsonResponse({'message': 'No Available Courses', 'condition': False}, status=401)
return JsonResponse({'message': 'Available Courses', 'condition': True, 'regular_courses': offered_courses_to_student}, status=200)
class StudentSignupView(View):
def post(self, request):
form = StudentFormValidate(request.POST)
if form.is_valid():
print(form.cleaned_data)
form.save()
return JsonResponse({'status': "Success", 'message': 'Student Sign Up Successful.'})
else:
return JsonResponse(form.errors.get_json_data())
class StudentLoginView(APIView):
def get(self, request, *args, **kwargs):
return HttpResponse("PLease Login" + str(kwargs))
def post(self, request, *args, **kwargs):
username = request.POST['username']
password = request.POST['password']
if username is "" or password is "":
return JsonResponse({'message': "Empty Usename or Password Field.", 'status': 'failure'}, status=401)
user = authenticate(request, username=username, password=password)
if user is None:
return JsonResponse({'message': "Invalid Id Or Password", 'status': 'failure'}, status=403)
if user.is_student == False:
return JsonResponse({'message': 'User is Not a Student',
'condtion': False, 'status': 'failure'}, status=401)
if user is not None:
login(request, user)
dict_user = model_to_dict(user)
dict_user.pop('groups', None)
dict_user.pop('password', None)
return JsonResponse({'status': 'success', 'message': 'User Logged In', **dict_user})
else:
return JsonResponse({'message': "Invalid Username of Password.", 'status': 'failure'}, status=403)
return HttpResponseRedirect('/home')
class CustomOfferedCourseSerializer(ModelSerializer):
# student = StudentSerializerOnlyNameAndUid()
# courses = SerializerMethodField('registered_courses')
# def registered_courses(self, offeredCourses):
# courses = ['gello']
# # for courseStatus in offeredCourses.courses_offered.all():
# # print(CourseStatus)
# # courses.append({
# # 'course_code': courseStatus.course.course_code,
# # 'course_name': courseStatus.course.course_name,
# # 'section': courseStatus.section
# # })
# return courses
class Meta:
model = OfferedCourses
fields = [
# 'courses',
'courses_offered'
]
# fields = '__all__'
class StudentSectionView(BaseStudentLoginView):
serializer_class = OfferedCoursesSerializer
renderer_classes = [JSONRenderer]
pagination_class = None
# queryset = OfferedCourses.objects.all()
# filter_backends = [DjangoFilterBackend, OrderingFilter]
# def get_queryset(self):
# current_semester = Semester.objects.filter(
# current_semester=True).latest()
# student = Student.objects.get(uid=self.request.user)
# courses = OfferedCourses.objects.filter(
# courses_offered__status='NR', student=student, semester_code=current_semester.semester_code)
# return courses
# def filter_queryset(self, queryset):
# filter_backends = [DjangoFilterBackend]
# for backend in list(filter_backends):
# queryset = backend().filter_queryset(self.request, queryset, view=self)
# return queryset
@ swagger_auto_schema()
def get(self, request, *args, **kwargs):
current_semester = Semester.objects.filter(
current_semester=True).latest()
student = Student.objects.get(uid=self.request.user)
courses = OfferedCourses.objects.get(
student=student, semester_code=current_semester.semester_code)
# courses_offered__status='NR', student=student, semester_code=current_semester.semester_code)
processed_courses = []
for courseStatus in courses.courses_offered.all():
if courseStatus.status == "R":
processed_courses.append({
'course_code': courseStatus.course.course_code,
'course_name': courseStatus.course.course_name,
'section': courseStatus.section,
'registration_status': courseStatus.status
})
# serialized_courses = OfferedCoursesSerializer(
# courses, many=True, context={'request': request}).data
# student=student, semester_code=current_semester.semester_code)
# print(courses)
# return Response(courses, status=200)
return Response(processed_courses)
class StudentAttendanceView(BaseStudentLoginView):
def get(self, request, *args, **kwargs):
print(kwargs['section'])
print(kwargs['course_code'])
current_semester = Semester.objects.filter(
current_semester=True).latest()
student = Student.objects.get(uid=self.request.user)
print(
f'{kwargs["section"]}_{kwargs["course_code"]}_{current_semester.semester_code}')
try:
attendance_sheet = AttendanceSheet.objects.filter(
student=student, scsddc=f'{kwargs["section"]}_{kwargs["course_code"]}_{current_semester.semester_code}'
)
except AttendanceSheet.DoesNotExist as e:
return Response({'message': 'Error, Invalid Attendance Sheet Requested. Please contact admin.', 'error': str(e)}, status=400)
print(type(attendance_sheet))
sheet_serialized = AttendanceSheetSerializer(
attendance_sheet, many=True, context={'request': request}).data
return Response(sheet_serialized, status=200)
class StudentLogoutView(View):
def post(self, request):
if request.user.is_authenticated:
logout(request)
return JsonResponse({'status': 'success', 'message': 'User Logged Out', 'condtion': True})
else:
return JsonResponse({'status': 'success', 'message': 'No User Logged in', 'condtion': True})
def get(self, request):
if request.user.is_authenticated:
logout(request)
return JsonResponse({'status': 'success', 'message': 'User Logged Out', 'condtion': True})
else:
return JsonResponse({'status': 'success', 'message': 'No User Logged in', 'condtion': True})
# def generate_challan(request):
# student = Student.objects.get(user = request.user)
# semester = Semester.objects.get(semester_code= request.POST['semester'])
# csr = request.POST['course'].split(',')
# challan = FeeChallan.object.create(student=student,semester=semester)
# fee_per_Cr = semester.fee_per_CR
# fee = 0
# for c in csr:
# challan.coursea.add(Course.object.get(course_code=c))
# fee+=fee_per_Cr
# challan.Tution_fee = fee
# challan.total_fee = fee
# challan.save()
def update_challan(request):
ts = int(datetime.datetime.now().timestamp())
EndDate = datetime.date.today() + datetime.timedelta(days=30)
admission_fee = request.POST['admission_fee']
student = Student.objects.get(user=request.user)
semester = Semester.objects.get(semester_code=request.POST['semester'])
challan, created = FeeChallan.objects.get_or_create(
student=student, semester=semester)
if created == True:
transcript = Transcript.objects.get_or_create(
student=student, semester=Semester.objects.get(current_semester=True))[0]
challan.coActivity_charges = semester.co_circular_fee
challan.due_date = EndDate
challan.challan_no = ts
option = request.POST['action']
code = request.POST['code']
print(code)
course = Course.objects.filter(course_name=code).first()
credit_hour = course.credit_hour
course_fee = semester.fee_per_CR
if option == 'drop':
challan.courses.remove(course)
challan.tution_fee = challan.tution_fee-course_fee*credit_hour
challan.total_fee = challan.total_fee-course_fee*credit_hour
else:
challan.courses.add(course)
challan.tution_fee = challan.tution_fee+course_fee*credit_hour
challan.total_fee = challan.total_fee+course_fee*credit_hour
if(admission_fee != ''):
challan.total_fee += int(admission_fee)
challan.save()
return JsonResponse("Success", safe=False)
def get_challan(request):
student = Student.objects.get(user=request.user)
code = request.POST['code']
print(code)
if len(code) > 1:
semester = Semester.objects.get(semester_code=code)
try:
challan = FeeChallan.objects.get(
student=student, semester=semester)
except:
return JsonResponse({"Error": "No Challan"}, safe=False, response=403)
else:
try:
challan = FeeChallan.objects.filter(student=student).values()
return JsonResponse(list(challan), safe=False)
except:
return JsonResponse({"Error": "No challan"}, safe=False, status=403)
opt = semester.semester_season
if(opt == 1):
season = "Fall"
elif opt == 2:
season = "Spring"
else:
season = "Summer"
challan_obj = {
"due_date": challan.due_date,
"name": request.user.first_name+request.user.last_name,
"roll_no": student.uid,
"challan_no": challan.challan_no,
"discipline": student.degree_short_enrolled,
"semester": season+" "+str(semester.semester_year),
"admission_fee": challan.admission_fee,
"tution_fee": challan.tution_fee,
"fine": challan.Fine,
"other": challan.other_charges+challan.coActivity_charges,
"arrears": challan.Arrear,
"withholding": challan.withholding_tax,
"total_amount": challan.total_fee,
"fine_per_day": int(challan.total_fee*0.001),
"challan_no": challan.challan_no,
}
return JsonResponse(challan_obj, safe=False)
class Student_Transcript(View):
def post(self, request):
try:
student = Student.objects.get(uid=request.POST['id'])
transcript = Transcript.objects.filter(student=student)
if len(transcript) > 1:
json_transcript = TranscriptSerilazer(transcript, many=True)
return JsonResponse(json_transcript.data, safe=False)
else:
transcript = Transcript.objects.get(student=student)
json_transcript = TranscriptSerilazer(transcript)
return JsonResponse([json_transcript.data], safe=False)
except:
return JsonResponse({"Error": "No Transcript"}, safe=False, status=420)
class StudentMarksView(View):
def post(self, request):
section = request.POST['section']
code = request.POST['code']
if section == None or section == "" or section == "null" or code == None or code == "" or code == "null":
return JsonResponse({"Failed": "Invalid Input Parameters"}, status=403)
else:
student = Student.objects.get(user=request.user)
semester = Semester.objects.get(current_semester=True)
scsddc = section+"_"+code+"_"+semester.semester_code
marks_info = SectionMarks.objects.filter(scsddc=scsddc)
if(len(marks_info) > 0):
marks_data = []
for mark in marks_info:
marks = StudentMarks.objects.get(
student=student, scsddc=scsddc, marks_type=mark.marks_type)
obj = {
"marks_type": mark.marks_type,
"total_marks": marks.total_marks,
"weightage": marks.weightage,
"obtained_marks": marks.obtained_marks,
"obtained_weightage": marks.obtained_weightage,
"section": marks.section,
"marks_mean": mark.marks_mean,
"marks_std_dev": mark.marks_standard_deviation,
"weightage_mean": mark.weightage_mean,
"weightage_std_dev": mark.weightage_standard_deviation,
"min_marks": mark.min_marks,
"max_marks": mark.max_marks,
}
marks_data.append(obj)
mark_sheet = MarkSheet.objects.get(
scsddc=scsddc, student=student)
grand_total = {
"total_marks": mark_sheet.grand_total_marks,
"obtained_total": mark_sheet.obtained_marks,
}
return JsonResponse({"Status": "Success", "marks_info": marks_data, "total": [grand_total]}, safe=False, status=200)
else:
return JsonResponse({"Status": "Failed", "Message": "No Marks Available"}, status=200)
def get_scsddc(request):
try:
section = request.POST['section']
code = request.POST['code']
if code == 'null' or section == "null" or code == "" or section == "":
return JsonResponse({"Failed": "Invalid Parameters"}, status=403)
except:
return JsonResponse({"Failed": "Invalid Parameters"}, status=403)
semester = Semester.objects.get(
current_semester=True)
scsddc = section+"_"+code+"_"+semester.semester_code
# mark_sheet = MarkSheet.objects.filter()
return JsonResponse({"Status": "Success", "scsddc": scsddc})
def get_latest_transcript(request):
student = Student.objects.get(user=request.user)
transcript = Transcript.objects.filter(student=student, last=True).values()
print(transcript)
return JsonResponse(list(transcript), safe=False)
|
the-stack_0_25666
|
import csv
from datetime import datetime
import os.path
import time
from fritzconnection.lib.fritzstatus import FritzStatus
from fritzconnection.core.exceptions import FritzConnectionException
from fritz_persist_status import settings, map_value
if __name__ == "__main__":
try:
status = FritzStatus()
except FritzConnectionException as e:
        with open(settings.error_file, mode='a+') as error_file:
            error_file.write(str(e) + '\n')
        raise
write_header = not os.path.isfile(settings.output_file)
while True:
try:
with open(settings.output_file, mode='a+') as output_file:
writer = csv.writer(output_file)
if write_header:
write_header = False
writer.writerow(['datetime', *settings.attrs])
writer.writerow(
[datetime.now().isoformat()]
+ [
map_value(getattr(status, attr), attr)
for attr in settings.attrs
]
)
time.sleep(30)
        except Exception as e:
with open(settings.error_file, mode='a+') as error_file:
error_file.write(str(e) + '\n')
|
the-stack_0_25667
|
import os.path
import shutil
import logging
import sys
from ..optimiser import Optimiser
from .animated_gif import OptimiseAnimatedGIF
class OptimiseGIF(Optimiser):
"""
Optimises gifs. If they aren't animated, it converts them to pngs with ImageMagick before
optimising them as for pngs.
Animated gifs get optimised according to the commands in OptimiseAnimatedGIF
"""
def __init__(self, **kwargs):
super(OptimiseGIF, self).__init__(**kwargs)
# the command to execute this optimiser
if kwargs.get('quiet') == True:
pngcrush = 'pngcrush -rem alla -brute -reduce -q "__INPUT__" "__OUTPUT__"'
else:
pngcrush = 'pngcrush -rem alla -brute -reduce "__INPUT__" "__OUTPUT__"'
self.commands = ('convert "__INPUT__" png:"__OUTPUT__"',
'pngnq -n 256 -o "__OUTPUT__" "__INPUT__"',
pngcrush)
# variable so we can easily determine whether a gif is animated or not
self.animated_gif_optimiser = OptimiseAnimatedGIF()
self.converted_to_png = False
self.is_animated = False
# format as returned by 'identify'
self.format = "GIF"
def set_input(self, input):
super(OptimiseGIF, self).set_input(input)
self.converted_to_png = False
self.is_animated = False
def _is_animated(self, input):
"""
Tests an image to see whether it's an animated gif
"""
return self.animated_gif_optimiser._is_acceptable_image(input)
def _keep_smallest_file(self, input, output):
"""
Compares the sizes of two files, and discards the larger one
"""
input_size = os.path.getsize(input)
output_size = os.path.getsize(output)
# if the image was optimised (output is smaller than input), overwrite the input file with the output
# file.
if (output_size < input_size):
try:
shutil.copyfile(output, input)
self.files_optimised += 1
self.bytes_saved += (input_size - output_size)
            except IOError as error:
                logging.error("Unable to copy %s to %s: %s" % (output, input, error))
                sys.exit(1)
if self.iterations == 1 and not self.is_animated:
self.converted_to_png = True
# delete the output file
os.unlink(output)
def _get_command(self):
"""
Returns the next command to apply
"""
command = False
# for the first iteration, return the first command
if self.iterations == 0:
# if the GIF is animated, optimise it
if self._is_animated(self.input):
command = self.animated_gif_optimiser.commands[0]
self.is_animated = True
else: # otherwise convert it to PNG
command = self.commands[0]
# execute the png optimisations if the gif was converted to a png
elif self.converted_to_png and self.iterations < len(self.commands):
command = self.commands[self.iterations]
self.iterations += 1
return command
def _list_only(self, input, output):
"""
Always keeps input, but still compares the sizes of two files
"""
input_size = os.path.getsize(input)
output_size = os.path.getsize(output)
if (output_size > 0 and output_size < input_size):
self.files_optimised += 1
self.bytes_saved += (input_size - output_size)
self.array_optimised_file.append(input)
if self.iterations == 1 and not self.is_animated:
            self.converted_to_png = True
# delete the output file
os.unlink(output)
|
the-stack_0_25668
|
from contextlib import contextmanager
import os
import logging.config
import numpy as np
import torch
from torch.nn.utils.rnn import pack_padded_sequence
import seq2seq.data.config as config
def barrier():
""" Calls all_reduce on dummy tensor."""
if torch.distributed.is_initialized():
torch.distributed.all_reduce(torch.cuda.FloatTensor(1))
torch.cuda.synchronize()
def get_rank():
if torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
if torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
@contextmanager
def sync_workers():
""" Gets distributed rank and synchronizes workers at exit"""
rank = get_rank()
yield rank
barrier()
def setup_logging(log_file='log.log'):
"""Setup logging configuration
"""
class RankFilter(logging.Filter):
def __init__(self, rank):
self.rank = rank
def filter(self, record):
record.rank = self.rank
return True
rank = get_rank()
rank_filter = RankFilter(rank)
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s - %(levelname)s - %(rank)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
filename=log_file,
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(rank)s: %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.getLogger('').addFilter(rank_filter)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, skip_first=True):
self.reset()
self.skip = skip_first
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
if self.skip:
self.skip = False
else:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
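# Informal usage sketch for AverageMeter (with skip_first=True the first update
# only sets `val` and is excluded from the running average):
#
#     meter = AverageMeter(skip_first=False)
#     meter.update(2.0)
#     meter.update(4.0)
#     # meter.avg == 3.0, meter.count == 2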
def batch_padded_sequences(seq, batch_first=False, sort=False):
if sort:
def key(item): return len(item[1])
indices, seq = zip(*sorted(enumerate(seq), key=key, reverse=True))
else:
indices = range(len(seq))
lengths = [len(sentence) for sentence in seq]
batch_length = max(lengths)
seq_tensor = torch.LongTensor(batch_length, len(seq)).fill_(config.PAD)
for idx, sentence in enumerate(seq):
end_seq = lengths[idx]
seq_tensor[:end_seq, idx].copy_(sentence[:end_seq])
if batch_first:
seq_tensor = seq_tensor.t()
return seq_tensor, lengths, indices
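# Informal usage sketch for batch_padded_sequences(); the inputs are assumed to
# be 1-D LongTensors of token ids.
#
#     seqs = [torch.LongTensor([1, 2, 3]), torch.LongTensor([4, 5])]
#     padded, lengths, indices = batch_padded_sequences(seqs, batch_first=True)
#     # padded.shape == (2, 3), lengths == [3, 2], shorter rows padded with config.PAD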
def debug_tensor(tensor, name):
logging.info(name)
tensor = tensor.float().cpu().numpy()
logging.info(f'MIN: {tensor.min()} MAX: {tensor.max()} '
f'AVG: {tensor.mean()} STD: {tensor.std()} '
f'NAN: {np.isnan(tensor).sum()} INF: {np.isinf(tensor).sum()}')
|
the-stack_0_25671
|
import collections
import json
from rest_framework.fields import MISSING_ERROR_MESSAGE, SerializerMethodField
from rest_framework.relations import *
from django.utils.translation import ugettext_lazy as _
from django.db.models.query import QuerySet
from django.db.models import Model
from rest_framework_json_api.exceptions import Conflict
from rest_framework_json_api.utils import Hyperlink, \
get_resource_type_from_queryset, get_resource_type_from_instance, \
get_included_serializers, get_resource_type_from_serializer
LINKS_PARAMS = ['self_link_view_name', 'related_link_view_name', 'related_link_lookup_field', 'related_link_url_kwarg']
class ResourceRelatedField(PrimaryKeyRelatedField):
self_link_view_name = None
related_link_view_name = None
related_link_lookup_field = 'pk'
links_only = False
default_error_messages = {
'required': _('This field is required.'),
'does_not_exist': _('Invalid pk "{pk_value}" - object does not exist.'),
'incorrect_type': _('Incorrect type. Expected resource identifier object, received {data_type}.'),
'incorrect_relation_type': _('Incorrect relation type. Expected {relation_type}, received {received_type}.'),
'missing_type': _('Invalid resource identifier object: missing \'type\' attribute'),
'missing_id': _('Invalid resource identifier object: missing \'id\' attribute'),
'no_match': _('Invalid hyperlink - No URL match.'),
}
def __init__(self, self_link_view_name=None, related_link_view_name=None, **kwargs):
if self_link_view_name is not None:
self.self_link_view_name = self_link_view_name
if related_link_view_name is not None:
self.related_link_view_name = related_link_view_name
self.related_link_lookup_field = kwargs.pop('related_link_lookup_field', None)
self.related_link_url_kwarg = kwargs.pop('related_link_url_kwarg', self.related_link_lookup_field)
self.links_only = kwargs.pop('links_only', self.links_only)
# check for a model class that was passed in for the relation type
model = kwargs.pop('model', None)
if model:
self.model = model
# We include this simply for dependency injection in tests.
# We can't add it as a class attributes or it would expect an
# implicit `self` argument to be passed.
self.reverse = reverse
super(ResourceRelatedField, self).__init__(**kwargs)
def use_pk_only_optimization(self):
# We need the real object to determine its type...
return False
def conflict(self, key, **kwargs):
"""
A helper method that simply raises a validation error.
"""
try:
msg = self.error_messages[key]
except KeyError:
class_name = self.__class__.__name__
msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key)
raise AssertionError(msg)
message_string = msg.format(**kwargs)
raise Conflict(message_string)
def get_url(self, name, view_name, kwargs, request):
"""
Given a name, view name and kwargs, return the URL that hyperlinks to the object.
May raise a `NoReverseMatch` if the `view_name` and `lookup_field`
attributes are not configured to correctly match the URL conf.
"""
# Return None if the view name is not supplied
if not view_name:
return None
# Return the hyperlink, or error if incorrectly configured.
try:
url = self.reverse(view_name, kwargs=kwargs, request=request)
except NoReverseMatch:
msg = (
'Could not resolve URL for hyperlinked relationship using '
'view name "%s".'
)
raise ImproperlyConfigured(msg % view_name)
if url is None:
return None
return Hyperlink(url, name)
def get_links(self, obj=None, lookup_field='pk'):
request = self.context.get('request', None)
view = self.context.get('view', None)
return_data = OrderedDict()
if obj:
kwargs = {lookup_field: getattr(obj, lookup_field)}
elif lookup_field in view.kwargs:
kwargs = {lookup_field: view.kwargs[lookup_field]}
else:
kwargs = {}
self_kwargs = kwargs.copy()
self_kwargs.update({'related_field': self.field_name if self.field_name else self.parent.field_name})
self_link = self.get_url('self', self.self_link_view_name, self_kwargs, request)
if obj:
related_id = getattr(obj, self.related_link_lookup_field or 'pk')
elif self.related_link_lookup_field in view.kwargs:
related_id = view.kwargs[self.related_link_lookup_field]
else:
related_id = None
if related_id:
if isinstance(related_id, Model):
related_id = related_id.pk
related_kwargs = {self.related_link_url_kwarg: related_id}
related_link = self.get_url('related', self.related_link_view_name, related_kwargs, request)
else:
related_link = None
if self_link:
return_data.update({'self': self_link})
if related_link:
return_data.update({'related': related_link})
return return_data
def to_internal_value(self, data):
if isinstance(data, six.text_type):
try:
data = json.loads(data)
except ValueError:
# show a useful error if they send a `pk` instead of resource object
self.fail('incorrect_type', data_type=type(data).__name__)
if not isinstance(data, dict):
self.fail('incorrect_type', data_type=type(data).__name__)
expected_relation_type = get_resource_type_from_queryset(self.queryset)
if 'type' not in data:
self.fail('missing_type')
if 'id' not in data:
self.fail('missing_id')
if data['type'] != expected_relation_type:
self.conflict('incorrect_relation_type', relation_type=expected_relation_type, received_type=data['type'])
return super(ResourceRelatedField, self).to_internal_value(data['id'])
def to_representation(self, value):
if self.links_only:
return None
if getattr(self, 'pk_field', None) is not None:
pk = self.pk_field.to_representation(value.pk)
else:
pk = value.pk
resource_type = self.get_resource_type_from_included_serializer()
if resource_type is None:
resource_type = get_resource_type_from_instance(value)
return OrderedDict([('type', resource_type), ('id', str(pk))])
def get_resource_type_from_included_serializer(self):
"""
Check to see it this resource has a different resource_name when
included and return that name, or None
"""
field_name = self.field_name or self.parent.field_name
root = self.get_root_serializer()
if root is not None:
includes = get_included_serializers(root)
if field_name in includes.keys():
return get_resource_type_from_serializer(includes[field_name])
return None
def get_root_serializer(self):
if hasattr(self.parent, 'parent') and self.is_serializer(self.parent.parent):
return self.parent.parent
elif self.is_serializer(self.parent):
return self.parent
return None
def is_serializer(self, candidate):
return hasattr(candidate, 'included_serializers')
def get_choices(self, cutoff=None):
queryset = self.get_queryset()
if queryset is None:
# Ensure that field.choices returns something sensible
# even when accessed with a read-only field.
return {}
if cutoff is not None:
queryset = queryset[:cutoff]
return OrderedDict([
(
json.dumps(self.to_representation(item)),
self.display_value(item)
)
for item in queryset
])
class SerializerMethodResourceRelatedField(ResourceRelatedField):
"""
Allows us to use serializer method RelatedFields
with return querysets
"""
def __new__(cls, *args, **kwargs):
"""
We override this because getting serializer methods
fails at the base class when many=True
"""
if kwargs.pop('many', False):
return cls.many_init(*args, **kwargs)
return super(ResourceRelatedField, cls).__new__(cls, *args, **kwargs)
def __init__(self, child_relation=None, *args, **kwargs):
# DRF 3.1 doesn't expect the `many` kwarg
kwargs.pop('many', None)
model = kwargs.pop('model', None)
if child_relation is not None:
self.child_relation = child_relation
if model:
self.model = model
super(SerializerMethodResourceRelatedField, self).__init__(*args, **kwargs)
@classmethod
def many_init(cls, *args, **kwargs):
list_kwargs = {k: kwargs.pop(k) for k in LINKS_PARAMS if k in kwargs}
list_kwargs['child_relation'] = cls(*args, **kwargs)
for key in kwargs.keys():
if key in ('model',) + MANY_RELATION_KWARGS:
list_kwargs[key] = kwargs[key]
return cls(**list_kwargs)
def get_attribute(self, instance):
# check for a source fn defined on the serializer instead of the model
if self.source and hasattr(self.parent, self.source):
serializer_method = getattr(self.parent, self.source)
if hasattr(serializer_method, '__call__'):
return serializer_method(instance)
return super(SerializerMethodResourceRelatedField, self).get_attribute(instance)
def to_representation(self, value):
if isinstance(value, collections.Iterable):
base = super(SerializerMethodResourceRelatedField, self)
return [base.to_representation(x) for x in value]
return super(SerializerMethodResourceRelatedField, self).to_representation(value)
def get_links(self, obj=None, lookup_field='pk'):
if hasattr(self, 'child_relation') and getattr(self, 'child_relation'):
return super(SerializerMethodResourceRelatedField, self).get_links(obj, lookup_field)
if self.source and hasattr(self.parent, self.source):
serializer_method = getattr(self.parent, self.source)
if hasattr(serializer_method, '__call__'):
obj = serializer_method(obj)
return super(SerializerMethodResourceRelatedField, self).get_links(obj, lookup_field)
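# Informal sketch of the payload shapes handled by ResourceRelatedField above
# (JSON:API resource identifier objects); the 'orders' type is illustrative only.
#
#     to_internal_value expects:  {"type": "orders", "id": "3"}
#     to_representation returns:  OrderedDict([('type', 'orders'), ('id', '3')])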
|
the-stack_0_25673
|
import base64
import json
import os
import sys
import re
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class Printer():
def __init__(self):
self.pages = []
self.displayHeaderFooter = True
self.headerTemplate = '<div style="font-size:8px; margin:auto;">' \
'<span class=title></span>' \
'</div>'
self.footerTemplate= '<div style="font-size:8px; margin:auto;">' \
'Page <span class="pageNumber"></span> of <span class="totalPages"></span>' \
'</div>'
self.plugin_path = os.path.dirname(os.path.realpath(__file__))
def set_config(self, display_header_footer, header_template, footer_template):
self.displayHeaderFooter = display_header_footer
if header_template:
self.headerTemplate = header_template
if footer_template:
self.footerTemplate = footer_template
def remove_invalid(self, value, deletechars):
for c in deletechars:
value = value.replace(c,' ')
return value
def add_page(self, page, config):
pdf_path = os.path.join(config["site_dir"], "pdfs", page.file.url)
os.makedirs(pdf_path, exist_ok=True)
category = ''
if 'category' in page.meta:
category = page.meta['category']
title = page.title
if 'title_full' in page.meta:
title = page.meta['title_full']
title = (category + ('__' if category else '')) + title
        title = self.remove_invalid(title, r'\/:*?"<>|')
        title = re.sub(r'\s+', '_', title)
pdf_file = os.path.join(pdf_path, title) + ".pdf"
relpath = os.path.relpath(pdf_file, os.path.dirname(page.file.abs_dest_path))
page_paths = {
"name": page.file.url,
"url": "file://" + page.file.abs_dest_path,
"pdf_file": pdf_file,
"relpath": relpath,
}
self.pages.append(page_paths)
return page_paths
def add_download_link(self, output_content, page_paths):
soup = BeautifulSoup(output_content, 'html.parser')
# soup = self._add_style(soup)
soup = self._add_link(soup, page_paths)
return str(soup)
def _add_style(self, soup):
stylesheet = os.path.join(self.plugin_path, "stylesheets", "printer.css")
with open(stylesheet, 'r') as file:
style = file.read()
soup.style.append(style)
return soup
def _add_link(self, soup, page_paths):
icon = BeautifulSoup(''
'<span class="twemoji">'
'<svg viewBox="0 0 24 24" xmlns="http://www.w3.org/2000/svg">'
'<path d="M5 20h14v-2H5m14-9h-4V3H9v6H5l7 7 7-7z"></path>'
'</svg>'
'</span>',
'html.parser')
text = "PDF"
btn = soup.new_tag("a", href=page_paths["relpath"])
btn.append(icon)
btn.append(text)
btn['class'] = 'md-button'
bar = soup.find("div", {"class" : "btn-actions"})
if bar:
bar.p.insert(0, btn)
else:
toc = soup.find("div", {"class" : "toc"})
if toc:
div = BeautifulSoup(''
'<div class="btn-actions screen-only">'
'<p></p>'
'</div>',
'html.parser')
div.p.insert(0, btn)
toc.insert_after(div)
return soup
def print_pages(self):
driver = self._create_driver()
for page in self.pages:
self.print_to_pdf(driver, page)
driver.quit()
def print_to_pdf(self, driver, page):
print(f"[pdf-with-js] - printing '{page['name']}' to file...")
driver.get(page["url"])
result = self._send_devtools_command(driver, "Page.printToPDF", self._get_print_options())
self._write_file(result['data'], page["pdf_file"])
def _create_driver(self):
webdriver_options = Options()
webdriver_options.add_argument('--headless')
webdriver_options.add_argument('--disable-gpu')
webdriver_options.add_argument('--no-sandbox')
webdriver_options.add_argument('--disable-dev-shm-usage')
webdriver_options.add_argument('--disable-web-security')
webdriver_options.add_argument('--ignore-certificate-errors-spki-list')
webdriver_options.add_argument('--allow-file-access-from-files')
webdriver_options.add_argument('--allow-insecure-localhost')
return webdriver.Chrome(options=webdriver_options)
def _get_print_options(self):
return {
'landscape': False,
'displayHeaderFooter': self.displayHeaderFooter,
'footerTemplate': self.footerTemplate,
'headerTemplate': self.headerTemplate,
'printBackground': True,
'preferCSSPageSize': True,
}
def _send_devtools_command(self, driver, cmd, params={}):
resource = f"/session/{driver.session_id}/chromium/send_command_and_get_result"
url = driver.command_executor._url + resource
body = json.dumps({'cmd': cmd, 'params': params})
response = driver.command_executor._request('POST', url, body)
return response.get('value')
def _write_file(self, b64_data, name):
data = base64.b64decode(b64_data)
with open(name, 'wb') as file:
file.write(data)
|
the-stack_0_25674
|
# Adivinhando número com while
import random, time
pensador = random.randint(0, 10)
numero = int(input('Digite um número de 0 a 10: '))
cont = 1
while numero != pensador:
cont += 1
print()
print('Processando ...')
time.sleep(1)
print()
numero = int(input('Numero errado. Digite outro número de 0 a 10: '))
print()
print('Parabéns!!! Você acertou o numero {} em {} tentativas.'.format(numero, cont))
|
the-stack_0_25676
|
########################################################
# Author: Andrea Fioraldi <[email protected]> #
# License: BSD 2-Clause #
# Original Author: UCSB guyz (ShellPhish mainly) #
# Original code from https://github.com/angr/angr #
########################################################
from collections import defaultdict
import logging
import itertools
l = logging.getLogger("angrdbg.memory")
import claripy
from angr import sim_options as options
from angr import concretization_strategies
from angr.errors import SimUnsatError, SimMergeError, SimMemoryLimitError, SimMemoryAddressError
from angr.state_plugins.inspect import BP_BEFORE, BP_AFTER
from angr.storage.memory import SimMemory, DUMMY_SYMBOLIC_READ_VALUE
from angr.storage.memory_object import SimMemoryObject
from angr.sim_state_options import SimStateOptions
from angr.misc.ux import once
from .page_8 import SimDbgMemory
DEFAULT_MAX_SEARCH = 8
class MultiwriteAnnotation(claripy.Annotation):
@property
def eliminatable(self):
return False
@property
def relocateable(self):
return True
def _multiwrite_filter(mem, ast): #pylint:disable=unused-argument
# this is a huge hack, but so is the whole multiwrite crap
return any(isinstance(a, MultiwriteAnnotation) for a in ast._uneliminatable_annotations)
class SimSymbolicDbgMemory(SimMemory): #pylint:disable=abstract-method
_CONCRETIZATION_STRATEGIES = [ 'symbolic', 'symbolic_approx', 'any', 'any_approx', 'max', 'max_approx',
'symbolic_nonzero', 'symbolic_nonzero_approx', 'norepeats' ]
_SAFE_CONCRETIZATION_STRATEGIES = [ 'symbolic', 'symbolic_approx' ]
def __init__(
self, memory_backer=None, permissions_backer=None, mem=None, memory_id="mem",
endness=None, abstract_backer=False, check_permissions=None,
read_strategies=None, write_strategies=None, stack_region_map=None, generic_region_map=None
):
SimMemory.__init__(self,
endness=endness,
abstract_backer=abstract_backer,
stack_region_map=stack_region_map,
generic_region_map=generic_region_map
)
self.id = memory_id
if check_permissions is None:
check_permissions = self.category == 'mem'
self.mem = SimDbgMemory(
memory_backer=memory_backer,
permissions_backer=permissions_backer,
check_permissions=check_permissions
) if mem is None else mem
# set up the strategies
self.read_strategies = read_strategies
self.write_strategies = write_strategies
#
# Lifecycle management
#
@SimMemory.memo
def copy(self, _):
"""
Return a copy of the SimMemory.
"""
#l.debug("Copying %d bytes of memory with id %s." % (len(self.mem), self.id))
c = SimSymbolicDbgMemory(
mem=self.mem.branch(),
memory_id=self.id,
endness=self.endness,
abstract_backer=self._abstract_backer,
read_strategies=[ s.copy() for s in self.read_strategies ],
write_strategies=[ s.copy() for s in self.write_strategies ],
stack_region_map=self._stack_region_map,
generic_region_map=self._generic_region_map
)
return c
#
# Merging stuff
#
def _changes_to_merge(self, others):
changed_bytes = set()
for o in others: # pylint:disable=redefined-outer-name
changed_bytes |= self.changed_bytes(o)
return changed_bytes
def merge(self, others, merge_conditions, common_ancestor=None): # pylint: disable=unused-argument
"""
Merge this SimMemory with the other SimMemory
"""
changed_bytes = self._changes_to_merge(others)
l.info("Merging %d bytes", len(changed_bytes))
l.info("... %s has changed bytes %s", self.id, changed_bytes)
self.read_strategies = self._merge_strategies(self.read_strategies, *[
o.read_strategies for o in others
])
self.write_strategies = self._merge_strategies(self.write_strategies, *[
o.write_strategies for o in others
])
merged_bytes = self._merge(others, changed_bytes, merge_conditions=merge_conditions)
return len(merged_bytes) > 0
@staticmethod
def _merge_strategies(*strategy_lists):
if len(set(len(sl) for sl in strategy_lists)) != 1:
raise SimMergeError("unable to merge memories with amounts of strategies")
merged_strategies = [ ]
for strategies in zip(*strategy_lists):
if len(set(s.__class__ for s in strategies)) != 1:
raise SimMergeError("unable to merge memories with different types of strategies")
unique = list(set(strategies))
if len(unique) > 1:
unique[0].merge(unique[1:])
merged_strategies.append(unique[0])
return merged_strategies
def widen(self, others):
changed_bytes = self._changes_to_merge(others)
l.info("Memory %s widening bytes %s", self.id, changed_bytes)
self._merge(others, changed_bytes, is_widening=True)
return len(changed_bytes) > 0
def _merge(self, others, changed_bytes, merge_conditions=None, is_widening=False):
all_memories = [self] + others
if merge_conditions is None:
merge_conditions = [ None ] * len(all_memories)
merged_to = None
merged_objects = set()
merged_bytes = set()
for b in sorted(changed_bytes):
if merged_to is not None and not b >= merged_to:
l.info("merged_to = %d ... already merged byte 0x%x", merged_to, b)
continue
l.debug("... on byte 0x%x", b)
memory_objects = []
unconstrained_in = []
# first get a list of all memory objects at that location, and
# all memories that don't have those bytes
for sm, fv in zip(all_memories, merge_conditions):
if b in sm.mem:
l.info("... present in %s", fv)
memory_objects.append((sm.mem[b], fv))
else:
l.info("... not present in %s", fv)
unconstrained_in.append((sm, fv))
mos = set(mo for mo,_ in memory_objects)
mo_bases = set(mo.base for mo, _ in memory_objects)
mo_lengths = set(mo.length for mo, _ in memory_objects)
if not unconstrained_in and not (mos - merged_objects):
continue
# first, optimize the case where we are dealing with the same-sized memory objects
if len(mo_bases) == 1 and len(mo_lengths) == 1 and not unconstrained_in:
our_mo = self.mem[b]
to_merge = [(mo.object, fv) for mo, fv in memory_objects]
# Update `merged_to`
mo_base = list(mo_bases)[0]
merged_to = mo_base + list(mo_lengths)[0]
merged_val = self._merge_values(
to_merge, memory_objects[0][0].length, is_widening=is_widening
)
if options.ABSTRACT_MEMORY in self.state.options:
# merge check for abstract memory
if not to_merge[0][0].uninitialized and self.state.solver.backends.vsa.identical(merged_val, to_merge[0][0]):
continue
# do the replacement
new_object = self.mem.replace_memory_object(our_mo, merged_val)
merged_objects.add(new_object)
merged_objects.update(mos)
merged_bytes.add(b)
else:
# get the size that we can merge easily. This is the minimum of
# the size of all memory objects and unallocated spaces.
min_size = min([mo.length - (b - mo.base) for mo, _ in memory_objects])
for um, _ in unconstrained_in:
for i in range(0, min_size):
if b + i in um:
min_size = i
break
merged_to = b + min_size
l.info("... determined minimum size of %d", min_size)
# Now, we have the minimum size. We'll extract/create expressions of that
# size and merge them
extracted = [(mo.bytes_at(b, min_size), fv) for mo, fv in memory_objects] if min_size != 0 else []
created = [
(self.get_unconstrained_bytes("merge_uc_%s_%x" % (uc.id, b), min_size * self.state.arch.byte_width), fv) for
uc, fv in unconstrained_in
]
to_merge = extracted + created
merged_val = self._merge_values(to_merge, min_size, is_widening=is_widening)
if options.ABSTRACT_MEMORY in self.state.options:
# merge check for abstract memory
if (not unconstrained_in or not unconstrained_in[0][0] is self) \
and self.state.solver.backends.vsa.identical(merged_val, to_merge[0][0]):
continue
self.store(b, merged_val, endness='Iend_BE', inspect=False) # do not convert endianness again
merged_bytes.add(b)
return merged_bytes
def set_state(self, state):
super(SimSymbolicDbgMemory, self).set_state(state)
self.mem.state = state._get_weakref()
if self.state is not None:
if self.read_strategies is None:
self._create_default_read_strategies()
if self.write_strategies is None:
self._create_default_write_strategies()
def _create_default_read_strategies(self):
self.read_strategies = [ ]
if options.APPROXIMATE_MEMORY_INDICES in self.state.options:
# first, we try to resolve the read address by approximation
self.read_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(1024, exact=False),
)
# then, we try symbolic reads, with a maximum width of a kilobyte
self.read_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(1024)
)
if options.CONSERVATIVE_READ_STRATEGY not in self.state.options:
# finally, we concretize to any one solution
self.read_strategies.append(
concretization_strategies.SimConcretizationStrategyAny(),
)
def _create_default_write_strategies(self):
self.write_strategies = [ ]
if options.APPROXIMATE_MEMORY_INDICES in self.state.options:
if options.SYMBOLIC_WRITE_ADDRESSES not in self.state.options:
# we try to resolve a unique solution by approximation
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategySingle(exact=False),
)
else:
# we try a solution range by approximation
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(128, exact=False)
)
if options.SYMBOLIC_WRITE_ADDRESSES in self.state.options:
# we try to find a range of values
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategyRange(128)
)
else:
# we try to find a range of values, but only for ASTs annotated with the multiwrite annotation
self.write_strategies.append(concretization_strategies.SimConcretizationStrategyRange(
128,
filter=_multiwrite_filter
))
# finally, we just grab the maximum solution
if options.CONSERVATIVE_WRITE_STRATEGY not in self.state.options:
self.write_strategies.append(
concretization_strategies.SimConcretizationStrategyMax()
)
#
# Symbolicizing!
#
def make_symbolic(self, name, addr, length=None):
"""
Replaces `length` bytes starting at `addr` with a symbolic variable named name. Adds a constraint equaling that
symbolic variable to the value previously at `addr`, and returns the variable.
"""
l.debug("making %s bytes symbolic", length)
if isinstance(addr, str):
addr, length = self.state.arch.registers[addr]
else:
if length is None:
raise Exception("Unspecified length!")
r = self.load(addr, length)
v = self.get_unconstrained_bytes(name, r.size())
self.store(addr, v)
self.state.add_constraints(r == v)
l.debug("... eq constraints: %s", r == v)
return v
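    # Informal usage sketch (assuming `state` is an angr SimState whose memory
    # plugin is this class): replace 16 bytes at 0x1000 with a fresh symbolic
    # variable constrained to equal the old contents.
    #
    #     sym = state.memory.make_symbolic('buf', 0x1000, 16)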
#
# Address concretization
#
def _resolve_size_range(self, size):
if not self.state.solver.symbolic(size):
i = self.state.solver.eval(size)
if i > self._maximum_concrete_size:
raise SimMemoryLimitError("Concrete size %d outside of allowable limits" % i)
return i, i
if options.APPROXIMATE_MEMORY_SIZES in self.state.options:
max_size_approx = self.state.solver.max_int(size, exact=True)
min_size_approx = self.state.solver.min_int(size, exact=True)
if max_size_approx < self._maximum_symbolic_size_approx:
return min_size_approx, max_size_approx
max_size = self.state.solver.max_int(size)
min_size = self.state.solver.min_int(size)
if min_size > self._maximum_symbolic_size:
self.state.history.add_event('memory_limit', message="Symbolic size %d outside of allowable limits" % min_size, size=size)
if options.BEST_EFFORT_MEMORY_STORING not in self.state.options:
raise SimMemoryLimitError("Symbolic size %d outside of allowable limits" % min_size)
else:
min_size = self._maximum_symbolic_size
return min_size, min(max_size, self._maximum_symbolic_size)
#
# Concretization strategies
#
def _apply_concretization_strategies(self, addr, strategies, action):
"""
Applies concretization strategies on the address until one of them succeeds.
"""
# we try all the strategies in order
for s in strategies:
# first, we trigger the SimInspect breakpoint and give it a chance to intervene
e = addr
self.state._inspect(
'address_concretization', BP_BEFORE, address_concretization_strategy=s,
address_concretization_action=action, address_concretization_memory=self,
address_concretization_expr=e, address_concretization_add_constraints=True
)
s = self.state._inspect_getattr('address_concretization_strategy', s)
e = self.state._inspect_getattr('address_concretization_expr', addr)
# if the breakpoint None'd out the strategy, we skip it
if s is None:
continue
# let's try to apply it!
try:
a = s.concretize(self, e)
except SimUnsatError:
a = None
# trigger the AFTER breakpoint and give it a chance to intervene
self.state._inspect(
'address_concretization', BP_AFTER,
address_concretization_result=a
)
a = self.state._inspect_getattr('address_concretization_result', a)
# return the result if not None!
if a is not None:
return a
# well, we tried
raise SimMemoryAddressError(
"Unable to concretize address for %s with the provided strategies." % action
)
def concretize_write_addr(self, addr, strategies=None):
"""
Concretizes an address meant for writing.
:param addr: An expression for the address.
:param strategies: A list of concretization strategies (to override the default).
:returns: A list of concrete addresses.
"""
if isinstance(addr, int):
return [ addr ]
elif not self.state.solver.symbolic(addr):
return [ self.state.solver.eval(addr) ]
strategies = self.write_strategies if strategies is None else strategies
return self._apply_concretization_strategies(addr, strategies, 'store')
def concretize_read_addr(self, addr, strategies=None):
"""
Concretizes an address meant for reading.
:param addr: An expression for the address.
:param strategies: A list of concretization strategies (to override the default).
:returns: A list of concrete addresses.
"""
if isinstance(addr, int):
return [ addr ]
elif not self.state.solver.symbolic(addr):
return [ self.state.solver.eval(addr) ]
strategies = self.read_strategies if strategies is None else strategies
return self._apply_concretization_strategies(addr, strategies, 'load')
def normalize_address(self, addr, is_write=False):
return self.concretize_read_addr(addr)
#
# Memory reading
#
def _fill_missing(self, addr, num_bytes, inspect=True, events=True):
if self.category == 'reg':
name = "reg_%s" % (self.state.arch.translate_register_name(addr))
else:
name = "%s_%x" % (self.id, addr)
all_missing = [
self.get_unconstrained_bytes(
name,
min(self.mem._page_size, num_bytes)*self.state.arch.byte_width,
source=i,
inspect=inspect,
events=events,
key=self.variable_key_prefix + (addr,),
eternal=False) # :(
for i in range(addr, addr+num_bytes, self.mem._page_size)
]
if all_missing:
is_mem = self.category == 'mem' and \
options.ZERO_FILL_UNCONSTRAINED_MEMORY not in self.state.options and \
options.SYMBOL_FILL_UNCONSTRAINED_MEMORY not in self.state.options
is_reg = self.category == 'reg' and \
options.ZERO_FILL_UNCONSTRAINED_REGISTERS not in self.state.options and \
options.SYMBOL_FILL_UNCONSTRAINED_REGISTERS not in self.state.options
if is_mem or is_reg:
if once('mem_fill_warning'):
l.warning("The program is accessing memory or registers with an unspecified value. "
"This could indicate unwanted behavior.")
l.warning("angr will cope with this by generating an unconstrained symbolic variable and continuing. "
"You can resolve this by:")
l.warning("1) setting a value to the initial state")
                    l.warning("2) adding the state option ZERO_FILL_UNCONSTRAINED_{MEMORY,REGISTERS}, "
                        "to make unknown regions hold null")
                    l.warning("3) adding the state option SYMBOL_FILL_UNCONSTRAINED_{MEMORY,REGISTERS}, "
                        "to suppress these messages.")
if is_mem:
l.warning("Filling memory at %#x with %d unconstrained bytes", addr, num_bytes)
else:
reg_str = self.state.arch.translate_register_name(addr, size=num_bytes)
l.warning("Filling register %s with %d unconstrained bytes", reg_str, num_bytes)
if self.category == 'reg' and self.state.arch.register_endness == 'Iend_LE':
all_missing = [ a.reversed for a in all_missing ]
elif self.category != 'reg' and self.state.arch.memory_endness == 'Iend_LE':
all_missing = [ a.reversed for a in all_missing ]
b = self.state.solver.Concat(*all_missing) if len(all_missing) > 1 else all_missing[0]
if events:
self.state.history.add_event('uninitialized', memory_id=self.id, addr=addr, size=num_bytes)
default_mo = SimMemoryObject(b, addr, byte_width=self.state.arch.byte_width)
self.state.scratch.push_priv(True)
self.mem.store_memory_object(default_mo, overwrite=False)
self.state.scratch.pop_priv()
return default_mo
def _read_from(self, addr, num_bytes, inspect=True, events=True, ret_on_segv=False):
items = self.mem.load_objects(addr, num_bytes, ret_on_segv=ret_on_segv)
# optimize the case where we have a single object return
if len(items) == 1 and items[0][1].includes(addr) and items[0][1].includes(addr + num_bytes - 1):
return items[0][1].bytes_at(addr, num_bytes)
segments = [ ]
last_missing = addr + num_bytes - 1
for mo_addr,mo in reversed(items):
if not mo.includes(last_missing):
# add missing bytes
start_addr = mo.last_addr + 1
length = last_missing - mo.last_addr
fill_mo = self._fill_missing(start_addr, length, inspect=inspect, events=events)
segments.append(fill_mo.bytes_at(start_addr, length).reversed)
last_missing = mo.last_addr
# add the normal segment
segments.append(mo.bytes_at(mo_addr, last_missing - mo_addr + 1))
last_missing = mo_addr - 1
# handle missing bytes at the beginning
if last_missing != addr - 1:
start_addr = addr
            length = last_missing - addr + 1
            fill_mo = self._fill_missing(start_addr, length, inspect=inspect, events=events)
            segments.append(fill_mo.bytes_at(start_addr, length))
# reverse the segments to put them in the right order
segments.reverse()
# and combine
if len(segments) > 1:
r = segments[0].concat(*segments[1:])
elif len(segments) == 1:
r = segments[0]
else:
r = self.state.solver.BVV(0, 0)
return r
def _load(self, dst, size, condition=None, fallback=None,
inspect=True, events=True, ret_on_segv=False):
if self.state.solver.symbolic(size):
l.warning("Concretizing symbolic length. Much sad; think about implementing.")
# for now, we always load the maximum size
_,max_size = self._resolve_size_range(size)
if options.ABSTRACT_MEMORY not in self.state.options and self.state.solver.symbolic(size):
self.state.add_constraints(size == max_size, action=True)
if max_size == 0:
self.state.history.add_event('memory_limit', message="0-length read")
size = max_size
if self.state.solver.symbolic(dst) and options.AVOID_MULTIVALUED_READS in self.state.options:
return [ ], self.get_unconstrained_bytes("symbolic_read_unconstrained", size*self.state.arch.byte_width), [ ]
# get a concrete set of read addresses
try:
addrs = self.concretize_read_addr(dst)
except SimMemoryError:
if options.CONSERVATIVE_READ_STRATEGY in self.state.options:
return [ ], self.get_unconstrained_bytes(
"symbolic_read_unconstrained", size*self.state.arch.byte_width
), [ ]
else:
raise
constraint_options = [ ]
if len(addrs) == 1:
            # It's not a conditional read
constraint_options.append(dst == addrs[0])
read_value = self._read_from(addrs[0], size, inspect=inspect, events=events)
else:
read_value = DUMMY_SYMBOLIC_READ_VALUE # it's a sentinel value and should never be touched
for a in addrs:
read_value = self.state.solver.If(dst == a, self._read_from(a, size, inspect=inspect, events=events),
read_value)
constraint_options.append(dst == a)
if len(constraint_options) > 1:
load_constraint = [ self.state.solver.Or(*constraint_options) ]
elif not self.state.solver.symbolic(constraint_options[0]):
load_constraint = [ ]
else:
load_constraint = [ constraint_options[0] ]
if condition is not None and fallback is not None:
read_value = self.state.solver.If(condition, read_value, fallback)
load_constraint = [ self.state.solver.Or(self.state.solver.And(condition, *load_constraint), self.state.solver.Not(condition)) ]
return addrs, read_value, load_constraint
def _find(self, start, what, max_search=None, max_symbolic_bytes=None, default=None, step=1,
disable_actions=False, inspect=True, chunk_size=None):
if max_search is None:
max_search = DEFAULT_MAX_SEARCH
if isinstance(start, int):
start = self.state.solver.BVV(start, self.state.arch.bits)
constraints = [ ]
remaining_symbolic = max_symbolic_bytes
seek_size = len(what)//self.state.arch.byte_width
symbolic_what = self.state.solver.symbolic(what)
l.debug("Search for %d bytes in a max of %d...", seek_size, max_search)
chunk_start = 0
if chunk_size is None:
chunk_size = max(0x100, seek_size + 0x80)
chunk = self.load(start, chunk_size, endness="Iend_BE",
disable_actions=disable_actions, inspect=inspect)
cases = [ ]
match_indices = [ ]
offsets_matched = [ ] # Only used in static mode
byte_width = self.state.arch.byte_width
no_singlevalue_opt = options.SYMBOLIC_MEMORY_NO_SINGLEVALUE_OPTIMIZATIONS in self.state.options
cond_prefix = [ ]
for i in itertools.count(step=step):
l.debug("... checking offset %d", i)
if i > max_search - seek_size:
l.debug("... hit max size")
break
if remaining_symbolic is not None and remaining_symbolic == 0:
l.debug("... hit max symbolic")
break
if i - chunk_start > chunk_size - seek_size:
l.debug("loading new chunk")
chunk_start += chunk_size - seek_size + 1
chunk = self.load(start+chunk_start, chunk_size,
endness="Iend_BE", ret_on_segv=True,
disable_actions=disable_actions, inspect=inspect)
chunk_off = i-chunk_start
b = chunk[chunk_size*byte_width - chunk_off*byte_width - 1 : chunk_size*byte_width - chunk_off*byte_width - seek_size*byte_width]
condition = b == what
if not self.state.solver.is_false(condition):
if no_singlevalue_opt and cond_prefix:
condition = claripy.And(*(cond_prefix + [condition]))
cases.append([condition, claripy.BVV(i, len(start))])
match_indices.append(i)
if b.symbolic and no_singlevalue_opt:
# in tracing mode, we need to make sure that all previous bytes are not equal to what
cond_prefix.append(b != what)
if self.state.mode == 'static':
si = b._model_vsa
what_si = what._model_vsa
if isinstance(si, claripy.vsa.StridedInterval):
if not si.intersection(what_si).is_empty:
offsets_matched.append(start + i)
if si.identical(what_si):
break
if si.cardinality != 1:
if remaining_symbolic is not None:
remaining_symbolic -= 1
else:
# Comparison with other types (like IfProxy or ValueSet) is not supported
if remaining_symbolic is not None:
remaining_symbolic -= 1
else:
# other modes (e.g. symbolic mode)
if not b.symbolic and not symbolic_what and self.state.solver.eval(b) == self.state.solver.eval(what):
l.debug("... found concrete")
break
else:
if b.symbolic and remaining_symbolic is not None:
remaining_symbolic -= 1
if self.state.mode == 'static':
r = self.state.solver.ESI(self.state.arch.bits)
for off in offsets_matched:
r = r.union(off)
constraints = [ ]
return r, constraints, match_indices
else:
if default is None:
l.debug("... no default specified")
default = 0
constraints += [ self.state.solver.Or(*[ c for c,_ in cases]) ]
#l.debug("running ite_cases %s, %s", cases, default)
r = self.state.solver.ite_cases(cases, default - start) + start
return r, constraints, match_indices
def __contains__(self, dst):
if isinstance(dst, int):
addr = dst
elif self.state.solver.symbolic(dst):
l.warning("Currently unable to do SimMemory.__contains__ on symbolic variables.")
return False
else:
addr = self.state.solver.eval(dst)
return addr in self.mem
def was_written_to(self, dst):
if isinstance(dst, int):
addr = dst
elif self.state.solver.symbolic(dst):
l.warning("Currently unable to do SimMemory.was_written_to on symbolic variables.")
return False
else:
addr = self.state.solver.eval(dst)
return self.mem.contains_no_backer(addr)
#
# Writes
#
def _store(self, req):
l.debug("Doing a store...")
req._adjust_condition(self.state)
max_bytes = req.data.length//self.state.arch.byte_width
if req.size is None:
req.size = max_bytes
if self.state.solver.symbolic(req.size):
if options.AVOID_MULTIVALUED_WRITES in self.state.options:
return req
if options.CONCRETIZE_SYMBOLIC_WRITE_SIZES in self.state.options:
new_size = self.state.solver.eval(req.size)
self.state.add_constraints(req.size == new_size)
req.size = new_size
if self.state.solver.symbolic(req.addr) and options.AVOID_MULTIVALUED_WRITES in self.state.options:
return req
if not self.state.solver.symbolic(req.size) and self.state.solver.eval(req.size) > req.data.length//self.state.arch.byte_width:
raise SimMemoryError("Not enough data for requested storage size (size: {}, data: {})".format(req.size, req.data))
if self.state.solver.symbolic(req.size):
self.state.add_constraints(self.state.solver.ULE(req.size, max_bytes))
#
# First, resolve the addresses
#
try:
req.actual_addresses = sorted(self.concretize_write_addr(req.addr))
except SimMemoryError:
if options.CONSERVATIVE_WRITE_STRATEGY in self.state.options:
return req
else:
raise
if type(req.addr) is not int and req.addr.symbolic:
conditional_constraint = self.state.solver.Or(*[ req.addr == a for a in req.actual_addresses ])
if (conditional_constraint.symbolic or # if the constraint is symbolic
conditional_constraint.is_false()): # if it makes the state go unsat
req.constraints.append(conditional_constraint)
#
# Prepare memory objects
#
# If we have only one address to write to we handle it as concrete, disregarding symbolic or not
is_size_symbolic = self.state.solver.symbolic(req.size)
is_addr_symbolic = self.state.solver.symbolic(req.addr)
if not is_size_symbolic and len(req.actual_addresses) == 1:
store_list = self._store_fully_concrete(req.actual_addresses[0], req.size, req.data, req.endness, req.condition)
elif not is_addr_symbolic:
store_list = self._store_symbolic_size(req.addr, req.size, req.data, req.endness, req.condition)
elif not is_size_symbolic:
store_list = self._store_symbolic_addr(req.addr, req.actual_addresses, req.size, req.data, req.endness, req.condition)
else:
store_list = self._store_fully_symbolic(req.addr, req.actual_addresses, req.size, req.data, req.endness, req.condition)
#
# store it!!!
#
req.stored_values = []
if (self.category == 'mem' and options.SIMPLIFY_MEMORY_WRITES in self.state.options) or \
(self.category == 'reg' and options.SIMPLIFY_REGISTER_WRITES in self.state.options):
for store_item in store_list:
store_item['value'] = self.state.solver.simplify(store_item['value'])
if req.endness == "Iend_LE" or (req.endness is None and self.endness == "Iend_LE"):
store_item['value'] = store_item['value'].reversed
req.stored_values.append(store_item['value'])
self._insert_memory_object(store_item['value'], store_item['addr'], store_item['size'])
else:
for store_item in store_list:
if req.endness == "Iend_LE" or (req.endness is None and self.endness == "Iend_LE"):
store_item['value'] = store_item['value'].reversed
req.stored_values.append(store_item['value'])
self._insert_memory_object(store_item['value'], store_item['addr'], store_item['size'])
l.debug("... done")
req.completed = True
return req
def _insert_memory_object(self, value, address, size):
if self.category == 'mem':
self.state.scratch.dirty_addrs.update(range(address, address+size))
mo = SimMemoryObject(value, address, length=size, byte_width=self.state.arch.byte_width)
self.mem.store_memory_object(mo)
def _store_fully_concrete(self, address, size, data, endness, condition):
if type(size) is not int:
size = self.state.solver.eval(size)
if size < data.length//self.state.arch.byte_width:
data = data[len(data)-1:len(data)-size*self.state.arch.byte_width:]
if condition is not None:
try:
original_value = self._read_from(address, size)
except Exception as ex:
raise ex
if endness == "Iend_LE" or (endness is None and self.endness == "Iend_LE"):
original_value = original_value.reversed
conditional_value = self.state.solver.If(condition, data, original_value)
else:
conditional_value = data
return [ dict(value=conditional_value, addr=address, size=size) ]
def _store_symbolic_size(self, address, size, data, endness, condition):
address = self.state.solver.eval(address)
max_bytes = data.length//self.state.arch.byte_width
original_value = self._read_from(address, max_bytes)
if endness == "Iend_LE" or (endness is None and self.endness == "Iend_LE"):
original_value = original_value.reversed
befores = original_value.chop(bits=self.state.arch.byte_width)
afters = data.chop(bits=self.state.arch.byte_width)
stored_value = self.state.solver.Concat(*[
self.state.solver.If(self.state.solver.UGT(size, i), a, b)
for i, (a, b) in enumerate(zip(afters, befores))
])
conditional_value = self.state.solver.If(condition, stored_value, original_value) if condition is not None else stored_value
return [ dict(value=conditional_value, addr=address, size=max_bytes) ]
def _store_symbolic_addr(self, address, addresses, size, data, endness, condition):
size = self.state.solver.eval(size)
segments = self._get_segments(addresses, size)
if condition is None:
condition = claripy.BoolV(True)
original_values = [ self._read_from(segment['start'], segment['size']) for segment in segments ]
if endness == "Iend_LE" or (endness is None and self.endness == "Iend_LE"):
original_values = [ ov.reversed for ov in original_values ]
stored_values = []
for segment, original_value in zip(segments, original_values):
conditional_value = original_value
for opt in segment['options']:
if endness == "Iend_LE" or (endness is None and self.endness == "Iend_LE"):
high = ((opt['idx']+segment['size']) * self.state.arch.byte_width)-1
low = opt['idx']*self.state.arch.byte_width
else:
high = len(data) - 1 - (opt['idx']*self.state.arch.byte_width)
low = len(data) - ((opt['idx']+segment['size']) *self.state.arch.byte_width)
data_slice = data[high:low]
conditional_value = self.state.solver.If(self.state.solver.And(address == segment['start']-opt['idx'], condition), data_slice, conditional_value)
stored_values.append(dict(value=conditional_value, addr=segment['start'], size=segment['size']))
return stored_values
def flush_pages(self,whitelist):
self.mem.flush_pages(whitelist)
@staticmethod
def _create_segment(addr, size, s_options, idx, segments):
segment = dict(start=addr, size=size, options=s_options)
segments.insert(idx, segment)
@staticmethod
def _split_segment(addr, segments):
s_idx = SimSymbolicDbgMemory._get_segment_index(addr, segments)
segment = segments[s_idx]
if segment['start'] == addr:
return s_idx
assert segment['start'] < addr < segment['start'] + segment['size']
size_prev = addr - segment['start']
size_next = segment['size'] - size_prev
assert size_prev != 0 and size_next != 0
segments.pop(s_idx)
SimSymbolicDbgMemory._create_segment(segment['start'], size_prev, segment['options'], s_idx, segments)
SimSymbolicDbgMemory._create_segment(addr, size_next, [{"idx": opt["idx"] + size_prev}
for opt in segment['options']], s_idx + 1, segments)
return s_idx + 1
@staticmethod
def _add_segments_overlap(idx, addr, segments):
for i in range(idx, len(segments)):
segment = segments[i]
if addr < segment['start'] + segment['size']:
segments[i]["options"].append({"idx": segment['start'] - addr})
@staticmethod
def _get_segment_index(addr, segments):
for i, segment in enumerate(segments):
if segment['start'] <= addr < segment['start'] + segment['size']:
return i
return -1
@staticmethod
def _get_segments(addrs, size):
segments = []
highest = 0
for addr in addrs:
if addr < highest:
idx = SimSymbolicDbgMemory._split_segment(addr, segments)
SimSymbolicDbgMemory._create_segment(highest, addr + size - highest, [], len(segments), segments)
SimSymbolicDbgMemory._add_segments_overlap(idx, addr, segments)
else:
SimSymbolicDbgMemory._create_segment(addr, size, [{'idx': 0}], len(segments), segments)
highest = addr + size
return segments
def _store_fully_symbolic(self, address, addresses, size, data, endness, condition):
stored_values = [ ]
byte_dict = defaultdict(list)
max_bytes = data.length//self.state.arch.byte_width
if condition is None:
condition = claripy.BoolV(True)
# chop data into byte-chunks
original_values = [self._read_from(a, max_bytes) for a in addresses]
if endness == "Iend_LE" or (endness is None and self.endness == "Iend_LE"):
original_values = [ ov.reversed for ov in original_values ]
data_bytes = data.chop(bits=self.state.arch.byte_width)
for a, fv in zip(addresses, original_values):
original_bytes = fv.chop(self.state.arch.byte_width)
for index, (d_byte, o_byte) in enumerate(zip(data_bytes, original_bytes)):
                # create a dict of all possible values for a certain address
byte_dict[a+index].append((a, index, d_byte, o_byte))
for byte_addr in sorted(byte_dict.keys()):
write_list = byte_dict[byte_addr]
# If this assertion fails something is really wrong!
assert all(v[3] is write_list[0][3] for v in write_list)
conditional_value = write_list[0][3]
for a, index, d_byte, o_byte in write_list:
# create the ast for each byte
conditional_value = self.state.solver.If(self.state.solver.And(address == a, size > index, condition), d_byte, conditional_value)
stored_values.append(dict(value=conditional_value, addr=byte_addr, size=1))
return stored_values
def _store_with_merge(self, req):
req._adjust_condition(self.state)
dst = req.addr
cnt = req.data
size = req.size
endness = req.endness
req.stored_values = [ ]
if options.ABSTRACT_MEMORY not in self.state.options:
raise SimMemoryError('store_with_merge is not supported without abstract memory.')
l.debug("Doing a store with merging...")
addrs = self.concretize_write_addr(dst)
if len(addrs) == 1:
l.debug("... concretized to 0x%x", addrs[0])
else:
l.debug("... concretized to %d values", len(addrs))
if size is None:
# Full length
length = len(cnt)
else:
raise NotImplementedError()
for addr in addrs:
# First we load old values
old_val = self._read_from(addr, length // self.state.arch.byte_width)
assert isinstance(old_val, claripy.Bits)
# FIXME: This is a big hack
def is_reversed(o):
if isinstance(o, claripy.Bits) and o.op == 'Reverse':
return True
return False
def can_be_reversed(o):
om = o._model_vsa
if isinstance(om, claripy.vsa.StridedInterval) and om.is_integer:
return True
return False
if endness == 'Iend_LE': cnt = cnt.reversed
reverse_it = False
if is_reversed(cnt):
if is_reversed(old_val):
cnt = cnt.args[0]
old_val = old_val.args[0]
reverse_it = True
elif can_be_reversed(old_val):
cnt = cnt.args[0]
reverse_it = True
if isinstance(old_val, (int, claripy.bv.BVV)):
merged_val = self.state.solver.SI(bits=len(old_val), to_conv=old_val)
else:
merged_val = old_val
merged_val = merged_val.union(cnt)
if reverse_it:
merged_val = merged_val.reversed
# Write the new value
self.store(addr, merged_val, size=size)
req.stored_values.append(merged_val)
req.completed = True
# TODO: revisit the following lines
req.constraints = [ ]
return req
def get_unconstrained_bytes(self, name, bits, source=None, key=None, inspect=True, events=True, **kwargs):
"""
Get some consecutive unconstrained bytes.
:param name: Name of the unconstrained variable
:param bits: Size of the unconstrained variable
:param source: Where those bytes are read from. Currently it is only used in under-constrained symbolic
execution so that we can track the allocation depth.
:return: The generated variable
"""
if self.category == 'mem' and options.ZERO_FILL_UNCONSTRAINED_MEMORY in self.state.options:
return self.state.solver.BVV(0, bits)
elif self.category == 'reg' and options.ZERO_FILL_UNCONSTRAINED_REGISTERS in self.state.options:
return self.state.solver.BVV(0, bits)
elif options.SPECIAL_MEMORY_FILL in self.state.options and self.state._special_memory_filler is not None:
return self.state._special_memory_filler(name, bits, self.state)
else:
if options.UNDER_CONSTRAINED_SYMEXEC in self.state.options:
if source is not None and type(source) is int:
alloc_depth = self.state.uc_manager.get_alloc_depth(source)
kwargs['uc_alloc_depth'] = 0 if alloc_depth is None else alloc_depth + 1
r = self.state.solver.Unconstrained(name, bits, key=key, inspect=inspect, events=events, **kwargs)
return r
# Unconstrain a byte
def unconstrain_byte(self, addr, inspect=True, events=True):
unconstrained_byte = self.get_unconstrained_bytes("%s_unconstrain_%#x" % (self.id, addr), self.state.arch.byte_width, inspect=inspect,
events=events, key=('manual_unconstrain', addr))
self.store(addr, unconstrained_byte)
# Replaces the differences between self and other with unconstrained bytes.
def unconstrain_differences(self, other):
changed_bytes = self.changed_bytes(other)
l.debug("Will unconstrain %d %s bytes", len(changed_bytes), self.id)
for b in changed_bytes:
self.unconstrain_byte(b)
@staticmethod
def _is_uninitialized(a):
return getattr(a._model_vsa, 'uninitialized', False)
def _merge_values(self, to_merge, merged_size, is_widening=False):
if options.ABSTRACT_MEMORY in self.state.options:
if self.category == 'reg' and self.state.arch.register_endness == 'Iend_LE':
should_reverse = True
elif self.state.arch.memory_endness == 'Iend_LE':
should_reverse = True
else:
should_reverse = False
merged_val = to_merge[0][0]
if should_reverse: merged_val = merged_val.reversed
for tm,_ in to_merge[1:]:
if should_reverse: tm = tm.reversed
if self._is_uninitialized(tm):
continue
if is_widening:
l.info("Widening %s %s...", merged_val, tm)
merged_val = merged_val.widen(tm)
l.info('... Widened to %s', merged_val)
else:
l.info("Merging %s %s...", merged_val, tm)
merged_val = merged_val.union(tm)
l.info("... Merged to %s", merged_val)
if should_reverse: merged_val = merged_val.reversed
else:
merged_val = self.state.solver.BVV(0, merged_size*self.state.arch.byte_width)
for tm,fv in to_merge:
merged_val = self.state.solver.If(fv, tm, merged_val)
return merged_val
def dbg_print(self, indent=0):
"""
Print out debugging information.
"""
lst = []
more_data = False
for i, addr in enumerate(self.mem.keys()):
lst.append(addr)
if i >= 20:
more_data = True
break
for addr in sorted(lst):
data = self.mem[addr]
if isinstance(data, SimMemoryObject):
memobj = data
print("%s%xh: (%s)[%d]" % (" " * indent, addr, memobj, addr - memobj.base))
else:
print("%s%xh: <default data>" % (" " * indent, addr))
if more_data:
print("%s..." % (" " * indent))
def _copy_contents(self, dst, src, size, condition=None, src_memory=None, dst_memory=None, inspect=True,
disable_actions=False):
src_memory = self if src_memory is None else src_memory
dst_memory = self if dst_memory is None else dst_memory
_,max_size = self._resolve_size_range(size)
if max_size == 0:
return None, [ ]
data = src_memory.load(src, max_size, inspect=inspect, disable_actions=disable_actions)
dst_memory.store(dst, data, size=size, condition=condition, inspect=inspect, disable_actions=disable_actions)
return data
#
# Things that are actually handled by SimPagedMemory
#
def changed_bytes(self, other):
"""
Gets the set of changed bytes between self and `other`.
:param other: The other :class:`SimSymbolicDbgMemory`.
:returns: A set of differing bytes
"""
return self.mem.changed_bytes(other.mem)
def replace_all(self, old, new):
"""
Replaces all instances of expression old with expression new.
        :param old: A claripy expression. Must contain at least one named variable (to make
                    it possible to use the name index for speedup)
:param new: The new variable to replace it with
"""
return self.mem.replace_all(old, new)
def addrs_for_name(self, n):
"""
Returns addresses that contain expressions that contain a variable
named `n`.
"""
return self.mem.addrs_for_name(n)
def addrs_for_hash(self, h):
"""
Returns addresses that contain expressions that contain a variable
with the hash of `h`.
"""
return self.mem.addrs_for_hash(h)
def replace_memory_object(self, old, new_content):
"""
Replaces the memory object 'old' with a new memory object containing
'new_content'.
:param old: A SimMemoryObject (i.e., one from memory_objects_for_hash() or
memory_objects_for_name())
:param new_content: the content (claripy expression) for the new memory object
"""
return self.mem.replace_memory_object(old, new_content)
def memory_objects_for_name(self, n):
"""
Returns a set of SimMemoryObjects that contain expressions that contain a variable
with the name of n. This is useful for replacing those values, in one fell swoop,
with replace_memory_object(), even if they've been partially overwritten.
"""
return self.mem.memory_objects_for_name(n)
def memory_objects_for_hash(self, n):
"""
Returns a set of SimMemoryObjects that contain expressions that contain a variable
with the hash of h. This is useful for replacing those values, in one fell swoop,
with replace_memory_object(), even if they've been partially overwritten.
"""
return self.mem.memory_objects_for_hash(n)
def permissions(self, addr, permissions=None):
"""
Retrieve the permissions of the page at address `addr`.
:param addr: address to get the page permissions
:param permissions: Integer or BVV to optionally set page permissions to
:return: AST representing the permissions on the page
"""
out = self.mem.permissions(addr, permissions)
# if unicorn is in play and we've marked a page writable, it must be uncached
if permissions is not None and self.state.solver.is_true(permissions & 2 == 2):
if self.state.has_plugin('unicorn'):
self.state.unicorn.uncache_page(addr)
return out
def map_region(self, addr, length, permissions, init_zero=False):
"""
Map a number of pages at address `addr` with permissions `permissions`.
:param addr: address to map the pages at
:param length: length in bytes of region to map, will be rounded upwards to the page size
:param permissions: AST of permissions to map, will be a bitvalue representing flags
:param init_zero: Initialize page with zeros
"""
l.info("Mapping [%#x, %#x] as %s", addr, addr + length - 1, permissions)
return self.mem.map_region(addr, length, permissions, init_zero=init_zero)
def unmap_region(self, addr, length):
"""
Unmap a number of pages at address `addr`
:param addr: address to unmap the pages at
:param length: length in bytes of region to map, will be rounded upwards to the page size
"""
return self.mem.unmap_region(addr, length)
from angr.errors import SimUnsatError, SimMemoryError, SimMemoryLimitError, SimMemoryAddressError, SimMergeError
from angr import sim_options as options
from angr.state_plugins.inspect import BP_AFTER, BP_BEFORE
from angr import concretization_strategies
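# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). It exercises the
# read/write address concretization documented above through angr's public
# state API; whether state.memory is actually backed by this particular
# plugin depends on how the class is registered, which is assumed here. The
# shellcode bytes and address range are arbitrary placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import angr
    import claripy

    proj = angr.load_shellcode(b"\xc3", arch="AMD64")  # a single `ret`
    state = proj.factory.blank_state()

    # a symbolic address constrained to a small range; the write strategies
    # reduce it to a concrete candidate before the store happens
    sym_addr = claripy.BVS("sym_addr", 64)
    state.solver.add(sym_addr >= 0x1000, sym_addr < 0x1008)
    state.memory.store(sym_addr, claripy.BVV(0x41, 8))

    # reading back through the same symbolic address goes through the read
    # strategies and yields an If-tree over the candidate addresses
    val = state.memory.load(sym_addr, 1)
    print(state.solver.eval_upto(val, 8))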
|
the-stack_0_25677
|
from aiohttp import web
from certomancer import registry
from .csc_dummy_server import CSCWithCertomancer, DummyServiceParams
def run_from_file(cfg_path, port, require_hash_pinning=True):
cfg = registry.CertomancerConfig.from_file(cfg_path)
csc_app = CSCWithCertomancer(
cfg, service_params=DummyServiceParams(
hash_pinning_required=require_hash_pinning
)
)
csc_app.register_routes()
web.run_app(
csc_app.app, host='localhost', port=port,
)
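# Hedged example entry point (not part of the original file) showing how
# run_from_file() might be invoked; the configuration path is a hypothetical
# placeholder and must point at a valid Certomancer config.
if __name__ == "__main__":
    import sys

    cfg_file = sys.argv[1] if len(sys.argv) > 1 else "csc-certomancer.yml"  # hypothetical path
    run_from_file(cfg_file, port=8999, require_hash_pinning=False)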
|
the-stack_0_25678
|
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
"""Simple test script for 2.13" 250x122 black and white featherwing.
Supported products:
* Adafruit 2.13" Black and White FeatherWing
* https://www.adafruit.com/product/4195
"""
import time
import board
import displayio
import adafruit_ssd1675
displayio.release_displays()
epd_cs = board.D9
epd_dc = board.D10
display_bus = displayio.FourWire(
board.SPI(), command=epd_dc, chip_select=epd_cs, baudrate=1000000
)
time.sleep(1)
display = adafruit_ssd1675.SSD1675(display_bus, width=250, height=122, rotation=270)
g = displayio.Group()
with open("/display-ruler.bmp", "rb") as f:
pic = displayio.OnDiskBitmap(f)
# CircuitPython 6 & 7 compatible
t = displayio.TileGrid(
pic, pixel_shader=getattr(pic, "pixel_shader", displayio.ColorConverter())
)
# CircuitPython 7 compatible only
# t = displayio.TileGrid(pic, pixel_shader=pic.pixel_shader)
g.append(t)
display.show(g)
display.refresh()
print("refreshed")
time.sleep(120)
|
the-stack_0_25680
|
from goap.utils.os.shell_command import ShellCommand
from os import path
import unittest
import subprocess
from pprint import PrettyPrinter
import networkx as nx
from goap.WorldState import WorldState
from goap.Action import Actions
from goap.Planner import Planner
class PlannerTest(unittest.TestCase):
@staticmethod
def __reset_environment():
if path.isdir('/tmp/goap_tmp'):
subprocess.call(['rm', '-rf', '/tmp/goap_tmp'])
def __print(self):
self.print.pprint(
'Nodes: {}, Edges: {}'.format(
self.planner.graph.nodes,
self.planner.graph.edges))
def setUp(self):
# init actions
self.dir_handler_cmd = Actions()
self.lv_act = Actions()
# setup actions
self.mkdir = ShellCommand(command='mkdir -p /tmp/goap_tmp')
self.mktoken = ShellCommand(command='touch /tmp/goap_tmp/.token')
self.mkecho = ShellCommand(command='echo mocked')
# self.setUpLvmCMD()
# self.setUpDirHandlerCMD()
# init planner
self.print = PrettyPrinter(indent=4)
self.init_ws = WorldState(
{"tmp_dir_state": False, "tmp_dir_content": False, })
self.gs = WorldState(
{"tmp_dir_state": True, "tmp_dir_content": True, })
self.planner = Planner(actions=self.dir_handler_cmd)
def tearDown(self) -> None:
self.dir_handler_cmd = []
self.lv_act = []
self.init_ws = None
self.gs = {}
self.planner = None
def setUpDirHandlerCMD(self):
self.dir_handler_cmd.add(
name='CreateTmpDir',
conditions={'tmp_dir_state': False, 'tmp_dir_content': False},
effects={'tmp_dir_state': True, 'tmp_dir_content': False},
func=self.mkdir
)
self.dir_handler_cmd.add(
name='CreateToken',
conditions={'tmp_dir_state': True, 'tmp_dir_content': False},
effects={'tmp_dir_state': True, 'tmp_dir_content': True},
func=self.mktoken
)
def setUpLvmCMD(self):
self.lv_act.add(
name='ExpandLV',
conditions={
'lv_need_expansion': True,
'vg_need_expansion': False,
},
effects={
'lv_need_expansion': False,
},
func=self.mkecho,
cost=1.0
)
self.lv_act.add(
name='ExpandVG',
conditions={
'vg_need_expansion': True,
'pv_need_expansion': False,
},
effects={
'vg_need_expansion': False,
},
func=self.mkecho,
)
self.lv_act.add(
name='ExpandPV',
conditions={
'pv_need_expansion': True,
},
effects={
'pv_need_expansion': False,
},
func=self.mkecho,
cost=1.5,
)
def test_set_goal(self):
self.planner.goal = self.gs
assert self.gs == self.planner.goal
def test_graph_isomorphic(self):
from goap.Planner import Node
from goap.Planner import Edge
acts = Actions()
acts.add(
name='CreateTmpDirIso',
conditions={'tmp_dir_state': False, 'tmp_dir_content': False},
effects={'tmp_dir_state': True, 'tmp_dir_content': False},
func=self.mkdir
)
acts.add(
name='CreateTokenIso',
conditions={'tmp_dir_state': True, 'tmp_dir_content': False},
effects={'tmp_dir_state': True, 'tmp_dir_content': True},
func=self.mktoken
)
node1 = Node(
attributes={
'tmp_dir_state': False,
'tmp_dir_content': False})
node2 = Node(
attributes={
'tmp_dir_state': True,
'tmp_dir_content': False})
node3 = Node(
attributes={
'tmp_dir_state': True,
'tmp_dir_content': True})
edge1 = Edge(
name='CreateTmpDirIso',
predecessor=node1,
successor=node2,
obj=acts.get('CreateTmpDirIso'))
edge2 = Edge(
name='CreateTokenIso',
predecessor=node2,
successor=node3,
obj=acts.get('CreateTokenIso'))
g1 = nx.DiGraph(nodes=[node1, node2, node3], edges=[edge1, edge2])
g2 = self.planner.graph.directed
assert nx.is_isomorphic(g1, g2) is True
def test_plan(self):
self.setUpDirHandlerCMD()
create_tmp_dir = self.planner.actions.get('CreateTmpDir')
create_token = self.planner.actions.get('CreateToken')
plan = self.planner.plan(state=self.init_ws, goal=self.gs)
print(type(plan))
print(plan)
action_plan = [action[2]['object'] for action in plan]
print(action_plan)
assert action_plan == [create_tmp_dir, create_token]
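# Minimal standalone sketch (not part of the test suite) of the same Planner flow
# exercised by test_plan above, runnable outside unittest; it assumes the goap
# package layout imported at the top of this file and only echoes commands
# instead of touching the filesystem.
if __name__ == "__main__":
    acts = Actions()
    acts.add(
        name='CreateTmpDir',
        conditions={'tmp_dir_state': False, 'tmp_dir_content': False},
        effects={'tmp_dir_state': True, 'tmp_dir_content': False},
        func=ShellCommand(command='echo mkdir')
    )
    acts.add(
        name='CreateToken',
        conditions={'tmp_dir_state': True, 'tmp_dir_content': False},
        effects={'tmp_dir_state': True, 'tmp_dir_content': True},
        func=ShellCommand(command='echo token')
    )
    start = WorldState({"tmp_dir_state": False, "tmp_dir_content": False})
    goal = WorldState({"tmp_dir_state": True, "tmp_dir_content": True})
    plan = Planner(actions=acts).plan(state=start, goal=goal)
    print([edge[2]['object'] for edge in plan])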
|
the-stack_0_25685
|
import pyglet
window = pyglet.window.Window()
label = pyglet.text.Label('PyGLet GUI',
font_size=42,
x=window.width//2, y=window.height//2,
anchor_x='center', anchor_y='center')
@window.event
def on_draw():
window.clear()
label.draw()
pyglet.app.run()
|
the-stack_0_25690
|
import random
import re
import string
import math
import requests
local_prefix = 'local-'
from os import environ, path
import json
def get_version_number():
return environ["version"]
def get_stage_name():
stage = environ["stage"]
return stage[len(local_prefix):] if stage.startswith(local_prefix) else stage
def is_production():
return get_stage_name() == "production"
def is_staging():
return get_stage_name() == "staging"
def is_onprem():
return not is_production() and not is_staging()
def is_local():
return environ["stage"].startswith(local_prefix)
def generate_salt():
return "".join(random.choices(string.hexdigits, k=36))
def unique_ordered_list(array):
uniq = []
[uniq.append(x) for x in array if x not in uniq]
return uniq
def unique_unordered_list(array):
return list(set(array))
def list_to_camel_case(items, flatten=False):
for i in range(len(items)):
if flatten:
items[i] = flatten_nested_dicts(items[i])
items[i] = dict_to_camel_case(items[i])
return items
def dict_to_camel_case(variable, delimiter='_', ignore_keys=[]):
if variable is None:
return None
if isinstance(variable, str):
return variable
elif isinstance(variable, dict):
aux = {}
for key in variable.keys():
if key in ignore_keys:
aux[key] = variable[key]
elif isinstance(variable[key], dict):
aux[key_to_camel_case(key, delimiter)] = dict_to_camel_case(variable[key])
elif isinstance(variable[key], list):
aux[key_to_camel_case(key, delimiter)] = list_to_camel_case(variable[key])
else:
aux[key_to_camel_case(key, delimiter)] = variable[key]
return aux
else:
return variable
def dict_to_CAPITAL_keys(variable):
if variable is None:
return None
if isinstance(variable, str):
return variable.upper()
elif isinstance(variable, dict):
aux = {}
for key in variable.keys():
if isinstance(variable[key], dict):
aux[key.upper()] = dict_to_CAPITAL_keys(variable[key])
else:
aux[key.upper()] = variable[key]
return aux
else:
return variable
def variable_to_snake_case(variable, delimiter='_', split_number=False):
if isinstance(variable, str):
return key_to_snake_case(variable, delimiter, split_number)
elif isinstance(variable, dict):
aux = {}
for key in variable.keys():
if isinstance(variable[key], dict):
aux[key_to_snake_case(key, delimiter, split_number)] = variable_to_snake_case(variable[key], delimiter,
split_number)
else:
aux[key_to_snake_case(key, delimiter, split_number)] = variable[key]
return aux
else:
return variable
def key_to_camel_case(snake_str, delimiter='_'):
if snake_str.startswith(delimiter):
snake_str = snake_str[1:]
components = snake_str.split(delimiter)
return components[0] + ''.join(x.title() for x in components[1:])
def key_to_snake_case(name, delimiter='_', split_number=False):
s1 = re.sub('(.)([A-Z][a-z]+)', fr'\1{delimiter}\2', name)
return re.sub('([a-z])([A-Z0-9])' if split_number else '([a-z0-9])([A-Z])', fr'\1{delimiter}\2', s1).lower()
TRACK_TIME = True
def __sbool_to_bool(value):
if value is None or not isinstance(value, str):
return False
return value.lower() in ["true", "yes", "1"]
def allow_captcha():
return environ.get("captcha_server") is not None and environ.get("captcha_key") is not None \
and len(environ["captcha_server"]) > 0 and len(environ["captcha_key"]) > 0
def allow_sentry():
return environ.get("sentryURL") is not None and len(environ["sentryURL"]) > 0
def async_post(endpoint, data):
data["auth"] = environ["async_Token"]
try:
requests.post(endpoint, timeout=1, json=data)
except requests.exceptions.ReadTimeout:
pass
def string_to_sql_like(value):
value = re.sub(' +', ' ', value)
value = value.replace("*", "%")
if value.startswith("^"):
value = value[1:]
elif not value.startswith("%"):
value = '%' + value
if value.endswith("$"):
value = value[:-1]
elif not value.endswith("%"):
value = value + '%'
# value = value.replace(" ", "%")
return value
def string_to_sql_like_with_op(value, op):
if isinstance(value, list) and len(value) > 0:
_value = value[0]
else:
_value = value
if _value is None:
return _value
if op.lower() != 'ilike':
return _value.replace("%", "%%")
_value = _value.replace("*", "%")
if _value.startswith("^"):
_value = _value[1:]
elif not _value.startswith("%"):
_value = '%' + _value
if _value.endswith("$"):
_value = _value[:-1]
elif not _value.endswith("%"):
_value = _value + '%'
return _value.replace("%", "%%")
def is_valid_email(email):
return re.match(r"[^@]+@[^@]+\.[^@]+", email) is not None
def is_valid_http_url(url):
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return re.match(regex, url) is not None
def is_valid_url(url):
regex = re.compile(
# r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return re.match(regex, url) is not None
def is_alphabet_space(word):
r = re.compile("^[a-zA-Z ]*$")
return r.match(word) is not None
def is_alphabet_latin_space(word):
r = re.compile("^[a-zA-Z\u00C0-\u00D6\u00D8-\u00f6\u00f8-\u00ff\s ]*$")
return r.match(word) is not None
def is_alphabet_space_dash(word):
r = re.compile("^[a-zA-Z -]*$")
return r.match(word) is not None
def is_alphanumeric_space(word):
r = re.compile("^[a-zA-Z0-9._\- ]*$")
return r.match(word) is not None
def merge_lists_by_key(l1, l2, key):
merged = {}
for item in l1 + l2:
if item[key] in merged:
merged[item[key]].update(item)
else:
merged[item[key]] = item
return [val for (_, val) in merged.items()]
def flatten_nested_dicts(obj):
if obj is None:
return None
result = {}
for key in obj.keys():
if isinstance(obj[key], dict):
result = {**result, **flatten_nested_dicts(obj[key])}
else:
result[key] = obj[key]
return result
def delete_keys_from_dict(d, to_delete):
if isinstance(to_delete, str):
to_delete = [to_delete]
if isinstance(d, dict):
for single_to_delete in set(to_delete):
if single_to_delete in d:
del d[single_to_delete]
for k, v in d.items():
delete_keys_from_dict(v, to_delete)
elif isinstance(d, list):
for i in d:
delete_keys_from_dict(i, to_delete)
return d
def explode_widget(data, key=None):
result = []
for k in data.keys():
if k.endswith("Progress") or k == "chart":
continue
result.append({"key": key_to_snake_case(k) if key is None else key, "data": {"value": data[k]}})
if k + "Progress" in data:
result[-1]["data"]["progress"] = data[k + "Progress"]
if "chart" in data:
result[-1]["data"]["chart"] = []
for c in data["chart"]:
result[-1]["data"]["chart"].append({"timestamp": c["timestamp"], "value": c[k]})
return result
TEMP_PATH = "./" if is_local() else "/tmp/"
def get_issue_title(issue_type):
return {'click_rage': "Click Rage",
'dead_click': "Dead Click",
'excessive_scrolling': "Excessive Scrolling",
'bad_request': "Bad Request",
'missing_resource': "Missing Image",
'memory': "High Memory Usage",
'cpu': "High CPU",
'slow_resource': "Slow Resource",
'slow_page_load': "Slow Page",
'crash': "Crash",
'ml_cpu': "High CPU",
'ml_memory': "High Memory Usage",
'ml_dead_click': "Dead Click",
'ml_click_rage': "Click Rage",
'ml_mouse_thrashing': "Mouse Thrashing",
'ml_excessive_scrolling': "Excessive Scrolling",
'ml_slow_resources': "Slow Resource",
'custom': "Custom Event",
'js_exception': "Error",
'custom_event_error': "Custom Error",
'js_error': "Error"}.get(issue_type, issue_type)
def __progress(old_val, new_val):
return ((old_val - new_val) / new_val) * 100 if new_val > 0 else 0 if old_val == 0 else 100
def __decimal_limit(value, limit):
factor = pow(10, limit)
value = math.floor(value * factor)
if value % factor == 0:
return value // factor
return value / factor
def is_free_open_source_edition():
return __sbool_to_bool(environ.get("isFOS"))
def is_enterprise_edition():
return __sbool_to_bool(environ.get("isEE"))
stag_config_file = f"chalicelib/.configs/{environ['stage']}.json"
if not path.isfile(stag_config_file):
print("!! stage config file not found, using .chalice/config.json only")
else:
print("!! stage config file found, merging with priority to .chalice/config.json")
with open(stag_config_file) as json_file:
config = json.load(json_file)
environ = {**config, **environ}
if (is_free_open_source_edition() or is_enterprise_edition()) and environ.get("config_file"):
if not path.isfile(environ.get("config_file")):
print("!! config file not found, using default environment")
else:
with open(environ.get("config_file")) as json_file:
config = json.load(json_file)
environ = {**environ, **config}
def get_internal_project_id(project_id64):
if project_id64 < 0x10000000000000 or project_id64 >= 0x20000000000000:
return None
project_id64 = (project_id64 - 0x10000000000000) * 4212451012670231 & 0xfffffffffffff
if project_id64 > 0xffffffff:
return None
project_id = int(project_id64)
return project_id
def has_smtp():
return environ["EMAIL_HOST"] is not None and len(environ["EMAIL_HOST"]) > 0
def get_edition():
return "foss" if is_free_open_source_edition() else "ee"
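# Quick self-check sketch (not part of the original module) for the pure string
# helpers defined above; importing this module already requires the `stage`
# environment bootstrapping near the end of the file to succeed, which is
# assumed here.
if __name__ == "__main__":
    assert key_to_camel_case("user_id") == "userId"
    assert key_to_snake_case("userId") == "user_id"
    assert dict_to_camel_case({"session_id": 1, "page_url": "/"}) == {"sessionId": 1, "pageUrl": "/"}
    assert string_to_sql_like("^hello world$") == "hello world"
    print("string helpers OK")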
|
the-stack_0_25691
|
import json
import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import MultiLabelBinarizer
import nltk
import joblib  # sklearn.externals.joblib was removed from scikit-learn; the standalone joblib package provides the same dump/load API
class TweetTagger:
def __init__(self, news_file, training_file, tag_map=dict()):
self.news_file = news_file
self.training_file = training_file
self.tag_map = tag_map
self.classifier = None
self.mlb = None
self.category_map = None
self.category_map_inverse = None
@staticmethod
def pre_process(news):
img_captions = []
for img in news["images"]:
if len(img) > 1:
img_captions.append(img[1])
all_captions = "\n".join(img_captions)
all_text = "\n".join(news["paragraphs"])
teaser = ("" if "teaser" not in news else news["teaser"])
all_content = "\n\n".join([news["title"], teaser, all_text, all_captions])
return all_content
def load_classifier(self, classifier_file):
with open(self.news_file, "r") as f:
news = json.load(f)
with open(self.training_file, "r") as f:
train = json.load(f)
categories = {}
for i, n in enumerate(train):
cs = train[n].split(sep=",")
for c in cs:
cat = c.strip()
if len(cat) > 1:
categories.setdefault(cat, 0)
cat_map = dict(zip(categories.keys(), range(len(categories))))
cat_map_inv = dict(zip(range(len(categories)), categories.keys()))
self.category_map = cat_map
self.category_map_inverse = cat_map_inv
train_data = []
train_target = []
for i, n in enumerate(train):
all_content = self.pre_process(news[n])
targets = []
cs = train[n].split(sep=",")
for c in cs:
cat = c.strip()
if len(cat) > 1:
targets.append(cat_map[cat])
train_data.append(all_content)
train_target.append(targets)
mlb = MultiLabelBinarizer()
mlb.fit(train_target)
train_target_binary = mlb.transform(train_target)
print("Number of labels: {}".format(len(train_target_binary[0])))
text_clf = joblib.load(classifier_file)
self.classifier = text_clf
self.mlb = mlb
def train_classifier(self):
with open(self.news_file, "r") as f:
news = json.load(f)
with open(self.training_file, "r") as f:
train = json.load(f)
stopwords = set(nltk.corpus.stopwords.words("german"))
categories = {}
for i, n in enumerate(train):
cs = train[n].split(sep=",")
for c in cs:
cat = c.strip()
if len(cat) > 1:
categories.setdefault(cat, 0)
self.category_map = dict(zip(categories.keys(), range(len(categories))))
self.category_map_inverse = dict(zip(range(len(categories)), categories.keys()))
train_data = []
train_target = []
for i, n in enumerate(train):
all_content = self.pre_process(news[n])
targets = []
cs = train[n].split(sep=",")
for c in cs:
cat = c.strip()
if len(cat) > 1:
targets.append(self.category_map[cat])
train_data.append(all_content)
train_target.append(targets)
self.mlb = MultiLabelBinarizer()
self.mlb.fit(train_target)
train_target_binary = self.mlb.transform(train_target)
self.classifier = Pipeline([("vect", CountVectorizer(stop_words=stopwords, ngram_range=(1, 3))),
("tfidf", TfidfTransformer()),
("clf", KNeighborsClassifier(n_neighbors=5, weights="distance"))])
self.classifier.fit(train_data, train_target_binary)
# Test run on all available news that haven't been used for training.
test_data = []
test_data_original = []
for i, n in enumerate(news):
if n not in train:
img_captions = []
for img in news[n]["images"]:
if len(img) > 1:
img_captions.append(img[1])
all_captions = "\n".join(img_captions)
all_text = "\n".join(news[n]["paragraphs"])
                teaser = ("" if "teaser" not in news[n] else news[n]["teaser"])
all_content = "\n\n".join([news[n]["title"], teaser, all_text, all_captions])
test_data.append(all_content)
test_data_original.append(news[n])
pred = self.classifier.predict(test_data)
pred_labels = self.mlb.inverse_transform(pred)
news_tagged = 0
tags_used = 0
tag_dict = {}
for i, p in enumerate(pred):
tags = []
for lid in list(pred_labels[i]):
tags.append(self.category_map_inverse[lid])
tag_dict.setdefault(self.category_map_inverse[lid], 0)
tag_dict[self.category_map_inverse[lid]] += 1
if len(tags) > 0:
# tweet = make_tweet(test_data_original[i], process_tags(tags))
# print("{} ::: {}".format(len(tweet), tweet))
news_tagged += 1
tags_used += len(tags)
# Print out some rudimentary metrics.
print("=================================================================================================")
print("Number of labels: {}".format(len(train_target_binary[0])))
print("{} news tagged... (ca. {}%)".format(news_tagged, round((news_tagged/len(test_data))*100)))
print("{} tags used...".format(tags_used))
print("Average tags per tagged news: {}".format(tags_used/news_tagged))
print("{} unique tags".format(len(tag_dict)))
print()
print(tag_dict)
print("=================================================================================================")
# joblib.dump(text_clf, "last_classifier.pkl")
def suggest_hashtags(self, news):
all_content = self.pre_process(news)
pred = self.classifier.predict([all_content])
pred_labels = self.mlb.inverse_transform(pred)
tags = []
for i, p in enumerate(pred):
for lid in list(pred_labels[i]):
tag = self.category_map_inverse[lid]
if tag in self.tag_map:
tag = self.tag_map[tag]
else:
tag = tag.capitalize()
tags.append(tag)
return tags
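# Hedged usage sketch (not part of the original class). The file names and tag map
# are hypothetical placeholders; it assumes JSON files shaped like the ones consumed
# by train_classifier() above and that the German NLTK stopword list is installed.
if __name__ == "__main__":
    tagger = TweetTagger("news.json", "training.json", tag_map={"fussball": "Fußball"})
    tagger.train_classifier()
    with open("news.json", "r") as f:
        all_news = json.load(f)
    first_item = next(iter(all_news.values()))
    print(tagger.suggest_hashtags(first_item))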
|
the-stack_0_25692
|
"""
SQLAlchemy-JSONAPI
Flask Adapter
Colton J. Provias
MIT License
"""
import datetime
import json
import uuid
from functools import wraps
from blinker import signal
from flask import make_response, request
from .constants import Endpoint, Method
from .errors import BaseError, MissingContentTypeError
from .serializer import JSONAPI
class JSONAPIEncoder(json.JSONEncoder):
""" JSONEncoder Implementation that allows for UUID and datetime """
def default(self, value):
"""
Handle UUID, datetime, and callables.
:param value: Value to encode
"""
if isinstance(value, uuid.UUID):
return str(value)
elif isinstance(value, datetime.datetime):
return value.isoformat()
elif callable(value):
return str(value)
return json.JSONEncoder.default(self, value)
#: The views to generate
views = [
(Method.GET, Endpoint.COLLECTION), (Method.GET, Endpoint.RESOURCE),
(Method.GET, Endpoint.RELATED), (Method.GET, Endpoint.RELATIONSHIP),
(Method.POST, Endpoint.COLLECTION), (Method.POST, Endpoint.RELATIONSHIP),
(Method.PATCH, Endpoint.RESOURCE), (Method.PATCH, Endpoint.RELATIONSHIP),
(Method.DELETE, Endpoint.RESOURCE), (Method.DELETE, Endpoint.RELATIONSHIP)
]
def override(original, results):
"""
If a receiver to a signal returns a value, we override the original value
with the last returned value.
:param original: The original value
:param results: The results from the signal
"""
overrides = [v for fn, v in results if v is not None]
if len(overrides) == 0:
return original
return overrides[-1]
class FlaskJSONAPI(object):
""" Flask Adapter """
#: Fires before the serializer is called. Functions should implement the
#: following args: (sender, method, endpoint, data, req_args)
on_request = signal('jsonapi-on-request')
#: Fires before we return the response. Included args are:
#: (sender, method, endpoint, data, req_args, rendered_response)
on_response = signal('jsonapi-on-response')
#: Fires after a successful call to the serializer.
#: (sender, method, endpoint, data, req_args, response)
on_success = signal('jsonapi-on-success')
#: Fires when an error is encountered.
#: (sender, method, endpoint, data, req_args, error)
on_error = signal('jsonapi-on-error')
#: JSON Encoder to use
json_encoder = JSONAPIEncoder
def __init__(self,
app=None,
sqla=None,
namespace='api',
route_prefix='/api'):
"""
Initialize the adapter. If app isn't passed here, it should be passed
in init_app.
:param app: Flask application
:param sqla: Flask-SQLAlchemy instance
:param namespace: Prefixes all generated routes
:param route_prefix: The base path for the generated routes
"""
self.app = app
self.sqla = sqla
self._handler_chains = dict()
if app is not None:
self._setup_adapter(namespace, route_prefix)
def init_app(self, app, sqla, namespace='api', route_prefix='/api'):
"""
Initialize the adapter if it hasn't already been initialized.
:param app: Flask application
:param sqla: Flask-SQLAlchemy instance
:param namespace: Prefixes all generated routes
:param route_prefix: The base path for the generated routes
"""
self.app = app
self.sqla = sqla
self._setup_adapter(namespace, route_prefix)
def wrap_handler(self, api_types, methods, endpoints):
"""
Allow for a handler to be wrapped in a chain.
:param api_types: Types to wrap handlers for
:param methods: Methods to wrap handlers for
:param endpoints: Endpoints to wrap handlers for
"""
def wrapper(fn):
@wraps(fn)
def wrapped(*args, **kwargs):
return fn(*args, **kwargs)
for api_type in api_types:
for method in methods:
for endpoint in endpoints:
key = (api_type, method, endpoint)
self._handler_chains.setdefault(key, [])
self._handler_chains[key].append(wrapped)
return wrapped
return wrapper
def _call_next(self, handler_chain):
"""
Generates an express-like chain for handling requests.
:param handler_chain: The current chain of handlers
"""
def wrapped(*args, **kwargs):
if len(handler_chain) == 1:
return handler_chain[0](*args, **kwargs)
else:
return handler_chain[0](self._call_next(handler_chain[1:]),
*args, **kwargs)
return wrapped
def _setup_adapter(self, namespace, route_prefix):
"""
Initialize the serializer and loop through the views to generate them.
:param namespace: Prefix for generated endpoints
:param route_prefix: Prefix for route patterns
"""
self.serializer = JSONAPI(
self.sqla.Model, prefix='{}://{}{}'.format(
self.app.config['PREFERRED_URL_SCHEME'],
self.app.config['SERVER_NAME'], route_prefix))
for view in views:
method, endpoint = view
pattern = route_prefix + endpoint.value
name = '{}_{}_{}'.format(namespace, method.name, endpoint.name)
view = self._generate_view(method, endpoint)
self.app.add_url_rule(pattern + '/',
name + '_slashed',
view,
methods=[method.name],
strict_slashes=False)
self.app.add_url_rule(pattern, name, view, methods=[method.name])
def _generate_view(self, method, endpoint):
"""
Generate a view for the specified method and endpoint.
:param method: HTTP Method
:param endpoint: Pattern
"""
def new_view(**kwargs):
if method == Method.GET:
data = request.args
else:
content_length = request.headers.get('content-length', 0)
if content_length and int(content_length) > 0:
content_type = request.headers.get('content-type', None)
if content_type != 'application/vnd.api+json':
data = MissingContentTypeError().data
data = json.dumps(data, cls=JSONAPIEncoder)
response = make_response(data)
response.status_code = 409
response.content_type = 'application/vnd.api+json'
return response
data = request.get_json(force=True)
else:
data = None
event_kwargs = {
'method': method,
'endpoint': endpoint,
'data': data,
'req_args': kwargs
}
results = self.on_request.send(self, **event_kwargs)
data = override(data, results)
args = [self.sqla.session, data, kwargs['api_type']]
if 'obj_id' in kwargs.keys():
args.append(kwargs['obj_id'])
if 'relationship' in kwargs.keys():
args.append(kwargs['relationship'])
try:
attr = '{}_{}'.format(method.name, endpoint.name).lower()
handler = getattr(self.serializer, attr)
handler_chain = list(self._handler_chains.get((
kwargs['api_type'], method, endpoint), []))
handler_chain.append(handler)
chained_handler = self._call_next(handler_chain)
response = chained_handler(*args)
results = self.on_success.send(self,
response=response,
**event_kwargs)
response = override(response, results)
except BaseError as exc:
self.sqla.session.rollback()
results = self.on_error.send(self, error=exc, **event_kwargs)
response = override(exc, results)
rendered_response = make_response('')
if response.status_code != 204:
data = json.dumps(response.data, cls=self.json_encoder)
rendered_response = make_response(data)
rendered_response.status_code = response.status_code
rendered_response.content_type = 'application/vnd.api+json'
results = self.on_response.send(self,
response=rendered_response,
**event_kwargs)
return override(rendered_response, results)
return new_view
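# Minimal wiring sketch (not part of the original module): attaching the adapter to
# a Flask app and a Flask-SQLAlchemy instance. Config values are placeholders and
# the API is only useful once models are declared on `db.Model`, which is assumed.
if __name__ == "__main__":
    from flask import Flask
    from flask_sqlalchemy import SQLAlchemy

    app = Flask(__name__)
    app.config['SERVER_NAME'] = 'localhost:5000'
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
    db = SQLAlchemy(app)

    api = FlaskJSONAPI(app, db, namespace='api', route_prefix='/api')
    app.run()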
|
the-stack_0_25693
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import time
import unittest
from unittest.mock import patch
from tests import mockLifepo4weredSO
import lifepo4weredPyController
class ReaderTests(unittest.TestCase):
"""
tearDown each test
"""
def tearDown(self):
lifepo4weredPyController.setPeriodicInterval(0.5)
lifepo4weredPyController.restartReading()
"""
Interval
"""
def testInterval_Set_ShouldChangeInterval(self):
# Arrange
# Action
lifepo4weredPyController.setPeriodicInterval(5)
# Assert
self.assertEqual(lifepo4weredPyController.getPeriodicInterval(), 5)
"""
Cease
"""
@patch('lifepo4weredPy.functions.lifepo4weredSO', new=mockLifepo4weredSO)
def testCease_ShouldCeaseReading(self):
# Arrange
handleCalled = False
count = 0
@lifepo4weredPyController.battery.observeElement("voltage")
def voltageChangeHandle(previous, actual):
nonlocal handleCalled
handleCalled = True
# Action
lifepo4weredPyController.ceaseReading()
while count < 3:
count += 1
time.sleep(0.5)
lifepo4weredPyController.battery.unObserve("voltage",
voltageChangeHandle)
# Assert
self.assertFalse(handleCalled)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.