| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
|---|---|---|---|---|---|---|---|---|---|---|
| ralbayaty/KaggleRetina | testing/censureHistCalc.py | 1 | 4517 |

from skimage.feature import CENSURE
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
import numpy as np
import cv2
import sys
from PIL import Image, ImageDraw
def draw_keypoints(img, kp, scale):
draw = ImageDraw.Draw(img)
# Draw a maximum of 300 keypoints
for i in range(min(len(scale),300)):
x1 = kp[i,1]
y1 = kp[i,0]
x2 = kp[i,1]+2**scale[i]
y2 = kp[i,0]+2**scale[i]
coords = (x1, y1, x2, y2)
draw.ellipse(coords, fill = None, outline ='white')
if __name__ == '__main__':
try:
file_name = sys.argv[1]
    except IndexError:
print("Didn't give me a file...")
file_name = "Lenna.png"
def nothing(*arg):
pass
# Create sliderbars to change the values of CENSURE parameters online
# Defaults: min_scale=1, max_scale=7, mode='DoB', non_max_threshold=0.15, line_threshold=10
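    # Note (added): OpenCV trackbars only hold integers, so non_max_threshold
    # is stored here as an int in [0, 1000] and divided by 1000.0 when it is
    # read back inside the loop below.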
cv2.namedWindow('censure')
cv2.createTrackbar('min_scale', 'censure', 1, 10, nothing)
cv2.createTrackbar('max_scale', 'censure', 7, 20, nothing)
cv2.createTrackbar('mode', 'censure', 2, 2, nothing)
cv2.createTrackbar('non_max_threshold', 'censure', 6, 1000, nothing)
cv2.createTrackbar('line_threshold', 'censure', 10, 100, nothing)
# Read image from file, then inspect the image dimensions
img = cv2.imread(file_name,1)
height, width, channels = img.shape
# Pull the different color channels from the image
blue = img[:,:,0]
green = img[:,:,1]
red = img[:,:,2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Make a PIL image from each channel so we can use PIL.Image.thumbnail to resize if needed
blue1 = Image.fromarray(blue)
green1 = Image.fromarray(green)
red1 = Image.fromarray(red)
gray1 = Image.fromarray(gray)
    # Check if dimensions are above desired; if so, resize keeping aspect ratio
m, n = 512, 512
if height > m or width > n:
blue1.thumbnail((m,n), Image.ANTIALIAS)
green1.thumbnail((m,n), Image.ANTIALIAS)
red1.thumbnail((m,n), Image.ANTIALIAS)
gray1.thumbnail((m,n), Image.ANTIALIAS)
# CENSURE related
mode_dict = {"0": "DoB", "1": "Octagon", "2": "STAR"}
last_num_kp = 0
while True:
        # The original referenced an undefined `img1`; start each iteration
        # from a fresh grayscale copy so drawn keypoints do not accumulate.
        img = gray1.copy()
# Read the values of the sliderbars and save them to variables
min_scale = cv2.getTrackbarPos('min_scale', 'censure')
max_scale = cv2.getTrackbarPos('max_scale', 'censure')
        if min_scale == 0:
min_scale = 1
        if max_scale < min_scale + 2:
max_scale = min_scale + 2
mode = mode_dict[str(cv2.getTrackbarPos('mode', 'censure'))]
non_max_threshold = float(cv2.getTrackbarPos('non_max_threshold', 'censure'))/1000
line_threshold = cv2.getTrackbarPos('line_threshold', 'censure')
# Create a CENSURE feature detector
censure = CENSURE(min_scale=min_scale, max_scale=max_scale, mode=mode,
non_max_threshold=non_max_threshold, line_threshold=line_threshold)
# Obtain the CENSURE features
censure.detect(blue1)
kp_blue, scale_blue = censure.keypoints, censure.scales
censure.detect(green1)
kp_green, scale_green = censure.keypoints, censure.scales
censure.detect(red1)
kp_red, scale_red = censure.keypoints, censure.scales
censure.detect(gray1)
kp_gray, scale_gray = censure.keypoints, censure.scales
# Print the # of features if it has changed between iterations
num_kp = len(censure.keypoints)
if last_num_kp != num_kp:
print("Number of keypoints: " + str(len(censure.keypoints)))
last_num_kp = num_kp
# Draw the feature points on the images
draw_keypoints(blue1, kp_blue, scale_blue)
draw_keypoints(green1, kp_green, scale_green)
draw_keypoints(red1, kp_red, scale_red)
        draw_keypoints(img, kp_gray, scale_gray)
# Obtain the histogram of scale values
plt.clf() # clear the figure from any previous plot
scale_hist, bin_edges = np.histogram(censure.scales,max_scale-min_scale, (min_scale,max_scale+1))
plt.bar(bin_edges[:-1]-0.5, scale_hist, width = 1)
plt.show(block=False)
plt.draw()
# Show the image with keypoints drawn over
        image = cv2.cvtColor(np.asarray(img), cv2.COLOR_GRAY2BGR)
cv2.imshow('censure', image)
        if (cv2.waitKey(500) & 0xFF) == 27:
break
    cv2.destroyAllWindows()

| gpl-2.0 | 159,560,390,159,278,880 | 36.032787 | 102 | 0.629179 | false |
| qiou/Dev | python/edf.py | 1 | 4511 |

#=========================================================================
# Dependencies / Libraries
#=========================================================================
import time
import serial
import MySQLdb
import subprocess
from time import sleep
import datetime
#=========================================================================
# Array/dictionary helper function
#=========================================================================
def checksum (etiquette, valeur):
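    # (added) EDF teleinfo checksum: sum the ASCII codes of the label and the
    # value, keep the low 6 bits, and shift by 32 into the printable ASCII
    # range. The initial 32 accounts for the space separating label and value.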
sum = 32
for c in etiquette: sum = sum + ord(c)
for c in valeur: sum = sum + ord(c)
sum = (sum & 63) + 32
return chr(sum)
#=========================================================================
# ReadTeleinfo function
#=========================================================================
def ReadTeleinfo ():
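    # (added) A teleinfo message is framed by STX (0x02) and ETX (0x03); each
    # line inside is "LABEL VALUE CHECKSUM" separated by CR/LF, which is what
    # the strip/split logic below unpacks.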
    # Wait for the start of the message
while ser.read(1) != chr(2): pass
message = ""
fin = False
while not fin:
char = ser.read(1)
if char != chr(2):
message = message + char
else:
fin = True
trames = [
trame.split(" ")
for trame in message.strip("\r\n\x03").split("\r\n")
]
tramesValides = dict([
[trame[0],trame[1]]
for trame in trames
if (len(trame) == 3) and (checksum(trame[0],trame[1]) == trame[2])
])
return tramesValides
# print('Reading teleinfo frames with the RPIDOM board')
#=========================================================================
# Connect to the serial port
#=========================================================================
ser = serial.Serial(
port='/dev/ttyAMA0',
baudrate=1200,
parity=serial.PARITY_EVEN,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.SEVENBITS )
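# (added) EDF teleinfo links run at 1200 baud, 7 data bits, even parity (7E1).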
#=========================================================================
# Define the frame variables and load initial values
#=========================================================================
vIINST = 0
vMOTDETAT = 0
vOPTARIF = 0
vISOUSC = 0
vADCO = 0
vPAPP = 0
vIMAX = 0
vBASE = 0
vADPS = 0
#=========================================================================
# Read serial data
#=========================================================================
#print '\nFirst channel'
ser.write('A')
sleep(1)
ser.flushInput()
tramesOk = ReadTeleinfo()
trouve = False
for etiquette in tramesOk:
if etiquette == 'IINST':
#print etiquette , ":", tramesOk[etiquette]
vIINST = tramesOk[etiquette]
if etiquette == 'MOTDETAT':
#print etiquette , ":", tramesOk[etiquette]
vMOTDETAT = tramesOk[etiquette]
if etiquette == 'OPTARIF':
#print etiquette , ":", tramesOk[etiquette]
vOPTARIF = tramesOk[etiquette]
if etiquette == 'ISOUSC':
#print etiquette , ":", tramesOk[etiquette]
vISOUSC = tramesOk[etiquette]
if etiquette == 'ADCO':
#print etiquette , ":", tramesOk[etiquette]
vADCO = tramesOk[etiquette]
if etiquette == 'PAPP':
#print etiquette , ":", tramesOk[etiquette]
vPAPP = tramesOk[etiquette]
if etiquette == 'IMAX':
#print etiquette , ":", tramesOk[etiquette]
vIMAX = tramesOk[etiquette]
if etiquette == 'BASE':
#print etiquette , ":", tramesOk[etiquette]
vBASE = tramesOk[etiquette]
if etiquette == 'ADPS':
#print etiquette , ":", tramesOk[etiquette]
vADPS = tramesOk[etiquette]
#=========================================================================
# Date and Hour
#=========================================================================
vHEURE = datetime.datetime.now().strftime('%H:%M')
vDATE = datetime.datetime.today().strftime('%Y-%m-%d')
#=========================================================================
# Connect and insert into DB
#=========================================================================
db = MySQLdb.connect(host="192.168.1.250",port=3307,user="root",passwd="MariaQiou",db="edf" )
cursor = db.cursor()
if int(vBASE) > 0:
cursor.execute("""INSERT INTO teleinfo(DATE, HEURE, IINST, MOTDETAT, OPTARIF, ISOUSC, ADCO, PAPP, IMAX, BASE, ADPS) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""" ,(vDATE, vHEURE, vIINST, vMOTDETAT, vOPTARIF, vISOUSC, vADCO, vPAPP, vIMAX, vBASE, vADPS))
# Write into DB
db.commit()
db.rollback()
db.close()
#=========================================================================
ser.close()
| gpl-2.0 | 3,979,199,428,035,871,000 | 35.379032 | 265 | 0.441809 | false |
| rithms/hearthstone | xml_to_json.py | 1 | 4835 |

#!/usr/bin/env python
from bs4 import BeautifulSoup
import glob
import json
#############################################
# Convert Hearthstone card data XML to JSON #
#############################################
__author__ = "Taylor Caldwell - http://github.com/rithms"
__copyright__ = "Copyright 2015, Taylor Caldwell"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Taylor Caldwell"
__email__ = "[email protected]"
__status__ = "Production"
# EnumIds - Non-Boolean
enum_dict = {
45 : "health",
47 : "attack",
48 : "cost",
183 : "cardSet",
184 : "cardTextInHand",
185 : "cardName",
187 : "durability",
199 : "class",
200 : "race",
201 : "faction",
202 : "cardType",
203 : "rarity",
251 : "attackVisualType",
252 : "cardTextInPlay",
268 : "devState",
325 : "targetingArrowText",
330 : "enchantmentBirthVisual",
331 : "enchantmentIdleVisual",
342 : "artistName",
351 : "flavorText",
365 : "howToGetThisGoldCard",
364 : "howToGetThisCard",
#377 : "unknownHasOnDrawEffect",
#380 : "unknownBlackrockHeroes",
#389 : "unknownDuneMaulShaman",
#402 : "unknownIntenseGaze",
#401 : "unknownBroodAffliction"
}
# EnumIds - Boolean
bool_dict = {
32 : "Trigger Visual",
114 : "elite",
321 : "collectible",
189 : "Windfury",
190 : "Taunt",
191 : "Stealth",
192 : "Spell Power",
194 : "Divine Shield",
197 : "Charge",
205 : "Summoned",
208 : "Freeze",
212 : "Enrage",
215 : "Overload",
217 : "Deathrattle",
218 : "Battlecry",
219 : "Secret",
220 : "Combo",
240 : "Can't Be Damaged",
293 : "Morph",
335 : "Invisible Deathrattle",
338 : "One Turn Effect",
339 : "Silence",
340 : "Counter",
349 : "Immune To Spell Power",
350 : "Adjacent Buff",
361 : "Heal Target",
362 : "Aura",
363 : "Poisonous",
367 : "AI Must Play",
370 : "Affected By Spell Power",
388 : "Spare Part",
}
# Card Class IDs
class_dict = {
0 : "Developer",
2 : "Druid",
3 : "Hunter",
4 : "Mage",
5 : "Paladin",
6 : "Priest",
7 : "Rogue",
8 : "Shaman",
9 : "Warlock",
10 : "Warrior",
11 : "Dream"
}
# Card Set IDs
set_dict = {
2 : "Basic",
3 : "Classic",
4 : "Reward",
5 : "Missions",
7 : "System",
8 : "Debug",
11 : "Promotion",
12 : "Curse of Naxxramas",
13 : "Goblin vs Gnomes",
14 : "Blackrock Mountain",
16 : "Credits"
}
# Card Type IDs
type_dict = {
3 : "Hero",
4 : "Minion",
5 : "Spell",
6 : "Enchantment",
7 : "Weapon",
10 : "Hero Power"
}
# Card Race IDs
race_dict = {
14 : "Murloc",
15 : "Demon",
17 : "Mechanical",
20 : "Beast",
21 : "Totem",
23 : "Pirate",
24 : "Dragon"
}
# Card Faction IDs
faction_dict = {
1 : "Horde",
2 : "Alliance",
3 : "Neutral"
}
# Card Rarity IDs
rarity_dict = {
0 : "Developer",
1 : "Common",
2 : "Free",
3 : "Rare",
4 : "Epic",
5 : "Legendary"
}
# Get the name of the corresponding enum ID
def get_name(enum_id, d):
if enum_id in d:
return d[enum_id]
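    # (added) falls through to an implicit None when the id is unknown, so
    # unmapped enum ids are silently dropped by the callers below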
for f in glob.glob('cardxml0/CAB-cardxml0/TextAsset/*.txt'):
with open(f) as cardfile:
file_name = f.split('/')[-1].split('.')[0]
cardsoup = BeautifulSoup(cardfile.read(), features="xml")
cards = cardsoup.find_all('Entity')
json_dict = { 'data' : {} }
for card in cards:
card_id = card.get('CardID')
json_dict['data'][card_id] = { 'id' : card_id, 'mechanics' : [] }
tags = card.find_all('Tag')
for tag in tags:
enum_id = int(tag.get('enumID'))
if(tag.get('type') == 'String'):
enum_name = tag.text
else:
enum_name = tag.get('value')
if enum_id in enum_dict:
field = enum_dict[enum_id]
if field == 'class':
enum_name = get_name(int(enum_name), class_dict)
elif field == 'cardSet':
                        enum_name = get_name(int(enum_name), set_dict)
elif field == 'cardType':
enum_name = get_name(int(enum_name), type_dict)
elif field == 'race':
enum_name = get_name(int(enum_name), race_dict)
elif field == 'faction':
enum_name = get_name(int(enum_name), faction_dict)
elif field == 'rarity':
enum_name = get_name(int(enum_name), rarity_dict)
json_dict['data'][card_id][enum_dict[enum_id]] = enum_name
elif enum_id in bool_dict:
field = bool_dict[enum_id]
if field == 'collectible' or field == 'elite':
if enum_name == '1':
json_dict['data'][card_id][field] = True
elif enum_name == '0':
json_dict['data'][card_id][field] = False
else:
if enum_name == '1':
json_dict['data'][card_id]['mechanics'].append(field)
for key in bool_dict:
field = bool_dict[key]
if field == 'collectible' or field == 'elite':
if field not in json_dict['data'][card_id]:
json_dict['data'][card_id][field] = False
if not json_dict['data'][card_id]['mechanics']:
del json_dict['data'][card_id]['mechanics']
with open(file_name+'.json', 'w') as outfile:
json.dump(json_dict, outfile, sort_keys=True)
| mit | 7,331,306,430,884,571,000 | 20.20614 | 67 | 0.588211 | false |
| bodedev/prospera | plataforma/management/commands/atualizar_saldos.py | 1 | 2085 |

# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.management.base import BaseCommand
from plataforma.constants import ETHER_DIVISOR
from plataforma.models import Saldo
import requests
def buscar_saldo(carteira):
try:
r = requests.get("https://api.etherscan.io/api?module=account&action=tokenbalance&contractaddress=%s&address=%s&tag=latest&apikey=%s" % (settings.ETHERSCAN_CONTRACT_ADDRESS, carteira, settings.ETHERSCAN_APIKEY))
if r.status_code == 200:
data = r.json()
if data["status"] == "1":
saldo = float(data["result"]) / float(ETHER_DIVISOR)
_, created = Saldo.objects.update_or_create(carteira=carteira, defaults={"total": saldo})
print "%s: %0.6f (%s)" % (carteira, saldo, str(created))
return True
return False
    except Exception, e:
        print "Could not fetch the balance for wallet %s: %s" % (carteira, e)
return None
class Command(BaseCommand):
help = u"Atualiza o saldo de todas as carteiras de um contrato."
def handle(self, *args, **options):
url = "https://api.etherscan.io/api?module=logs&action=getLogs&fromBlock=%s&toBlock=latest&address=%s&apikey=%s" % (settings.ETHERSCAN_START_BLOCK_NUMBER, settings.ETHERSCAN_CONTRACT_ADDRESS, settings.ETHERSCAN_APIKEY)
r = requests.get(url)
data = r.json()
saldos_atualizados = []
for transacion in data["result"]:
carteira_from = transacion["topics"][1].replace("0x000000000000000000000000", "0x")
if carteira_from not in saldos_atualizados:
if buscar_saldo(carteira_from):
saldos_atualizados.append(carteira_from)
if len(transacion["topics"]) >= 3:
carteira_to = transacion["topics"][2].replace("0x000000000000000000000000", "0x")
if carteira_to not in saldos_atualizados:
if buscar_saldo(carteira_to):
saldos_atualizados.append(carteira_to)
print "Fim de processo!"
| mit | -6,934,085,334,793,678,000 | 44.326087 | 226 | 0.632134 | false |
| iocoop/beancount | etc/find-missing-tests.py | 1 | 2218 |

#!/usr/bin/env python3
"""
Find missing test coverage in our source code.
This program find source code and warns us if associated tests are
missing or incomplete. This is used to track progress in test coverage
and to ensure that the entire software suite is covered by appropriate
testing code.
"""
import os
from os import path
import re
def find_missing_tests(source_dir):
"""Find source files with incomplete tests.
Args:
source_dir: A string, the name of the source directory.
Yields:
Tuples of source filename, test filename, and an is-missing boolean.
"""
for root, dirs, files in os.walk(source_dir):
for relative_filename in files:
if ((not relative_filename.endswith('.py')) or
relative_filename.endswith('_test.py') or
relative_filename == '__init__.py'):
continue
filename = path.join(root, relative_filename)
test_filename = re.sub('.py$', '_test.py', filename)
if not path.exists(test_filename):
yield (filename, test_filename, True)
elif not is_complete(test_filename):
yield (filename, test_filename, False)
def is_complete(filename):
"""A predicate that is true if the given test file is incomplete.
Args:
filename: A string, the name of a test file.
Returns:
A boolean, true if the tests are complete.
"""
contents = open(filename, encoding='utf-8').read()
return not (re.search('^__incomplete__', contents, re.M) or
re.search(r'raise \bNotImplementedError\b', contents, re.M))
def main():
import argparse
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('source_root', action='store')
opts = parser.parse_args()
root_dir = path.realpath(path.dirname(path.dirname(__file__)))
missing_tests = list(find_missing_tests(opts.source_root))
if missing_tests:
for filename, test_filename, missing in missing_tests:
missing_str = 'MISSING' if missing else 'INCOMPLETE'
print('Missing Test: {:60} {}'.format(filename, missing_str))
if __name__ == '__main__':
main()
| gpl-2.0 | 8,152,978,246,822,283,000 | 32.606061 | 76 | 0.638864 | false |
| jas0n1ee/SonyCameraAPI | takePicture.py | 1 | 1212 |

#!/usr/bin/env python
from sonyAPI2 import API2
import cv2
import urllib2
import numpy as np
import time
import struct
api = API2()
api.update_api_list()
try:
result = api.do('getAvailableCameraFunction')
current = result['result'][0]
    available = result['result'][1]
    if current != "Remote Shooting":
        if "Remote Shooting" in available:
            api.do('setCameraFunction',["Remote Shooting"])
            api.update_api_list()
        else:
            print "Remote Shooting not available"
except KeyError:
print result
try:
result = api.do('getAvailableShootMode')
current = result['result'][0]
    available = result['result'][1]
    if current != "still":
        if "still" in available:
            api.do('setShootMode',["still"])
            api.update_api_list()
        else:
            print "still shooting not available"
except KeyError:
print result
try:
result = api.do('actTakePicture')
url = result['result'][0][0]
except KeyError:
print result
except TypeError:
print result
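# (added) 'actTakePicture' returns a nested list of postview URLs; the first
# entry is fetched below and the JPEG bytes are decoded with OpenCV.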
f = urllib2.urlopen(url)
d = np.asarray(bytearray(f.read()), dtype='uint8')
img = cv2.imdecode(d,cv2.IMREAD_COLOR)
cv2.imshow('postview',img)
time.sleep(10)
| apache-2.0 | 5,010,609,995,021,559,000 | 23.734694 | 59 | 0.640264 | false |
| depboy/p2pool-depboy | p2pool/bitcoin/networks/digibyteSkein.py | 1 | 1236 |

import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
P2P_PREFIX = 'fac3b6da'.decode('hex') #pchmessagestart
P2P_PORT = 12024
ADDRESS_VERSION = 30 #pubkey_address
RPC_PORT = 14022
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
'digibyteaddress' in (yield bitcoind.rpc_help()) and
not (yield bitcoind.rpc_getinfo())['testnet']
))
SUBSIDY_FUNC = lambda height: __import__('digibyte_subsidy').GetBlockBaseValue(height)
POW_FUNC=lambda data: pack.IntType(256).unpack(__import__('skeinhash').getPoWHash(data))
BLOCK_PERIOD = 150 # s
SYMBOL = 'DGB'
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'digibyte') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/digibyte/') if platform.system() == 'Darwin' else os.path.expanduser('~/.digibyte'), 'digibyte.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://digiexplorer.info/block/'
ADDRESS_EXPLORER_URL_PREFIX = 'http://digiexplorer.info/address/'
TX_EXPLORER_URL_PREFIX = 'http://digiexplorer.info/tx/'
SANE_TARGET_RANGE=(2**256//2**32//1000 - 1, 2**256//2**27 - 1)
DUMB_SCRYPT_DIFF = 1
DUST_THRESHOLD = 0.001e8
| gpl-3.0 | -307,123,748,108,533,950 | 43.142857 | 276 | 0.716828 | false |
| junhe/chopper | src/MWpyFS/Monitor.py | 1 | 44187 |

# Chopper is a diagnostic tool that explores file systems for unexpected
# behaviors. For more details, see paper Reducing File System Tail
# Latencies With Chopper (http://research.cs.wisc.edu/adsl/Publications/).
#
# Please send bug reports and questions to [email protected].
#
# Written by Jun He at University of Wisconsin-Madison
# Copyright (C) 2015 Jun He ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# The monitor is used to monitor the FS fragmentation status.
# What I want to see is, generally, how's the metadata. This may include:
#
# SIZE of inode and extent tree. (number of inode block and extent tree
# block). This can be find by debugfs "dump_extents [-n] [-l] filespec".
# But you have to do it for ALL files in the file system, which might be
# slow. I haven't got a better approach. A good indicator of metadata
# problem is #_metadata_block/#_data_block. This should be very informative
# about the aging of a file system which causes metadata disaster.
# I expect the following from the output of this per file:
#
# filepath create_time n_metablock n_datablock metadata_ratio filebytes
#
# Extent fragmentation overview. This can be obtained by e2freefrag. This
# should give me a good sense of how fragemented the FS is. The acceleration
# rate of fragmentation might be a good indicator of whether a workload
# can cause metadata problem. (Because of fragmentation, physical blocks
# might not be able to allocated contiguously, then it needs two or more
# extents to the logically contiguous blocks.)
# I expect the following from the output of this per FS:
# JUST LIKE THE ORIGINAL OUTPUT BUT FORMAT IT A LITTLE BIT
#
#
#
#
# TODO:
# 1. I need to figure out a good way to figure out
# dspan of the interested files.
# 2. Is there a better way in btrfs to find only the
# interested file, other than deleting all the
# uninteresting file.
#
import subprocess
from time import strftime, localtime, sleep
import re
import shlex
import os
import pprint
import shutil
import fnmatch
import itertools
import glob
import btrfs_db_parser
import xfs_db_parser
import dataframe
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = newPath
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
def fill_white_space(path, filler="_"):
    path = path.strip()
    return path.replace(" ", filler)
class FSMonitor:
"""
This monitor probes the ext4 file system and return information I
want in a nice format.
"""
def __init__(self, dn, mp, ld="/tmp", cw=20, filesystem='ext4'):
self.devname = dn # this should be the device name of the partition
self.mountpoint = mp # please only provide path without mountpoint
# when using this class.
self.col_width = cw
self.logdir = ld
self.resetMonitorTime()
self.resetJobID()
self.filesystem = filesystem # the file system this monitor monitors
def resetMonitorTime(self, monitorid=""):
"monitor_time is used to identify each data retrieval"
if monitorid == "":
self.monitor_time = strftime("%Y-%m-%d-%H-%M-%S", localtime())
else:
self.monitor_time = monitorid
def resetJobID(self, jobid="DefaultJOBID"):
self.jobid = jobid
def _spliter_dumpfs(self, line):
line = line.replace(",", " ")
elems = line.split(":")[1]
elems = elems.split()
new_elems = [] # [[a0,a1],[b0,b1]...]
for elem in elems:
e = elem.split("-")
elen = len(e)
if elen == 2:
new_elems.append(e)
elif elen == 1:
e = e*2
new_elems.append(e)
else:
print "wrong split", elem
exit(1)
return new_elems
def dumpfsSummary(self):
if self.filesystem != 'ext4':
return
print "dumpfs..."
cmd = ["dumpe2fs", "-h", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
print "dumpfs finished. Parsing results..."
proc.wait()
return proc.communicate()[0]
def dumpfs(self):
if self.filesystem != 'ext4':
return
print "dumpfs..."
cmd = ["dumpe2fs", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
print "dumpfs finished. Parsing results..."
freeblocks = []
freeinodes = []
for line in proc.stdout:
if line.startswith(" Free blocks:"):
freeblocks += self._spliter_dumpfs(line)
elif line.startswith(" Free inodes:"):
freeinodes += self._spliter_dumpfs(line)
else:
pass
proc.wait()
# initialize
freeblocks_df = dataframe.DataFrame(header=['start', 'end'],
table=freeblocks)
freeinodes_df = dataframe.DataFrame(header=['start', 'end'],
table=freeinodes)
# add additional columns
freeblocks_df.addColumn(key="monitor_time",
value=self.monitor_time)
freeblocks_df.addColumn(key="jobid",
value=self.jobid)
freeblocks_df.addColumn(key="HEADERMARKER_freeblocks",
value="DATAMARKER_freeblocks")
freeinodes_df.addColumn(key="monitor_time",
value=self.monitor_time)
freeinodes_df.addColumn(key="jobid",
value=self.jobid)
freeinodes_df.addColumn(key="HEADERMARKER_freeinodes",
value="DATAMARKER_freeinodes")
return {"freeblocks":freeblocks_df, "freeinodes":freeinodes_df}
def e2freefrag(self):
if self.filesystem != 'ext4':
return
cmd = ["e2freefrag", self.devname]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
proc.wait()
part = 0
sums_dict = {}
hist_table = ""
hist_df = dataframe.DataFrame()
for line in proc.stdout:
if part == 0:
if "HISTOGRAM" in line:
part = 1
continue
mo = re.search( r'(.*): (\d+)', line, re.M)
if mo:
keyname = mo.group(1)
keyname = keyname.replace('.', '')
keyname = "_".join(keyname.split())
sums_dict[keyname] = mo.group(2)
elif part == 1:
# This part is the histogram.
line = line.strip()
if "Extent Size" in line:
hist_table = "Extent_start Extent_end Free_extents Free_Blocks Percent"
hist_df.header = hist_table.split()
continue
fline = re.sub(r'[\-:\n]', "", line)
fline = re.sub(r'\.{3}', "", fline)
row = fline.split()
hist_df.addRowByList(row)
hist_df.addColumns(keylist = ["HEADERMARKER_freefrag_hist",
"monitor_time",
"jobid"],
valuelist = ["DATAMARKER_freefrag_hist",
self.monitor_time,
self.jobid])
# convert dict to data frame
sums_df = dataframe.DataFrame(header=sums_dict.keys(),
table=[sums_dict.values()])
sums_df.addColumn(key="HEADERMARKER_freefrag_sum",
value="DATAMARKER_freefrag_sum")
sums_df.addColumn(key="monitor_time",
value=self.monitor_time)
sums_df.addColumn(key="jobid",
value=self.jobid)
return {"FragSummary":sums_df, "ExtSizeHistogram":hist_df}
def imap_of_a_file(self, filepath):
if self.filesystem != 'ext4':
return
#cmd = "debugfs " + self.devname + " -R 'imap " + filepath + "'"
cmd = ['debugfs', self.devname, '-R', 'imap "' + filepath + '"']
print cmd, '......'
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
imapdict = {}
for line in proc.stdout:
#print line
if "block group" in line:
nums = re.findall(r'\d+', line)
if len(nums) != 2:
print "Error parsing imap"
exit(1)
imapdict['inode_number'] = nums[0]
imapdict['group_number'] = nums[1]
elif 'located at block' in line:
items = line.split()
imapdict['block_number'] = items[3].rstrip(',')
imapdict['offset_in_block'] = items[5]
proc.wait()
#print imapdict
return imapdict
def dump_extents_of_a_file(self, filepath):
"This function only gets ext list for this file"
if self.filesystem != 'ext4':
return
#print "filepath:", filepath
#cmd = "debugfs " + self.devname + " -R 'dump_extents " + filepath + "'"
cmd = ['debugfs', self.devname, '-R', 'dump_extents "' + filepath + '"']
#print cmd, '......'
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
ext_list = [] # Use list here in case I want to extract data in Python
header = []
max_level = 0
df_ext = dataframe.DataFrame()
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag"]
df_ext.header = header
for line in proc.stdout:
#print "LLL:", line,
if "Level" in line:
pass
else:
savedline = line
line = re.sub(r'[/\-]', " ", line)
tokens = line.split()
if len(tokens) == 8:
# there is no physical end
tokens.insert(7, tokens[6]) #TODO: this is dangerous
d = {}
for i in range(9):
try:
d[ header[i] ] = tokens[i]
except:
print savedline
print "token:", tokens
print "header:", header # having a try-except can grant you
# the opportunity to do something
# after bad thing happen
if len(tokens) == 10:
d["Flag"] = tokens[10]
else:
d["Flag"] = "NA"
df_ext.addRowByDict(d)
proc.wait()
# Put the location of the inode the df_ext, level_index as -1 to
# indicate that it is a inode
imapdict = self.imap_of_a_file(filepath)
d = {}
d['Level_index'] = '-1'
d['Max_level'] = '-1'
d['Entry_index'] = 'NA'
d['N_Entry'] = 'NA'
d['Logical_start'] = 'NA'
d['Logical_end'] = 'NA'
d['Physical_start'] = imapdict['block_number']
d['Physical_end'] = imapdict['block_number']
d['Length'] = '1'
d['Flag'] = 'NA'
df_ext.addRowByDict(d)
df_ext.addColumn(key = "filepath",
value = fill_white_space(filepath))
df_ext.addColumn(key = "HEADERMARKER_extlist",
value = "DATAMARKER_extlist")
df_ext.addColumn(key = "jobid",
value = self.jobid)
df_ext.addColumn(key = "monitor_time",
value = self.monitor_time)
return df_ext
def setBlock(self, blockn, count):
if self.filesystem != 'ext4':
return
cmd = "debugfs " + self.devname + \
" -w -R 'setb " + str(blockn) + " " + str(count) + "'"
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
proc.wait()
return proc.returncode
def isAllBlocksInUse(self, blockn, count):
"if any of the blocks is not in use, return false. return true otherwise"
if self.filesystem != 'ext4':
return
cmd = "debugfs " + self.devname + \
" -w -R 'testb " + str(blockn) + " " + str(count) + "'"
cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
if 'not' in line:
return False
proc.wait()
return True
def dumpextents_sum(self, filepath):
"TODO: merge this with dump_extents_of_a_file()"
if self.filesystem != 'ext4':
return
#cmd = "debugfs " + self.devname + " -R 'dump_extents " + filepath + "'"
#cmd = ['debugfs', self.devname, '-R', '"dump_extents ' + filepath + '"']
cmd = ['debugfs', self.devname, '-R', 'dump_extents "' + filepath + '"']
#print cmd, "........."
#cmd = shlex.split(cmd)
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
header = []
n_entries = [0] * 3 # n_entries[k] is the number of entries at level k
# it can be used to calculate number of
# internal/leaf nodes
max_level = 0
exttable = ""
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag"]
for line in proc.stdout:
#print "LLL:", line,
if "Level" in line:
pass
else:
savedline = line
line = re.sub(r'[/\-]', " ", line)
tokens = line.split()
if len(tokens) == 8:
# there is no physical end
tokens.insert(7, "NA") #TODO: this is dangerous
d = {}
for i in range(9):
try:
d[ header[i] ] = tokens[i]
except:
print savedline
print "token:", tokens
print "header:", header # having a try-except can grant you
# the opportunity to do something
# after bad thing happen
if len(tokens) == 10:
d["Flag"] = tokens[10]
else:
d["Flag"] = "NA"
n_entries[ int(d["Level_index"]) ] = int( d["N_Entry"] )
max_level = int( d["Max_level"] )
#print "..... finished stdout parsing .... "
proc.terminate()
#print "..... after terminating .... "
# calculate number of meatadata blocks
# only 1st and 2nd levels takes space.
# How to calculate:
# if there is only 1 level (root and level 1).
# the number of entires in level 0 indicates the
# number of nodes in level 1.
# Basically, the number of entries in level i
# equals the number of ETB of the next level
n_metablock = 0
if max_level == 0:
# the tree has no extent tree block outside of the inode
n_metablock = 0
else:
for n in n_entries[0:max_level]:
n_metablock += n
dumpdict = {}
dumpdict["filepath"] = fill_white_space(filepath)
dumpdict["n_metablock"] = n_metablock
others = self.filefrag(filepath)
if others.has_key('nblocks'):
dumpdict["n_datablock"] = others["nblocks"]
else:
dumpdict["n_datablock"] = 'NA'
if others.has_key('nbytes'):
dumpdict["filebytes"] = others["nbytes"]
else:
dumpdict["filebytes"] = 'NA'
#print "Reached end of debugfs...."
return dumpdict
def filefrag(self, filepath):
if self.filesystem != 'ext4':
return
fullpath = os.path.join(self.mountpoint, filepath)
cmd = ["filefrag", "-sv", fullpath]
#print cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
mydict = {}
for line in proc.stdout:
if line.startswith("File size of"):
#print line
line = line.split(" is ")[1]
#print line
nums = re.findall(r'\d+', line)
if len(nums) != 3:
print "filefrag something wrong"
exit(1)
mydict["nbytes"] = nums[0]
mydict["nblocks"] = nums[1]
mydict["blocksize"] = nums[2]
return mydict
def getAllInodePaths(self, target="."):
"it returns paths of all files and diretories"
rootpath = os.path.join(self.mountpoint)
paths = []
with cd(rootpath):
cmd = ['find', target]
print cmd
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
paths.append(line.replace("\n", ""))
proc.wait()
return paths
def getExtentList_of_a_dir(self, target):
"""
this only works for absolute path
"""
if self.filesystem != 'ext4':
return
#files = self.getAllInodePaths(target)
files = get_all_my_files(target)
#print files
#exit(1)
df = dataframe.DataFrame()
for f in files:
f = os.path.relpath(f, target)
if len(df.header) == 0:
df = self.dump_extents_of_a_file(f)
else:
df.table.extend( self.dump_extents_of_a_file(f).table )
return df
def getPerFileBlockCounts(self, rootdir="."):
if self.filesystem != 'ext4':
return
files = self.getAllInodePaths(rootdir)
counts_df = dataframe.DataFrame()
for f in files:
d = self.dumpextents_sum(f)
if len(counts_df.header) == 0:
counts_df.header = d.keys()
counts_df.addRowByDict(d)
counts_df.addColumns(keylist=["HEADERMARKER_extstats",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extstats",
self.monitor_time,
self.jobid])
return counts_df
def getFSBlockCount(self, df_files):
"df_files has number of metablocks datablocks of each file"
if self.filesystem != 'ext4':
return
if len(df_files.table) == 0:
return ""
fs_nmetablocks = 0
fs_ndatablocks = 0
nmetaindex = df_files.header.index('n_metablock')
ndataindex = df_files.header.index('n_datablock')
for row in df_files.table:
if row[nmetaindex] == 'NA' or row[ndataindex] == 'NA':
fs_nmetablocks = 'NA'
fs_ndatablocks = 'NA'
break
fs_nmetablocks += int(row[nmetaindex])
fs_ndatablocks += int(row[ndataindex])
headerstr = "fs_nmetablocks fs_ndatablocks monitor_time HEADERMARKER_extstatssum jobid"
valuelist = [fs_nmetablocks, fs_ndatablocks, self.monitor_time,
'DATAMARKER_extstatssum', self.jobid]
fsblkcount_df = dataframe.DataFrame(
header=headerstr.split(),
table=[valuelist])
return fsblkcount_df
def widen(self, s):
return s.ljust(self.col_width)
def dict2table(self, mydict):
mytable = ""
header = ""
for keyname in mydict:
header += self.widen(keyname) + " "
header += self.widen("monitor_time") + " HEADERMARKER_freefrag_sum\n"
vals = ""
for keyname in mydict:
vals += self.widen(mydict[keyname]) + " "
vals += self.widen(str(self.monitor_time)) + " DATAMARKER_freefrag_sum\n"
return header + vals
def display(self, savedata=False, logfile="", monitorid="", jobid="myjobid"):
self.resetMonitorTime(monitorid=monitorid)
self.resetJobID(jobid=jobid)
ret_dict = {'d_span':'NA',
'physical_layout_hash':'NA'}
if savedata:
if logfile == "":
filename = self.monitor_time + ".result"
else:
filename = logfile
fullpath = os.path.join(self.logdir, filename)
f = open(fullpath, 'w')
if self.filesystem == 'ext3':
extlist = ext34_getExtentList_of_myfiles(target=self.mountpoint)
df_ext = extlist_block_to_byte(extlist)
if savedata and extlist != None:
h = "---------------- extent list -------------------\n"
f.write(extlist.toStr())
elif self.filesystem == 'ext4':
######################
# get extents of all files
extlist = self.getExtentList_of_a_dir(target=self.mountpoint)
df_ext = extlist_translate_new_format(extlist)
#print df_ext.toStr()
#exit(1)
if savedata and extlist != None:
h = "---------------- extent list -------------------\n"
f.write(extlist.toStr())
######################
# e2freefrag
#frag = self.e2freefrag()
#if savedata and frag != None:
#frag0_header = "----------- Extent summary -------------\n"
#frag1_header = "----------- Extent Histogram -------------\n"
#f.write(frag0_header + frag["FragSummary"].toStr())
#f.write(frag1_header + frag["ExtSizeHistogram"].toStr())
######################
# dumpfs
#freespaces = self.dumpfs()
#if savedata and frag != None:
#dumpfs_header = "----------- Dumpfs Header ------------\n"
#f.write(dumpfs_header + freespaces['freeblocks'].toStr())
#f.write(dumpfs_header + freespaces['freeinodes'].toStr())
elif self.filesystem == 'xfs':
df_ext = self.xfs_getExtentList_of_a_dir(self.mountpoint)
#df_ext = self.xfs_getExtentList_of_a_dir('./dir.1/')
#df_ext.table.extend(df_ext0.table)
df_ext = extlist_translate_new_format(df_ext)
#print df_ext.toStr()
#exit(1)
if savedata and df_ext != None:
df_ext.addColumns(keylist=["HEADERMARKER_extlist",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extlist",
self.monitor_time,
self.jobid])
h = "---------------- extent list -------------------\n"
f.write( h + df_ext.toStr() )
elif self.filesystem == 'btrfs':
            # too many files there sometimes, let me remove some
remove_unecessary(self.mountpoint)
tree_lines = btrfs_db_parser.btrfs_debug_tree(self.devname)
tree_parser = btrfs_db_parser.TreeParser(tree_lines)
df_dic = tree_parser.parse()
df_rawext = df_dic['extents']
df_chunk = df_dic['chunks']
paths = get_all_my_files(self.mountpoint)
df_map = btrfs_db_parser.get_filepath_inode_map2(paths)
#print df_rawext.toStr()
#print df_chunk.toStr()
#print df_map.toStr()
#exit(0)
df_ext = btrfs_convert_rawext_to_ext(df_rawext, df_chunk, df_map)
if savedata:
df_ext.addColumns(keylist=["HEADERMARKER_extlist",
"monitor_time",
"jobid"],
valuelist=["DATAMARKER_extlist",
self.monitor_time,
self.jobid])
h = "---------------- extent list -------------------\n"
f.write( h + df_ext.toStr())
else:
print "Unsupported file system."
exit(1)
if savedata:
f.flush()
f.close()
# calculate return value
print df_ext.toStr()
#exit(0)
ret_dict['d_span'] = get_d_span_from_extent_list(df_ext,
'.file')
ret_dict['distance_sum'] = \
get_distant_sum_from_extent_list(df_ext, '.file')
if ret_dict['distance_sum'] < 0:
print 'distance_sum should be >=0'
allpaths = get_paths_in_df(df_ext)
myfiles = [os.path.basename(path) for path in allpaths \
if '.file' in path]
myfiles.sort( key=lambda x:int(x.split('.')[0]) ) #sort by file id
ret_dict['datafiles'] = '|'.join( myfiles )
dspans = []
for f in myfiles:
dspans.append( get_d_span_from_extent_list(df_ext, f) )
dspans = [str(x) for x in dspans]
ret_dict['datafiles_dspan'] = '|'.join( dspans )
num_extents = []
for f in myfiles:
num_extents.append( get_num_ext_from_extent_list(df_ext, f) )
num_extents = [str(x) for x in num_extents]
ret_dict['num_extents'] = '|'.join( num_extents )
ret_dict['physical_layout_hash'] \
= get_physical_layout_hash(df_ext,
'file',
merge_contiguous=True)
return ret_dict
def stat_a_file(self, filepath):
filepath = os.path.join(self.mountpoint, filepath)
cmd = ["stat", filepath]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
output = proc.communicate()[0] # communicate() uses buffer. Don't use it
lines = output.strip()
lines = lines.split('\n')
stat_dict = {}
for line in lines:
#print line
if not "Inode" in line:
continue
mo = re.search( r'Inode:\s(\d+)', line, re.M)
if mo:
print mo.group(1)
inode_number = mo.group(1)
stat_dict['inode_number'] = inode_number
return stat_dict
def xfs_get_extentlist_of_a_file(self, filepath):
inode_number = self.stat_a_file(filepath)['inode_number']
df = xfs_db_parser.xfs_get_extent_tree(inode_number, self.devname)
df.addColumn(key = "filepath",
value = fill_white_space(filepath))
return df
def xfs_getExtentList_of_a_dir(self, target="."):
"rootdir is actually relative to mountpoint. Seems bad"
#files = self.getAllInodePaths(target)
files = get_all_my_files(target)
df = dataframe.DataFrame()
for f in files:
#print "UU____UU"
if len(df.header) == 0:
df = self.xfs_get_extentlist_of_a_file(f)
else:
df.table.extend( self.xfs_get_extentlist_of_a_file(f).table )
return df
############################################
SECTORSIZE=512
def get_num_sectors(length):
return int((length+SECTORSIZE-1)/SECTORSIZE)
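# (added) ceiling division: a partial trailing sector still counts as one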
def get_distant_sum(extentlist):
"""
extentlist is a list like:
[ {'off':xxx, 'len':xxx}, {..}, ..]
This unit is byte.
"""
#print extentlist
# for each extent
distsum = 0
n = 0
for ext in extentlist:
distsum += extent_distant_sum(ext)
n += get_num_sectors(ext['len'])
for ext1, ext2 in itertools.combinations(extentlist, 2):
distsum += extent_pair_distant_sum(ext1, ext2)
return distsum
def extent_distant_sum(extent):
"""
The sum of all pair distance inside the extent is:
n(n-1)(n+1)/6
"""
# doing a trick to get ceiling without floats
n = get_num_sectors(extent['len'])
# hmm.. define the distance of 1 sector
# to be 1.
if n == 1:
return 1
#print "n:", n
ret = n*(n-1)*(n+1)/6
#print extent, ret
return ret
def extent_pair_distant_sum( extent1, extent2 ):
"ext1 and ext2 cannot overlap!"
if extent1['off'] > extent2['off']:
extent1, extent2 = extent2, extent1
m = get_num_sectors(extent1['len'])
n = get_num_sectors(extent2['len'])
k = (extent2['off']-extent1['off']-extent1['len'])/SECTORSIZE
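    # (added) each of the m*n cross pairs is (m+n)/2 + k sectors apart on
    # average, which gives the closed form below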
ret = m*n*(m+n+2*k)/2
#print extent1, extent2, ret
return ret
if __name__ == '__main__':
print get_distant_sum( [
{'off':0, 'len':512},
#{'off':512, 'len':512}] )
{'off':512*10, 'len':512}] )
def remove_unecessary(top):
objlist = os.listdir(top)
for name in objlist:
if name.endswith('.file') or name.startswith('dir.'):
continue
path = os.path.join(top, name)
if os.path.isfile(path):
os.remove(path)
#print 'remove FILE:', path
else:
shutil.rmtree(path)
#print 'remove DIR:', path
subprocess.call('sync')
def get_all_my_files( target ):
matches = []
for root, dirnames, filenames in os.walk(target):
for filename in fnmatch.filter(filenames, '*.file'):
matches.append(os.path.join(root, filename))
dirnames[:] = fnmatch.filter(dirnames, 'dir.*')
return matches
def ext34_getExtentList_of_myfiles(target):
files = get_all_my_files(target)
df = dataframe.DataFrame()
for f in files:
if len(df.header) == 0:
df = filefrag(f)
else:
df.table.extend( filefrag(f).table )
return df
def get_physical_layout_hash(df_ext, filter_str, merge_contiguous=False):
"""
It only cares about physical block positions.
It has nothing to do with filename, logical address of blocks..
Just sort the physical block start and end, then do a hash
Inlcuding inode, ETB, and data extent!
Another way to find layout is to get all the free blocks and do
    hash on them. That would measure the free space more directly.
"""
hdr = df_ext.header
phy_blocks = []
for row in df_ext.table:
if filter_str in row[hdr.index('filepath')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
phy_blocks.append( physical_start )
phy_blocks.append( physical_end )
# There can be over lap between extents for inode and only for inode
# block number can be overlapped in extent
# block number of the same extent always next to each other
phy_blocks.sort()
if merge_contiguous:
# the block number are ALWAYS in pair, even after sorting
# [start, end, start, end, start, end, ...]
# This may not work for BTRFS!
merged = []
n = len(phy_blocks)
assert n % 2 == 0
for i in range(0, n, 2):
# i is start of an extent
if i == 0: # the first extent
merged.append( phy_blocks[i] )
merged.append( phy_blocks[i+1] )
continue
if phy_blocks[i] == phy_blocks[i-1] + 1:
# can be merged
merged[-1] = phy_blocks[i+1]
elif phy_blocks[i] == phy_blocks[i-2] and \
phy_blocks[i+1] == phy_blocks[i-1]:
# hmm... duplicated extent. can only happen to inode
pass # do nothing
else:
# cannot be merged
merged.append( phy_blocks[i] )
merged.append( phy_blocks[i+1] )
phy_blocks = merged
return hash( str(phy_blocks) )
def get_inode_num_from_dfmap(filepath, df_map):
hdr = df_map.header
for row in df_map.table:
if row[hdr.index('filepath')] == filepath:
return row[hdr.index('inode_number')]
return None
def get_all_vir_ranges_of_an_inode(inode_number, df_rawext):
hdr = df_rawext.header
ranges = []
for row in df_rawext.table:
if str(row[hdr.index('inode_number')]) == str(inode_number):
d = {
'virtual_start': int(row[hdr.index('Virtual_start')]),
'length': int(row[hdr.index('Length')])
}
ranges.append( d )
return ranges
def btrfs_df_map_to_dic(df_map):
d = {}
hdr = df_map.header
for row in df_map.table:
filepath = row[hdr.index('filepath')]
inode_number = row[hdr.index('inode_number')]
d[str(inode_number)] = filepath
return d
def btrfs_convert_rawext_to_ext(df_rawext, df_chunk, df_map):
#print df_rawext.toStr()
#print df_chunk.toStr()
#print df_map.toStr()
dic_map = btrfs_df_map_to_dic(df_map)
hdr = df_rawext.header
devices = set()
df_ext = dataframe.DataFrame()
df_ext.header = ['Level_index',
'Max_level',
'Entry_index',
'N_Entry',
'Virtual_start',
'Logical_start',
'Logical_end',
'Physical_start',
'Physical_end',
'Length',
'Flag',
'filepath']
for row in df_rawext.table:
rowdic = {}
for col in hdr:
rowdic[col] = row[hdr.index(col)]
#print rowdic
phy_starts = btrfs_db_parser.virtual_to_physical( rowdic['Virtual_start'], df_chunk )
for stripe in phy_starts:
devices.add( stripe['devid'] )
assert len(devices) == 1, 'we only allow one device at this time'
rowdic['Physical_start'] = stripe['physical_addr']
rowdic['Physical_end'] = stripe['physical_addr'] + \
int( rowdic['Length'] )
rowdic['Logical_end'] = int(rowdic['Logical_start']) + \
int( rowdic['Length'] )
rowdic['Level_index'] = 0
rowdic['Max_level'] = 0
rowdic['Entry_index'] = 0
rowdic['N_Entry'] = 0
rowdic['filepath'] = dic_map[str( rowdic['inode_number'] )]
rowdic['Flag'] = "NA"
df_ext.addRowByDict( rowdic )
return df_ext
def extlist_translate_new_format(df_ext):
"""
Use ending of file and new unit(byte)
Only df_ext of ext4 and xfs need this, btrfs already
uses byte as unit.
But does btrfs use the new style of ending?
"""
df_ext = extlist_lastblock_to_nextblock(df_ext)
df_ext = extlist_block_to_byte(df_ext)
return df_ext
def extlist_lastblock_to_nextblock(df_ext):
"""
for ext4 and xfs, the Logical_end and Physical_end point
to the last block of the file. This is not convenient when
we translate the unit from block to byte.
so in this function, we shift the _end to point to the
next block of the file (out of the file), kind of like
the .end() of iterator in C++.
For example, it was 8,8 for a file, indicating, the first
and the last block of the file is 8.
After the translating of this file, it is 8,9.
"""
colnames = ['Logical_end', 'Physical_end']
hdr = df_ext.header
for row in df_ext.table:
for col in colnames:
x = row[hdr.index(col)]
if x != 'NA':
x = int(x) + 1
row[hdr.index(col)] = x
return df_ext
def extlist_block_to_byte(df_ext):
"""
Translate the unit from block to byte for extent list
Translated:
Logical_start Logical_end Physical_start Physical_end
This function should be used as soon as the df_ext is created
so all the later functions that use this df_ext can treat it
as byte.
"""
BLOCKSIZE = 4096
colnames = ['Logical_start', 'Logical_end',
'Physical_start', 'Physical_end', 'Length']
hdr = df_ext.header
for row in df_ext.table:
for col in colnames:
x = row[hdr.index(col)]
if x != 'NA':
x = int(x) * BLOCKSIZE
row[hdr.index(col)] = x
return df_ext
def get_num_ext_from_extent_list(df_ext, filename):
"Get number of extents"
hdr = df_ext.header
cnt = 0
for row in df_ext.table:
if filename == os.path.basename(row[hdr.index('filepath')]) and \
row[hdr.index('Level_index')] != '-1':
cnt += 1
return cnt
def get_paths_in_df(df_ext):
hdr = df_ext.header
paths = set()
for row in df_ext.table:
paths.add( row[hdr.index('filepath')] )
return list(paths)
def get_d_span_from_extent_list(df_ext, filepath):
hdr = df_ext.header
byte_max = -1
byte_min = float('Inf')
for row in df_ext.table:
if filepath in row[hdr.index('filepath')] and \
row[hdr.index('Level_index')] != '-1' and \
row[hdr.index('Level_index')] == row[hdr.index('Max_level')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
mmin = min(physical_start, physical_end)
mmax = max(physical_start, physical_end)
if mmin < byte_min:
byte_min = mmin
if mmax > byte_max:
byte_max = mmax
if byte_max == -1:
# no extent found
return 'NA'
else:
return byte_max - byte_min
def get_distant_sum_from_extent_list(df_ext, filepath):
hdr = df_ext.header
extlist = []
for row in df_ext.table:
if filepath in row[hdr.index('filepath')] and \
row[hdr.index('Level_index')] != '-1' and \
row[hdr.index('Level_index')] == row[hdr.index('Max_level')]:
#print row
physical_start = int(row[hdr.index('Physical_start')])
physical_end = int(row[hdr.index('Physical_end')])
d = {
'off': physical_start,
'len': physical_end - physical_start
}
extlist.append( d )
distsum = get_distant_sum( extlist )
return distsum
def stat_a_file(filepath):
filepath = os.path.join(filepath)
cmd = ["stat", filepath]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE)
output = proc.communicate()[0] # communicate() uses limited buffer
lines = output.strip()
lines = lines.split('\n')
stat_dict = {}
for line in lines:
#print line
if not "Inode" in line:
continue
mo = re.search( r'Inode:\s(\d+)', line, re.M)
if mo:
#print mo.group(1)
inode_number = mo.group(1)
stat_dict['inode_number'] = inode_number
return stat_dict
def get_all_paths(mountpoint, dir):
"it returns paths of all files and diretories"
paths = []
with cd(mountpoint):
cmd = ['find', dir]
proc = subprocess.Popen(cmd, stdout = subprocess.PIPE)
for line in proc.stdout:
paths.append(line.replace("\n", ""))
proc.wait()
return paths
def isfilefrag_ext_line(line):
if 'Filesystem' in line or \
'blocksize' in line or \
('logical' in line and 'length' in line) or\
('extent' in line and 'found' in line):
return False
else:
return True
def filefrag(filepath):
cmd = ["filefrag", "-sv", filepath]
#print cmd
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
df_ext = dataframe.DataFrame()
header = ["Level_index", "Max_level",
"Entry_index", "N_Entry",
"Logical_start", "Logical_end",
"Physical_start", "Physical_end",
"Length", "Flag", "filepath"]
df_ext.header = header
#ext logical physical expected length flags
#0 0 1545 12 merged
for line in proc.stdout:
if isfilefrag_ext_line(line):
items = line.split()
# it is 4 because there might be some line without
# both expected and flags
assert len(items) >= 4, line
if len(items) == 5 or len(items) == 4:
items.insert(3, -1)
#print items
d = {
'Level_index': 0,
'Max_level' : 0,
'Entry_index': int(items[0]),
'N_Entry' : 'NA',
'Logical_start': int(items[1]),
'Logical_end': int(items[1]) + int(items[4]),
'Physical_start': int(items[2]),
'Physical_end': int(items[2]) + int(items[4]),
'Length' : int(items[4]),
'Flag' : 'NA',
'filepath' : filepath
}
df_ext.addRowByDict(d)
#pprint.pprint(d)
#print df_ext.toStr()
proc.wait()
return df_ext
def get_possible_cpu():
f = open("/sys/devices/system/cpu/possible", 'r')
line = f.readline()
f.close()
return line.strip()
def get_available_cpu_dirs():
"Counting dirs is more accurate than */cpu/possible, at least on emulab"
cpudirs = [name for name in glob.glob("/sys/devices/system/cpu/cpu[0-9]*") \
if os.path.isdir(name)]
return cpudirs
def get_online_cpuids():
with open('/sys/devices/system/cpu/online', 'r') as f:
line = f.readline().strip()
# assuming format of 0-2,4,6-63
items = line.split(',')
cpus = []
for item in items:
if '-' in item:
a,b = item.split('-')
a = int(a)
b = int(b)
cpus.extend(range(a, b+1))
else:
cpus.append(int(item))
return cpus
def switch_cpu(cpuid, mode):
path = "/sys/devices/system/cpu/cpu{cpuid}/online"
path = path.format(cpuid=cpuid)
modedict = {'ON':'1', 'OFF':'0'}
f = open(path, 'w')
f.write(modedict[mode])
f.flush()
f.close()
return
| gpl-2.0 | 4,359,682,231,986,908,700 | 33.280062 | 95 | 0.512707 | false |
SanketDG/coala-bears | tests/LocalBearTestHelper.py | 1 | 8822 | import collections
import queue
import unittest
from contextlib import contextmanager
import pytest
from tests.BearTestHelper import generate_skip_decorator
from coalib.bears.LocalBear import LocalBear
from coalib.misc.ContextManagers import prepare_file
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
@contextmanager
def execute_bear(bear, *args, **kwargs):
try:
bear_output_generator = bear.execute(*args, **kwargs)
assert bear_output_generator is not None, \
"Bear returned None on execution\n"
yield bear_output_generator
except Exception as err:
msg = []
while not bear.message_queue.empty():
msg.append(bear.message_queue.get().message)
raise AssertionError(str(err) + " \n" + "\n".join(msg))
return list(bear_output_generator)
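# (added) execute_bear drains the bear's message queue into the assertion
# message, so a failing bear run reports its own log output with the error.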
class LocalBearTestHelper(unittest.TestCase): # pragma: no cover
"""
This is a helper class for simplification of testing of local bears.
Please note that all abstraction will prepare the lines so you don't need
to do that if you use them.
If you miss some methods, get in contact with us, we'll be happy to help!
"""
def check_validity(self,
local_bear,
lines,
filename=None,
valid=True,
force_linebreaks=True,
create_tempfile=True,
tempfile_kwargs={}):
"""
Asserts that a check of the given lines with the given local bear
either yields or does not yield any results.
:param local_bear: The local bear to check with.
:param lines: The lines to check. (List of strings)
:param filename: The filename, if it matters.
:param valid: Whether the lines are valid or not.
:param force_linebreaks: Whether to append newlines at each line
if needed. (Bears expect a \\n for every line)
:param create_tempfile: Whether to save lines in tempfile if needed.
:param tempfile_kwargs: Kwargs passed to tempfile.mkstemp().
"""
assert isinstance(self, unittest.TestCase)
self.assertIsInstance(local_bear,
LocalBear,
msg="The given bear is not a local bear.")
self.assertIsInstance(lines,
(list, tuple),
msg="The given lines are not a list.")
with prepare_file(lines, filename,
force_linebreaks=force_linebreaks,
create_tempfile=create_tempfile,
tempfile_kwargs=tempfile_kwargs) as (file, fname), \
execute_bear(local_bear, fname, file) as bear_output:
if valid:
msg = ("The local bear '{}' yields a result although it "
"shouldn't.".format(local_bear.__class__.__name__))
self.assertEqual(bear_output, [], msg=msg)
else:
msg = ("The local bear '{}' yields no result although it "
"should.".format(local_bear.__class__.__name__))
self.assertNotEqual(len(bear_output), 0, msg=msg)
return bear_output
def check_results(self,
local_bear,
lines,
results,
filename=None,
check_order=False,
force_linebreaks=True,
create_tempfile=True,
tempfile_kwargs={}):
"""
Asserts that a check of the given lines with the given local bear does
yield exactly the given results.
:param local_bear: The local bear to check with.
:param lines: The lines to check. (List of strings)
:param results: The expected list of results.
:param filename: The filename, if it matters.
:param force_linebreaks: Whether to append newlines at each line
if needed. (Bears expect a \\n for every line)
:param create_tempfile: Whether to save lines in tempfile if needed.
:param tempfile_kwargs: Kwargs passed to tempfile.mkstemp().
"""
assert isinstance(self, unittest.TestCase)
self.assertIsInstance(local_bear,
LocalBear,
msg="The given bear is not a local bear.")
self.assertIsInstance(lines,
(list, tuple),
msg="The given lines are not a list.")
self.assertIsInstance(results,
list,
msg="The given results are not a list.")
with prepare_file(lines, filename,
force_linebreaks=force_linebreaks,
create_tempfile=create_tempfile,
tempfile_kwargs=tempfile_kwargs) as (file, fname), \
execute_bear(local_bear, fname, file) as bear_output:
msg = ("The local bear '{}' doesn't yield the right results. Or "
"the order may be wrong."
.format(local_bear.__class__.__name__))
if not check_order:
self.assertEqual(sorted(bear_output), sorted(results), msg=msg)
else:
self.assertEqual(bear_output, results, msg=msg)
def verify_local_bear(bear,
valid_files,
invalid_files,
filename=None,
settings={},
force_linebreaks=True,
create_tempfile=True,
timeout=None,
tempfile_kwargs={}):
"""
Generates a test for a local bear by checking the given valid and invalid
file contents. Simply use it on your module level like:
YourTestName = verify_local_bear(YourBear, (['valid line'],),
(['invalid line'],))
:param bear: The Bear class to test.
:param valid_files: An iterable of files as a string list that won't
yield results.
:param invalid_files: An iterable of files as a string list that must
yield results.
:param filename: The filename to use for valid and invalid files.
:param settings: A dictionary of keys and values (both string) from
which settings will be created that will be made
available for the tested bear.
:param force_linebreaks: Whether to append newlines at each line
if needed. (Bears expect a \\n for every line)
:param create_tempfile: Whether to save lines in tempfile if needed.
:param timeout: The total time to run the test for.
:param tempfile_kwargs: Kwargs passed to tempfile.mkstemp() if tempfile
needs to be created.
:return: A unittest.TestCase object.
"""
@pytest.mark.timeout(timeout)
@generate_skip_decorator(bear)
class LocalBearTest(LocalBearTestHelper):
def setUp(self):
self.section = Section('name')
self.uut = bear(self.section,
queue.Queue())
for name, value in settings.items():
self.section.append(Setting(name, value))
def test_valid_files(self):
self.assertIsInstance(valid_files, (list, tuple))
for file in valid_files:
self.check_validity(self.uut,
file.splitlines(keepends=True),
filename,
valid=True,
force_linebreaks=force_linebreaks,
create_tempfile=create_tempfile,
tempfile_kwargs=tempfile_kwargs)
def test_invalid_files(self):
self.assertIsInstance(invalid_files, (list, tuple))
for file in invalid_files:
self.check_validity(self.uut,
file.splitlines(keepends=True),
filename,
valid=False,
force_linebreaks=force_linebreaks,
create_tempfile=create_tempfile,
tempfile_kwargs=tempfile_kwargs)
return LocalBearTest
| agpl-3.0 | 7,346,078,234,706,954,000 | 43.781726 | 79 | 0.530605 | false |
commentedit/commented.it | isso/tests/test_db.py | 1 | 3708 | # -*- encoding: utf-8 -*-
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import sqlite3
import tempfile
from isso import config
from isso.db import SQLite3
from isso.compat import iteritems
class TestDBMigration(unittest.TestCase):
def setUp(self):
fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.unlink(self.path)
def test_defaults(self):
conf = config.new({
"general": {
"dbpath": "/dev/null",
"max-age": "1h"
}
})
db = SQLite3(self.path, conf)
self.assertEqual(db.version, SQLite3.MAX_VERSION)
self.assertTrue(db.preferences.get("session-key", "").isalnum())
def test_session_key_migration(self):
conf = config.new({
"general": {
"dbpath": "/dev/null",
"max-age": "1h"
}
})
conf.set("general", "session-key", "supersecretkey")
with sqlite3.connect(self.path) as con:
con.execute("PRAGMA user_version = 1")
con.execute("CREATE TABLE threads (id INTEGER PRIMARY KEY)")
db = SQLite3(self.path, conf)
self.assertEqual(db.version, SQLite3.MAX_VERSION)
self.assertEqual(db.preferences.get("session-key"),
conf.get("general", "session-key"))
# try again, now with the session-key removed from our conf
conf.remove_option("general", "session-key")
db = SQLite3(self.path, conf)
self.assertEqual(db.version, SQLite3.MAX_VERSION)
self.assertEqual(db.preferences.get("session-key"),
"supersecretkey")
def test_limit_nested_comments(self):
tree = {
1: None,
2: None,
3: 2,
4: 3,
7: 3,
5: 2,
6: None
}
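        # id -> parent: 4 and 7 sit two levels deep (under 3); the migration
        # should re-parent them to 2, as `flattened` asserts below.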
with sqlite3.connect(self.path) as con:
con.execute("PRAGMA user_version = 2")
con.execute("CREATE TABLE threads ("
" id INTEGER PRIMARY KEY,"
" uri VARCHAR UNIQUE,"
" title VARCHAR)")
con.execute("CREATE TABLE comments ("
" tid REFERENCES threads(id),"
" id INTEGER PRIMARY KEY,"
" parent INTEGER,"
" created FLOAT NOT NULL, modified FLOAT,"
" block VARCHAR, edit VARCHAR,"
" text VARCHAR, email VARCHAR, website VARCHAR,"
" mode INTEGER,"
" remote_addr VARCHAR,"
" likes INTEGER DEFAULT 0,"
" voters BLOB)")
con.execute("INSERT INTO threads (uri, title) VALUES (?, ?)", ("/", "Test"))
for (id, parent) in iteritems(tree):
con.execute("INSERT INTO comments ("
" tid, parent, created)"
"VALUEs (?, ?, ?)", (id, parent, id))
conf = config.new({
"general": {
"dbpath": "/dev/null",
"max-age": "1h"
}
})
SQLite3(self.path, conf)
flattened = [
(1, None),
(2, None),
(3, 2),
(4, 2),
(5, 2),
(6, None),
(7, 2)
]
with sqlite3.connect(self.path) as con:
rv = con.execute("SELECT id, parent FROM comments ORDER BY created").fetchall()
self.assertEqual(flattened, rv)
| mpl-2.0 | -1,949,383,929,266,403,300 | 29.146341 | 91 | 0.472762 | false |
LettError/filibuster | Lib/filibuster/titlecase.py | 1 | 9269 | # -*- coding: UTF-8 -*-
"""
titlecase.py v0.2
Original Perl version by: John Gruber http://daringfireball.net/ 10 May 2008
Python version by Stuart Colville http://muffinresearch.co.uk
License: http://www.opensource.org/licenses/mit-license.php
"""
import unittest
import sys
import re
SMALL = 'a|an|and|as|at|but|by|en|for|if|in|of|on|or|the|to|v\.?|via|vs\.?'
PUNCT = "[!\"#$%&'‘()*+,-./:;?@[\\\\\\]_`{|}~]"
SMALL_WORDS = re.compile(r'^(%s)$' % SMALL, re.I)
INLINE_PERIOD = re.compile(r'[a-zA-Z][.][a-zA-Z]')
UC_ELSEWHERE = re.compile(r'%s*?[a-zA-Z]+[A-Z]+?' % PUNCT)
CAPFIRST = re.compile(r"^%s*?([A-Za-z])" % PUNCT)
SMALL_FIRST = re.compile(r'^(%s*)(%s)\b' % (PUNCT, SMALL), re.I)
SMALL_LAST = re.compile(r'\b(%s)%s?$' % (SMALL, PUNCT), re.I)
SUBPHRASE = re.compile(r'([:.;?!][ ])(%s)' % SMALL)
def titlecase(text):
"""
Titlecases input text
This filter changes all words to Title Caps, and attempts to be clever
about *un*capitalizing SMALL words like a/an/the in the input.
The list of "SMALL words" which are not capped comes from
the New York Times Manual of Style, plus 'vs' and 'v'.
"""
words = re.split('\s', text)
line = []
for word in words:
if INLINE_PERIOD.search(word) or UC_ELSEWHERE.match(word):
line.append(word)
continue
if SMALL_WORDS.match(word):
line.append(word.lower())
continue
line.append(CAPFIRST.sub(lambda m: m.group(0).upper(), word))
line = " ".join(line)
line = SMALL_FIRST.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), line)
line = SMALL_LAST.sub(lambda m: m.group(0).capitalize(), line)
line = SUBPHRASE.sub(lambda m: '%s%s' % (
m.group(1),
m.group(2).capitalize()
), line)
return line
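# A quick sanity check (hypothetical input, mirroring the unit tests below):
#   >>> titlecase("the quick brown fox jumps over a lazy dog")
#   'The Quick Brown Fox Jumps Over a Lazy Dog'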
class TitlecaseTests(unittest.TestCase):
"""Tests to ensure titlecase follows all of the rules"""
def test_q_and_a(self):
u"""Testing: Q&A With Steve Jobs: 'That’s What Happens In Technology' """
text = titlecase(
u"Q&A with steve jobs: 'that’s what happens in technology'"
)
result = u"Q&A With Steve Jobs: 'That’s What Happens in Technology'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_at_and_t(self):
u"""Testing: What Is AT&T's Problem?"""
text = titlecase(u"What is AT&T’s problem?")
result = u"What Is AT&T’s Problem?"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_apple_deal(self):
"""Testing: Apple Deal With AT&T Falls Through"""
text = titlecase("Apple deal with AT&T falls through")
result = "Apple Deal With AT&T Falls Through"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_v_that(self):
"""Testing: this v that"""
text = titlecase("this v that")
result = "This v That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_v_that2(self):
"""Testing: this v. that"""
text = titlecase("this v. that")
result = "This v. That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_vs_that(self):
"""Testing: this vs that"""
text = titlecase("this vs that")
result = "This vs That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_this_vs_that2(self):
"""Testing: this vs. that"""
text = titlecase("this vs. that")
result = "This vs. That"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_apple_sec(self):
u"""Testing: The SEC’s Apple Probe: What You Need to Know"""
text = titlecase("The SEC’s Apple Probe: What You Need to Know")
result = u"The SEC’s Apple Probe: What You Need to Know"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_quoted(self):
"""Testing: 'by the Way, Small word at the start but within quotes.'"""
text = titlecase(
"'by the Way, small word at the start but within quotes.'"
)
result = "'By the Way, Small Word at the Start but Within Quotes.'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_end(self):
"""Testing: Small word at end is nothing to be afraid of"""
text = titlecase("Small word at end is nothing to be afraid of")
result = "Small Word at End Is Nothing to Be Afraid Of"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_sub_phrase_small_word(self):
"""Testing: Starting Sub-Phrase With a Small Word: a Trick, Perhaps?"""
text = titlecase(
"Starting Sub-Phrase With a Small Word: a Trick, Perhaps?"
)
result = "Starting Sub-Phrase With a Small Word: A Trick, Perhaps?"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_quotes(self):
"""Testing: Sub-Phrase With a Small Word in Quotes: 'a Trick..."""
text = titlecase(
"Sub-Phrase With a Small Word in Quotes: 'a Trick, Perhaps?'"
)
result = "Sub-Phrase With a Small Word in Quotes: 'A Trick, Perhaps?'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_small_word_double_quotes(self):
"""Testing: Sub-Phrase With a Small Word in Quotes: \"a Trick..."""
text = titlecase(
'Sub-Phrase With a Small Word in Quotes: "a Trick, Perhaps?"'
)
result = 'Sub-Phrase With a Small Word in Quotes: "A Trick, Perhaps?"'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_nothing_to_be_afraid_of(self):
"""Testing: \"Nothing to Be Afraid of?\""""
text = titlecase('"Nothing to Be Afraid of?"')
result = '"Nothing to Be Afraid Of?"'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_nothing_to_be_afraid_of2(self):
"""Testing: \"Nothing to Be Afraid Of?\""""
text = titlecase('"Nothing to be Afraid Of?"')
result = '"Nothing to Be Afraid Of?"'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_a_thing(self):
"""Testing: a thing"""
text = titlecase('a thing')
result = 'A Thing'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_vapourware(self):
"""Testing: 2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'"""
text = titlecase(
"2lmc Spool: 'gruber on OmniFocus and vapo(u)rware'"
)
result = "2lmc Spool: 'Gruber on OmniFocus and Vapo(u)rware'"
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_domains(self):
"""Testing: this is just an example.com"""
text = titlecase('this is just an example.com')
result = 'This Is Just an example.com'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_domains2(self):
"""Testing: this is something listed on an del.icio.us"""
text = titlecase('this is something listed on del.icio.us')
result = 'This Is Something Listed on del.icio.us'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_itunes(self):
"""Testing: iTunes should be unmolested"""
text = titlecase('iTunes should be unmolested')
result = 'iTunes Should Be Unmolested'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_thoughts_on_music(self):
u"""Testing: Reading Between the Lines of Steve Jobs’s..."""
text = titlecase(
u'Reading between the lines of steve jobs’s ‘thoughts on music’'
)
result = u'Reading Between the Lines of Steve Jobs’s ‘Thoughts on Music’'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_repair_perms(self):
u"""Testing: Seriously, ‘Repair Permissions’ Is Voodoo"""
text = titlecase(u'seriously, ‘repair permissions’ is voodoo')
result = u'Seriously, ‘Repair Permissions’ Is Voodoo'
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
def test_generalissimo(self):
"""Testing: Generalissimo Francisco Franco..."""
text = titlecase(
'generalissimo francisco franco: still dead; kieren McCarthy: '\
'still a jackass'
)
result = u"""Generalissimo Francisco Franco: Still Dead; Kieren McCarthy: Still a Jackass."""
self.assertEqual(text, result, "%s should be: %s" % (text, result, ))
if __name__ == '__main__':
if not sys.stdin.isatty():
for line in sys.stdin:
print(titlecase(line))
else:
suite = unittest.TestLoader().loadTestsFromTestCase(TitlecaseTests)
unittest.TextTestRunner(verbosity=2).run(suite)
| mit | 5,829,125,291,178,365,000 | 35.752988 | 101 | 0.590786 | false |
isaachenrion/jets | src/proteins/train/validation.py | 1 | 1461 | import logging
import time
import torch
from src.data_ops.wrapping import unwrap
from ..loss import loss
def half_and_half(a,b):
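    # keep the upper triangle (incl. the diagonal) of each matrix in `a` and
    # the strictly lower triangle of the matching matrix in `b`, then merge
    # them into one matrix per batch element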
a = torch.stack([torch.triu(x) for x in a], 0)
b = torch.stack([torch.tril(x, diagonal=-1) for x in b], 0)
return a + b
def validation(model, data_loader):
t_valid = time.time()
model.eval()
valid_loss = 0.
yy, yy_pred = [], []
half = []
mask = []
hard_pred = []
for i, batch in enumerate(data_loader):
(x, y, y_mask, batch_mask) = batch
y_pred = model(x, mask=batch_mask)
vl = loss(y_pred, y, y_mask, batch_mask)
valid_loss = valid_loss + float(unwrap(vl))
yy.append(unwrap(y))
yy_pred.append(unwrap(y_pred))
mask.append(unwrap(batch_mask))
half.append(unwrap(half_and_half(y, y_pred)))
hard_pred.append(unwrap(half_and_half(y, (y_pred > 0.5).float())))
del y; del y_pred; del y_mask; del x; del batch_mask; del batch
valid_loss /= len(data_loader)
#grads = torch.cat([p.grad.view(-1) for p in model.parameters() if p.grad is not None], 0)
logdict = dict(
yy=yy,
yy_pred=yy_pred,
half=half,
hard_pred=hard_pred,
mask=mask,
valid_loss=valid_loss,
model=model,
#grads=grads,
)
model.train()
    logging.info("Validation took {:.1f} seconds".format(time.time() - t_valid))
return logdict
| bsd-3-clause | -3,322,496,645,558,040,600 | 23.762712 | 94 | 0.578371 | false |
byteface/sing | core/PyPal.py | 1 | 16532 | """
PyPal.py
@author: byteface
"""
class PyPal(object):
"""
PyPal is the heart for all pypals :)
"""
# TODO - tell command to pass messages to other pypals. non conflicting. saves having to quit out of current one
# TODO - list commands
# TODO - learn from. quick command to copy commands between pypals may be useful. save moving between dirs and copying
# memory? - obj with funcitons for loading data etc.
# dictionary that stores object from _meta.json
o = None
# TODO - if current context is gone should be able to go through history
    # MULTIPLE CONTEXT OBJECTS MAY NEED TO EXIST. searching for relevant ones is a requirement
context=None
    # TODO - should every statement carry certainty? for now maybe store a number 0-1 on here?
#certainty=0
# TODO third person, you, actor???... you can 'be' another person
#perspective={}
# the natural language processing engine. eventually will live on a brain object
nlp=None # TODO - should be an array
# natural language generation. used for output
nlg=None # TODO - as above
def __init__(self,data):
"""
data param is obj with unique name. i.e {'name':'pypal'}
"""
import json
with open("bin/%s/_meta.json" % data['name']) as json_file:
self.o = json.load(json_file)['object']
# TODO - externalise the class
self.nlp=NLP( self )
# TODO - externalise the class
self.nlg=NLG( self )
#self.context=Context( [self], [self] ) # talk to self
def introduce(self):
"""
introduce - when a pypal is first created this is what it says
"""
self.nlg.say( "Hi my name is %s, Thankyou for creating me!" % self.o['name'] )
self.listen()
def welcome(self):
"""
welcome - whenever you init a pypal
"""
self.nlg.say( "%s I see you have returned!" % self.o['friend'] )
# TODO - display stats?
self.listen()
    # TODO - listen should really be an open stream; at the moment this is just a friend channel.
# TODO - create channels for pypal>pyal comms
# TODO - event should be created
# TODO - should be having thoughts
def listen(self):
# NOTE - listen currently considers it to be friend who is talking
#self_obj = self.nlp.runWordAsFunction( 'bin/%s/brain/perception/self/' % self.o['name'], self.o['name'] )
#friend_obj = self.nlp.runWordAsFunction( 'bin/%s/brain/perception/physical/animal/human/' % self.o['name'], self.o['friend'] )
#friend_obj={}
try:
            # THIS IS AN APPARENTLY nix ONLY SOLUTION FOR AUTO PROCESSING
            # IT WILL TIME OUT IF NO INPUT RESPONSE AND RUN AUTOMATIONS
            # steps towards automation. I looked at using multiprocessing and threading but neither can stop a raw_input
            # for now I'm doing it this way as I'm building some content bots and need it sorted
# the timeout for automation
#import signal
#signal.signal(signal.SIGALRM, self.automate)
#signal.alarm(10)
#from threading import Timer
#t=Timer(10,self.automate)
#t.start()
self.nlg.say( "I am listening..." )
import sys
from select import select
# TODO - keys presses should reset the timeout
timeout = 10000 # TODO - add to a pypal config?? - make timeout longer. for testing type automate. have flag/config for autobots?
rlist, _, _ = select([sys.stdin], [], [], timeout)
if rlist:
s = sys.stdin.readline().strip()
self.process(s)
self.listen()
else:
self.nlg.say( "No input. Automating..." ) # TODO - just run as bg proccess
self.automate()
self.listen()
return
# NOTE - DOESNT RUN ANYMORE
# NOTE - old way. preserving here for now until figure this all out
#self.nlg.say( "I am listening..." )
#information = raw_input("> ")
#self.process( information )
# TODO - parralell process for automation whilst listening?
except:
self.nlg.log( "FAIL :::::listen" )
def automate(self,*args,**kwargs):
"""
automate is a super simple task runner
it executes tasks listed in brain/automation/tasks.json
"""
self.nlg.log( "automate" )
try:
# add and run automation.py
path = 'bin/%s/brain/automation' % self.o['name']
import sys
sys.path.append( path )
task_runner = __import__( 'automate' )
task_runner.run(self,path)
except Exception:
self.nlg.log( "AUTOMATE FAIL!" )
# TODO - what when automations are complete?. currently returns to listening
#self.listen()
return
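    # A minimal sketch of what brain/automation/tasks.json might contain
    # (hypothetical schema; the real format is whatever automate.py expects):
    #   { "tasks": [ { "command": "add grades", "params": "files3" } ] }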
history=[] # TODO - should the history go on the context obj?
## TODO - this should be a HEAR function and should process chunks
# TODO - this is something that would be also good to parrallel process and decide which streams of informtion to listen to or ignore
def process(self,information,caller=None,callee=None):
self.context=Context( self, information ) # FOR NOW JUST FOR STORING PATHS
self.history.append(information)
# update the context object
#self.context=Context( [caller], [callee] )
# bust all into words, squash whitespace
words = information.split(None)
# if its a one letter answer. some helpers/shortcuts
if len(words)==1:
# added a repeat function
if information == 'r':
print self.history[len(self.history)-2]
self.process( self.history[len(self.history)-2] )
return
# show command history
if information == 'h':
                for h in self.history:
print h
return
# TODO - some more 1 key helpers
# 'r' - repeat last command
# 'h' - history
# 'c' - show all available commands in pypal
self.nlp.processOneWord( information )
#self.listen()
return
self.nlp.processSentence( information )
#self.listen()
return
# TODO - need to ask meaning of words. to at least put it into memory for considering
# should also be able to check dictionary / nltk sources. but needs to build a program for the word
def ask_word_meaning(self,word):
        self.nlg.say( "What is '%s'?" % word )
        answer = raw_input("> ")
        # TODO - NO - should probs process the response
self.nlp.addNewWord( word, answer )
# when the bot is not active could explore data sources
# using a decorator pattern for behaviours on data
# def explore(self):
# TODO - let bot decide/choose which data source to consume
# TODO - can bot find new data sources? from interaction with other bots
# TODO - to begin with will attempt to buid knowledge graph data sets
# from webpages/ relational object maps from text
# can also explore things in the world
# theres various ways of determining what to explore in the world
# TODO - create a discover function?...
# do this by going into unknown. i.e. inventing urls to read.
# figure out how to chain commands?
# how to 'think of something to do'
# def spawn(self):
# def merge(self,pypal):
# the first job of the context object is to store caller, callee information
# Who is talking and who are they talking to
# NOTE / TODO - this may evolve with time
class Context(object):
"""
    Context is still to be fully defined.
    It will hold things like conversation history and caller/callee information and is used to aid comprehension.
    Not just personable but subject context,
    i.e. if I say 'show list', then 'add to list' should add to the one I've shown.
    Hmmm, caller/callee is perspective and incorrectly stubbed here; probably why I removed
    the implementation. Unless perspective is an object that also resides in a context object?
    NOW GIVES SOME PATH INFO. Can be accessed in a command like
    o.context.COMMAND_PATH
    Also forces the app into running 1 command at a time, which is good, as that's how a brain kinda works.
    You could probably still spin threads in commands if required, but with context we have 1 train of thought: the running command.
"""
# both can be lists of animals
caller=None
callee=None
# useful for commands to know where they are loading from
# so can dump/store stuff there directly when scraping etc.
    #BASEPATH = './bin/%s/brain/commands/add/grades/files3' % o.o['name']
BASEPATH=''
# TODO - may move these 3 onto the NLP. and make context more of a caretaker pattern extended to surmise and store sessions.
LAST_COMMAND =''
COMMAND_PATH =''
#PARAMS=''
def __init__(self, parent, command, caller=None,callee=None):
# self.caller=caller
# self.callee=callee
self.BASEPATH = './bin/%s/brain/commands' % parent.o['name']
self.LAST_COMMAND = command
path = '/'.join( self.LAST_COMMAND.split(' ') )
file = '_'.join( self.LAST_COMMAND.split(' ') ) + '.py'
#self.COMMAND_PATH = '%s/%s/%s' % ( self.BASEPATH, path, file )
self.COMMAND_PATH = '%s/%s' % ( self.BASEPATH, path )
#self.PARAMS='' # NOTE - gets updated once string is parsed
class NLP(object):
"""
    NLP handles word parsing and maps words/sentences to functions.
    Essentially a custom module loader.
"""
owner=None
# TODO -
#TIME
#PLACE
#BASEPATH = './bin/%s/brain/commands/add/grades/files3' % o.o['name']
def __init__(self,owner):
self.owner=owner
def addNewWord( self, word, answer ):
# TODO - addNewWord. store what friend thinks/says it is
return
def processOneWord( self, word ):
"""
parse single word commands.
basically runs a command from the command folder
"""
#TODO - check that the word is clean
#TODO - see if we know the word
#TODO - deal with inuendo
#TODO - lemmatiser maybe required. or we re-routing manually?
knows_word=False;
c_path = 'bin/%s/brain/commands' % self.owner.o['name']
if self.has_command(c_path+"/"+word+"/"+word+".py"):
self.owner.nlg.log( "command detected" )
knows_word=True
return self.runWordAsFunction( c_path, word )
if knows_word == False:
self.owner.nlg.say( "I don't yet know that word" )
# now that we know input function we can run it..
# TODO - should probably create new problem though and process that way
# TODO - all the meta properties need updating
def runWordAsFunction(self,path,word):
import sys
sys.path.append( "%s/%s" % (path,word) )
try:
# TODO - check and update all the meta props
command_module = __import__( word )
reload(command_module) # reload class without restarting pypal
return command_module.run(self.owner)
except Exception, e:
self.owner.nlg.say( "Sorry, I can't do that, I tried but it didn't work" )
self.owner.nlg.log( "CHECK YOUR VIRUTAL ENVIRONMENT IS RUNNING." )
pass
# TODO - try to find the finite verb
# NOTE - AT THE MOMENT ONLY PROCESSING COMMANDS
def processSentence( self, sentence ):
# print "processSentence"
words = sentence.split(None)
word_count = len(words)
basepath = 'bin/%s/brain/commands' % self.owner.o['name']
word_path_arr=[]
# walk up the sentence
for word in words:
root = basepath+"/"+'/'.join(word_path_arr)
has_path = self.has_path( root +"/"+ word )
# if next word is the last word. check for a command and run it without params.
if (len(word_path_arr)+1)==word_count:
path = root+"/"+word
function = '_'.join( word_path_arr ) + "_" + word
if self.has_command(path+"/"+function+".py"):
return self.runSentenceAsFunction( path, function )
# if nowhere to go. but there's a command at current path. run it and pass the rest as param
if (False==has_path):
function = '_'.join( word_path_arr )
if self.has_command(root+"/"+function+".py"):
# get params by removing where we were up to
params = sentence.replace( ' '.join( word_path_arr ), '' )
# REMOVE THE WHITE SPACE FROM START OF PARAMS
params = params[1:]
# TODO - note. i see i built up to path to strip param. problem here is param is on the command_path. and doesn't get parsed off until here. during execution.
# TODO - will have a rethink about how want context to work before changing this. so for now will operate on the context obj here
# TODO - when doing change, nlp ref should probs get given to context. or context keeps them all in array.
self.owner.context.COMMAND_PATH = self.owner.context.COMMAND_PATH.replace( params, '' )
#self.owner.context.PARAMS = params
# TODO - throw error if no param is passed
if params == None or params == '':
                        print 'ERROR: parameter expected, none received'
# run the function
return self.runSentenceAsFunction( root, function, params )
else:
break
word_path_arr.append(word)
        # TODO - if no command, attempt generating a response from the self-compiled programs.
# TODO - integrate memory, world states, schemas and emotions
# A LAD is a KAD : cognitive learning
return self.owner.nlg.say( "No command found" )
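    # e.g. given bin/<name>/brain/commands/add/grades/add_grades.py,
    # processSentence("add grades files3") walks add -> grades, finds no
    # 'files3' folder, and runs add_grades.run(owner, "files3")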
# params at the moment are 'rest of string'
# long term might break around finite verb and pass whole string?
def runSentenceAsFunction(self,path,function,params=None):
#print "runSentenceAsFunction"
#print path, function, params
import sys
sys.path.append( path )
try:
# TODO - check all the meta props
# TODO - may need to also write to some of the meta
# TODO - if no meta create a default one
command_module = __import__( function )
reload(command_module) # reload class without restarting pypal
if(params!=None):
return command_module.run(self.owner,params)
else:
return command_module.run(self.owner)
pass
except Exception, e:
self.owner.nlg.log( "runSentenceAsFunction FAIL!! \
\n happens when : \
\n failing code in the command. i.e imports used by the command not intalled \
\n venv not running \
\n not passing params when required" )
return False
#self.owner.listen()
pass
# run several possibilities. decide which is most relevant?
# the listener as to suppose an ontological truth in each word as they hear it
# when that doesn't happen even over sets of words things have to be considered
# and find more context or information. even lead to questioning
def suppose():
pass
## ---------------------------- NLP LANGUGAGE UTILS -----------------------------------
# check a lookup table of yes words. program needs to be able to expand that list
    # TODO - if one word use lookup, else use NLTK sentiment tool
# NOTE - false DOES NOT MEAN it is negative, it could be neutral
def is_string_positive( s ):
pass
# check a lookup table of no words. program needs to be able to expand that list
    # TODO - if one word use lookup, else use NLTK sentiment tool
# NOTE - false DOES NOT MEAN it is positive, it could be neutral
def is_string_negative( s ):
pass
    # check a lookup table of number words
# TODO - building lookup tables on the fly is something we need to do
# RETURN THE NUMBER OR WORD FALSE
def is_string_number( s ):
# TODO - check if NLTK can do this
pass
def is_math_operator():
# TODO - check if NLTK can do this
pass
## ---------------------------- NLP FILE UTILS -----------------------------------
# TODO - may get rid of this lookup and have root words as delegators
def hasParams( self, path, word ):
"""
        check whether the command accepts parameters (returns True/False)
"""
try:
            #TODO - should just check if folder has param folder
import Program
program = Program.Program( path, word );
canHasParams = program.meta.get_property( 'rules', 'parameters' );
return canHasParams
except:
print "no meta or param found"
            return False # force False if passed a non-command. TODO - BUT we shouldn't be calling this in that case.
def has_path( self, path_to_directory ):
import os.path
return os.path.isdir(path_to_directory)
def has_command(self, path_to_py_file):
import os.path
return os.path.isfile(path_to_py_file)
class NLG(object):
"""
    NLG - generates sentences in natural language.
    At the moment it just logs strings to output.
    From now on all output should come through here.
"""
owner=None
def __init__(self,owner):
self.owner=owner
def say( self, words ):
"""
output helps distinguish pypals when in the console
"""
print "%s : %s" % ( self.owner.o['name'], words )
return
# TODO - setup python logger
# TODO - pass ref to pypal?
# TODO - logs should write to a file and be accessible by events. i.e. evt12345 - created variable xxx
def log( self, words ):
"""
        log differs from the 'say' method.
log should be more about debugging.
say should be user comms
"""
        return # NOTE <<<<<<<<<<<<<<<<<<<<<< early return, the logging below is disabled
# TOOD - if debug is true
import logging
logging.warning( "------------------------------------- %s : %s" % ( self.owner.o['name'], words ) )
return
| gpl-2.0 | 2,327,574,827,839,849,000 | 27.259829 | 163 | 0.67578 | false |
Aioxas/ax-cogs | horoscope/horoscope.py | 1 | 11258 | from discord.ext import commands
from .utils.chat_formatting import box
import aiohttp
import html
import os
import re
try:
from PIL import Image, ImageDraw, ImageFont
PIL = True
except:
PIL = False
class Horoscope:
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
@commands.command(name="horo", pass_context=True, no_pm=True)
@commands.cooldown(10, 60, commands.BucketType.user)
async def _horoscope(self, ctx, *, sign: str):
"""Retrieves today's horoscope for a zodiac sign.
Works with both signs and birthdays. Make sure to do Month/Day.
Western Zodiac:
Capricorn, Aquarius, Pisces, Aries, Taurus, Gemini, Cancer, Leo,
Virgo, Libra, Scorpio Sagittarius.
        For the Chinese zodiac, use a Chinese sign or a birth year.
Chinese Zodiac:
Ox, Goat, Rat, Snake, Dragon, Tiger, Rabbit, Horse, Monkey,
Rooster, Dog, Pig
Examples: [p]horo love, virgo
[p]horo chinese, rooster
[p]horo daily, virgo
[p]horo whatever, virgo
[p]horo chinese, 1901
[p]horo love, 02/10"""
option = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                  'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/67.0.3396.99 Safari/537.36'}
signs = ["aries", "taurus", "gemini", "cancer", "leo",
"virgo", "libra", "scorpio", "sagittarius", "capricorn",
"aquarius", "pisces"]
chinese_signs = ["ox", "goat", "rat", "snake", "dragon", "tiger",
"rabbit", "horse", "monkey", "rooster", "dog", "pig"]
horo_styles = {"love": "https://www.horoscope.com/us/horoscopes/love/horoscope-love-daily-today.aspx?sign=",
"daily": "https://www.horoscope.com/us/horoscopes/general/horoscope-general-daily-today.aspx?sign=",
"chinese": "http://www.horoscope.com/us/horoscopes/chinese/horoscope-chinese-daily-today.aspx?sign="}
regex = [
"<strong( class=\"date\"|)>([^`]*?)<\/strong> - ([^`]*?)\n"]
subs = "\n\s*"
try:
horos = sign.split(', ')
style = horos[0]
horos.remove(style)
sign = horos[0].lower()
if style == "chinese":
if sign not in chinese_signs:
sign = self.getchinese_signs(int(sign)).lower()
uri = horo_styles[style]
sign_num = str(chinese_signs.index(sign) + 1)
uir = uri + sign_num
async with self.session.get(uir, headers=option) as resp:
test = str(await resp.text())
msg = re.findall(regex[0], test)[0]
msg_content = msg[2].replace("</p>", "")
msg = msg_content + " - " + msg[1]
await self.bot.say("Today's chinese horoscope for the one"
" born in the year of the {} is:\n"
.format(sign) + box(msg))
else:
if style not in horo_styles:
style = "daily"
if sign not in signs:
sign = sign.split("/")
Month = sign[0]
Day = sign[1]
sign = signs[self.getzodiac_signs(Month, Day)]
uri = horo_styles[style]
sign_num = str(signs.index(sign) + 1)
                sign = sign.capitalize()
uir = uri + sign_num
async with self.session.get(uir, headers=option) as resp:
test = str(await resp.text())
msg = re.findall(regex[0], test)[0]
msg_content = msg[2].replace("</p>", "")
msg = msg_content + " - " + msg[1]
if style == "love":
await self.bot.say("Today's love horoscope for **{}** is:\n"
.format(sign) + box(msg))
else:
await self.bot.say("Today's horoscope for **{}** is:\n"
.format(sign) + box(msg))
except (IndexError, ValueError):
await self.bot.say("Your search is not valid, please follow the "
"examples.\n[p]horo love, virgo\n[p]horo life,"
" pisces\n[p]horo whatever, sagittarius"
"\n[p]horo daily, virgo\n[p]horo chinese,"
" rooster")
def getzodiac_signs(self, Month, Day):
Month = int(Month)
Day = int(Day)
times = [((Month == 12 and Day >= 22)or(Month == 1 and Day <= 19)),
((Month == 1 and Day >= 20)or(Month == 2 and Day <= 17)),
((Month == 2 and Day >= 18)or(Month == 3 and Day <= 19)),
((Month == 3 and Day >= 20)or(Month == 4 and Day <= 19)),
((Month == 4 and Day >= 20)or(Month == 5 and Day <= 20)),
((Month == 5 and Day >= 21)or(Month == 6 and Day <= 20)),
((Month == 6 and Day >= 21)or(Month == 7 and Day <= 22)),
((Month == 7 and Day >= 23)or(Month == 8 and Day <= 22)),
((Month == 8 and Day >= 23)or(Month == 9 and Day <= 22)),
((Month == 9 and Day >= 23)or(Month == 10 and Day <= 22)),
((Month == 10 and Day >= 23)or(Month == 11 and Day <= 21)),
((Month == 11 and Day >= 22)or(Month == 12 and Day <= 21))]
        for m in times:
            if m:
                # times[] starts at Capricorn (Dec 22) but the signs list used
                # by _horoscope starts at Aries, so shift the index to match.
                return (times.index(m) + 9) % 12
def getchinese_signs(self, year):
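        # the cycle repeats every 12 years from 1900 (Rat),
        # e.g. getchinese_signs(1993) -> (1993 - 1900) % 12 == 9 -> "Rooster"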
czodiac = [(1900, "Rat"), (1901, "Ox"), (1902, "Tiger"),
(1903, "Rabbit"), (1904, "Dragon"), (1905, "Snake"),
(1906, "Horse"), (1907, "Sheep"), (1908, "Monkey"),
(1909, "Rooster"), (1910, "Dog"), (1911, "Pig")]
index = (year - czodiac[0][0]) % 12
return czodiac[index][1]
@commands.command(name="tsujiura", no_pm=True, alias=["senbei"])
@commands.cooldown(10, 60, commands.BucketType.user)
async def _cookie(self):
"""Retrieves a random fortune cookie fortune."""
regex = ["class=\"cookie-link\">([^`]*?)<\/a>", "<p>([^`]*?)<\/p>",
"(?:\\\\['])", "<strong>([^`]*?)<\/strong>",
"<\/strong><\/a>([^`]*?)<br>",
"3\)<\/strong><\/a>([^`]*?)<\/div>"]
url = "http://www.fortunecookiemessage.com"
await self.file_check()
async with self.session.get(url, headers={"encoding": "utf-8"}) as resp:
test = str(await resp.text())
fortune = re.findall(regex[0], test)
fortest = re.match("<p>", fortune[0])
if fortest is not None:
fortune = re.findall(regex[1], fortune[0])
title = re.findall(regex[3], test)
info = re.findall(regex[4], test)
info[0] = html.unescape(info[0])
dailynum = re.findall(regex[5], test)
self.fortune_process(fortune[0])
await self.bot.say("Your fortune is:")
await self.bot.upload("data/horoscope/cookie-edit.png")
await self.bot.say("\n" + title[1] +
info[1] + "\n" + title[2] + dailynum[0])
os.remove("data/horoscope/cookie-edit.png")
async def file_check(self):
urls = ["https://images-2.discordapp.net/.eJwNwcENwyAMAMBdGABDCWCyDSKIoCYxwuZVdff27qPWvNSuTpHBO8DRudA8NAvN3Kp"
"uRO2qeXTWhW7IIrmcd32EwQbjMCRMaJNxPmwILxcRg_9Da_yWYoQ3dV5z6fE09f0BC6EjAw.B0sII_QLbL9kJo6Zbb4GuO4MQNw",
"https://cdn.discordapp.com/attachments/218222973557932032/240223136447070208/FortuneCookieNF.ttf"]
option = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0)'
' Gecko/20100101 Firefox/40.1'}
if os.path.exists("data/horoscope/cookie.png"):
async with self.session.get(urls[0], headers=option) as resp:
test = await resp.read()
meow = False
with open("data/horoscope/cookie.png", "rb") as e:
if len(test) != len(e.read()):
meow = True
if meow:
with open("data/horoscope/cookie.png", "wb") as f:
f.write(test)
elif not os.path.exists("data/horoscope/cookie.png"):
async with self.session.get(urls[0], headers=option) as resp:
test = await resp.read()
with open("data/horoscope/cookie.png", "wb") as f:
f.write(test)
if not os.path.exists("data/horoscope/FortuneCookieNF.ttf"):
async with self.session.get(urls[1], headers=option) as resp:
test = await resp.read()
with open("data/horoscope/FortuneCookieNF.ttf", "wb") as f:
f.write(test)
@commands.command(name="font", no_pm=True)
@commands.cooldown(10, 60, commands.BucketType.user)
async def _font(self, url: str=None):
"""Allows you to set the font that the fortune cookies are shown in.
Only accepts ttf."""
option = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0)'
' Gecko/20100101 Firefox/40.1'}
        if url is None:
            url = "https://cdn.discordapp.com/attachments/218222973557932032/240223136447070208/FortuneCookieNF.ttf"
            if os.path.isfile("data/horoscope/FortuneCookieNF.ttf"):
                return
        elif not url.endswith("ttf"):
            await self.bot.say("This is not a .ttf font, please use a .ttf font. Thanks")
            return
        # fetch the font (default or user-supplied) and save it to disk
        async with self.session.get(url, headers=option) as resp:
            test = await resp.read()
        with open("data/horoscope/FortuneCookieNF.ttf", "wb") as f:
            f.write(test)
        await self.bot.say("Font has been saved")
def fortune_process(self, fortune):
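        # fortunes are laid out as three lines of up to five words each;
        # any words past the tenth all land on the third line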
img = Image.open("data/horoscope/cookie.png")
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("data/horoscope/FortuneCookieNF.ttf", 15)
line = fortune.split()
sep = " "
line1 = sep.join(line[:5])
line2 = sep.join(line[5:10])
line3 = sep.join(line[10:])
draw.text((134, 165), line1, (0, 0, 0), font=font, align="center")
draw.text((134, 180), line2, (0, 0, 0), font=font, align="center")
draw.text((134, 195), line3, (0, 0, 0), font=font, align="center")
img.save("data/horoscope/cookie-edit.png")
def check_folders():
if not os.path.exists("data/horoscope"):
print("Creating data/horoscope folder...")
os.mkdir("data/horoscope")
def setup(bot):
if PIL:
check_folders()
n = Horoscope(bot)
bot.add_cog(n)
else:
raise RuntimeError("You need to run 'pip3 install Pillow'")
| mit | 6,268,701,126,173,950,000 | 45.908333 | 124 | 0.505596 | false |
gfyoung/numpy | numpy/lib/twodim_base.py | 2 | 27180 | """ Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
import functools
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
from numpy.core import overrides
from numpy.core import iinfo, transpose
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
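# e.g. _min_int(0, 255) -> int16 (255 exceeds int8's max of 127),
# while _min_int(-1, 127) -> int8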
def _flip_dispatcher(m):
return (m,)
@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to m[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
order : {'C', 'F'}, optional
Whether the output should be stored in row-major (C-style) or
column-major (Fortran-style) order in memory.
.. versionadded:: 1.14.0
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
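    # write ones along the k-th diagonal via the flat iterator: a stride of
    # M+1 through the row-major flat view moves one row down and one column
    # right per step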
m[:M-k].flat[i::M+1] = 1
return m
def _diag_dispatcher(v, k=None):
return (v,)
@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def _trilu_dispatcher(m, k=None):
return (m,)
@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
def _vander_dispatcher(x, N=None, increasing=None):
return (x,)
# Originally borrowed from John Hunter and matplotlib
@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
weights=None, density=None):
return (x, y, bins, weights)
@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
density : bool, optional
If False, the default, returns the number of samples in each bin.
If True, returns the probability *density* function at the bin,
``bin_count / sample_count / bin_area``.
normed : bool, optional
An alias for the density argument that behaves identically. To avoid
confusion with the broken normed argument to `histogram`, `density`
should be preferred.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx+1,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny+1,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(2, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
>>> H = H.T # Let each row list bins with common y range.
:func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='low',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
>>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
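        # bins is neither a scalar nor a pair: treat it as a single sequence
        # of edges and reuse it for both dimensions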
xedges = yedges = asarray(bins)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights, density)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can be passed also to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return nonzero(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return nonzero(tri(n, m, k=k, dtype=bool))
def _trilu_indices_form_dispatcher(arr, k=None):
return (arr,)
@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return nonzero(~tri(n, m, k=k-1, dtype=bool))
@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
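# --- Added example (editor's sketch, not part of NumPy) ---
# A common use of the upper-triangle indices documented above: visiting each
# unordered pair of a symmetric matrix exactly once. `d` is made-up data.
def _unique_pairs_sketch():
    import numpy as np
    d = np.array([[0., 1., 2.],
                  [1., 0., 3.],
                  [2., 3., 0.]])          # symmetric pairwise distances
    i, j = np.triu_indices_from(d, k=1)   # strictly above the diagonal
    return list(zip(i.tolist(), j.tolist(), d[i, j].tolist()))
    # -> [(0, 1, 1.0), (0, 2, 2.0), (1, 2, 3.0)]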
| bsd-3-clause | -2,810,006,162,861,620,700 | 26.289157 | 79 | 0.551214 | false |
deepmind/open_spiel | open_spiel/python/egt/alpharank_visualizer_test.py | 1 | 2447 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for open_spiel.python.egt.alpharank_visualizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
# pylint: disable=g-import-not-at-top
import matplotlib
matplotlib.use("agg") # switch backend for testing
import mock
import numpy as np
from open_spiel.python.egt import alpharank
from open_spiel.python.egt import alpharank_visualizer
from open_spiel.python.egt import utils
import pyspiel
class AlpharankVisualizerTest(absltest.TestCase):
@mock.patch("%s.alpharank_visualizer.plt" % __name__)
def test_plot_pi_vs_alpha(self, mock_plt):
# Construct game
game = pyspiel.load_matrix_game("matrix_rps")
payoff_tables = utils.game_payoffs_array(game)
_, payoff_tables = utils.is_symmetric_matrix_game(payoff_tables)
payoffs_are_hpt_format = utils.check_payoffs_are_hpt(payoff_tables)
# Compute alpharank
alpha = 1e2
_, _, pi, num_profiles, num_strats_per_population =\
alpharank.compute(payoff_tables, alpha=alpha)
strat_labels = utils.get_strat_profile_labels(payoff_tables,
payoffs_are_hpt_format)
num_populations = len(payoff_tables)
# Construct synthetic pi-vs-alpha history
pi_list = np.empty((num_profiles, 0))
alpha_list = []
for _ in range(2):
pi_list = np.append(pi_list, np.reshape(pi, (-1, 1)), axis=1)
alpha_list.append(alpha)
# Test plotting code (via pyplot mocking to prevent plot pop-up)
alpharank_visualizer.plot_pi_vs_alpha(
pi_list.T,
alpha_list,
num_populations,
num_strats_per_population,
strat_labels,
num_strats_to_label=0)
self.assertTrue(mock_plt.show.called)
if __name__ == "__main__":
absltest.main()
| apache-2.0 | 1,702,298,411,478,422,000 | 32.520548 | 74 | 0.698406 | false |
zlorenz/synergy | synergy/config/urls.py | 1 | 1532 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.views.generic import TemplateView
# Enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^$',
'users.views.contact',
name="home"),
url(r'^about/$',
TemplateView.as_view(template_name='pages/about.html'),
name="about"),
url(r'^personal/$',
TemplateView.as_view(template_name='pages/personal.html'),
name="personal"),
url(r'^business/$',
TemplateView.as_view(template_name='pages/business.html'),
name="business"),
url(r'^professionals/$',
TemplateView.as_view(template_name='pages/professionals.html'),
name="professionals"),
url(r'^clients/$',
TemplateView.as_view(template_name='pages/clients.html'),
name="clients"),
# Admin site:
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Avatars:
url(r'^avatar/', include('avatar.urls')),
# Your stuff: custom urls go here
url(r'^pages/', include("nupages.urls", namespace="nupages")),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| bsd-3-clause | -5,642,518,087,429,554,000 | 32.304348 | 71 | 0.657311 | false |
ge0rgi/cinder | cinder/tests/unit/objects/test_cluster.py | 1 | 6662 | # Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
import mock
from oslo_utils import timeutils
from cinder import objects
from cinder.tests.unit import fake_cluster
from cinder.tests.unit import objects as test_objects
from cinder import utils
def _get_filters_sentinel():
return {'session': mock.sentinel.session,
'name_match_level': mock.sentinel.name_match_level,
'read_deleted': mock.sentinel.read_deleted,
'get_services': mock.sentinel.get_services,
'services_summary': mock.sentinel.services_summary,
'name': mock.sentinel.name,
'binary': mock.sentinel.binary,
'is_up': mock.sentinel.is_up,
'disabled': mock.sentinel.disabled,
'disabled_reason': mock.sentinel.disabled_reason,
'race_preventer': mock.sentinel.race_preventer,
'last_heartbeat': mock.sentinel.last_heartbeat,
'num_hosts': mock.sentinel.num_hosts,
'num_down_hosts': mock.sentinel.num_down_hosts}
@ddt.ddt
class TestCluster(test_objects.BaseObjectsTestCase):
"""Test Cluster Versioned Object methods."""
cluster = fake_cluster.fake_cluster_orm()
@mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster)
def test_get_by_id(self, cluster_get_mock):
filters = _get_filters_sentinel()
cluster = objects.Cluster.get_by_id(self.context,
mock.sentinel.cluster_id,
**filters)
self.assertIsInstance(cluster, objects.Cluster)
self._compare(self, self.cluster, cluster)
cluster_get_mock.assert_called_once_with(self.context,
mock.sentinel.cluster_id,
**filters)
@mock.patch('cinder.db.sqlalchemy.api.cluster_create',
return_value=cluster)
def test_create(self, cluster_create_mock):
cluster = objects.Cluster(context=self.context, name='cluster_name')
cluster.create()
self.assertEqual(self.cluster.id, cluster.id)
cluster_create_mock.assert_called_once_with(self.context,
{'name': 'cluster_name'})
@mock.patch('cinder.db.sqlalchemy.api.cluster_update',
return_value=cluster)
def test_save(self, cluster_update_mock):
cluster = fake_cluster.fake_cluster_ovo(self.context)
cluster.disabled = True
cluster.save()
cluster_update_mock.assert_called_once_with(self.context, cluster.id,
{'disabled': True})
@mock.patch('cinder.db.sqlalchemy.api.cluster_destroy')
def test_destroy(self, cluster_destroy_mock):
cluster = fake_cluster.fake_cluster_ovo(self.context)
cluster.destroy()
cluster_destroy_mock.assert_called_once_with(mock.ANY, cluster.id)
@mock.patch('cinder.db.sqlalchemy.api.cluster_get', return_value=cluster)
def test_refresh(self, cluster_get_mock):
cluster = fake_cluster.fake_cluster_ovo(self.context)
cluster.refresh()
cluster_get_mock.assert_called_once_with(self.context, cluster.id)
def test_is_up_no_last_hearbeat(self):
cluster = fake_cluster.fake_cluster_ovo(self.context,
last_heartbeat=None)
self.assertFalse(cluster.is_up)
def test_is_up(self):
cluster = fake_cluster.fake_cluster_ovo(
self.context,
last_heartbeat=timeutils.utcnow(with_timezone=True))
self.assertTrue(cluster.is_up)
def test_is_up_limit(self):
limit_expired = (utils.service_expired_time(True) +
timeutils.datetime.timedelta(seconds=1))
cluster = fake_cluster.fake_cluster_ovo(self.context,
last_heartbeat=limit_expired)
self.assertTrue(cluster.is_up)
def test_is_up_down(self):
expired_time = (utils.service_expired_time(True) -
timeutils.datetime.timedelta(seconds=1))
cluster = fake_cluster.fake_cluster_ovo(self.context,
last_heartbeat=expired_time)
self.assertFalse(cluster.is_up)
@ddt.data('1.0', '1.1')
def test_obj_make_compatible(self, version):
new_fields = {'replication_status': 'error', 'frozen': True,
'active_backend_id': 'replication'}
cluster = objects.Cluster(self.context, **new_fields)
primitive = cluster.obj_to_primitive(version)
converted_cluster = objects.Cluster.obj_from_primitive(primitive)
for key, value in new_fields.items():
if version == '1.0':
self.assertFalse(converted_cluster.obj_attr_is_set(key))
else:
self.assertEqual(value, getattr(converted_cluster, key))
class TestClusterList(test_objects.BaseObjectsTestCase):
"""Test ClusterList Versioned Object methods."""
@mock.patch('cinder.db.sqlalchemy.api.cluster_get_all')
def test_cluster_get_all(self, cluster_get_all_mock):
orm_values = [
fake_cluster.fake_cluster_orm(),
fake_cluster.fake_cluster_orm(id=2, name='cluster_name2'),
]
cluster_get_all_mock.return_value = orm_values
filters = _get_filters_sentinel()
result = objects.ClusterList.get_all(self.context, **filters)
cluster_get_all_mock.assert_called_once_with(
self.context, filters.pop('is_up'), filters.pop('get_services'),
filters.pop('services_summary'), filters.pop('read_deleted'),
filters.pop('name_match_level'), **filters)
self.assertEqual(2, len(result))
for i in range(len(result)):
self.assertIsInstance(result[i], objects.Cluster)
self._compare(self, orm_values[i], result[i])
| apache-2.0 | 2,093,792,070,328,452,900 | 43.119205 | 78 | 0.616632 | false |
qbeenslee/Nepenthes-Server | config/setting.py | 1 | 1657 | # coding:utf-8
'''
Settings
Author : qbeenslee
Created : 2014/10/9
'''
import os
# 是否开启测试
DEBUG = False
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# 数据库设置
# if DEBUG:
# DB_CONFIG = {
# "host": '127.0.0.1',
# "db": 'nepenthes',
# "port": '3140',
# "user": 'test',
# "password": 'abcd',
# }
# else:
# import sae.const
#
# DB_CONFIG = {
# "host": sae.const.MYSQL_HOST,
# "db": sae.const.MYSQL_DB,
# "port": sae.const.MYSQL_PORT,
# "user": sae.const.MYSQL_USER,
# "password": sae.const.MYSQL_PASS,
# }
DB_CONFIG = {
"host": '127.0.0.1',
"db": 'nepenthes',
"port": '3306',
"user": 'test',
"password": 'abcdef',
}
# Database connection string
DB_CONNECT_STRING = 'mysql://' + DB_CONFIG['user'] + ':' + DB_CONFIG['password'] + '@' + DB_CONFIG['host'] + ':' + \
DB_CONFIG['port'] + '/' + DB_CONFIG['db'] + '?charset=utf8'
settings = {
'cookie_secret': 'WJyZi+hkyLTMS0X3yVHn6SzaFrY0jscNRCN6aXBIUaTCZhC',
'debug': DEBUG,
'static_path': os.path.join(BASE_PATH, 'static'),
}
# Maximum number of password retries
PWD_ERROR_TIME = 10
# Password validity duration (unit: seconds; 604800.0 = 7 days)
PWD_HOLD_TIME_DEFAULT = 604800.0
PWD_HOLD_TIME_SHORT = 36000.0
# Range of password hash iteration counts
PWD_ITERATION_INTERVAL = {'MIN': 11, 'MAX': 99}
# Outgoing (sender) email account settings
OFFICE_EMAIL_COUNT = r'****'
OFFICE_EMAIL_SMTPSERVER = r'****'
OFFICE_EMAIL_NAME_TITLE = r'****'
OFFICE_EMAIL_PASSWORD = r'****'
UPLOAD_PATH = os.path.join(BASE_PATH, 'static/upload/')
if __name__ == '__main__':
print BASE_PATH
print UPLOAD_PATH
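# --- Editor's note (added sketch, hedged) ---
# DB_CONNECT_STRING above is an SQLAlchemy-style URL; a minimal usage sketch,
# assuming SQLAlchemy and a MySQL driver are installed (neither is imported
# elsewhere in this file):
#
#   from sqlalchemy import create_engine
#   engine = create_engine(DB_CONNECT_STRING)
#   conn = engine.connect()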
| gpl-3.0 | 9,091,684,605,637,006,000 | 18.246753 | 116 | 0.560616 | false |
greyshell/Pen-Test | leetcode/factorial.py | 1 | 1129 | #!/usr/bin/python
# author: greyshell
"""
[+] problem description
=======================
find the factorial of a number
1) recursive solution
2) tail recursive solution
[+] reference
=============
TBD
"""
def tail_recursion_driver(n):
"""
driver for the tail recursive factorial
:param n: int
:return: int
"""
return factorial_tail_recursion(n, 1) # 1 is used to start the first accumulation
def factorial_tail_recursion(n, a):
"""
tail recursive helper: a compiler with tail-call elimination can reuse the current stack frame (CPython does not perform this optimization)
:param n: int
:param a: int => it accumulates the result
:return: int
"""
if n == 1 or n == 0:
return a # it carries the final result
else:
return factorial_tail_recursion(n - 1, n * a)
def factorial(n):
"""
normal recursive solution
:return: int
"""
if n == 1 or n == 0: # base case for n = 0, 1
return 1
else: # recursive case when n > 1
return n * factorial(n - 1)
def main():
print tail_recursion_driver(12)
print factorial(0)
if __name__ == '__main__':
main()
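# --- Editor's note (added sketch, hedged) ---
# CPython performs no tail-call elimination, so the tail-recursive version
# above still consumes one stack frame per step. An equivalent iterative
# loop, for comparison (Python 2, matching this file):
#
#   def factorial_iterative(n):
#       a = 1
#       for i in xrange(2, n + 1):
#           a *= i
#       return a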
| mit | 5,056,837,605,008,551,000 | 18.135593 | 111 | 0.581045 | false |
timothyclemansinsea/smc | src/k8s/smc-hub/control.py | 1 | 9152 | #!/usr/bin/env python3
"""
Hub management script
"""
import os, shutil, sys, tempfile
join = os.path.join
# Boilerplate to ensure we are in the directory of this script and make the util module available.
SCRIPT_PATH = os.path.split(os.path.realpath(__file__))[0]
sys.path.insert(0, os.path.abspath(os.path.join(SCRIPT_PATH, '..', 'util')))
os.chdir(SCRIPT_PATH)
import util
# For now in all cases, we just call the container the following; really it should
# maybe be smc-webapp-static#sha1hash, which makes switching between versions easy, etc.
NAME='smc-hub'
SECRETS = os.path.abspath(join(SCRIPT_PATH, '..', '..', 'data', 'secrets'))
def build(tag, rebuild, upgrade=False, commit=None):
"""
Build Docker container by installing and building everything inside the container itself, and
NOT using ../../static/ on host.
"""
# First build smc-hub-base, which is generic install of ubuntu packages, so we should rarely
# clear the cache for this.
v = ['sudo', 'docker', 'build', '-t', '{name}-base'.format(name=NAME)]
if upgrade:
v.append("--no-cache")
v.append(".")
util.run(v, path=join(SCRIPT_PATH, 'image-base'))
# Next build smc-hub, which depends on smc-hub-base.
v = ['sudo', 'docker', 'build', '-t', tag]
if commit:
v.append("--build-arg")
v.append("commit={commit}".format(commit=commit))
if rebuild: # will cause a git pull to happen
v.append("--no-cache")
v.append('.')
util.run(v, path=join(SCRIPT_PATH,'image'))
def build_docker(args):
if args.commit:
args.tag += ('-' if args.tag else '') + args.commit[:6]
tag = util.get_tag(args, NAME)
build(tag, args.rebuild, args.upgrade, args.commit)
if not args.local:
util.gcloud_docker_push(tag)
def run_on_kubernetes(args):
if args.test:
rethink_cpu_request = hub_cpu_request = '10m'
rethink_memory_request = hub_memory_request = '200Mi'
else:
hub_cpu_request = '300m'
hub_memory_request = '1Gi'
rethink_cpu_request = '300m'
rethink_memory_request = '1Gi'
util.ensure_secret_exists('sendgrid-api-key', 'sendgrid')
util.ensure_secret_exists('zendesk-api-key', 'zendesk')
args.local = False # so tag is for gcloud
if args.replicas is None:
args.replicas = util.get_desired_replicas(NAME, 2)
tag = util.get_tag(args, NAME, build)
opts = {
'image_hub' : tag,
'replicas' : args.replicas,
'pull_policy' : util.pull_policy(args),
'min_read_seconds' : args.gentle,
'smc_db_hosts' : args.database_nodes,
'smc_db_pool' : args.database_pool_size,
'smc_db_concurrent_warn' : args.database_concurrent_warn,
'hub_cpu_request' : hub_cpu_request,
'hub_memory_request' : hub_memory_request,
'rethink_cpu_request' : rethink_cpu_request,
'rethink_memory_request' : rethink_memory_request
}
if args.database_nodes == 'localhost':
from argparse import Namespace
ns = Namespace(tag=args.rethinkdb_proxy_tag, local=False)
opts['image_rethinkdb_proxy'] = util.get_tag(ns, 'rethinkdb-proxy', build)
filename = 'smc-hub-rethinkdb-proxy.template.yaml'
else:
filename = '{name}.template.yaml'.format(name=NAME)
t = open(join('conf', filename)).read()
with tempfile.NamedTemporaryFile(suffix='.yaml', mode='w') as tmp:
r = t.format(**opts)
#print(r)
tmp.write(r)
tmp.flush()
util.update_deployment(tmp.name)
if NAME not in util.get_services():
util.run(['kubectl', 'expose', 'deployment', NAME])
def stop_on_kubernetes(args):
util.stop_deployment(NAME)
def load_secret(name, args):
path = args.path
if not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
raise RuntimeError("path='{path}' must be a directory".format(path=path))
file = join(path, name)
if not os.path.exists(file):
raise RuntimeError("'{file}' must exist".format(file=file))
util.create_secret(name+'-api-key', file)
def status(args):
# Get all pod names
v = util.get_pods(run=NAME)
print("Getting last %s lines of logs from %s pods"%(args.tail, len(v)))
for x in v:
lg = util.get_logs(x['NAME'], tail=args.tail, container='smc-hub').splitlines()
blocked = concurrent = 0
for w in lg:
if 'BLOCKED for' in w: # 2016-07-07T17:39:23.159Z - debug: BLOCKED for 1925ms
b = int(w.split()[-1][:-2])
blocked = max(blocked, b)
if 'concurrent]' in w: # 2016-07-07T17:41:16.226Z - debug: [1 concurrent] ...
concurrent = max(concurrent, int(w.split()[3][1:]))
x['blocked'] = blocked
x['concurrent'] = concurrent
bad = util.run("kubectl describe pod {name} |grep Unhealthy |tail -1 ".format(name=x['NAME']), get_output=True, verbose=False).splitlines()
if len(bad) > 0:
x['unhealthy'] = bad[-1].split()[0]
else:
x['unhealthy'] = ''
print("%-30s%-12s%-12s%-12s%-12s%-12s"%('NAME', 'CONCURRENT', 'BLOCKED', 'UNHEALTHY', 'RESTARTS', 'AGE'))
for x in v:
print("%-30s%-12s%-12s%-12s%-12s%-12s"%(x['NAME'], x['concurrent'], x['blocked'], x['unhealthy'], x['RESTARTS'], x['AGE']))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Control deployment of {name}'.format(name=NAME))
subparsers = parser.add_subparsers(help='sub-command help')
sub = subparsers.add_parser('build', help='build docker image')
sub.add_argument("-t", "--tag", default="", help="tag for this build")
sub.add_argument("-c", "--commit", default='',
help="build a particular sha1 commit; the commit is automatically appended to the tag")
sub.add_argument("-r", "--rebuild", action="store_true",
help="re-pull latest hub source code from git and install any dependencies")
sub.add_argument("-u", "--upgrade", action="store_true",
help="re-install the base Ubuntu packages")
sub.add_argument("-l", "--local", action="store_true",
help="only build the image locally; don't push it to gcloud docker repo")
sub.set_defaults(func=build_docker)
sub = subparsers.add_parser('run', help='create/update {name} deployment on the currently selected kubernetes cluster'.format(name=NAME))
sub.add_argument("-t", "--tag", default="", help="tag of the image to run")
sub.add_argument("-r", "--replicas", default=None, help="number of replicas")
sub.add_argument("-f", "--force", action="store_true", help="force reload image in k8s")
sub.add_argument("-g", "--gentle", default=30, type=int,
help="how gentle to be in doing the rolling update; in particular, will wait about this many seconds after each pod starts up (default: 30)")
sub.add_argument("-d", "--database-nodes", default='localhost', type=str, help="database to connect to. If 'localhost' (the default), will run a local rethindkb proxy that is itself pointed at the rethinkdb-cluster service; if 'rethinkdb-proxy' will use that service.")
sub.add_argument("-p", "--database-pool-size", default=50, type=int, help="size of database connection pool")
sub.add_argument("--database-concurrent-warn", default=300, type=int, help="if this many concurrent queries for sustained time, kill container")
sub.add_argument("--rethinkdb-proxy-tag", default="", help="tag of rethinkdb-proxy image to run")
sub.add_argument("--test", action="store_true", help="using for testing so make very minimal resource requirements")
sub.set_defaults(func=run_on_kubernetes)
sub = subparsers.add_parser('delete', help='delete the deployment')
sub.set_defaults(func=stop_on_kubernetes)
sub = subparsers.add_parser('load-sendgrid', help='load the sendgrid password into k8s from disk',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
sub.add_argument('path', type=str, help='path to directory that contains the password in a file named "sendgrid"')
sub.set_defaults(func=lambda args: load_secret('sendgrid',args))
sub = subparsers.add_parser('load-zendesk', help='load the zendesk password into k8s from disk',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
sub.add_argument('path', type=str, help='path to directory that contains the password in a file named "zendesk"')
sub.set_defaults(func=lambda args: load_secret('zendesk',args))
util.add_deployment_parsers(NAME, subparsers, default_container='smc-hub')
sub = subparsers.add_parser('status', help='display status info about concurrent and blocked, based on recent logs')
sub.add_argument("-t", "--tail", default=100, type=int, help="how far back to go in log")
sub.set_defaults(func=status)
args = parser.parse_args()
if hasattr(args, 'func'):
args.func(args)
| gpl-3.0 | -8,445,299,926,647,956,000 | 46.419689 | 275 | 0.633086 | false |
Anaphory/libpgm | libpgm/pgmlearner.py | 1 | 45576 | # Copyright (c) 2012, CyberPoint International, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the CyberPoint International, LLC nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CYBERPOINT INTERNATIONAL, LLC BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
This module provides tools to generate Bayesian networks that are "learned" from a data set. The learning process involves finding the Bayesian network that most accurately models data given as input -- in other words, finding the Bayesian network that makes the data set most likely. There are two major parts of Bayesian network learning: structure learning and parameter learning. Structure learning means finding the graph that most accurately depicts the dependencies detected in the data. Parameter learning means adjusting the parameters of the CPDs in a graph skeleton to most accurately model the data. This module has tools for both of these tasks.
'''
import copy
import itertools
try:
import numpy as np
except ImportError:
raise ImportError("numpy is not installed on your system.")
try:
from scipy.stats import chisquare
except ImportError:
raise ImportError("scipy is not installed on your system.")
from .nodedata import NodeData, StaticNodeData
from .graphskeleton import GraphSkeleton
from .discretebayesiannetwork import DiscreteBayesianNetwork
from .lgbayesiannetwork import LGBayesianNetwork
class PGMLearner():
'''
This class is a machine with tools for learning Bayesian networks from data. It contains the *discrete_mle_estimateparams*, *lg_mle_estimateparams*, *discrete_constraint_estimatestruct*, *lg_constraint_estimatestruct*, *discrete_condind*, *discrete_estimatebn*, and *lg_estimatebn* methods.
'''
def discrete_mle_estimateparams(self, graphskeleton, data):
'''
Estimate parameters for a discrete Bayesian network with a structure given by *graphskeleton* in order to maximize the probability of data given by *data*. This function takes the following arguments:
1. *graphskeleton* -- An instance of the :doc:`GraphSkeleton <graphskeleton>` class containing vertex and edge data.
2. *data* -- A list of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 'B',
'SAT': 'lowscore',
...
},
...
]
This function normalizes the distribution of a node's outcomes for each combination of its parents' outcomes. In doing so it creates an estimated tabular conditional probability distribution for each node. It then instantiates a :doc:`DiscreteBayesianNetwork <discretebayesiannetwork>` instance based on the *graphskeleton*, and modifies that instance's *Vdata* attribute to reflect the estimated CPDs. It then returns the instance.
The Vdata attribute instantiated is in the format seen in :doc:`unittestdict`, as described in :doc:`discretebayesiannetwork`.
Usage example: this would learn parameters from a set of 200 discrete samples::
import json
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from libpgm.pgmlearner import PGMLearner
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
bn = DiscreteBayesianNetwork(skel, nd)
data = bn.randomsample(200)
# instantiate my learner
learner = PGMLearner()
# estimate parameters from data and skeleton
result = learner.discrete_mle_estimateparams(skel, data)
# output
print json.dumps(result.Vdata, indent=2)
'''
assert (isinstance(graphskeleton, GraphSkeleton)), "First arg must be a loaded GraphSkeleton class."
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Second arg must be a list of dicts."
# instantiate Bayesian network, and add parent and children data
bn = StaticNodeData()
graphskeleton.toporder()
for vertex in graphskeleton.V:
bn.Vdata[vertex] = dict()
bn.Vdata[vertex]["children"] = graphskeleton.getchildren(vertex)
bn.Vdata[vertex]["parents"] = graphskeleton.getparents(vertex)
# make placeholders for vals, cprob, and numoutcomes
bn.Vdata[vertex]["vals"] = []
if (bn.Vdata[vertex]["parents"] == []):
bn.Vdata[vertex]["cprob"] = []
else:
bn.Vdata[vertex]["cprob"] = dict()
bn.Vdata[vertex]["numoutcomes"] = 0
bn = DiscreteBayesianNetwork(bn)
# determine which outcomes are possible for each node
for sample in data:
for vertex in bn.V:
if (sample[vertex] not in bn.Vdata[vertex]["vals"]):
bn.Vdata[vertex]["vals"].append(sample[vertex])
bn.Vdata[vertex]["numoutcomes"] += 1
# lay out probability tables, and put a [num, denom] entry in all spots:
# define helper function to recursively set up cprob table
def addlevel(vertex, _dict, key, depth, totaldepth):
if depth == totaldepth:
_dict[str(key)] = []
for _ in range(bn.Vdata[vertex]["numoutcomes"]):
_dict[str(key)].append([0, 0])
return
else:
for val in bn.Vdata[bn.Vdata[vertex]["parents"][depth]]["vals"]:
ckey = key[:]
ckey.append(str(val))
addlevel(vertex, _dict, ckey, depth+1, totaldepth)
# put [0, 0] at each entry of cprob table
for vertex in bn.V:
if (bn.Vdata[vertex]["parents"]):
root = bn.Vdata[vertex]["cprob"]
numparents = len(bn.Vdata[vertex]["parents"])
addlevel(vertex, root, [], 0, numparents)
else:
for _ in range(bn.Vdata[vertex]["numoutcomes"]):
bn.Vdata[vertex]["cprob"].append([0, 0])
# fill out entries with samples:
for sample in data:
for vertex in bn.V:
# compute index of result
rindex = bn.Vdata[vertex]["vals"].index(sample[vertex])
# go to correct place in Vdata
if bn.Vdata[vertex]["parents"]:
pvals = [str(sample[t]) for t in bn.Vdata[vertex]["parents"]]
lev = bn.Vdata[vertex]["cprob"][str(pvals)]
else:
lev = bn.Vdata[vertex]["cprob"]
# increase all denominators for the current condition
for entry in lev:
entry[1] += 1
# increase numerator for current outcome
lev[rindex][0] += 1
# convert arrays to floats
for vertex in bn.V:
if not bn.Vdata[vertex]["parents"]:
bn.Vdata[vertex]["cprob"] = [x[0]/float(x[1]) for x in bn.Vdata[vertex]["cprob"]]
else:
for key in bn.Vdata[vertex]["cprob"].keys():
try:
bn.Vdata[vertex]["cprob"][key] = [x[0]/float(x[1]) for x in bn.Vdata[vertex]["cprob"][key]]
# default to even distribution if no data points
except ZeroDivisionError:
bn.Vdata[vertex]["cprob"][key] = [1/float(bn.Vdata[vertex]["numoutcomes"]) for x in bn.Vdata[vertex]["cprob"][key]]
# return cprob table with estimated probability distributions
return bn
def lg_mle_estimateparams(self, graphskeleton, data):
'''
Estimate parameters for a linear Gaussian Bayesian network with a structure given by *graphskeleton* in order to maximize the probability of data given by *data*. This function takes the following arguments:
1. *graphskeleton* -- An instance of the :doc:`GraphSkeleton <graphskeleton>` class containing vertex and edge data.
2. *data* -- A list of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 74.343,
'Intelligence': 29.545,
...
},
...
]
The algorithm used to calculate the linear Gaussian parameters is beyond the scope of this documentation -- for a full explanation, cf. Koller et al. 729. After the parameters are calculated, the program instantiates a :doc:`LGBayesianNetwork <lgbayesiannetwork>` instance based on the *graphskeleton*, and modifies that instance's *Vdata* attribute to reflect the estimated CPDs. It then returns the instance.
The Vdata attribute instantiated is in the format seen in the input file example :doc:`unittestdict`, as described in :doc:`discretebayesiannetwork`.
Usage example: this would learn parameters from a set of 200 linear Gaussian samples::
import json
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.lgbayesiannetwork import LGBayesianNetwork
from libpgm.pgmlearner import PGMLearner
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestlgdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
lgbn = LGBayesianNetwork(skel, nd)
data = lgbn.randomsample(200)
# instantiate my learner
learner = PGMLearner()
# estimate parameters
result = learner.lg_mle_estimateparams(skel, data)
# output
print json.dumps(result.Vdata, indent=2)
'''
assert (isinstance(graphskeleton, GraphSkeleton)), "First arg must be a loaded GraphSkeleton class."
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Second arg must be a list of dicts."
# instantiate Bayesian network, and add parent and children data
bn = StaticNodeData()
graphskeleton.toporder()
for vertex in graphskeleton.V:
bn.Vdata[vertex] = dict()
bn.Vdata[vertex]["children"] = graphskeleton.getchildren(vertex)
bn.Vdata[vertex]["parents"] = graphskeleton.getparents(vertex)
# make placeholders for mean_base, mean_scal, and variance
bn.Vdata[vertex]["mean_base"] = 0.0
bn.Vdata[vertex]["mean_scal"] = []
for parent in bn.Vdata[vertex]["parents"]:
bn.Vdata[vertex]["mean_scal"].append(0.0)
bn.Vdata[vertex]["variance"] = 0.0
bn = LGBayesianNetwork(bn)
# make covariance table, array of E[X_i] for each vertex, and table
# of E[X_i * X_j] for each combination of vertices
cov = [[0 for _ in range(len(bn.V))] for __ in range(len(bn.V))]
singletons = [0 for _ in range(len(bn.V))]
numtrials = len(data)
for sample in data:
for x in range(len(bn.V)):
singletons[x] += sample[bn.V[x]]
for y in range(len(bn.V)):
cov[x][y] += sample[bn.V[x]] * sample[bn.V[y]]
for x in range(len(bn.V)):
singletons[x] /= float(numtrials)
for y in range(len(bn.V)):
cov[x][y] /= float(numtrials)
# (save copy. this is the E[X_i * X_j] table)
product_expectations = [[cov[x][y] for y in range(len(bn.V))] for x in range(len(bn.V))]
for x in range(len(bn.V)):
for y in range(len(bn.V)):
cov[x][y] = cov[x][y] - (singletons[x] * singletons[y])
# construct system of equations and solve (for each node)
for x in range(len(bn.V)):
# start with the E[X_i * X_j] table
system = [[product_expectations[p][q] for q in range(len(bn.V))] for p in range(len(bn.V))]
# step 0: remove all entries from all the tables except for node and its parents
rowstokeep = [x]
for z in range(len(bn.V)):
if bn.V[z] in bn.Vdata[bn.V[x]]["parents"]:
rowstokeep.append(z)
smalldim = len(rowstokeep)
smallsystem = [[0 for _ in range(smalldim)] for __ in range(smalldim)]
smallcov = [[0 for _ in range(smalldim)] for __ in range(smalldim)]
smallsing = [0 for _ in range(smalldim)]
for index in range(len(rowstokeep)):
smallsing[index] = singletons[rowstokeep[index]]
for index2 in range(len(rowstokeep)):
smallsystem[index][index2] = system[rowstokeep[index]][rowstokeep[index2]]
smallcov[index][index2] = cov[rowstokeep[index]][rowstokeep[index2]]
# step 1: delete and copy row corresponding to node (using [row][column] notation)
tmparray = [0 for _ in range(smalldim)]
for y in range(smalldim):
if (y > 0):
for j in range(smalldim):
smallsystem[y-1][j] = smallsystem[y][j]
if (y == 0):
for j in range(smalldim):
tmparray[j] = smallsystem[y][j]
# step 2: delete column, leaving system with all entries
# corresponding to parents of node
for y in range(smalldim):
if (y > 0):
for j in range(smalldim):
smallsystem[j][y-1] = smallsystem[j][y]
# step 3: take entry for node out of singleton array and store it
bordarray = []
for y in range(smalldim):
if (y != 0):
bordarray.append(smallsing[y])
else:
tmpentry = smallsing[y]
# step 4: add border array on borders of system
for y in range(len(bordarray)):
smallsystem[smalldim - 1][y] = bordarray[y]
smallsystem[y][smalldim - 1] = bordarray[y]
smallsystem[smalldim - 1][smalldim - 1] = 1
# step 5: construct equality vector (the 'b' of ax = b)
evector = [0 for _ in range(smalldim)]
for y in range(smalldim):
if (y != smalldim - 1):
evector[y] = tmparray[y + 1]
else:
evector[y] = tmpentry
# use numpy to solve
a = np.array(smallsystem)
b = np.array(evector)
solve = list(np.linalg.solve(a, b))
# fill mean_base and mean_scal[] with this data
bn.Vdata[bn.V[x]]["mean_base"] = solve[smalldim - 1]
for i in range(smalldim - 1):
bn.Vdata[bn.V[x]]["mean_scal"][i] = solve[i]
# add variance
variance = smallcov[0][0]
for y in range(1, smalldim):
for z in range(1, smalldim):
variance -= (bn.Vdata[bn.V[x]]["mean_scal"][y-1] * bn.Vdata[bn.V[x]]["mean_scal"][z-1] * smallcov[y][z])
bn.Vdata[bn.V[x]]["variance"] = variance
# that's all folks
return bn
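# --- Editor's note (added sketch, hedged) ---
# The normal-equation solve above is equivalent to an ordinary least-squares
# fit of each node on its parents. A compact sketch with numpy.linalg.lstsq;
# `parent_vals` and `child_vals` are hypothetical arrays, not part of
# libpgm's API:
#
#   X = np.column_stack([parent_vals, np.ones(len(child_vals))])
#   coef, _, _, _ = np.linalg.lstsq(X, child_vals)
#   mean_scal, mean_base = list(coef[:-1]), coef[-1]
#   variance = np.mean((child_vals - X.dot(coef)) ** 2)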
def discrete_constraint_estimatestruct(self, data, pvalparam=0.05, indegree=1):
'''
Learn a Bayesian network structure from discrete data given by *data*, using constraint-based approaches. This function first calculates all the independencies and conditional independencies present between variables in the data. To calculate dependencies, it uses the *discrete_condind* method on each pair of variables, conditioned on other sets of variables of size *indegree* or smaller, to generate a chi-squared result and a p-value. If this p-value is less than *pvalparam*, the pair of variables are considered dependent conditioned on the variable set. Once all true dependencies -- pairs of variables that are dependent no matter what they are conditioned by -- are found, the algorithm uses these dependencies to construct a directed acyclic graph. It returns this DAG in the form of a :doc:`GraphSkeleton <graphskeleton>` class.
Arguments:
1. *data* -- An array of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 'B',
'SAT': 'lowscore',
...
},
...
]
2. *pvalparam* -- (Optional, default is 0.05) The p-value below which to consider something significantly unlikely. A common number used is 0.05. This is passed to *discrete_condind* when it is called.
3. *indegree* -- (Optional, default is 1) The upper bound on the size of a witness set (see Koller et al. 85). If this is larger than 1, a huge amount of samples in *data* are required to avoid a divide-by-zero error.
Usage example: this would learn structure from a set of 8000 discrete samples::
import json
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from libpgm.pgmlearner import PGMLearner
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
bn = DiscreteBayesianNetwork(skel, nd)
data = bn.randomsample(8000)
# instantiate my learner
learner = PGMLearner()
# estimate structure
result = learner.discrete_constraint_estimatestruct(data)
# output
print json.dumps(result.E, indent=2)
'''
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Arg must be a list of dicts."
# instantiate array of variables and array of potential dependencies
variables = list(data[0].keys())
ovariables = variables[:]
dependencies = []
for x in variables:
ovariables.remove(x)
for y in ovariables:
if (x != y):
dependencies.append([x, y])
# define helper function to find subsets
def subsets(array):
result = []
for i in range(indegree + 1):
comb = itertools.combinations(array, i)
for c in comb:
result.append(list(c))
return result
witnesses = []
othervariables = variables[:]
# for each pair of variables X, Y:
for X in variables:
othervariables.remove(X)
for Y in othervariables:
# consider all sets of witnesses that do not have X or Y in
# them, and are less than or equal to the size specified by
# the "indegree" argument
for U in subsets(variables):
if (X not in U) and (Y not in U) and len(U) <= indegree:
# determine conditional independence
chi, pv, witness = self.discrete_condind(data, X, Y, U)
if pv > pvalparam:
msg = "***%s and %s are found independent (chi = %f, pv = %f) with witness %s***" % (X, Y, chi, pv, U)
try:
dependencies.remove([X, Y])
dependencies.remove([Y, X])
except:
pass
witnesses.append([X, Y, witness])
break
# now that we have found our dependencies, run build PDAG (cf. Koller p. 89)
# with the stored set of independencies:
# assemble undirected graph skeleton
pdag = GraphSkeleton()
pdag.E = dependencies
pdag.V = variables
# adjust for immoralities (cf. Koller 86)
dedges = [x[:] for x in pdag.E]
for edge in dedges:
edge.append('u')
# define helper method "exists_undirected_edge"
def exists_undirected_edge(one_end, the_other_end):
for edge in dedges:
if len(edge) == 3:
if (edge[0] == one_end and edge[1] == the_other_end):
return True
elif (edge[1] == one_end and edge[0] == the_other_end):
return True
return False
# define helper method "exists_edge"
def exists_edge(one_end, the_other_end):
if exists_undirected_edge(one_end, the_other_end):
return True
elif [one_end, the_other_end] in dedges:
return True
elif [the_other_end, one_end] in dedges:
return True
return False
for edge1 in reversed(dedges):
for edge2 in reversed(dedges):
if (edge1 in dedges) and (edge2 in dedges):
if edge1[0] == edge2[1] and not exists_edge(edge1[1], edge2[0]):
if (([edge1[1], edge2[0], [edge1[0]]] not in witnesses) and ([edge2[0], edge1[1], [edge1[0]]] not in witnesses)):
dedges.append([edge1[1], edge1[0]])
dedges.append([edge2[0], edge2[1]])
dedges.remove(edge1)
dedges.remove(edge2)
elif edge1[1] == edge2[0] and not exists_edge(edge1[0], edge2[1]):
if (([edge1[0], edge2[1], [edge1[1]]] not in witnesses) and ([edge2[1], edge1[0], [edge1[1]]] not in witnesses)):
dedges.append([edge1[0], edge1[1]])
dedges.append([edge2[1], edge2[0]])
dedges.remove(edge1)
dedges.remove(edge2)
elif edge1[1] == edge2[1] and edge1[0] != edge2[0] and not exists_edge(edge1[0], edge2[0]):
if (([edge1[0], edge2[0], [edge1[1]]] not in witnesses) and ([edge2[0], edge1[0], [edge1[1]]] not in witnesses)):
dedges.append([edge1[0], edge1[1]])
dedges.append([edge2[0], edge2[1]])
dedges.remove(edge1)
dedges.remove(edge2)
elif edge1[0] == edge2[0] and edge1[1] != edge2[1] and not exists_edge(edge1[1], edge2[1]):
if (([edge1[1], edge2[1], [edge1[0]]] not in witnesses) and ([edge2[1], edge1[1], [edge1[0]]] not in witnesses)):
dedges.append([edge1[1], edge1[0]])
dedges.append([edge2[1], edge2[0]])
dedges.remove(edge1)
dedges.remove(edge2)
# use right hand rules to improve graph until convergence (Koller 89)
olddedges = []
while (olddedges != dedges):
olddedges = [x[:] for x in dedges]
for edge1 in reversed(dedges):
for edge2 in reversed(dedges):
# rule 1
inverted = False
check1, check2 = False, True
if (edge1[1] == edge2[0] and len(edge1) == 2 and len(edge2) == 3):
check1 = True
elif (edge1[1] == edge2[1] and len(edge1) == 2 and len(edge2) == 3):
check1 = True
inverted = True
for edge3 in dedges:
if edge3 != edge1 and ((edge3[0] == edge1[0] and edge3[1]
== edge2[1]) or (edge3[1] == edge1[0] and edge3[0]
== edge2[1])):
check2 = False
if check1 == True and check2 == True:
if inverted:
dedges.append([edge1[1], edge2[0]])
else:
dedges.append([edge1[1], edge2[1]])
dedges.remove(edge2)
# rule 2
check1, check2 = False, False
if (edge1[1] == edge2[0] and len(edge1) == 2 and len(edge2) == 2):
check1 = True
for edge3 in dedges:
if ((edge3[0] == edge1[0] and edge3[1]
== edge2[1]) or (edge3[1] == edge1[0] and edge3[0]
== edge2[1]) and len(edge3) == 3):
check2 = True
if check1 == True and check2 == True:
if edge3[0] == edge1[0]:
dedges.append([edge3[0], edge3[1]])
elif edge3[1] == edge1[0]:
dedges.append([edge3[1], edge3[0]])
dedges.remove(edge3)
# rule 3
check1, check2 = False, False
if len(edge1) == 2 and len(edge2) == 2:
if (edge1[1] == edge2[1] and edge1[0] != edge2[0]):
check1 = True
for v in variables:
if (exists_undirected_edge(v, edge1[0]) and
exists_undirected_edge(v, edge1[1]) and
exists_undirected_edge(v, edge2[0])):
check2 = True
if check1 == True and check2 == True:
dedges.append([v, edge1[1]])
for edge3 in dedges:
if (len(edge3) == 3 and ((edge3[0] == v and edge3[1]
== edge1[1]) or (edge3[1] == v and edge3[0] ==
edge1[1]))):
dedges.remove(edge3)
# return one possible graph skeleton from the pdag class found
for x in range(len(dedges)):
if len(dedges[x]) == 3:
dedges[x] = dedges[x][:2]
pdag.E = dedges
pdag.toporder()
return pdag
def lg_constraint_estimatestruct(self, data, pvalparam=0.05, bins=10, indegree=1):
'''
Learn a Bayesian network structure from linear Gaussian data given by *data* using constraint-based approaches. This function works by discretizing the linear Gaussian data into *bins* number of bins, and running the *discrete_constraint_estimatestruct* method on that discrete data with *pvalparam* and *indegree* as arguments. It returns the :doc:`GraphSkeleton <graphskeleton>` instance returned by this function.
Arguments:
1. *data* -- An array of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 78.3223,
'SAT': 56.33,
...
},
...
]
2. *pvalparam* -- (Optional, default is 0.05) The p-value below which to consider something significantly unlikely. A common number used is 0.05
3. *bins* -- (Optional, default is 10) The number of bins to discretize the data into. The method is to find the highest and lowest value, divide that interval uniformly into a certain number of bins, and place the data inside. This number must be chosen carefully in light of the number of trials. There must be at least 5 trials in every bin, with more if the indegree is increased.
4. *indegree* -- (Optional, default is 1) The upper bound on the size of a witness set (see Koller et al. 85). If this is larger than 1, a huge amount of trials are required to avoid a divide-by-zero error.
The number of bins and indegree must be chosen carefully based on the size and nature of the data set. Too many bins will lead to not enough data per bin, while too few bins will lead to dependencies not getting noticed.
Usage example: this would learn structure from a set of 8000 linear Gaussian samples::
import json
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.lgbayesiannetwork import LGBayesianNetwork
from libpgm.pgmlearner import PGMLearner
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
lgbn = LGBayesianNetwork(skel, nd)
data = lgbn.randomsample(8000)
# instantiate my learner
learner = PGMLearner()
# estimate structure
result = learner.lg_constraint_estimatestruct(data)
# output
print json.dumps(result.E, indent=2)
'''
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Arg must be a list of dicts."
cdata = copy.deepcopy(data)
# establish ranges
ranges = dict()
for variable in cdata[0].keys():
ranges[variable] = [float("infinity"), float("infinity") * -1]
for sample in cdata:
for var in sample.keys():
if sample[var] < ranges[var][0]:
ranges[var][0] = sample[var]
if sample[var] > ranges[var][1]:
ranges[var][1] = sample[var]
# discretize cdata set
bincounts = dict()
for key in cdata[0].keys():
bincounts[key] = [0 for _ in range(bins)]
for sample in cdata:
for i in range(bins):
for var in sample.keys():
if (sample[var] >= (ranges[var][0] + (ranges[var][1] - ranges[var][0]) * i / float(bins)) and (sample[var] <= (ranges[var][0] + (ranges[var][1] - ranges[var][0]) * (i + 1) / float(bins)))):
sample[var] = i
bincounts[var][i] += 1
# run discrete_constraint_estimatestruct
return self.discrete_constraint_estimatestruct(cdata, pvalparam, indegree)
def discrete_condind(self, data, X, Y, U):
'''
Test how independent a variable *X* and a variable *Y* are in a discrete data set given by *data*, where the independence is conditioned on a set of variables given by *U*. This method works by assuming as a null hypothesis that the variables are conditionally independent on *U*, and thus that:
.. math::
P(X, Y, U) = P(U) \\cdot P(X|U) \\cdot P(Y|U)
It tests the deviance of the data from this null hypothesis, returning the result of a chi-square test and a p-value.
Arguments:
1. *data* -- An array of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 'B',
'SAT': 'lowscore',
...
},
...
]
2. *X* -- A variable whose dependence on Y we are testing given U.
3. *Y* -- A variable whose dependence on X we are testing given U.
4. *U* -- A list of variables that are given.
Returns:
1. *chi* -- The result of the chi-squared test on the data. This is a
measure of the deviance of the actual distribution of X and
Y given U from the expected distribution of X and Y given U.
Since the null hypothesis is that X and Y are independent
given U, the expected distribution is that :math:`P(X, Y, U) =
P(U) P(X | U) P (Y | U)`.
2. *pval* -- The p-value of the test, meaning the probability of
attaining a chi-square result as extreme as or more extreme
than the one found, assuming that the null hypothesis is
true. (e.g., a p-value of .05 means that if X and Y were
independent given U, the chance of getting a chi-squared
result this high or higher are .05)
3. *U* -- The 'witness' of X and Y's independence. This is the variable
that, when it is known, leaves X and Y independent.
For more information see Koller et al. 790.
'''
# find possible outcomes and store
_outcomes = dict()
for key in data[0].keys():
_outcomes[key] = [data[0][key]]
for sample in data:
for key in _outcomes.keys():
if _outcomes[key].count(sample[key]) == 0:
_outcomes[key].append(sample[key])
# store number of outcomes for X, Y, and U
Xnumoutcomes = len(_outcomes[X])
Ynumoutcomes = len(_outcomes[Y])
Unumoutcomes = []
for val in U:
Unumoutcomes.append(len(_outcomes[val]))
# calculate P(U) -- the distribution of U
PU = 1
# define helper function to add a dimension to an array recursively
def add_dimension_to_array(mdarray, size):
if isinstance(mdarray, list):
for h in range(len(mdarray)):
mdarray[h] = add_dimension_to_array(mdarray[h], size)
return mdarray
else:
mdarray = [0 for _ in range(size)]
return mdarray
# make PU the right size
for size in Unumoutcomes:
PU = add_dimension_to_array(PU, size)
# fill with data
if (len(U) > 0):
for sample in data:
tmp = PU
for x in range(len(U)-1):
Uindex = _outcomes[U[x]].index(sample[U[x]])
tmp = tmp[Uindex]
lastindex = _outcomes[U[-1]].index(sample[U[-1]])
tmp[lastindex] += 1
# calculate P(X, U) -- the distribution of X and U
PXandU = [0 for _ in range(Xnumoutcomes)]
for size in Unumoutcomes:
PXandU = add_dimension_to_array(PXandU, size)
for sample in data:
Xindex = _outcomes[X].index(sample[X])
if len(U) > 0:
tmp = PXandU[Xindex]
for x in range(len(U)-1):
Uindex = _outcomes[U[x]].index(sample[U[x]])
tmp = tmp[Uindex]
lastindex = _outcomes[U[-1]].index(sample[U[-1]])
tmp[lastindex] += 1
else:
PXandU[Xindex] += 1
# calculate P(Y, U) -- the distribution of Y and U
PYandU = [0 for _ in range(Ynumoutcomes)]
for size in Unumoutcomes:
PYandU = add_dimension_to_array(PYandU, size)
for sample in data:
Yindex = _outcomes[Y].index(sample[Y])
if len(U) > 0:
tmp = PYandU[Yindex]
for x in range(len(U)-1):
Uindex = _outcomes[U[x]].index(sample[U[x]])
tmp = tmp[Uindex]
lastindex = _outcomes[U[-1]].index(sample[U[-1]])
tmp[lastindex] += 1
else:
PYandU[Yindex] += 1
# assemble P(U)P(X|U)P(Y|U) -- the expected distribution if X and Y are
# independent given U.
expected = [[ 0 for _ in range(Ynumoutcomes)] for __ in range(Xnumoutcomes)]
# define helper function to multiply the entries of two matrices
def multiply_entries(matrixa, matrixb):
matrix1 = copy.deepcopy(matrixa)
matrix2 = copy.deepcopy(matrixb)
if isinstance(matrix1, list):
for h in range(len(matrix1)):
matrix1[h] = multiply_entries(matrix1[h], matrix2[h])
return matrix1
else:
return (matrix1 * matrix2)
# define helper function to divide the entries of two matrices
def divide_entries(matrixa, matrixb):
matrix1 = copy.deepcopy(matrixa)
matrix2 = copy.deepcopy(matrixb)
if isinstance(matrix1, list):
for h in range(len(matrix1)):
matrix1[h] = divide_entries(matrix1[h], matrix2[h])
return matrix1
else:
return (matrix1 / float(matrix2))
# combine known graphs to calculate P(U)P(X|U)P(Y|U)
for x in range(Xnumoutcomes):
for y in range(Ynumoutcomes):
product = multiply_entries(PXandU[x], PYandU[y])
final = divide_entries(product, PU)
expected[x][y] = final
# find P(XYU) -- the actual distribution of X, Y, and U -- in sample
PXYU = [[ 0 for _ in range(Ynumoutcomes)] for __ in range(Xnumoutcomes)]
for size in Unumoutcomes:
PXYU = add_dimension_to_array(PXYU, size)
for sample in data:
Xindex = _outcomes[X].index(sample[X])
Yindex = _outcomes[Y].index(sample[Y])
if len(U) > 0:
tmp = PXYU[Xindex][Yindex]
for x in range(len(U)-1):
Uindex = _outcomes[U[x]].index(sample[U[x]])
tmp = tmp[Uindex]
lastindex = _outcomes[U[-1]].index(sample[U[-1]])
tmp[lastindex] += 1
else:
PXYU[Xindex][Yindex] += 1
# use scipy's chisquare to determine the deviance of the evidence
a = np.array(expected)
a = a.flatten()
b = np.array(PXYU)
b = b.flatten()
# delete entries with value 0 (they mess up the chisquare function)
for i in reversed(range(b.size)):
if (b[i] == 0):
if i != 0:
a.itemset(i-1, a[i-1]+a[i])
a = np.delete(a, i)
b = np.delete(b, i)
# run chi-squared
chi, pv = chisquare(a, b)
# return chi-squared result, p-value for that result, and witness
return chi, pv, U
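# --- Editor's note (added sketch, hedged) ---
# The null hypothesis above can be checked by hand on a tiny 2x2 table with
# scipy; the counts below are fabricated for illustration:
#
#   from scipy.stats import chisquare
#   observed = [30, 10, 10, 30]    # flattened counts of (X, Y) outcomes
#   expected = [20, 20, 20, 20]    # counts implied by P(X)P(Y), N = 80
#   chi, pv = chisquare(observed, expected)
#   # a small pv (e.g. < 0.05) rejects independence of X and Y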
def discrete_estimatebn(self, data, pvalparam=.05, indegree=1):
'''
Fully learn a Bayesian network from discrete data given by *data*. This function combines the *discrete_constraint_estimatestruct* method (where it passes in the *pvalparam* and *indegree* arguments) with the *discrete_mle_estimateparams* method. It returns a complete :doc:`DiscreteBayesianNetwork <discretebayesiannetwork>` class instance learned from the data.
Arguments:
1. *data* -- An array of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 'B',
'SAT': 'lowscore',
...
},
...
]
2. *pvalparam* -- The p-value below which to consider something significantly unlikely. A common number used is 0.05
3. *indegree* -- The upper bound on the size of a witness set (see Koller et al. 85). If this is larger than 1, a huge amount of trials are required to avoid a divide-by-zero error.
'''
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Arg must be a list of dicts."
# learn graph skeleton
skel = self.discrete_constraint_estimatestruct(data, pvalparam=pvalparam, indegree=indegree)
# learn parameters
bn = self.discrete_mle_estimateparams(skel, data)
# return
return bn
def lg_estimatebn(self, data, pvalparam=.05, bins=10, indegree=1):
'''
Fully learn a Bayesian network from linear Gaussian data given by *data*. This function combines the *lg_constraint_estimatestruct* method (where it passes in the *pvalparam*, *bins*, and *indegree* arguments) with the *lg_mle_estimateparams* method. It returns a complete :doc:`LGBayesianNetwork <lgbayesiannetwork>` class instance learned from the data.
Arguments:
1. *data* -- An array of dicts containing samples from the network in {vertex: value} format. Example::
[
{
'Grade': 75.23423,
'SAT': 873.42342,
...
},
...
]
2. *pvalparam* -- The p-value below which to consider something significantly unlikely. A common number used is 0.05
3. *bins* -- The number of bins to discretize the data into. There must be enough trials in every bin for the chi-squared test to be meaningful.
4. *indegree* -- The upper bound on the size of a witness set (see Koller et al. 85). If this is larger than 1, a huge amount of trials are required to avoid a divide-by-zero error.
Usage example: this would learn entire Bayesian networks from sets of 8000 data points::
import json
from libpgm.nodedata import NodeData
from libpgm.graphskeleton import GraphSkeleton
from libpgm.lgbayesiannetwork import LGBayesianNetwork
from libpgm.discretebayesiannetwork import DiscreteBayesianNetwork
from libpgm.pgmlearner import PGMLearner
# LINEAR GAUSSIAN
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestlgdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
lgbn = LGBayesianNetwork(skel, nd)
data = lgbn.randomsample(8000)
# instantiate my learner
learner = PGMLearner()
# learn bayesian network
result = learner.lg_estimatebn(data)
# output
print json.dumps(result.E, indent=2)
print json.dumps(result.Vdata, indent=2)
# DISCRETE
# generate some data to use
nd = NodeData()
nd.load("../tests/unittestdict.txt") # an input file
skel = GraphSkeleton()
skel.load("../tests/unittestdict.txt")
skel.toporder()
bn = DiscreteBayesianNetwork(skel, nd)
data = bn.randomsample(8000)
# instantiate my learner
learner = PGMLearner()
# learn bayesian network
result = learner.discrete_estimatebn(data)
# output
print json.dumps(result.E, indent=2)
print json.dumps(result.Vdata, indent=2)
'''
assert (isinstance(data, list) and data and isinstance(data[0], dict)), "Arg must be a list of dicts."
# learn graph skeleton
skel = self.lg_constraint_estimatestruct(data, pvalparam=pvalparam, bins=bins, indegree=indegree)
# learn parameters
bn = self.lg_mle_estimateparams(skel, data)
# return
return bn
| bsd-3-clause | 6,197,193,214,556,969,000 | 45.985567 | 849 | 0.545177 | false |
Yuecai/com-yuecai-dream | src/nodelay/forms.py | 1 | 9690 | # coding=utf-8
#########################################################################
# File Name: forms.py
# Original Author: 段凯强
# Mail: [email protected]
# Created Time: 2013-12-26
# Update:
#########################################################################
#########################################################################
# Copyright (c) 2013~2014 by 段凯强
# Read the file "license" distributed with these sources, or XXXX
# XXXXXXXXXXXXXXXXXX switch for additional information, such as how
# to use, copy, modify, sell and/or distribute this software and its
# documentation for any purpose.
#########################################################################
import datetime, time
import re
from django import forms
class BasicTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
title = forms.CharField()
content = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Basic':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_title(self):
pattern = re.compile(u'^[a-zA-Z0-9\u4e00-\u9fa5]+$')
title = self.cleaned_data['title']
l = len(title)
if l >= 1 and l <= 10 and pattern.match(title):
return title
raise forms.ValidationError('title_err')
def clean_content(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
content = self.cleaned_data['content']
l = len(content)
if l >= 1 and l <= 100 and pattern.match(content) and not pattern_blank.match(content):
return content
raise forms.ValidationError('content_err')
class BookTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
bookName = forms.CharField()
readFrom = forms.CharField()
readTo = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Book':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_bookName(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
bookName = self.cleaned_data['bookName']
l = len(bookName)
if l >= 1 and l <= 50 and pattern.match(bookName) and not pattern_blank.match(bookName):
return bookName
raise forms.ValidationError('bookName_err')
def clean_readFrom(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
readFrom = self.cleaned_data['readFrom']
l = len(readFrom)
if l >= 1 and l <= 50 and pattern.match(readFrom) and not pattern_blank.match(readFrom):
return readFrom
raise forms.ValidationError('readFrom_err')
def clean_readTo(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
readTo = self.cleaned_data['readTo']
l = len(readTo)
if l >= 1 and l <= 50 and pattern.match(readTo) and not pattern_blank.match(readTo):
return readTo
raise forms.ValidationError('readTo_err')
class WorkTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
summary = forms.CharField()
goal = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Work':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_summary(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
summary = self.cleaned_data['summary']
l = len(summary)
if l >= 1 and l <= 50 and pattern.match(summary) and not pattern_blank.match(summary):
return summary
        raise forms.ValidationError('summary_err')
def clean_goal(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
goal = self.cleaned_data['goal']
l = len(goal)
if l >= 1 and l <= 50 and pattern.match(goal) and not pattern_blank.match(goal):
return goal
raise forms.ValidationError('goal_err')
class HomeworkTaskForm(forms.Form):
taskType = forms.CharField()
date = forms.DateField()
time = forms.IntegerField()
courseName = forms.CharField()
introduction = forms.CharField()
def clean_taskType(self):
taskType = self.cleaned_data['taskType']
if taskType == u'Homework':
return taskType
raise forms.ValidationError('taskType_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
def clean_courseName(self):
pattern = re.compile(u'^[a-zA-Z0-9\u4e00-\u9fa5]+$')
courseName = self.cleaned_data['courseName']
l = len(courseName)
if l >= 1 and l <= 10 and pattern.match(courseName):
return courseName
raise forms.ValidationError('courseName_err')
def clean_introduction(self):
pattern = re.compile(u'^[()!¥?。,《》{}【】“”·、:;‘’……\s\x21-\x7e\u4e00-\u9fa5]+$')
pattern_blank = re.compile(u'^\s+$')
introduction = self.cleaned_data['introduction']
l = len(introduction)
if l >= 1 and l <= 100 and pattern.match(introduction) and not pattern_blank.match(introduction):
return introduction
raise forms.ValidationError('introduction_err')
class TaskIdForm(forms.Form):
taskId = forms.IntegerField()
def clean_taskId(self):
taskId = self.cleaned_data['taskId']
if taskId > 0:
return taskId
raise forms.ValidationError('taskId_err')
class ChangeDateForm(forms.Form):
taskId = forms.IntegerField()
date = forms.DateField()
time = forms.IntegerField()
def clean_taskId(self):
taskId = self.cleaned_data['taskId']
if taskId > 0:
return taskId
raise forms.ValidationError('taskId_err')
def clean_date(self):
date = self.cleaned_data['date']
today = datetime.date.today()
if date < today + datetime.timedelta(days = 8) and date > today:
return date
raise forms.ValidationError('date_err')
def clean_time(self):
time = self.cleaned_data['time']
if time >= 1 and time <= 3:
return time
raise forms.ValidationError('time_err')
class ExchangeTaskForm(forms.Form):
taskId1 = forms.IntegerField()
taskId2 = forms.IntegerField()
def clean_taskId1(self):
taskId1 = self.cleaned_data['taskId1']
if taskId1 > 0:
return taskId1
raise forms.ValidationError('taskId1_err')
def clean_taskId2(self):
taskId2 = self.cleaned_data['taskId2']
if taskId2 > 0:
return taskId2
raise forms.ValidationError('taskId2_err')
class DelayShiftTaskForm(forms.Form):
fromId = forms.IntegerField()
toId = forms.IntegerField()
def clean_fromId(self):
fromId = self.cleaned_data['fromId']
if fromId > 0:
return fromId
raise forms.ValidationError('fromId_err')
def clean_toId(self):
toId = self.cleaned_data['toId']
if toId > 0:
return toId
raise forms.ValidationError('toId_err')
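# Example (sketch): validating a hypothetical payload with BasicTaskForm.
# Note that clean_date() only accepts dates within the coming week.
#
#   form = BasicTaskForm({'taskType': u'Basic', 'date': '2014-01-02',
#                         'time': 1, 'title': u'demo', 'content': u'hello'})
#   if form.is_valid():
#       cleaned = form.cleaned_data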
| bsd-3-clause | 8,940,460,477,779,460,000 | 33.025362 | 105 | 0.577787 | false |
quantopian/zipline | zipline/data/in_memory_daily_bars.py | 1 | 5363 | from six import iteritems
import numpy as np
import pandas as pd
from pandas import NaT
from trading_calendars import TradingCalendar
from zipline.data.bar_reader import OHLCV, NoDataOnDate, NoDataForSid
from zipline.data.session_bars import CurrencyAwareSessionBarReader
from zipline.utils.input_validation import expect_types, validate_keys
from zipline.utils.pandas_utils import check_indexes_all_same
class InMemoryDailyBarReader(CurrencyAwareSessionBarReader):
"""
A SessionBarReader backed by a dictionary of in-memory DataFrames.
Parameters
----------
frames : dict[str -> pd.DataFrame]
Dictionary from field name ("open", "high", "low", "close", or
"volume") to DataFrame containing data for that field.
calendar : str or trading_calendars.TradingCalendar
Calendar (or name of calendar) to which data is aligned.
currency_codes : pd.Series
Map from sid -> listing currency for that sid.
verify_indices : bool, optional
Whether or not to verify that input data is correctly aligned to the
given calendar. Default is True.
"""
@expect_types(
frames=dict,
calendar=TradingCalendar,
verify_indices=bool,
currency_codes=pd.Series,
)
def __init__(self,
frames,
calendar,
currency_codes,
verify_indices=True):
self._frames = frames
self._values = {key: frame.values for key, frame in iteritems(frames)}
self._calendar = calendar
self._currency_codes = currency_codes
validate_keys(frames, set(OHLCV), type(self).__name__)
if verify_indices:
verify_frames_aligned(list(frames.values()), calendar)
self._sessions = frames['close'].index
self._sids = frames['close'].columns
@classmethod
def from_panel(cls, panel, calendar, currency_codes):
"""Helper for construction from a pandas.Panel.
"""
return cls(dict(panel.iteritems()), calendar, currency_codes)
@property
def last_available_dt(self):
return self._calendar[-1]
@property
def trading_calendar(self):
return self._calendar
@property
def sessions(self):
return self._sessions
def load_raw_arrays(self, columns, start_dt, end_dt, assets):
if start_dt not in self._sessions:
raise NoDataOnDate(start_dt)
if end_dt not in self._sessions:
raise NoDataOnDate(end_dt)
asset_indexer = self._sids.get_indexer(assets)
if -1 in asset_indexer:
bad_assets = assets[asset_indexer == -1]
raise NoDataForSid(bad_assets)
date_indexer = self._sessions.slice_indexer(start_dt, end_dt)
out = []
for c in columns:
out.append(self._values[c][date_indexer, asset_indexer])
return out
def get_value(self, sid, dt, field):
"""
Parameters
----------
sid : int
The asset identifier.
day : datetime64-like
Midnight of the day for which data is requested.
field : string
The price field. e.g. ('open', 'high', 'low', 'close', 'volume')
Returns
-------
float
The spot price for colname of the given sid on the given day.
Raises a NoDataOnDate exception if the given day and sid is before
or after the date range of the equity.
Returns -1 if the day is within the date range, but the price is
0.
"""
        return self._frames[field].loc[dt, sid]
def get_last_traded_dt(self, asset, dt):
"""
Parameters
----------
asset : zipline.asset.Asset
The asset identifier.
dt : datetime64-like
Midnight of the day for which data is requested.
Returns
-------
        pd.Timestamp : The last known dt for the asset and dt;
NaT if no trade is found before the given dt.
"""
try:
return self.frames['close'].loc[:, asset.sid].last_valid_index()
except IndexError:
return NaT
@property
def first_trading_day(self):
return self._sessions[0]
def currency_codes(self, sids):
codes = self._currency_codes
return np.array([codes[sid] for sid in sids])
def verify_frames_aligned(frames, calendar):
"""
Verify that DataFrames in ``frames`` have the same indexing scheme and are
aligned to ``calendar``.
Parameters
----------
frames : list[pd.DataFrame]
calendar : trading_calendars.TradingCalendar
Raises
------
ValueError
If frames have different indexes/columns, or if frame indexes do not
match a contiguous region of ``calendar``.
"""
indexes = [f.index for f in frames]
check_indexes_all_same(indexes, message="DataFrame indexes don't match:")
columns = [f.columns for f in frames]
check_indexes_all_same(columns, message="DataFrame columns don't match:")
start, end = indexes[0][[0, -1]]
cal_sessions = calendar.sessions_in_range(start, end)
check_indexes_all_same(
[indexes[0], cal_sessions],
"DataFrame index doesn't match {} calendar:".format(calendar.name),
)
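# Example (sketch): building a reader from synthetic OHLCV frames. The
# calendar name, sids and prices below are hypothetical.
#
#   import pandas as pd
#   from trading_calendars import get_calendar
#
#   cal = get_calendar('XNYS')
#   sessions = cal.sessions_in_range('2016-01-04', '2016-01-08')
#   frames = {field: pd.DataFrame(1.0, index=sessions, columns=[1, 2])
#             for field in ('open', 'high', 'low', 'close', 'volume')}
#   currencies = pd.Series(['USD', 'USD'], index=[1, 2])
#   reader = InMemoryDailyBarReader(frames, cal, currencies)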
| apache-2.0 | -1,749,134,559,772,530,400 | 30.547059 | 78 | 0.611039 | false |
suizokukan/dchars-fe | kshortcuts.py | 1 | 2929 | #!./python_link
# -*- coding: utf-8 -*-
################################################################################
# DChars-FE Copyright (C) 2008 Xavier Faure
# Contact: faure dot epistulam dot mihi dot scripsisti at orange dot fr
#
# This file is part of DChars-FE.
# DChars-FE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DChars-FE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with DChars-FE. If not, see <http://www.gnu.org/licenses/>.
################################################################################
"""
❏DChars-FE❏ kshortcuts.py
(keyboard) shortcuts
"""
################################################################################
class KeyboardShortcut(object):
"""
class KeyboardShortcut
    Use this class to store two representations of a shortcut: the Qt one
    and the "human readable" one.
"""
#///////////////////////////////////////////////////////////////////////////
def __init__(self, qstring, human_readeable_string):
"""
KeyboardShortcut.__init__
"""
self.qstring = qstring
self.human_readeable_string = human_readeable_string
KSHORTCUTS = {
"open" : \
KeyboardShortcut( qstring = "CTRL+O",
human_readeable_string = "CTRL+O" ),
"save as" : \
KeyboardShortcut( qstring = "CTRL+S",
human_readeable_string = "CTRL+S" ),
"exit" : \
KeyboardShortcut( qstring = "CTRL+Q",
human_readeable_string = "CTRL+Q" ),
"display help chars" : \
KeyboardShortcut( qstring = "CTRL+H",
human_readeable_string = "CTRL+H" ),
"apply" : \
KeyboardShortcut( qstring = "CTRL+SPACE",
human_readeable_string = "CTRL+SPACE" ),
"add trans" : \
KeyboardShortcut( qstring = "CTRL++",
human_readeable_string = "CTRL + '+'" ),
"sub trans" : \
KeyboardShortcut( qstring = "CTRL+-",
human_readeable_string = "CTRL + '-'" ),
}
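# Example (sketch): binding a shortcut to a hypothetical Qt action.
#
#   action = QtGui.QAction(u"Open", window)
#   action.setShortcut(KSHORTCUTS["open"].qstring)
#   action.setToolTip(u"Open a file (%s)"
#                     % KSHORTCUTS["open"].human_readeable_string)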
| gpl-3.0 | -7,099,916,806,350,500,000 | 38.527027 | 82 | 0.454701 | false |
akniffe1/fsf | fsf-server/daemon.py | 1 | 3798 | #!/usr/bin/env python
#
# All credit for this class goes to Sander Marechal, 2009-05-31
# Reference: http://www.jejik.com/articles/2007/02/a_simple_unix_linux_daemon_in_python/
#
#
import sys, os, time, atexit
from signal import SIGTERM
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exists. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
| apache-2.0 | 6,438,743,217,424,489,000 | 27.343284 | 110 | 0.511848 | false |
immanetize/nikola | nikola/filters.py | 1 | 7187 | # -*- coding: utf-8 -*-
# Copyright © 2012-2015 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Utility functions to help you run filters on files."""
from .utils import req_missing
from functools import wraps
import os
import io
import shutil
import subprocess
import tempfile
import shlex
try:
import typogrify.filters as typo
except ImportError:
typo = None # NOQA
def apply_to_binary_file(f):
"""Take a function f that transforms a data argument, and returns
a function that takes a filename and applies f to the contents,
in place. Reads files in binary mode."""
@wraps(f)
def f_in_file(fname):
with open(fname, 'rb') as inf:
data = inf.read()
data = f(data)
with open(fname, 'wb+') as outf:
outf.write(data)
return f_in_file
def apply_to_text_file(f):
"""Take a function f that transforms a data argument, and returns
a function that takes a filename and applies f to the contents,
in place. Reads files in UTF-8."""
@wraps(f)
def f_in_file(fname):
with io.open(fname, 'r', encoding='utf-8') as inf:
data = inf.read()
data = f(data)
with io.open(fname, 'w+', encoding='utf-8') as outf:
outf.write(data)
return f_in_file
def list_replace(the_list, find, replacement):
"Replace all occurrences of ``find`` with ``replacement`` in ``the_list``"
for i, v in enumerate(the_list):
if v == find:
the_list[i] = replacement
def runinplace(command, infile):
"""Run a command in-place on a file.
command is a string of the form: "commandname %1 %2" and
it will be execed with infile as %1 and a temporary file
as %2. Then, that temporary file will be moved over %1.
Example usage:
runinplace("yui-compressor %1 -o %2", "myfile.css")
That will replace myfile.css with a minified version.
You can also supply command as a list.
"""
if not isinstance(command, list):
command = shlex.split(command)
tmpdir = None
if "%2" in command:
tmpdir = tempfile.mkdtemp(prefix="nikola")
tmpfname = os.path.join(tmpdir, os.path.basename(infile))
try:
list_replace(command, "%1", infile)
if tmpdir:
list_replace(command, "%2", tmpfname)
subprocess.check_call(command)
if tmpdir:
shutil.move(tmpfname, infile)
finally:
if tmpdir:
shutil.rmtree(tmpdir)
def yui_compressor(infile):
yuicompressor = False
try:
subprocess.call('yui-compressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
yuicompressor = 'yui-compressor'
except Exception:
pass
if not yuicompressor:
try:
subprocess.call('yuicompressor', stdout=open(os.devnull, 'w'), stderr=open(os.devnull, 'w'))
yuicompressor = 'yuicompressor'
except:
raise Exception("yui-compressor is not installed.")
return runinplace(r'{} --nomunge %1 -o %2'.format(yuicompressor), infile)
def closure_compiler(infile):
return runinplace(r'closure-compiler --warning_level QUIET --js %1 --js_output_file %2', infile)
def optipng(infile):
return runinplace(r"optipng -preserve -o2 -quiet %1", infile)
def jpegoptim(infile):
return runinplace(r"jpegoptim -p --strip-all -q %1", infile)
def html_tidy_nowrap(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no -modify %1")
def html_tidy_wrap(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 80 --wrap-sections no --tidy-mark no -modify %1")
def html_tidy_wrap_attr(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes yes --sort-attributes alpha --wrap 80 --wrap-sections no --tidy-mark no -modify %1")
def html_tidy_mini(infile):
return _html_tidy_runner(infile, r"-quiet --show-info no --show-warnings no -utf8 --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no -modify %1")
def _html_tidy_runner(infile, options):
""" Warnings (returncode 1) are not critical, and *everything* is a warning """
try:
status = runinplace(r"tidy5 " + options, infile)
except subprocess.CalledProcessError as err:
status = 0 if err.returncode == 1 else err.returncode
return status
@apply_to_text_file
def minify_lines(data):
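    # Intentionally a no-op; kept (assumed for backwards compatibility) so
    # existing configurations referencing this filter keep working.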
return data
@apply_to_text_file
def typogrify(data):
if typo is None:
req_missing(['typogrify'], 'use the typogrify filter')
data = typo.amp(data)
data = typo.widont(data)
data = typo.smartypants(data)
# Disabled because of typogrify bug where it breaks <title>
# data = typo.caps(data)
data = typo.initial_quotes(data)
return data
@apply_to_text_file
def typogrify_sans_widont(data):
# typogrify with widont disabled because it caused broken headline
# wrapping, see issue #1465
if typo is None:
req_missing(['typogrify'], 'use the typogrify_sans_widont filter')
data = typo.amp(data)
data = typo.smartypants(data)
# Disabled because of typogrify bug where it breaks <title>
# data = typo.caps(data)
data = typo.initial_quotes(data)
return data
@apply_to_text_file
def php_template_injection(data):
import re
template = re.search('<\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\:(.*) checksum\:(.*)__ -->', data)
if template:
source = template.group(1)
with io.open(source, "r", encoding="utf-8") as in_file:
phpdata = in_file.read()
_META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\n' * 2) + '|' + ("\r\n" * 2) + ')'
phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]
phpdata = re.sub(template.group(0), phpdata, data)
return phpdata
else:
return data
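# Example (sketch): filters rewrite output files in place; the paths below
# are hypothetical.
#
#   typogrify('output/index.html')       # rewrites the HTML in place
#   optipng('output/images/logo.png')    # shells out to optipng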
| mit | -9,010,117,883,076,772,000 | 31.369369 | 198 | 0.660868 | false |
Arzaroth/python_rapidxml | tests/test_basic.py | 1 | 5638 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# File: simple.py
# by Arzaroth Lekva
# [email protected]
#
import os
import rapidxml
def test_unparse(init_rapidxml):
assert init_rapidxml.unparse() == ('<root><test attr1="one" attr2="two" attr3="three"/>'
'<test2><node id="1"/><node id="2"/><node id="3"/></test2>'
'<test>some text</test></root>')
assert init_rapidxml.unparse() == repr(init_rapidxml)
assert init_rapidxml.unparse(False, False) == repr(init_rapidxml)
assert init_rapidxml.unparse(raw=False) == repr(init_rapidxml)
assert init_rapidxml.unparse(pretty=False) == repr(init_rapidxml)
assert init_rapidxml.unparse(pretty=False, raw=False) == repr(init_rapidxml)
assert init_rapidxml.unparse(True) == str(init_rapidxml)
assert init_rapidxml.unparse(True, False) == str(init_rapidxml)
assert init_rapidxml.unparse(pretty=True) == str(init_rapidxml)
assert init_rapidxml.unparse(pretty=True, raw=False) == str(init_rapidxml)
assert init_rapidxml.unparse(True, raw=False) == str(init_rapidxml)
def test_parse(init_rapidxml):
r = rapidxml.RapidXml()
try:
data = init_rapidxml.unparse().encode('utf-8')
except UnicodeDecodeError:
data = init_rapidxml.unparse()
r.parse(data)
assert str(r) == str(init_rapidxml)
def test_parse_from_file(init_rapidxml, tmpdir):
f = tmpdir.join("dump.xml")
f.write(init_rapidxml.unparse())
r = rapidxml.RapidXml(str(f), from_file=True)
assert str(r) == str(init_rapidxml)
def test_equals(init_rapidxml):
assert init_rapidxml == init_rapidxml
root = init_rapidxml.first_node()
assert root == root
assert root == init_rapidxml.first_node()
assert root.first_node() != root.first_node("test2")
assert (root != root) == (not (root == root))
def test_parent(init_rapidxml):
assert init_rapidxml.parent is None
assert init_rapidxml.first_node().parent == init_rapidxml
def test_assign(init_rapidxml):
root = init_rapidxml.first_node()
root.name = "new_root"
assert root.name == "new_root"
test = root.first_node()
test.name = "new_test"
test.first_attribute().name = "new_attr1"
test.first_attribute().next_attribute().value = "new_two"
test = root.first_node("test")
test.value = "some new text"
assert test.value == "some new text"
assert init_rapidxml.unparse() == ('<new_root><new_test new_attr1="one" attr2="new_two" attr3="three"/>'
'<test2><node id="1"/><node id="2"/><node id="3"/></test2>'
'<test>some new text</test></new_root>')
def test_init_cdata(init_rapidxml_with_CDADA):
    data_str = ('<root><test attr1="one" attr2="two" attr3="three"/>'
'<test2><node id="1"/><node id="2"/><node id="3"/></test2>'
'<test>some text</test>'
"<ns2:AdditionalData><ns2:Data TID=\"AD_1\">"
"<![CDATA[{\"Cart\":{\"expirationTime\":\"2017-04-22T09:40\","
"\"id\":\"b469df3b-f626-4fe3-898c-825373e546a2\",\"products\":[\"1223\"],"
"\"creationTime\":\"2017-04-21T09:40\",\"totalPrice\":"
"{\"currencyCode\":\"EUR\",\"amount\":\"138.000\"}}}]]>"
"</ns2:Data></ns2:AdditionalData></root>")
    assert init_rapidxml_with_CDADA.unparse() == rapidxml.RapidXml(data_str,
from_file=False,
attribute_prefix='@',
cdata_key='#text',
always_aslist=False,
parse_cdata=True).unparse()
assert init_rapidxml_with_CDADA.unparse() == repr(init_rapidxml_with_CDADA)
assert init_rapidxml_with_CDADA.unparse(True) == str(init_rapidxml_with_CDADA)
def test_parse_cdata(init_rapidxml_with_CDADA):
r = rapidxml.RapidXml()
try:
data = init_rapidxml_with_CDADA.unparse().encode('utf-8')
except UnicodeDecodeError:
data = init_rapidxml_with_CDADA.unparse()
r.parse(data, from_file=False, parse_cdata=True)
assert str(r) == str(init_rapidxml_with_CDADA)
def test_parse_from_file_cdata(init_rapidxml_with_CDADA, tmpdir):
f = tmpdir.join("dump.xml")
f.write(init_rapidxml_with_CDADA.unparse())
r = rapidxml.RapidXml(str(f), from_file=True, parse_cdata=True)
assert str(r) == str(init_rapidxml_with_CDADA)
def test_equals_cdata(init_rapidxml_with_CDADA):
assert init_rapidxml_with_CDADA == init_rapidxml_with_CDADA
root = init_rapidxml_with_CDADA.first_node()
assert root == root
assert root == init_rapidxml_with_CDADA.first_node()
assert root.first_node() != root.first_node("test2")
assert (root != root) == (not (root == root))
def test_parent_cdata(init_rapidxml_with_CDADA):
assert init_rapidxml_with_CDADA.parent is None
assert init_rapidxml_with_CDADA.first_node().parent == init_rapidxml_with_CDADA
def test_assign_cdata(init_rapidxml_with_CDADA):
root = init_rapidxml_with_CDADA.first_node()
root.name = "new_root"
assert root.name == "new_root"
test = root.first_node()
test.name = "new_test"
test.first_attribute().name = "new_attr1"
test.first_attribute().next_attribute().value = "new_two"
test = root.first_node("test")
test.value = "some new text"
assert test.value == "some new text"
| mit | 6,300,679,868,644,704,000 | 44.104 | 108 | 0.595956 | false |
henrysher/duplicity | setup.py | 1 | 7472 | #!/usr/bin/env python2
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <[email protected]>
# Copyright 2007 Kenneth Loafman <[email protected]>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sys
import os
from setuptools import setup, Extension
from setuptools.command.test import test
from setuptools.command.install import install
from setuptools.command.sdist import sdist
from distutils.command.build_scripts import build_scripts
version_string = "$version"
if sys.version_info[:2] != (2, 7):
print("Sorry, duplicity requires version 2.7 of python.")
sys.exit(1)
incdir_list = libdir_list = None
if os.name == 'posix':
LIBRSYNC_DIR = os.environ.get('LIBRSYNC_DIR', '')
args = sys.argv[:]
for arg in args:
if arg.startswith('--librsync-dir='):
LIBRSYNC_DIR = arg.split('=')[1]
sys.argv.remove(arg)
if LIBRSYNC_DIR:
incdir_list = [os.path.join(LIBRSYNC_DIR, 'include')]
libdir_list = [os.path.join(LIBRSYNC_DIR, 'lib')]
data_files = [('share/man/man1',
['bin/duplicity.1',
'bin/rdiffdir.1']),
('share/doc/duplicity-%s' % version_string,
['COPYING',
'README',
'README-REPO',
'README-LOG',
'CHANGELOG']),
]
top_dir = os.path.dirname(os.path.abspath(__file__))
assert os.path.exists(os.path.join(top_dir, "po")), "Missing 'po' directory."
for root, dirs, files in os.walk(os.path.join(top_dir, "po")):
for file in files:
path = os.path.join(root, file)
if path.endswith("duplicity.mo"):
lang = os.path.split(root)[-1]
data_files.append(
('share/locale/%s/LC_MESSAGES' % lang,
["po/%s/duplicity.mo" % lang]))
if not os.environ.get('READTHEDOCS') == 'True':
ext_modules=[Extension("duplicity._librsync",
["duplicity/_librsyncmodule.c"],
include_dirs=incdir_list,
library_dirs=libdir_list,
libraries=["rsync"])]
else:
ext_modules = []
class TestCommand(test):
def run(self):
# Make sure all modules are ready
build_cmd = self.get_finalized_command("build_py")
build_cmd.run()
# And make sure our scripts are ready
build_scripts_cmd = self.get_finalized_command("build_scripts")
build_scripts_cmd.run()
# make symlinks for test data
if build_cmd.build_lib != top_dir:
for path in ['testfiles.tar.gz', 'gnupg']:
src = os.path.join(top_dir, 'testing', path)
target = os.path.join(build_cmd.build_lib, 'testing', path)
try:
os.symlink(src, target)
except Exception:
pass
os.environ['PATH'] = "%s:%s" % (
os.path.abspath(build_scripts_cmd.build_dir),
os.environ.get('PATH'))
test.run(self)
class InstallCommand(install):
def run(self):
# Normally, install will call build(). But we want to delete the
# testing dir between building and installing. So we manually build
# and mark ourselves to skip building when we run() for real.
self.run_command('build')
self.skip_build = True
# This should always be true, but just to make sure!
if self.build_lib != top_dir:
testing_dir = os.path.join(self.build_lib, 'testing')
os.system("rm -rf %s" % testing_dir)
install.run(self)
# TODO: move logic from dist/makedist inline
class SDistCommand(sdist):
def run(self):
version = version_string
if version[0] == '$':
version = "0.0dev"
os.system(os.path.join(top_dir, "dist", "makedist") + " " + version)
os.system("mkdir -p " + self.dist_dir)
os.system("mv duplicity-" + version + ".tar.gz " + self.dist_dir)
# don't touch my shebang
class BSCommand (build_scripts):
def run(self):
"""
Copy, chmod each script listed in 'self.scripts'
essentially this is the stripped
distutils.command.build_scripts.copy_scripts()
routine
"""
from stat import ST_MODE
from distutils.dep_util import newer
from distutils import log
self.mkpath(self.build_dir)
outfiles = []
for script in self.scripts:
outfile = os.path.join(self.build_dir, os.path.basename(script))
outfiles.append(outfile)
if not self.force and not newer(script, outfile):
log.debug("not copying %s (up-to-date)", script)
continue
log.info("copying and NOT adjusting %s -> %s", script,
self.build_dir)
self.copy_file(script, outfile)
if os.name == 'posix':
for file in outfiles:
if self.dry_run:
log.info("changing mode of %s", file)
else:
oldmode = os.stat(file)[ST_MODE] & 0o7777
newmode = (oldmode | 0o555) & 0o7777
if newmode != oldmode:
log.info("changing mode of %s from %o to %o",
file, oldmode, newmode)
os.chmod(file, newmode)
setup(name="duplicity",
version=version_string,
description="Encrypted backup using rsync algorithm",
author="Ben Escoto <[email protected]>",
author_email="[email protected]",
maintainer="Kenneth Loafman <[email protected]>",
maintainer_email="[email protected]",
url="http://duplicity.nongnu.org/index.html",
packages=['duplicity',
'duplicity.backends',
'duplicity.backends.pyrax_identity',
'testing',
'testing.functional',
'testing.overrides',
'testing.unit'],
package_dir={"duplicity": "duplicity",
"duplicity.backends": "duplicity/backends", },
ext_modules=ext_modules,
scripts=['bin/rdiffdir', 'bin/duplicity'],
data_files=data_files,
setup_requires=['pytest-runner'],
install_requires=['fasteners', 'future'],
      tests_require=['pytest', 'fasteners', 'mock', 'pexpect'],
test_suite='testing',
cmdclass={'test': TestCommand,
'install': InstallCommand,
'sdist': SDistCommand,
'build_scripts': BSCommand},
classifiers=["Programming Language :: Python :: 2 :: Only",
"Programming Language :: Python :: 2.7"]
)
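# Example (sketch): typical invocations of this script; the librsync prefix
# is hypothetical.
#
#   python2 setup.py build --librsync-dir=/usr/local
#   python2 setup.py test
#   python2 setup.py sdist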
| gpl-2.0 | 734,994,695,471,197,000 | 34.751196 | 77 | 0.578292 | false |
OpenNetworkingFoundation/PIF-Open-Intermediate-Representation | pif_ir/bir/tests/test_common.py | 1 | 1166 | # single BIRStruct description
yaml_eth_struct_dict = {
'type' : 'struct',
'fields' : [
{'dst' : 48},
{'src' : 48},
{'type_' : 16}
]
}
yaml_udp_struct_dict = {
'type' : 'struct',
'fields' : [
{'sport' : 16},
{'dport' : 16},
{'len' : 16},
{'chksum' : 16}
]
}
yaml_req_struct_dict = {
'type' : 'struct',
'fields' : [
{'type_' : 16}
]
}
yaml_resp_struct_dict = {
'type' : 'struct',
'fields' : [
{'hit' : 1},
{'p4_action' : 2},
{'action_0_arg0' : 16},
{'action_1_arg0' : 16}
]
}
# single MetadataInstance description
yaml_eth_meta_dict = {
'type' : 'metadata',
'values' : 'eth_t',
'visibility' : 'inout'
}
yaml_req_meta_dict = {
'type' : 'metadata',
'values' : 'req_t',
'visibility' : 'inout'
}
yaml_resp_meta_dict = {
'type' : 'metadata',
'values' : 'resp_t',
'visibility' : 'inout'
}
# single Table description
yaml_table_dict = {
'type' : 'table',
'match_type' : 'ternary',
'depth' : 64,
'request' : 'req_t',
'response' : 'resp_t',
'operations' : None
}
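# Example (sketch): yaml_eth_struct_dict above corresponds to YAML input of
# roughly this shape (the "eth_t" key name is assumed from the metadata
# dicts that reference it):
#
#   eth_t:
#     type: struct
#     fields:
#       - dst: 48
#       - src: 48
#       - type_: 16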
| apache-2.0 | -2,003,529,813,339,534,800 | 17.21875 | 37 | 0.465695 | false |
specter119/custodian | custodian/feff/handlers.py | 1 | 4398 | # coding: utf-8
from __future__ import unicode_literals, division
from custodian.custodian import ErrorHandler
import re
from custodian.utils import backup
from pymatgen.io.feff.sets import FEFFDictSet
from custodian.feff.interpreter import FeffModder
import logging
""" This module implements specific error handler for FEFF runs. """
__author__ = "Chen Zheng"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Chen Zheng"
__email__ = "[email protected]"
__date__ = "Oct 18, 2017"
FEFF_BACKUP_FILES = ["ATOMS", "HEADER", "PARAMETERS", "POTENTIALS", "feff.inp", "*.cif", "pot.bin"]
logger = logging.getLogger(__name__)
class UnconvergedErrorHandler(ErrorHandler):
"""
Correct the unconverged error of FEFF's SCF calculation.
"""
is_monitor = False
def __init__(self, output_filename='log1.dat'):
"""
Initializes the handler with the output file to check
Args:
output_filename (str): Filename for the log1.dat file. log1.dat file
contains the SCF calculation convergence information. Change this only
if it is different from the default (unlikely).
"""
self.output_filename = output_filename
def check(self):
"""
If the FEFF run does not converge, the check will return
"TRUE"
"""
return self._notconverge_check()
def _notconverge_check(self):
# Process the output file and get converge information
not_converge_pattern = re.compile("Convergence not reached.*")
converge_pattern = re.compile('Convergence reached.*')
        for line in open(self.output_filename):
if len(not_converge_pattern.findall(line)) > 0:
return True
elif len(converge_pattern.findall(line)) > 0:
return False
def correct(self):
backup(FEFF_BACKUP_FILES)
feff_input = FEFFDictSet.from_directory(".")
scf_values = feff_input.tags.get("SCF")
nscmt = scf_values[2]
ca = scf_values[3]
nmix = scf_values[4]
actions = []
        # Add RESTART card to PARAMETERS
        if "RESTART" not in feff_input.tags:
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"RESTART": []}}})
if nscmt < 100 and ca == 0.2:
scf_values[2] = 100
scf_values[4] = 3 # Set nmix = 3
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"SCF": scf_values}}})
FeffModder().apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
elif nscmt == 100 and nmix == 3 and ca > 0.01:
# Reduce the convergence accelerator factor
scf_values[3] = round(ca / 2, 2)
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"SCF": scf_values}}})
FeffModder().apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
elif nmix == 3 and ca == 0.01:
# Set ca = 0.05 and set nmix
scf_values[3] = 0.05
scf_values[4] = 5
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"SCF": scf_values}}})
FeffModder().apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
elif nmix == 5 and ca == 0.05:
# Set ca = 0.05 and set nmix
scf_values[3] = 0.05
scf_values[4] = 10
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"SCF": scf_values}}})
FeffModder().apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
elif nmix == 10 and ca < 0.2:
# loop through ca with nmix = 10
scf_values[3] = round(ca * 2, 2)
actions.append({"dict": "PARAMETERS",
"action": {"_set": {"SCF": scf_values}}})
FeffModder().apply_actions(actions)
return {"errors": ["Non-converging job"], "actions": actions}
# Unfixable error. Just return None for actions.
else:
return {"errors": ["Non-converging job"], "actions": None}
| mit | -3,734,159,296,317,997,000 | 35.65 | 99 | 0.555707 | false |
ActiveState/code | recipes/Python/271607_fiber_scheduler/recipe-271607.py | 1 | 5269 | import sys, select, time, socket, traceback
class SEND:
def __init__( self, sock, timeout ):
self.fileno = sock.fileno()
self.expire = time.time() + timeout
def __str__( self ):
return 'SEND(%i,%s)' % ( self.fileno, time.strftime( '%H:%M:%S', time.localtime( self.expire ) ) )
class RECV:
def __init__( self, sock, timeout ):
self.fileno = sock.fileno()
self.expire = time.time() + timeout
def __str__( self ):
return 'RECV(%i,%s)' % ( self.fileno, time.strftime( '%H:%M:%S', time.localtime( self.expire ) ) )
class WAIT:
def __init__( self, timeout = None ):
self.expire = timeout and time.time() + timeout or None
def __str__( self ):
return 'WAIT(%s)' % ( self.expire and time.strftime( '%H:%M:%S', time.localtime( self.expire ) ) )
class Fiber:
def __init__( self, generator ):
self.__generator = generator
self.state = WAIT()
def step( self, throw=None ):
self.state = None
try:
if throw:
assert hasattr( self.__generator, 'throw' ), throw
self.__generator.throw( AssertionError, throw )
state = self.__generator.next()
assert isinstance( state, (SEND, RECV, WAIT) ), 'invalid waiting state %r' % state
self.state = state
except KeyboardInterrupt:
raise
except StopIteration:
del self.__generator
pass
except AssertionError, msg:
print 'Error:', msg
except:
traceback.print_exc()
def __repr__( self ):
return '%i: %s' % ( self.__generator.gi_frame.f_lineno, self.state )
class GatherFiber( Fiber ):
def __init__( self, generator ):
Fiber.__init__( self, generator )
self.__chunks = [ '[ 0.00 ] %s\n' % time.ctime() ]
self.__start = time.time()
self.__newline = True
def step( self, throw=None ):
stdout = sys.stdout
stderr = sys.stderr
try:
sys.stdout = sys.stderr = self
Fiber.step( self, throw )
finally:
sys.stdout = stdout
sys.stderr = stderr
def write( self, string ):
if self.__newline:
self.__chunks.append( '%6.2f ' % ( time.time() - self.__start ) )
self.__chunks.append( string )
self.__newline = string.endswith( '\n' )
def __del__( self ):
sys.stdout.writelines( self.__chunks )
if not self.__newline:
sys.stdout.write( '\n' )
class DebugFiber( Fiber ):
id = 0
def __init__( self, generator ):
Fiber.__init__( self, generator )
self.__id = DebugFiber.id
sys.stdout.write( '[ %04X ] %s\n' % ( self.__id, time.ctime() ) )
self.__newline = True
self.__stdout = sys.stdout
DebugFiber.id = ( self.id + 1 ) % 65535
def step( self, throw=None ):
stdout = sys.stdout
stderr = sys.stderr
try:
sys.stdout = sys.stderr = self
Fiber.step( self, throw )
if self.state:
print 'Waiting at', self
finally:
sys.stdout = stdout
sys.stderr = stderr
def write( self, string ):
if self.__newline:
self.__stdout.write( ' %04X ' % self.__id )
self.__stdout.write( string )
self.__newline = string.endswith( '\n' )
def spawn( generator, port, debug ):
try:
listener = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
listener.setblocking( 0 )
listener.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, listener.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR ) | 1 )
listener.bind( ( '', port ) )
listener.listen( 5 )
except Exception, e:
print 'error: failed to create socket:', e
return False
if debug:
myFiber = DebugFiber
else:
myFiber = GatherFiber
print ' .... Server started'
try:
fibers = []
while True:
tryrecv = { listener.fileno(): None }
trysend = {}
expire = None
now = time.time()
i = len( fibers )
while i:
i -= 1
state = fibers[ i ].state
if state and now > state.expire:
if isinstance( state, WAIT ):
fibers[ i ].step()
else:
fibers[ i ].step( throw='connection timed out' )
state = fibers[ i ].state
if not state:
del fibers[ i ]
continue
if isinstance( state, RECV ):
tryrecv[ state.fileno ] = fibers[ i ]
elif isinstance( state, SEND ):
trysend[ state.fileno ] = fibers[ i ]
elif state.expire is None:
continue
if state.expire < expire or expire is None:
expire = state.expire
if expire is None:
print '[ IDLE ]', time.ctime()
sys.stdout.flush()
canrecv, cansend, dummy = select.select( tryrecv, trysend, [] )
print '[ BUSY ]', time.ctime()
sys.stdout.flush()
else:
canrecv, cansend, dummy = select.select( tryrecv, trysend, [], max( expire - now, 0 ) )
for fileno in canrecv:
if fileno is listener.fileno():
fibers.append( myFiber( generator( *listener.accept() ) ) )
else:
tryrecv[ fileno ].step()
for fileno in cansend:
trysend[ fileno ].step()
except KeyboardInterrupt:
print ' .... Server terminated'
return True
except:
print ' .... Server crashed'
traceback.print_exc( file=sys.stdout )
return False
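# Example (sketch): a trivial echo handler for spawn(); error handling is
# omitted.
#
#   def echo( sock, address ):
#       sock.setblocking( 0 )
#       yield RECV( sock, 10 )
#       data = sock.recv( 4096 )
#       yield SEND( sock, 10 )
#       sock.sendall( data )
#
#   spawn( echo, port=8080, debug=False )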
| mit | -389,809,782,214,965,100 | 23.281106 | 132 | 0.571076 | false |
DeanThompson/pyelong | pyelong/request.py | 1 | 6017 | # -*- coding: utf-8 -*-
import hashlib
import json
import time
import urllib
import requests
from requests import RequestException, ConnectionError, Timeout
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from .api import ApiSpec
from .exceptions import ElongException, ElongAPIError, \
RetryableException, RetryableAPIError
from .response import RequestsResponse, TornadoResponse, logger
from .util.retry import retry_on_error, is_retryable
class Request(object):
def __init__(self, client,
host=ApiSpec.host,
version=ApiSpec.version,
local=ApiSpec.local):
self.client = client
self.verify_ssl = self.client.cert is not None
self.host = host
self.version = version
self.local = local
def do(self, api, params, https, raw=False):
raise NotImplementedError()
def prepare(self, api, params, https, raw):
timestamp = str(int(time.time()))
data = self.build_data(params, raw)
scheme = 'https' if https else 'http'
url = "%s://%s" % (scheme, self.host)
params = {
'method': api,
'user': self.client.user,
'timestamp': timestamp,
'data': data,
'signature': self.signature(data, timestamp),
'format': 'json'
}
return url, params
def build_data(self, params, raw=False):
if not raw:
data = {
'Version': self.version,
'Local': self.local,
'Request': params
}
else:
data = params
return json.dumps(data, separators=(',', ':'))
def signature(self, data, timestamp):
s = self._md5(data + self.client.app_key)
return self._md5("%s%s%s" % (timestamp, s, self.client.secret_key))
@staticmethod
def _md5(data):
return hashlib.md5(data.encode('utf-8')).hexdigest()
def check_response(self, resp):
if not resp.ok and self.client.raise_api_error:
# logger.error('pyelong calling api failed, url: %s', resp.url)
if is_retryable(resp.code):
raise RetryableAPIError(resp.code, resp.error)
raise ElongAPIError(resp.code, resp.error)
return resp
def timing(self, api, delta):
if self.client.statsd_client and \
hasattr(self.client.statsd_client, 'timing'):
self.client.statsd_client.timing(api, delta)
class SyncRequest(Request):
@property
def session(self):
if not hasattr(self, '_session') or not self._session:
self._session = requests.Session()
if self.client.proxy_host and self.client.proxy_port:
p = '%s:%s' % (self.client.proxy_host, self.client.proxy_port)
self._session.proxies = {'http': p, 'https': p}
return self._session
@retry_on_error(retry_api_error=True)
def do(self, api, params, https, raw=False):
url, params = self.prepare(api, params, https, raw)
try:
result = self.session.get(url=url,
params=params,
verify=self.verify_ssl,
cert=self.client.cert)
except (ConnectionError, Timeout) as e:
logger.exception('pyelong catches ConnectionError or Timeout, '
'url: %s, params: %s', url, params)
raise RetryableException('ConnectionError or Timeout: %s' % e)
except RequestException as e:
logger.exception('pyelong catches RequestException, url: %s,'
' params: %s', url, params)
raise ElongException('RequestException: %s' % e)
except Exception as e:
logger.exception('pyelong catches unknown exception, url: %s, '
'params: %s', url, params)
raise ElongException('unknown exception: %s' % e)
resp = RequestsResponse(result)
self.timing(api, resp.request_time)
return self.check_response(resp)
class AsyncRequest(Request):
@property
def proxy_config(self):
if not getattr(self, '_proxy_config', None):
if self.client.proxy_host and self.client.proxy_port:
self._proxy_config = {
'proxy_host': self.client.proxy_host,
'proxy_port': self.client.proxy_port
}
else:
self._proxy_config = {}
return self._proxy_config
@staticmethod
def _encode_params(data):
"""
:param dict data: params
Taken from requests.models.RequestEncodingMixin._encode_params
"""
result = []
for k, vs in data.iteritems():
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urllib.urlencode(result, doseq=True)
def _prepare_url(self, url, params):
if url.endswith('/'):
            url = url.rstrip('/')
return '%s?%s' % (url, self._encode_params(params))
@gen.coroutine
def do(self, api, params, https, raw=False):
url, params = self.prepare(api, params, https, raw)
# use the default SimpleAsyncHTTPClient
resp = yield AsyncHTTPClient().fetch(self._prepare_url(url, params),
validate_cert=self.verify_ssl,
ca_certs=self.client.cert,
**self.proxy_config)
resp = TornadoResponse(resp)
self.timing(api, resp.request_time)
raise gen.Return(self.check_response(resp))
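# Example (sketch): issuing a signed call through the synchronous transport.
# The client object and API name are hypothetical.
#
#   request = SyncRequest(client)
#   resp = request.do('hotel.detail', {'HotelId': '123'}, https=True)
#   if resp.ok:
#       ...  # consume the parsed response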
| mit | -2,665,918,147,271,490,000 | 35.466667 | 78 | 0.553266 | false |
lordmos/blink | Source/bindings/scripts/unstable/idl_compiler.py | 1 | 5668 | #!/usr/bin/python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Compile an .idl file to Blink V8 bindings (.h and .cpp files).
FIXME: Not currently used in build.
This is a rewrite of the Perl IDL compiler in Python, but is not complete.
Once it is complete, we will switch all IDL files over to Python at once.
Until then, please work on the Perl IDL compiler.
For details, see bug http://crbug.com/239771
"""
import optparse
import os
import pickle
import posixpath
import shlex
import sys
import code_generator_v8
import idl_reader
module_path, _ = os.path.split(__file__)
source_path = os.path.normpath(os.path.join(module_path, os.pardir, os.pardir, os.pardir))
def parse_options():
parser = optparse.OptionParser()
parser.add_option('--additional-idl-files')
# FIXME: The --dump-json-and-pickle option is only for debugging and will
# be removed once we complete migrating all IDL files from the Perl flow to
# the Python flow.
parser.add_option('--dump-json-and-pickle', action='store_true', default=False)
parser.add_option('--idl-attributes-file')
parser.add_option('--include', dest='idl_directories', action='append')
parser.add_option('--output-directory')
parser.add_option('--interface-dependencies-file')
parser.add_option('--verbose', action='store_true', default=False)
parser.add_option('--write-file-only-if-changed', type='int')
# ensure output comes last, so command line easy to parse via regexes
parser.disable_interspersed_args()
options, args = parser.parse_args()
if options.output_directory is None:
parser.error('Must specify output directory using --output-directory.')
if options.additional_idl_files is None:
options.additional_idl_files = []
else:
# additional_idl_files is passed as a string with varied (shell-style)
# quoting, hence needs parsing.
options.additional_idl_files = shlex.split(options.additional_idl_files)
if len(args) != 1:
parser.error('Must specify exactly 1 input file as argument, but %d given.' % len(args))
options.idl_filename = os.path.realpath(args[0])
return options
def get_relative_dir_posix(filename):
"""Returns directory of a local file relative to Source, in POSIX format."""
relative_path_local = os.path.relpath(filename, source_path)
relative_dir_local = os.path.dirname(relative_path_local)
return relative_dir_local.replace(os.path.sep, posixpath.sep)
def write_json_and_pickle(definitions, interface_name, output_directory):
json_string = definitions.to_json()
json_basename = interface_name + '.json'
json_filename = os.path.join(output_directory, json_basename)
with open(json_filename, 'w') as json_file:
json_file.write(json_string)
pickle_basename = interface_name + '.pkl'
pickle_filename = os.path.join(output_directory, pickle_basename)
with open(pickle_filename, 'wb') as pickle_file:
pickle.dump(definitions, pickle_file)
def main():
options = parse_options()
idl_filename = options.idl_filename
basename = os.path.basename(idl_filename)
interface_name, _ = os.path.splitext(basename)
output_directory = options.output_directory
verbose = options.verbose
if verbose:
print idl_filename
relative_dir_posix = get_relative_dir_posix(idl_filename)
reader = idl_reader.IdlReader(options.interface_dependencies_file, options.additional_idl_files, options.idl_attributes_file, output_directory, verbose)
definitions = reader.read_idl_definitions(idl_filename)
code_generator = code_generator_v8.CodeGeneratorV8(definitions, interface_name, options.output_directory, relative_dir_posix, options.idl_directories, verbose)
if not definitions:
# We generate dummy .h and .cpp files just to tell build scripts
# that outputs have been created.
code_generator.write_dummy_header_and_cpp()
return
if options.dump_json_and_pickle:
write_json_and_pickle(definitions, interface_name, output_directory)
return
code_generator.write_header_and_cpp()
if __name__ == '__main__':
sys.exit(main())
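# Example (sketch): a typical command line for this compiler; paths are
# hypothetical.
#
#   python idl_compiler.py \
#       --idl-attributes-file IDLAttributes.txt \
#       --interface-dependencies-file InterfaceDependencies.txt \
#       --output-directory out/bindings \
#       Source/core/dom/Node.idl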
| mit | 4,028,016,241,521,882,600 | 42.937984 | 163 | 0.728652 | false |
zhlinh/leetcode | 0173.Binary Search Tree Iterator/test.py | 1 | 1230 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from solution import TreeNode
from solution import BSTIterator
def constructOne(s):
s = s.strip()
if s == '#':
return None
else:
return TreeNode(int(s))
def createTree(tree):
q = []
tree = tree.split(",")
root = constructOne(tree[0]);
q.append(root);
idx = 1;
while q:
tn = q.pop(0)
if not tn:
continue
if idx == len(tree):
break
left = constructOne(tree[idx])
tn.left = left
q.append(left)
idx += 1
if idx == len(tree):
break
right = constructOne(tree[idx])
idx += 1
tn.right = right
q.append(right)
return root
def printNode(tn, indent):
sb = ""
for i in range(indent):
sb += "\t"
sb += str(tn.val)
print(sb)
def printTree(root, indent):
if not root:
return
printTree(root.right, indent + 1)
printNode(root, indent)
printTree(root.left, indent + 1)
# root = createTree("1, 2, 5, 3, 4, #, 6")
root = createTree("4, 3, 5, 2, #, #, 7")
i, v = BSTIterator(root), []
while i.hasNext():
v.append(i.next())
for node in v:
print(node.val)
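# The iterator walks the BST in order, so for the tree built above
# ("4, 3, 5, 2, #, #, 7") this prints 2, 3, 4, 5 and 7, one value per line.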
| apache-2.0 | -6,259,950,088,596,226,000 | 20.206897 | 43 | 0.523577 | false |
spl0k/supysonic | tests/base/test_cache.py | 1 | 8007 | # This file is part of Supysonic.
# Supysonic is a Python implementation of the Subsonic server API.
#
# Copyright (C) 2018 Alban 'spl0k' Féron
# 2018-2019 Carey 'pR0Ps' Metcalfe
#
# Distributed under terms of the GNU AGPLv3 license.
import os
import unittest
import shutil
import time
import tempfile
from supysonic.cache import Cache, CacheMiss, ProtectedError
class CacheTestCase(unittest.TestCase):
def setUp(self):
self.__dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.__dir)
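    # Cache API sketch as inferred from the tests below (not authoritative):
    #
    #   cache = Cache(directory, max_size_bytes, min_time=..., auto_prune=...)
    #   path = cache.set(key, byte_string)   # returns the backing file path
    #   data = cache.get_value(key)          # raises CacheMiss when absent
    #   cache.prune(); cache.clear()         # enforce the budget / empty it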
def test_existing_files_order(self):
cache = Cache(self.__dir, 30)
val = b"0123456789"
cache.set("key1", val)
cache.set("key2", val)
cache.set("key3", val)
self.assertEqual(cache.size, 30)
# file mtime is accurate to the second
time.sleep(1)
cache.get_value("key1")
cache = Cache(self.__dir, 30, min_time=0)
self.assertEqual(cache.size, 30)
self.assertTrue(cache.has("key1"))
self.assertTrue(cache.has("key2"))
self.assertTrue(cache.has("key3"))
cache.set("key4", val)
self.assertEqual(cache.size, 30)
self.assertTrue(cache.has("key1"))
self.assertFalse(cache.has("key2"))
self.assertTrue(cache.has("key3"))
self.assertTrue(cache.has("key4"))
def test_missing(self):
cache = Cache(self.__dir, 10)
self.assertFalse(cache.has("missing"))
with self.assertRaises(CacheMiss):
cache.get_value("missing")
def test_delete_missing(self):
cache = Cache(self.__dir, 0, min_time=0)
cache.delete("missing1")
cache.delete("missing2")
def test_store_literal(self):
cache = Cache(self.__dir, 10)
val = b"0123456789"
cache.set("key", val)
self.assertEqual(cache.size, 10)
self.assertTrue(cache.has("key"))
self.assertEqual(cache.get_value("key"), val)
def test_store_generated(self):
cache = Cache(self.__dir, 10)
val = [b"0", b"12", b"345", b"6789"]
def gen():
yield from val
t = []
for x in cache.set_generated("key", gen):
t.append(x)
self.assertEqual(cache.size, 0)
self.assertFalse(cache.has("key"))
self.assertEqual(t, val)
self.assertEqual(cache.size, 10)
self.assertEqual(cache.get_value("key"), b"".join(val))
def test_store_to_fp(self):
cache = Cache(self.__dir, 10)
val = b"0123456789"
with cache.set_fileobj("key") as fp:
fp.write(val)
self.assertEqual(cache.size, 0)
self.assertEqual(cache.size, 10)
self.assertEqual(cache.get_value("key"), val)
def test_access_data(self):
cache = Cache(self.__dir, 25, min_time=0)
val = b"0123456789"
cache.set("key", val)
self.assertEqual(cache.get_value("key"), val)
with cache.get_fileobj("key") as f:
self.assertEqual(f.read(), val)
with open(cache.get("key"), "rb") as f:
self.assertEqual(f.read(), val)
def test_accessing_preserves(self):
cache = Cache(self.__dir, 25, min_time=0)
val = b"0123456789"
cache.set("key1", val)
cache.set("key2", val)
self.assertEqual(cache.size, 20)
cache.get_value("key1")
cache.set("key3", val)
self.assertEqual(cache.size, 20)
self.assertTrue(cache.has("key1"))
self.assertFalse(cache.has("key2"))
self.assertTrue(cache.has("key3"))
def test_automatic_delete_oldest(self):
cache = Cache(self.__dir, 25, min_time=0)
val = b"0123456789"
cache.set("key1", val)
self.assertTrue(cache.has("key1"))
self.assertEqual(cache.size, 10)
cache.set("key2", val)
self.assertEqual(cache.size, 20)
self.assertTrue(cache.has("key1"))
self.assertTrue(cache.has("key2"))
cache.set("key3", val)
self.assertEqual(cache.size, 20)
self.assertFalse(cache.has("key1"))
self.assertTrue(cache.has("key2"))
self.assertTrue(cache.has("key3"))
def test_delete(self):
cache = Cache(self.__dir, 25, min_time=0)
val = b"0123456789"
cache.set("key1", val)
self.assertTrue(cache.has("key1"))
self.assertEqual(cache.size, 10)
cache.delete("key1")
self.assertFalse(cache.has("key1"))
self.assertEqual(cache.size, 0)
def test_cleanup_on_error(self):
cache = Cache(self.__dir, 10)
def gen():
# Cause a TypeError halfway through
yield from [b"0", b"12", object(), b"345", b"6789"]
with self.assertRaises(TypeError):
for x in cache.set_generated("key", gen):
pass
# Make sure no partial files are left after the error
self.assertEqual(list(os.listdir(self.__dir)), list())
def test_parallel_generation(self):
cache = Cache(self.__dir, 20)
def gen():
yield from [b"0", b"12", b"345", b"6789"]
g1 = cache.set_generated("key", gen)
g2 = cache.set_generated("key", gen)
next(g1)
files = os.listdir(self.__dir)
self.assertEqual(len(files), 1)
for x in files:
self.assertTrue(x.endswith(".part"))
next(g2)
files = os.listdir(self.__dir)
self.assertEqual(len(files), 2)
for x in files:
self.assertTrue(x.endswith(".part"))
self.assertEqual(cache.size, 0)
for x in g1:
pass
self.assertEqual(cache.size, 10)
self.assertTrue(cache.has("key"))
# Replace the file - size should stay the same
for x in g2:
pass
self.assertEqual(cache.size, 10)
self.assertTrue(cache.has("key"))
# Only a single file
self.assertEqual(len(os.listdir(self.__dir)), 1)
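    # Behaviour exercised above (implementation assumption): concurrent
    # writers stream into distinct '*.part' temporaries, which count toward
    # cache.size only once a generator completes and its temporary is
    # promoted to the final cache file for the key.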
def test_replace(self):
cache = Cache(self.__dir, 20)
val_small = b"0"
val_big = b"0123456789"
cache.set("key", val_small)
self.assertEqual(cache.size, 1)
cache.set("key", val_big)
self.assertEqual(cache.size, 10)
cache.set("key", val_small)
self.assertEqual(cache.size, 1)
def test_no_auto_prune(self):
cache = Cache(self.__dir, 10, min_time=0, auto_prune=False)
val = b"0123456789"
cache.set("key1", val)
cache.set("key2", val)
cache.set("key3", val)
cache.set("key4", val)
self.assertEqual(cache.size, 40)
cache.prune()
self.assertEqual(cache.size, 10)
def test_min_time_clear(self):
cache = Cache(self.__dir, 40, min_time=1)
val = b"0123456789"
cache.set("key1", val)
cache.set("key2", val)
time.sleep(1)
cache.set("key3", val)
cache.set("key4", val)
self.assertEqual(cache.size, 40)
cache.clear()
self.assertEqual(cache.size, 20)
time.sleep(1)
cache.clear()
self.assertEqual(cache.size, 0)
def test_not_expired(self):
cache = Cache(self.__dir, 40, min_time=1)
val = b"0123456789"
cache.set("key1", val)
with self.assertRaises(ProtectedError):
cache.delete("key1")
time.sleep(1)
cache.delete("key1")
self.assertEqual(cache.size, 0)
def test_missing_cache_file(self):
cache = Cache(self.__dir, 10, min_time=0)
val = b"0123456789"
os.remove(cache.set("key", val))
self.assertEqual(cache.size, 10)
self.assertFalse(cache.has("key"))
self.assertEqual(cache.size, 0)
os.remove(cache.set("key", val))
self.assertEqual(cache.size, 10)
with self.assertRaises(CacheMiss):
cache.get("key")
self.assertEqual(cache.size, 0)
if __name__ == "__main__":
unittest.main()
| agpl-3.0 | 539,294,049,317,192,400 | 28.112727 | 67 | 0.569323 | false |
lxml/lxml | src/lxml/tests/test_xslt.py | 1 | 69992 | # -*- coding: utf-8 -*-
"""
Test cases related to XSLT processing
"""
from __future__ import absolute_import
import io
import sys
import copy
import gzip
import os.path
import unittest
import contextlib
from textwrap import dedent
from tempfile import NamedTemporaryFile, mkdtemp
is_python3 = sys.version_info[0] >= 3
try:
unicode
except NameError: # Python 3
unicode = str
try:
basestring
except NameError: # Python 3
basestring = str
from .common_imports import (
etree, BytesIO, HelperTestCase, fileInTestDir, _bytes, make_doctest, skipif
)
class ETreeXSLTTestCase(HelperTestCase):
"""XSLT tests etree"""
def test_xslt(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual('''\
<?xml version="1.0"?>
<foo>B</foo>
''',
str(res))
def test_xslt_elementtree_error(self):
self.assertRaises(ValueError, etree.XSLT, etree.ElementTree())
def test_xslt_input_none(self):
self.assertRaises(TypeError, etree.XSLT, None)
def test_xslt_invalid_stylesheet(self):
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:stylesheet />
</xsl:stylesheet>''')
self.assertRaises(
etree.XSLTParseError, etree.XSLT, style)
def test_xslt_copy(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''')
transform = etree.XSLT(style)
res = transform(tree)
self.assertEqual('''\
<?xml version="1.0"?>
<foo>B</foo>
''',
str(res))
transform_copy = copy.deepcopy(transform)
res = transform_copy(tree)
self.assertEqual('''\
<?xml version="1.0"?>
<foo>B</foo>
''',
str(res))
transform = etree.XSLT(style)
res = transform(tree)
self.assertEqual('''\
<?xml version="1.0"?>
<foo>B</foo>
''',
str(res))
@contextlib.contextmanager
def _xslt_setup(
self, encoding='UTF-16', expected_encoding=None,
expected='<?xml version="1.0" encoding="%(ENCODING)s"?><foo>\\uF8D2</foo>'):
tree = self.parse(_bytes('<a><b>\\uF8D2</b><c>\\uF8D2</c></a>'
).decode("unicode_escape"))
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output encoding="%(ENCODING)s"/>
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''' % {'ENCODING': encoding})
st = etree.XSLT(style)
res = st(tree)
expected = _bytes(dedent(expected).strip()).decode("unicode_escape").replace('\n', '') % {
'ENCODING': expected_encoding or encoding,
}
data = [res]
yield data
self.assertEqual(expected, data[0].replace('\n', ''))
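    # The one-element 'data' list is deliberate: each test mutates data[0]
    # inside the 'with' block (e.g. decoding the raw result), and the
    # assertion after the yield then checks the post-processed value.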
def test_xslt_utf8(self):
with self._xslt_setup(encoding='UTF-8') as res:
res[0] = unicode(bytes(res[0]), 'UTF-8')
assert 'UTF-8' in res[0]
def test_xslt_encoding(self):
with self._xslt_setup() as res:
res[0] = unicode(bytes(res[0]), 'UTF-16')
assert 'UTF-16' in res[0]
def test_xslt_encoding_override(self):
with self._xslt_setup(encoding='UTF-8', expected_encoding='UTF-16') as res:
f = BytesIO()
res[0].write(f, encoding='UTF-16')
if is_python3:
output = str(f.getvalue(), 'UTF-16')
else:
output = unicode(str(f.getvalue()), 'UTF-16')
res[0] = output.replace("'", '"')
def test_xslt_write_output_bytesio(self):
with self._xslt_setup() as res:
f = BytesIO()
res[0].write_output(f)
res[0] = f.getvalue().decode('UTF-16')
def test_xslt_write_output_failure(self):
class Writer(object):
def write(self, data):
raise ValueError("FAILED!")
try:
with self._xslt_setup() as res:
res[0].write_output(Writer())
except ValueError as exc:
self.assertTrue("FAILED!" in str(exc), exc)
else:
self.assertTrue(False, "exception not raised")
def test_xslt_write_output_file(self):
with self._xslt_setup() as res:
f = NamedTemporaryFile(delete=False)
try:
try:
res[0].write_output(f)
finally:
f.close()
with io.open(f.name, encoding='UTF-16') as f:
res[0] = f.read()
finally:
os.unlink(f.name)
def test_xslt_write_output_file_path(self):
with self._xslt_setup() as res:
f = NamedTemporaryFile(delete=False)
try:
try:
res[0].write_output(f.name, compression=9)
finally:
f.close()
with gzip.GzipFile(f.name) as f:
res[0] = f.read().decode("UTF-16")
finally:
os.unlink(f.name)
def test_xslt_write_output_file_path_urlescaped(self):
# libxml2 should not unescape file paths.
with self._xslt_setup() as res:
f = NamedTemporaryFile(prefix='tmp%2e', suffix='.xml.gz', delete=False)
try:
try:
res[0].write_output(f.name, compression=3)
finally:
f.close()
with gzip.GzipFile(f.name) as f:
res[0] = f.read().decode("UTF-16")
finally:
os.unlink(f.name)
def test_xslt_write_output_file_path_urlescaped_plus(self):
with self._xslt_setup() as res:
f = NamedTemporaryFile(prefix='p+%2e', suffix='.xml.gz', delete=False)
try:
try:
res[0].write_output(f.name, compression=1)
finally:
f.close()
with gzip.GzipFile(f.name) as f:
res[0] = f.read().decode("UTF-16")
finally:
os.unlink(f.name)
def test_xslt_write_output_file_oserror(self):
with self._xslt_setup(expected='') as res:
tempdir = mkdtemp()
try:
res[0].write_output(os.path.join(tempdir, 'missing_subdir', 'out.xml'))
except IOError:
res[0] = ''
else:
self.fail("IOError not raised")
finally:
os.rmdir(tempdir)
def test_xslt_unicode(self):
expected = '''
<?xml version="1.0"?>
<foo>\\uF8D2</foo>
'''
with self._xslt_setup(expected=expected) as res:
res[0] = unicode(res[0])
def test_xslt_unicode_standalone(self):
tree = self.parse(_bytes('<a><b>\\uF8D2</b><c>\\uF8D2</c></a>'
).decode("unicode_escape"))
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output encoding="UTF-16" standalone="no"/>
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
expected = _bytes('''\
<?xml version="1.0" standalone="no"?>
<foo>\\uF8D2</foo>
''').decode("unicode_escape")
self.assertEqual(expected,
unicode(res))
def test_xslt_input(self):
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
st = etree.XSLT(style.getroot())
def test_xslt_input_partial_doc(self):
style = self.parse('''\
<otherroot>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>
</otherroot>''')
self.assertRaises(etree.XSLTParseError, etree.XSLT, style)
root_node = style.getroot()
self.assertRaises(etree.XSLTParseError, etree.XSLT, root_node)
st = etree.XSLT(root_node[0])
def test_xslt_broken(self):
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:foo />
</xsl:stylesheet>''')
self.assertRaises(etree.XSLTParseError,
etree.XSLT, style)
def test_xslt_parsing_error_log(self):
tree = self.parse('<a/>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:foo />
</xsl:stylesheet>''')
self.assertRaises(etree.XSLTParseError,
etree.XSLT, style)
exc = None
try:
etree.XSLT(style)
except etree.XSLTParseError as e:
exc = e
else:
self.assertFalse(True, "XSLT processing should have failed but didn't")
self.assertTrue(exc is not None)
self.assertTrue(len(exc.error_log))
for error in exc.error_log:
self.assertTrue(':ERROR:XSLT:' in str(error))
def test_xslt_apply_error_log(self):
tree = self.parse('<a/>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="a">
<xsl:copy>
<xsl:message terminate="yes">FAIL</xsl:message>
</xsl:copy>
</xsl:template>
</xsl:stylesheet>''')
self.assertRaises(etree.XSLTApplyError,
etree.XSLT(style), tree)
transform = etree.XSLT(style)
exc = None
try:
transform(tree)
except etree.XSLTApplyError as e:
exc = e
else:
self.assertFalse(True, "XSLT processing should have failed but didn't")
self.assertTrue(exc is not None)
self.assertTrue(len(exc.error_log))
self.assertEqual(len(transform.error_log), len(exc.error_log))
for error in exc.error_log:
self.assertTrue(':ERROR:XSLT:' in str(error))
for error in transform.error_log:
self.assertTrue(':ERROR:XSLT:' in str(error))
def test_xslt_parameters(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<foo><xsl:value-of select="$bar" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree, bar="'Bar'")
self.assertEqual('''\
<?xml version="1.0"?>
<foo>Bar</foo>
''',
str(res))
def test_xslt_string_parameters(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<foo><xsl:value-of select="$bar" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree, bar=etree.XSLT.strparam('''it's me, "Bar"'''))
self.assertEqual('''\
<?xml version="1.0"?>
<foo>it's me, "Bar"</foo>
''',
str(res))
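    # Note on the two parameter styles above: plain string parameters are
    # evaluated as XPath expressions (hence bar="'Bar'"), while
    # XSLT.strparam() safely wraps arbitrary text, quotes included.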
def test_xslt_parameter_invalid(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:param name="bar"/>
<xsl:template match="/">
<foo><xsl:value-of select="$bar" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = self.assertRaises(etree.XSLTApplyError,
st, tree, bar="<test/>")
res = self.assertRaises(etree.XSLTApplyError,
st, tree, bar="....")
def test_xslt_parameter_missing(self):
# apply() without needed parameter will lead to XSLTApplyError
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<foo><xsl:value-of select="$bar" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
# at least libxslt 1.1.28 produces this error, earlier ones (e.g. 1.1.18) might not ...
self.assertRaises(etree.XSLTApplyError, st.apply, tree)
def test_xslt_multiple_parameters(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="$bar" /></foo>
<foo><xsl:value-of select="$baz" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree, bar="'Bar'", baz="'Baz'")
self.assertEqual('''\
<?xml version="1.0"?>
<foo>Bar</foo><foo>Baz</foo>
''',
str(res))
def test_xslt_parameter_xpath(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="$bar" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree, bar="/a/b/text()")
self.assertEqual('''\
<?xml version="1.0"?>
<foo>B</foo>
''',
str(res))
def test_xslt_parameter_xpath_object(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="$bar" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree, bar=etree.XPath("/a/b/text()"))
self.assertEqual('''\
<?xml version="1.0"?>
<foo>B</foo>
''',
str(res))
def test_xslt_default_parameters(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:param name="bar" select="'Default'" />
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="$bar" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree, bar="'Bar'")
self.assertEqual('''\
<?xml version="1.0"?>
<foo>Bar</foo>
''',
str(res))
res = st(tree)
self.assertEqual('''\
<?xml version="1.0"?>
<foo>Default</foo>
''',
str(res))
def test_xslt_html_output(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="html"/>
<xsl:strip-space elements="*"/>
<xsl:template match="/">
<html><body><xsl:value-of select="/a/b/text()" /></body></html>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual('<html><body>B</body></html>',
str(res).strip())
def test_xslt_include(self):
tree = etree.parse(fileInTestDir('test1.xslt'))
st = etree.XSLT(tree)
def test_xslt_include_from_filelike(self):
f = open(fileInTestDir('test1.xslt'), 'rb')
tree = etree.parse(f)
f.close()
st = etree.XSLT(tree)
def test_xslt_multiple_transforms(self):
xml = '<a/>'
xslt = '''\
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match="/">
<response>Some text</response>
</xsl:template>
</xsl:stylesheet>
'''
source = self.parse(xml)
styledoc = self.parse(xslt)
style = etree.XSLT(styledoc)
result = style(source)
etree.tostring(result.getroot())
source = self.parse(xml)
styledoc = self.parse(xslt)
style = etree.XSLT(styledoc)
result = style(source)
etree.tostring(result.getroot())
def test_xslt_repeat_transform(self):
xml = '<a/>'
xslt = '''\
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match="/">
<response>Some text</response>
</xsl:template>
</xsl:stylesheet>
'''
source = self.parse(xml)
styledoc = self.parse(xslt)
transform = etree.XSLT(styledoc)
result = transform(source)
result = transform(source)
etree.tostring(result.getroot())
result = transform(source)
etree.tostring(result.getroot())
str(result)
result1 = transform(source)
result2 = transform(source)
self.assertEqual(str(result1), str(result2))
result = transform(source)
str(result)
def test_xslt_empty(self):
# could segfault if result contains "empty document"
xml = '<blah/>'
xslt = '''
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match="/" />
</xsl:stylesheet>
'''
source = self.parse(xml)
styledoc = self.parse(xslt)
style = etree.XSLT(styledoc)
result = style(source)
self.assertEqual('', str(result))
def test_xslt_message(self):
xml = '<blah/>'
xslt = '''
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match="/">
<xsl:message>TEST TEST TEST</xsl:message>
</xsl:template>
</xsl:stylesheet>
'''
source = self.parse(xml)
styledoc = self.parse(xslt)
style = etree.XSLT(styledoc)
result = style(source)
self.assertEqual('', str(result))
self.assertTrue("TEST TEST TEST" in [entry.message
for entry in style.error_log])
def test_xslt_message_terminate(self):
xml = '<blah/>'
xslt = '''
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:template match="/">
<xsl:message terminate="yes">TEST TEST TEST</xsl:message>
</xsl:template>
</xsl:stylesheet>
'''
source = self.parse(xml)
styledoc = self.parse(xslt)
style = etree.XSLT(styledoc)
self.assertRaises(etree.XSLTApplyError, style, source)
self.assertTrue("TEST TEST TEST" in [entry.message
for entry in style.error_log])
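    # As the two tests above show, xsl:message text is collected in the
    # transform's error_log in both cases; terminate="yes" additionally
    # aborts the run with XSLTApplyError.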
def test_xslt_shortcut(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<doc>
<foo><xsl:value-of select="$bar" /></foo>
<foo><xsl:value-of select="$baz" /></foo>
</doc>
</xsl:template>
</xsl:stylesheet>''')
result = tree.xslt(style, bar="'Bar'", baz="'Baz'")
self.assertEqual(
_bytes('<doc><foo>Bar</foo><foo>Baz</foo></doc>'),
etree.tostring(result.getroot()))
def test_multiple_elementrees(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="a"><A><xsl:apply-templates/></A></xsl:template>
<xsl:template match="b"><B><xsl:apply-templates/></B></xsl:template>
<xsl:template match="c"><C><xsl:apply-templates/></C></xsl:template>
</xsl:stylesheet>''')
self.assertEqual(self._rootstring(tree),
_bytes('<a><b>B</b><c>C</c></a>'))
result = tree.xslt(style)
self.assertEqual(self._rootstring(tree),
_bytes('<a><b>B</b><c>C</c></a>'))
self.assertEqual(self._rootstring(result),
_bytes('<A><B>B</B><C>C</C></A>'))
b_tree = etree.ElementTree(tree.getroot()[0])
self.assertEqual(self._rootstring(b_tree),
_bytes('<b>B</b>'))
result = b_tree.xslt(style)
self.assertEqual(self._rootstring(tree),
_bytes('<a><b>B</b><c>C</c></a>'))
self.assertEqual(self._rootstring(result),
_bytes('<B>B</B>'))
c_tree = etree.ElementTree(tree.getroot()[1])
self.assertEqual(self._rootstring(c_tree),
_bytes('<c>C</c>'))
result = c_tree.xslt(style)
self.assertEqual(self._rootstring(tree),
_bytes('<a><b>B</b><c>C</c></a>'))
self.assertEqual(self._rootstring(result),
_bytes('<C>C</C>'))
def test_xslt_document_XML(self):
# make sure document('') works from parsed strings
xslt = etree.XSLT(etree.XML("""\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<test>TEXT<xsl:copy-of select="document('')//test"/></test>
</xsl:template>
</xsl:stylesheet>
"""))
result = xslt(etree.XML('<a/>'))
root = result.getroot()
self.assertEqual(root.tag,
'test')
self.assertEqual(root[0].tag,
'test')
self.assertEqual(root[0].text,
'TEXT')
self.assertEqual(root[0][0].tag,
'{http://www.w3.org/1999/XSL/Transform}copy-of')
def test_xslt_document_parse(self):
# make sure document('') works from loaded files
xslt = etree.XSLT(etree.parse(fileInTestDir("test-document.xslt")))
result = xslt(etree.XML('<a/>'))
root = result.getroot()
self.assertEqual(root.tag,
'test')
self.assertEqual(root[0].tag,
'{http://www.w3.org/1999/XSL/Transform}stylesheet')
def test_xslt_document_elementtree(self):
# make sure document('') works from loaded files
xslt = etree.XSLT(etree.ElementTree(file=fileInTestDir("test-document.xslt")))
result = xslt(etree.XML('<a/>'))
root = result.getroot()
self.assertEqual(root.tag,
'test')
self.assertEqual(root[0].tag,
'{http://www.w3.org/1999/XSL/Transform}stylesheet')
def test_xslt_document_error(self):
xslt = etree.XSLT(etree.XML("""\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<test>TEXT<xsl:copy-of select="document('uri:__junkfood__is__evil__')//test"/></test>
</xsl:template>
</xsl:stylesheet>
"""))
errors = None
try:
xslt(etree.XML('<a/>'))
except etree.XSLTApplyError as exc:
errors = exc.error_log
else:
self.assertFalse(True, "XSLT processing should have failed but didn't")
self.assertTrue(len(errors))
for error in errors:
if ':ERROR:XSLT:' in str(error):
break
else:
self.assertFalse(True, 'No XSLT errors found in error log:\n%s' % errors)
def test_xslt_document_XML_resolver(self):
# make sure document('') works when custom resolvers are in use
assertEqual = self.assertEqual
called = {'count' : 0}
class TestResolver(etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, 'file://ANYTHING')
called['count'] += 1
return self.resolve_string('<CALLED/>', context)
parser = etree.XMLParser()
parser.resolvers.add(TestResolver())
xslt = etree.XSLT(etree.XML(_bytes("""\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:l="local">
<xsl:template match="/">
<test>
<xsl:for-each select="document('')//l:data/l:entry">
<xsl:copy-of select="document('file://ANYTHING')"/>
<xsl:copy>
<xsl:attribute name="value">
<xsl:value-of select="."/>
</xsl:attribute>
</xsl:copy>
</xsl:for-each>
</test>
</xsl:template>
<l:data>
<l:entry>A</l:entry>
<l:entry>B</l:entry>
</l:data>
</xsl:stylesheet>
"""), parser))
self.assertEqual(called['count'], 0)
result = xslt(etree.XML('<a/>'))
self.assertEqual(called['count'], 1)
root = result.getroot()
self.assertEqual(root.tag,
'test')
self.assertEqual(len(root), 4)
self.assertEqual(root[0].tag,
'CALLED')
self.assertEqual(root[1].tag,
'{local}entry')
self.assertEqual(root[1].text,
None)
self.assertEqual(root[1].get("value"),
'A')
self.assertEqual(root[2].tag,
'CALLED')
self.assertEqual(root[3].tag,
'{local}entry')
self.assertEqual(root[3].text,
None)
self.assertEqual(root[3].get("value"),
'B')
def test_xslt_resolver_url_building(self):
assertEqual = self.assertEqual
called = {'count' : 0}
expected_url = None
class TestResolver(etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, expected_url)
called['count'] += 1
return self.resolve_string('<CALLED/>', context)
stylesheet_xml = _bytes("""\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:l="local">
<xsl:template match="/">
<xsl:copy-of select="document('test.xml')"/>
</xsl:template>
</xsl:stylesheet>
""")
parser = etree.XMLParser()
parser.resolvers.add(TestResolver())
# test without base_url => relative path only
expected_url = 'test.xml'
xslt = etree.XSLT(etree.XML(stylesheet_xml, parser))
self.assertEqual(called['count'], 0)
result = xslt(etree.XML('<a/>'))
self.assertEqual(called['count'], 1)
# now the same thing with a stylesheet base URL on the filesystem
called['count'] = 0
expected_url = 'MY/BASE/test.xml' # seems to be the same on Windows
xslt = etree.XSLT(etree.XML(
stylesheet_xml, parser,
base_url=os.path.join('MY', 'BASE', 'FILE')))
self.assertEqual(called['count'], 0)
result = xslt(etree.XML('<a/>'))
self.assertEqual(called['count'], 1)
# now the same thing with a stylesheet base URL
called['count'] = 0
expected_url = 'http://server.com/BASE/DIR/test.xml'
xslt = etree.XSLT(etree.XML(
stylesheet_xml, parser,
base_url='http://server.com/BASE/DIR/FILE'))
self.assertEqual(called['count'], 0)
result = xslt(etree.XML('<a/>'))
self.assertEqual(called['count'], 1)
# now the same thing with a stylesheet base file:// URL
called['count'] = 0
expected_url = 'file://BASE/DIR/test.xml'
xslt = etree.XSLT(etree.XML(
stylesheet_xml, parser,
base_url='file://BASE/DIR/FILE'))
self.assertEqual(called['count'], 0)
result = xslt(etree.XML('<a/>'))
self.assertEqual(called['count'], 1)
def test_xslt_document_parse_allow(self):
access_control = etree.XSLTAccessControl(read_file=True)
xslt = etree.XSLT(etree.parse(fileInTestDir("test-document.xslt")),
access_control=access_control)
result = xslt(etree.XML('<a/>'))
root = result.getroot()
self.assertEqual(root.tag,
'test')
self.assertEqual(root[0].tag,
'{http://www.w3.org/1999/XSL/Transform}stylesheet')
def test_xslt_document_parse_deny(self):
access_control = etree.XSLTAccessControl(read_file=False)
xslt = etree.XSLT(etree.parse(fileInTestDir("test-document.xslt")),
access_control=access_control)
self.assertRaises(etree.XSLTApplyError, xslt, etree.XML('<a/>'))
def test_xslt_document_parse_deny_all(self):
access_control = etree.XSLTAccessControl.DENY_ALL
xslt = etree.XSLT(etree.parse(fileInTestDir("test-document.xslt")),
access_control=access_control)
self.assertRaises(etree.XSLTApplyError, xslt, etree.XML('<a/>'))
def test_xslt_access_control_repr(self):
access_control = etree.XSLTAccessControl.DENY_ALL
self.assertTrue(repr(access_control).startswith(type(access_control).__name__))
self.assertEqual(repr(access_control), repr(access_control))
self.assertNotEqual(repr(etree.XSLTAccessControl.DENY_ALL),
repr(etree.XSLTAccessControl.DENY_WRITE))
self.assertNotEqual(repr(etree.XSLTAccessControl.DENY_ALL),
repr(etree.XSLTAccessControl()))
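    # Sketch (keyword names as in lxml's XSLTAccessControl): policies can
    # also be built per-permission rather than via the DENY_* presets, e.g.
    #
    #   ac = etree.XSLTAccessControl(read_file=True, write_file=False)
    #   transform = etree.XSLT(stylesheet, access_control=ac)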
def test_xslt_move_result(self):
root = etree.XML(_bytes('''\
<transform>
<widget displayType="fieldset"/>
</transform>'''))
xslt = etree.XSLT(etree.XML(_bytes('''\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="html" indent="no"/>
<xsl:template match="/">
<html>
<xsl:apply-templates/>
</html>
</xsl:template>
<xsl:template match="widget">
<xsl:element name="{@displayType}"/>
</xsl:template>
</xsl:stylesheet>''')))
result = xslt(root[0])
root[:] = result.getroot()[:]
del root # segfaulted before
def test_xslt_pi(self):
tree = self.parse('''\
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="%s"?>
<a>
<b>B</b>
<c>C</c>
</a>''' % fileInTestDir("test1.xslt"))
style_root = tree.getroot().getprevious().parseXSL().getroot()
self.assertEqual("{http://www.w3.org/1999/XSL/Transform}stylesheet",
style_root.tag)
def test_xslt_pi_embedded_xmlid(self):
# test xml:id dictionary lookup mechanism
tree = self.parse('''\
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="#style"?>
<a>
<b>B</b>
<c>C</c>
<xsl:stylesheet version="1.0" xml:id="style"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>
</a>''')
style_root = tree.getroot().getprevious().parseXSL().getroot()
self.assertEqual("{http://www.w3.org/1999/XSL/Transform}stylesheet",
style_root.tag)
st = etree.XSLT(style_root)
res = st(tree)
self.assertEqual('''\
<?xml version="1.0"?>
<foo>B</foo>
''',
str(res))
def test_xslt_pi_embedded_id(self):
# test XPath lookup mechanism
tree = self.parse('''\
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="#style"?>
<a>
<b>B</b>
<c>C</c>
</a>''')
style = self.parse('''\
<xsl:stylesheet version="1.0" xml:id="style"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>
''')
tree.getroot().append(style.getroot())
style_root = tree.getroot().getprevious().parseXSL().getroot()
self.assertEqual("{http://www.w3.org/1999/XSL/Transform}stylesheet",
style_root.tag)
st = etree.XSLT(style_root)
res = st(tree)
self.assertEqual('''\
<?xml version="1.0"?>
<foo>B</foo>
''',
str(res))
def test_xslt_pi_get(self):
tree = self.parse('''\
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="TEST"?>
<a>
<b>B</b>
<c>C</c>
</a>''')
pi = tree.getroot().getprevious()
self.assertEqual("TEST", pi.get("href"))
def test_xslt_pi_get_all(self):
tree = self.parse('''\
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="TEST"?>
<a>
<b>B</b>
<c>C</c>
</a>''')
pi = tree.getroot().getprevious()
self.assertEqual("TEST", pi.get("href"))
self.assertEqual("text/xsl", pi.get("type"))
self.assertEqual(None, pi.get("motz"))
def test_xslt_pi_get_all_reversed(self):
tree = self.parse('''\
<?xml version="1.0"?>
<?xml-stylesheet href="TEST" type="text/xsl"?>
<a>
<b>B</b>
<c>C</c>
</a>''')
pi = tree.getroot().getprevious()
self.assertEqual("TEST", pi.get("href"))
self.assertEqual("text/xsl", pi.get("type"))
self.assertEqual(None, pi.get("motz"))
def test_xslt_pi_get_unknown(self):
tree = self.parse('''\
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="TEST"?>
<a>
<b>B</b>
<c>C</c>
</a>''')
pi = tree.getroot().getprevious()
self.assertEqual(None, pi.get("unknownattribute"))
def test_xslt_pi_set_replace(self):
tree = self.parse('''\
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="TEST"?>
<a>
<b>B</b>
<c>C</c>
</a>''')
pi = tree.getroot().getprevious()
self.assertEqual("TEST", pi.get("href"))
pi.set("href", "TEST123")
self.assertEqual("TEST123", pi.get("href"))
def test_xslt_pi_set_new(self):
tree = self.parse('''\
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl"?>
<a>
<b>B</b>
<c>C</c>
</a>''')
pi = tree.getroot().getprevious()
self.assertEqual(None, pi.get("href"))
pi.set("href", "TEST")
self.assertEqual("TEST", pi.get("href"))
class ETreeEXSLTTestCase(HelperTestCase):
"""EXSLT tests"""
def test_exslt_str(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:str="http://exslt.org/strings"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
exclude-result-prefixes="str xsl">
<xsl:template match="text()">
<xsl:value-of select="str:align(string(.), '***', 'center')" />
</xsl:template>
<xsl:template match="*">
<xsl:copy>
<xsl:apply-templates/>
</xsl:copy>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual('''\
<?xml version="1.0"?>
<a><b>*B*</b><c>*C*</c></a>
''',
str(res))
def test_exslt_str_attribute_replace(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version = "1.0"
xmlns:xsl='http://www.w3.org/1999/XSL/Transform'
xmlns:str="http://exslt.org/strings"
extension-element-prefixes="str">
<xsl:template match="/">
<h1 class="{str:replace('abc', 'b', 'x')}">test</h1>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual(str(res), '''\
<?xml version="1.0"?>
<h1 class="axc">test</h1>
''')
def test_exslt_math(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:math="http://exslt.org/math"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
exclude-result-prefixes="math xsl">
<xsl:template match="*">
<xsl:copy>
<xsl:attribute name="pi">
<xsl:value-of select="math:constant('PI', count(*)+2)"/>
</xsl:attribute>
<xsl:apply-templates/>
</xsl:copy>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual('''\
<?xml version="1.0"?>
<a pi="3.14"><b pi="3">B</b><c pi="3">C</c></a>
''',
str(res))
def test_exslt_regexp_test(self):
xslt = etree.XSLT(etree.XML(_bytes("""\
<xsl:stylesheet version="1.0"
xmlns:regexp="http://exslt.org/regular-expressions"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<test><xsl:copy-of select="*[regexp:test(string(.), '8.')]"/></test>
</xsl:template>
</xsl:stylesheet>
""")))
result = xslt(etree.XML(_bytes('<a><b>123</b><b>098</b><b>987</b></a>')))
root = result.getroot()
self.assertEqual(root.tag,
'test')
self.assertEqual(len(root), 1)
self.assertEqual(root[0].tag,
'b')
self.assertEqual(root[0].text,
'987')
def test_exslt_regexp_replace(self):
xslt = etree.XSLT(etree.XML("""\
<xsl:stylesheet version="1.0"
xmlns:regexp="http://exslt.org/regular-expressions"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<test>
<xsl:copy-of select="regexp:replace(string(.), 'd.', '', 'XX')"/>
<xsl:text>-</xsl:text>
<xsl:copy-of select="regexp:replace(string(.), 'd.', 'gi', 'XX')"/>
</test>
</xsl:template>
</xsl:stylesheet>
"""))
result = xslt(etree.XML(_bytes('<a>abdCdEeDed</a>')))
root = result.getroot()
self.assertEqual(root.tag,
'test')
self.assertEqual(len(root), 0)
self.assertEqual(root.text, 'abXXdEeDed-abXXXXeXXd')
def test_exslt_regexp_match(self):
xslt = etree.XSLT(etree.XML("""\
<xsl:stylesheet version="1.0"
xmlns:regexp="http://exslt.org/regular-expressions"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<test>
<test1><xsl:copy-of select="regexp:match(string(.), 'd.')"/></test1>
<test2><xsl:copy-of select="regexp:match(string(.), 'd.', 'g')"/></test2>
<test2i><xsl:copy-of select="regexp:match(string(.), 'd.', 'gi')"/></test2i>
</test>
</xsl:template>
</xsl:stylesheet>
"""))
result = xslt(etree.XML(_bytes('<a>abdCdEeDed</a>')))
root = result.getroot()
self.assertEqual(root.tag, 'test')
self.assertEqual(len(root), 3)
self.assertEqual(len(root[0]), 1)
self.assertEqual(root[0][0].tag, 'match')
self.assertEqual(root[0][0].text, 'dC')
self.assertEqual(len(root[1]), 2)
self.assertEqual(root[1][0].tag, 'match')
self.assertEqual(root[1][0].text, 'dC')
self.assertEqual(root[1][1].tag, 'match')
self.assertEqual(root[1][1].text, 'dE')
self.assertEqual(len(root[2]), 3)
self.assertEqual(root[2][0].tag, 'match')
self.assertEqual(root[2][0].text, 'dC')
self.assertEqual(root[2][1].tag, 'match')
self.assertEqual(root[2][1].text, 'dE')
self.assertEqual(root[2][2].tag, 'match')
self.assertEqual(root[2][2].text, 'De')
def test_exslt_regexp_match_groups(self):
xslt = etree.XSLT(etree.XML(_bytes("""\
<xsl:stylesheet version="1.0"
xmlns:regexp="http://exslt.org/regular-expressions"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<test>
<xsl:for-each select="regexp:match(
'123abc567', '([0-9]+)([a-z]+)([0-9]+)' )">
<test1><xsl:value-of select="."/></test1>
</xsl:for-each>
</test>
</xsl:template>
</xsl:stylesheet>
""")))
result = xslt(etree.XML(_bytes('<a/>')))
root = result.getroot()
self.assertEqual(root.tag, 'test')
self.assertEqual(len(root), 4)
self.assertEqual(root[0].text, "123abc567")
self.assertEqual(root[1].text, "123")
self.assertEqual(root[2].text, "abc")
self.assertEqual(root[3].text, "567")
def test_exslt_regexp_match1(self):
# taken from http://www.exslt.org/regexp/functions/match/index.html
xslt = etree.XSLT(etree.XML(_bytes("""\
<xsl:stylesheet version="1.0"
xmlns:regexp="http://exslt.org/regular-expressions"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<test>
<xsl:for-each select="regexp:match(
'http://www.bayes.co.uk/xml/index.xml?/xml/utils/rechecker.xml',
'(\\w+):\\/\\/([^/:]+)(:\\d*)?([^# ]*)')">
<test1><xsl:value-of select="."/></test1>
</xsl:for-each>
</test>
</xsl:template>
</xsl:stylesheet>
""")))
result = xslt(etree.XML(_bytes('<a/>')))
root = result.getroot()
self.assertEqual(root.tag, 'test')
self.assertEqual(len(root), 5)
self.assertEqual(
root[0].text,
"http://www.bayes.co.uk/xml/index.xml?/xml/utils/rechecker.xml")
self.assertEqual(
root[1].text,
"http")
self.assertEqual(
root[2].text,
"www.bayes.co.uk")
self.assertFalse(root[3].text)
self.assertEqual(
root[4].text,
"/xml/index.xml?/xml/utils/rechecker.xml")
def test_exslt_regexp_match2(self):
# taken from http://www.exslt.org/regexp/functions/match/index.html
xslt = etree.XSLT(self.parse("""\
<xsl:stylesheet version="1.0"
xmlns:regexp="http://exslt.org/regular-expressions"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<test>
<xsl:for-each select="regexp:match(
'This is a test string', '(\\w+)', 'g')">
<test1><xsl:value-of select="."/></test1>
</xsl:for-each>
</test>
</xsl:template>
</xsl:stylesheet>
"""))
result = xslt(etree.XML(_bytes('<a/>')))
root = result.getroot()
self.assertEqual(root.tag, 'test')
self.assertEqual(len(root), 5)
self.assertEqual(root[0].text, "This")
self.assertEqual(root[1].text, "is")
self.assertEqual(root[2].text, "a")
self.assertEqual(root[3].text, "test")
self.assertEqual(root[4].text, "string")
def _test_exslt_regexp_match3(self):
# taken from http://www.exslt.org/regexp/functions/match/index.html
# THIS IS NOT SUPPORTED!
xslt = etree.XSLT(etree.XML(_bytes("""\
<xsl:stylesheet version="1.0"
xmlns:regexp="http://exslt.org/regular-expressions"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<test>
<xsl:for-each select="regexp:match(
'This is a test string', '([a-z])+ ', 'g')">
<test1><xsl:value-of select="."/></test1>
</xsl:for-each>
</test>
</xsl:template>
</xsl:stylesheet>
""")))
result = xslt(etree.XML(_bytes('<a/>')))
root = result.getroot()
self.assertEqual(root.tag, 'test')
self.assertEqual(len(root), 4)
self.assertEqual(root[0].text, "his")
self.assertEqual(root[1].text, "is")
self.assertEqual(root[2].text, "a")
self.assertEqual(root[3].text, "test")
def _test_exslt_regexp_match4(self):
# taken from http://www.exslt.org/regexp/functions/match/index.html
# THIS IS NOT SUPPORTED!
xslt = etree.XSLT(etree.XML(_bytes("""\
<xsl:stylesheet version="1.0"
xmlns:regexp="http://exslt.org/regular-expressions"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="/">
<test>
<xsl:for-each select="regexp:match(
'This is a test string', '([a-z])+ ', 'gi')">
<test1><xsl:value-of select="."/></test1>
</xsl:for-each>
</test>
</xsl:template>
</xsl:stylesheet>
""")))
result = xslt(etree.XML(_bytes('<a/>')))
root = result.getroot()
self.assertEqual(root.tag, 'test')
self.assertEqual(len(root), 4)
self.assertEqual(root[0].text, "This")
self.assertEqual(root[1].text, "is")
self.assertEqual(root[2].text, "a")
self.assertEqual(root[3].text, "test")
class ETreeXSLTExtFuncTestCase(HelperTestCase):
"""Tests for XPath extension functions in XSLT."""
def test_extensions1(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
exclude-result-prefixes="myns">
<xsl:template match="a"><A><xsl:value-of select="myns:mytext(b)"/></A></xsl:template>
</xsl:stylesheet>''')
def mytext(ctxt, values):
return 'X' * len(values)
result = tree.xslt(style, {('testns', 'mytext') : mytext})
self.assertEqual(self._rootstring(result),
_bytes('<A>X</A>'))
def test_extensions2(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
exclude-result-prefixes="myns">
<xsl:template match="a"><A><xsl:value-of select="myns:mytext(b)"/></A></xsl:template>
</xsl:stylesheet>''')
def mytext(ctxt, values):
return 'X' * len(values)
namespace = etree.FunctionNamespace('testns')
namespace['mytext'] = mytext
result = tree.xslt(style)
self.assertEqual(self._rootstring(result),
_bytes('<A>X</A>'))
def test_variable_result_tree_fragment(self):
tree = self.parse('<a><b>B</b><b/></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
exclude-result-prefixes="myns">
<xsl:template match="a">
<xsl:variable name="content">
<xsl:apply-templates/>
</xsl:variable>
<A><xsl:value-of select="myns:mytext($content)"/></A>
</xsl:template>
<xsl:template match="b"><xsl:copy>BBB</xsl:copy></xsl:template>
</xsl:stylesheet>''')
def mytext(ctxt, values):
for value in values:
self.assertTrue(hasattr(value, 'tag'),
"%s is not an Element" % type(value))
self.assertEqual(value.tag, 'b')
self.assertEqual(value.text, 'BBB')
return 'X'.join([el.tag for el in values])
namespace = etree.FunctionNamespace('testns')
namespace['mytext'] = mytext
result = tree.xslt(style)
self.assertEqual(self._rootstring(result),
_bytes('<A>bXb</A>'))
def test_xpath_on_context_node(self):
tree = self.parse('<a><b>B<c/>C</b><b/></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
exclude-result-prefixes="myns">
<xsl:template match="b">
<A><xsl:value-of select="myns:myext()"/></A>
</xsl:template>
</xsl:stylesheet>''')
def extfunc(ctxt):
text_content = ctxt.context_node.xpath('text()')
return 'x'.join(text_content)
namespace = etree.FunctionNamespace('testns')
namespace['myext'] = extfunc
result = tree.xslt(style)
self.assertEqual(self._rootstring(result),
_bytes('<A>BxC</A>'))
def test_xpath_on_foreign_context_node(self):
# LP ticket 1354652
class Resolver(etree.Resolver):
def resolve(self, system_url, public_id, context):
assert system_url == 'extdoc.xml'
return self.resolve_string(b'<a><b>B<c/>C</b><b/></a>', context)
parser = etree.XMLParser()
parser.resolvers.add(Resolver())
tree = self.parse(b'<a><b/><b/></a>')
transform = etree.XSLT(self.parse(b'''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:mypre="testns"
exclude-result-prefixes="mypre">
<xsl:template match="b">
<B><xsl:value-of select="mypre:myext()"/></B>
</xsl:template>
<xsl:template match="a">
<A><xsl:apply-templates select="document('extdoc.xml')//b" /></A>
</xsl:template>
</xsl:stylesheet>''', parser=parser))
def extfunc(ctxt):
text_content = ctxt.context_node.xpath('text()')
return 'x'.join(text_content)
namespace = etree.FunctionNamespace('testns')
namespace['myext'] = extfunc
result = transform(tree)
self.assertEqual(self._rootstring(result),
_bytes('<A><B>BxC</B><B/></A>'))
class ETreeXSLTExtElementTestCase(HelperTestCase):
"""Tests for extension elements in XSLT."""
def test_extension_element(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns"
exclude-result-prefixes="myns">
<xsl:template match="a">
<A><myns:myext>b</myns:myext></A>
</xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
child = etree.Element(self_node.text)
child.text = 'X'
output_parent.append(child)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(self._rootstring(result),
_bytes('<A><b>X</b></A>'))
def test_extension_element_doc_context(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns"
exclude-result-prefixes="myns">
<xsl:template match="/">
<A><myns:myext>b</myns:myext></A>
</xsl:template>
</xsl:stylesheet>''')
tags = []
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
tags.append(input_node.tag)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(tags, ['a'])
def test_extension_element_comment_pi_context(self):
tree = self.parse('<?test toast?><a><!--a comment--><?another pi?></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns"
exclude-result-prefixes="myns">
<xsl:template match="/">
<ROOT><xsl:apply-templates /></ROOT>
</xsl:template>
<xsl:template match="comment()">
<A><myns:myext>b</myns:myext></A>
</xsl:template>
<xsl:template match="processing-instruction()">
<A><myns:myext>b</myns:myext></A>
</xsl:template>
</xsl:stylesheet>''')
text = []
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
text.append(input_node.text)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(text, ['toast', 'a comment', 'pi'])
def _test_extension_element_attribute_context(self):
# currently not supported
tree = self.parse('<a test="A"><b attr="B"/></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns"
exclude-result-prefixes="myns">
<xsl:template match="@test">
<A><myns:myext>b</myns:myext></A>
</xsl:template>
<xsl:template match="@attr">
<A><myns:myext>b</myns:myext></A>
</xsl:template>
</xsl:stylesheet>''')
text = []
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, attr_value, output_parent):
text.append(attr_value)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(text, ['A', 'B'])
def test_extension_element_content(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns">
<xsl:template match="a">
<A><myns:myext><x>X</x><y>Y</y><z/></myns:myext></A>
</xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
output_parent.extend(list(self_node)[1:])
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(self._rootstring(result),
_bytes('<A><y>Y</y><z/></A>'))
def test_extension_element_apply_templates(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns">
<xsl:template match="a">
<A><myns:myext><x>X</x><y>Y</y><z/></myns:myext></A>
</xsl:template>
<xsl:template match="x" />
<xsl:template match="z">XYZ</xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
for child in self_node:
for result in self.apply_templates(context, child):
if isinstance(result, basestring):
el = etree.Element("T")
el.text = result
else:
el = result
output_parent.append(el)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(self._rootstring(result),
_bytes('<A><T>Y</T><T>XYZ</T></A>'))
def test_extension_element_apply_templates_elements_only(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns">
<xsl:template match="a">
<A><myns:myext><x>X</x><y>Y</y><z/></myns:myext></A>
</xsl:template>
<xsl:template match="x"><X/></xsl:template>
<xsl:template match="z">XYZ</xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
for child in self_node:
for result in self.apply_templates(context, child,
elements_only=True):
assert not isinstance(result, basestring)
output_parent.append(result)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(self._rootstring(result),
_bytes('<A><X/></A>'))
def test_extension_element_apply_templates_remove_blank_text(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns">
<xsl:template match="a">
<A><myns:myext><x>X</x><y>Y</y><z/></myns:myext></A>
</xsl:template>
<xsl:template match="x"><X/></xsl:template>
<xsl:template match="y"><xsl:text> </xsl:text></xsl:template>
<xsl:template match="z">XYZ</xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
for child in self_node:
for result in self.apply_templates(context, child,
remove_blank_text=True):
if isinstance(result, basestring):
assert result.strip()
el = etree.Element("T")
el.text = result
else:
el = result
output_parent.append(el)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(self._rootstring(result),
_bytes('<A><X/><T>XYZ</T></A>'))
def test_extension_element_apply_templates_target_node(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns">
<xsl:template match="a">
<A><myns:myext><x>X</x><y>Y</y><z/></myns:myext></A>
</xsl:template>
<xsl:template match="x" />
<xsl:template match="z">XYZ</xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
for child in self_node:
self.apply_templates(context, child, output_parent)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(self._rootstring(result),
_bytes('<A>YXYZ</A>'))
def test_extension_element_apply_templates_target_node_doc(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns">
<xsl:template match="a">
<myns:myext><x>X</x><y>Y</y><z/></myns:myext>
</xsl:template>
<xsl:template match="x"><xsl:processing-instruction name="test">TEST</xsl:processing-instruction></xsl:template>
<xsl:template match="y"><Y>XYZ</Y></xsl:template>
<xsl:template match="z"><xsl:comment>TEST</xsl:comment></xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
for child in self_node:
self.apply_templates(context, child, output_parent)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(etree.tostring(result),
_bytes('<?test TEST?><Y>XYZ</Y><!--TEST-->'))
def test_extension_element_process_children(self):
tree = self.parse('<a><b>E</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns">
<xsl:template match="a">
<xsl:variable name="testvar">yo</xsl:variable>
<A>
<myns:myext>
<xsl:attribute name="attr">
<xsl:value-of select="$testvar" />
</xsl:attribute>
<B>
<xsl:choose>
<xsl:when test="1 = 2"><C/></xsl:when>
<xsl:otherwise><D><xsl:value-of select="b/text()" /></D></xsl:otherwise>
</xsl:choose>
</B>
</myns:myext>
</A>
</xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
el = etree.Element('MY')
self.process_children(context, el)
output_parent.append(el)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
        # note: self._rootstring() strips spaces, hence '<MYattr="yo">'
        self.assertEqual(self._rootstring(result),
                         _bytes('<A><MYattr="yo"><B><D>E</D></B></MY></A>'))
def test_extension_element_process_children_to_append_only(self):
tree = self.parse('<a/>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns">
<xsl:template match="a">
<myns:myext>
<A/>
</myns:myext>
</xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
self.process_children(context, output_parent)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(self._rootstring(result),
_bytes('<A/>'))
def test_extension_element_process_children_to_read_only_raise(self):
tree = self.parse('<a/>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns">
<xsl:template match="a">
<myns:myext>
<A/>
</myns:myext>
</xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
self.process_children(context, self_node)
extensions = { ('testns', 'myext') : MyExt() }
self.assertRaises(TypeError, tree.xslt, style, extensions=extensions)
def test_extension_element_process_children_with_subextension_element(self):
tree = self.parse('<a/>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns">
<xsl:template match="a">
<myns:myext>
<A><myns:myext><B/></myns:myext></A>
</myns:myext>
</xsl:template>
</xsl:stylesheet>''')
class MyExt(etree.XSLTExtension):
callback_call_counter = 0
def execute(self, context, self_node, input_node, output_parent):
self.callback_call_counter += 1
el = etree.Element('MY', n=str(self.callback_call_counter))
self.process_children(context, el)
output_parent.append(el)
extensions = { ('testns', 'myext') : MyExt() }
result = tree.xslt(style, extensions=extensions)
self.assertEqual(self._rootstring(result),
_bytes('<MYn="1"><A><MYn="2"><B/></MY></A></MY>'))
def test_extension_element_raise(self):
tree = self.parse('<a><b>B</b></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:myns="testns"
extension-element-prefixes="myns"
exclude-result-prefixes="myns">
<xsl:template match="a">
<A><myns:myext>b</myns:myext></A>
</xsl:template>
</xsl:stylesheet>''')
class MyError(Exception):
pass
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
raise MyError("expected!")
extensions = { ('testns', 'myext') : MyExt() }
self.assertRaises(MyError, tree.xslt, style, extensions=extensions)
# FIXME: DISABLED - implementation seems to be broken
# if someone cares enough about this feature, I take pull requests that fix it.
def _test_multiple_extension_elements_with_output_parent(self):
tree = self.parse("""\
<text>
<par>This is <format>arbitrary</format> text in a paragraph</par>
</text>""")
style = self.parse("""\
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:my="my" extension-element-prefixes="my" version="1.0">
<xsl:template match="par">
<my:par><xsl:apply-templates /></my:par>
</xsl:template>
<xsl:template match="format">
<my:format><xsl:apply-templates /></my:format>
</xsl:template>
</xsl:stylesheet>
""")
test = self
calls = []
class ExtMyPar(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
calls.append('par')
p = etree.Element("p")
p.attrib["style"] = "color:red"
self.process_children(context, p)
output_parent.append(p)
class ExtMyFormat(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
calls.append('format')
content = self.process_children(context)
test.assertEqual(1, len(content))
test.assertEqual('arbitrary', content[0])
test.assertEqual('This is ', output_parent.text)
output_parent.text += '*-%s-*' % content[0]
extensions = {("my", "par"): ExtMyPar(), ("my", "format"): ExtMyFormat()}
transform = etree.XSLT(style, extensions=extensions)
result = transform(tree)
self.assertEqual(['par', 'format'], calls)
self.assertEqual(
b'<p style="color:red">This is *-arbitrary-* text in a paragraph</p>\n',
etree.tostring(result))
def test_extensions_nsmap(self):
tree = self.parse("""\
<root>
<inner xmlns:sha256="http://www.w3.org/2001/04/xmlenc#sha256">
<data>test</data>
</inner>
</root>
""")
style = self.parse("""\
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" xmlns:my="extns" extension-element-prefixes="my" version="1.0">
<xsl:template match="node()|@*">
<xsl:copy>
<xsl:apply-templates select="node()|@*"/>
</xsl:copy>
</xsl:template>
<xsl:template match="data">
<my:show-nsmap/>
</xsl:template>
</xsl:stylesheet>
""")
class MyExt(etree.XSLTExtension):
def execute(self, context, self_node, input_node, output_parent):
output_parent.text = str(input_node.nsmap)
extensions = {('extns', 'show-nsmap'): MyExt()}
result = tree.xslt(style, extensions=extensions)
self.assertEqual(etree.tostring(result, pretty_print=True), b"""\
<root>
<inner xmlns:sha256="http://www.w3.org/2001/04/xmlenc#sha256">{'sha256': 'http://www.w3.org/2001/04/xmlenc#sha256'}
</inner>
</root>
""")
class Py3XSLTTestCase(HelperTestCase):
"""XSLT tests for etree under Python 3"""
pytestmark = skipif('sys.version_info < (3,0)')
def test_xslt_result_bytes(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual(_bytes('''\
<?xml version="1.0"?>
<foo>B</foo>
'''),
bytes(res))
def test_xslt_result_bytearray(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual(_bytes('''\
<?xml version="1.0"?>
<foo>B</foo>
'''),
bytearray(res))
def test_xslt_result_memoryview(self):
tree = self.parse('<a><b>B</b><c>C</c></a>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*" />
<xsl:template match="/">
<foo><xsl:value-of select="/a/b/text()" /></foo>
</xsl:template>
</xsl:stylesheet>''')
st = etree.XSLT(style)
res = st(tree)
self.assertEqual(_bytes('''\
<?xml version="1.0"?>
<foo>B</foo>
'''),
bytes(memoryview(res)))
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeXSLTTestCase)])
suite.addTests([unittest.makeSuite(ETreeEXSLTTestCase)])
suite.addTests([unittest.makeSuite(ETreeXSLTExtFuncTestCase)])
suite.addTests([unittest.makeSuite(ETreeXSLTExtElementTestCase)])
if is_python3:
suite.addTests([unittest.makeSuite(Py3XSLTTestCase)])
suite.addTests(
[make_doctest('../../../doc/extensions.txt')])
suite.addTests(
[make_doctest('../../../doc/xpathxslt.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
| bsd-3-clause | 9,071,935,211,363,453,000 | 32.440994 | 128 | 0.557464 | false |
cpn18/track-chart | desktop/gps_smoothing.py | 1 | 1313 | import sys
import json
import math
THRESHOLD = 10
data = []
with open(sys.argv[1], "r") as f:
used = count = 0
for line in f:
if line[0] == "#":
continue
items = line.split()
if items[1] == "TPV":
obj = json.loads(" ".join(items[2:-1]))
obj['used'] = used
obj['count'] = count
elif items[1] == "SKY":
obj = json.loads(" ".join(items[2:-1]))
used = 0
count = len(obj['satellites'])
for i in range(0, count):
if obj['satellites'][i]['used']:
used += 1
continue
else:
continue
if used >= THRESHOLD and 'lon' in obj and 'lat' in obj:
data.append(obj)
print("Longitude Latitude dx epx dy epy used count")
for i in range(1, len(data)):
    dx = abs((data[i]['lon'] - data[i-1]['lon']) * 111120 * math.cos(math.radians(data[i]['lat'])))  # lon degrees to meters, scaled by cos(latitude)
    dy = abs((data[i]['lat'] - data[i-1]['lat']) * 111128)  # lat degrees to meters
try:
if dx > 3*data[i]['epx'] or dy > 3*data[i]['epy']:
continue
print("%f %f %f %f %f %f %d %d" % (data[i]['lon'], data[i]['lat'], dx, data[i]['epx'], dy, data[i]['epy'], data[i]['used'], data[i]['count']))
except KeyError:
pass
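# Worked example of the degree->meter conversion above (illustrative numbers only):
# at latitude 43 N, one degree of longitude spans about 111120 * cos(43 deg) ~= 81,270 m,
# so a longitude difference of 0.0001 degrees corresponds to roughly 8.1 m on the ground.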
| gpl-3.0 | -8,348,637,999,380,964,000 | 29.534884 | 150 | 0.476009 | false |
kgori/treeCl | treeCl/parutils.py | 1 | 9550 | from abc import ABCMeta, abstractmethod
from .constants import PARALLEL_PROFILE
from .utils import setup_progressbar, grouper, flatten_list
import logging
import multiprocessing
import sys
logger = logging.getLogger(__name__)
__author__ = 'kgori'
"""
Introduced this workaround for a bug in multiprocessing where
errors are thrown for an EINTR interrupt.
Workaround taken from http://stackoverflow.com/a/5395277 - but
changed because can't subclass from multiprocessing.Queue (it's
a factory method)
"""
import errno
def retry_on_eintr(function, *args, **kw):
while True:
try:
return function(*args, **kw)
except IOError as e:
if e.errno == errno.EINTR:
continue
else:
raise
def get_from_queue(queue, block=True, timeout=None):
return retry_on_eintr(queue.get, block, timeout)
"""
End of workaround
"""
def fun(f, q_in, q_out):
while True:
(i, x) = get_from_queue(q_in)
if i is None:
break
q_out.put((i, f(*x)))
def async_avail():
from IPython import parallel
try:
client = parallel.Client(PARALLEL_PROFILE)
return len(client) > 0
except IOError:
return False
except Exception:
return False
def get_client():
from IPython import parallel
try:
client = parallel.Client(profile=PARALLEL_PROFILE)
return client if len(client) > 0 else None
except IOError:
return None
except Exception:
return None
def tupleise(args):
for a in args:
if isinstance(a, (tuple, list)):
yield a
else:
yield (a,)
def get_njobs(nargs, args):
if nargs is not None:
njobs = nargs
elif isinstance(args, (tuple, list)):
njobs = len(args)
else:
njobs = int(sys.maxsize / 1000000) # sys.maxsize is too large for progressbar to display ETA (datetime issue)
return njobs
def parallel_map(client, task, args, message, batchsize=1, background=False, nargs=None):
"""
Helper to map a function over a sequence of inputs, in parallel, with progress meter.
:param client: IPython.parallel.Client instance
:param task: Function
:param args: Must be a list of tuples of arguments that the task function will be mapped onto.
If the function takes a single argument, it still must be a 1-tuple.
:param message: String for progress bar
:param batchsize: Jobs are shipped in batches of this size. Higher numbers mean less network traffic,
but longer execution time per job.
:return: IPython.parallel.AsyncMapResult
"""
show_progress = bool(message)
njobs = get_njobs(nargs, args)
nproc = len(client)
logger.debug('parallel_map: len(client) = {}'.format(len(client)))
view = client.load_balanced_view()
if show_progress:
message += ' (IP:{}w:{}b)'.format(nproc, batchsize)
pbar = setup_progressbar(message, njobs, simple_progress=True)
if not background:
pbar.start()
map_result = view.map(task, *list(zip(*args)), chunksize=batchsize)
if background:
return map_result, client
while not map_result.ready():
map_result.wait(1)
if show_progress:
pbar.update(min(njobs, map_result.progress * batchsize))
if show_progress:
pbar.finish()
return map_result
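# Usage sketch (illustrative only; assumes a running ipcluster):
#   from IPython import parallel
#   client = parallel.Client()
#   amr = parallel_map(client, pow, [(i, 2) for i in range(10)], 'squares', batchsize=2)
#   results = list(amr)  # -> [0, 1, 4, 9, ...]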
def sequential_map(task, args, message, nargs=None):
"""
Helper to map a function over a sequence of inputs, sequentially, with progress meter.
    :param task: Function
    :param args: Must be a list of tuples of arguments that the task function will be mapped onto.
                 If the function takes a single argument, it still must be a 1-tuple.
    :param message: String for progress bar
    :param nargs: Optional number of jobs, for use when args is a generator
    :return: list of results
"""
njobs = get_njobs(nargs, args)
show_progress = bool(message)
if show_progress:
pbar = setup_progressbar(message, njobs, simple_progress=True)
pbar.start()
map_result = []
for (i, arglist) in enumerate(tupleise(args), start=1):
map_result.append(task(*arglist))
if show_progress:
pbar.update(i)
if show_progress:
pbar.finish()
return map_result
def threadpool_map(task, args, message, concurrency, batchsize=1, nargs=None):
"""
Helper to map a function over a range of inputs, using a threadpool, with a progress meter
"""
import concurrent.futures
njobs = get_njobs(nargs, args)
show_progress = bool(message)
batches = grouper(batchsize, tupleise(args))
    def batched_task(batch):
        return [task(*job) for job in batch]
if show_progress:
message += ' (TP:{}w:{}b)'.format(concurrency, batchsize)
pbar = setup_progressbar(message, njobs, simple_progress=True)
pbar.start()
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
futures = []
completed_count = 0
for batch in batches:
futures.append(executor.submit(batched_task, batch))
if show_progress:
for i, fut in enumerate(concurrent.futures.as_completed(futures), start=1):
completed_count += len(fut.result())
pbar.update(completed_count)
else:
concurrent.futures.wait(futures)
if show_progress:
pbar.finish()
return flatten_list([fut.result() for fut in futures])
def processpool_map(task, args, message, concurrency, batchsize=1, nargs=None):
"""
    Map a function over inputs using a pool of worker processes fed through
    multiprocessing queues. See http://stackoverflow.com/a/16071616
"""
njobs = get_njobs(nargs, args)
show_progress = bool(message)
batches = grouper(batchsize, tupleise(args))
def batched_task(*batch):
return [task(*job) for job in batch]
if show_progress:
message += ' (PP:{}w:{}b)'.format(concurrency, batchsize)
pbar = setup_progressbar(message, njobs, simple_progress=True)
pbar.start()
q_in = multiprocessing.Queue() # Should I limit either queue size? Limiting in-queue
q_out = multiprocessing.Queue() # increases time taken to send jobs, makes pbar less useful
proc = [multiprocessing.Process(target=fun, args=(batched_task, q_in, q_out)) for _ in range(concurrency)]
for p in proc:
p.daemon = True
p.start()
sent = [q_in.put((i, x)) for (i, x) in enumerate(batches)]
[q_in.put((None, None)) for _ in range(concurrency)]
res = []
completed_count = 0
for _ in range(len(sent)):
result = get_from_queue(q_out)
res.append(result)
completed_count += len(result[1])
if show_progress:
pbar.update(completed_count)
[p.join() for p in proc]
if show_progress:
pbar.finish()
return flatten_list([x for (i, x) in sorted(res)])
# Instantiating ABCMeta directly yields a base class whose metaclass is ABCMeta
# on both Python 2 and 3, so @abstractmethod is enforced on subclasses.
class JobHandler(ABCMeta('JobHandlerABC', (object,), {})):
    """
    Base class to provide uniform interface for all job handlers
    """
@abstractmethod
def __call__(self, task, args, message, batchsize):
""" If you define a message, then progress will be written to stderr """
pass
class SequentialJobHandler(JobHandler):
"""
Jobs are handled using a simple map
"""
def __call__(self, task, args, message, batchsize, nargs=None):
if batchsize > 1:
            logger.warning("Setting batchsize > 1 has no effect when using a SequentialJobHandler")
return sequential_map(task, args, message, nargs)
class ThreadpoolJobHandler(JobHandler):
"""
Jobs are handled by a threadpool using concurrent.futures
"""
def __init__(self, concurrency):
self.concurrency = concurrency
def __call__(self, task, args, message, batchsize, nargs=None):
return threadpool_map(task, args, message, self.concurrency, batchsize, nargs)
class ProcesspoolJobHandler(JobHandler):
"""
    Jobs are handled by a pool of worker processes using multiprocessing
"""
def __init__(self, concurrency):
self.concurrency = concurrency
def __call__(self, task, args, message, batchsize, nargs=None):
return processpool_map(task, args, message, self.concurrency, batchsize, nargs)
class IPythonJobHandler(JobHandler):
"""
Jobs are handled using an IPython.parallel.Client
"""
def __init__(self, profile=None):
"""
Initialise the IPythonJobHandler using the given ipython profile.
Parameters
----------
profile: string
The ipython profile to connect to - this should already be running an ipcluster
If the connection fails it raises a RuntimeError
"""
import IPython.parallel
try:
self.client=IPython.parallel.Client(profile=profile)
logger.debug('__init__: len(client) = {}'.format(len(self.client)))
except (IOError, IPython.parallel.TimeoutError):
msg = 'Could not obtain an IPython parallel Client using profile "{}"'.format(profile)
logger.error(msg)
raise RuntimeError(msg)
def __call__(self, task, args, message, batchsize):
logger.debug('__call__: len(client) = {}'.format(len(self.client)))
return list(parallel_map(self.client, task, args, message, batchsize))
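# Usage sketch (illustrative, not part of the module): pick a handler and map a task.
#   handler = ThreadpoolJobHandler(concurrency=4)
#   results = handler(pow, [(i, 2) for i in range(10)], 'squares', batchsize=2)
#   # -> [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]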
| mit | 559,474,478,483,958,140 | 32.745583 | 118 | 0.638325 | false |
anbangr/trusted-juju | juju/unit/tests/test_charm.py | 1 | 5922 | from functools import partial
import os
import shutil
from twisted.internet.defer import inlineCallbacks, returnValue, succeed, fail
from twisted.web.error import Error
from twisted.web.client import downloadPage
from juju.charm import get_charm_from_path
from juju.charm.bundle import CharmBundle
from juju.charm.publisher import CharmPublisher
from juju.charm.tests import local_charm_id
from juju.charm.tests.test_directory import sample_directory
from juju.errors import FileNotFound
from juju.lib import under
from juju.state.errors import CharmStateNotFound
from juju.state.tests.common import StateTestBase
from juju.unit.charm import download_charm
from juju.lib.mocker import MATCH
class CharmPublisherTestBase(StateTestBase):
@inlineCallbacks
def setUp(self):
yield super(CharmPublisherTestBase, self).setUp()
yield self.push_default_config()
self.provider = self.config.get_default().get_machine_provider()
self.storage = self.provider.get_file_storage()
@inlineCallbacks
def publish_charm(self, charm_path=sample_directory):
charm = get_charm_from_path(charm_path)
publisher = CharmPublisher(self.client, self.storage)
yield publisher.add_charm(local_charm_id(charm), charm)
charm_states = yield publisher.publish()
returnValue((charm, charm_states[0]))
class DownloadTestCase(CharmPublisherTestBase):
@inlineCallbacks
def test_charm_download_file(self):
"""Downloading a charm should store the charm locally.
"""
charm, charm_state = yield self.publish_charm()
charm_directory = self.makeDir()
# Download the charm
yield download_charm(
self.client, charm_state.id, charm_directory)
# Verify the downloaded copy
checksum = charm.get_sha256()
charm_id = local_charm_id(charm)
charm_key = under.quote("%s:%s" % (charm_id, checksum))
charm_path = os.path.join(charm_directory, charm_key)
self.assertTrue(os.path.exists(charm_path))
bundle = CharmBundle(charm_path)
self.assertEquals(bundle.get_revision(), charm.get_revision())
self.assertEqual(checksum, bundle.get_sha256())
@inlineCallbacks
def test_charm_missing_download_file(self):
"""Downloading a file that doesn't exist raises FileNotFound.
"""
charm, charm_state = yield self.publish_charm()
charm_directory = self.makeDir()
# Delete the file
file_path = charm_state.bundle_url[len("file://"):]
os.remove(file_path)
# Download the charm
yield self.assertFailure(
download_charm(self.client, charm_state.id, charm_directory),
FileNotFound)
@inlineCallbacks
def test_charm_download_http(self):
"""Downloading a charm should store the charm locally.
"""
mock_storage = self.mocker.patch(self.storage)
def match_string(expected, value):
self.assertTrue(isinstance(value, basestring))
self.assertIn(expected, value)
return True
mock_storage.get_url(MATCH(
partial(match_string, "local_3a_series_2f_dummy-1")))
self.mocker.result("http://example.com/foobar.zip")
download_page = self.mocker.replace(downloadPage)
download_page(
MATCH(partial(match_string, "http://example.com/foobar.zip")),
MATCH(partial(match_string, "local_3a_series_2f_dummy-1")))
def bundle_in_place(url, local_path):
# must keep ref to charm else temp file goes out of scope.
charm = get_charm_from_path(sample_directory)
bundle = charm.as_bundle()
shutil.copyfile(bundle.path, local_path)
self.mocker.call(bundle_in_place)
self.mocker.result(succeed(True))
self.mocker.replay()
charm, charm_state = yield self.publish_charm()
charm_directory = self.makeDir()
self.assertEqual(
charm_state.bundle_url, "http://example.com/foobar.zip")
# Download the charm
yield download_charm(
self.client, charm_state.id, charm_directory)
@inlineCallbacks
def test_charm_download_http_error(self):
"""Errors in donwloading a charm are reported as charm not found.
"""
def match_string(expected, value):
self.assertTrue(isinstance(value, basestring))
self.assertIn(expected, value)
return True
mock_storage = self.mocker.patch(self.storage)
mock_storage.get_url(
MATCH(partial(match_string, "local_3a_series_2f_dummy-1")))
remote_url = "http://example.com/foobar.zip"
self.mocker.result(remote_url)
download_page = self.mocker.replace(downloadPage)
download_page(
MATCH(partial(match_string, "http://example.com/foobar.zip")),
MATCH(partial(match_string, "local_3a_series_2f_dummy-1")))
self.mocker.result(fail(Error("400", "Bad Stuff", "")))
self.mocker.replay()
charm, charm_state = yield self.publish_charm()
charm_directory = self.makeDir()
self.assertEqual(charm_state.bundle_url, remote_url)
error = yield self.assertFailure(
download_charm(self.client, charm_state.id, charm_directory),
FileNotFound)
self.assertIn(remote_url, str(error))
@inlineCallbacks
def test_charm_download_not_found(self):
"""An error is raised if trying to download a non existant charm.
"""
charm_directory = self.makeDir()
# Download the charm
error = yield self.assertFailure(
download_charm(
self.client, "local:mickey-21", charm_directory),
CharmStateNotFound)
self.assertEquals(str(error), "Charm 'local:mickey-21' was not found")
| agpl-3.0 | -8,211,216,513,305,947,000 | 34.461078 | 78 | 0.651807 | false |
bcantoni/ccm | ccmlib/dse_node.py | 1 | 22234 | # ccm node
from __future__ import absolute_import, with_statement
import os
import re
import shutil
import signal
import stat
import subprocess
import time
import yaml
from six import iteritems, print_
from ccmlib import common, extension, repository
from ccmlib.node import (Node, NodeError, ToolError,
handle_external_tool_process)
class DseNode(Node):
"""
    Provides interactions with a DSE node.
"""
def __init__(self, name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save=True, binary_interface=None, byteman_port='0', environment_variables=None):
super(DseNode, self).__init__(name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save, binary_interface, byteman_port, environment_variables=environment_variables)
self.get_cassandra_version()
self._dse_config_options = {}
if self.cluster.hasOpscenter():
self._copy_agent()
def get_install_cassandra_root(self):
return os.path.join(self.get_install_dir(), 'resources', 'cassandra')
def get_node_cassandra_root(self):
return os.path.join(self.get_path(), 'resources', 'cassandra')
def get_conf_dir(self):
"""
        Returns the path to the directory where the Cassandra configuration files are located
"""
return os.path.join(self.get_path(), 'resources', 'cassandra', 'conf')
def get_tool(self, toolname):
return common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', toolname)
def get_tool_args(self, toolname):
return [common.join_bin(os.path.join(self.get_install_dir(), 'resources', 'cassandra'), 'bin', 'dse'), toolname]
def get_env(self):
(node_ip, _) = self.network_interfaces['binary']
return common.make_dse_env(self.get_install_dir(), self.get_path(), node_ip)
def get_cassandra_version(self):
return common.get_dse_cassandra_version(self.get_install_dir())
def node_setup(self, version, verbose):
dir, v = repository.setup_dse(version, self.cluster.dse_username, self.cluster.dse_password, verbose=verbose)
return dir
def set_workloads(self, workloads):
self.workloads = workloads
self._update_config()
if 'solr' in self.workloads:
self.__generate_server_xml()
if 'graph' in self.workloads:
(node_ip, _) = self.network_interfaces['binary']
conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
graph_options = data['graph']
graph_options['gremlin_server']['host'] = node_ip
self.set_dse_configuration_options({'graph': graph_options})
self.__update_gremlin_config_yaml()
if 'dsefs' in self.workloads:
dsefs_options = {'dsefs_options': {'enabled': True,
'work_dir': os.path.join(self.get_path(), 'dsefs'),
'data_directories': [{'dir': os.path.join(self.get_path(), 'dsefs', 'data')}]}}
self.set_dse_configuration_options(dsefs_options)
if 'spark' in self.workloads:
self._update_spark_env()
def set_dse_configuration_options(self, values=None):
if values is not None:
self._dse_config_options = common.merge_configuration(self._dse_config_options, values)
self.import_dse_config_files()
def watch_log_for_alive(self, nodes, from_mark=None, timeout=720, filename='system.log'):
"""
Watch the log of this node until it detects that the provided other
nodes are marked UP. This method works similarly to watch_log_for_death.
We want to provide a higher default timeout when this is called on DSE.
"""
super(DseNode, self).watch_log_for_alive(nodes, from_mark=from_mark, timeout=timeout, filename=filename)
def get_launch_bin(self):
cdir = self.get_install_dir()
launch_bin = common.join_bin(cdir, 'bin', 'dse')
        # Copy back the dse script, since profiling may have modified it the previous time
shutil.copy(launch_bin, self.get_bin_dir())
return common.join_bin(self.get_path(), 'bin', 'dse')
def add_custom_launch_arguments(self, args):
args.append('cassandra')
for workload in self.workloads:
if 'hadoop' in workload:
args.append('-t')
if 'solr' in workload:
args.append('-s')
if 'spark' in workload:
args.append('-k')
if 'cfs' in workload:
args.append('-c')
if 'graph' in workload:
args.append('-g')
def start(self,
join_ring=True,
no_wait=False,
verbose=False,
update_pid=True,
wait_other_notice=True,
replace_token=None,
replace_address=None,
jvm_args=None,
wait_for_binary_proto=False,
profile_options=None,
use_jna=False,
quiet_start=False,
allow_root=False,
set_migration_task=True):
process = super(DseNode, self).start(join_ring, no_wait, verbose, update_pid, wait_other_notice, replace_token,
replace_address, jvm_args, wait_for_binary_proto, profile_options, use_jna,
quiet_start, allow_root, set_migration_task)
if self.cluster.hasOpscenter():
self._start_agent()
def _start_agent(self):
agent_dir = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_dir):
self._write_agent_address_yaml(agent_dir)
self._write_agent_log4j_properties(agent_dir)
args = [os.path.join(agent_dir, 'bin', common.platform_binary('datastax-agent'))]
subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def stop(self, wait=True, wait_other_notice=False, signal_event=signal.SIGTERM, **kwargs):
if self.cluster.hasOpscenter():
self._stop_agent()
return super(DseNode, self).stop(wait=wait, wait_other_notice=wait_other_notice, signal_event=signal_event, **kwargs)
def _stop_agent(self):
agent_dir = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_dir):
pidfile = os.path.join(agent_dir, 'datastax-agent.pid')
if os.path.exists(pidfile):
with open(pidfile, 'r') as f:
pid = int(f.readline().strip())
f.close()
if pid is not None:
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
os.remove(pidfile)
def nodetool(self, cmd, username=None, password=None, capture_output=True, wait=True):
if password is not None:
cmd = '-pw {} '.format(password) + cmd
if username is not None:
cmd = '-u {} '.format(username) + cmd
return super(DseNode, self).nodetool(cmd)
def dsetool(self, cmd):
env = self.get_env()
extension.append_to_client_env(self, env)
node_ip, binary_port = self.network_interfaces['binary']
dsetool = common.join_bin(self.get_install_dir(), 'bin', 'dsetool')
args = [dsetool, '-h', node_ip, '-j', str(self.jmx_port), '-c', str(binary_port)]
args += cmd.split()
p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
return handle_external_tool_process(p, args)
def dse(self, dse_options=None):
if dse_options is None:
dse_options = []
env = self.get_env()
extension.append_to_client_env(self, env)
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse]
args += dse_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def hadoop(self, hadoop_options=None):
if hadoop_options is None:
hadoop_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'hadoop']
args += hadoop_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def hive(self, hive_options=None):
if hive_options is None:
hive_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'hive']
args += hive_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def pig(self, pig_options=None):
if pig_options is None:
pig_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'pig']
args += pig_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def sqoop(self, sqoop_options=None):
if sqoop_options is None:
sqoop_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'sqoop']
args += sqoop_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def spark(self, spark_options=None):
if spark_options is None:
spark_options = []
env = self.get_env()
env['JMX_PORT'] = self.jmx_port
dse = common.join_bin(self.get_install_dir(), 'bin', 'dse')
args = [dse, 'spark']
args += spark_options
p = subprocess.Popen(args, env=env) #Don't redirect stdout/stderr, users need to interact with new process
return handle_external_tool_process(p, args)
def import_dse_config_files(self):
self._update_config()
if not os.path.isdir(os.path.join(self.get_path(), 'resources', 'dse', 'conf')):
os.makedirs(os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'dse', 'conf'), os.path.join(self.get_path(), 'resources', 'dse', 'conf'))
self.__update_yaml()
def copy_config_files(self):
for product in ['dse', 'cassandra', 'hadoop', 'hadoop2-client', 'sqoop', 'hive', 'tomcat', 'spark', 'shark', 'mahout', 'pig', 'solr', 'graph']:
src_conf = os.path.join(self.get_install_dir(), 'resources', product, 'conf')
dst_conf = os.path.join(self.get_path(), 'resources', product, 'conf')
if not os.path.isdir(src_conf):
continue
if os.path.isdir(dst_conf):
common.rmdirs(dst_conf)
shutil.copytree(src_conf, dst_conf)
if product == 'solr':
src_web = os.path.join(self.get_install_dir(), 'resources', product, 'web')
dst_web = os.path.join(self.get_path(), 'resources', product, 'web')
if os.path.isdir(dst_web):
common.rmdirs(dst_web)
shutil.copytree(src_web, dst_web)
if product == 'tomcat':
src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'lib')
dst_lib = os.path.join(self.get_path(), 'resources', product, 'lib')
if os.path.isdir(dst_lib):
common.rmdirs(dst_lib)
if os.path.exists(src_lib):
shutil.copytree(src_lib, dst_lib)
src_webapps = os.path.join(self.get_install_dir(), 'resources', product, 'webapps')
dst_webapps = os.path.join(self.get_path(), 'resources', product, 'webapps')
if os.path.isdir(dst_webapps):
common.rmdirs(dst_webapps)
shutil.copytree(src_webapps, dst_webapps)
src_lib = os.path.join(self.get_install_dir(), 'resources', product, 'gremlin-console', 'conf')
dst_lib = os.path.join(self.get_path(), 'resources', product, 'gremlin-console', 'conf')
if os.path.isdir(dst_lib):
common.rmdirs(dst_lib)
if os.path.exists(src_lib):
shutil.copytree(src_lib, dst_lib)
def import_bin_files(self):
common.copy_directory(os.path.join(self.get_install_dir(), 'bin'), self.get_bin_dir())
cassandra_bin_dir = os.path.join(self.get_path(), 'resources', 'cassandra', 'bin')
shutil.rmtree(cassandra_bin_dir, ignore_errors=True)
os.makedirs(cassandra_bin_dir)
common.copy_directory(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'bin'), cassandra_bin_dir)
if os.path.exists(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'tools')):
cassandra_tools_dir = os.path.join(self.get_path(), 'resources', 'cassandra', 'tools')
shutil.rmtree(cassandra_tools_dir, ignore_errors=True)
shutil.copytree(os.path.join(self.get_install_dir(), 'resources', 'cassandra', 'tools'), cassandra_tools_dir)
self.export_dse_home_in_dse_env_sh()
def export_dse_home_in_dse_env_sh(self):
'''
Due to the way CCM lays out files, separating the repository
from the node(s) confs, the `dse-env.sh` script of each node
needs to have its DSE_HOME var set and exported. Since DSE
4.5.x, the stock `dse-env.sh` file includes a commented-out
place to do exactly this, intended for installers.
Basically: read in the file, write it back out and add the two
lines.
'sstableloader' is an example of a node script that depends on
this, when used in a CCM-built cluster.
'''
with open(self.get_bin_dir() + "/dse-env.sh", "r") as dse_env_sh:
buf = dse_env_sh.readlines()
with open(self.get_bin_dir() + "/dse-env.sh", "w") as out_file:
for line in buf:
out_file.write(line)
if line == "# This is here so the installer can force set DSE_HOME\n":
out_file.write("DSE_HOME=" + self.get_install_dir() + "\nexport DSE_HOME\n")
def _update_log4j(self):
super(DseNode, self)._update_log4j()
conf_file = os.path.join(self.get_conf_dir(), common.LOG4J_CONF)
append_pattern = 'log4j.appender.V.File='
log_file = os.path.join(self.get_path(), 'logs', 'solrvalidation.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
append_pattern = 'log4j.appender.A.File='
log_file = os.path.join(self.get_path(), 'logs', 'audit.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
append_pattern = 'log4j.appender.B.File='
log_file = os.path.join(self.get_path(), 'logs', 'audit', 'dropped-events.log')
if common.is_win():
log_file = re.sub("\\\\", "/", log_file)
common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
def __update_yaml(self):
conf_file = os.path.join(self.get_path(), 'resources', 'dse', 'conf', 'dse.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
data['system_key_directory'] = os.path.join(self.get_path(), 'keys')
# Get a map of combined cluster and node configuration with the node
# configuration taking precedence.
full_options = common.merge_configuration(
self.cluster._dse_config_options,
self._dse_config_options, delete_empty=False)
# Merge options with original yaml data.
data = common.merge_configuration(data, full_options)
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def __generate_server_xml(self):
server_xml = os.path.join(self.get_path(), 'resources', 'tomcat', 'conf', 'server.xml')
if os.path.isfile(server_xml):
os.remove(server_xml)
with open(server_xml, 'w+') as f:
f.write('<Server port="8005" shutdown="SHUTDOWN">\n')
f.write(' <Service name="Solr">\n')
f.write(' <Connector port="8983" address="%s" protocol="HTTP/1.1" connectionTimeout="20000" maxThreads = "200" URIEncoding="UTF-8"/>\n' % self.network_interfaces['thrift'][0])
f.write(' <Engine name="Solr" defaultHost="localhost">\n')
f.write(' <Host name="localhost" appBase="../solr/web"\n')
f.write(' unpackWARs="true" autoDeploy="true"\n')
f.write(' xmlValidation="false" xmlNamespaceAware="false">\n')
f.write(' </Host>\n')
f.write(' </Engine>\n')
f.write(' </Service>\n')
f.write('</Server>\n')
f.close()
def __update_gremlin_config_yaml(self):
(node_ip, _) = self.network_interfaces['binary']
conf_file = os.path.join(self.get_path(), 'resources', 'graph', 'gremlin-console', 'conf', 'remote.yaml')
with open(conf_file, 'r') as f:
data = yaml.load(f)
data['hosts'] = [node_ip]
with open(conf_file, 'w') as f:
yaml.safe_dump(data, f, default_flow_style=False)
def _get_directories(self):
dirs = []
for i in ['data', 'commitlogs', 'saved_caches', 'logs', 'bin', 'keys', 'resources', os.path.join('data', 'hints')]:
dirs.append(os.path.join(self.get_path(), i))
return dirs
def _copy_agent(self):
agent_source = os.path.join(self.get_install_dir(), 'datastax-agent')
agent_target = os.path.join(self.get_path(), 'datastax-agent')
if os.path.exists(agent_source) and not os.path.exists(agent_target):
shutil.copytree(agent_source, agent_target)
def _write_agent_address_yaml(self, agent_dir):
address_yaml = os.path.join(agent_dir, 'conf', 'address.yaml')
if not os.path.exists(address_yaml):
with open(address_yaml, 'w+') as f:
(ip, port) = self.network_interfaces['thrift']
jmx = self.jmx_port
f.write('stomp_interface: 127.0.0.1\n')
f.write('local_interface: %s\n' % ip)
f.write('agent_rpc_interface: %s\n' % ip)
f.write('agent_rpc_broadcast_address: %s\n' % ip)
f.write('cassandra_conf: %s\n' % os.path.join(self.get_path(), 'resources', 'cassandra', 'conf', 'cassandra.yaml'))
f.write('cassandra_install: %s\n' % self.get_path())
f.write('cassandra_logs: %s\n' % os.path.join(self.get_path(), 'logs'))
f.write('thrift_port: %s\n' % port)
f.write('jmx_port: %s\n' % jmx)
f.close()
def _write_agent_log4j_properties(self, agent_dir):
log4j_properties = os.path.join(agent_dir, 'conf', 'log4j.properties')
with open(log4j_properties, 'w+') as f:
f.write('log4j.rootLogger=INFO,R\n')
f.write('log4j.logger.org.apache.http=OFF\n')
f.write('log4j.logger.org.eclipse.jetty.util.log=WARN,R\n')
f.write('log4j.appender.R=org.apache.log4j.RollingFileAppender\n')
f.write('log4j.appender.R.maxFileSize=20MB\n')
f.write('log4j.appender.R.maxBackupIndex=5\n')
f.write('log4j.appender.R.layout=org.apache.log4j.PatternLayout\n')
f.write('log4j.appender.R.layout.ConversionPattern=%5p [%t] %d{ISO8601} %m%n\n')
f.write('log4j.appender.R.File=./log/agent.log\n')
f.close()
def _update_spark_env(self):
try:
            node_num = re.search(r'node(\d+)', self.name).group(1)
except AttributeError:
node_num = 0
conf_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf', 'spark-env.sh')
env = self.get_env()
content = []
with open(conf_file, 'r') as f:
for line in f.readlines():
for spark_var in env.keys():
if line.startswith('export %s=' % spark_var) or line.startswith('export %s=' % spark_var, 2):
line = 'export %s=%s\n' % (spark_var, env[spark_var])
break
content.append(line)
with open(conf_file, 'w') as f:
f.writelines(content)
# set unique spark.shuffle.service.port for each node; this is only needed for DSE 5.0.x;
# starting in 5.1 this setting is no longer needed
if self.cluster.version() > '5.0' and self.cluster.version() < '5.1':
defaults_file = os.path.join(self.get_path(), 'resources', 'spark', 'conf', 'spark-defaults.conf')
with open(defaults_file, 'a') as f:
port_num = 7737 + int(node_num)
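                # e.g. a node named "node3" gets 7737 + 3 = port 7740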
f.write("\nspark.shuffle.service.port %s\n" % port_num)
# create Spark working dirs; starting with DSE 5.0.10/5.1.3 these are no longer automatically created
for e in ["SPARK_WORKER_DIR", "SPARK_LOCAL_DIRS"]:
dir = env[e]
if not os.path.exists(dir):
os.makedirs(dir)
| apache-2.0 | -8,702,722,111,606,709,000 | 46.105932 | 232 | 0.580777 | false |
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons_contrib/mesh_extra_tools/pkhg_faces.py | 1 | 32230 | bl_info = {
"name": "PKHG faces",
"author": " PKHG ",
"version": (0, 0, 5),
"blender": (2, 7, 1),
"location": "View3D > Tools > PKHG (tab)",
"description": "Faces selected will become added faces of different style",
"warning": "not yet finished",
"wiki_url": "",
"category": "Mesh",
}
import bpy
import bmesh
from mathutils import Vector, Matrix
from bpy.props import BoolProperty, StringProperty, IntProperty, FloatProperty, EnumProperty
class AddFaces(bpy.types.Operator):
"""Get parameters and build object with added faces"""
bl_idname = "mesh.add_faces_to_object"
bl_label = "new FACES: add"
bl_options = {'REGISTER', 'UNDO', 'PRESET'}
reverse_faces = BoolProperty(name="reverse_faces", default=False,
description="revert the normal of selected faces")
name_source_object = StringProperty(
name="which MESH",
description="lets you chose a mesh",
default="Cube")
remove_start_faces = BoolProperty(name="remove_start_faces", default=True,
description="make a choice, remove or not")
base_height = FloatProperty(name="base_height faces", min=-20,
soft_max=10, max=20, default=0.2,
description="sets general base_height")
use_relative_base_height = BoolProperty(name="rel.base_height", default=False,
description=" reletive or absolute base_height")
relative_base_height = FloatProperty(name="relative_height", min=-5,
soft_max=5, max=20, default=0.2,
description="PKHG>TODO")
relative_width = FloatProperty(name="relative_width", min=-5,
soft_max=5, max=20, default=0.2,
description="PKHG>TODO")
second_height = FloatProperty(name="2. height", min=-5,
soft_max=5, max=20, default=0.2,
description="2. height for this and that")
width = FloatProperty(name="wds.faces", min=-20, max=20, default=0.5,
description="sets general width")
repeat_extrude = IntProperty(name="repeat", min=1,
soft_max=5, max=20,
description="for longer base")
move_inside = FloatProperty(name="move inside", min=0.0,
max=1.0, default=0.5,
description="how much move to inside")
thickness = FloatProperty(name="thickness", soft_min=0.01, min=0,
soft_max=5.0, max=20.0, default=0)
depth = FloatProperty(name="depth", min=-5,
soft_max=5.0, max=20.0, default=0)
collapse_edges = BoolProperty(name="make point", default=False,
description="collapse vertices of edges")
spike_base_width = FloatProperty(name="spike_base_width", default=0.4,
min=-4.0, soft_max=1, max=20,
description="base width of a spike")
base_height_inset = FloatProperty(name="base_height_inset", default=0.0,
min=-5, max=5,
description="to elevate/or neg the ...")
top_spike = FloatProperty(name="top_spike", default=1.0, min=-10.0, max=10.0,
description=" the base_height of a spike")
top_extra_height = FloatProperty(name="top_extra_height", default=0.0, min=-10.0, max=10.0,
description=" add extra height")
step_with_real_spike = BoolProperty(name="step_with_real_spike", default=False,
description=" in stepped a real spike")
use_relative = BoolProperty(name="use_relative", default=False,
description="change size using area, min of max")
face_types = EnumProperty(
description="different types of faces",
default="no",
items=[
            ('no', 'choose!', 'choose one of the other possibilities'),
('open inset', 'open inset', 'holes'),
('with base', 'with base', 'base and ...'),
('clsd vertical', 'clsd vertical', 'clsd vertical'),
('open vertical', 'open vertical', 'openvertical'),
('spiked', 'spiked', 'spike'),
('stepped', 'stepped', 'stepped'),
('boxed', 'boxed', 'boxed'),
('bar', 'bar', 'bar'),
])
strange_boxed_effect = BoolProperty(name="strange effect", default=False,
description="do not show one extrusion")
use_boundary = BoolProperty(name="use_boundary", default=True)
use_even_offset = BoolProperty(name="even_offset", default=True)
use_relative_offset = BoolProperty(name="relativ_offset", default=True)
use_edge_rail = BoolProperty(name="edge_rail", default=False)
use_outset = BoolProperty(name="outset", default=False)
use_select_inset = BoolProperty(name="inset", default=False)
use_interpolate = BoolProperty(name="interpolate", default=True)
@classmethod
def poll(cls, context):
result = False
active_object = context.active_object
if active_object:
mesh_objects_name = [el.name for el in bpy.data.objects if el.type ==
"MESH"]
if active_object.name in mesh_objects_name:
result = True
return result
def draw(self, context): # PKHG>INFO Add_Faces_To_Object operator GUI
layout = self.layout
col = layout.column()
col.label(text="ACTIVE object used!")
col.prop(self, "face_types")
col.prop(self, "use_relative")
if self.face_types == "open inset":
col.prop(self, "move_inside")
col.prop(self, "base_height")
elif self.face_types == "with base":
col.prop(self, "move_inside")
col.prop(self, "base_height")
col.prop(self, "second_height")
col.prop(self, "width")
elif self.face_types == "clsd vertical":
col.prop(self, "base_height")
elif self.face_types == "open vertical":
col.prop(self, "base_height")
elif self.face_types == "boxed":
col.prop(self, "move_inside")
col.prop(self, "base_height")
col.prop(self, "top_spike")
col.prop(self, "strange_boxed_effect")
elif self.face_types == "spiked":
col.prop(self, "spike_base_width")
col.prop(self, "base_height_inset")
col.prop(self, "top_spike")
elif self.face_types == "bar":
col.prop(self, "spike_base_width")
col.prop(self, "top_spike")
col.prop(self, "top_extra_height")
elif self.face_types == "stepped":
col.prop(self, "spike_base_width")
col.prop(self, "base_height_inset")
col.prop(self, "top_extra_height")
col.prop(self, "second_height")
col.prop(self, "step_with_real_spike")
def execute(self, context):
        obj_name = self.name_source_object
face_type = self.face_types
if face_type == "spiked":
Spiked(spike_base_width=self.spike_base_width,
base_height_inset=self.base_height_inset,
top_spike=self.top_spike, top_relative=self.use_relative)
elif face_type == "boxed":
startinfo = prepare(self, context, self.remove_start_faces)
bm = startinfo['bm']
top = self.top_spike
obj = startinfo['obj']
obj_matrix_local = obj.matrix_local
distance = None
base_heights = None
t = self.move_inside
areas = startinfo['areas']
base_height = self.base_height
if self.use_relative:
distance = [min(t * area, 1.0) for i, area in enumerate(areas)]
base_heights = [base_height * area for i, area in enumerate(areas)]
else:
distance = [t] * len(areas)
base_heights = [base_height] * len(areas)
rings = startinfo['rings']
centers = startinfo['centers']
normals = startinfo['normals']
for i in range(len(rings)):
make_one_inset(self, context, bm=bm, ringvectors=rings[i],
center=centers[i], normal=normals[i],
t=distance[i], base_height=base_heights[i])
bpy.ops.mesh.select_mode(type="EDGE")
bpy.ops.mesh.select_more()
bpy.ops.mesh.select_more()
bpy.ops.object.mode_set(mode='OBJECT')
# PKHG>INFO base extrusion done and set to the mesh
            # PKHG>INFO if the extrusion is NOT done ... it looks strange soon!
if not self.strange_boxed_effect:
bpy.ops.object.mode_set(mode='EDIT')
obj = context.active_object
bm = bmesh.from_edit_mesh(obj.data)
bmfaces = [face for face in bm.faces if face.select]
res = extrude_faces(self, context, bm=bm, face_l=bmfaces)
ring_edges = [face.edges[:] for face in res]
bpy.ops.object.mode_set(mode='OBJECT')
            # PKHG>INFO now the extruded faces have to move in normal direction
bpy.ops.object.mode_set(mode='EDIT')
obj = bpy.context.scene.objects.active
bm = bmesh.from_edit_mesh(obj.data)
todo_faces = [face for face in bm.faces if face.select]
for face in todo_faces:
bmesh.ops.translate(bm, vec=face.normal * top, space=obj_matrix_local,
verts=face.verts)
bpy.ops.object.mode_set(mode='OBJECT')
elif face_type == "stepped":
Stepped(spike_base_width=self.spike_base_width,
base_height_inset=self.base_height_inset,
top_spike=self.second_height,
top_extra_height=self.top_extra_height,
use_relative_offset=self.use_relative, with_spike=self.step_with_real_spike)
elif face_type == "open inset":
startinfo = prepare(self, context, self.remove_start_faces)
bm = startinfo['bm']
# PKHG>INFO adjust for relative, via areas
t = self.move_inside
areas = startinfo['areas']
base_height = self.base_height
base_heights = None
distance = None
if self.use_relative:
distance = [min(t * area, 1.0) for i, area in enumerate(areas)]
base_heights = [base_height * area for i, area in enumerate(areas)]
else:
distance = [t] * len(areas)
base_heights = [base_height] * len(areas)
rings = startinfo['rings']
centers = startinfo['centers']
normals = startinfo['normals']
for i in range(len(rings)):
make_one_inset(self, context, bm=bm, ringvectors=rings[i],
center=centers[i], normal=normals[i],
t=distance[i], base_height=base_heights[i])
bpy.ops.object.mode_set(mode='OBJECT')
elif face_type == "with base":
startinfo = prepare(self, context, self.remove_start_faces)
bm = startinfo['bm']
obj = startinfo['obj']
object_matrix = obj.matrix_local
# PKHG>INFO for relative (using areas)
t = self.move_inside
areas = startinfo['areas']
base_height = self.base_height
distance = None
base_heights = None
if self.use_relative:
distance = [min(t * area, 1.0) for i, area in enumerate(areas)]
base_heights = [base_height * area for i, area in enumerate(areas)]
else:
distance = [t] * len(areas)
base_heights = [base_height] * len(areas)
next_rings = []
rings = startinfo['rings']
centers = startinfo['centers']
normals = startinfo['normals']
for i in range(len(rings)):
next_rings.append(make_one_inset(self, context, bm=bm, ringvectors=rings[i],
center=centers[i], normal=normals[i],
t=distance[i], base_height=base_heights[i]))
prepare_ring = extrude_edges(self, context, bm=bm, edge_l_l=next_rings)
second_height = self.second_height
width = self.width
vectors = [[ele.verts[:] for ele in edge] for edge in prepare_ring]
n_ring_vecs = []
for rings in vectors:
v = []
for edgv in rings:
v.extend(edgv)
                # PKHG>INFO no double verts allowed, coming from two adjacent edges!
bm.verts.ensure_lookup_table()
vv = list(set([ele.index for ele in v]))
vvv = [bm.verts[i].co for i in vv]
n_ring_vecs.append(vvv)
for i, ring in enumerate(n_ring_vecs):
make_one_inset(self, context, bm=bm, ringvectors=ring,
center=centers[i], normal=normals[i],
t=width, base_height=base_heights[i] + second_height)
bpy.ops.object.mode_set(mode='OBJECT')
else:
if face_type == "clsd vertical":
obj_name = context.active_object.name
ClosedVertical(name=obj_name, base_height=self.base_height,
use_relative_base_height=self.use_relative)
elif face_type == "open vertical":
obj_name = context.active_object.name
OpenVertical(name=obj_name, base_height=self.base_height,
use_relative_base_height=self.use_relative)
elif face_type == "bar":
startinfo = prepare(self, context, self.remove_start_faces)
result = []
bm = startinfo['bm']
rings = startinfo['rings']
centers = startinfo['centers']
normals = startinfo['normals']
spike_base_width = self.spike_base_width
for i, ring in enumerate(rings):
result.append(make_one_inset(self, context, bm=bm,
ringvectors=ring, center=centers[i],
normal=normals[i], t=spike_base_width))
next_ring_edges_list = extrude_edges(self, context, bm=bm,
edge_l_l=result)
top_spike = self.top_spike
fac = top_spike
object_matrix = startinfo['obj'].matrix_local
for i in range(len(next_ring_edges_list)):
translate_ONE_ring(self, context, bm=bm,
object_matrix=object_matrix,
ring_edges=next_ring_edges_list[i],
normal=normals[i], distance=fac)
next_ring_edges_list_2 = extrude_edges(self, context, bm=bm,
edge_l_l=next_ring_edges_list)
top_extra_height = self.top_extra_height
for i in range(len(next_ring_edges_list_2)):
move_corner_vecs_outside(self, context, bm=bm,
edge_list=next_ring_edges_list_2[i],
center=centers[i], normal=normals[i],
base_height_erlier=fac + top_extra_height,
distance=fac)
bpy.ops.mesh.select_mode(type="VERT")
bpy.ops.mesh.select_more()
bpy.ops.object.mode_set(mode='OBJECT')
return {'FINISHED'}
class ReverseFacesOperator(bpy.types.Operator):
"""Reverse selected Faces"""
bl_idname = "mesh.revers_selected_faces"
bl_label = "reverse normal of selected faces1"
bl_options = {'REGISTER', 'UNDO', 'PRESET'}
reverse_faces = BoolProperty(name="reverse_faces", default=False,
description="revert the normal of selected faces")
def execute(self, context):
name = context.active_object.name
ReverseFaces(name=name)
return {'FINISHED'}
class pkhg_help(bpy.types.Operator):
bl_idname = 'help.pkhg'
bl_label = ''
def draw(self, context):
layout = self.layout
layout.label('To use:')
        layout.label('Make a selection of Faces')
layout.label('Extrude, rotate extrusions & more')
layout.label('Toggle edit mode after use')
def invoke(self, context, event):
return context.window_manager.invoke_popup(self, width=300)
class VIEW3D_Faces_Panel(bpy.types.Panel):
bl_label = "Face Extrude"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = 'Tools'
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
result = False
active_object = context.active_object
if active_object:
mesh_objects_name = [el.name for el in bpy.data.objects if el.type ==
"MESH"]
if active_object.name in mesh_objects_name:
if active_object.mode == "OBJECT":
result = True
return result
def draw(self, context):
layout = self.layout
layout.operator(AddFaces.bl_idname, "Selected Faces!")
layout.label("Use this to Extrude")
layout.label("Selected Faces Only")
layout.label("---------------------------------------")
layout.operator(ReverseFacesOperator.bl_idname, "Reverse faceNormals")
layout.label("Only Use This")
layout.label("After Mesh Creation")
layout.label("To Repair Normals")
layout.label("Save File Often")
def find_one_ring(sel_vertices):
ring0 = sel_vertices.pop(0)
to_delete = []
for i, edge in enumerate(sel_vertices):
len_nu = len(ring0)
if len(ring0 - edge) < len_nu:
to_delete.append(i)
ring0 = ring0.union(edge)
to_delete.reverse()
for el in to_delete:
sel_vertices.pop(el)
return (ring0, sel_vertices)
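# Illustrative only: edges sharing vertices merge into one ring, e.g.
#   find_one_ring([{1, 2}, {2, 3}, {3, 1}, {7, 8}]) -> ({1, 2, 3}, [{7, 8}])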
class Stepped:
def __init__(self, spike_base_width=0.5, base_height_inset=0.0, top_spike=0.2, top_relative=False, top_extra_height=0, use_relative_offset=False, with_spike=False):
obj = bpy.context.active_object
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=False, thickness=spike_base_width, depth=0, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=use_relative_offset, use_edge_rail=False, thickness=top_extra_height, depth=base_height_inset, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=use_relative_offset, use_edge_rail=False, thickness=spike_base_width, depth=0, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=False, thickness=0, depth=top_spike, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
if with_spike:
bpy.ops.mesh.merge(type='COLLAPSE')
bpy.ops.object.mode_set(mode='OBJECT')
class Spiked:
def __init__(self, spike_base_width=0.5, base_height_inset=0.0, top_spike=0.2, top_relative=False):
obj = bpy.context.active_object
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=False, thickness=spike_base_width, depth=base_height_inset, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=top_relative, use_edge_rail=False, thickness=0, depth=top_spike, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bm = bmesh.from_edit_mesh(obj.data)
selected_faces = [face for face in bm.faces if face.select]
edges_todo = []
bpy.ops.mesh.merge(type='COLLAPSE')
bpy.ops.object.mode_set(mode='OBJECT')
class ClosedVertical:
def __init__(self, name="Plane", base_height=1, use_relative_base_height=False):
obj = bpy.data.objects[name]
bm = bmesh.new()
bm.from_mesh(obj.data)
# PKHG>INFO deselect chosen faces
sel = [f for f in bm.faces if f.select]
for f in sel:
f.select = False
res = bmesh.ops.extrude_discrete_faces(bm, faces=sel)
# PKHG>INFO select extruded faces
for f in res['faces']:
f.select = True
lood = Vector((0, 0, 1))
# PKHG>INFO adjust extrusion by a vector! test just only lood
factor = base_height
for face in res['faces']:
if use_relative_base_height:
area = face.calc_area()
factor = area * base_height
else:
factor = base_height
for el in face.verts:
tmp = el.co + face.normal * factor
el.co = tmp
me = bpy.data.meshes[name]
bm.to_mesh(me)
bm.free()
class ReverseFaces:
def __init__(self, name="Cube"):
obj = bpy.data.objects[name]
me = obj.data
bpy.ops.object.mode_set(mode='EDIT')
bm = bmesh.new()
bm.from_mesh(me)
bpy.ops.object.mode_set(mode='OBJECT')
sel = [f for f in bm.faces if f.select]
bmesh.ops.reverse_faces(bm, faces=sel)
bm.to_mesh(me)
bm.free()
class OpenVertical:
def __init__(self, name="Plane", base_height=1, use_relative_base_height=False):
obj = bpy.data.objects[name]
bm = bmesh.new()
bm.from_mesh(obj.data)
# PKHG>INFO deselect chosen faces
sel = [f for f in bm.faces if f.select]
for f in sel:
f.select = False
res = bmesh.ops.extrude_discrete_faces(bm, faces=sel)
# PKHG>INFO select extruded faces
for f in res['faces']:
f.select = True
# PKHG>INFO adjust extrusion by a vector! test just only lood
factor = base_height
for face in res['faces']:
if use_relative_base_height:
area = face.calc_area()
factor = area * base_height
else:
factor = base_height
for el in face.verts:
tmp = el.co + face.normal * factor
el.co = tmp
me = bpy.data.meshes[name]
bm.to_mesh(me)
bm.free()
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.delete(type='FACE')
bpy.ops.object.editmode_toggle()
class StripFaces:
def __init__(self, use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=True, thickness=0.0, depth=0.0, use_outset=False, use_select_inset=False, use_individual=True, use_interpolate=True):
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.inset(use_boundary=use_boundary, use_even_offset=True, use_relative_offset=False, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=use_outset, use_select_inset=use_select_inset, use_individual=use_individual, use_interpolate=use_interpolate)
bpy.ops.object.mode_set(mode='OBJECT')
        # PKHG>INFO only 3 parameters incl. execution context supported!!
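        # Note: 'type' in the elif chain below is the Python builtin (no such
        # parameter is defined in this __init__), so the comparisons are
        # always False and none of these inset variants can actually run.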
if False:
bpy.ops.mesh.inset(use_boundary, use_even_offset, use_relative_offset, use_edge_rail, thickness, depth, use_outset, use_select_inset, use_individual, use_interpolate)
elif type == 0:
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=False, use_select_inset=False, use_individual=True, use_interpolate=True)
elif type == 1:
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=True, use_relative_offset=False, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=False, use_select_inset=False, use_individual=True, use_interpolate=False)
bpy.ops.mesh.delete(type='FACE')
elif type == 2:
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=False, use_relative_offset=True, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=False, use_select_inset=False, use_individual=True, use_interpolate=False)
bpy.ops.mesh.delete(type='FACE')
elif type == 3:
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=False, use_relative_offset=True, use_edge_rail=True, thickness=depth, depth=thickness, use_outset=False, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.delete(type='FACE')
elif type == 4:
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=False, use_relative_offset=True, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.inset(use_boundary=True, use_even_offset=False, use_relative_offset=True, use_edge_rail=True, thickness=thickness, depth=depth, use_outset=True, use_select_inset=False, use_individual=True, use_interpolate=True)
bpy.ops.mesh.delete(type='FACE')
bpy.ops.object.mode_set(mode='OBJECT')
def prepare(self, context, remove_start_faces=True):
"""Start for a face selected change of faces
select an object of type mesh, with activated severel (all) faces
"""
obj = bpy.context.scene.objects.active
bpy.ops.object.mode_set(mode='OBJECT')
selectedpolygons = [el for el in obj.data.polygons if el.select]
# PKHG>INFO copies of the vectors are needed, otherwise Blender crashes!
centers = [face.center for face in selectedpolygons]
centers_copy = [Vector((el[0], el[1], el[2])) for el in centers]
normals = [face.normal for face in selectedpolygons]
normals_copy = [Vector((el[0], el[1], el[2])) for el in normals]
vertindicesofpolgons = [[vert for vert in face.vertices] for face in selectedpolygons]
vertVectorsOfSelectedFaces = [[obj.data.vertices[ind].co for ind in vertIndiceofface]
for vertIndiceofface in vertindicesofpolgons]
vertVectorsOfSelectedFaces_copy = [[Vector((el[0], el[1], el[2])) for el in listofvecs]
for listofvecs in vertVectorsOfSelectedFaces]
bpy.ops.object.mode_set(mode='EDIT')
bm = bmesh.from_edit_mesh(obj.data)
selected_bm_faces = [ele for ele in bm.faces if ele.select]
selected_edges_per_face_ind = [[ele.index for ele in face.edges] for face in selected_bm_faces]
indices = [el.index for el in selectedpolygons]
selected_faces_areas = [bm.faces[:][i] for i in indices]
tmp_area = [el.calc_area() for el in selected_faces_areas]
# PKHG>INFO, selected faces are removed, only their edges are used!
if remove_start_faces:
bpy.ops.mesh.delete(type='ONLY_FACE')
bpy.ops.object.mode_set(mode='OBJECT')
obj.data.update()
bpy.ops.object.mode_set(mode='EDIT')
bm = bmesh.from_edit_mesh(obj.data)
bm.verts.ensure_lookup_table()
bm.faces.ensure_lookup_table()
start_ring_raw = [[bm.verts[ind].index for ind in vertIndiceofface]
for vertIndiceofface in vertindicesofpolgons]
start_ring = []
for el in start_ring_raw:
start_ring.append(set(el))
bm.edges.ensure_lookup_table()
bm_selected_edges_l_l = [[bm.edges[i] for i in bm_ind_list] for bm_ind_list in selected_edges_per_face_ind]
result = {'obj': obj, 'centers': centers_copy, 'normals': normals_copy,
'rings': vertVectorsOfSelectedFaces_copy, 'bm': bm,
'areas': tmp_area, 'startBMRingVerts': start_ring,
'base_edges': bm_selected_edges_l_l}
return result
def make_one_inset(self, context, bm=None, ringvectors=None, center=None,
normal=None, t=None, base_height=0):
"""a face will get 'inserted' faces to create (normaly)
a hole it t is > 0 and < 1)
"""
tmp = []
for el in ringvectors:
tmp.append((el * (1 - t) + center * t) + normal * base_height)
tmp = [bm.verts.new(v) for v in tmp] # the new corner bmvectors
        # PKHG>INFO so to say sentinels, to use ONE for ...
tmp.append(tmp[0])
vectorsFace_i = [bm.verts.new(v) for v in ringvectors]
vectorsFace_i.append(vectorsFace_i[0])
myres = []
for ii in range(len(vectorsFace_i) - 1):
# PKHG>INFO next line: sequence important! for added edge
bmvecs = [vectorsFace_i[ii], vectorsFace_i[ii + 1], tmp[ii + 1], tmp[ii]]
res = bm.faces.new(bmvecs)
myres.append(res.edges[2])
        myres[-1].select = True  # PKHG>INFO leave selected; to be used later!
return (myres)
def extrude_faces(self, context, bm=None, face_l=None):
"""
to make a ring extrusion!
"""
all_results = []
res = bmesh.ops.extrude_discrete_faces(bm, faces=face_l)['faces']
for face in res:
face.select = True
return res
def extrude_edges(self, context, bm=None, edge_l_l=None):
"""
to make a ring extrusion!
"""
all_results = []
for edge_l in edge_l_l:
for edge in edge_l:
edge.select = False
res = bmesh.ops.extrude_edge_only(bm, edges=edge_l)
tmp = [ele for ele in res['geom'] if isinstance(ele, bmesh.types.BMEdge)]
for edge in tmp:
edge.select = True
all_results.append(tmp)
return all_results
def translate_ONE_ring(self, context, bm=None, object_matrix=None, ring_edges=None,
normal=(0, 0, 1), distance=0.5):
"""
        translate a ring in the given (normal) direction by the given (global) amount
"""
tmp = []
for edge in ring_edges:
tmp.extend(edge.verts[:])
# PKHG>INFO no double vertices allowed by bmesh!
tmp = set(tmp)
tmp = list(tmp)
bmesh.ops.translate(bm, vec=normal * distance, space=object_matrix, verts=tmp)
return ring_edges
# PKHG>INFO relevant edges will stay selected
def move_corner_vecs_outside(self, context, bm=None, edge_list=None, center=None, normal=None,
base_height_erlier=0.5, distance=0.5):
"""
        move corners (mostly outward) depending on the parameters
"""
tmp = []
for edge in edge_list:
tmp.extend([ele for ele in edge.verts if isinstance(ele, bmesh.types.BMVert)])
        # PKHG>INFO to remove duplicate vertices: they are all used twice in the ring!
tmp = set(tmp)
tmp = list(tmp)
for i in range(len(tmp)):
vec = tmp[i].co
direction = vec + (vec - (normal * base_height_erlier + center)) * distance
tmp[i].co = direction
def register():
bpy.utils.register_module(__name__)
def unregister():
bpy.utils.unregister_module(__name__)
if __name__ == "__main__":
register()
| gpl-3.0 | -9,101,845,059,198,115,000 | 42.85034 | 278 | 0.57403 | false |
ArcherSys/ArcherSys | Lib/test/test_socket.py | 1 | 585437 | import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
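    # Illustrative packing with this format (example values only):
    #   struct.pack(can_frame_fmt, 0x123, 3, b'\xc0\xff\xee')
    # yields a 16-byte frame: 4-byte can_id, 1-byte dlc, 3 pad bytes, then
    # the zero-padded 8-byte data field.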
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
    `bcm_msg_head` must be 8-byte aligned because of the `frames` member (see
    `struct can_frame` definition). Native (not standard) types must be used
    for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
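        # e.g. an id() ending in 'CANTest.testSendFrame' resolves the client
        # half '_testSendFrame', which then runs in a separate thread.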
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen(1)
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
        # Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
        if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
self.assertRaises(OSError, socket.gethostbyname, addr)
self.assertRaises(OSError, socket.gethostbyaddr, addr)
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
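        # Illustratively, on a little-endian host socket.htons(0x1234)
        # returns 0x3412, and htons(htons(0x1234)) == 0x1234 again.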
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
            # avoid the 'echo' service on this platform, as there is an
            # assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it's reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
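        # Each entry has the shape (family, type, proto, canonname, sockaddr),
        # e.g. (AF_INET, SOCK_STREAM, 6, '', ('127.0.0.1', 80)) for an
        # illustrative TCP lookup.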
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
        # Issue 17269: test workaround for an OS X platform bug (segfault)
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test (issue #12804).
try:
socket.gethostbyname('python.org')
except socket.gaierror as e:
if e.errno == socket.EAI_NODATA:
self.skipTest('internet access required for this test')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
        # this may not work if the forward lookup chooses the IPv6 address,
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
        # socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
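        # (math.acosh(0) is a domain error: CPython maps the C-level EDOM
        # errno to ValueError, so calling it from the handler deliberately
        # trashes whatever errno the I/O code had set.)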
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
def test_listen_backlog(self):
for backlog in 0, -1:
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
srv.listen(backlog)
srv.close()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
    def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type and populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, _ = tempfile.mkstemp()
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification through poll
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
        # Testing sendall() with a 2048-byte string over TCP
msg = b''
        while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
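#
# For illustration, the concrete classes below are assembled along the
# lines of:
#
#     class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#         pass
#
# where SendrecvmsgUDPTestBase itself combines SendrecvmsgDgramFlagsBase,
# SendrecvmsgConnectionlessBase, ThreadedSocketTestMixin and UDPTestBase.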
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
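    # For example, in SendrecvmsgConnectionlessBase (below), where
    # sendmsg_to_server_defaults is ([], [], 0, self.serv_addr),
    # self.sendmsgToServer([MSG]) is equivalent to
    # self.cli_sock.sendmsg([MSG], [], 0, self.serv_addr).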
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
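    # Worked example: on a plain stream socket, msg_flags_eor_indicator
    # is 0 and msg_flags_common_unset covers MSG_CTRUNC and MSG_OOB, so
    # checkFlags(flags, eor=True) only requires those bits to be clear;
    # on an SCTP socket, where msg_flags_eor_indicator includes MSG_EOR,
    # it additionally requires MSG_EOR to be set.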
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
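    # Note: recvmsg_into() returns (nbytes, ancdata, msg_flags, address),
    # so the emulation above reconstructs recvmsg()'s
    # (data, ancdata, msg_flags, address) by slicing the buffer to nbytes.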
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
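    # For illustration (the exact values are platform-dependent): on a
    # typical 64-bit Linux, struct cmsghdr is 16 bytes, so CMSG_LEN(4)
    # returns 20, while CMSG_SPACE(4) returns 24 because it also rounds
    # the data length up to alignment.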
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
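    # For illustration, passing a single descriptor fd over a
    # Unix-domain socket sock amounts to:
    #
    #     sock.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
    #                            array.array("i", [fd]))])
    #
    # createAndSendFDs() below wraps exactly this pattern.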
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
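                # Drop any trailing partial integer before unpacking
                # the descriptors (the ancillary data may have been
                # truncated).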
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
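    # Example: with ancbuf=socket.CMSG_LEN(0) + 1 the buffer can hold at
    # most one byte of descriptor data, so testCmsgTruncLen0Plus1 below
    # expects maxdata=1 with MSG_CTRUNC set.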
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
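    # For illustration, receiving the hop limit requires opting in on
    # the receiving socket first:
    #
    #     sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
    #     msg, ancdata, flags, addr = sock.recvmsg(
    #         1024, socket.CMSG_SPACE(SIZEOF_INT))
    #
    # after which ancdata holds an (IPPROTO_IPV6, IPV6_HOPLIMIT, data)
    # item whose data unpacks as a single native int.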
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: None)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises OSError with an
# errno of EINTR when interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(OSError) as cm:
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# OSError with an errno of EINTR when interrupted by a
# signal.
with self.assertRaises(OSError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
# Passing an actual address here as Python's wrapper for
# sendto() doesn't allow passing a zero-length one; POSIX
# requires that the address is ignored since the socket is
# connection-mode, however.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(hasattr(socket, 'socketpair'),
'test needs socket.socketpair()')
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
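# Client counterpart: reuse the no-op client from testSetBlocking, wrapped
# so that it is likewise skipped on non-CPython implementations.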
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
# A generator that returns callables that we'll call for each
# call to recv().
self._recv_step = iter(recv_funcs)
def recv_into(self, buffer):
data = next(self._recv_step)()
assert len(buffer) >= len(data)
buffer[:len(data)] = data
return len(data)
def _decref_socketios(self):
pass
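# Build the same SocketIO -> BufferedReader -> TextIOWrapper stack
# that socket.makefile() would create, but on top of this mock.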
def _textiowrap_for_test(self, buffering=-1):
raw = socket.SocketIO(self, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
@staticmethod
def _raise_eintr():
raise OSError(errno.EINTR, "interrupted")
def _textiowrap_mock_socket(self, mock, buffering=-1):
raw = socket.SocketIO(mock, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
def _test_readline(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
fo = mock_sock._textiowrap_for_test(buffering=buffering)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
expecting = (b"This is the first line\n"
b"And the second line is here\n")
fo = mock_sock._textiowrap_for_test(buffering=buffering)
if buffering == 0:
data = b''
else:
data = ''
expecting = expecting.decode('utf-8')
while len(data) != len(expecting):
part = fo.read(size)
if not part:
break
data += part
self.assertEqual(data, expecting)
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(buffering=1024)
self._test_readline(size=100, buffering=1024)
self._test_read(buffering=1024)
self._test_read(size=100, buffering=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"a",
lambda : b"\n",
lambda : b"B",
self._raise_eintr,
lambda : b"b",
lambda : b"",
])
fo = mock_sock._textiowrap_for_test(buffering=0)
self.assertEqual(fo.readline(size), b"a\n")
self.assertEqual(fo.readline(size), b"Bb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(buffering=0)
self._test_read(size=100, buffering=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
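# With no data available yet, non-blocking reads on the file object
# return None rather than raising.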
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
#     [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen(1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as a AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
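# Address a single name (TIPC_ADDR_NAME) picked from the middle of the
# name sequence that the server bound above.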
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking sockets
# since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
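# Illustrative helpers (a minimal sketch; the names build_can_frame and
# dissect_can_frame are assumptions, not used elsewhere in this class)
# showing how can_frame_fmt maps to the <linux/can.h> layout above.
def build_can_frame(self, can_id, data):
# Pack a frame: 32-bit CAN id, 1-byte DLC, 3 pad bytes, 8 data bytes.
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(self.can_frame_fmt, can_id, can_dlc, data)
def dissect_can_frame(self, frame):
# Unpack a frame and trim the payload to its declared length.
can_id, can_dlc, data = struct.unpack(self.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])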
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native, not standard, types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
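# For example, with 4-byte ints and longs (ILP32) struct.calcsize("@3I4l2I")
# is 36, so four pad bytes bring the head to 40; with 8-byte longs (LP64)
# it is already 56, a multiple of 8, and no padding is appended.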
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
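# e.g. an id() ending in "Class.testFoo" yields the client method "_testFoo"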
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
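# For example, a hypothetical combination (a sketch, not one of the concrete
# classes defined later in this file) could be:
#
#     class SendmsgTCPExampleTest(SendmsgStreamTests, SendrecvmsgConnectedBase,
#                                 ConnectedStreamTestMixin, TCPTestBase):
#         pass
#
# which would run the generic sendmsg() stream tests over a connected
# TCP-over-IPv4 client/server pair, with serv_addr and cli_addr supplied by
# the mixins.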
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen(1)
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
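        # tempfile.mktemp() is normally race-prone, but a collision here just
        # makes bind() fail with an OSError, and self.dir_path is a freshly
        # created private directory.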
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
        # Testing that sendto() doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
        if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
self.assertRaises(OSError, socket.gethostbyname, addr)
self.assertRaises(OSError, socket.gethostbyaddr, addr)
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
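    # For example, on a little-endian host socket.htons(0x1234) == 0x3412,
    # while on a big-endian host it is the identity mapping; either way
    # ntohs(htons(x)) == x for any 16-bit value, which is what the loop above
    # verifies.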
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
        # The candidates are ordered to prefer services that have both a tcp
        # and a udp entry, at least on modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
            # avoid the 'echo' service on these platforms, as it has an
            # assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
        # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate it is
        # reasonable to get the host's addr in addition to 0.0.0.0, at least
        # for eCos.  This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # A new socket should start with SO_REUSEADDR unset (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        self.assertEqual(reuse, 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        self.assertNotEqual(reuse, 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
        # Since find_unused_port() is inherently subject to race conditions,
        # we call it a couple of times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
        # Issue 17269: test the workaround for an OS X platform bug (segfault)
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
            # or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test (issue #12804).
try:
socket.gethostbyname('python.org')
except socket.gaierror as e:
if e.errno == socket.EAI_NODATA:
self.skipTest('internet access required for this test')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
        # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
        # socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
def test_listen_backlog(self):
for backlog in 0, -1:
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
srv.listen(backlog)
srv.close()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
    def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
        # To do this we fool socket.socket into believing it already has an
        # open fd, because on this path it doesn't actually verify the family
        # and type before populating the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, _ = tempfile.mkstemp()
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
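    # Worked example, assuming the base class's can_frame_fmt of "=IB3x8s"
    # (4-byte CAN id, 1-byte DLC, 3 pad bytes, 8 data bytes) on a
    # little-endian host:
    #
    #     build_can_frame(0x123, b'\x01\x02')
    #     == b'\x23\x01\x00\x00' + b'\x02' + b'\x00' * 3
    #        + b'\x01\x02' + b'\x00' * 6
    #
    # dissect_can_frame() inverts this, returning (0x123, 2, b'\x01\x02').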
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification through poll
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
        while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
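    # The server half of testShutdown_overflow below simply reuses
    # testShutdown; the alias is wrapped in support.cpython_only to match the
    # client half, which needs the CPython-only _testcapi module.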
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
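    # For example, with sendmsg_to_server_defaults == ([], [], 0,
    # self.serv_addr), as in SendrecvmsgConnectionlessBase below,
    # sendmsgToServer([MSG]) expands to
    # self.cli_sock.sendmsg([MSG], [], 0, self.serv_addr).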
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
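    # Example: on a datagram socket (where msg_flags_non_eor_indicator
    # includes MSG_TRUNC), checkFlags(flags, eor=True) requires MSG_TRUNC to
    # be unset, while checkFlags(flags, eor=False) requires it to be set; the
    # common flags (e.g. MSG_CTRUNC, MSG_OOB) must be unset in both cases.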
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
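    # Note: recvmsg_into() returns (nbytes, ancdata, msg_flags, address); the
    # override above reshapes that into recvmsg()'s (data, ancdata, msg_flags,
    # address) form so the same generic tests can exercise either API.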
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
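
# Illustrative sketch (not part of the test suite): the gather write that
# testSendmsgGather above exercises, shown on a plain socketpair(). The
# kernel concatenates the buffers into a single send. Assumes a platform
# with both socketpair() and sendmsg(); the function name is hypothetical.
def _demo_sendmsg_gather():
    import socket
    a, b = socket.socketpair()
    try:
        nsent = a.sendmsg([b"Hello, ", b"world"])  # two buffers, one send
        assert nsent == len(b"Hello, world")
        assert b.recv(1024) == b"Hello, world"
    finally:
        a.close()
        b.close()
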
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
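
# Illustrative sketch (not part of the test suite): what _testSendmsgDontWait
# above drives. MSG_DONTWAIT makes a single sendmsg() call non-blocking
# without changing the socket's mode, so once the send and receive buffers
# fill up the call fails with EAGAIN/EWOULDBLOCK. Linux-only when sending
# (see the skip above); the function name is hypothetical.
def _demo_sendmsg_dontwait():
    import errno, socket
    a, b = socket.socketpair()
    try:
        try:
            while True:  # fill the buffers until the kernel refuses
                a.sendmsg([b"a" * 512], [], socket.MSG_DONTWAIT)
        except OSError as e:
            assert e.errno in (errno.EAGAIN, errno.EWOULDBLOCK)
    finally:
        a.close()
        b.close()
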
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
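
# Illustrative sketch (not part of the test suite): the flip side of
# testSendmsgNoDestAddr above: an unconnected datagram socket sends fine
# once a destination address is supplied as sendmsg()'s fourth argument.
# The function name is hypothetical.
def _demo_sendmsg_dest_addr():
    import socket
    srv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        srv.bind(("127.0.0.1", 0))
        cli.sendmsg([b"ping"], [], 0, srv.getsockname())
        assert srv.recv(4) == b"ping"
    finally:
        cli.close()
        srv.close()
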
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
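
# Illustrative sketch (not part of the test suite): the MSG_PEEK semantics
# testRecvmsgPeek above relies on, shown with plain recv() on a
# socketpair(). Peeking returns pending data without consuming it, so a
# normal read afterwards sees the same bytes. Function name hypothetical.
def _demo_msg_peek():
    import socket
    a, b = socket.socketpair()
    try:
        a.sendall(b"data")
        assert b.recv(4, socket.MSG_PEEK) == b"data"  # look, don't consume
        assert b.recv(4) == b"data"                   # still pending
    finally:
        a.close()
        b.close()
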
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
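
# Illustrative sketch (not part of the test suite): the two invariants
# CmsgMacroTests checks, restated directly. CMSG_LEN(n) is the cmsghdr
# header plus n data bytes; CMSG_SPACE(n) additionally includes trailing
# padding, so it can never be smaller. Assumes both macros exist.
def _demo_cmsg_macros():
    import socket
    for n in (0, 1, 4, 100):
        assert socket.CMSG_LEN(n) - socket.CMSG_LEN(0) == n
        assert socket.CMSG_SPACE(n) >= socket.CMSG_LEN(n)
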
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
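
# Illustrative sketch (not part of the test suite): the FD-passing pattern
# SCMRightsTest exercises, end to end over one socketpair(). The received
# descriptor is a working duplicate of the sent one. POSIX-only; the
# function name is hypothetical.
def _demo_scm_rights():
    import array, os, socket
    parent, child = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    r, w = os.pipe()
    try:
        fds = array.array("i", [w])  # pack the FD as a native int array
        parent.sendmsg([b"x"],
                       [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
        msg, ancdata, flags, addr = child.recvmsg(
            1, socket.CMSG_SPACE(fds.itemsize))
        level, ctype, cdata = ancdata[0]
        recv_fds = array.array("i")
        recv_fds.frombytes(cdata)
        os.write(recv_fds[0], b"hi")   # write through the duplicate...
        assert os.read(r, 2) == b"hi"  # ...and read it back off the pipe
        os.close(recv_fds[0])
    finally:
        os.close(r)
        os.close(w)
        parent.close()
        child.close()
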
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
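
# Illustrative sketch (not part of the test suite): receiving the IPv6 hop
# limit as ancillary data, the core pattern RFC3542AncillaryTest drives
# from both ends. Assumes IPv6 loopback plus the IPV6_RECVHOPLIMIT and
# IPV6_HOPLIMIT constants; the function name is hypothetical.
def _demo_recv_hop_limit():
    import array, socket
    srv = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    cli = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    try:
        srv.bind(("::1", 0))
        # Ask the kernel to attach the hop limit to incoming datagrams.
        srv.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
        cli.sendto(b"x", srv.getsockname())
        a = array.array("i")
        msg, ancdata, flags, addr = srv.recvmsg(
            1, socket.CMSG_SPACE(a.itemsize))
        for level, ctype, cdata in ancdata:
            if (level, ctype) == (socket.IPPROTO_IPV6,
                                  socket.IPV6_HOPLIMIT):
                a.frombytes(cdata)  # a[0] is the arrival hop limit
                assert 0 <= a[0] <= 255
    finally:
        cli.close()
        srv.close()
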
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: None)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises OSError with an
# errno of EINTR when interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(OSError) as cm:
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
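
# Illustrative sketch (not part of the test suite): the mechanics behind
# checkInterruptedRecv() above. A SIGALRM delivered mid-recv() surfaces as
# OSError with errno EINTR rather than socket.timeout. This holds on
# interpreters predating PEP 475; newer Pythons retry interrupted calls
# automatically, so treat this strictly as a sketch of the tested
# behaviour. POSIX-only; the function name is hypothetical.
def _demo_interrupted_recv():
    import errno, signal, socket
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind(("127.0.0.1", 0))
    s.settimeout(4.0)
    old = signal.signal(signal.SIGALRM, lambda signum, frame: None)
    signal.setitimer(signal.ITIMER_REAL, 0.05)  # SIGALRM in 50 ms
    try:
        try:
            s.recv(1024)  # nothing arrives; the signal interrupts it
        except OSError as e:
            assert e.errno == errno.EINTR
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old)
        s.close()
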
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# OSError with an errno of EINTR when interrupted by a
# signal.
with self.assertRaises(OSError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Passing an actual address here, since Python's wrapper for
        # sendto() doesn't allow a zero-length one; POSIX requires the
        # address to be ignored anyway, because the socket is
        # connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(hasattr(socket, 'socketpair'),
'test needs socket.socketpair()')
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
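
# Illustrative sketch (not part of the test suite): socketpair() hands back
# two already-connected stream sockets (AF_UNIX where available, otherwise
# an AF_INET emulation), which is what BasicSocketPairTest's default checks
# reflect. The function name is hypothetical.
def _demo_socketpair():
    import socket
    a, b = socket.socketpair()
    try:
        a.sendall(b"ping")
        assert b.recv(4) == b"ping"
    finally:
        a.close()
        b.close()
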
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
        try:
            conn, addr = self.serv.accept()
            try:
                message = conn.recv(len(MSG))
            finally:
                conn.close()
        finally:
            self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
        self.serv.setblocking(False)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
        conn.setblocking(False)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
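
# Illustrative sketch (not part of the test suite): the non-blocking
# accept-plus-select() pattern testAccept above exercises, collapsed onto
# a single thread. The function name is hypothetical.
def _demo_nonblocking_accept():
    import select, socket
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.bind(("127.0.0.1", 0))
    srv.listen(1)
    srv.setblocking(False)
    try:
        try:
            srv.accept()  # nothing pending yet: must not block
        except OSError:
            pass          # expected: the call would have blocked
        cli = socket.create_connection(srv.getsockname())
        # select() reports the listener readable once a connection waits.
        read, _, _ = select.select([srv], [], [], 1.0)
        assert srv in read
        conn, addr = srv.accept()
        conn.close()
        cli.close()
    finally:
        srv.close()
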
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
# A generator that returns callables that we'll call for each
# call to recv().
self._recv_step = iter(recv_funcs)
def recv_into(self, buffer):
data = next(self._recv_step)()
assert len(buffer) >= len(data)
buffer[:len(data)] = data
return len(data)
def _decref_socketios(self):
pass
def _textiowrap_for_test(self, buffering=-1):
raw = socket.SocketIO(self, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
@staticmethod
def _raise_eintr():
raise OSError(errno.EINTR, "interrupted")
def _textiowrap_mock_socket(self, mock, buffering=-1):
raw = socket.SocketIO(mock, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
def _test_readline(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
fo = mock_sock._textiowrap_for_test(buffering=buffering)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
expecting = (b"This is the first line\n"
b"And the second line is here\n")
fo = mock_sock._textiowrap_for_test(buffering=buffering)
if buffering == 0:
data = b''
else:
data = ''
expecting = expecting.decode('utf-8')
while len(data) != len(expecting):
part = fo.read(size)
if not part:
break
data += part
self.assertEqual(data, expecting)
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(buffering=1024)
self._test_readline(size=100, buffering=1024)
self._test_read(buffering=1024)
self._test_read(size=100, buffering=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"a",
lambda : b"\n",
lambda : b"B",
self._raise_eintr,
lambda : b"b",
lambda : b"",
])
fo = mock_sock._textiowrap_for_test(buffering=0)
self.assertEqual(fo.readline(size), b"a\n")
self.assertEqual(fo.readline(size), b"Bb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(buffering=0)
self._test_read(size=100, buffering=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
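# Illustrative sketch of the property described above (assuming `cli` is a
# connected socket whose peer has already sent two newline-terminated lines):
#
#     f1 = cli.makefile('rb', 0)   # unbuffered: reads only what is asked for
#     line1 = f1.readline()
#     f2 = cli.makefile('rb', 0)   # a second file object on the same socket
#     line2 = f2.readline()        # sees the next line; nothing was lost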
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data has not arrived yet (can happen under Windows); wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen(1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as a AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be a classmethod, not a staticmethod, or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# Socket sharing is expected to work only for blocking sockets,
# since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally Windows will have picked the correct value.
# Python introspection on the socket, however, will still return
# 0. For the shared socket, the Python value is recreated
# from the actual value, so the two may not compare equal.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
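# Illustrative use of this layout (not part of the tests): packing and
# unpacking a frame with can_id 0x123 carrying two payload bytes:
#
#     data = b'\x01\x02'.ljust(8, b'\x00')
#     frame = struct.pack(can_frame_fmt, 0x123, 2, data)
#     can_id, can_dlc, payload = struct.unpack(can_frame_fmt, frame)
#     assert payload[:can_dlc] == b'\x01\x02'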
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8-byte aligned because of the `frames` member (see
the `struct can_frame` definition). Native (not standard) types must be
used for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
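# For example, with 4-byte longs (32-bit platforms) the base size is
# 3*4 + 4*4 + 2*4 = 36 bytes, so 4 pad bytes are appended to reach a
# multiple of 8; with 8-byte longs the struct (including 4 alignment bytes
# before the longs) is already 56 bytes and no padding is added.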
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note: the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here; the skip will be triggered
# in the server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here; the skip will be triggered
# in the server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
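# For example (illustrative; the mixins are defined below):
#
#     class ConnectedTCPTest(ConnectedStreamTestMixin, TCPTestBase):
#         pass   # behaves like SocketConnectedTest, plus serv_addr/cli_addr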
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen(1)
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
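# Typical usage (illustrative): the server half carries the skip condition,
# and the client half is wrapped so it becomes a no-op when skipped:
#
#     @skipWithClientIf(sys.platform == 'win32', "not supported on Windows")
#     def testFeature(self):
#         ...
#     @testFeature.client_skip
#     def _testFeature(self):
#         ...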
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, the value of that attribute
of the socket module is used, or the test is skipped if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto() doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if fqhn not in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
self.assertRaises(OSError, socket.gethostbyname, addr)
self.assertRaises(OSError, socket.gethostbyaddr, addr)
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__, 0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
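# A minimal standalone sketch (not part of the original suite) of the
# inverse property testNtoH() exercises; only standard socket module
# functions are used.
def _demo_byteorder_roundtrip():
    import socket
    for value in (0, 1, 0x1234, 0xffff):
        # htons()/ntohs() are inverses on the low 16 bits
        assert socket.ntohs(socket.htons(value)) == value
    for value in (0, 1, 0x12345678, 0xffffffff):
        # htonl()/ntohl() are inverses on the low 32 bits
        assert socket.ntohl(socket.htonl(value)) == value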
def testNtoHErrors(self):
good_values = [1, 2, 3, 1, 2, 3]
bad_values = [-1, -2, -3, -1, -2, -3]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this so that services with both a tcp and a udp
# entry come first, at least on modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on these platforms, as there is an
# entry with a non-standard port/protocol that breaks this test's assumptions
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
    # for/else: reached only if no service was found on any try
    raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A new socket should start with SO_REUSEADDR off (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# every sequence returned by getaddrinfo() is supposed to have length 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org', 0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test (issue #12804).
try:
socket.gethostbyname('python.org')
except socket.gaierror as e:
if e.errno == socket.EAI_NODATA:
self.skipTest('internet access required for this test')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value (see the sketch after this method).
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
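# Minimal illustration (an assumption about CPython internals: libm
# domain errors set the C errno) of the errno-clobbering trick used by
# the handlers in check_sendall_interrupted() above.
def _demo_clobber_errno():
    import math
    try:
        math.acosh(0)   # domain error: raises ValueError, sets C errno
    except ValueError:
        pass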
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
def test_listen_backlog(self):
for backlog in 0, -1:
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
srv.listen(backlog)
srv.close()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type and populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, _ = tempfile.mkstemp()
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, so 1024 is comfortably too long
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
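# Round-trip sketch of the helpers above (assumes the canonical Linux
# can_frame layout "=IB3x8s": can_id, can_dlc, 3 pad bytes, data[8]).
def _demo_can_frame_roundtrip():
    import struct
    can_frame_fmt = "=IB3x8s"
    can_id, data = 0x123, b'\x01\x02\x03'
    frame = struct.pack(can_frame_fmt, can_id, len(data), data.ljust(8, b'\x00'))
    unpacked_id, can_dlc, payload = struct.unpack(can_frame_fmt, frame)
    assert (unpacked_id, payload[:can_dlc]) == (can_id, data)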
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification through poll
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while True:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
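# A hedged sketch (class names taken from further down in this module;
# treat the exact combination as illustrative) of how these pieces
# compose into a concrete test case:
#
#   class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#       pass
#
# where SendrecvmsgUDPTestBase itself mixes SendrecvmsgDgramFlagsBase,
# SendrecvmsgConnectionlessBase and a UDP socket-setup class, so the
# generic tests run unchanged against UDP sockets.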
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
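# For example, on a connectionless subclass where
# sendmsg_to_server_defaults == ([], [], 0, self.serv_addr),
# self.sendmsgToServer([MSG]) expands to
# self.cli_sock.sendmsg([MSG], [], 0, self.serv_addr).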
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
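# Worked example (a sketch based on the defaults above): on a datagram
# socket, msg_flags_non_eor_indicator includes MSG_TRUNC, so
# checkFlags(flags, eor=False) requires MSG_TRUNC set and
# MSG_CTRUNC/MSG_OOB unset, while
# checkFlags(flags, eor=False, ignore=socket.MSG_TRUNC) leaves the
# MSG_TRUNC bit unchecked.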
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
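# --- Illustrative sketch (not part of the test suite): a minimal SCM_RIGHTS
# round trip of the kind SCMRightsTest automates.  Assumes a Unix platform
# with AF_UNIX plus sendmsg()/recvmsg(); _demo_* names are hypothetical.
def _demo_scm_rights_roundtrip():
    import array, os, socket
    sender, receiver = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        rfd, wfd = os.pipe()
        # Pack the descriptor into a native int array for the ancillary item.
        sender.sendmsg([b"fd attached"],
                       [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                         array.array("i", [rfd]))])
        msg, ancdata, flags, addr = receiver.recvmsg(
            1024, socket.CMSG_SPACE(array.array("i").itemsize))
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                fds = array.array("i")
                # Discard any partial trailing integer before unpacking,
                # just as closeRecvmsgFDs() does above.
                fds.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
                for fd in fds:
                    os.close(fd)  # each received FD is a fresh descriptor
        os.close(rfd)
        os.close(wfd)
    finally:
        sender.close()
        receiver.close()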
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
    # Test value to use when setting hop limit of packet.
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
    @testSecondCmsgTruncInData.client_skip
    def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
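# --- Illustrative sketch (not part of the test suite): receiving the hop
# limit of an incoming datagram as RFC 3542 ancillary data, mirroring what
# checkHopLimit() automates.  Assumes IPv6 with IPV6_RECVHOPLIMIT available;
# the call blocks until a datagram arrives on the bound port.
def _demo_recv_hop_limit():
    import array, socket
    sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    try:
        sock.bind(("::1", 0))
        sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
        msg, ancdata, flags, addr = sock.recvmsg(
            1024, socket.CMSG_SPACE(array.array("i").itemsize))
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == socket.IPPROTO_IPV6 and
                    cmsg_type == socket.IPV6_HOPLIMIT):
                hop = array.array("i")
                hop.frombytes(cmsg_data)
                return msg, hop[0]  # payload plus the received hop limit
    finally:
        sock.close()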
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: None)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises OSError with an
# errno of EINTR when interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(OSError) as cm:
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
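# --- Illustrative sketch (not part of the test suite): the interruption
# pattern the class above automates.  Assumes a Unix platform with
# signal.setitimer() and the pre-PEP 475 semantics these tests target, where
# an interrupted recvfrom() surfaces as OSError/EINTR instead of retrying.
def _demo_interrupted_recv():
    import errno, signal, socket
    old_handler = signal.signal(signal.SIGALRM, lambda signum, frame: None)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.bind(("127.0.0.1", 0))
        sock.settimeout(4.0)
        # Schedule SIGALRM to fire while recvfrom() is blocked.
        signal.setitimer(signal.ITIMER_REAL, 0.05)
        try:
            sock.recvfrom(1024)
        except OSError as e:
            assert e.errno == errno.EINTR
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old_handler)
        sock.close()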
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# OSError with an errno of EINTR when interrupted by a
# signal.
with self.assertRaises(OSError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here, since Python's wrapper for
        # sendto() doesn't allow a zero-length one; POSIX requires
        # the address to be ignored anyway, because the socket is
        # connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(hasattr(socket, 'socketpair'),
'test needs socket.socketpair()')
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
        self.serv.settimeout(10)
        conn = None
        try:
            conn, addr = self.serv.accept()
            message = conn.recv(len(MSG))
        finally:
            # Guard against accept() failing, which would leave conn unbound.
            if conn is not None:
                conn.close()
            self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
        self.serv.setblocking(False)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
        conn.setblocking(False)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
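# --- Illustrative sketch (not part of the test suite): the non-blocking
# accept pattern testAccept() exercises, without the threaded harness.
# Assumes a loopback TCP connection can be made; _demo_* is hypothetical.
def _demo_nonblocking_accept():
    import select, socket
    serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        serv.bind(("127.0.0.1", 0))
        serv.listen(1)
        serv.setblocking(False)
        try:
            serv.accept()  # no pending connection: raises immediately
        except OSError:
            pass
        cli.connect(serv.getsockname())
        # select() reports the listener readable once the client connects.
        readable, _, _ = select.select([serv], [], [], 5.0)
        if serv in readable:
            conn, addr = serv.accept()
            conn.close()
    finally:
        cli.close()
        serv.close()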
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
        while True:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
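# --- Illustrative sketch (not part of the test suite): the makefile()
# pattern the class above tests, reduced to a socketpair.  Assumes
# socket.socketpair() is available; _demo_* names are hypothetical.
def _demo_makefile_pair():
    import socket
    a, b = socket.socketpair()
    try:
        write_file = a.makefile("wb")
        read_file = b.makefile("rb")
        write_file.write(b"ping\n")
        write_file.flush()  # makefile() objects buffer writes by default
        assert read_file.readline() == b"ping\n"
        write_file.close()
        read_file.close()
    finally:
        a.close()
        b.close()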
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
# A generator that returns callables that we'll call for each
# call to recv().
self._recv_step = iter(recv_funcs)
def recv_into(self, buffer):
data = next(self._recv_step)()
assert len(buffer) >= len(data)
buffer[:len(data)] = data
return len(data)
def _decref_socketios(self):
pass
def _textiowrap_for_test(self, buffering=-1):
raw = socket.SocketIO(self, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
@staticmethod
def _raise_eintr():
raise OSError(errno.EINTR, "interrupted")
def _textiowrap_mock_socket(self, mock, buffering=-1):
raw = socket.SocketIO(mock, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
def _test_readline(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
fo = mock_sock._textiowrap_for_test(buffering=buffering)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
expecting = (b"This is the first line\n"
b"And the second line is here\n")
fo = mock_sock._textiowrap_for_test(buffering=buffering)
if buffering == 0:
data = b''
else:
data = ''
expecting = expecting.decode('utf-8')
while len(data) != len(expecting):
part = fo.read(size)
if not part:
break
data += part
self.assertEqual(data, expecting)
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(buffering=1024)
self._test_readline(size=100, buffering=1024)
self._test_read(buffering=1024)
self._test_read(size=100, buffering=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"a",
lambda : b"\n",
lambda : b"B",
self._raise_eintr,
lambda : b"b",
lambda : b"",
])
fo = mock_sock._textiowrap_for_test(buffering=0)
self.assertEqual(fo.readline(size), b"a\n")
self.assertEqual(fo.readline(size), b"Bb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(buffering=0)
self._test_read(size=100, buffering=0)
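# --- Illustrative sketch (not part of the test suite): how the I/O stack
# hides EINTR from callers, using a mock in the spirit of MockSocket above.
# socket.SocketIO is a CPython-internal class; this sketches the behavior
# the test case verifies rather than a supported public API.
def _demo_eintr_retry():
    import errno, io, socket
    def raise_eintr():
        raise OSError(errno.EINTR, "interrupted")
    steps = iter([raise_eintr, lambda: b"data", lambda: b""])
    class _FlakySocket:
        def recv_into(self, buffer):
            data = next(steps)()
            buffer[:len(data)] = data
            return len(data)
        def _decref_socketios(self):
            pass
    reader = io.BufferedReader(socket.SocketIO(_FlakySocket(), "r"))
    assert reader.read() == b"data"  # the EINTR was retried internally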
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
    first file object's buffer. Note that http.client relies on this
    when reading multiple responses from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
    # NOTE: to make `read_file` non-blocking we must set the underlying
    # `cli_conn` socket to non-blocking, and likewise `serv_conn` for
    # `write_file` (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
        # Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
        # 'localhost'. This may result in an IPv6 address being returned
        # as well as an IPv4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully connect to any of them, it
        # propagates the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen(1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as a AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
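# Note: this only detects TIPC when it is present as a loadable module; a kernel
# with TIPC built in would not list it in /proc/modules, so the check errs toward
# skipping the tests.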
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
| mit | -5,195,542,381,641,820,000 | 36.720407 | 117 | 0.615957 | false |
laissezfarrell/rl-bitcurator-scripts | python/accession-reporter.py | 1 | 3754 | #!/usr/bin/env python3
#Script (in progress) to report high-level folder information for offices transferring records to the University Archives.
#Dependencies: argparse, pathlib, python3.6 or above, csv, datetime
#Assumptions:
# 1.
import argparse, csv, datetime
from pathlib import Path, PurePath
from datetime import datetime
def walkPath():
startingPath = Path(inputDir) #uncomment when ready for arguments
#csvOut = Path('/home/bcadmin/Desktop/accession-report-test.csv')
#startingPath = Path('/home/bcadmin/Desktop/test-data/objects') #comment when ready for arguments
spChild = [x for x in startingPath.iterdir() if x.is_dir()] #create a list of the children directories in startingPath.
    with open(csvOut, 'w', newline='') as m:  # newline='' avoids blank rows in csv output on Windows
        writer = csv.writer(m)
        writer.writerow(['path', 'foldersize ' + labelUnits, 'Earliest Timestamp', 'Latest Timestamp'])
for i in spChild:
operatingDirectory = Path(i)
print("the next directory to process is ",operatingDirectory)#sanity check
fileList = list(operatingDirectory.glob('**/*'))
#fileList = [x for x in operatingDirectory.iterdir() if x.is_file()]
#print(fileList) #sanity check
            folderSize = 0
            oldestTime = datetime.now()
            newestTime = datetime.strptime("Jan 01 1950", "%b %d %Y")
            for f in fileList:
                if not f.is_file():
                    continue  # glob('**/*') also yields directories; only count files
                folderSize += Path.stat(f).st_size / repUnits
                fModTime = datetime.fromtimestamp(Path.stat(f).st_mtime)
                oldestTime = min(oldestTime, fModTime)
                newestTime = max(newestTime, fModTime)
#print(folderSize)
#print(oldestTime)
#print(newestTime)
writer.writerow([i,folderSize,oldestTime,newestTime])
#end of day May 15: above function calculates the size of the files in a folder, as well as the most recent and oldest date modified. Next steps: 1) add arguments back in and test function. Mostly copied/pasted writer stuff from another script, so potentially doesn't work yet.
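# Example invocation (hypothetical paths), once the argument handling below is in place:
#   python3 accession-reporter.py ~/Desktop/accession-report.csv ~/Desktop/test-data -u mb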
# Main body to accept arguments and call the three functions.
parser = argparse.ArgumentParser()
parser.add_argument("output", help="Path to and filename for the CSV to create.")
parser.add_argument("input", help="Path to input directory.")
parser.add_argument("-u", "--units", type=str, choices=["b", "kb", "mb", "gb"], help="Unit of measurement for reporting aggregate size")
args = parser.parse_args()
if args.output:
csvOut = args.output
if args.input:
inputDir = args.input
if args.units == "kb":
repUnits = 1024
labelUnits = "(kilobytes)"
print("Reporting sizes in kilobytes")
elif args.units == "mb":
repUnits = 1024*1024
labelUnits = "(megabytes)"
print("Reporting sizes in megabytes")
elif args.units =="gb":
repUnits = 1024*1024*1024
labelUnits = "(gigabytes)"
print("Reporting sizes in gigabytes")
elif args.units == "b":
repUnits = 1
labelUnits = "(bytes)"
print("Reporting sizes in bytes, the purest way to report sizes.")
else:
repUnits = 1
labelUnits = "(bytes)"
print("Your inattentiveness leads Self to default to reporting size in bytes, the purest yet least human readable way to report sizes. Ha ha, puny human. Bow before Self, the mighty computer. 01000100 01000101 01010011 01010100 01010010 01001111 01011001 00100000 01000001 01001100 01001100 00100000 01001000 01010101 01001101 01000001 01001110 01010011 00101110")
walkPath()
| gpl-3.0 | -5,533,678,024,818,261,000 | 43.690476 | 368 | 0.660629 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_01_01/operations/_subnets_operations.py | 1 | 21762 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SubnetsOperations(object):
"""SubnetsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified subnet.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
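    # Usage sketch (hypothetical names; assumes a configured NetworkManagementClient
    # exposing this operation group as `subnets`):
    #     poller = network_client.subnets.begin_delete('my-rg', 'my-vnet', 'my-subnet')
    #     poller.result()  # block until the long-running delete completes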
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.Subnet"
"""Gets the specified subnet by virtual network and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subnet, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_01_01.models.Subnet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
subnet_parameters, # type: "_models.Subnet"
**kwargs # type: Any
):
# type: (...) -> "_models.Subnet"
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json, text/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(subnet_parameters, 'Subnet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Subnet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
subnet_name, # type: str
subnet_parameters, # type: "_models.Subnet"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.Subnet"]
"""Creates or updates a subnet in the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param subnet_name: The name of the subnet.
:type subnet_name: str
:param subnet_parameters: Parameters supplied to the create or update subnet operation.
:type subnet_parameters: ~azure.mgmt.network.v2018_01_01.models.Subnet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either Subnet or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_01_01.models.Subnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
subnet_name=subnet_name,
subnet_parameters=subnet_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('Subnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subnetName': self._serialize.url("subnet_name", subnet_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets/{subnetName}'} # type: ignore
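    # Usage sketch (hypothetical names): subnet_parameters may be a Subnet model or a
    # plain dict accepted by the serializer, e.g.:
    #     poller = network_client.subnets.begin_create_or_update(
    #         'my-rg', 'my-vnet', 'my-subnet', {'address_prefix': '10.0.0.0/24'})
    #     subnet = poller.result()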
def list(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SubnetListResult"]
"""Gets all subnets in a virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubnetListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_01_01.models.SubnetListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubnetListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-01-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('SubnetListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/subnets'} # type: ignore
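    # Usage sketch (hypothetical names): the returned ItemPaged can be iterated
    # directly and fetches subsequent pages lazily:
    #     for subnet in network_client.subnets.list('my-rg', 'my-vnet'):
    #         print(subnet.name)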
| mit | -452,068,598,390,759,740 | 48.124153 | 220 | 0.636752 | false |
alirizakeles/zato | code/zato-server/src/zato/server/service/internal/security/openstack.py | 1 | 6547 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from contextlib import closing
from traceback import format_exc
from uuid import uuid4
# Zato
from zato.common import SEC_DEF_TYPE
from zato.common.broker_message import SECURITY
from zato.common.odb.model import Cluster, OpenStackSecurity
from zato.common.odb.query import openstack_security_list
from zato.server.service.internal import AdminService, AdminSIO, ChangePasswordBase, GetListAdminSIO
class GetList(AdminService):
""" Returns a list of OpenStack definitions available.
"""
_filter_by = OpenStackSecurity.name,
class SimpleIO(GetListAdminSIO):
request_elem = 'zato_security_openstack_get_list_request'
response_elem = 'zato_security_openstack_get_list_response'
input_required = ('cluster_id',)
output_required = ('id', 'name', 'is_active', 'username')
def get_data(self, session):
return self._search(openstack_security_list, session, self.request.input.cluster_id, False)
def handle(self):
with closing(self.odb.session()) as session:
self.response.payload[:] = self.get_data(session)
class Create(AdminService):
""" Creates a new OpenStack definition.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_security_openstack_create_request'
response_elem = 'zato_security_openstack_create_response'
input_required = ('cluster_id', 'name', 'is_active', 'username')
output_required = ('id', 'name')
def handle(self):
input = self.request.input
input.password = uuid4().hex
with closing(self.odb.session()) as session:
try:
cluster = session.query(Cluster).filter_by(id=input.cluster_id).first()
# Let's see if we already have a definition of that name before committing
# any stuff into the database.
existing_one = session.query(OpenStackSecurity).\
filter(Cluster.id==input.cluster_id).\
filter(OpenStackSecurity.name==input.name).first()
if existing_one:
raise Exception('OpenStack definition [{0}] already exists on this cluster'.format(input.name))
auth = OpenStackSecurity(None, input.name, input.is_active, input.username, input.password, cluster)
session.add(auth)
session.commit()
except Exception, e:
msg = 'Could not create an OpenStack definition, e:[{e}]'.format(e=format_exc(e))
self.logger.error(msg)
session.rollback()
raise
else:
input.action = SECURITY.OPENSTACK_CREATE.value
input.sec_type = SEC_DEF_TYPE.OPENSTACK
self.broker_client.publish(input)
self.response.payload.id = auth.id
self.response.payload.name = auth.name
class Edit(AdminService):
""" Updates an OpenStack definition.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_security_openstack_edit_request'
response_elem = 'zato_security_openstack_edit_response'
input_required = ('id', 'cluster_id', 'name', 'is_active', 'username')
output_required = ('id', 'name')
def handle(self):
input = self.request.input
with closing(self.odb.session()) as session:
try:
existing_one = session.query(OpenStackSecurity).\
filter(Cluster.id==input.cluster_id).\
filter(OpenStackSecurity.name==input.name).\
filter(OpenStackSecurity.id!=input.id).\
first()
if existing_one:
raise Exception('OpenStack definition [{0}] already exists on this cluster'.format(input.name))
definition = session.query(OpenStackSecurity).filter_by(id=input.id).one()
old_name = definition.name
definition.name = input.name
definition.is_active = input.is_active
definition.username = input.username
session.add(definition)
session.commit()
except Exception, e:
msg = 'Could not update the OpenStack definition, e:[{e}]'.format(e=format_exc(e))
self.logger.error(msg)
session.rollback()
raise
else:
input.action = SECURITY.OPENSTACK_EDIT.value
input.old_name = old_name
input.sec_type = SEC_DEF_TYPE.OPENSTACK
self.broker_client.publish(input)
self.response.payload.id = definition.id
self.response.payload.name = definition.name
class ChangePassword(ChangePasswordBase):
""" Changes the password of an OpenStack definition.
"""
password_required = False
class SimpleIO(ChangePasswordBase.SimpleIO):
request_elem = 'zato_security_openstack_change_password_request'
response_elem = 'zato_security_openstack_change_password_response'
def handle(self):
def _auth(instance, password):
instance.password = password
return self._handle(OpenStackSecurity, _auth, SECURITY.OPENSTACK_CHANGE_PASSWORD.value)
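    # Note: _handle comes from ChangePasswordBase (not shown here); it presumably loads
    # the OpenStackSecurity row, applies _auth to store the new password, and publishes
    # the given broker action so that connected servers pick up the change.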
class Delete(AdminService):
""" Deletes an OpenStack definition.
"""
class SimpleIO(AdminSIO):
request_elem = 'zato_security_openstack_delete_request'
response_elem = 'zato_security_openstack_delete_response'
input_required = ('id',)
def handle(self):
with closing(self.odb.session()) as session:
try:
auth = session.query(OpenStackSecurity).\
filter(OpenStackSecurity.id==self.request.input.id).\
one()
session.delete(auth)
session.commit()
except Exception, e:
msg = 'Could not delete the OpenStack definition, e:[{e}]'.format(e=format_exc(e))
self.logger.error(msg)
session.rollback()
raise
else:
self.request.input.action = SECURITY.OPENSTACK_DELETE.value
self.request.input.name = auth.name
self.broker_client.publish(self.request.input)
| gpl-3.0 | -7,917,064,679,736,714,000 | 36.626437 | 116 | 0.607148 | false |
10239847509238470925387z/tmp123 | app.py | 1 | 2195 | #!/usr/bin/env python
import urllib
import json
import os
import constants
import accounts
from flask import Flask
from flask import request
from flask import make_response
# Flask app should start in global layout
app = Flask(__name__)
PERSON = constants.TEST_1
@app.route('/webhook', methods=['POST'])
def webhook():
req = request.get_json(silent=True, force=True)
print("Request:")
print(json.dumps(req, indent=4))
res = makeWebhookResult(req)
res = json.dumps(res, indent=4)
print(res)
r = make_response(res)
r.headers['Content-Type'] = 'application/json'
return r
def makeWebhookResult(req):
if req.get("result").get("action") != "account-balance":
return constants.ERR_DICT(req.get("result").get("action"))
result = req.get("result")
parameters = result.get("parameters")
    acct = (parameters.get("account-type") or "").strip()  # guard: parameter may be absent
    if acct == '401k':
        acct = 'WI'
    qual = parameters.get("qualifier")
if acct:
if acct in constants.ACCT_TYPES:
speech = "The value of your {ACCT_TYPE} accounts is {VALU} dollars.".format(VALU=accounts.get_balance(PERSON, acct), ACCT_TYPE=acct)
else:
speech = "You don't have any accounts of that type. The total value of your other accounts is {VALU} dollars.".format(
VALU=accounts.get_balance(PERSON))
elif qual:
speech = "The total value of your accounts is {VALU} dollars.".format(VALU=accounts.get_balance(PERSON))
else:
speech = "The total value of your accounts is {VALU} dollars.".format(VALU=accounts.get_balance(PERSON))
print("Response:")
print(speech)
speech += "\nAnything else I can help you with today?"
return {
"speech": speech,
"displayText": speech,
#"data": {},
# "contextOut": [],
"source": "home"
}
if __name__ == '__main__':
port = int(os.getenv('PORT', 5000))
print "Starting app on port %d" % port
app.run(debug=True, port=port, host='0.0.0.0')
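# Example request (hypothetical values) in the API.AI/Dialogflow v1 webhook shape this
# handler expects:
#   curl -X POST http://localhost:5000/webhook -H 'Content-Type: application/json' \
#     -d '{"result": {"action": "account-balance", "parameters": {"account-type": "401k", "qualifier": ""}}}'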
| apache-2.0 | -4,790,441,292,185,734,000 | 26.098765 | 144 | 0.625513 | false |
Lind-Project/native_client | src/trusted/validator_arm/validation-report.py | 1 | 3575 | #!/usr/bin/python2
#
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
import sys
import textwrap
from subprocess import Popen, PIPE
_OBJDUMP = 'arm-linux-gnueabi-objdump'
def _objdump(binary, vaddr, ctx_before, ctx_after):
args = [
_OBJDUMP,
'-d',
'-G',
binary,
'--start-address=0x%08X' % (vaddr - (4 * ctx_before)),
'--stop-address=0x%08X' % (vaddr + 4 + (4 * ctx_after))]
highlight = ctx_before
lines = 0
for line in Popen(args, stdout=PIPE).stdout.read().split('\n'):
if line.startswith(' '):
if highlight == 0:
print '--> ', line
else:
print ' ', line
highlight -= 1
lines += 1
if not lines:
print ' (not found)'
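# For reference, the command assembled above looks roughly like (hypothetical address):
#   arm-linux-gnueabi-objdump -d -G <binary> --start-address=0x00007FF8 --stop-address=0x0000800C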
def _problem_info(code):
return {
'kProblemUnsafe': ['Instruction is unsafe', 0, 0],
'kProblemBranchSplitsPattern': ['The destination of this branch is '
'part of an instruction sequence that must be executed in full, '
'or is inline data',
0, 0],
'kProblemPatternCrossesBundle': ['This instruction is part of a '
'sequence that must execute in full, but it spans a bundle edge '
'-- so an indirect branch may target it',
1, 1],
'kProblemBranchInvalidDest': ['This branch targets a location that is '
'outside of the application\'s executable code, and is not a valid '
'trampoline entry point', 0, 0],
'kProblemUnsafeLoadStore': ['This store instruction is not preceded by '
'a valid address mask instruction', 1, 0],
'kProblemUnsafeBranch': ['This indirect branch instruction is not '
'preceded by a valid address mask instruction', 1, 0],
'kProblemUnsafeDataWrite': ['This instruction affects a register that '
'must contain a valid data-region address, but is not followed by '
'a valid address mask instruction', 0, 1],
'kProblemReadOnlyRegister': ['This instruction changes the contents of '
'a read-only register', 0, 0],
'kProblemMisalignedCall': ['This linking branch instruction is not in '
'the last slot of its bundle, so when its LR result is masked, the '
'caller will not return to the next instruction', 0, 0],
}[code]
def _safety_msg(val):
return {
0: 'UNKNOWN', # Should not appear
1: 'is undefined',
2: 'has unpredictable effects',
3: 'is deprecated',
4: 'is forbidden',
5: 'uses forbidden operands',
}[val]
def _explain_problem(binary, vaddr, safety, code, ref_vaddr):
msg, ctx_before, ctx_after = _problem_info(code)
if safety == 6:
msg = "At %08X: %s:" % (vaddr, msg)
else:
msg = ("At %08X: %s (%s):"
% (vaddr, msg, _safety_msg(safety)))
print '\n'.join(textwrap.wrap(msg, 70, subsequent_indent=' '))
_objdump(binary, vaddr, ctx_before, ctx_after)
if ref_vaddr:
print "Destination address %08X:" % ref_vaddr
_objdump(binary, ref_vaddr, 1, 1)
def _parse_report(line):
vaddr_hex, safety, code, ref_vaddr_hex = line.split()
return (int(vaddr_hex, 16), int(safety), code, int(ref_vaddr_hex, 16))
for line in sys.stdin:
if line.startswith('ncval: '):
line = line[7:].strip()
_explain_problem(sys.argv[1], *_parse_report(line))
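# Usage sketch (assuming the validator prefixes its findings with 'ncval: '):
#   ncval <nexe> 2>&1 | ./validation-report.py <nexe>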
| bsd-3-clause | 2,495,468,691,190,867,000 | 36.631579 | 80 | 0.59021 | false |
core-code/LibVT | Dependencies/Core3D/Preprocessing/generateOctreeFromObj.py | 1 | 10849 | #!/usr/bin/env python
#
# generateOctreeFromObj.py
# Core3D
#
# Created by Julian Mayer on 16.11.07.
# Copyright (c) 2010 A. Julian Mayer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, bz2
from struct import *
from vecmath import *
TEXTURING = 1
GENTEX = 0
MAX_FLOAT = 1e+308
MIN_FLOAT = -1e308
MAX_USHORT = 0xFFFF
MAX_FACES_PER_TREELEAF = 1000
MAX_RECURSION_DEPTH = 10
SCALE = 1.0
vertices = []
faces = []
normals = []
texcoords = []
def faceContent(f, i):
if i == 0:
if f.count("/") == 0: return f
else: return f[:f.find("/")]
elif i == 1:
if f.count("/") == 0 or f.count("//") == 1: return 0
else:
if f.count("/") == 2:
return f[f.find("/")+1:f.rfind("/")]
else:
return f[f.find("/")+1:]
else:
if f.count("/") != 2: return 0
else: return f[f.rfind("/")+1:]
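# Worked example: for the OBJ face token "12/34/56", faceContent(t, 0) == "12" (vertex),
# faceContent(t, 1) == "34" (texcoord) and faceContent(t, 2) == "56" (normal); for a
# token like "12//56" the texcoord slot falls back to 0.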
def calculateAABB(faces):
mi = [MAX_FLOAT, MAX_FLOAT,MAX_FLOAT]
ma = [MIN_FLOAT, MIN_FLOAT, MIN_FLOAT]
for face in faces:
for i in range(3):
for v in range(3):
ma[i] = max(ma[i], vertices[face[v]][i])
mi[i] = min(mi[i], vertices[face[v]][i])
return mi,ma
def classifyVertex(vertex, splitpoint): #TODO: do splitting or other funny things
if vertex[0] > splitpoint[0] and vertex[1] > splitpoint[1] and vertex[2] > splitpoint[2]: return 0
if vertex[0] <= splitpoint[0] and vertex[1] > splitpoint[1] and vertex[2] > splitpoint[2]: return 1
if vertex[0] > splitpoint[0] and vertex[1] > splitpoint[1] and vertex[2] <= splitpoint[2]: return 2
if vertex[0] > splitpoint[0] and vertex[1] <= splitpoint[1] and vertex[2] > splitpoint[2]: return 3
if vertex[0] <= splitpoint[0] and vertex[1] > splitpoint[1] and vertex[2] <= splitpoint[2]: return 4
if vertex[0] > splitpoint[0] and vertex[1] <= splitpoint[1] and vertex[2] <= splitpoint[2]: return 5
if vertex[0] <= splitpoint[0] and vertex[1] <= splitpoint[1] and vertex[2] > splitpoint[2]: return 6
if vertex[0] <= splitpoint[0] and vertex[1] <= splitpoint[1] and vertex[2] <= splitpoint[2]:return 7
def classifyFace(face, splitpoint):
return max(classifyVertex(vertices[face[0]], splitpoint), classifyVertex(vertices[face[1]], splitpoint), classifyVertex(vertices[face[2]], splitpoint)) #TODO: random instead of max?
def buildOctree(faces, offset, level):
mi,ma = calculateAABB(faces)
ournum = buildOctree.counter
buildOctree.counter += 1
childoffset = offset
if len(faces) > MAX_FACES_PER_TREELEAF and level < MAX_RECURSION_DEPTH:
splitpoint = [mi[0] + (ma[0] - mi[0])/2, mi[1] + (ma[1] - mi[1])/2, mi[2] + (ma[2] - mi[2])/2]
newfaces = [[],[],[],[],[],[],[],[]]
newnodes = []
childnums = []
for face in faces:
x = classifyFace(face, splitpoint)
newfaces[x].append(face)
for newface in newfaces:
a,b = buildOctree(newface, childoffset, level+1)
childoffset += len(newface)
childnums.append(a)
newnodes.extend(b)
faces[:] = newfaces[0]+newfaces[1]+newfaces[2]+newfaces[3]+newfaces[4]+newfaces[5]+newfaces[6]+newfaces[7]
newnodes.insert(0, [offset, len(faces), mi[0], mi[1], mi[2], ma[0] - mi[0], ma[1] - mi[1], ma[2] - mi[2], childnums[0], childnums[1], childnums[2], childnums[3], childnums[4], childnums[5], childnums[6], childnums[7]])
return ournum, newnodes
else:
return ournum, [[offset, len(faces), mi[0], mi[1], mi[2], ma[0] - mi[0], ma[1] - mi[1], ma[2] - mi[2], MAX_USHORT, MAX_USHORT, MAX_USHORT, MAX_USHORT, MAX_USHORT, MAX_USHORT, MAX_USHORT, MAX_USHORT]]
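# Each node record is [face offset, face count, AABB min x/y/z, AABB extent x/y/z,
# eight child node indices], with MAX_USHORT marking an absent child (leaf node).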
try:
if (len(sys.argv)) == 1: raise Exception('input', 'error')
f = open(sys.argv[len(sys.argv) - 1], 'r')
of = 0
for i in range(1, len(sys.argv) - 1):
if sys.argv[i].startswith("-s="): SCALE = float(sys.argv[i][3:])
elif sys.argv[i].startswith("-t"): TEXTURING = 0
elif sys.argv[i].startswith("-g="): GENTEX = int(sys.argv[i][3:]); TEXTURING = 0
elif sys.argv[i].startswith("-m="): MAX_FACES_PER_TREELEAF = int(sys.argv[i][3:])
elif sys.argv[i].startswith("-r="): MAX_RECURSION_DEPTH = int(sys.argv[i][3:])
elif sys.argv[i].startswith("-o="): of = open(sys.argv[i][3:], 'w')
else: raise Exception('input', 'error')
if of == 0: of = open(sys.argv[len(sys.argv) - 1][:sys.argv[len(sys.argv) - 1].rfind(".")] + ".octree.bz2", 'w')
except:
print """Usage: generateOctreeFromObj [options] obj_file
Options:
-t Ignore texture coordinates, produce an untextured Octree
-s=<scale> Scale all coordinates by <scale>
-m=<max_faces> Limit faces per leafnode to <max_faces> (Default: 1000)
-r=<max_recursion> Limit tree depth to <max_recursion> (Default: 10)
-o=<octree_file> Place the output octree into <octree_file>
-g=<0,1,2,3,4> Texture coordinate generation:
0 = off, 1 = on, 2 = swap X, 3 = swap Y, 4 = swap XY"""
sys.exit()
print "Info: Reading the OBJ-file"
lines = f.readlines()
for line in lines:
i = line.strip().split(" ")[0]
c = line[2:].strip().split(" ")
if i == "v":
vertices.append([float(c[0]) * SCALE, float(c[1]) * SCALE, float(c[2]) * SCALE])
elif i == "vn":
normals.append(normalize([float(c[0]), float(c[1]), float(c[2])]))
elif i == "vt":
texcoords.append([float(c[0]), float(c[1]), 0.0]) #TODO: if we discard W anyway we shouldnt store it
elif i == "f":
if (len(c) > 4):
print "Error: faces with more than 4 edges not supported"
sys.exit()
elif (len(c) == 4): #triangulate
faces.append([int(faceContent(c[0],0))-1, int(faceContent(c[1],0))-1, int(faceContent(c[2],0))-1, int(faceContent(c[0],2))-1, int(faceContent(c[1],2))-1, int(faceContent(c[2],2))-1, int(faceContent(c[0],1))-1, int(faceContent(c[1],1))-1, int(faceContent(c[2],1))-1])
faces.append([int(faceContent(c[0],0))-1, int(faceContent(c[2],0))-1, int(faceContent(c[3],0))-1, int(faceContent(c[0],2))-1, int(faceContent(c[2],2))-1, int(faceContent(c[3],2))-1, int(faceContent(c[0],1))-1, int(faceContent(c[2],1))-1, int(faceContent(c[3],1))-1])
else:
faces.append([int(faceContent(c[0],0))-1, int(faceContent(c[1],0))-1, int(faceContent(c[2],0))-1, int(faceContent(c[0],2))-1, int(faceContent(c[1],2))-1, int(faceContent(c[2],2))-1, int(faceContent(c[0],1))-1, int(faceContent(c[1],1))-1, int(faceContent(c[2],1))-1])
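# Each parsed face stores 9 zero-based indices: 3 vertex, 3 normal, 3 texcoord
# (-1 where the OBJ file omitted that component).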
print "Info: Building the Octree"
buildOctree.counter = 0
a,nodes = buildOctree(faces, 0, 0)
if len(nodes) > MAX_USHORT:
print "Error: too many octree nodes generated, increase MAX_FACES_PER_TREELEAF"
sys.exit()
print "Info: Unifying and Uniquing Vertices, Normals and Texcoords"
normalwarning = 0
newvertices = []
newvertices_dict = {} #it's perhaps not the most intuitive way to have the newvertices stored twice, but it prevents a quadratic runtime
for face in faces:
for i in range(3):
if face[i+3] == -1:
normalwarning += 1
normals.append(normalize(crossProduct(substract(vertices[face[0]],vertices[face[1]]), substract(vertices[face[2]],vertices[face[0]]))))
face[i+3] = len(normals)-1
if TEXTURING and face[i+6] == -1:
print "Warning: some face without a texcoord detected, turning texturing off"
TEXTURING = 0
for i in range(3):
if len(vertices[face[i]]) == 3:
vertices[face[i]].extend(normals[face[i+3]])
if TEXTURING:
vertices[face[i]].extend(texcoords[face[i+6]])
elif vertices[face[i]][3:6] != normals[face[i+3]] or (TEXTURING and vertices[face[i]][6:] != texcoords[face[i+6]]): #if this vertex has a different normal/texcoord we have to duplicate it because opengl has only one index list
sf = face[i]
if TEXTURING:
key = vertices[face[i]][0], vertices[face[i]][1], vertices[face[i]][2], normals[face[i+3]][0], normals[face[i+3]][1], normals[face[i+3]][2], texcoords[face[i+6]][0], texcoords[face[i+6]][1], texcoords[face[i+6]][2]
else:
key = vertices[face[i]][0], vertices[face[i]][1], vertices[face[i]][2], normals[face[i+3]][0], normals[face[i+3]][1], normals[face[i+3]][2]
if newvertices_dict.has_key(key):
face[i] = len(vertices)+newvertices_dict[key]
if sf == face[i]: #or create duplicate
newvertices.append(list(key))
newvertices_dict[key] = len(newvertices)-1
face[i] = len(vertices)+len(newvertices)-1 #don't forget to update the index to the duplicated vertex+normal
vertices.extend(newvertices)
if normalwarning:
print "Warning: some face without a normal detected, calculating it (x" + str(normalwarning) +")"
print "Info: Writing the resulting Octree-file"
dummywarning = 0
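# Header: magic word (0xDEADBEEF marks a textured octree, 0x6D616C62 an untextured
# one), node count and vertex count; node records follow, then vertices, then faces.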
out = pack('III', 0xDEADBEEF if (TEXTURING or GENTEX) else 0x6D616C62, len(nodes), len(vertices))
for node in nodes:
out += pack('IIffffffHHHHHHHH', node[0], node[1], node[2], node[3], node[4], node[5], node[6], node[7], node[8], node[9], node[10], node[11], node[12], node[13], node[14], node[15])
for vert in vertices:
try:
if TEXTURING:
out += pack('ffffffff', vert[0], vert[1], vert[2], vert[3], vert[4], vert[5], vert[6], vert[7])
elif GENTEX:
xcoord = (vert[0] - nodes[0][2]) / nodes[0][5]
ycoord = (vert[2] - nodes[0][4]) / nodes[0][7]
out += pack('ffffffff', vert[0], vert[1], vert[2], vert[3], vert[4], vert[5], (1.0 - xcoord) if (GENTEX == 2 or GENTEX == 4) else xcoord, (1.0 - ycoord) if (GENTEX == 3 or GENTEX == 4) else ycoord)
else:
out += pack('ffffff', vert[0], vert[1], vert[2], vert[3], vert[4], vert[5]) #the vertex includes the normal now, if not the vertex is unreferenced and this throws
except:
dummywarning += 1
if TEXTURING:
out += pack('ffffffff', 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
else:
out += pack('ffffff', 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
if (len(vertices) <= MAX_USHORT): type = 'HHH'
else: type = 'III'
for face in faces:
out += pack(type, face[0], face[1], face[2])
of.write(bz2.compress(out))
if dummywarning:
print "Warning: unreferenced vertex detected, writing dummy vertex (x" + str(dummywarning) +")"
print "\nSUCCESS:\n\nnodes:\t\t", len(nodes), "\nvertices:\t", len(vertices), "\t( duplicatesWithDifferentNormalsOrTexcoords:", len(newvertices), ")", "\nfaces:\t\t", len(faces), "\n"
| mit | 7,716,171,793,022,086,000 | 49.696262 | 462 | 0.660337 | false |
Effective-Quadratures/Effective-Quadratures | equadratures/scalers.py | 1 | 7167 | """
Classes to scale data.
Some of these classes are called internally by other modules, but they can also be used independently as a pre-processing stage.
Scalers can be fitted to one set of data and then used to transform other data sets with the same number of dimensions.
Examples
--------
Fitting scaler implicitly during transform
>>> # Define some 1D sample data
>>> X = np.random.RandomState(0).normal(2,0.5,200)
>>> (X.mean(),X.std())
>>> (2.0354552465705806, 0.5107113843479977)
>>>
>>> # Scale to zero mean and unit variance
>>> X = eq.scalers.scaler_meanvar().transform(X)
>>> (X.mean(),X.std())
>>> (2.886579864025407e-17, 1.0)
Using the same scaling to transform train and test data
>>> # Define some 5D example data
>>> X = np.random.RandomState(0).uniform(-10,10,size=(50,5))
>>> y = X[:,0]**2 - X[:,4]
>>> # Split into train/test
>>> X_train, X_test,y_train,y_test = eq.datasets.train_test_split(X,y,train=0.7,random_seed=0)
>>> (X_train.min(),X_train.max())
>>> (-9.906090476149059, 9.767476761184525)
>>>
>>> # Define a scaler and fit to training split
>>> scaler = eq.scalers.scaler_minmax()
>>> scaler.fit(X_train)
>>>
>>> # Transform train and test data with same scaler
>>> X_train = scaler.transform(X_train)
>>> X_test = scaler.transform(X_test)
>>> (X_train.min(),X_train.max())
>>> (-1.0, 1.0)
>>>
>>> # Finally, e.g. of transforming data back again
>>> X_train = scaler.untransform(X_train)
>>> (X_train.min(),X_train.max())
>>> (-9.906090476149059, 9.767476761184525)
"""
import numpy as np
class scaler_minmax(object):
""" Scale the data to have a min/max of -1 to 1. """
def __init__(self):
self.fitted = False
def fit(self,X):
""" Fit scaler to data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to fit scaler to.
"""
if X.ndim == 1: X = X.reshape(-1,1)
self.Xmin = np.min(X,axis=0)
self.Xmax = np.max(X,axis=0)
self.fitted = True
def transform(self,X):
""" Transforms data. Calls :meth:`~equadratures.scalers.scaler_minmax.fit` fit internally if scaler not already fitted.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to transform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing transformed data.
"""
if X.ndim == 1: X = X.reshape(-1,1)
if not self.fitted: self.fit(X)
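        # Map each column linearly from [Xmin, Xmax] onto [-1, 1].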
Xtrans = 2.0 * ( (X[:,:]-self.Xmin)/(self.Xmax - self.Xmin) ) - 1.0
return Xtrans
def untransform(self,X):
""" Untransforms data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to untransform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing untransformed data.
Raises
------
Exception
scaler has not been fitted
"""
if X.ndim == 1: X = X.reshape(-1,1)
if not self.fitted:
raise Exception('scaler has not been fitted')
Xuntrans = 0.5*(X[:,:]+1)*(self.Xmax - self.Xmin) + self.Xmin
return Xuntrans
class scaler_meanvar(object):
"""
Scale the data to have a mean of 0 and variance of 1.
"""
def __init__(self):
self.fitted = False
def fit(self,X):
""" Fit scaler to data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to fit scaler to.
"""
if X.ndim == 1: X = X.reshape(-1,1)
self.Xmean = np.mean(X,axis=0)
self.Xstd = np.std(X,axis=0)
self.fitted = True
def transform(self,X):
""" Transforms data. Calls :meth:`~equadratures.scalers.scaler_meanvar.fit` fit internally if scaler not already fitted.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to transform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing transformed data.
"""
if X.ndim == 1: X = X.reshape(-1,1)
if not self.fitted: self.fit(X)
eps = np.finfo(np.float64).tiny
Xtrans = (X[:,:]-self.Xmean)/(self.Xstd+eps)
return Xtrans
def untransform(self,X):
""" Untransforms data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to untransform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing untransformed data.
Raises
------
Exception
scaler has not been fitted
"""
if X.ndim == 1: X = X.reshape(-1,1)
if not self.fitted:
raise Exception('scaler has not been fitted')
eps = np.finfo(np.float64).tiny
Xuntrans = X[:,:]*(self.Xstd+eps) + self.Xmean
return Xuntrans
class scaler_custom(object):
""" Scale the data by the provided offset and divisor.
Parameters
----------
offset : float, numpy.ndarray
Offset to subtract from data. Either a float, or array with shape (number_of_samples, number_of_dimensions).
div : float, numpy.ndarray
Divisor to divide data with. Either a float, or array with shape (number_of_samples, number_of_dimensions).
"""
def __init__(self, offset, div):
self.offset = offset
self.div = div
self.fitted = True
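        # Example (sketch): scaler_custom(offset=X.mean(axis=0), div=X.std(axis=0))
        # reproduces the mean/variance scaling above with user-supplied statistics.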
def transform(self,X):
""" Transforms data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to transform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing transformed data.
"""
if X.ndim == 1: X = X.reshape(-1,1)
eps = np.finfo(np.float64).tiny
Xtrans = (X - self.offset)/(self.div + eps)
return Xtrans
def untransform(self,X):
""" Untransforms data.
Parameters
----------
X : numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing data to untransform.
Returns
-------
numpy.ndarray
Array with shape (number_of_samples, number_of_dimensions) containing untransformed data.
"""
if X.ndim == 1: X = X.reshape(-1,1)
eps = np.finfo(np.float64).tiny
Xuntrans = X * (self.div + eps) + self.offset
return Xuntrans
| lgpl-2.1 | 3,159,541,299,082,102,300 | 31.425339 | 128 | 0.571588 | false |
cloudconductor/cloud_conductor_gui | gui_app/views/applicationDeployViews.py | 1 | 8093 | # -*- coding: utf-8 -*-
from django.shortcuts import render, redirect, render_to_response
import ast
from ..forms import selecttForm
from ..forms import applicationForm
from ..utils import ApplicationUtil
from ..utils import ApplicationHistoryUtil
from ..utils import EnvironmentUtil
from ..utils import StringUtil
from ..utils.PathUtil import Path
from ..utils.PathUtil import Html
from ..enum.FunctionCode import FuncCode
from ..enum.ApplicationType import ApplicaionType
from ..enum.ProtocolType import ProtocolType
from ..utils import SessionUtil
from ..utils import SystemUtil
from ..logs import log
def applicationSelect(request):
try:
session = request.session
code = FuncCode.appDep_application.value
token = session.get('auth_token')
project_id = session.get('project_id')
application = ''
list = ''
list = ApplicationUtil.get_application_version(
code, token, project_id)
if request.method == "GET":
application = request.session.get('w_app_select')
return render(request, Html.appdeploy_applicationSelect,
{'list': list, 'application': application,
'message': ''})
elif request.method == "POST":
param = request.POST
# -- Session add
application = selectPut(param)
form = selecttForm(application)
if not form.is_valid():
return render(request, Html.appdeploy_applicationSelect,
{'list': list, 'application': application,
'form': form, 'message': ''})
request.session['w_app_select'] = application
return redirect(Path.appdeploy_environmentSelect)
except Exception as ex:
log.error(FuncCode.appDep_application.value, None, ex)
return render(request, Html.appdeploy_applicationSelect,
{'list': list, 'application': application,
'message': str(ex)})
def applicationCreate(request):
code = FuncCode.applicationCreate.value
apptype = list(ApplicaionType)
protocol = list(ProtocolType)
systems = None
try:
if not SessionUtil.check_login(request):
return redirect(Path.logout)
if not SessionUtil.check_permission(request, 'application', 'create'):
return render_to_response(Html.error_403)
token = request.session['auth_token']
project_id = request.session['project_id']
systems = SystemUtil.get_system_list2(code, token, project_id)
if request.method == "GET":
return render(request, Html.appdeploy_applicationCreate,
{'app': '', 'history': '', 'apptype': apptype,
'protocol': protocol, 'message': '',
'systems': systems, 'save': True})
else:
# -- Get a value from a form
p = request.POST
cpPost = p.copy()
# -- Validate check
form = applicationForm(p)
form.full_clean()
if not form.is_valid():
return render(request, Html.appdeploy_applicationCreate,
{'app': cpPost, 'history': cpPost,
'apptype': apptype,
'protocol': protocol, 'form': form,
'message': '', 'systems': systems,
'save': True})
# -- 1.Create a application, api call
app = ApplicationUtil.create_application(code, token, form.data)
# -- 2.Create a applicationhistory, api call
ApplicationHistoryUtil.create_history(
code, token, app.get('id'), form.data)
request.session['w_app_select'] = {"id": app.get("id"),
"name": app.get("name")}
return redirect(Path.appdeploy_environmentSelect)
except Exception as ex:
log.error(FuncCode.applicationCreate.value, None, ex)
return render(request, Html.appdeploy_applicationCreate,
{'app': request.POST, 'history': request.POST,
'apptype': apptype,
'protocol': protocol, 'form': '',
'systems': systems, 'message': str(ex), 'save': True})
def environmentSelect(request):
list = ''
try:
code = FuncCode.appDep_environment.value
session = request.session
environment = session.get('w_env_select')
token = session['auth_token']
project_id = session['project_id']
app = ApplicationUtil.get_application_detail(
code, token, session.get('w_app_select').get('id'))
list = EnvironmentUtil.get_environment_list_system_id(
code, token, project_id, app.get("system_id"))
if request.method == "GET":
return render(request, Html.appdeploy_environmentSelect,
{"list": list, 'environment': environment,
'message': ''})
elif request.method == "POST":
param = request.POST
environment = selectPut(param)
form = selecttForm(environment)
if not form.is_valid():
return render(request, Html.appdeploy_environmentSelect,
{"list": list, 'environment': environment,
'form': form,
'message': ''})
request.session['w_env_select'] = environment
return redirect(Path.appdeploy_confirm)
except Exception as ex:
log.error(FuncCode.appDep_environment.value, None, ex)
return render(request, Html.appdeploy_environmentSelect,
{"list": '', 'environment': '', 'message': str(ex)})
def confirm(request):
try:
code = FuncCode.appDep_confirm.value
session = request.session
app_session = session.get('w_app_select')
env_session = session.get('w_env_select')
if request.method == "GET":
return render(request, Html.appdeploy_confirm,
{'application': app_session,
'environment': env_session, 'message': ''})
elif request.method == "POST":
session = request.session
code = FuncCode.newapp_confirm.value
token = session.get('auth_token')
env_id = env_session.get('id')
app_id = app_session.get('id')
# -- application deploy
ApplicationUtil.deploy_application(code, token, env_id, app_id)
# -- session delete
sessionDelete(session)
return redirect(Path.top)
except Exception as ex:
log.error(FuncCode.newapp_confirm.value, None, ex)
session = request.session
return render(request, Html.appdeploy_confirm,
{"application": session.get('application'),
'environment': session.get('environment'),
'message': str(ex)})
def selectPut(req):
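    # The form posts 'id' as a stringified dict (e.g. "{'id': 1, 'name': 'app'}");
    # ast.literal_eval turns it back into a dict and only id/name are kept.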
if StringUtil.isEmpty(req):
return None
select_param = req.get('id', None)
if StringUtil.isNotEmpty(select_param):
select_param = ast.literal_eval(select_param)
param = {
'id': str(select_param.get('id')),
'name': select_param.get('name'),
}
return param
else:
return select_param
def putBlueprint(param):
blueprint = param.get('blueprint', None)
if not (blueprint is None) and not (blueprint == ''):
blueprint = ast.literal_eval(blueprint)
param['blueprint_id'] = blueprint.get('id')
param['version'] = blueprint.get('version')
return param
def sessionDelete(session):
if 'w_env_select' in session:
del session['w_env_select']
if 'w_app_select' in session:
del session['w_app_select']
| apache-2.0 | 7,722,206,676,188,103,000 | 34.034632 | 78 | 0.561596 | false |
hackerkid/zulip | zerver/openapi/python_examples.py | 1 | 46543 | # Zulip's OpenAPI-based API documentation system is documented at
# https://zulip.readthedocs.io/en/latest/documentation/api.html
#
# This file defines the Python code examples that appear in Zulip's
# REST API documentation, and also contains a system for running the
# example code as part of the `tools/test-api` test suite.
#
# The actual documentation appears within these blocks:
# # {code_example|start}
# Code here
# # {code_example|end}
#
# Whereas the surrounding code is test setup logic.
import json
import os
import sys
from functools import wraps
from typing import Any, Callable, Dict, Iterable, List, Optional, Set, TypeVar, cast
from zulip import Client
from zerver.lib import mdiff
from zerver.models import get_realm, get_user
from zerver.openapi.openapi import validate_against_openapi_schema
ZULIP_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
TEST_FUNCTIONS: Dict[str, Callable[..., object]] = {}
REGISTERED_TEST_FUNCTIONS: Set[str] = set()
CALLED_TEST_FUNCTIONS: Set[str] = set()
FuncT = TypeVar("FuncT", bound=Callable[..., object])
def openapi_test_function(endpoint: str) -> Callable[[FuncT], FuncT]:
"""This decorator is used to register an OpenAPI test function with
its endpoint. Example usage:
@openapi_test_function("/messages/render:post")
def ...
"""
def wrapper(test_func: FuncT) -> FuncT:
@wraps(test_func)
def _record_calls_wrapper(*args: object, **kwargs: object) -> object:
CALLED_TEST_FUNCTIONS.add(test_func.__name__)
return test_func(*args, **kwargs)
REGISTERED_TEST_FUNCTIONS.add(test_func.__name__)
TEST_FUNCTIONS[endpoint] = _record_calls_wrapper
return cast(FuncT, _record_calls_wrapper) # https://github.com/python/mypy/issues/1927
return wrapper
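# Keys in TEST_FUNCTIONS use the "<endpoint>:<method>" form seen in the decorator
# calls below, e.g. "/users/me/subscriptions:post".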
def ensure_users(ids_list: List[int], user_names: List[str]) -> None:
# Ensure that the list of user ids (ids_list)
# matches the users we want to refer to (user_names).
realm = get_realm("zulip")
user_ids = [get_user(name + "@zulip.com", realm).id for name in user_names]
assert ids_list == user_ids
@openapi_test_function("/users/me/subscriptions:post")
def add_subscriptions(client: Client) -> None:
# {code_example|start}
# Subscribe to the stream "new stream"
result = client.add_subscriptions(
streams=[
{
"name": "new stream",
"description": "New stream for testing",
},
],
)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions", "post", "200_0")
# {code_example|start}
# To subscribe other users to a stream, you may pass
# the `principals` argument, like so:
user_id = 26
result = client.add_subscriptions(
streams=[
{"name": "new stream", "description": "New stream for testing"},
],
principals=[user_id],
)
# {code_example|end}
assert result["result"] == "success"
assert "[email protected]" in result["subscribed"]
def test_add_subscriptions_already_subscribed(client: Client) -> None:
result = client.add_subscriptions(
streams=[
{"name": "new stream", "description": "New stream for testing"},
],
principals=["[email protected]"],
)
validate_against_openapi_schema(result, "/users/me/subscriptions", "post", "200_1")
def test_authorization_errors_fatal(client: Client, nonadmin_client: Client) -> None:
client.add_subscriptions(
streams=[
{"name": "private_stream"},
],
)
stream_id = client.get_stream_id("private_stream")["stream_id"]
client.call_endpoint(
f"streams/{stream_id}",
method="PATCH",
request={"is_private": True},
)
result = nonadmin_client.add_subscriptions(
streams=[
{"name": "private_stream"},
],
authorization_errors_fatal=False,
)
validate_against_openapi_schema(result, "/users/me/subscriptions", "post", "400_0")
result = nonadmin_client.add_subscriptions(
streams=[
{"name": "private_stream"},
],
authorization_errors_fatal=True,
)
validate_against_openapi_schema(result, "/users/me/subscriptions", "post", "400_1")
@openapi_test_function("/users/{user_id_or_email}/presence:get")
def get_user_presence(client: Client) -> None:
# {code_example|start}
# Get presence information for "[email protected]"
result = client.get_user_presence("[email protected]")
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id_or_email}/presence", "get", "200")
@openapi_test_function("/users/me/presence:post")
def update_presence(client: Client) -> None:
request = {
"status": "active",
"ping_only": False,
"new_user_input": False,
}
result = client.update_presence(request)
assert result["result"] == "success"
@openapi_test_function("/users:post")
def create_user(client: Client) -> None:
# {code_example|start}
# Create a user
request = {
"email": "[email protected]",
"password": "temp",
"full_name": "New User",
}
result = client.create_user(request)
# {code_example|end}
validate_against_openapi_schema(result, "/users", "post", "200")
# Test "Email already used error"
result = client.create_user(request)
validate_against_openapi_schema(result, "/users", "post", "400")
@openapi_test_function("/users:get")
def get_members(client: Client) -> None:
# {code_example|start}
# Get all users in the realm
result = client.get_members()
# {code_example|end}
validate_against_openapi_schema(result, "/users", "get", "200")
members = [m for m in result["members"] if m["email"] == "[email protected]"]
assert len(members) == 1
newbie = members[0]
assert not newbie["is_admin"]
assert newbie["full_name"] == "New User"
# {code_example|start}
# You may pass the `client_gravatar` query parameter as follows:
result = client.get_members({"client_gravatar": True})
# {code_example|end}
validate_against_openapi_schema(result, "/users", "get", "200")
assert result["members"][0]["avatar_url"] is None
# {code_example|start}
# You may pass the `include_custom_profile_fields` query parameter as follows:
result = client.get_members({"include_custom_profile_fields": True})
# {code_example|end}
validate_against_openapi_schema(result, "/users", "get", "200")
for member in result["members"]:
if member["is_bot"]:
assert member.get("profile_data", None) is None
else:
assert member.get("profile_data", None) is not None
@openapi_test_function("/users/{email}:get")
def get_user_by_email(client: Client) -> None:
# {code_example|start}
# Fetch details on a user given a user ID
email = "[email protected]"
result = client.call_endpoint(
url=f"/users/{email}",
method="GET",
)
# {code_example|end}
validate_against_openapi_schema(result, "/users/{email}", "get", "200")
@openapi_test_function("/users/{user_id}:get")
def get_single_user(client: Client) -> None:
# {code_example|start}
# Fetch details on a user given a user ID
user_id = 8
result = client.get_user_by_id(user_id)
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}", "get", "200")
# {code_example|start}
# If you'd like data on custom profile fields, you can request them as follows:
result = client.get_user_by_id(user_id, include_custom_profile_fields=True)
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}", "get", "200")
@openapi_test_function("/users/{user_id}:delete")
def deactivate_user(client: Client) -> None:
# {code_example|start}
# Deactivate a user
user_id = 8
result = client.deactivate_user_by_id(user_id)
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}", "delete", "200")
@openapi_test_function("/users/{user_id}/reactivate:post")
def reactivate_user(client: Client) -> None:
# {code_example|start}
# Reactivate a user
user_id = 8
result = client.reactivate_user_by_id(user_id)
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}/reactivate", "post", "200")
@openapi_test_function("/users/{user_id}:patch")
def update_user(client: Client) -> None:
# {code_example|start}
# Change a user's full name.
user_id = 10
result = client.update_user_by_id(user_id, full_name="New Name")
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}", "patch", "200")
# {code_example|start}
# Change value of the custom profile field with ID 9.
user_id = 8
result = client.update_user_by_id(user_id, profile_data=[{"id": 9, "value": "some data"}])
# {code_example|end}
validate_against_openapi_schema(result, "/users/{user_id}", "patch", "400")
@openapi_test_function("/users/{user_id}/subscriptions/{stream_id}:get")
def get_subscription_status(client: Client) -> None:
# {code_example|start}
# Check whether a user is a subscriber to a given stream.
user_id = 7
stream_id = 1
result = client.call_endpoint(
url=f"/users/{user_id}/subscriptions/{stream_id}",
method="GET",
)
# {code_example|end}
validate_against_openapi_schema(
result, "/users/{user_id}/subscriptions/{stream_id}", "get", "200"
)
@openapi_test_function("/realm/linkifiers:get")
def get_realm_linkifiers(client: Client) -> None:
# {code_example|start}
# Fetch all the filters in this organization
result = client.call_endpoint(
url="/realm/linkifiers",
method="GET",
)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/linkifiers", "get", "200")
@openapi_test_function("/realm/profile_fields:get")
def get_realm_profile_fields(client: Client) -> None:
# {code_example|start}
# Fetch all the custom profile fields in the user's organization.
result = client.call_endpoint(
url="/realm/profile_fields",
method="GET",
)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/profile_fields", "get", "200")
@openapi_test_function("/realm/profile_fields:patch")
def reorder_realm_profile_fields(client: Client) -> None:
# {code_example|start}
# Reorder the custom profile fields in the user's organization.
order = [8, 7, 6, 5, 4, 3, 2, 1]
request = {"order": json.dumps(order)}
result = client.call_endpoint(url="/realm/profile_fields", method="PATCH", request=request)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/profile_fields", "patch", "200")
@openapi_test_function("/realm/profile_fields:post")
def create_realm_profile_field(client: Client) -> None:
# {code_example|start}
# Create a custom profile field in the user's organization.
request = {"name": "Phone", "hint": "Contact No.", "field_type": 1}
result = client.call_endpoint(url="/realm/profile_fields", method="POST", request=request)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/profile_fields", "post", "200")
@openapi_test_function("/realm/filters:post")
def add_realm_filter(client: Client) -> None:
# {code_example|start}
# Add a filter to automatically linkify #<number> to the corresponding
# issue in Zulip's server repo
result = client.add_realm_filter(
"#(?P<id>[0-9]+)", "https://github.com/zulip/zulip/issues/%(id)s"
)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/filters", "post", "200")
@openapi_test_function("/realm/filters/{filter_id}:delete")
def remove_realm_filter(client: Client) -> None:
# {code_example|start}
# Remove the linkifier (realm_filter) with ID 1
result = client.remove_realm_filter(1)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/filters/{filter_id}", "delete", "200")
@openapi_test_function("/realm/playgrounds:post")
def add_realm_playground(client: Client) -> None:
# {code_example|start}
# Add a realm playground for Python
request = {
"name": "Python playground",
"pygments_language": json.dumps("Python"),
"url_prefix": json.dumps("https://python.example.com"),
}
result = client.call_endpoint(url="/realm/playgrounds", method="POST", request=request)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/playgrounds", "post", "200")
@openapi_test_function("/realm/playgrounds/{playground_id}:delete")
def remove_realm_playground(client: Client) -> None:
# {code_example|start}
# Remove the playground with ID 1
result = client.call_endpoint(url="/realm/playgrounds/1", method="DELETE")
# {code_example|end}
validate_against_openapi_schema(result, "/realm/playgrounds/{playground_id}", "delete", "200")
@openapi_test_function("/users/me:get")
def get_profile(client: Client) -> None:
# {code_example|start}
# Get the profile of the user/bot that requests this endpoint,
# which is `client` in this case:
result = client.get_profile()
# {code_example|end}
validate_against_openapi_schema(result, "/users/me", "get", "200")
@openapi_test_function("/users/me:delete")
def deactivate_own_user(client: Client, owner_client: Client) -> None:
user_id = client.get_profile()["user_id"]
# {code_example|start}
# Deactivate the account of the current user/bot that requests.
result = client.call_endpoint(
url="/users/me",
method="DELETE",
)
# {code_example|end}
# Reactivate the account to avoid polluting other tests.
owner_client.reactivate_user_by_id(user_id)
validate_against_openapi_schema(result, "/users/me", "delete", "200")
@openapi_test_function("/get_stream_id:get")
def get_stream_id(client: Client) -> int:
# {code_example|start}
# Get the ID of a given stream
stream_name = "new stream"
result = client.get_stream_id(stream_name)
# {code_example|end}
validate_against_openapi_schema(result, "/get_stream_id", "get", "200")
return result["stream_id"]
@openapi_test_function("/streams/{stream_id}:delete")
def archive_stream(client: Client, stream_id: int) -> None:
result = client.add_subscriptions(
streams=[
{
"name": "stream to be archived",
"description": "New stream for testing",
},
],
)
# {code_example|start}
# Archive the stream named 'stream to be archived'
stream_id = client.get_stream_id("stream to be archived")["stream_id"]
result = client.delete_stream(stream_id)
# {code_example|end}
validate_against_openapi_schema(result, "/streams/{stream_id}", "delete", "200")
assert result["result"] == "success"
@openapi_test_function("/streams:get")
def get_streams(client: Client) -> None:
# {code_example|start}
# Get all streams that the user has access to
result = client.get_streams()
# {code_example|end}
validate_against_openapi_schema(result, "/streams", "get", "200")
streams = [s for s in result["streams"] if s["name"] == "new stream"]
assert streams[0]["description"] == "New stream for testing"
# {code_example|start}
# You may pass in one or more of the query parameters mentioned above
# as keyword arguments, like so:
result = client.get_streams(include_public=False)
# {code_example|end}
validate_against_openapi_schema(result, "/streams", "get", "200")
assert len(result["streams"]) == 4
@openapi_test_function("/streams/{stream_id}:patch")
def update_stream(client: Client, stream_id: int) -> None:
# {code_example|start}
# Update the stream by a given ID
request = {
"stream_id": stream_id,
"stream_post_policy": 2,
"is_private": True,
}
result = client.update_stream(request)
# {code_example|end}
validate_against_openapi_schema(result, "/streams/{stream_id}", "patch", "200")
assert result["result"] == "success"
@openapi_test_function("/user_groups:get")
def get_user_groups(client: Client) -> int:
# {code_example|start}
# Get all user groups of the realm
result = client.get_user_groups()
# {code_example|end}
validate_against_openapi_schema(result, "/user_groups", "get", "200")
hamlet_user_group = [u for u in result["user_groups"] if u["name"] == "hamletcharacters"][0]
assert hamlet_user_group["description"] == "Characters of Hamlet"
marketing_user_group = [u for u in result["user_groups"] if u["name"] == "marketing"][0]
return marketing_user_group["id"]
def test_user_not_authorized_error(nonadmin_client: Client) -> None:
result = nonadmin_client.get_streams(include_all_active=True)
validate_against_openapi_schema(result, "/rest-error-handling", "post", "400_2")
def get_subscribers(client: Client) -> None:
result = client.get_subscribers(stream="new stream")
assert result["subscribers"] == ["[email protected]", "[email protected]"]
def get_user_agent(client: Client) -> None:
result = client.get_user_agent()
assert result.startswith("ZulipPython/")
@openapi_test_function("/users/me/subscriptions:get")
def list_subscriptions(client: Client) -> None:
# {code_example|start}
# Get all streams that the user is subscribed to
result = client.list_subscriptions()
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions", "get", "200")
streams = [s for s in result["subscriptions"] if s["name"] == "new stream"]
assert streams[0]["description"] == "New stream for testing"
@openapi_test_function("/users/me/subscriptions:delete")
def remove_subscriptions(client: Client) -> None:
# {code_example|start}
# Unsubscribe from the stream "new stream"
result = client.remove_subscriptions(
["new stream"],
)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions", "delete", "200")
# test it was actually removed
result = client.list_subscriptions()
assert result["result"] == "success"
streams = [s for s in result["subscriptions"] if s["name"] == "new stream"]
assert len(streams) == 0
# {code_example|start}
# Unsubscribe another user from the stream "new stream"
result = client.remove_subscriptions(
["new stream"],
principals=["[email protected]"],
)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions", "delete", "200")
@openapi_test_function("/users/me/subscriptions/muted_topics:patch")
def toggle_mute_topic(client: Client) -> None:
# Send a test message
message = {
"type": "stream",
"to": "Denmark",
"topic": "boat party",
}
client.call_endpoint(
url="messages",
method="POST",
request=message,
)
# {code_example|start}
# Mute the topic "boat party" in the stream "Denmark"
request = {
"stream": "Denmark",
"topic": "boat party",
"op": "add",
}
result = client.mute_topic(request)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions/muted_topics", "patch", "200")
# {code_example|start}
# Unmute the topic "boat party" in the stream "Denmark"
request = {
"stream": "Denmark",
"topic": "boat party",
"op": "remove",
}
result = client.mute_topic(request)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions/muted_topics", "patch", "200")
@openapi_test_function("/users/me/muted_users/{muted_user_id}:post")
def add_user_mute(client: Client) -> None:
ensure_users([10], ["hamlet"])
# {code_example|start}
# Mute user with ID 10
muted_user_id = 10
result = client.call_endpoint(url=f"/users/me/muted_users/{muted_user_id}", method="POST")
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/muted_users/{muted_user_id}", "post", "200")
@openapi_test_function("/users/me/muted_users/{muted_user_id}:delete")
def remove_user_mute(client: Client) -> None:
ensure_users([10], ["hamlet"])
# {code_example|start}
# Unmute user with ID 10
muted_user_id = 10
result = client.call_endpoint(url=f"/users/me/muted_users/{muted_user_id}", method="DELETE")
# {code_example|end}
validate_against_openapi_schema(
result, "/users/me/muted_users/{muted_user_id}", "delete", "200"
)
@openapi_test_function("/mark_all_as_read:post")
def mark_all_as_read(client: Client) -> None:
# {code_example|start}
# Mark all of the user's unread messages as read
result = client.mark_all_as_read()
# {code_example|end}
validate_against_openapi_schema(result, "/mark_all_as_read", "post", "200")
@openapi_test_function("/mark_stream_as_read:post")
def mark_stream_as_read(client: Client) -> None:
# {code_example|start}
# Mark the unread messages in stream with ID "1" as read
result = client.mark_stream_as_read(1)
# {code_example|end}
validate_against_openapi_schema(result, "/mark_stream_as_read", "post", "200")
@openapi_test_function("/mark_topic_as_read:post")
def mark_topic_as_read(client: Client) -> None:
# Grab an existing topic name
topic_name = client.get_stream_topics(1)["topics"][0]["name"]
# {code_example|start}
# Mark the unread messages in stream 1's topic "topic_name" as read
result = client.mark_topic_as_read(1, topic_name)
# {code_example|end}
validate_against_openapi_schema(result, "/mark_stream_as_read", "post", "200")
@openapi_test_function("/users/me/subscriptions/properties:post")
def update_subscription_settings(client: Client) -> None:
# {code_example|start}
# Update the user's subscription in stream #1 to pin it to the top of the
# stream list; and in stream #3 to have the hex color "f00"
request = [
{
"stream_id": 1,
"property": "pin_to_top",
"value": True,
},
{
"stream_id": 3,
"property": "color",
"value": "#f00f00",
},
]
result = client.update_subscription_settings(request)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/subscriptions/properties", "POST", "200")
@openapi_test_function("/messages/render:post")
def render_message(client: Client) -> None:
# {code_example|start}
# Render a message
request = {
"content": "**foo**",
}
result = client.render_message(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/render", "post", "200")
@openapi_test_function("/messages:get")
def get_messages(client: Client) -> None:
# {code_example|start}
    # Get the last 100 messages sent by "[email protected]" to the stream "Verona"
request: Dict[str, Any] = {
"anchor": "newest",
"num_before": 100,
"num_after": 0,
"narrow": [
{"operator": "sender", "operand": "[email protected]"},
{"operator": "stream", "operand": "Verona"},
],
}
result = client.get_messages(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages", "get", "200")
assert len(result["messages"]) <= request["num_before"]
@openapi_test_function("/messages/matches_narrow:get")
def check_messages_match_narrow(client: Client) -> None:
message = {"type": "stream", "to": "Verona", "topic": "test_topic", "content": "http://foo.com"}
msg_ids = []
response = client.send_message(message)
msg_ids.append(response["id"])
message["content"] = "no link here"
response = client.send_message(message)
msg_ids.append(response["id"])
# {code_example|start}
# Check which messages within an array match a narrow.
request = {
"msg_ids": msg_ids,
"narrow": [{"operator": "has", "operand": "link"}],
}
result = client.call_endpoint(url="messages/matches_narrow", method="GET", request=request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/matches_narrow", "get", "200")
@openapi_test_function("/messages/{message_id}:get")
def get_raw_message(client: Client, message_id: int) -> None:
assert int(message_id)
# {code_example|start}
# Get the raw content of the message with ID "message_id"
result = client.get_raw_message(message_id)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}", "get", "200")
@openapi_test_function("/attachments:get")
def get_attachments(client: Client) -> None:
# {code_example|start}
# Get your attachments.
result = client.get_attachments()
# {code_example|end}
validate_against_openapi_schema(result, "/attachments", "get", "200")
@openapi_test_function("/messages:post")
def send_message(client: Client) -> int:
request: Dict[str, Any] = {}
# {code_example|start}
# Send a stream message
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages", "post", "200")
# test that the message was actually sent
message_id = result["id"]
url = "messages/" + str(message_id)
result = client.call_endpoint(
url=url,
method="GET",
)
assert result["result"] == "success"
assert result["raw_content"] == request["content"]
ensure_users([10], ["hamlet"])
# {code_example|start}
# Send a private message
user_id = 10
request = {
"type": "private",
"to": [user_id],
"content": "With mirth and laughter let old wrinkles come.",
}
result = client.send_message(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages", "post", "200")
# test that the message was actually sent
message_id = result["id"]
url = "messages/" + str(message_id)
result = client.call_endpoint(
url=url,
method="GET",
)
assert result["result"] == "success"
assert result["raw_content"] == request["content"]
return message_id
@openapi_test_function("/messages/{message_id}/reactions:post")
def add_reaction(client: Client, message_id: int) -> None:
request: Dict[str, Any] = {}
# {code_example|start}
# Add an emoji reaction
request = {
"message_id": message_id,
"emoji_name": "octopus",
}
result = client.add_reaction(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}/reactions", "post", "200")
@openapi_test_function("/messages/{message_id}/reactions:delete")
def remove_reaction(client: Client, message_id: int) -> None:
request: Dict[str, Any] = {}
# {code_example|start}
# Remove an emoji reaction
request = {
"message_id": message_id,
"emoji_name": "octopus",
}
result = client.remove_reaction(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}/reactions", "delete", "200")
def test_nonexistent_stream_error(client: Client) -> None:
request = {
"type": "stream",
"to": "nonexistent_stream",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
validate_against_openapi_schema(result, "/messages", "post", "400_0")
def test_private_message_invalid_recipient(client: Client) -> None:
request = {
"type": "private",
"to": "[email protected]",
"content": "With mirth and laughter let old wrinkles come.",
}
result = client.send_message(request)
validate_against_openapi_schema(result, "/messages", "post", "400_1")
@openapi_test_function("/messages/{message_id}:patch")
def update_message(client: Client, message_id: int) -> None:
assert int(message_id)
# {code_example|start}
# Edit a message
# (make sure that message_id below is set to the ID of the
# message you wish to update)
request = {
"message_id": message_id,
"content": "New content",
}
result = client.update_message(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}", "patch", "200")
# test it was actually updated
url = "messages/" + str(message_id)
result = client.call_endpoint(
url=url,
method="GET",
)
assert result["result"] == "success"
assert result["raw_content"] == request["content"]
def test_update_message_edit_permission_error(client: Client, nonadmin_client: Client) -> None:
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
request = {
"message_id": result["id"],
"content": "New content",
}
result = nonadmin_client.update_message(request)
validate_against_openapi_schema(result, "/messages/{message_id}", "patch", "400")
@openapi_test_function("/messages/{message_id}:delete")
def delete_message(client: Client, message_id: int) -> None:
# {code_example|start}
# Delete the message with ID "message_id"
result = client.delete_message(message_id)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}", "delete", "200")
def test_delete_message_edit_permission_error(client: Client, nonadmin_client: Client) -> None:
request = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
result = client.send_message(request)
result = nonadmin_client.delete_message(result["id"])
validate_against_openapi_schema(result, "/messages/{message_id}", "delete", "400_1")
@openapi_test_function("/messages/{message_id}/history:get")
def get_message_history(client: Client, message_id: int) -> None:
# {code_example|start}
# Get the edit history for message with ID "message_id"
result = client.get_message_history(message_id)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/{message_id}/history", "get", "200")
@openapi_test_function("/realm/emoji:get")
def get_realm_emoji(client: Client) -> None:
# {code_example|start}
result = client.get_realm_emoji()
# {code_example|end}
validate_against_openapi_schema(result, "/realm/emoji", "GET", "200")
@openapi_test_function("/messages/flags:post")
def update_message_flags(client: Client) -> None:
# Send a few test messages
request: Dict[str, Any] = {
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "I come not, friends, to steal away your hearts.",
}
message_ids = []
    for _ in range(3):
message_ids.append(client.send_message(request)["id"])
# {code_example|start}
# Add the "read" flag to the messages with IDs in "message_ids"
request = {
"messages": message_ids,
"op": "add",
"flag": "read",
}
result = client.update_message_flags(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/flags", "post", "200")
# {code_example|start}
# Remove the "starred" flag from the messages with IDs in "message_ids"
request = {
"messages": message_ids,
"op": "remove",
"flag": "starred",
}
result = client.update_message_flags(request)
# {code_example|end}
validate_against_openapi_schema(result, "/messages/flags", "post", "200")
def register_queue_all_events(client: Client) -> str:
# Register the queue and get all events
# Mainly for verifying schema of /register.
result = client.register()
validate_against_openapi_schema(result, "/register", "post", "200")
return result["queue_id"]
@openapi_test_function("/register:post")
def register_queue(client: Client) -> str:
# {code_example|start}
# Register the queue
result = client.register(
event_types=["message", "realm_emoji"],
)
# {code_example|end}
validate_against_openapi_schema(result, "/register", "post", "200")
return result["queue_id"]
@openapi_test_function("/events:delete")
def deregister_queue(client: Client, queue_id: str) -> None:
# {code_example|start}
# Delete a queue (queue_id is the ID of the queue
# to be removed)
result = client.deregister(queue_id)
# {code_example|end}
validate_against_openapi_schema(result, "/events", "delete", "200")
# Test "BAD_EVENT_QUEUE_ID" error
result = client.deregister(queue_id)
validate_against_openapi_schema(result, "/events", "delete", "400")
@openapi_test_function("/server_settings:get")
def get_server_settings(client: Client) -> None:
# {code_example|start}
# Fetch the settings for this server
result = client.get_server_settings()
# {code_example|end}
validate_against_openapi_schema(result, "/server_settings", "get", "200")
@openapi_test_function("/settings/notifications:patch")
def update_notification_settings(client: Client) -> None:
# {code_example|start}
# Enable push notifications even when online
request = {
"enable_offline_push_notifications": True,
"enable_online_push_notifications": True,
}
result = client.update_notification_settings(request)
# {code_example|end}
validate_against_openapi_schema(result, "/settings/notifications", "patch", "200")
@openapi_test_function("/settings/display:patch")
def update_display_settings(client: Client) -> None:
# {code_example|start}
# Show user list on left sidebar in narrow windows.
# Change emoji set used for display to Google modern.
request = {
"left_side_userlist": True,
"emojiset": '"google"',
}
result = client.call_endpoint("settings/display", method="PATCH", request=request)
# {code_example|end}
validate_against_openapi_schema(result, "/settings/display", "patch", "200")
@openapi_test_function("/user_uploads:post")
def upload_file(client: Client) -> None:
path_to_file = os.path.join(ZULIP_DIR, "zerver", "tests", "images", "img.jpg")
# {code_example|start}
# Upload a file
with open(path_to_file, "rb") as fp:
result = client.upload_file(fp)
# Share the file by including it in a message.
client.send_message(
{
"type": "stream",
"to": "Denmark",
"topic": "Castle",
"content": "Check out [this picture]({}) of my castle!".format(result["uri"]),
}
)
# {code_example|end}
validate_against_openapi_schema(result, "/user_uploads", "post", "200")
@openapi_test_function("/users/me/{stream_id}/topics:get")
def get_stream_topics(client: Client, stream_id: int) -> None:
# {code_example|start}
result = client.get_stream_topics(stream_id)
# {code_example|end}
validate_against_openapi_schema(result, "/users/me/{stream_id}/topics", "get", "200")
@openapi_test_function("/typing:post")
def set_typing_status(client: Client) -> None:
ensure_users([10, 11], ["hamlet", "iago"])
# {code_example|start}
    # The user has started to type in the group PM with Hamlet and Iago
user_id1 = 10
user_id2 = 11
request = {
"op": "start",
"to": [user_id1, user_id2],
}
result = client.set_typing_status(request)
# {code_example|end}
validate_against_openapi_schema(result, "/typing", "post", "200")
# {code_example|start}
    # The user has finished typing in the group PM with Hamlet and Iago
user_id1 = 10
user_id2 = 11
request = {
"op": "stop",
"to": [user_id1, user_id2],
}
result = client.set_typing_status(request)
# {code_example|end}
validate_against_openapi_schema(result, "/typing", "post", "200")
@openapi_test_function("/realm/emoji/{emoji_name}:post")
def upload_custom_emoji(client: Client) -> None:
emoji_path = os.path.join(ZULIP_DIR, "zerver", "tests", "images", "img.jpg")
# {code_example|start}
# Upload a custom emoji; assume `emoji_path` is the path to your image.
with open(emoji_path, "rb") as fp:
emoji_name = "my_custom_emoji"
result = client.call_endpoint(
f"realm/emoji/{emoji_name}",
method="POST",
files=[fp],
)
# {code_example|end}
validate_against_openapi_schema(result, "/realm/emoji/{emoji_name}", "post", "200")
@openapi_test_function("/users/me/alert_words:get")
def get_alert_words(client: Client) -> None:
result = client.get_alert_words()
assert result["result"] == "success"
@openapi_test_function("/users/me/alert_words:post")
def add_alert_words(client: Client) -> None:
word = ["foo", "bar"]
result = client.add_alert_words(word)
assert result["result"] == "success"
@openapi_test_function("/users/me/alert_words:delete")
def remove_alert_words(client: Client) -> None:
word = ["foo"]
result = client.remove_alert_words(word)
assert result["result"] == "success"
@openapi_test_function("/user_groups/create:post")
def create_user_group(client: Client) -> None:
ensure_users([6, 7, 8, 10], ["aaron", "zoe", "cordelia", "hamlet"])
# {code_example|start}
request = {
"name": "marketing",
"description": "The marketing team.",
"members": [6, 7, 8, 10],
}
result = client.create_user_group(request)
# {code_example|end}
validate_against_openapi_schema(result, "/user_groups/create", "post", "200")
assert result["result"] == "success"
@openapi_test_function("/user_groups/{user_group_id}:patch")
def update_user_group(client: Client, user_group_id: int) -> None:
# {code_example|start}
request = {
"group_id": user_group_id,
"name": "marketing",
"description": "The marketing team.",
}
result = client.update_user_group(request)
# {code_example|end}
assert result["result"] == "success"
@openapi_test_function("/user_groups/{user_group_id}:delete")
def remove_user_group(client: Client, user_group_id: int) -> None:
# {code_example|start}
result = client.remove_user_group(user_group_id)
# {code_example|end}
validate_against_openapi_schema(result, "/user_groups/{user_group_id}", "delete", "200")
assert result["result"] == "success"
@openapi_test_function("/user_groups/{user_group_id}/members:post")
def update_user_group_members(client: Client, user_group_id: int) -> None:
ensure_users([8, 10, 11], ["cordelia", "hamlet", "iago"])
# {code_example|start}
request = {
"delete": [8, 10],
"add": [11],
}
result = client.update_user_group_members(user_group_id, request)
# {code_example|end}
validate_against_openapi_schema(result, "/user_groups/{group_id}/members", "post", "200")
def test_invalid_api_key(client_with_invalid_key: Client) -> None:
result = client_with_invalid_key.list_subscriptions()
validate_against_openapi_schema(result, "/rest-error-handling", "post", "400_0")
def test_missing_request_argument(client: Client) -> None:
result = client.render_message({})
validate_against_openapi_schema(result, "/rest-error-handling", "post", "400_1")
def test_user_account_deactivated(client: Client) -> None:
request = {
"content": "**foo**",
}
result = client.render_message(request)
validate_against_openapi_schema(result, "/rest-error-handling", "post", "403_0")
def test_realm_deactivated(client: Client) -> None:
request = {
"content": "**foo**",
}
result = client.render_message(request)
validate_against_openapi_schema(result, "/rest-error-handling", "post", "403_1")
def test_invalid_stream_error(client: Client) -> None:
result = client.get_stream_id("nonexistent")
validate_against_openapi_schema(result, "/get_stream_id", "get", "400")
# SETUP METHODS FOLLOW
def test_against_fixture(
result: Dict[str, Any],
fixture: Dict[str, Any],
check_if_equal: Optional[Iterable[str]] = None,
check_if_exists: Optional[Iterable[str]] = None,
) -> None:
assertLength(result, fixture)
if check_if_equal is None and check_if_exists is None:
        for key in fixture:
            assertEqual(key, result, fixture)
if check_if_equal is not None:
for key in check_if_equal:
assertEqual(key, result, fixture)
if check_if_exists is not None:
for key in check_if_exists:
assertIn(key, result)
def assertEqual(key: str, result: Dict[str, Any], fixture: Dict[str, Any]) -> None:
if result[key] != fixture[key]:
first = f"{key} = {result[key]}"
second = f"{key} = {fixture[key]}"
raise AssertionError(
"Actual and expected outputs do not match; showing diff:\n"
+ mdiff.diff_strings(first, second)
)
else:
assert result[key] == fixture[key]
def assertLength(result: Dict[str, Any], fixture: Dict[str, Any]) -> None:
if len(result) != len(fixture):
result_string = json.dumps(result, indent=4, sort_keys=True)
fixture_string = json.dumps(fixture, indent=4, sort_keys=True)
raise AssertionError(
"The lengths of the actual and expected outputs do not match; showing diff:\n"
+ mdiff.diff_strings(result_string, fixture_string)
)
else:
assert len(result) == len(fixture)
def assertIn(key: str, result: Dict[str, Any]) -> None:
    if key not in result:
        raise AssertionError(
            f"The actual output does not contain the key `{key}`.",
        )
    else:
        assert key in result
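# Illustrative use of the fixture helpers above (hypothetical data, not part
# of the recorded fixtures): compare an API result against a fixture,
# requiring some keys to match exactly and another merely to be present.
#
#   result = {"result": "success", "msg": "", "message_id": 42}
#   fixture = {"result": "success", "msg": "", "message_id": 1}
#   test_against_fixture(result, fixture, check_if_equal=["result", "msg"],
#                        check_if_exists=["message_id"])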
def test_messages(client: Client, nonadmin_client: Client) -> None:
render_message(client)
message_id = send_message(client)
add_reaction(client, message_id)
remove_reaction(client, message_id)
update_message(client, message_id)
get_raw_message(client, message_id)
get_messages(client)
check_messages_match_narrow(client)
get_message_history(client, message_id)
delete_message(client, message_id)
mark_all_as_read(client)
mark_stream_as_read(client)
mark_topic_as_read(client)
update_message_flags(client)
test_nonexistent_stream_error(client)
test_private_message_invalid_recipient(client)
test_update_message_edit_permission_error(client, nonadmin_client)
test_delete_message_edit_permission_error(client, nonadmin_client)
def test_users(client: Client, owner_client: Client) -> None:
create_user(client)
get_members(client)
get_single_user(client)
deactivate_user(client)
reactivate_user(client)
update_user(client)
get_user_by_email(client)
get_subscription_status(client)
get_profile(client)
update_notification_settings(client)
update_display_settings(client)
upload_file(client)
get_attachments(client)
set_typing_status(client)
update_presence(client)
get_user_presence(client)
create_user_group(client)
user_group_id = get_user_groups(client)
update_user_group(client, user_group_id)
update_user_group_members(client, user_group_id)
remove_user_group(client, user_group_id)
get_alert_words(client)
add_alert_words(client)
remove_alert_words(client)
deactivate_own_user(client, owner_client)
add_user_mute(client)
remove_user_mute(client)
def test_streams(client: Client, nonadmin_client: Client) -> None:
add_subscriptions(client)
test_add_subscriptions_already_subscribed(client)
list_subscriptions(client)
stream_id = get_stream_id(client)
update_stream(client, stream_id)
get_streams(client)
get_subscribers(client)
remove_subscriptions(client)
toggle_mute_topic(client)
update_subscription_settings(client)
update_notification_settings(client)
get_stream_topics(client, 1)
archive_stream(client, stream_id)
test_user_not_authorized_error(nonadmin_client)
test_authorization_errors_fatal(client, nonadmin_client)
def test_queues(client: Client) -> None:
# Note that the example for api/get-events is not tested.
# Since, methods such as client.get_events() or client.call_on_each_message
# are blocking calls and since the event queue backend is already
# thoroughly tested in zerver/tests/test_event_queue.py, it is not worth
# the effort to come up with asynchronous logic for testing those here.
queue_id = register_queue(client)
deregister_queue(client, queue_id)
register_queue_all_events(client)
def test_server_organizations(client: Client) -> None:
get_realm_linkifiers(client)
add_realm_filter(client)
add_realm_playground(client)
get_server_settings(client)
remove_realm_filter(client)
remove_realm_playground(client)
get_realm_emoji(client)
upload_custom_emoji(client)
get_realm_profile_fields(client)
reorder_realm_profile_fields(client)
create_realm_profile_field(client)
def test_errors(client: Client) -> None:
test_missing_request_argument(client)
test_invalid_stream_error(client)
def test_the_api(client: Client, nonadmin_client: Client, owner_client: Client) -> None:
get_user_agent(client)
test_users(client, owner_client)
test_streams(client, nonadmin_client)
test_messages(client, nonadmin_client)
test_queues(client)
test_server_organizations(client)
test_errors(client)
sys.stdout.flush()
if REGISTERED_TEST_FUNCTIONS != CALLED_TEST_FUNCTIONS:
print("Error! Some @openapi_test_function tests were never called:")
print(" ", REGISTERED_TEST_FUNCTIONS - CALLED_TEST_FUNCTIONS)
sys.exit(1)
| apache-2.0 | -7,320,417,508,209,486,000 | 30.090848 | 100 | 0.64362 | false |
CloudI/CloudI | src/service_api/python/jsonrpclib/tests/test_server.py | 1 | 1597 | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Tests the pooled server
:license: Apache License 2.0
"""
# JSON-RPC library
from jsonrpclib import ServerProxy
from jsonrpclib.SimpleJSONRPCServer import PooledJSONRPCServer
from jsonrpclib.threadpool import ThreadPool
# Standard library
import random
import threading
import unittest
# ------------------------------------------------------------------------------
def add(a, b):
    return a + b
class PooledServerTests(unittest.TestCase):
"""
These tests verify that the pooled server works correctly
"""
def test_default_pool(self, pool=None):
"""
Tests the default pool
"""
# Setup server
server = PooledJSONRPCServer(("localhost", 0), thread_pool=pool)
server.register_function(add)
# Serve in a thread
thread = threading.Thread(target=server.serve_forever)
thread.daemon = True
thread.start()
# Find its port
port = server.socket.getsockname()[1]
# Make the client
client = ServerProxy("http://localhost:{0}".format(port))
# Check calls
for _ in range(10):
a, b = random.random(), random.random()
result = client.add(a, b)
self.assertEqual(result, a+b)
# Close server
server.server_close()
thread.join()
def test_custom_pool(self):
"""
Tests the ability to have a custom pool
"""
# Setup the pool
pool = ThreadPool(2)
pool.start()
self.test_default_pool(pool)
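# Minimal direct invocation (assuming jsonrpclib-pelix and its dependencies
# are installed); the standard unittest entry point would be:
#
# if __name__ == '__main__':
#     unittest.main()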
| mit | -3,816,758,300,949,687,000 | 23.19697 | 80 | 0.579837 | false |
dan-cristian/haiot | gpio/io_common/__init__.py | 1 | 3453 | from common import Constant
from storage.model import m
from main.logger_helper import L
import abc
from common import utils
# update in db (without propagating the change by default)
def update_custom_relay(pin_code, pin_value, notify=False, ignore_missing=False):
relay = m.ZoneCustomRelay.find_one({m.ZoneCustomRelay.gpio_pin_code: pin_code,
m.ZoneCustomRelay.gpio_host_name: Constant.HOST_NAME})
if relay is not None:
relay.relay_is_on = pin_value
relay.save_changed_fields(broadcast=notify)
L.l.info('Updated relay {} val={}'.format(pin_code, pin_value))
else:
if not ignore_missing:
L.l.warning('Unable to find relay pin {}'.format(pin_code))
# update in db (without propagating the change by default)
def update_listener_custom_relay(relay, is_on):
relay.relay_is_on = is_on
relay.save_changed_fields(broadcast=True)
L.l.info('Updated listener relay {} val={}'.format(relay, is_on))
class Port:
_port_list = []
type = None
TYPE_GPIO = 'gpio'
TYPE_PIFACE = 'piface'
TYPE_PCF8574 = 'pcf8574'
_types = frozenset([TYPE_GPIO, TYPE_PIFACE, TYPE_PCF8574])
def __init__(self):
pass
class OutputPort(Port):
def __init__(self):
pass
class InputPort(Port):
def __init__(self):
pass
class IOPort(InputPort, OutputPort):
def __init__(self):
pass
class GpioBase:
__metaclass__ = abc.ABCMeta
@staticmethod
@abc.abstractmethod
def get_current_record(record):
return None, None
@staticmethod
@abc.abstractmethod
def get_db_record(key):
return None
def record_update(self, record, changed_fields):
# record = utils.json_to_record(self.obj, json_object)
current_record, key = self.get_current_record(record)
        if current_record is not None:
            # collect just the changed fields; they are forwarded to set()
            kwargs = {}
            for field in changed_fields:
                kwargs[field] = getattr(record, field)
if record.host_name == Constant.HOST_NAME and record.source_host != Constant.HOST_NAME:
# https://stackoverflow.com/questions/1496346/passing-a-list-of-kwargs
self.set(key, **kwargs)
# do nothing, action done already as it was local
# save will be done on model.save
# record.save_changed_fields()
@staticmethod
@abc.abstractmethod
def set(key, values):
pass
@staticmethod
@abc.abstractmethod
def save(key, values):
pass
@staticmethod
@abc.abstractmethod
def get(key):
return None
@staticmethod
@abc.abstractmethod
def sync_to_db(key):
pass
@staticmethod
@abc.abstractmethod
def unload():
pass
def __init__(self, obj):
self.obj = obj
def format_piface_pin_code(board_index, pin_direction, pin_index):
return str(board_index) + ":" + str(pin_direction) + ":" + str(pin_index)
# port format is x:direction:y, e.g. 0:in:3, x=board, direction=in/out, y=pin index (0 based)
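# For illustration (hypothetical values): format_piface_pin_code(1, "out", 5)
# returns "1:out:5", and decode_piface_pin("1:out:5") returns (1, "out", 5).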
def decode_piface_pin(pin_code):
ar = pin_code.split(':')
if len(ar) == 3:
return int(ar[0]), ar[1], int(ar[2])
else:
L.l.error('Invalid piface pin code {}'.format(pin_code))
return None, None, None | gpl-2.0 | 1,524,281,052,770,179,800 | 25.775194 | 99 | 0.609036 | false |
pimoroni/unicorn-hat-hd | examples/show-png.py | 1 | 1382 | #!/usr/bin/env python
import time
from sys import exit
try:
from PIL import Image
except ImportError:
exit('This script requires the pillow module\nInstall with: sudo pip install pillow')
import unicornhathd
print("""Unicorn HAT HD: Show a PNG image!
This basic example shows use of the Python Pillow library.
The tiny 16x16 bosses in lofi.png are from Oddball:
http://forums.tigsource.com/index.php?topic=8834.0
Licensed under Creative Commons Attribution-Noncommercial-Share Alike 3.0
Unported License.
Press Ctrl+C to exit!
""")
unicornhathd.rotation(0)
unicornhathd.brightness(0.6)
width, height = unicornhathd.get_shape()
img = Image.open('lofi.png')
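# The nested loops below walk the source image in width x height tiles
# (16x16 on a Unicorn HAT HD) and display each tile that contains at least
# one lit pixel, pausing half a second between tiles.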
try:
while True:
for o_x in range(int(img.size[0] / width)):
for o_y in range(int(img.size[1] / height)):
valid = False
for x in range(width):
for y in range(height):
pixel = img.getpixel(((o_x * width) + y, (o_y * height) + x))
r, g, b = int(pixel[0]), int(pixel[1]), int(pixel[2])
if r or g or b:
valid = True
unicornhathd.set_pixel(x, y, r, g, b)
if valid:
unicornhathd.show()
time.sleep(0.5)
except KeyboardInterrupt:
unicornhathd.off()
| mit | -8,984,369,968,961,083,000 | 24.592593 | 89 | 0.583213 | false |
stuartlangridge/raspi-recorder | listener_daemon.py | 1 | 2508 | import threading, time, subprocess
from bluetooth import *
server_sock=BluetoothSocket( RFCOMM )
server_sock.bind(("",PORT_ANY))
server_sock.listen(1)
port = server_sock.getsockname()[1]
uuid = "c3091f5f-7e2f-4908-b628-18231dfb5034"
advertise_service( server_sock, "PiRecorder",
service_id = uuid,
service_classes = [ uuid, SERIAL_PORT_CLASS ],
profiles = [ SERIAL_PORT_PROFILE ],
)
print "Waiting for connection on RFCOMM channel %d" % port
client_sock, client_info = server_sock.accept()
print "Accepted connection from ", client_info
lock = threading.Lock()
def mainthread(sock):
try:
with lock:
sock.send("\r\nWelcome to recorder. [1] start recording, [2] stop.\r\n\r\n")
while True:
data = sock.recv(1)
if len(data) == 0: break
if data == "1":
with lock:
sock.send("Starting sound recorder\r\n")
os.system("supervisorctl -c ./supervisor.conf start sound_recorder")
elif data == "2":
with lock:
sock.send("Stopping sound recorder\r\n")
os.system("supervisorctl -c ./supervisor.conf stop sound_recorder")
else:
print "received [%s]" % data
with lock:
output = "unrecognised [%s]\r\n" % (data,)
sock.send(output)
except IOError:
print "got io error"
def heartbeat(sock):
while True:
time.sleep(5)
o = subprocess.check_output(["supervisorctl", "-c",
os.path.join(os.path.split(__file__)[0], "supervisor.conf"),
"status"])
procs = {}
for parts in [x.split() for x in o.split("\n")]:
if len(parts) > 1:
procs[parts[0]] = parts[1]
sr = procs.get("sound_recorder", "ABSENT")
svfs = os.statvfs(".")
bytes_remaining = svfs.f_frsize * svfs.f_bavail
bytes_total = svfs.f_frsize * svfs.f_blocks
with lock:
sock.send("heartbeat %s %s %s\r\n" % (
sr, bytes_remaining, bytes_total))
mainthread = threading.Thread(target=mainthread, args=(client_sock,))
mainthread.start()
heartbeatthread = threading.Thread(target=heartbeat, args=(client_sock,))
heartbeatthread.setDaemon(True)
heartbeatthread.start()
mainthread.join()
print "disconnected"
client_sock.close()
server_sock.close()
print "all done"
| mit | 294,923,898,243,346,560 | 33.356164 | 88 | 0.570175 | false |
Azure/azure-sdk-for-python | sdk/communication/azure-communication-sms/azure/communication/sms/_models/_models.py | 1 | 2064 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class SmsSendResult(msrest.serialization.Model):
"""Response for a single recipient.
All required parameters must be populated in order to send to Azure.
:param to: Required. The recipient's phone number in E.164 format.
:type to: str
:param message_id: The identifier of the outgoing Sms message. Only present if message
processed.
:type message_id: str
:param http_status_code: Required. HTTP Status code.
:type http_status_code: int
:param successful: Required. Indicates if the message is processed successfully or not.
:type successful: bool
:param error_message: Optional error message in case of 4xx/5xx/repeatable errors.
:type error_message: str
"""
_validation = {
'to': {'required': True},
'http_status_code': {'required': True},
'successful': {'required': True},
}
_attribute_map = {
'to': {'key': 'to', 'type': 'str'},
'message_id': {'key': 'messageId', 'type': 'str'},
'http_status_code': {'key': 'httpStatusCode', 'type': 'int'},
'successful': {'key': 'successful', 'type': 'bool'},
'error_message': {'key': 'errorMessage', 'type': 'str'},
}
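    # Illustrative construction (hypothetical values), mirroring how the
    # generated deserializer would populate this model:
    #
    #   result = SmsSendResult(to="+14255550123", http_status_code=202,
    #                          successful=True, message_id="abc123")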
def __init__(
self,
**kwargs
):
super(SmsSendResult, self).__init__(**kwargs)
self.to = kwargs['to']
self.message_id = kwargs.get('message_id', None)
self.http_status_code = kwargs['http_status_code']
self.successful = kwargs['successful']
self.error_message = kwargs.get('error_message', None)
| mit | 8,139,079,774,969,169,000 | 37.943396 | 94 | 0.593508 | false |
JoelEager/pyTanks.Server | start.py | 1 | 2083 | """
Startup script for the pyTanks server
Requirements:
Python 3.5 or newer
websockets 7.0 (pip install websockets==7.0)
Usage:
python start.py
The pyTanks server uses the settings found in config.py to control how the server works. Those values can be
changed directly or be overridden by appending one or more of these command line args:
    log=n - Overrides the default logging level. (See the usage section of the readme.)
    minPlayers=n - Overrides the minimum number of players required to start a game.
    ip:port - Overrides the ip and port used to host the server.
"""
import sys
import config
def main():
"""
Check the environment, apply any command line args to config.py, and start wsServer.py
"""
# Check Python version
    if sys.version_info < (3, 5):
print("Python 3.5 or newer is required to run the pyTanks server")
return
# Check for websockets
from importlib import util
if util.find_spec("websockets") is None:
print("The websockets module is required to run the pyTanks server")
return
# Import the code that requires the above things
from serverLogic.wsServer import runServer
# Parse and apply the args
for arg in sys.argv:
if arg == sys.argv[0] or arg == "":
continue
elif arg.startswith("log="):
try:
                config.server.logLevel = int(arg.split("=", 1)[1])
except ValueError:
print("Invalid log level")
return
elif arg.startswith("minPlayers="):
try:
                num = int(arg.split("=", 1)[1])
if num <= 1:
print("minPlayers must be greater than 1")
return
config.server.minPlayers = num
except ValueError:
print("Invalid min player count")
return
elif ":" in arg:
config.server.ipAndPort = arg
else:
print(__doc__[__doc__.index("Usage:"):].strip())
return
# Start the server
runServer()
if __name__ == "__main__":
main()
| mit | 3,182,180,769,446,605,000 | 29.188406 | 112 | 0.583293 | false |
nullzero/wprobot | scripts/userfixes.py | 1 | 4673 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Fix misspelled words
"""
import init
import wp
import pywikibot
from wp import lre
def glob():
global subst
subst = lre.Subst()
subst.append(u"กฏหมาย", u"กฎหมาย")
subst.append(u"กรกฏาคม", u"กรกฎาคม")
subst.append(u"กษัตรย์", u"กษัตริย์")
subst.append(u"กิติมศักดิ์", u"กิตติมศักดิ์")
subst.append(u"ขาดดุลย์", u"ขาดดุล")
subst.append(u"คริสตศตวรรษ", u"คริสต์ศตวรรษ")
subst.append(u"คริสตศักราช", u"คริสต์ศักราช")
subst.append(u"คริสตศาสนา", u"คริสต์ศาสนา")
subst.append(u"คริสต์กาล", u"คริสตกาล")
subst.append(u"คริสต์เตียน", u"คริสเตียน")
subst.append(u"คริส(ต์)?มาส(ต์)?", u"คริสต์มาส")
subst.append(u"โครงการณ์", u"โครงการ")
subst.append(u"งบดุลย์", u"งบดุล")
subst.append(u"ซอฟท์แวร์", u"ซอฟต์แวร์")
subst.append(u"ฟัง[กค]์ชั่?น", u"ฟังก์ชัน")
subst.append(u"ภาพยนต์", u"ภาพยนตร์")
subst.append(u"ผูกพันธ์", u"ผูกพัน")
subst.append(u"ลอส ?แองเจ[นลอ]?ล[ีิ]ส", u"ลอสแอนเจลิส")
subst.append(u"ลายเซ็นต์", u"ลายเซ็น")
subst.append(u"เวคเตอร์", u"เวกเตอร์")
subst.append(u"เวท(ย์)?มนตร?์", u"เวทมนตร์")
subst.append(u"เว็?[บป]ไซ[ทต]์?", u"เว็บไซต์")
subst.append(u"เวอร์ชั่น", u"เวอร์ชัน")
subst.append(u"อินเ[ตท]อ(ร์)?เน็?[ตท]", u"อินเทอร์เน็ต")
subst.append(u"อั[พป]เด็?[ตท]", u"อัปเดต")
subst.append(u"อัพโหลด", u"อัปโหลด")
subst.append(u"(?m)^(=+) *(.*?) *(=+) *$", ur"\1 \2 \3")
subst.append(u"(?m)^= (.*?) =$", ur"== \1 ==")
subst.append(u"\[\[(:?)[Cc]ategory:", ur"[[\1หมวดหมู่:")
subst.append(u"\[\[(:?)([Ii]mage|[Ff]ile|ภาพ):", ur"[[\1ไฟล์:")
subst.append(u"(?m)^== (แหล่ง|หนังสือ|เอกสาร|แหล่งข้อมูล)อ้างอิง ==$", u"== อ้างอิง ==")
subst.append(u"(?m)^== (หัวข้ออื่นที่เกี่ยวข้อง|ดูเพิ่มที่) ==$", u"== ดูเพิ่ม ==")
subst.append(u"(?m)^== (เว็บไซต์|โยง|ลิงก์|Link *|(แหล่ง)?(ข้อมูล)?)(ภายนอก|อื่น) ==$", u"== แหล่งข้อมูลอื่น ==")
subst.append(u"(?m)^== ลิงก์ ==$", u"== แหล่งข้อมูลอื่น ==")
subst.append(u"\{\{[Rr]eflist", u"{{รายการอ้างอิง")
subst.append(ur"\[\[ *(.*?)\]\]", ur"[[\1]]")
subst.append(ur"\[\[(?!หมวดหมู่)(.*?) *\]\]", ur"[[\1]]")
subst.append(u"(?<!วัด)ทรง(เสวย|ประชวร|มีพระ|เป็นพระ|เสด็จ|บรรทม|ผนวช|ทอดพระเนตร|สวรรคต|ตรัส|โปรด|ประสูติ)", r"\1")
def fix(s):
if "nofixbot" in s:
return s
return subst.process(s)
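# Illustrative behaviour (hypothetical input): fix(u"เว็ปไซท์") is normalised
# to u"เว็บไซต์" by the substitution table above, while any text containing
# "nofixbot" is returned unchanged.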
def main():
#tl = wp.Page(u"Template:บาเบล")
for page in site.allpages(filterredir=False, content=True):
#for page in tl.embeddedin(content=True):
#for page in tl:
#page = wp.Page(u"รายชื่อวัดในจังหวัดชัยนาท")
pywikibot.output(">>>" + page.title())
text = fix(page.get())
if page.get() != text:
pywikibot.showDiff(page.get(), text)
if raw_input("...") == "y":
try:
page.put(text, u"โรบอต: เก็บกวาด", async=True)
except:
wp.error()
pass
if __name__ == "__main__":
args, site, conf = wp.pre(u"user-fixes")
try:
glob()
main()
except:
wp.posterror()
else:
wp.post()
| mit | 5,976,263,289,708,200,000 | 36.345238 | 119 | 0.52598 | false |
twoolie/ProjectNarwhal | narwhal/core/profile/admin.py | 1 | 1124 | # -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from sorl.thumbnail.admin import AdminImageMixin
from treebeard.admin import TreeAdmin
from models import Profile
class ProfileAdmin(AdminImageMixin, admin.ModelAdmin):
search_fields = ('user__username', 'extra_data')
    list_display = (  # 'user__username', 'user__date_joined',
                    'user', 'uploaded', 'downloaded',)
list_filter = ('user__is_staff',)
fields = ('user', 'avatar', 'key', 'downloaded', 'uploaded' )
#fieldsets = (
# (None, {
# 'fields': ( ('title', 'slug'),
# ('user', 'comments_enabled'),
# 'description', )
# }),
# (_('Files'), {
# 'fields': ( ('torrent', 'image'), ),
# }),
#(_('Quick Stats'), {
# 'classes': ('collapse',),
# 'fields': ( ('size', 'files'),
# ('seeders', 'leechers'),
# ('pub_date', 'comment_count'), )
#}),
#)
admin.site.register(Profile, ProfileAdmin)
| gpl-3.0 | 3,874,579,077,347,851,300 | 31.114286 | 65 | 0.508007 | false |
cappatar/knesset-data-pipelines | datapackage_pipelines_knesset/members/processors/load_members.py | 1 | 1419 | from datapackage_pipelines_knesset.common.base_processors.add_resource import AddResourceBaseProcessor
# only loads members with the following positionId:
SUPPORTED_POSITION_IDS = [43, 61]
class Processor(AddResourceBaseProcessor):
def _get_schema(self, resource_descriptor):
return resource_descriptor.get("schema", {
"fields": [
{"name": "url", "type": "string", "description": "url to download protocol from"},
{
"name": "kns_person_id", "type": "integer",
"description": "primary key from kns_person table"}
],
"primaryKey": ["kns_person_id"]
})
def _get_new_resource(self):
person_table = self.db_meta.tables.get("kns_person")
persontoposition_table = self.db_meta.tables.get("kns_persontoposition")
if person_table is None or persontoposition_table is None:
raise Exception("processor requires kns person tables to exist")
for db_row in self.db_session\
.query(person_table, persontoposition_table)\
            .filter(persontoposition_table.c.PersonID==person_table.c.PersonID)\
            .filter(persontoposition_table.c.PositionID.in_(SUPPORTED_POSITION_IDS))\
.all():
row = db_row._asdict()
yield {"kns_person_id": row["PersonID"]}
if __name__ == "__main__":
Processor.main()
| mit | -2,604,610,188,792,806,400 | 40.735294 | 102 | 0.613813 | false |
warriorframework/warriorframework | warrior/WarriorCore/iterative_parallel_kw_driver.py | 1 | 4275 | '''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
#!/usr/bin/python
"""This is iterative parallel keyword driver which is used to execute
the keywords of a testcase in parallel where data_type = iterative"""
import traceback
from collections import OrderedDict
import WarriorCore.testcase_steps_execution as testcase_steps_execution
import Framework.Utils as Utils
from Framework.Utils.print_Utils import print_debug, print_error
from WarriorCore.multiprocessing_utils import create_and_start_process_with_queue, \
get_results_from_queue, update_tc_junit_resultfile
from Framework.Utils import testcase_Utils
def execute_iterative_parallel(step_list, data_repository, tc_status, system_list):
"""Takes a list of steps as input and executes them in parallel by
creating separate process of step_driver for each of these steps """
jobs_list = []
output_q = None
for system_name in system_list:
target_module = testcase_steps_execution.main
#args_list = [step_list, data_repository, system_name, True]
args_dict = OrderedDict([("step_list", step_list),
("data_repository", data_repository),
("system_name", system_name),
("kw_parallel", True),
("output_q", output_q),
])
process, jobs_list, output_q = create_and_start_process_with_queue(target_module, args_dict,
jobs_list, output_q)
print_debug("process: {0}".format(process))
for job in jobs_list:
job.join()
result_list = get_results_from_queue(output_q)
system_status_list = []
system_resultfile_list = []
step_impact_list = []
tc_junit_list = []
for result in result_list:
step_status_list = result[0]
kw_resultfile_list = result[1]
system_name = result[2]
step_impact_list = result[3]
tc_junit_list.append(result[4])
system_status = testcase_Utils.compute_status_using_impact(step_status_list,
step_impact_list)
system_resultfile = testcase_Utils.compute_system_resultfile(kw_resultfile_list,
data_repository['wt_resultsdir'],
system_name)
system_status_list.append(system_status)
system_resultfile_list.append(system_resultfile)
tc_status = Utils.testcase_Utils.compute_status_without_impact(system_status_list)
    # parallel keywords generate multiple keyword junit result files, each
    # logging the result for one keyword; they are not yet integrated, so
    # update the testcase junit result file with the individual keyword files
data_repository['wt_junit_object'] = update_tc_junit_resultfile(data_repository['wt_junit_object'], tc_junit_list, data_repository['wt_tc_timestamp'])
print_debug("Updating Testcase result file...")
Utils.testcase_Utils.append_result_files(data_repository['wt_resultfile'], system_resultfile_list)
return tc_status
def main(step_list, data_repository, tc_status, system_list):
"""Executes the list of keyword in iterative parallel fashion
Computes and returns the testcase status"""
try:
testcase_status = execute_iterative_parallel(step_list, data_repository,
tc_status, system_list)
except Exception:
testcase_status = False
print_error('unexpected error {0}'.format(traceback.format_exc()))
return testcase_status
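# Minimal sketch of how this driver is typically invoked (names illustrative;
# the real caller is Warrior's testcase driver):
#
#   status = main(step_list, data_repository, tc_status=True,
#                 system_list=["system_1", "system_2"])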
| apache-2.0 | 7,122,337,398,566,753,000 | 44.478723 | 154 | 0.643743 | false |
valentinmetraux/hierophis | hierophis/maths/statistics/basic.py | 1 | 2080 | #!/usr/bin/env python
# -*- coding: utf 8 -*-
"""
Utility functions.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import numpy as np
import scipy.signal
def rms(a):
"""
Calculates the RMS of an array.
:param a: An array.
:returns: The RMS of the array.
"""
return np.sqrt(np.sum(a**2.0)/a.size)
def normalize(a, new_min=0.0, new_max=1.0):
"""
Normalize an array to [0,1] or to
arbitrary new min and max.
:param a: An array.
:param new_min: A float to be the new min, default 0.
:param new_max: A float to be the new max, default 1.
:returns: The normalized array.
"""
n = (a - np.amin(a)) / float(np.amax(a - np.amin(a)))
return n * (new_max - new_min) + new_min
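# Quick sanity checks (illustrative): normalize(np.array([0., 5., 10.]))
# gives array([0., 0.5, 1.]), and rms(np.array([3., 4.])) ~= 3.5355.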
def moving_average(a, method="convolve", length=9, mode='valid'):
    """
    Computes the mean in a moving window.
    Methods: naive (default branch below), fft, convolve
    Length: kernel length (window size)
    Modes: full, valid, same (honoured by the naive method; fft and
    convolve always use 'valid')
    """
if method == "fft":
boxcar = np.ones(length)/length
return scipy.signal.fftconvolve(a, boxcar, mode="valid")
elif method == "convolve":
boxcar = np.ones(length)/length
return np.convolve(a, boxcar, mode="valid")
else:
        pad = int(length // 2)  # integer pad size: float indices would break the slicing below
if mode == 'full':
pad *= 2
# Make a padded version, paddding with first and last values
r = np.empty(a.shape[0] + 2*pad)
r[:pad] = a[0]
r[pad:-pad] = a
r[-pad:] = a[-1]
# Cumsum with shifting trick
s = np.cumsum(r, dtype=float)
s[length:] = s[length:] - s[:-length]
out = s[length-1:]/length
# Decide what to return
if mode == 'same':
if out.shape[0] != a.shape[0]:
# If size doesn't match, then interpolate.
out = (out[:-1, ...] + out[1:, ...]) / 2
return out
elif mode == 'valid':
return out[pad:-pad]
else: # mode=='full' and we used a double pad
return out
| apache-2.0 | -7,067,858,668,146,052,000 | 23.77381 | 68 | 0.533173 | false |
all-of-us/raw-data-repository | tests/api_tests/test_ppi_data_check_api.py | 1 | 2788 | from rdr_service.model.code import CodeType
from tests.helpers.unittest_base import BaseTestCase
class CheckPpiDataApiTest(BaseTestCase):
def setUp(self):
super(CheckPpiDataApiTest, self).setUp(with_consent_codes=True)
self.participant_summary = self.data_generator.create_database_participant_summary(email='[email protected]')
questions_and_answers = [
('first_question_code', 'first_answer_code'),
('Second_CODE', 'ANOTHER_ANSWER'),
('LAST_CODE', 'Final_Answer|with_additional_option')
]
questionnaire = self.data_generator.create_database_questionnaire_history()
for question_code_value, _ in questions_and_answers:
question_code = self.data_generator.create_database_code(
value=question_code_value,
codeType=CodeType.QUESTION
)
self.data_generator.create_database_questionnaire_question(
questionnaireId=questionnaire.questionnaireId,
questionnaireVersion=questionnaire.version,
codeId=question_code.codeId
)
questionnaire_response = self.data_generator.create_database_questionnaire_response(
participantId=self.participant_summary.participantId,
questionnaireId=questionnaire.questionnaireId,
questionnaireVersion=questionnaire.version
)
for question_index, (_, answer_code_values) in enumerate(questions_and_answers):
question = questionnaire.questions[question_index]
for answer_value in answer_code_values.split('|'):
answer_code = self.data_generator.create_database_code(value=answer_value)
self.data_generator.create_database_questionnaire_response_answer(
questionnaireResponseId=questionnaire_response.questionnaireResponseId,
questionId=question.questionnaireQuestionId,
valueCodeId=answer_code.codeId
)
def test_case_insensitive_answer_code_matching(self):
"""Make sure case doesn't matter when matching answer codes against what the server has"""
ppi_check_payload = {
'ppi_data': {
self.participant_summary.email: {
'fIrSt_QuEsTiOn_CoDe': 'First_Answer_Code',
'SECOND_CODE': 'another_answer',
'last_code': 'Final_ANSWER|WITH_ADDITIONAL_OPTION'
}
}
}
response = self.send_post('CheckPpiData', ppi_check_payload)
response_error_count = response['ppi_results']['[email protected]']['errors_count']
self.assertEqual(0, response_error_count, 'Differences in case should not cause errors')
| bsd-3-clause | -8,436,569,744,233,131,000 | 45.466667 | 116 | 0.638451 | false |
wooga/airflow | airflow/example_dags/example_nested_branch_dag.py | 1 | 2019 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG demonstrating a workflow with nested branching. The join tasks are created with
``none_failed_or_skipped`` trigger rule such that they are skipped whenever their corresponding
``BranchPythonOperator`` are skipped.
"""
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import BranchPythonOperator
from airflow.utils.dates import days_ago
with DAG(dag_id="example_nested_branch_dag", start_date=days_ago(2), schedule_interval="@daily") as dag:
branch_1 = BranchPythonOperator(task_id="branch_1", python_callable=lambda: "true_1")
join_1 = DummyOperator(task_id="join_1", trigger_rule="none_failed_or_skipped")
true_1 = DummyOperator(task_id="true_1")
false_1 = DummyOperator(task_id="false_1")
branch_2 = BranchPythonOperator(task_id="branch_2", python_callable=lambda: "true_2")
join_2 = DummyOperator(task_id="join_2", trigger_rule="none_failed_or_skipped")
true_2 = DummyOperator(task_id="true_2")
false_2 = DummyOperator(task_id="false_2")
false_3 = DummyOperator(task_id="false_3")
branch_1 >> true_1 >> join_1
branch_1 >> false_1 >> branch_2 >> [true_2, false_2] >> join_2 >> false_3 >> join_1
| apache-2.0 | -1,070,214,598,085,398,900 | 47.071429 | 104 | 0.740961 | false |
mojolab/LivingData | lib/livdatops.py | 1 | 1153 | import pandas
def getColRenameDict(mergersheet, sheet):
    # Build {original column name -> new column name} for one sheet: the
    # merger sheet's column named after the sheet holds the original names,
    # and its first column holds the target names.
    colrenamedict = {}
    originalcolnames = mergersheet[sheet].fillna("NA")
    newcolnames = mergersheet[mergersheet.columns[0]]
    for i in range(0, len(originalcolnames)):
        colrenamedict[originalcolnames[i]] = newcolnames[i]
    return colrenamedict


def createMergedDFList(dflist, mergersheetname):
    # Look up the merger sheet first so the iteration order of dflist cannot
    # matter (previously it had to be seen before any data sheet).
    mergersheet = dflist[mergersheetname]
    altereddfs = {}
    for sheet, matrix in dflist.iteritems():
        if sheet == mergersheetname:
            altereddfs[sheet] = matrix
        else:
            df = matrix
            print df.columns
            columnrenamedict = getColRenameDict(mergersheet, sheet)
            print columnrenamedict
            altereddf = df.rename(columns=columnrenamedict)
            # Columns marked "NA" in the merger sheet do not exist in this
            # sheet; create them filled with 0 so concatenation aligns.
            for key, value in columnrenamedict.iteritems():
                if key == "NA":
                    altereddf[value] = 0
            print df, altereddf
            altereddfs[sheet] = altereddf
    finalsheet = []
    for sheet, matrix in altereddfs.iteritems():
        if sheet != mergersheetname:
            finalsheet.append(matrix.fillna(0))
    finalsheetm = pandas.concat(finalsheet)
    finalsheetname = mergersheet.columns.values[0]
    altereddfs[finalsheetname] = finalsheetm
    return altereddfs
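# Illustrative call (hypothetical sheet names): given a dict of DataFrames
# {"merger": merger_df, "A": df_a, "B": df_b} where "merger" maps each
# sheet's column names onto a common set, createMergedDFList(dfs, "merger")
# returns the renamed sheets plus one concatenated sheet named after the
# merger sheet's first column.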
| apache-2.0 | -1,906,478,291,639,418,000 | 30.162162 | 55 | 0.774501 | false |
CSC-IT-Center-for-Science/pouta-blueprints | pebbles/views/authorize_instances.py | 1 | 3153 | from flask import abort, request, Response, Blueprint
import datetime
import logging
import re
from pebbles.models import InstanceToken
from pebbles.server import restful
authorize_instances = Blueprint('authorize_instances', __name__)
class AuthorizeInstancesView(restful.Resource):
def get(self):
token = ''
instance_id = ''
        # The idea here is to check if the original-token and instance-id headers are already present,
        # sent by the nginx proxy of the openshift app. If they are, authentication has taken place
        # previously and a cookie exists for the openshift app; in that case, use the info from the headers.
if 'ORIGINAL-TOKEN' in request.headers and 'INSTANCE-ID' in request.headers:
token = request.headers['ORIGINAL-TOKEN']
instance_id = request.headers['INSTANCE-ID']
        # Otherwise, the x-original-uri header carries the query string info (sent by the openshift
        # driver to the nginx of the openshift app). The query string holds the token and instance id.
        # NOTE: This path is only taken when authentication is being done for the first time!
elif 'X-ORIGINAL-URI' in request.headers:
h_uri = request.headers['X-ORIGINAL-URI']
regex_query_capture = re.search('.*\\?(.*)=(.*)&(.*)=(.*)', h_uri) # parse the query string
if regex_query_capture and len(regex_query_capture.groups()) == 4:
if regex_query_capture.group(1) == 'token' and regex_query_capture.group(3) == 'instance_id':
token = regex_query_capture.group(2)
instance_id = regex_query_capture.group(4)
elif regex_query_capture.group(1) == 'instance_id' and regex_query_capture.group(3) == 'token':
instance_id = regex_query_capture.group(2)
token = regex_query_capture.group(4)
if not token and not instance_id:
logging.warn('No instance token or id found from the headers')
return abort(401)
instance_token_obj = InstanceToken.query.filter_by(token=token).first()
if not instance_token_obj:
logging.warn("instance token object %s not found" % token)
return abort(401)
curr_time = datetime.datetime.utcnow()
expires_on = instance_token_obj.expires_on
if curr_time > expires_on:
logging.warn("instance token %s has expired" % token)
return abort(403)
if instance_token_obj.instance_id != instance_id:
logging.warn("instance id %s from the token does not match the instance_id %s passed as a parameter" % (instance_token_obj.instance_id, instance_id))
return abort(403)
resp = Response("Authorized")
# send the headers back to nginx proxy running on the openshift based instance,
# which is going to store it as a cookie for the next time, the authorization takes place
resp.headers["TOKEN"] = instance_token_obj.token
resp.headers["INSTANCE-ID"] = instance_id
return resp
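# Deployment note (assumption, not enforced here): this resource is meant to
# back an nginx "auth_request" location on the instance proxy, e.g.
#
#   location = /auth { proxy_pass http://pebbles-api/authorize_instances; }
#
# so the 401/403 responses above translate directly into proxy denials, while
# the TOKEN / INSTANCE-ID response headers are stored as cookies for reuse.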
| mit | 2,302,791,649,978,159,000 | 49.047619 | 161 | 0.64732 | false |
weiqiangdragonite/blog_tmp | python/baidu/myip.py | 1 | 1085 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# http://ip.taobao.com/instructions.php
import socket
# use "Connection: close" so the recv loop in main() terminates once the
# server has sent the full response (with Keep-Alive it could hang)
common_headers = \
    "Host: ip.taobao.com\r\n" + \
    "Connection: close\r\n" + \
    "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + \
    "User-Agent: Mozilla/5.0 (X11; Linux) AppleWebKit/538.1 (KHTML, like Gecko) Chrome/18.0.1025.133 Safari/538.1 Midori/0.5\r\n" + \
    "Accept-Language: en-us;q=0.750\r\n"
# via GET
get_headers = \
    "GET /service/getIpInfo.php?ip=myip HTTP/1.1\r\n" + \
    common_headers + \
    "\r\n"
# via POST
post_headers = \
    "POST /service/getIpInfo2.php HTTP/1.1\r\n" + \
    common_headers + \
    "Content-Length: 7\r\n" + \
    "\r\n" + \
    "ip=myip"
if __name__ == "__main__":
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("ip.taobao.com", 80))
s.send(get_headers)
buffer = []
while True:
d = s.recv(1024)
if d:
buffer.append(d)
else:
break
data = ''.join(buffer)
s.close()
print data | gpl-2.0 | -721,930,583,678,637,200 | 21.93617 | 133 | 0.563603 | false |
dhanababum/accessdb | accessdb/utils.py | 1 | 8395 | # -*- coding: utf-8 -*-
# Copyright 2017 Dhana Babu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import string
import tempfile
import shutil
import pypyodbc as odbc
from .access_api import create
_MS_ACCESS_TYPES = {
'BIT',
'BYTE',
'SHORT',
'LONG',
'CURRENCY',
'SINGLE',
'DOUBLE',
'DATETIME',
'TEXT',
'MEMO',
'PRIMARY', # CUSTOM Type for handling AUTOINCREMENT
}
SCHEMA_FILE = 'schema.ini'
_TEXT_SEPARATORS = {
r',': 'CSVDelimited',
r'\t': 'TabDelimited'
}
def _text_formater(sep):
separator = _TEXT_SEPARATORS.get(sep, 'Delimited({})')
return separator.format(sep)
def _stringify_path(db_path):
dtr, path = os.path.split(db_path)
if dtr == '':
db_path = os.path.join('.', path)
return db_path
def _push_access_db(temp_dir, text_file, data_columns,
header_columns, dtype, path, table_name, sep,
append, overwrite, delete='file'):
table = Table(temp_dir, text_file,
table_name,
data_columns,
header_columns,
dtype, sep, append)
schema_file = os.path.join(temp_dir, SCHEMA_FILE)
try:
with SchemaWriter(temp_dir, text_file, data_columns,
header_columns, dtype, sep, schema_file) as schema:
schema.write()
with AccessDBConnection(path, overwrite) as con:
cursor = con.cursor()
if not append:
cursor.execute(table.create_query())
cursor.execute(table.insert_query())
con.commit()
finally:
if delete == 'folder':
shutil.rmtree(temp_dir)
else:
os.unlink(schema_file)
def _get_random_file():
return ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
class DataTypeNotFound(Exception):
pass
class SchemaWriter(object):
def __init__(self, temp_dir, text_file, df_columns,
columns, dtype, sep, schema_file):
self.temp_dir = temp_dir
self.text_file = text_file
self.df_columns = df_columns
self.columns = columns
self.dtype = dtype
self.sep = sep
self.path = schema_file
def __enter__(self):
self.fp = open(self.path, 'w')
return self
def __exit__(self, *args):
self.fp.close()
def formater(self):
yield '[%s]' % self.text_file
yield 'ColNameHeader=True'
yield 'Format=%s' % _text_formater(self.sep)
self.dcols = {col: ('Col%s' % (i + 1))
for i, col in enumerate(self.df_columns)}
if not isinstance(self.dtype, dict):
self.dtype = {}
for col in self.df_columns:
ctype = self.dtype.get(col, 'text').upper()
if ctype not in _MS_ACCESS_TYPES:
raise DataTypeNotFound(
'Provided Data Type Not Found %s' % ctype)
if ctype == 'PRIMARY':
ctype = 'TEXT'
yield '{c_col}="{d_col}" {c_type}'.format(
c_col=self.dcols[col],
d_col=col,
c_type=ctype.capitalize())
def write(self):
for line in self.formater():
self.fp.write(line)
self.fp.write('\n')
class Table(object):
def __init__(self, temp_dir, text_file,
table_name, df_columns, columns,
dtype, sep, append):
self.temp_dir = temp_dir
self.text_file = text_file
self.df_columns = df_columns
self.table_name = table_name
self.df_columns = df_columns
self.columns = columns
self.dtype = dtype
self.sep = sep
self.append = append
if not isinstance(self.dtype, dict):
self.dtype = {}
    def _get_column_type(self, col):
        ctype = self.dtype.get(col, 'TEXT').upper()
        if ctype not in _MS_ACCESS_TYPES:
            raise DataTypeNotFound(
                'Provided Data Type Not Found %s' % ctype)
        return ctype
def formater(self):
for col in self.df_columns:
            c_type = self._get_column_type(col)
if c_type == 'PRIMARY':
c_type = 'AUTOINCREMENT PRIMARY KEY'
if self.columns:
if col not in self.columns:
continue
col = self.columns[col]
yield '`{c_col}` {c_type}'.format(c_col=col,
c_type=c_type)
def insert_formater(self):
for col in self.df_columns:
            if self._get_column_type(col) == 'PRIMARY':
continue
if not self.columns:
self.columns = dict(zip(self.df_columns, self.df_columns))
if self.columns:
if col not in self.columns:
continue
cus_col = self.columns[col]
yield col, cus_col
def built_columns(self):
return '(%s)' % ','.join(self.formater())
def create_query(self):
return "CREATE TABLE `{table_name}`{columns}".format(
table_name=self.table_name,
columns=self.built_columns())
@staticmethod
def required_columns(cols):
return ','.join('`%s`' % c for c in cols)
def insert_query(self):
custom_columns = []
columns = []
for col1, col2 in self.insert_formater():
columns.append(col1)
custom_columns.append(col2)
return """
INSERT INTO `{table_name}`({columns})
SELECT {required_cols} FROM [TEXT;HDR=YES;FMT={separator};
Database={temp_dir}].{text_file}
""".format(temp_dir=self.temp_dir,
text_file=self.text_file,
columns=self.required_columns(custom_columns),
required_cols=self.required_columns(columns),
table_name=self.table_name,
separator=_text_formater(self.sep))
class AccessDBConnection(object):
def __init__(self, db_path, overwrite):
self.overwrite = overwrite
self.db_path = _stringify_path(db_path)
def __enter__(self):
if not os.path.isfile(self.db_path) or self.overwrite:
create(self.db_path)
odbc_conn_str = '''DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};
DBQ=%s''' % (self.db_path)
self.con = odbc.connect(odbc_conn_str)
return self.con
def __exit__(self, *args):
self.con.close()
def to_accessdb(self, path, table_name,
header_columns=None, dtype='str', engine='text',
sep=',', append=False, overwrite=False):
if self.empty:
return
temp_dir = tempfile.mkdtemp()
text_file = '%s.txt' % _get_random_file()
text_path = os.path.join(temp_dir, text_file)
self.to_csv(text_path, index=False)
_push_access_db(temp_dir, text_file,
self.columns.tolist(),
header_columns, dtype, path, table_name,
sep, append, overwrite, 'folder')
def create_accessdb(path, text_path, table_name,
header_columns=None, dtype='str',
engine='text', sep=',', append=False, overwrite=False):
temp_dir, text_file = os.path.split(os.path.abspath(text_path))
with open(text_path) as fp:
file_columns = fp.readline().strip('\n').split(sep)
_push_access_db(temp_dir, text_file,
file_columns,
header_columns, dtype, path, table_name,
sep, append, overwrite)
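# Illustrative usage (hypothetical paths and columns). `to_accessdb` above is
# written as a DataFrame-style method and is presumably attached to pandas by
# the package, e.g.:
#
#   import pandas as pd
#   pd.DataFrame.to_accessdb = to_accessdb
#   df = pd.DataFrame({"name": ["a", "b"], "score": [1, 2]})
#   df.to_accessdb("out.accdb", "results", dtype={"score": "LONG"})
#
#   # or load an existing delimited text file directly:
#   create_accessdb("out.accdb", "data.csv", "results", sep=",")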
| apache-2.0 | 6,640,736,085,775,844,000 | 31.921569 | 79 | 0.534485 | false |
jimsize/PySolFC | pysollib/games/harp.py | 1 | 13061 | #!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
# imports
# PySol imports
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.mfxutil import kwdefault
from pysollib.game import Game
from pysollib.layout import Layout
from pysollib.hint import CautiousDefaultHint
from pysollib.hint import KlondikeType_Hint
from pysollib.games.spider import Spider_RowStack, Spider_SS_Foundation, \
Spider_Hint
from pysollib.util import ACE, KING
from pysollib.stack import \
AC_RowStack, \
BO_RowStack, \
KingAC_RowStack, \
SS_FoundationStack, \
Spider_SS_RowStack, \
StackWrapper, \
WasteStack, \
WasteTalonStack, \
SS_RowStack
# ************************************************************************
# * Double Klondike (Klondike with 2 decks and 9 rows)
# ************************************************************************
class DoubleKlondike(Game):
Layout_Method = staticmethod(Layout.harpLayout)
Foundation_Class = SS_FoundationStack
RowStack_Class = KingAC_RowStack
Hint_Class = KlondikeType_Hint
def createGame(self, max_rounds=-1, num_deal=1, **layout):
# create layout
l, s = Layout(self), self.s
kwdefault(layout, rows=9, waste=1, texts=1, playcards=19)
self.Layout_Method(l, **layout)
self.setSize(l.size[0], l.size[1])
# create stacks
s.talon = WasteTalonStack(l.s.talon.x, l.s.talon.y, self,
max_rounds=max_rounds, num_deal=num_deal)
s.waste = WasteStack(l.s.waste.x, l.s.waste.y, self)
for r in l.s.foundations:
s.foundations.append(
self.Foundation_Class(r.x, r.y, self, suit=r.suit))
for r in l.s.rows:
s.rows.append(self.RowStack_Class(r.x, r.y, self))
# default
l.defaultAll()
# extra
if max_rounds > 1:
anchor = 'nn'
if layout.get("texts"):
anchor = 'nnn'
l.createRoundText(s.talon, anchor)
return l
def startGame(self, flip=0):
for i in range(len(self.s.rows)):
self.s.talon.dealRow(rows=self.s.rows[i+1:], flip=flip, frames=0)
self._startAndDealRowAndCards()
shallHighlightMatch = Game._shallHighlightMatch_AC
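# The variants below differ only in row count, redeal count and stack
# classes; e.g. a hypothetical 11-row, two-redeal variant would just be:
#
#   class ElevenHarp(DoubleKlondike):
#       def createGame(self):
#           DoubleKlondike.createGame(self, rows=11, max_rounds=2)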
# ************************************************************************
# * Double Klondike by Threes
# ************************************************************************
class DoubleKlondikeByThrees(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, num_deal=3)
# ************************************************************************
# * Gargantua (Double Klondike with one redeal)
# * Pantagruel
# ************************************************************************
class Gargantua(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=2)
class Pantagruel(DoubleKlondike):
RowStack_Class = AC_RowStack
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=1)
# ************************************************************************
# * Harp (Double Klondike with 10 non-king rows and no redeal)
# ************************************************************************
class BigHarp(DoubleKlondike):
RowStack_Class = AC_RowStack
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=1, rows=10)
#
# game overrides
#
# no real need to override, but this way the layout
# looks a little bit different
def startGame(self):
for i in range(len(self.s.rows)):
self.s.talon.dealRow(rows=self.s.rows[:i], flip=0, frames=0)
self._startAndDealRowAndCards()
# ************************************************************************
# * Steps (Harp with 7 rows)
# ************************************************************************
class Steps(DoubleKlondike):
RowStack_Class = AC_RowStack
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=2, rows=7)
# ************************************************************************
# * Triple Klondike
# * Triple Klondike by Threes
# * Chinese Klondike
# ************************************************************************
class TripleKlondike(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, rows=13)
class TripleKlondikeByThrees(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, rows=13, num_deal=3)
class ChineseKlondike(DoubleKlondike):
RowStack_Class = StackWrapper(BO_RowStack, base_rank=KING)
def createGame(self):
DoubleKlondike.createGame(self, rows=12)
# ************************************************************************
# * Lady Jane
# * Inquisitor
# ************************************************************************
class LadyJane(DoubleKlondike):
Hint_Class = Spider_Hint
RowStack_Class = Spider_SS_RowStack
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=2, num_deal=3)
def startGame(self):
DoubleKlondike.startGame(self, flip=1)
shallHighlightMatch = Game._shallHighlightMatch_RK
getQuickPlayScore = Game._getSpiderQuickPlayScore
class Inquisitor(DoubleKlondike):
RowStack_Class = SS_RowStack
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=3, num_deal=3)
def startGame(self):
DoubleKlondike.startGame(self, flip=1)
shallHighlightMatch = Game._shallHighlightMatch_SS
# ************************************************************************
# * Arabella
# ************************************************************************
class Arabella(DoubleKlondike):
Hint_Class = Spider_Hint
RowStack_Class = StackWrapper(Spider_SS_RowStack, base_rank=KING)
def createGame(self):
DoubleKlondike.createGame(self, rows=13, max_rounds=1, playcards=24)
def startGame(self):
DoubleKlondike.startGame(self, flip=1)
shallHighlightMatch = Game._shallHighlightMatch_RK
getQuickPlayScore = Game._getSpiderQuickPlayScore
# ************************************************************************
# * Big Deal
# ************************************************************************
class BigDeal(DoubleKlondike):
RowStack_Class = KingAC_RowStack
def createGame(self, rows=12, max_rounds=2, XOFFSET=0):
l, s = Layout(self), self.s
self.setSize(l.XM+(rows+2)*l.XS, l.YM+8*l.YS)
x, y = l.XM, l.YM
for i in range(rows):
s.rows.append(self.RowStack_Class(x, y, self))
x += l.XS
for i in range(2):
y = l.YM
for j in range(8):
s.foundations.append(
SS_FoundationStack(x, y, self, suit=j % 4))
y += l.YS
x += l.XS
x, y = l.XM, self.height-l.YS
s.talon = WasteTalonStack(x, y, self, max_rounds=max_rounds)
l.createText(s.talon, 'n')
x += l.XS
s.waste = WasteStack(x, y, self)
s.waste.CARD_XOFFSET = XOFFSET
l.createText(s.waste, 'n')
if max_rounds > 1:
l.createRoundText(s.talon, 'nnn')
self.setRegion(s.rows, (-999, -999, l.XM+rows*l.XS-l.CW//2, 999999),
priority=1)
l.defaultStackGroups()
# ************************************************************************
# * Delivery
# ************************************************************************
class Delivery(BigDeal):
Hint_Class = CautiousDefaultHint
RowStack_Class = StackWrapper(SS_RowStack, max_move=1)
def createGame(self):
dx = self.app.images.CARDW//10
BigDeal.createGame(self, rows=12, max_rounds=1, XOFFSET=dx)
shallHighlightMatch = Game._shallHighlightMatch_SS
def startGame(self):
self._startDealNumRows(2)
self.s.talon.dealRow()
self.s.talon.dealCards() # deal first card to WasteStack
# ************************************************************************
# * Double Kingsley
# ************************************************************************
class DoubleKingsley(DoubleKlondike):
Foundation_Class = StackWrapper(SS_FoundationStack, base_rank=KING, dir=-1)
RowStack_Class = StackWrapper(KingAC_RowStack, base_rank=ACE, dir=1)
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=1)
# ************************************************************************
# * Thieves of Egypt
# ************************************************************************
class ThievesOfEgypt(DoubleKlondike):
Layout_Method = staticmethod(Layout.klondikeLayout)
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=2)
def startGame(self):
# rows: 1 3 5 7 9 10 8 6 4 2
row = 0
for i in (0, 2, 4, 6, 8, 9, 7, 5, 3, 1):
for j in range(i):
self.s.talon.dealRow(rows=[self.s.rows[row]], frames=0)
row += 1
self._startAndDealRowAndCards()
# ************************************************************************
# * Brush
# ************************************************************************
class Brush(DoubleKlondike):
Layout_Method = staticmethod(Layout.klondikeLayout)
Foundation_Class = Spider_SS_Foundation
RowStack_Class = Spider_RowStack
Hint_Class = Spider_Hint
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=1)
def startGame(self):
self._startDealNumRows(3)
self.s.talon.dealRow()
self.s.talon.dealCards() # deal first card to WasteStack
shallHighlightMatch = Game._shallHighlightMatch_RK
getQuickPlayScore = Game._getSpiderQuickPlayScore
# register the game
registerGame(GameInfo(21, DoubleKlondike, "Double Klondike",
GI.GT_KLONDIKE, 2, -1, GI.SL_BALANCED))
registerGame(GameInfo(28, DoubleKlondikeByThrees, "Double Klondike by Threes",
GI.GT_KLONDIKE, 2, -1, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(25, Gargantua, "Gargantua",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(15, BigHarp, "Big Harp",
GI.GT_KLONDIKE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(51, Steps, "Steps",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(273, TripleKlondike, "Triple Klondike",
GI.GT_KLONDIKE, 3, -1, GI.SL_BALANCED))
registerGame(GameInfo(274, TripleKlondikeByThrees, "Triple Klondike by Threes",
GI.GT_KLONDIKE, 3, -1, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(495, LadyJane, "Lady Jane",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(496, Inquisitor, "Inquisitor",
GI.GT_KLONDIKE, 2, 2, GI.SL_BALANCED))
registerGame(GameInfo(497, Arabella, "Arabella",
GI.GT_KLONDIKE, 3, 0, GI.SL_BALANCED))
registerGame(GameInfo(545, BigDeal, "Big Deal",
GI.GT_KLONDIKE | GI.GT_ORIGINAL, 4, 1, GI.SL_BALANCED))
registerGame(GameInfo(562, Delivery, "Delivery",
GI.GT_FORTY_THIEVES | GI.GT_ORIGINAL, 4, 0,
GI.SL_BALANCED))
registerGame(GameInfo(590, ChineseKlondike, "Chinese Klondike",
GI.GT_KLONDIKE, 3, -1, GI.SL_BALANCED,
suits=(0, 1, 2)))
registerGame(GameInfo(591, Pantagruel, "Pantagruel",
GI.GT_KLONDIKE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(668, DoubleKingsley, "Double Kingsley",
GI.GT_KLONDIKE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(678, ThievesOfEgypt, "Thieves of Egypt",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(689, Brush, "Brush",
GI.GT_2DECK_TYPE | GI.GT_ORIGINAL, 2, 0,
GI.SL_MOSTLY_SKILL))
| gpl-3.0 | 6,819,752,331,048,072,000 | 34.3 | 79 | 0.537861 | false |
iafan/zing | tests/forms/project.py | 1 | 1228 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
# Copyright (C) Zing contributors.
#
# This file is a part of the Zing project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from pootle_app.forms import ProjectForm
from pootle_project.models import PROJECT_CHECKERS, RESERVED_PROJECT_CODES
@pytest.mark.parametrize('reserved_code', RESERVED_PROJECT_CODES)
@pytest.mark.django_db
def test_clean_code_invalid(reserved_code):
form_data = {
'code': reserved_code,
        'checkstyle': list(PROJECT_CHECKERS.keys())[0],
'fullname': 'Foo',
'source_language': 1,
}
form = ProjectForm(form_data)
assert not form.is_valid()
assert 'code' in form.errors
assert len(form.errors.keys()) == 1
@pytest.mark.django_db
def test_clean_code_blank_invalid():
form_data = {
'code': ' ',
        'checkstyle': list(PROJECT_CHECKERS.keys())[0],
'fullname': 'Foo',
'source_language': 1,
}
form = ProjectForm(form_data)
assert not form.is_valid()
assert 'code' in form.errors
assert len(form.errors.keys()) == 1
| gpl-3.0 | -8,058,345,137,155,454,000 | 28.238095 | 75 | 0.663681 | false |
etherkit/OpenBeacon2 | client/linux-arm/venv/lib/python3.6/site-packages/PyInstaller/hooks/hook-gi.repository.GdkPixbuf.py | 1 | 6760 | #-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Import hook for PyGObject's "gi.repository.GdkPixbuf" package.
"""
import glob
import os
import subprocess
from PyInstaller.config import CONF
from PyInstaller.compat import (
exec_command_stdout, is_darwin, is_win, is_linux, open_file, which)
from PyInstaller.utils.hooks import (
collect_glib_translations, get_gi_typelibs, get_gi_libdir, logger)
loaders_path = os.path.join('gdk-pixbuf-2.0', '2.10.0', 'loaders')
destpath = "lib/gdk-pixbuf-2.0/2.10.0/loaders"
cachedest = "lib/gdk-pixbuf-2.0/2.10.0"
# If the "gdk-pixbuf-query-loaders" command is not in the current ${PATH}, or
# is not in the GI lib path, GDK and thus GdkPixbuf is unavailable. Return with
# a non-fatal warning.
gdk_pixbuf_query_loaders = None
try:
libdir = get_gi_libdir('GdkPixbuf', '2.0')
except ValueError:
logger.warning(
'"hook-gi.repository.GdkPixbuf" ignored, '
'since GdkPixbuf library not found'
)
libdir = None
if libdir:
# Distributions either package gdk-pixbuf-query-loaders in the GI libs
# directory (not on the path), or on the path with or without a -x64 suffix
# depending on the architecture
cmds = [
os.path.join(libdir, 'gdk-pixbuf-2.0/gdk-pixbuf-query-loaders'),
'gdk-pixbuf-query-loaders-64',
'gdk-pixbuf-query-loaders',
]
for cmd in cmds:
gdk_pixbuf_query_loaders = which(cmd)
if gdk_pixbuf_query_loaders is not None:
break
if gdk_pixbuf_query_loaders is None:
logger.warning(
'"hook-gi.repository.GdkPixbuf" ignored, since '
'"gdk-pixbuf-query-loaders" is not in $PATH or gi lib dir.'
)
# Else, GDK is available. Let's do this.
else:
binaries, datas, hiddenimports = get_gi_typelibs('GdkPixbuf', '2.0')
datas += collect_glib_translations('gdk-pixbuf')
# To add support for a new platform, add a new "elif" branch below with
# the proper is_<platform>() test and glob for finding loaders on that
# platform.
    if is_win:
        ext = "*.dll"
    elif is_darwin or is_linux:
        ext = "*.so"
    else:
        # No loader glob known for this platform; leave "ext" unset-equivalent
        # so the "if ext:" check below skips loader bundling instead of
        # raising a NameError.
        ext = None
# If loader detection is supported on this platform, bundle all
# detected loaders and an updated loader cache.
if ext:
loader_libs = []
# Bundle all found loaders with this user application.
pattern = os.path.join(libdir, loaders_path, ext)
for f in glob.glob(pattern):
binaries.append((f, destpath))
loader_libs.append(f)
# Sometimes the loaders are stored in a different directory from
# the library (msys2)
if not loader_libs:
pattern = os.path.join(libdir, '..', 'lib', loaders_path, ext)
for f in glob.glob(pattern):
binaries.append((f, destpath))
loader_libs.append(f)
# Filename of the loader cache to be written below.
cachefile = os.path.join(CONF['workpath'], 'loaders.cache')
# Run the "gdk-pixbuf-query-loaders" command and capture its
# standard output providing an updated loader cache; then write
# this output to the loader cache bundled with this frozen
# application.
#
# On OSX we use @executable_path to specify a path relative to the
# generated bundle. However, on non-Windows we need to rewrite the
# loader cache because it isn't relocatable by default. See
# https://bugzilla.gnome.org/show_bug.cgi?id=737523
#
# To make it easier to rewrite, we just always write
            # @executable_path, since it's significantly easier to find/replace
# at runtime. :)
#
# If we need to rewrite it...
if not is_win:
# To permit string munging, decode the encoded bytes output by
# this command (i.e., enable the "universal_newlines" option).
# Note that:
#
# * Under Python 2.7, "cachedata" will be a decoded "unicode"
# object. * Under Python 3.x, "cachedata" will be a decoded
# "str" object.
#
# On Fedora, the default loaders cache is /usr/lib64, but the
# libdir is actually /lib64. To get around this, we pass the
# path to the loader command, and it will create a cache with
# the right path.
cachedata = exec_command_stdout(gdk_pixbuf_query_loaders,
*loader_libs)
cd = []
prefix = '"' + os.path.join(libdir, 'gdk-pixbuf-2.0', '2.10.0')
plen = len(prefix)
# For each line in the updated loader cache...
for line in cachedata.splitlines():
if line.startswith('#'):
continue
if line.startswith(prefix):
line = '"@executable_path/' + cachedest + line[plen:]
cd.append(line)
# Rejoin these lines in a manner preserving this object's
# "unicode" type under Python 2.
cachedata = u'\n'.join(cd)
# Write the updated loader cache to this file.
with open_file(cachefile, 'w') as fp:
fp.write(cachedata)
# Else, GdkPixbuf will do the right thing on Windows, so no changes
# to the loader cache are required. For efficiency and reliability,
# this command's encoded byte output is written as is without being
# decoded.
else:
with open_file(cachefile, 'wb') as fp:
fp.write(subprocess.check_output(gdk_pixbuf_query_loaders))
# Bundle this loader cache with this frozen application.
datas.append((cachefile, cachedest))
# Else, loader detection is unsupported on this platform.
else:
logger.warning(
'GdkPixbuf loader bundling unsupported on your platform.'
)
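# Illustrative note (added for clarity, paths hypothetical): after the
# @executable_path rewrite above, a loaders.cache entry such as
#   "/usr/lib/gdk-pixbuf-2.0/2.10.0/loaders/libpixbufloader-png.so"
# becomes
#   "@executable_path/lib/gdk-pixbuf-2.0/2.10.0/loaders/libpixbufloader-png.so"
# which is what makes the bundled cache relocatable at runtime.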
| gpl-3.0 | -3,919,858,749,350,087,700 | 39.969697 | 79 | 0.569822 | false |
all-of-us/raw-data-repository | rdr_service/alembic/versions/72365b7c0037_add_gender_identity_enums.py | 1 | 1232 | """add gender identity enums
Revision ID: 72365b7c0037
Revises: 9c957ce496bf
Create Date: 2019-06-05 08:56:34.278852
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import GenderIdentity
# revision identifiers, used by Alembic.
revision = "72365b7c0037"
down_revision = "9c957ce496bf"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("participant_summary", sa.Column("gender_identity", model.utils.Enum(GenderIdentity), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("participant_summary", "gender_identity")
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
| bsd-3-clause | 7,862,067,207,526,346,000 | 23.64 | 119 | 0.685065 | false |
gunny26/datalogger | datalogger/Test_DataLogger.py | 1 | 4699 | #!/usr/bin/python2
from __future__ import print_function
import unittest
import logging
logging.basicConfig(level=logging.INFO)
import datetime
import gzip
import json
import os
# own modules
from DataLogger import DataLogger as DataLogger
from Timeseries import Timeseries as Timeseries
from TimeseriesArray import TimeseriesArray as TimeseriesArray
from TimeseriesStats import TimeseriesStats as TimeseriesStats
from TimeseriesArrayStats import TimeseriesArrayStats as TimeseriesArrayStats
from Quantile import QuantileArray as QuantileArray
from Quantile import Quantile as Quantile
class Test(unittest.TestCase):
def setUp(self):
self.basedir = "testdata"
self.project = "mysql"
self.tablename = "performance"
self.datestring = "2018-04-01"
self.datalogger = DataLogger(self.basedir)
def notest__str__(self):
print(self.datalogger)
def test__init__(self):
try:
DataLogger("/nonexisting")
except AttributeError as exc:
print("Expected Exception: %s" % exc)
try:
dl = DataLogger("testdata")
dl.setup("unknownproject", self.tablename, "2018-04-01")
except AttributeError as exc:
print("Expected Exception: %s" % exc)
try:
DataLogger("testdata")
dl.setup("sanportperf", "unknowntablename", "2018-04-01")
except AttributeError as exc:
print("Expected Exception: %s" % exc)
try:
DataLogger("testdata")
dl.setup("sanportperf", "fcIfC3AccountingTable", "2018-04-01")
except AttributeError as exc:
print("Expected Exception: %s" % exc)
def test__getitem__(self):
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
caches = dl["caches"]
print(caches)
assert isinstance(caches, dict)
tsa = dl["tsa"]
print(tsa)
assert isinstance(tsa, TimeseriesArray)
ts = dl["tsa", ("nagios.tilak.cc",)]
print(ts)
assert isinstance(ts, Timeseries)
assert tsa[("nagios.tilak.cc",)] == ts
tsastats = dl["tsastats"]
print(tsastats)
assert isinstance(tsastats, TimeseriesArrayStats)
tsstats = dl["tsastats", ("nagios.tilak.cc",)]
print(tsstats)
assert isinstance(tsstats, TimeseriesStats)
assert tsastats[("nagios.tilak.cc",)] == tsstats
qa = dl["qa"]
print(qa)
assert isinstance(qa, QuantileArray)
quantile = dl["qa", ("nagios.tilak.cc",)]
print(quantile)
assert isinstance(quantile, dict)
assert qa[("nagios.tilak.cc",)] == quantile
def test_load_tsa(self):
dl = DataLogger("testdata")
dl.setup("sanportperf", "fcIfC3AccountingTable", "2018-04-01")
dl.delete_caches()
tsa = dl.load_tsa()
#print(tsa)
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
dl.delete_caches()
tsa = dl.load_tsa()
#print(tsa)
def test_load_tsastats(self):
dl = DataLogger("testdata")
dl.setup("sanportperf", "fcIfC3AccountingTable", "2018-04-01")
dl.delete_caches()
tsastats = dl.load_tsastats()
#print(tsa)
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
dl.delete_caches()
tsastats = dl.load_tsastats()
#print(tsa)
def test_load_quantiles(self):
dl = DataLogger("testdata")
dl.setup("sanportperf", "fcIfC3AccountingTable", "2018-04-01")
dl.delete_caches()
quantiles = dl.load_quantile()
#print(tsa)
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
dl.delete_caches()
quantiles = dl.load_quantile()
#print(tsa)
def test_load_caches(self):
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
dl.delete_caches()
print(dl.get_caches())
tsa = dl.load_tsa()
print(dl.get_caches())
def test_total_stats(self):
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
dl.delete_caches()
total_stats = dl.load_total_stats()
print(json.dumps(total_stats, indent=4))
def test_raw_reader(self):
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
for row in dl.raw_reader():
pass
assert row['bytes_received'] == '272517939'
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main()
| apache-2.0 | -5,105,882,383,725,770,000 | 32.091549 | 77 | 0.602256 | false |
vtraag/leidenalg | tests/test_VertexPartition.py | 1 | 8491 | import unittest
import igraph as ig
import leidenalg
import random
from copy import deepcopy
from ddt import ddt, data, unpack
#%%
def name_object(obj, name):
obj.__name__ = name
return obj
graphs = [
###########################################################################
# Zachary karate network
name_object(ig.Graph.Famous('Zachary'),
'Zachary'),
###########################################################################
# ER Networks
# Undirected no loop
name_object(ig.Graph.Erdos_Renyi(100, p=1./100, directed=False, loops=False),
'ER_k1_undirected_no_loops'),
name_object(ig.Graph.Erdos_Renyi(100, p=5./100, directed=False, loops=False),
'ER_k5_undirected_no_loops'),
# Directed no loop
name_object(ig.Graph.Erdos_Renyi(100, p=1./100, directed=True, loops=False),
'ER_k1_directed_no_loops'),
name_object(ig.Graph.Erdos_Renyi(100, p=5./100, directed=True, loops=False),
'ER_k5_directed_no_loops'),
# Undirected loops
name_object(ig.Graph.Erdos_Renyi(100, p=1./100, directed=False, loops=True),
'ER_k1_undirected_loops'),
name_object(ig.Graph.Erdos_Renyi(100, p=5./100, directed=False, loops=True),
'ER_k5_undirected_loops'),
# Directed loops
name_object(ig.Graph.Erdos_Renyi(100, p=1./100, directed=True, loops=True),
'ER_k1_directed_loops'),
name_object(ig.Graph.Erdos_Renyi(100, p=5./100, directed=True, loops=True),
'ER_k5_directed_loops'),
###########################################################################
# Tree
name_object(ig.Graph.Tree(100, 3, type=ig.TREE_UNDIRECTED),
'Tree_undirected'),
name_object(ig.Graph.Tree(100, 3, type=ig.TREE_OUT),
'Tree_directed_out'),
name_object(ig.Graph.Tree(100, 3, type=ig.TREE_IN),
'Tree_directed_in'),
###########################################################################
# Lattice
name_object(ig.Graph.Lattice([100], nei=3, directed=False, mutual=True, circular=True),
'Lattice_undirected'),
name_object(ig.Graph.Lattice([100], nei=3, directed=True, mutual=False, circular=True),
'Lattice_directed')
]
bipartite_graph = name_object(
ig.Graph.Bipartite([0, 0, 0, 0, 1, 1, 1, 1],
[[0, 4],
[0, 5],
[0, 6],
[1, 4],
[1, 5],
[2, 6],
[2, 7],
[3, 6],
[3, 7],
[3, 5]]),
'bipartite_example')
def make_weighted(G):
m = G.ecount()
G.es['weight'] = [random.random() for i in range(G.ecount())]
G.__name__ += '_weighted'
return G
graphs += [make_weighted(H) for H in graphs]
class BaseTest:
@ddt
class MutableVertexPartitionTest(unittest.TestCase):
def setUp(self):
self.optimiser = leidenalg.Optimiser()
@data(*graphs)
def test_move_nodes(self, graph):
if 'weight' in graph.es.attributes() and self.partition_type == leidenalg.SignificanceVertexPartition:
raise unittest.SkipTest('Significance doesn\'t handle weighted graphs')
if 'weight' in graph.es.attributes():
partition = self.partition_type(graph, weights='weight')
else:
partition = self.partition_type(graph)
for v in range(graph.vcount()):
if graph.degree(v) >= 1:
u = graph.neighbors(v)[0]
diff = partition.diff_move(v, partition.membership[u])
q1 = partition.quality()
partition.move_node(v, partition.membership[u])
q2 = partition.quality()
self.assertAlmostEqual(
q2 - q1,
diff,
places=5,
msg="Difference in quality ({0}) not equal to calculated difference ({1})".format(
q2 - q1, diff))
@data(*graphs)
def test_aggregate_partition(self, graph):
if 'weight' in graph.es.attributes() and self.partition_type != leidenalg.SignificanceVertexPartition:
partition = self.partition_type(graph, weights='weight')
else:
partition = self.partition_type(graph)
self.optimiser.move_nodes(partition)
aggregate_partition = partition.aggregate_partition()
self.assertAlmostEqual(
partition.quality(),
aggregate_partition.quality(),
places=5,
msg='Quality not equal for aggregate partition.')
self.optimiser.move_nodes(aggregate_partition)
partition.from_coarse_partition(aggregate_partition)
self.assertAlmostEqual(
partition.quality(),
aggregate_partition.quality(),
places=5,
msg='Quality not equal from coarser partition.')
@data(*graphs)
def test_total_weight_in_all_comms(self, graph):
if 'weight' in graph.es.attributes() and self.partition_type != leidenalg.SignificanceVertexPartition:
partition = self.partition_type(graph, weights='weight')
else:
partition = self.partition_type(graph)
self.optimiser.optimise_partition(partition)
s = sum([partition.total_weight_in_comm(c) for c,_ in enumerate(partition)])
self.assertAlmostEqual(
s,
partition.total_weight_in_all_comms(),
places=5,
msg='Total weight in all communities ({0}) not equal to the sum of the weight in all communities ({1}).'.format(
s, partition.total_weight_in_all_comms())
)
@data(*graphs)
def test_copy(self, graph):
if 'weight' in graph.es.attributes() and self.partition_type != leidenalg.SignificanceVertexPartition:
partition = self.partition_type(graph, weights='weight')
else:
partition = self.partition_type(graph)
self.optimiser.optimise_partition(partition)
partition2 = deepcopy(partition)
self.assertAlmostEqual(
partition.quality(),
partition2.quality(),
places=5,
msg='Quality of deepcopy ({0}) not equal to quality of original partition ({1}).'.format(
partition.quality(), partition2.quality())
)
if (partition2.membership[0] == 0):
partition2.move_node(0, 1)
else:
partition2.move_node(0, 0)
self.assertNotEqual(
partition.membership[0],
partition2.membership[0],
msg='Moving node 0 in the deepcopy to community {0} results in community membership {1} for node 0 also in original partition.'.format(
partition.membership[0], partition2.membership[0])
)
class ModularityVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(ModularityVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.ModularityVertexPartition
class RBERVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(RBERVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.RBERVertexPartition
class RBConfigurationVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(RBConfigurationVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.RBConfigurationVertexPartition
class CPMVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(CPMVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.CPMVertexPartition
def test_Bipartite(self):
graph = bipartite_graph
partition, partition_0, partition_1 = \
leidenalg.CPMVertexPartition.Bipartite(graph, resolution_parameter_01=0.2)
self.optimiser.optimise_partition_multiplex(
[partition, partition_0, partition_1],
layer_weights=[1, -1, -1])
self.assertEqual(len(partition), 1)
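        # Note (informal): the [1, -1, -1] layer weights follow the bipartite
        # recipe from the leidenalg documentation: the two single-mode
        # partitions returned by CPMVertexPartition.Bipartite() are subtracted
        # from the joint partition, so optimising the weighted sum of the
        # three layers optimises the bipartite quality function.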
class SurpriseVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(SurpriseVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.SurpriseVertexPartition
class SignificanceVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(SignificanceVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.SignificanceVertexPartition
#%%
if __name__ == '__main__':
#%%
unittest.main(verbosity=3)
suite = unittest.TestLoader().discover('.')
unittest.TextTestRunner(verbosity=1).run(suite)
| gpl-3.0 | 685,149,790,798,385,900 | 36.570796 | 143 | 0.611824 | false |
joshfriend/memegen | tests/test_routes_templates.py | 1 | 2217 | # pylint: disable=unused-variable
# pylint: disable=misplaced-comparison-constant
from .conftest import load
def describe_get():
def when_default_text(client):
response = client.get("/templates/iw")
assert 200 == response.status_code
assert dict(
name="Insanity Wolf",
description="http://knowyourmeme.com/memes/insanity-wolf",
aliases=['insanity', 'insanity-wolf', 'iw'],
styles=[],
example="http://localhost/iw/does-testing/in-production",
) == load(response)
def when_no_default_text(client):
response = client.get("/templates/keanu")
assert 200 == response.status_code
assert "http://localhost/keanu/your-text/goes-here" == \
load(response)['example']
def when_alternate_sytles_available(client):
response = client.get("/templates/sad-biden")
assert 200 == response.status_code
assert ['down', 'scowl', 'window'] == load(response)['styles']
def when_dashes_in_key(client):
response = client.get("/templates/awkward-awesome")
assert 200 == response.status_code
def it_returns_list_when_no_key(client):
response = client.get("/templates/")
assert 200 == response.status_code
data = load(response)
assert "http://localhost/templates/iw" == data['Insanity Wolf']
assert len(data) >= 20 # there should be many memes
def it_redirects_when_text_is_provided(client):
response = client.get("/templates/iw/top/bottom")
assert 302 == response.status_code
assert '<a href="/iw/top/bottom">' in load(response, as_json=False)
def it_redirects_when_key_is_an_alias(client):
response = client.get("/templates/insanity-wolf")
assert 302 == response.status_code
assert '<a href="/templates/iw">' in load(response, as_json=False)
def describe_post():
def it_returns_an_error(client):
response = client.post("/templates/")
assert 403 == response.status_code
assert dict(
message="https://raw.githubusercontent.com/jacebrowning/memegen/master/CONTRIBUTING.md"
) == load(response)
| mit | -5,729,464,290,669,171,000 | 31.602941 | 99 | 0.626071 | false |
JIC-CSB/dtoolcore | docs/source/conf.py | 1 | 5148 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u"dtoolcore"
copyright = u"2017, Tjelvar Olsson"
author = u"Tjelvar Olsson"
repo_name = u"dtoolcore"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u"3.13.0"
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Set the readthedocs theme.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
print('using readthedocs theme...')
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{}doc'.format(repo_name)
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{}.tex'.format(repo_name),
u'{} Documentation'.format(repo_name),
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, repo_name, u'{} Documentation'.format(repo_name),
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, repo_name, u'{} Documentation'.format(repo_name),
author, repo_name, u'Core API for managing (scientific) data',
'Miscellaneous'),
]
| mit | 835,693,816,536,538,400 | 30.012048 | 79 | 0.673271 | false |
praekelt/vumi-go | go/billing/migrations/0009_auto__chg_field_messagecost_tag_pool__add_index_messagecost_message_di.py | 1 | 10898 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'MessageCost.tag_pool'
db.alter_column(u'billing_messagecost', 'tag_pool_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['billing.TagPool'], null=True))
# Adding index on 'MessageCost', fields ['message_direction']
db.create_index(u'billing_messagecost', ['message_direction'])
# Adding unique constraint on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.create_unique(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Adding index on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.create_index(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
def backwards(self, orm):
# Removing index on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.delete_index(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Removing unique constraint on 'MessageCost', fields ['account', 'tag_pool', 'message_direction']
db.delete_unique(u'billing_messagecost', ['account_id', 'tag_pool_id', 'message_direction'])
# Removing index on 'MessageCost', fields ['message_direction']
db.delete_index(u'billing_messagecost', ['message_direction'])
# User chose to not deal with backwards NULL issues for 'MessageCost.tag_pool'
raise RuntimeError("Cannot reverse this migration. 'MessageCost.tag_pool' and its values cannot be restored.")
# The following code is provided here to aid in writing a correct migration
# Changing field 'MessageCost.tag_pool'
db.alter_column(u'billing_messagecost', 'tag_pool_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['billing.TagPool']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.gouser': {
'Meta': {'object_name': 'GoUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'billing.account': {
'Meta': {'object_name': 'Account'},
'account_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'alert_credit_balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'alert_threshold': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2'}),
'credit_balance': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.GoUser']"})
},
u'billing.lineitem': {
'Meta': {'object_name': 'LineItem'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message_direction': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '20', 'blank': 'True'}),
'statement': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Statement']"}),
'tag_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'tag_pool_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'total_cost': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
u'billing.messagecost': {
'Meta': {'unique_together': "[['account', 'tag_pool', 'message_direction']]", 'object_name': 'MessageCost', 'index_together': "[['account', 'tag_pool', 'message_direction']]"},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Account']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'markup_percent': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2'}),
'message_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '3'}),
'message_direction': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'session_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '3'}),
'tag_pool': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.TagPool']", 'null': 'True', 'blank': 'True'})
},
u'billing.statement': {
'Meta': {'object_name': 'Statement'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['billing.Account']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'from_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to_date': ('django.db.models.fields.DateField', [], {}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'billing.tagpool': {
'Meta': {'object_name': 'TagPool'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
},
u'billing.transaction': {
'Meta': {'object_name': 'Transaction'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'credit_amount': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '20', 'decimal_places': '6'}),
'credit_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'markup_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'message_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '3'}),
'message_direction': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'session_cost': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '3'}),
'session_created': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Pending'", 'max_length': '20'}),
'tag_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'tag_pool_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['billing'] | bsd-3-clause | -5,296,161,936,349,393,000 | 75.216783 | 188 | 0.569279 | false |
Sabayon/entropy | lib/tests/spm.py | 1 | 13331 | # -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, '.')
sys.path.insert(0, '../')
import unittest
import shutil
import entropy.tools as et
from entropy.const import const_mkdtemp
from entropy.client.interfaces import Client
from entropy.spm.plugins.interfaces.portage_plugin import \
PortageEntropyDepTranslator
import tests._misc as _misc
class SpmTest(unittest.TestCase):
def setUp(self):
self.Client = Client(installed_repo = -1, indexing = False,
xcache = False, repo_validation = False)
self.test_pkg = _misc.get_test_entropy_package()
self.test_pkg2 = _misc.get_test_entropy_package2()
self.test_pkg3 = _misc.get_test_entropy_package3()
self.test_pkgs = [self.test_pkg, self.test_pkg2, self.test_pkg3]
def tearDown(self):
"""
tearDown is run after each test
"""
# calling destroy() and shutdown()
# need to call destroy() directly to remove all the SystemSettings
# plugins because shutdown() doesn't, since it's meant to be called
# right before terminating the process
self.Client.destroy()
self.Client.shutdown()
def test_portage_translator(self):
deps = {
"""|| ( app-emulation/virtualbox
>=app-emulation/virtualbox-bin-2.2.0
)""": \
"( app-emulation/virtualbox | >=app-emulation/virtualbox-bin-2.2.0 )",
"""|| ( ( gnome-extra/zenity ) ( kde-base/kdialog ) )
""": \
"( ( gnome-extra/zenity ) | ( kde-base/kdialog ) )",
"""|| ( <media-libs/xine-lib-1.2
( >=media-libs/xine-lib-1.2 virtual/ffmpeg ) )
""": \
"( <media-libs/xine-lib-1.2 | ( >=media-libs/xine-lib-1.2 & virtual/ffmpeg ) )",
}
for dep, expected in deps.items():
tr = PortageEntropyDepTranslator(dep)
self.assertEqual(expected, tr.translate())
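        # Informal summary of the mapping exercised above: Portage's
        # "|| ( A B )" any-of syntax translates to Entropy's "( A | B )",
        # while implicit Portage AND lists become "&"-joined groups.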
def test_init(self):
spm = self.Client.Spm()
spm2 = self.Client.Spm()
self.assertTrue(spm is spm2)
spm_class = self.Client.Spm_class()
spm_class2 = self.Client.Spm_class()
self.assertTrue(spm_class is spm_class2)
def test_basic_methods(self):
spm = self.Client.Spm()
spm_class = self.Client.Spm_class()
path = spm.get_user_installed_packages_file()
self.assertTrue(path)
groups = spm_class.get_package_groups()
self.assertTrue(isinstance(groups, dict))
keys = spm.package_metadata_keys()
self.assertTrue(isinstance(keys, list))
cache_dir = spm.get_cache_directory()
self.assertTrue(cache_dir)
sys_pkgs = spm.get_system_packages()
self.assertTrue(sys_pkgs)
self.assertTrue(isinstance(sys_pkgs, list))
path1 = spm.get_merge_protected_paths_mask()
path2 = spm.get_merge_protected_paths()
self.assertTrue(isinstance(path1, list))
self.assertTrue(isinstance(path2, list))
pkg = spm.convert_from_entropy_package_name("app-foo/foo")
self.assertTrue(pkg)
def test_portage_xpak(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
sums = {}
paths = []
from entropy.spm.plugins.interfaces.portage_plugin import xpak
from entropy.spm.plugins.interfaces.portage_plugin import xpaktools
temp_unpack = const_mkdtemp(prefix="test_portage_xpak")
temp_unpack2 = const_mkdtemp(prefix="test_portage_xpak2")
test_pkg = os.path.join(temp_unpack2, "test.pkg")
dbdir = _misc.get_entrofoo_test_spm_portage_dir()
for path in os.listdir(dbdir):
xpath = os.path.join(dbdir, path)
paths.append(xpath)
sums[path] = et.md5sum(xpath)
et.compress_files(test_pkg, paths)
comp_file = xpak.tbz2(test_pkg)
result = comp_file.recompose(dbdir)
shutil.rmtree(temp_unpack)
os.mkdir(temp_unpack)
# now extract xpak
new_sums = {}
xpaktools.extract_xpak(test_pkg, tmpdir = temp_unpack)
for path in os.listdir(temp_unpack):
xpath = os.path.join(temp_unpack, path)
new_sums[path] = et.md5sum(xpath)
self.assertEqual(sums, new_sums)
shutil.rmtree(temp_unpack)
shutil.rmtree(temp_unpack2)
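        # Background note (informal): an XPAK archive is the metadata blob
        # that Portage appends to binary packages; recompose()/extract_xpak()
        # above round-trip that blob, which is why the per-file md5sums are
        # expected to match.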
def test_extract_xpak(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
from entropy.spm.plugins.interfaces.portage_plugin import xpaktools
tmp_path = const_mkdtemp(prefix="test_extract_xpak")
for test_pkg in self.test_pkgs:
out_path = xpaktools.extract_xpak(test_pkg, tmp_path)
self.assertNotEqual(out_path, None)
self.assertTrue(os.listdir(out_path))
shutil.rmtree(tmp_path, True)
def test_extract_xpak_only(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
from entropy.spm.plugins.interfaces.portage_plugin import xpaktools
pkg_path = _misc.get_test_xpak_empty_package()
tmp_path = const_mkdtemp(prefix="test_extract_xpak_only")
out_path = xpaktools.extract_xpak(pkg_path, tmp_path)
self.assertNotEqual(out_path, None)
self.assertTrue(os.listdir(out_path))
shutil.rmtree(tmp_path, True)
def test_sets_load(self):
spm = self.Client.Spm()
sets = spm.get_package_sets(False)
self.assertNotEqual(sets, None)
def test_static_sets_load(self):
spm = self.Client.Spm()
sets = spm.get_package_sets(False)
self.assertNotEqual(sets, None)
def test_dependencies_calculation(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
spm = self.Client.Spm()
iuse = "system-sqlite"
use = "amd64 dbus elibc_glibc kernel_linux multilib " + \
"startup-notification userland_GNU"
license = "MPL-1.1 GPL-2"
depend = """>=mail-client/thunderbird-3.1.1-r1[system-sqlite=]
x11-libs/libXrender x11-libs/libXt x11-libs/libXmu
>=sys-libs/zlib-1.1.4 dev-util/pkgconfig x11-libs/libXrender
x11-libs/libXt x11-libs/libXmu virtual/jpeg dev-libs/expat
app-arch/zip app-arch/unzip >=x11-libs/gtk+-2.8.6
>=dev-libs/glib-2.8.2 >=x11-libs/pango-1.10.1 >=dev-libs/libIDL-0.8.0
>=dev-libs/dbus-glib-0.72 >=x11-libs/startup-notification-0.8
!<x11-base/xorg-x11-6.7.0-r2 >=x11-libs/cairo-1.6.0 app-arch/unzip
=sys-devel/automake-1.11* =sys-devel/autoconf-2.1*
>=sys-devel/libtool-2.2.6b""".replace("\n", " ")
rdepend = """>=mail-client/thunderbird-3.1.1-r1[system-sqlite=] ||
( ( >=app-crypt/gnupg-2.0 || ( app-crypt/pinentry
app-crypt/pinentry-base ) ) =app-crypt/gnupg-1.4* ) x11-libs/libXrender
x11-libs/libXt x11-libs/libXmu >=sys-libs/zlib-1.1.4 x11-libs/libXrender
x11-libs/libXt x11-libs/libXmu virtual/jpeg dev-libs/expat app-arch/zip
app-arch/unzip >=x11-libs/gtk+-2.8.6 >=dev-libs/glib-2.8.2
>=x11-libs/pango-1.10.1 >=dev-libs/libIDL-0.8.0
>=dev-libs/dbus-glib-0.72 >=x11-libs/startup-notification-0.8
!<x11-base/xorg-x11-6.7.0-r2 >=x11-libs/cairo-1.6.0""".replace("\n", " ")
pdepend = ""
bdepend = ""
provide = ""
sources = ""
eapi = "2"
os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE'] = "1"
try:
portage_metadata = spm._calculate_dependencies(
iuse, use, license,
depend, rdepend, pdepend, bdepend, provide, sources, eapi)
finally:
del os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE']
expected_deps = [
'>=mail-client/thunderbird-3.1.1-r1[-system-sqlite]',
'( ( >=app-crypt/gnupg-2.0 & ( app-crypt/pinentry | app-crypt/pinentry-base ) ) | ( app-crypt/pinentry & app-crypt/pinentry-base ) | =app-crypt/gnupg-1.4* )',
'x11-libs/libXrender',
'x11-libs/libXt',
'x11-libs/libXmu',
'>=sys-libs/zlib-1.1.4',
'x11-libs/libXrender',
'x11-libs/libXt',
'x11-libs/libXmu',
'virtual/jpeg',
'dev-libs/expat',
'app-arch/zip',
'app-arch/unzip',
'>=x11-libs/gtk+-2.8.6',
'>=dev-libs/glib-2.8.2',
'>=x11-libs/pango-1.10.1',
'>=dev-libs/libIDL-0.8.0',
'>=dev-libs/dbus-glib-0.72',
'>=x11-libs/startup-notification-0.8',
'!<x11-base/xorg-x11-6.7.0-r2',
'>=x11-libs/cairo-1.6.0']
expected_deps.sort()
resolved_deps = portage_metadata['RDEPEND']
resolved_deps.sort()
self.assertEqual(resolved_deps, expected_deps)
def test_eapi5_portage_slotdeps(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
spm = self.Client.Spm()
iuse = "system-sqlite"
use = "amd64 dbus elibc_glibc kernel_linux multilib " + \
"startup-notification userland_GNU"
license = "MPL-1.1 GPL-2"
depend = """
>=mail-client/thunderbird-3.1.1-r1:2=[system-sqlite=]
>=mail-client/thunderbird-3.1.1-r1:2*[system-sqlite=]
>=mail-client/thunderbird-3.1.1-r1:2*
>=mail-client/thunderbird-3.1.1-r1:2=
>=mail-client/thunderbird-3.1.1-r1:=
>=mail-client/thunderbird-3.1.1-r1:*
>=mail-client/thunderbird-3.1.1-r1:0/1
>=mail-client/thunderbird-3.1.1-r1:0/1=
""".replace("\n", " ")
rdepend = depend[:]
pdepend = depend[:]
bdepend = []
provide = ""
sources = ""
eapi = "2"
os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE'] = "1"
try:
portage_metadata = spm._calculate_dependencies(
iuse, use, license,
depend, rdepend, pdepend, bdepend, provide, sources, eapi)
finally:
del os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE']
expected_deps = [
'>=mail-client/thunderbird-3.1.1-r1:2[-system-sqlite]',
'>=mail-client/thunderbird-3.1.1-r1:2[-system-sqlite]',
'>=mail-client/thunderbird-3.1.1-r1:2',
'>=mail-client/thunderbird-3.1.1-r1:2',
'>=mail-client/thunderbird-3.1.1-r1',
'>=mail-client/thunderbird-3.1.1-r1',
'>=mail-client/thunderbird-3.1.1-r1:0',
'>=mail-client/thunderbird-3.1.1-r1:0',
]
expected_deps.sort()
for k in ("RDEPEND", "PDEPEND", "DEPEND"):
resolved_deps = portage_metadata[k]
resolved_deps.sort()
self.assertEqual(resolved_deps, expected_deps)
def test_eapi7_portage_bdepend(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
spm = self.Client.Spm()
iuse = "system-sqlite"
use = "amd64 dbus elibc_glibc kernel_linux multilib " + \
"startup-notification userland_GNU"
license = "MPL-1.1 GPL-2"
depend = """
=mail-client/thunderbird-3.1.1-r1:2
x11-misc/dwm
""".replace("\n", " ")
rdepend = ">=mail-client/thunderbird-3"
pdepend = "www-client/firefox:0"
bdepend = """
dev-lang/python[xml]
virtual/pkgconfig:0/1
""".replace("\n", " ")
provide = ""
sources = ""
eapi = "2"
os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE'] = "1"
try:
portage_metadata = spm._calculate_dependencies(
iuse, use, license,
depend, rdepend, pdepend, bdepend, provide, sources, eapi)
finally:
del os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE']
expected = {
'DEPEND': [
"=mail-client/thunderbird-3.1.1-r1:2",
"x11-misc/dwm"
],
'RDEPEND': [">=mail-client/thunderbird-3"],
'PDEPEND': ["www-client/firefox:0"],
'BDEPEND': [
"dev-lang/python[xml]",
"virtual/pkgconfig:0"
]
}
for k in ("RDEPEND", "PDEPEND", "DEPEND", "BDEPEND"):
resolved_deps = portage_metadata[k]
resolved_deps.sort()
expected_deps = expected[k]
expected_deps.sort()
self.assertEqual(resolved_deps, expected_deps)
def test_portage_or_selector(self):
spm_class = self.Client.Spm_class()
if spm_class.PLUGIN_NAME != "portage":
return
spm = self.Client.Spm()
os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE'] = "1"
try:
or_deps = ['x11-foo/foo', 'x11-bar/bar']
self.assertEqual(spm._dep_or_select(
or_deps, top_level = True),
["( x11-foo/foo | x11-bar/bar )"])
finally:
del os.environ['ETP_PORTAGE_CONDITIONAL_DEPS_ENABLE']
if __name__ == '__main__':
unittest.main()
raise SystemExit(0)
| gpl-2.0 | 3,206,684,940,896,305,700 | 34.174142 | 170 | 0.569125 | false |
cajone/pychess | lib/pychess/System/TaskQueue.py | 1 | 2187 | # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/475160
# Was accepted into Python 2.5, but earlier versions still have
# to do stuff manually
import threading
from pychess.compat import Queue
def TaskQueue():
if hasattr(Queue, "task_done"):
return Queue()
return _TaskQueue()
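# Illustrative sketch (added for documentation, never called by pychess): the
# producer/consumer pattern that task_done()/join() are designed for. The
# per-item work is left as a comment placeholder.
def _example_usage(items):
    queue = TaskQueue()

    def worker():
        while True:
            item = queue.get()
            # ... process "item" here ...
            queue.task_done()  # exactly one task_done() per get()

    thread = threading.Thread(target=worker)
    thread.daemon = True  # don't keep the interpreter alive for the worker
    thread.start()
    for item in items:
        queue.put(item)
    queue.join()  # blocks until every put() item has been marked done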
class _TaskQueue(Queue):
def __init__(self):
Queue.__init__(self)
self.all_tasks_done = threading.Condition(self.mutex)
self.unfinished_tasks = 0
def _put(self, item):
Queue._put(self, item)
self.unfinished_tasks += 1
def task_done(self):
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
self.all_tasks_done.acquire()
try:
unfinished = self.unfinished_tasks - 1
if unfinished <= 0:
if unfinished < 0:
raise ValueError('task_done() called too many times')
self.all_tasks_done.notifyAll()
self.unfinished_tasks = unfinished
finally:
self.all_tasks_done.release()
def join(self):
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
self.all_tasks_done.acquire()
try:
while self.unfinished_tasks:
self.all_tasks_done.wait()
finally:
self.all_tasks_done.release()
| gpl-3.0 | 3,400,890,274,600,280,000 | 33.171875 | 79 | 0.622771 | false |
akesandgren/easybuild-framework | test/framework/config.py | 1 | 32361 | # #
# Copyright 2013-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for EasyBuild configuration.
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
"""
import os
import re
import shutil
import sys
import tempfile
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered, init_config
from unittest import TextTestRunner
import easybuild.tools.options as eboptions
from easybuild.tools import run
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option, build_path, get_build_log_path, get_log_filename, get_repositorypath
from easybuild.tools.config import install_path, log_file_format, log_path, source_paths
from easybuild.tools.config import BuildOptions, ConfigurationVariables
from easybuild.tools.config import DEFAULT_PATH_SUBDIRS, init_build_options
from easybuild.tools.filetools import copy_dir, mkdir, write_file
from easybuild.tools.options import CONFIG_ENV_VAR_PREFIX
from easybuild.tools.py2vs3 import reload
class EasyBuildConfigTest(EnhancedTestCase):
"""Test cases for EasyBuild configuration."""
tmpdir = None
def setUp(self):
"""Prepare for running a config test."""
reload(eboptions)
super(EasyBuildConfigTest, self).setUp()
self.tmpdir = tempfile.mkdtemp()
def purge_environment(self):
"""Remove any leftover easybuild variables"""
for var in os.environ.keys():
# retain $EASYBUILD_IGNORECONFIGFILES, to make sure the test is isolated from system-wide config files!
if var.startswith('EASYBUILD_') and var != 'EASYBUILD_IGNORECONFIGFILES':
del os.environ[var]
def tearDown(self):
"""Clean up after a config test."""
super(EasyBuildConfigTest, self).tearDown()
try:
shutil.rmtree(self.tmpdir)
except OSError:
pass
tempfile.tempdir = None
def configure(self, args=None):
"""(re)Configure and return configfile"""
options = init_config(args=args)
return options.config
def test_default_config(self):
"""Test default configuration."""
self.purge_environment()
eb_go = eboptions.parse_options(args=[])
config_options = eb_go.get_options_by_section('config')
# check default subdirs
self.assertEqual(DEFAULT_PATH_SUBDIRS['buildpath'], 'build')
self.assertEqual(DEFAULT_PATH_SUBDIRS['installpath'], '')
self.assertEqual(DEFAULT_PATH_SUBDIRS['subdir_modules'], 'modules')
self.assertEqual(DEFAULT_PATH_SUBDIRS['repositorypath'], 'ebfiles_repo')
self.assertEqual(DEFAULT_PATH_SUBDIRS['sourcepath'], 'sources')
self.assertEqual(DEFAULT_PATH_SUBDIRS['subdir_software'], 'software')
# check whether defaults are honored, use hardcoded paths/subdirs
eb_homedir = os.path.join(os.path.expanduser('~'), '.local', 'easybuild')
self.assertEqual(config_options['buildpath'], os.path.join(eb_homedir, 'build'))
self.assertEqual(config_options['sourcepath'], os.path.join(eb_homedir, 'sources'))
self.assertEqual(config_options['installpath'], eb_homedir)
self.assertEqual(config_options['subdir_software'], 'software')
self.assertEqual(config_options['subdir_modules'], 'modules')
self.assertEqual(config_options['repository'], 'FileRepository')
self.assertEqual(config_options['repositorypath'], [os.path.join(eb_homedir, 'ebfiles_repo')])
self.assertEqual(config_options['logfile_format'][0], 'easybuild')
self.assertEqual(config_options['logfile_format'][1], "easybuild-%(name)s-%(version)s-%(date)s.%(time)s.log")
self.assertEqual(config_options['tmpdir'], None)
self.assertEqual(config_options['tmp_logdir'], None)
def test_generaloption_config(self):
"""Test new-style configuration (based on generaloption)."""
self.purge_environment()
# check whether configuration via environment variables works as expected
prefix = os.path.join(self.tmpdir, 'testprefix')
buildpath_env_var = os.path.join(self.tmpdir, 'envvar', 'build', 'path')
os.environ['EASYBUILD_PREFIX'] = prefix
os.environ['EASYBUILD_BUILDPATH'] = buildpath_env_var
options = init_config(args=[])
self.assertEqual(build_path(), buildpath_env_var)
self.assertEqual(install_path(), os.path.join(prefix, 'software'))
self.assertEqual(get_repositorypath(), [os.path.join(prefix, 'ebfiles_repo')])
del os.environ['EASYBUILD_PREFIX']
del os.environ['EASYBUILD_BUILDPATH']
# check whether configuration via command line arguments works
prefix = os.path.join(self.tmpdir, 'test1')
install = os.path.join(self.tmpdir, 'test2', 'install')
repopath = os.path.join(self.tmpdir, 'test2', 'repo')
config_file = os.path.join(self.tmpdir, 'nooldconfig.py')
write_file(config_file, '')
args = [
'--configfiles', config_file, # force empty config file
'--prefix', prefix,
'--installpath', install,
'--repositorypath', repopath,
'--subdir-software', 'APPS',
]
options = init_config(args=args)
self.assertEqual(build_path(), os.path.join(prefix, 'build'))
self.assertEqual(install_path(), os.path.join(install, 'APPS'))
self.assertEqual(install_path(typ='mod'), os.path.join(install, 'modules'))
self.assertEqual(options.installpath, install)
self.assertTrue(config_file in options.configfiles)
# check mixed command line/env var configuration
prefix = os.path.join(self.tmpdir, 'test3')
install = os.path.join(self.tmpdir, 'test4', 'install')
subdir_software = 'eb-soft'
args = [
'--configfiles', config_file, # force empty config file
'--installpath', install,
]
os.environ['EASYBUILD_PREFIX'] = prefix
os.environ['EASYBUILD_SUBDIR_SOFTWARE'] = subdir_software
installpath_modules = tempfile.mkdtemp(prefix='installpath-modules')
os.environ['EASYBUILD_INSTALLPATH_MODULES'] = installpath_modules
options = init_config(args=args)
self.assertEqual(build_path(), os.path.join(prefix, 'build'))
self.assertEqual(install_path(), os.path.join(install, subdir_software))
self.assertEqual(install_path('mod'), installpath_modules)
# subdir options *must* be relative (to --installpath)
installpath_software = tempfile.mkdtemp(prefix='installpath-software')
os.environ['EASYBUILD_SUBDIR_SOFTWARE'] = installpath_software
error_regex = r"Found problems validating the options.*'subdir_software' must specify a \*relative\* path"
self.assertErrorRegex(EasyBuildError, error_regex, init_config)
del os.environ['EASYBUILD_PREFIX']
del os.environ['EASYBUILD_SUBDIR_SOFTWARE']
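        # Naming rule exercised above (informal): each command-line option
        # --foo-bar maps to the environment variable $EASYBUILD_FOO_BAR
        # (prefix, uppercase, dashes replaced by underscores), e.g.
        # --installpath-modules <-> $EASYBUILD_INSTALLPATH_MODULES.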
def test_error_env_var_typo(self):
"""Test error reporting on use of known $EASYBUILD-prefixed env vars."""
# all is well
init_config()
os.environ['EASYBUILD_FOO'] = 'foo'
os.environ['EASYBUILD_THERESNOSUCHCONFIGURATIONOPTION'] = 'whatever'
error = r"Found 2 environment variable\(s\) that are prefixed with %s " % CONFIG_ENV_VAR_PREFIX
error += r"but do not match valid option\(s\): "
error += r','.join(['EASYBUILD_FOO', 'EASYBUILD_THERESNOSUCHCONFIGURATIONOPTION'])
self.assertErrorRegex(EasyBuildError, error, init_config)
del os.environ['EASYBUILD_THERESNOSUCHCONFIGURATIONOPTION']
del os.environ['EASYBUILD_FOO']
def test_install_path(self):
"""Test install_path function."""
# defaults
self.assertEqual(install_path(), os.path.join(self.test_installpath, 'software'))
self.assertEqual(install_path('software'), os.path.join(self.test_installpath, 'software'))
self.assertEqual(install_path(typ='mod'), os.path.join(self.test_installpath, 'modules'))
self.assertEqual(install_path('modules'), os.path.join(self.test_installpath, 'modules'))
self.assertErrorRegex(EasyBuildError, "Unknown type specified", install_path, typ='foo')
args = [
'--subdir-software', 'SOFT',
'--installpath', '/foo',
]
os.environ['EASYBUILD_SUBDIR_MODULES'] = 'MOD'
init_config(args=args)
self.assertEqual(install_path(), os.path.join('/foo', 'SOFT'))
self.assertEqual(install_path(typ='mod'), os.path.join('/foo', 'MOD'))
del os.environ['EASYBUILD_SUBDIR_MODULES']
args = [
'--installpath', '/prefix',
'--installpath-modules', '/foo',
]
os.environ['EASYBUILD_INSTALLPATH_SOFTWARE'] = '/bar/baz'
init_config(args=args)
self.assertEqual(install_path(), os.path.join('/bar', 'baz'))
self.assertEqual(install_path(typ='mod'), '/foo')
del os.environ['EASYBUILD_INSTALLPATH_SOFTWARE']
init_config(args=args)
self.assertEqual(install_path(), os.path.join('/prefix', 'software'))
self.assertEqual(install_path(typ='mod'), '/foo')
def test_generaloption_config_file(self):
"""Test use of new-style configuration file."""
self.purge_environment()
config_file = os.path.join(self.tmpdir, 'testconfig.cfg')
testpath1 = os.path.join(self.tmpdir, 'test1')
testpath2 = os.path.join(self.tmpdir, 'testtwo')
# test with config file passed via command line
cfgtxt = '\n'.join([
'[config]',
'installpath = %s' % testpath2,
])
write_file(config_file, cfgtxt)
installpath_software = tempfile.mkdtemp(prefix='installpath-software')
args = [
'--configfiles', config_file,
'--debug',
'--buildpath', testpath1,
'--installpath-software', installpath_software,
]
options = init_config(args=args)
self.assertEqual(build_path(), testpath1) # via command line
self.assertEqual(source_paths(), [os.path.join(os.getenv('HOME'), '.local', 'easybuild', 'sources')]) # default
self.assertEqual(install_path(), installpath_software) # via cmdline arg
self.assertEqual(install_path('mod'), os.path.join(testpath2, 'modules')) # via config file
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
# to check whether easyconfigs install path is auto-included in robot path
tmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(tmpdir, 'easybuild'), parents=True)
test_ecs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
copy_dir(test_ecs_dir, os.path.join(tmpdir, 'easybuild', 'easyconfigs'))
orig_sys_path = sys.path[:]
sys.path.insert(0, tmpdir) # prepend to give it preference over possible other installed easyconfigs pkgs
# test with config file passed via environment variable
# also test for existence of HOME and USER by adding paths to robot-paths
installpath_modules = tempfile.mkdtemp(prefix='installpath-modules')
cfgtxt = '\n'.join([
'[config]',
'buildpath = %s' % testpath1,
'sourcepath = %(DEFAULT_REPOSITORYPATH)s',
'repositorypath = %(DEFAULT_REPOSITORYPATH)s,somesubdir',
'robot-paths=/tmp/foo:%(sourcepath)s:%(HOME)s:/tmp/%(USER)s:%(DEFAULT_ROBOT_PATHS)s',
'installpath-modules=%s' % installpath_modules,
])
write_file(config_file, cfgtxt)
os.environ['EASYBUILD_CONFIGFILES'] = config_file
args = [
'--debug',
'--sourcepath', testpath2,
]
options = init_config(args=args)
topdir = os.path.join(os.getenv('HOME'), '.local', 'easybuild')
self.assertEqual(install_path(), os.path.join(topdir, 'software')) # default
        self.assertEqual(install_path('mod'), installpath_modules)  # via config file
self.assertEqual(source_paths(), [testpath2]) # via command line
self.assertEqual(build_path(), testpath1) # via config file
self.assertEqual(get_repositorypath(), [os.path.join(topdir, 'ebfiles_repo'), 'somesubdir']) # via config file
# hardcoded first entry
self.assertEqual(options.robot_paths[0], '/tmp/foo')
# resolved value for %(sourcepath)s template
self.assertEqual(options.robot_paths[1], os.path.join(os.getenv('HOME'), '.local', 'easybuild', 'ebfiles_repo'))
# resolved value for HOME constant
self.assertEqual(options.robot_paths[2], os.getenv('HOME'))
# resolved value that uses USER constant
self.assertEqual(options.robot_paths[3], os.path.join('/tmp', os.getenv('USER')))
# first path in DEFAULT_ROBOT_PATHS
self.assertEqual(options.robot_paths[4], os.path.join(tmpdir, 'easybuild', 'easyconfigs'))
testpath3 = os.path.join(self.tmpdir, 'testTHREE')
os.environ['EASYBUILD_SOURCEPATH'] = testpath2
args = [
'--debug',
'--installpath', testpath3,
]
options = init_config(args=args)
        self.assertEqual(source_paths(), [testpath2])  # via environment variable $EASYBUILD_SOURCEPATH
        self.assertEqual(install_path(), os.path.join(testpath3, 'software'))  # via command line
        self.assertEqual(install_path('mod'), installpath_modules)  # via config file
self.assertEqual(build_path(), testpath1) # via config file
del os.environ['EASYBUILD_CONFIGFILES']
sys.path[:] = orig_sys_path
def test_configuration_variables(self):
"""Test usage of ConfigurationVariables."""
# delete instance of ConfigurationVariables
ConfigurationVariables.__class__._instances.clear()
# make sure ConfigurationVariables is a singleton class (only one available instance)
cv1 = ConfigurationVariables()
cv2 = ConfigurationVariables()
cv3 = ConfigurationVariables({'foo': 'bar'}) # note: argument is ignored, an instance is already available
self.assertTrue(cv1 is cv2)
self.assertTrue(cv1 is cv3)
def test_build_options(self):
"""Test usage of BuildOptions."""
# delete instance of BuildOptions
BuildOptions.__class__._instances.clear()
# make sure BuildOptions is a singleton class
bo1 = BuildOptions()
bo2 = BuildOptions()
bo3 = BuildOptions({'foo': 'bar'}) # note: argument is ignored, an instance is already available
self.assertTrue(bo1 is bo2)
self.assertTrue(bo1 is bo3)
# test basic functionality
BuildOptions.__class__._instances.clear()
bo = BuildOptions({
'debug': False,
'force': True
})
self.assertTrue(not bo['debug'])
self.assertTrue(bo['force'])
# updating is impossible (methods are not even available)
self.assertErrorRegex(Exception, '.*(item assignment|no attribute).*', lambda x: bo.update(x), {'debug': True})
self.assertErrorRegex(AttributeError, '.*no attribute.*', lambda x: bo.__setitem__(*x), ('debug', True))
# only valid keys can be set
BuildOptions.__class__._instances.clear()
msg = r"Encountered unknown keys .* \(known keys: .*"
self.assertErrorRegex(KeyError, msg, BuildOptions, {'thisisclearlynotavalidbuildoption': 'FAIL'})
# test init_build_options and build_option functions
self.assertErrorRegex(KeyError, msg, init_build_options, {'thisisclearlynotavalidbuildoption': 'FAIL'})
bo = init_build_options({
'robot_path': '/some/robot/path',
'stop': 'configure',
})
# specific build options should be set
self.assertEqual(bo['robot_path'], '/some/robot/path')
self.assertEqual(bo['stop'], 'configure')
# all possible build options should be set (defaults are used where needed)
self.assertEqual(sorted(bo.keys()), sorted(BuildOptions.KNOWN_KEYS))
# there should be only one BuildOptions instance
bo2 = BuildOptions()
self.assertTrue(bo is bo2)
def test_XDG_CONFIG_env_vars(self):
"""Test effect of XDG_CONFIG* environment variables on default configuration."""
self.purge_environment()
xdg_config_home = os.environ.get('XDG_CONFIG_HOME')
xdg_config_dirs = os.environ.get('XDG_CONFIG_DIRS')
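        # Precedence exercised below: easybuild.d/*.cfg files from each
        # $XDG_CONFIG_DIRS entry are read first (sorted per directory),
        # $XDG_CONFIG_HOME's easybuild/config.cfg is read last, and the
        # last config file read wins for conflicting settings.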
cfg_template = '\n'.join([
'[config]',
'prefix=%s',
])
homedir = os.path.join(self.test_prefix, 'homedir', '.config')
mkdir(os.path.join(homedir, 'easybuild'), parents=True)
write_file(os.path.join(homedir, 'easybuild', 'config.cfg'), cfg_template % '/home')
dir1 = os.path.join(self.test_prefix, 'dir1')
mkdir(os.path.join(dir1, 'easybuild.d'), parents=True)
write_file(os.path.join(dir1, 'easybuild.d', 'foo.cfg'), cfg_template % '/foo')
write_file(os.path.join(dir1, 'easybuild.d', 'bar.cfg'), cfg_template % '/bar')
dir2 = os.path.join(self.test_prefix, 'dir2') # empty on purpose
mkdir(os.path.join(dir2, 'easybuild.d'), parents=True)
dir3 = os.path.join(self.test_prefix, 'dir3')
mkdir(os.path.join(dir3, 'easybuild.d'), parents=True)
write_file(os.path.join(dir3, 'easybuild.d', 'foobarbaz.cfg'), cfg_template % '/foobarbaz')
# set $XDG_CONFIG_DIRS to non-existing dir to isolate ourselves from possible system-wide config files
os.environ['XDG_CONFIG_DIRS'] = '/there/should/be/no/such/directory/we/hope'
# only $XDG_CONFIG_HOME set (to existing path)
os.environ['XDG_CONFIG_HOME'] = homedir
cfg_files = [os.path.join(homedir, 'easybuild', 'config.cfg')]
reload(eboptions)
eb_go = eboptions.parse_options(args=[])
self.assertEqual(eb_go.options.configfiles, cfg_files)
self.assertEqual(eb_go.options.prefix, '/home')
# $XDG_CONFIG_HOME set, one directory listed in $XDG_CONFIG_DIRS
os.environ['XDG_CONFIG_DIRS'] = dir1
cfg_files = [
os.path.join(dir1, 'easybuild.d', 'bar.cfg'),
os.path.join(dir1, 'easybuild.d', 'foo.cfg'),
os.path.join(homedir, 'easybuild', 'config.cfg'), # $XDG_CONFIG_HOME goes last
]
reload(eboptions)
eb_go = eboptions.parse_options(args=[])
self.assertEqual(eb_go.options.configfiles, cfg_files)
self.assertEqual(eb_go.options.prefix, '/home') # last cfgfile wins
# $XDG_CONFIG_HOME not set, multiple directories listed in $XDG_CONFIG_DIRS
del os.environ['XDG_CONFIG_HOME'] # unset, so should become default
os.environ['XDG_CONFIG_DIRS'] = os.pathsep.join([dir1, dir2, dir3])
cfg_files = [
os.path.join(dir1, 'easybuild.d', 'bar.cfg'),
os.path.join(dir1, 'easybuild.d', 'foo.cfg'),
os.path.join(dir3, 'easybuild.d', 'foobarbaz.cfg'),
]
reload(eboptions)
eb_go = eboptions.parse_options(args=[])
# note: there may be a config file in $HOME too, so don't use a strict comparison
self.assertEqual(cfg_files, eb_go.options.configfiles[:3])
# $XDG_CONFIG_HOME set to non-existing directory, multiple directories listed in $XDG_CONFIG_DIRS
os.environ['XDG_CONFIG_HOME'] = os.path.join(self.test_prefix, 'nosuchdir')
cfg_files = [
os.path.join(dir1, 'easybuild.d', 'bar.cfg'),
os.path.join(dir1, 'easybuild.d', 'foo.cfg'),
os.path.join(dir3, 'easybuild.d', 'foobarbaz.cfg'),
]
reload(eboptions)
eb_go = eboptions.parse_options(args=[])
self.assertEqual(eb_go.options.configfiles, cfg_files)
self.assertEqual(eb_go.options.prefix, '/foobarbaz') # last cfgfile wins
# restore $XDG_CONFIG env vars to original state
if xdg_config_home is None:
del os.environ['XDG_CONFIG_HOME']
else:
os.environ['XDG_CONFIG_HOME'] = xdg_config_home
if xdg_config_dirs is None:
del os.environ['XDG_CONFIG_DIRS']
else:
os.environ['XDG_CONFIG_DIRS'] = xdg_config_dirs
reload(eboptions)
def test_flex_robot_paths(self):
"""Test prepend/appending to default robot search path via --robot-paths."""
# unset $EASYBUILD_ROBOT_PATHS that was defined in setUp
del os.environ['EASYBUILD_ROBOT_PATHS']
# copy test easyconfigs to easybuild/easyconfigs subdirectory of temp directory
# to check whether easyconfigs install path is auto-included in robot path
tmpdir = tempfile.mkdtemp(prefix='easybuild-easyconfigs-pkg-install-path')
mkdir(os.path.join(tmpdir, 'easybuild'), parents=True)
test_ecs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs')
tmp_ecs_dir = os.path.join(tmpdir, 'easybuild', 'easyconfigs')
copy_dir(test_ecs_path, tmp_ecs_dir)
# prepend path to test easyconfigs into Python search path, so it gets picked up as --robot-paths default
orig_sys_path = sys.path[:]
sys.path = [tmpdir] + [p for p in sys.path if not os.path.exists(os.path.join(p, 'easybuild', 'easyconfigs'))]
# default: only pick up installed easyconfigs via sys.path
eb_go = eboptions.parse_options(args=[])
self.assertEqual(eb_go.options.robot_paths, [tmp_ecs_dir])
# prepend to default robot path
eb_go = eboptions.parse_options(args=['--robot-paths=/foo:'])
self.assertEqual(eb_go.options.robot_paths, ['/foo', tmp_ecs_dir])
eb_go = eboptions.parse_options(args=['--robot-paths=/foo:/bar/baz/:'])
self.assertEqual(eb_go.options.robot_paths, ['/foo', '/bar/baz/', tmp_ecs_dir])
# append to default robot path
eb_go = eboptions.parse_options(args=['--robot-paths=:/bar/baz'])
self.assertEqual(eb_go.options.robot_paths, [tmp_ecs_dir, '/bar/baz'])
# append to default robot path
eb_go = eboptions.parse_options(args=['--robot-paths=:/bar/baz:/foo'])
self.assertEqual(eb_go.options.robot_paths, [tmp_ecs_dir, '/bar/baz', '/foo'])
# prepend and append to default robot path
eb_go = eboptions.parse_options(args=['--robot-paths=/foo/bar::/baz'])
self.assertEqual(eb_go.options.robot_paths, ['/foo/bar', tmp_ecs_dir, '/baz'])
eb_go = eboptions.parse_options(args=['--robot-paths=/foo/bar::/baz:/trala'])
self.assertEqual(eb_go.options.robot_paths, ['/foo/bar', tmp_ecs_dir, '/baz', '/trala'])
eb_go = eboptions.parse_options(args=['--robot-paths=/foo/bar:/trala::/baz'])
self.assertEqual(eb_go.options.robot_paths, ['/foo/bar', '/trala', tmp_ecs_dir, '/baz'])
# also via $EASYBUILD_ROBOT_PATHS
os.environ['EASYBUILD_ROBOT_PATHS'] = '/foo::/bar/baz'
eb_go = eboptions.parse_options(args=[])
self.assertEqual(eb_go.options.robot_paths, ['/foo', tmp_ecs_dir, '/bar/baz'])
# --robot-paths overrides $EASYBUILD_ROBOT_PATHS
os.environ['EASYBUILD_ROBOT_PATHS'] = '/foobar::/barbar/baz/baz'
eb_go = eboptions.parse_options(args=['--robot-paths=/one::/last'])
self.assertEqual(eb_go.options.robot_paths, ['/one', tmp_ecs_dir, '/last'])
del os.environ['EASYBUILD_ROBOT_PATHS']
# also works with a cfgfile in the mix
config_file = os.path.join(self.tmpdir, 'testconfig.cfg')
cfgtxt = '\n'.join([
'[config]',
'robot-paths=/cfgfirst::/cfglast',
])
write_file(config_file, cfgtxt)
eb_go = eboptions.parse_options(args=['--configfiles=%s' % config_file])
self.assertEqual(eb_go.options.robot_paths, ['/cfgfirst', tmp_ecs_dir, '/cfglast'])
# cfgfile entry is lost when env var and/or cmdline options are used
os.environ['EASYBUILD_ROBOT_PATHS'] = '/envfirst::/envend'
eb_go = eboptions.parse_options(args=['--configfiles=%s' % config_file])
self.assertEqual(eb_go.options.robot_paths, ['/envfirst', tmp_ecs_dir, '/envend'])
del os.environ['EASYBUILD_ROBOT_PATHS']
eb_go = eboptions.parse_options(args=['--robot-paths=/veryfirst:', '--configfiles=%s' % config_file])
self.assertEqual(eb_go.options.robot_paths, ['/veryfirst', tmp_ecs_dir])
os.environ['EASYBUILD_ROBOT_PATHS'] = ':/envend'
eb_go = eboptions.parse_options(args=['--robot-paths=/veryfirst:', '--configfiles=%s' % config_file])
self.assertEqual(eb_go.options.robot_paths, ['/veryfirst', tmp_ecs_dir])
del os.environ['EASYBUILD_ROBOT_PATHS']
# override default robot path
eb_go = eboptions.parse_options(args=['--robot-paths=/foo:/bar/baz'])
self.assertEqual(eb_go.options.robot_paths, ['/foo', '/bar/baz'])
# paths specified via --robot still get preference
first = os.path.join(self.test_prefix, 'first')
mkdir(first)
eb_go = eboptions.parse_options(args=['--robot-paths=/foo/bar::/baz', '--robot=%s' % first])
self.assertEqual(eb_go.options.robot_paths, [first, '/foo/bar', tmp_ecs_dir, '/baz'])
sys.path[:] = orig_sys_path
def test_strict(self):
"""Test use of --strict."""
# check default
self.assertEqual(build_option('strict'), run.WARN)
for strict_str, strict_val in [('error', run.ERROR), ('ignore', run.IGNORE), ('warn', run.WARN)]:
options = init_config(args=['--strict=%s' % strict_str])
init_config(build_options={'strict': options.strict})
self.assertEqual(build_option('strict'), strict_val)
def test_get_log_filename(self):
"""Test for get_log_filename()."""
tmpdir = tempfile.gettempdir()
res = get_log_filename('foo', '1.2.3')
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-[0-9]{8}\.[0-9]{6}\.log$'))
        self.assertTrue(regex.match(res), "Pattern '%s' does not match '%s'" % (regex.pattern, res))
res = get_log_filename('foo', '1.2.3', date='19700101')
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-19700101\.[0-9]{6}\.log$'))
        self.assertTrue(regex.match(res), "Pattern '%s' does not match '%s'" % (regex.pattern, res))
res = get_log_filename('foo', '1.2.3', timestamp='094651')
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-[0-9]{8}\.094651\.log$'))
        self.assertTrue(regex.match(res), "Pattern '%s' does not match '%s'" % (regex.pattern, res))
res = get_log_filename('foo', '1.2.3', date='19700101', timestamp='094651')
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-19700101\.094651\.log$'))
        self.assertTrue(regex.match(res), "Pattern '%s' does not match '%s'" % (regex.pattern, res))
# if log file already exists, numbers are added to the filename to obtain a new file path
write_file(res, '')
res = get_log_filename('foo', '1.2.3', date='19700101', timestamp='094651')
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-19700101\.094651\.log\.1$'))
        self.assertTrue(regex.match(res), "Pattern '%s' does not match '%s'" % (regex.pattern, res))
# adding salt ensures a unique filename (pretty much)
prev_log_filenames = []
for i in range(10):
res = get_log_filename('foo', '1.2.3', date='19700101', timestamp='094651', add_salt=True)
regex = re.compile(os.path.join(tmpdir, r'easybuild-foo-1\.2\.3-19700101\.094651\.[a-zA-Z]{5}\.log$'))
            self.assertTrue(regex.match(res), "Pattern '%s' does not match '%s'" % (regex.pattern, res))
self.assertTrue(res not in prev_log_filenames)
prev_log_filenames.append(res)
def test_log_file_format(self):
"""Test for log_file_format()."""
# first test defaults -> no templating when no values are provided
self.assertEqual(log_file_format(), 'easybuild-%(name)s-%(version)s-%(date)s.%(time)s.log')
self.assertEqual(log_file_format(return_directory=True), 'easybuild')
# test whether provided values are used to complete template
ec = {'name': 'foo', 'version': '1.2.3'}
res = log_file_format(ec=ec, date='20190322', timestamp='094356')
self.assertEqual(res, 'easybuild-foo-1.2.3-20190322.094356.log')
res = log_file_format(return_directory=True, ec=ec, date='20190322', timestamp='094356')
self.assertEqual(res, 'easybuild')
# partial templating is done when only some values are provided...
self.assertEqual(log_file_format(ec=ec), 'easybuild-foo-1.2.3-%(date)s.%(time)s.log')
res = log_file_format(date='20190322', timestamp='094356')
self.assertEqual(res, 'easybuild-%(name)s-%(version)s-20190322.094356.log')
# also try with a custom setting
init_config(args=['--logfile-format=eb-%(name)s-%(date)s,log-%(version)s-%(date)s-%(time)s.out'])
self.assertEqual(log_file_format(), 'log-%(version)s-%(date)s-%(time)s.out')
self.assertEqual(log_file_format(return_directory=True), 'eb-%(name)s-%(date)s')
res = log_file_format(ec=ec, date='20190322', timestamp='094356')
self.assertEqual(res, 'log-1.2.3-20190322-094356.out')
res = log_file_format(return_directory=True, ec=ec, date='20190322', timestamp='094356')
self.assertEqual(res, 'eb-foo-20190322')
# test handling of incorrect setting for --logfile-format
init_config(args=['--logfile-format=easybuild,log.txt,thisiswrong'])
error_pattern = "Incorrect log file format specification, should be 2-tuple"
self.assertErrorRegex(EasyBuildError, error_pattern, log_file_format)
def test_log_path(self):
"""Test for log_path()."""
# default
self.assertEqual(log_path(), 'easybuild')
# providing template values doesn't affect the default
ec = {'name': 'foo', 'version': '1.2.3'}
res = log_path(ec=ec)
self.assertEqual(res, 'easybuild')
# reconfigure with value for log directory that includes templates
init_config(args=['--logfile-format=easybuild-%(name)s-%(version)s-%(date)s-%(time)s,log.txt'])
regex = re.compile(r'^easybuild-foo-1\.2\.3-[0-9-]{8}-[0-9]{6}$')
res = log_path(ec=ec)
        self.assertTrue(regex.match(res), "Pattern '%s' does not match '%s'" % (regex.pattern, res))
self.assertEqual(log_file_format(), 'log.txt')
def test_get_build_log_path(self):
"""Test for build_log_path()"""
init_config()
self.assertEqual(get_build_log_path(), tempfile.gettempdir())
build_log_path = os.path.join(self.test_prefix, 'chicken')
init_config(args=['--tmp-logdir=%s' % build_log_path])
self.assertEqual(get_build_log_path(), build_log_path)
def suite():
return TestLoaderFiltered().loadTestsFromTestCase(EasyBuildConfigTest, sys.argv[1:])
if __name__ == '__main__':
res = TextTestRunner(verbosity=1).run(suite())
sys.exit(len(res.failures))
| gpl-2.0 | -2,618,774,525,449,291,300 | 46.450147 | 120 | 0.636754 | false |
deter-project/magi | magi/messaging/transportTCP.py | 1 | 2821 |
import socket
import logging
import time
from asyncore import dispatcher
from transport import Transport
import transportStream
from magimessage import DefaultCodec
log = logging.getLogger(__name__)
class TCPServer(Transport):
""" Simple TCP Server that returns new TCP clients as 'messages' """
def __init__(self, address = None, port = None):
Transport.__init__(self)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.set_reuse_addr()
self.bind((address, port))
self.listen(5)
def handle_accept(self):
pair = self.accept()
if pair is None:
return
sock, addr = pair
log.info('Incoming connection from %s', repr(addr))
newTrans = TCPTransport(sock)
newTrans.saveHost = addr[0]
newTrans.savePort = addr[1]
self.inmessages.append(newTrans)
def serverOnly(self):
return True
def __repr__(self):
return "TCPServer %s:%d" % (self.addr[0], self.addr[1])
__str__ = __repr__
class TCPTransport(transportStream.StreamTransport):
"""
This class implements a TCP connection that streams MAGI messages back and forth. It
uses the StreamTransport for most work, extending it just for the connecting and reconnecting
portion.
"""
def __init__(self, sock = None, codec=DefaultCodec, address = None, port = None):
"""
Create a new TCP Transport. If sock is provided, it is used, otherwise starts with
an unconnected socket.
"""
transportStream.StreamTransport.__init__(self, sock=sock, codec=codec)
self.closed = False
self.saveHost = ""
self.savePort = -1
if address is not None and port is not None:
self.connect(address, port)
def connect(self, host, port):
"""
Attempt to connect this socket.
"""
self.saveHost = host
self.savePort = port
self.closed = False
log.info("connect %s:%d", self.saveHost, self.savePort)
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
log.info("If connection fails, it will retry shortly.")
dispatcher.connect(self, (self.saveHost, self.savePort))
def reconnect(self):
"""
Attempt a reconnect of a socket that was closed or never fully connected
"""
self.connect(self.saveHost, self.savePort)
def handle_write(self):
"""
Override stream version so we can add hosttime to outgoing packets
"""
if self.txMessage.isDone():
try:
msg = self.outmessages.pop(0)
msg.hosttime = int(time.time())
self.txMessage = transportStream.TXTracker(codec=self.codec, msg=msg)
except IndexError:
return
#keep sending till you can
while not self.txMessage.isDone():
bytesWritten = self.send(self.txMessage.getData())
self.txMessage.sent(bytesWritten)
#if no more can be written, break out
if bytesWritten == 0:
break
def __repr__(self):
return "TCPTransport %s:%d" % (self.saveHost, self.savePort)
__str__ = __repr__
| gpl-2.0 | -4,412,502,271,619,851,000 | 25.613208 | 95 | 0.698688 | false |
hastexo/edx-platform | pavelib/paver_tests/test_assets.py | 1 | 7878 | """Unit tests for the Paver asset tasks."""
import os
from unittest import TestCase
import ddt
from mock import patch
from paver.easy import call_task, path
from watchdog.observers import Observer
from pavelib.assets import COLLECTSTATIC_LOG_DIR_ARG, collect_assets
from ..utils.envs import Env
from .utils import PaverTestCase
ROOT_PATH = path(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
TEST_THEME_DIR = ROOT_PATH / "common/test/test-theme" # pylint: disable=invalid-name
class TestPaverWatchAssetTasks(TestCase):
"""
Test the Paver watch asset tasks.
"""
def setUp(self):
self.expected_sass_directories = [
path('common/static/sass'),
path('common/static'),
path('node_modules/@edx'),
path('node_modules'),
path('node_modules/edx-pattern-library/node_modules'),
path('lms/static/sass/partials'),
path('lms/static/sass'),
path('lms/static/certificates/sass'),
path('cms/static/sass'),
path('cms/static/sass/partials'),
]
super(TestPaverWatchAssetTasks, self).setUp()
def tearDown(self):
self.expected_sass_directories = []
super(TestPaverWatchAssetTasks, self).tearDown()
def test_watch_assets(self):
"""
Test the "compile_sass" task.
"""
with patch('pavelib.assets.SassWatcher.register') as mock_register:
with patch('pavelib.assets.Observer.start'):
with patch('pavelib.assets.execute_webpack_watch') as mock_webpack:
call_task(
'pavelib.assets.watch_assets',
options={"background": True},
)
self.assertEqual(mock_register.call_count, 2)
self.assertEqual(mock_webpack.call_count, 1)
sass_watcher_args = mock_register.call_args_list[0][0]
self.assertIsInstance(sass_watcher_args[0], Observer)
self.assertIsInstance(sass_watcher_args[1], list)
self.assertItemsEqual(sass_watcher_args[1], self.expected_sass_directories)
def test_watch_theme_assets(self):
"""
Test the Paver watch asset tasks with theming enabled.
"""
self.expected_sass_directories.extend([
path(TEST_THEME_DIR) / 'lms/static/sass',
path(TEST_THEME_DIR) / 'lms/static/sass/partials',
path(TEST_THEME_DIR) / 'cms/static/sass',
path(TEST_THEME_DIR) / 'cms/static/sass/partials',
])
with patch('pavelib.assets.SassWatcher.register') as mock_register:
with patch('pavelib.assets.Observer.start'):
with patch('pavelib.assets.execute_webpack_watch') as mock_webpack:
call_task(
'pavelib.assets.watch_assets',
options={
"background": True,
"theme_dirs": [TEST_THEME_DIR.dirname()],
"themes": [TEST_THEME_DIR.basename()]
},
)
self.assertEqual(mock_register.call_count, 2)
self.assertEqual(mock_webpack.call_count, 1)
sass_watcher_args = mock_register.call_args_list[0][0]
self.assertIsInstance(sass_watcher_args[0], Observer)
self.assertIsInstance(sass_watcher_args[1], list)
self.assertItemsEqual(sass_watcher_args[1], self.expected_sass_directories)
@ddt.ddt
class TestCollectAssets(PaverTestCase):
"""
Test the collectstatic process call.
    ddt data is organized as follows:
* debug: whether or not collect_assets is called with the debug flag
* specified_log_location: used when collect_assets is called with a specific
log location for collectstatic output
* expected_log_location: the expected string to be used for piping collectstatic logs
"""
@ddt.data(
[{
"collect_log_args": {}, # Test for default behavior
"expected_log_location": "> /dev/null"
}],
[{
"collect_log_args": {COLLECTSTATIC_LOG_DIR_ARG: "/foo/bar"},
"expected_log_location": "> /foo/bar/lms-collectstatic.log"
}], # can use specified log location
[{
"systems": ["lms", "cms"],
"collect_log_args": {},
"expected_log_location": "> /dev/null"
}], # multiple systems can be called
)
@ddt.unpack
def test_collect_assets(self, options):
"""
Ensure commands sent to the environment for collect_assets are as expected
"""
        collect_log_kwargs = options.get("collect_log_args", {})
        log_loc = options.get("expected_log_location", "> /dev/null")
        systems = options.get("systems", ["lms"])
        # An empty kwargs dict leaves collect_assets with its default log
        # handling, so a single call covers every ddt case.
        collect_assets(
            systems,
            Env.DEVSTACK_SETTINGS,
            **collect_log_kwargs
        )
self._assert_correct_messages(log_location=log_loc, systems=systems)
def test_collect_assets_debug(self):
"""
When the method is called specifically with None for the collectstatic log dir, then
it should run in debug mode and pipe to console.
"""
expected_log_loc = ""
systems = ["lms"]
kwargs = {COLLECTSTATIC_LOG_DIR_ARG: None}
collect_assets(systems, Env.DEVSTACK_SETTINGS, **kwargs)
self._assert_correct_messages(log_location=expected_log_loc, systems=systems)
def _assert_correct_messages(self, log_location, systems):
"""
Asserts that the expected commands were run.
We just extract the pieces we care about here instead of specifying an
exact command, so that small arg changes don't break this test.
"""
        for i, system in enumerate(systems):
            msg = self.task_messages[i]
            self.assertTrue(msg.startswith('python manage.py {}'.format(system)))
            self.assertIn(' collectstatic ', msg)
            self.assertIn('--settings={}'.format(Env.DEVSTACK_SETTINGS), msg)
            self.assertTrue(msg.endswith(' {}'.format(log_location)))
@ddt.ddt
class TestUpdateAssetsTask(PaverTestCase):
"""
These are nearly end-to-end tests, because they observe output from the commandline request,
but do not actually execute the commandline on the terminal/process
"""
@ddt.data(
[{"expected_substring": "> /dev/null"}], # go to /dev/null by default
[{"cmd_args": ["--debug"], "expected_substring": "collectstatic"}] # TODO: make this regex
)
@ddt.unpack
def test_update_assets_task_collectstatic_log_arg(self, options):
"""
Scoped test that only looks at what is passed to the collecstatic options
"""
cmd_args = options.get("cmd_args", [""])
expected_substring = options.get("expected_substring", None)
call_task('pavelib.assets.update_assets', args=cmd_args)
self.assertTrue(
self._is_substring_in_list(self.task_messages, expected_substring),
msg="{substring} not found in messages".format(substring=expected_substring)
)
def _is_substring_in_list(self, messages_list, expected_substring):
"""
Return true a given string is somewhere in a list of strings
"""
for message in messages_list:
if expected_substring in message:
return True
return False
| agpl-3.0 | 6,135,130,765,977,636,000 | 38.19403 | 99 | 0.588982 | false |
drtuxwang/system-config | bin/battery.py | 1 | 3916 | #!/usr/bin/env python3
"""
Monitor laptop battery
"""
import argparse
import signal
import sys
from typing import List
import power_mod
class Options:
"""
Options class
"""
def __init__(self) -> None:
self._args: argparse.Namespace = None
self.parse(sys.argv)
def get_summary_flag(self) -> bool:
"""
Return summary flag.
"""
return self._args.summary_flag
def _parse_args(self, args: List[str]) -> None:
parser = argparse.ArgumentParser(description='Monitor laptop battery.')
parser.add_argument(
'-s',
action='store_true',
dest='summary_flag',
help='Show summary'
)
self._args = parser.parse_args(args)
def parse(self, args: List[str]) -> None:
"""
Parse arguments
"""
self._parse_args(args[1:])
class Main:
"""
Main class
"""
def __init__(self) -> None:
try:
self.config()
sys.exit(self.run())
except (EOFError, KeyboardInterrupt):
sys.exit(114)
except SystemExit as exception:
sys.exit(exception)
@staticmethod
def config() -> None:
"""
Configure program
"""
if hasattr(signal, 'SIGPIPE'):
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
@staticmethod
def _show_battery(battery: power_mod.Battery) -> None:
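        # Convention assumed from the branches below: get_charge() returns
        # '-' while discharging and '+' while charging; any other value is
        # reported as "Unused".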
model = (
battery.get_oem() + ' ' + battery.get_name() + ' ' +
battery.get_type() + ' ' + str(battery.get_capacity_max()) +
'mAh/' + str(battery.get_voltage()) + 'mV'
)
if battery.get_charge() == '-':
state = '-'
if battery.get_rate() > 0:
state += str(battery.get_rate()) + 'mA'
if battery.get_voltage() > 0:
power = '{0:4.2f}'.format(float(
battery.get_rate()*battery.get_voltage()) / 1000000)
state += ', ' + str(power) + 'W'
hours = '{0:3.1f}'.format(float(
battery.get_capacity()) / battery.get_rate())
state += ', ' + str(hours) + 'h'
elif battery.get_charge() == '+':
state = '+'
if battery.get_rate() > 0:
state += str(battery.get_rate()) + 'mA'
if battery.get_voltage() > 0:
power = '{0:4.2f}'.format(float(
battery.get_rate()*battery.get_voltage()) / 1000000)
state += ', ' + str(power) + 'W'
else:
state = 'Unused'
print(
model + " = ", battery.get_capacity(),
"mAh [" + state + "]",
sep=""
)
@staticmethod
def _show_summary(batteries: List[power_mod.Battery]) -> None:
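        # Sum the capacity over all detected batteries and compute the net
        # rate: discharging batteries subtract from it, charging ones add.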
capacity = 0
rate = 0
for battery in batteries:
if battery.is_exist():
capacity += battery.get_capacity()
if battery.get_charge() == '-':
rate -= battery.get_rate()
elif battery.get_charge() == '+':
rate += battery.get_rate()
if capacity:
if rate:
print("{0:d}mAh [{1:+d}mAh]".format(capacity, rate))
else:
print("{0:d}mAh [Unused]".format(capacity))
def run(self) -> int:
"""
Start program
"""
options = Options()
batteries = power_mod.Battery.factory()
if options.get_summary_flag():
self._show_summary(batteries)
else:
for battery in batteries:
if battery.is_exist():
self._show_battery(battery)
return 0
if __name__ == '__main__':
if '--pydoc' in sys.argv:
help(__name__)
else:
Main()
| gpl-2.0 | -5,714,600,251,821,818,000 | 26.384615 | 79 | 0.470123 | false |
karllessard/tensorflow | tensorflow/python/keras/layers/preprocessing/text_vectorization.py | 1 | 29394 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras text vectorization preprocessing layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.layers.preprocessing import category_encoding
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_string_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_string_ops
from tensorflow.python.util.tf_export import keras_export
LOWER_AND_STRIP_PUNCTUATION = "lower_and_strip_punctuation"
SPLIT_ON_WHITESPACE = "whitespace"
TFIDF = category_encoding.TFIDF
INT = category_encoding.INT
BINARY = category_encoding.BINARY
COUNT = category_encoding.COUNT
# This is an explicit regex of all the tokens that will be stripped if
# LOWER_AND_STRIP_PUNCTUATION is set. If an application requires other
# stripping, a Callable should be passed into the 'standardize' arg.
DEFAULT_STRIP_REGEX = r'[!"#$%&()\*\+,-\./:;<=>?@\[\\\]^_`{|}~\']'
# The string tokens in the extracted vocabulary
_VOCAB_NAME = "vocab"
# The inverse-document-frequency weights
_IDF_NAME = "idf"
# The IDF data for the OOV token
_OOV_IDF_NAME = "oov_idf"
# The string tokens in the full vocabulary
_ACCUMULATOR_VOCAB_NAME = "vocab"
# The total counts of each token in the vocabulary
_ACCUMULATOR_COUNTS_NAME = "counts"
# The number of documents / examples that each token appears in.
_ACCUMULATOR_DOCUMENT_COUNTS = "document_counts"
# The total number of documents / examples in the dataset.
_ACCUMULATOR_NUM_DOCUMENTS = "num_documents"
@keras_export(
"keras.layers.experimental.preprocessing.TextVectorization", v1=[])
class TextVectorization(base_preprocessing_layer.CombinerPreprocessingLayer):
"""Text vectorization layer.
This layer has basic options for managing text in a Keras model. It
transforms a batch of strings (one sample = one string) into either a list of
token indices (one sample = 1D tensor of integer token indices) or a dense
representation (one sample = 1D tensor of float values representing data about
the sample's tokens).
If desired, the user can call this layer's adapt() method on a dataset.
When this layer is adapted, it will analyze the dataset, determine the
frequency of individual string values, and create a 'vocabulary' from them.
This vocabulary can have unlimited size or be capped, depending on the
configuration options for this layer; if there are more unique values in the
input than the maximum vocabulary size, the most frequent terms will be used
to create the vocabulary.
The processing of each sample contains the following steps:
1. standardize each sample (usually lowercasing + punctuation stripping)
2. split each sample into substrings (usually words)
3. recombine substrings into tokens (usually ngrams)
4. index tokens (associate a unique int value with each token)
5. transform each sample using this index, either into a vector of ints or
a dense float vector.
Some notes on passing Callables to customize splitting and normalization for
this layer:
1. Any callable can be passed to this Layer, but if you want to serialize
this object you should only pass functions that are registered Keras
serializables (see `tf.keras.utils.register_keras_serializable` for more
details).
2. When using a custom callable for `standardize`, the data received
by the callable will be exactly as passed to this layer. The callable
should return a tensor of the same shape as the input.
3. When using a custom callable for `split`, the data received by the
callable will have the 1st dimension squeezed out - instead of
`[["string to split"], ["another string to split"]]`, the Callable will
see `["string to split", "another string to split"]`. The callable should
return a Tensor with the first dimension containing the split tokens -
  in this example, we should see something like `[["string", "to", "split"],
["another", "string", "to", "split"]]`. This makes the callable site
natively compatible with `tf.strings.split()`.
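  For instance, custom callables for `standardize` and `split` might look
  like this minimal sketch (the function names and the regex are
  illustrative, not part of this layer's API):
  >>> def custom_standardize(input_string):
  ...   return tf.strings.regex_replace(
  ...       tf.strings.lower(input_string), r"<[^>]+>", " ")
  >>> def custom_split(input_string):
  ...   return tf.strings.split(input_string, sep=",")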
Attributes:
max_tokens: The maximum size of the vocabulary for this layer. If None,
there is no cap on the size of the vocabulary. Note that this vocabulary
contains 1 OOV token, so the effective number of tokens is `(max_tokens -
1 - (1 if output == "int" else 0))`.
standardize: Optional specification for standardization to apply to the
input text. Values can be None (no standardization),
'lower_and_strip_punctuation' (lowercase and remove punctuation) or a
Callable. Default is 'lower_and_strip_punctuation'.
split: Optional specification for splitting the input text. Values can be
None (no splitting), 'whitespace' (split on ASCII whitespace), or a
Callable. The default is 'whitespace'.
ngrams: Optional specification for ngrams to create from the possibly-split
input text. Values can be None, an integer or tuple of integers; passing
an integer will create ngrams up to that integer, and passing a tuple of
integers will create ngrams for the specified values in the tuple. Passing
None means that no ngrams will be created.
output_mode: Optional specification for the output of the layer. Values can
be "int", "binary", "count" or "tf-idf", configuring the layer as follows:
"int": Outputs integer indices, one integer index per split string
token. When output == "int", 0 is reserved for masked locations;
this reduces the vocab size to max_tokens-2 instead of max_tokens-1
"binary": Outputs a single int array per batch, of either vocab_size or
max_tokens size, containing 1s in all elements where the token mapped
to that index exists at least once in the batch item.
"count": As "binary", but the int array contains a count of the number
of times the token at that index appeared in the batch item.
"tf-idf": As "binary", but the TF-IDF algorithm is applied to find the
value in each token slot.
output_sequence_length: Only valid in INT mode. If set, the output will have
its time dimension padded or truncated to exactly `output_sequence_length`
values, resulting in a tensor of shape [batch_size,
output_sequence_length] regardless of how many tokens resulted from the
splitting step. Defaults to None.
pad_to_max_tokens: Only valid in "binary", "count", and "tf-idf" modes. If
True, the output will have its feature axis padded to `max_tokens` even if
the number of unique tokens in the vocabulary is less than max_tokens,
resulting in a tensor of shape [batch_size, max_tokens] regardless of
vocabulary size. Defaults to True.
vocabulary: An optional list of vocabulary terms, or a path to a text file
containing a vocabulary to load into this layer. The file should contain
one token per line. If the list or file contains the same token multiple
times, an error will be thrown.
Example:
This example instantiates a TextVectorization layer that lowercases text,
splits on whitespace, strips punctuation, and outputs integer vocab indices.
>>> text_dataset = tf.data.Dataset.from_tensor_slices(["foo", "bar", "baz"])
>>> max_features = 5000 # Maximum vocab size.
>>> max_len = 4 # Sequence length to pad the outputs to.
>>> embedding_dims = 2
>>>
>>> # Create the layer.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_features,
... output_mode='int',
... output_sequence_length=max_len)
>>>
>>> # Now that the vocab layer has been created, call `adapt` on the text-only
>>> # dataset to create the vocabulary. You don't have to batch, but for large
>>> # datasets this means we're not keeping spare copies of the dataset.
>>> vectorize_layer.adapt(text_dataset.batch(64))
>>>
>>> # Create the model that uses the vectorize text layer
>>> model = tf.keras.models.Sequential()
>>>
>>> # Start by creating an explicit input layer. It needs to have a shape of
>>> # (1,) (because we need to guarantee that there is exactly one string
>>> # input per batch), and the dtype needs to be 'string'.
>>> model.add(tf.keras.Input(shape=(1,), dtype=tf.string))
>>>
>>> # The first layer in our model is the vectorization layer. After this
>>> # layer, we have a tensor of shape (batch_size, max_len) containing vocab
>>> # indices.
>>> model.add(vectorize_layer)
>>>
>>> # Now, the model can map strings to integers, and you can add an embedding
>>> # layer to map these integers to learned embeddings.
>>> input_data = [["foo qux bar"], ["qux baz"]]
>>> model.predict(input_data)
array([[2, 1, 4, 0],
[1, 3, 0, 0]])
Example:
This example instantiates a TextVectorization layer by passing a list
of vocabulary terms to the layer's __init__ method.
>>> vocab_data = ["earth", "wind", "and", "fire"]
>>> max_len = 4 # Sequence length to pad the outputs to.
>>>
>>> # Create the layer, passing the vocab directly. You can also pass the
>>> # vocabulary arg a path to a file containing one vocabulary word per
>>> # line.
>>> vectorize_layer = TextVectorization(
... max_tokens=max_features,
... output_mode='int',
... output_sequence_length=max_len,
... vocabulary=vocab_data)
>>>
>>> # Because we've passed the vocabulary directly, we don't need to adapt
>>> # the layer - the vocabulary is already set. The vocabulary contains the
>>> # padding token ('') and OOV token ('[UNK]') as well as the passed tokens.
>>> vectorize_layer.get_vocabulary()
['', '[UNK]', 'earth', 'wind', 'and', 'fire']
"""
# TODO(momernick): Add an examples section to the docstring.
def __init__(self,
max_tokens=None,
standardize=LOWER_AND_STRIP_PUNCTUATION,
split=SPLIT_ON_WHITESPACE,
ngrams=None,
output_mode=INT,
output_sequence_length=None,
pad_to_max_tokens=True,
vocabulary=None,
**kwargs):
# This layer only applies to string processing, and so should only have
# a dtype of 'string'.
if "dtype" in kwargs and kwargs["dtype"] != dtypes.string:
raise ValueError("TextVectorization may only have a dtype of string.")
elif "dtype" not in kwargs:
kwargs["dtype"] = dtypes.string
# 'standardize' must be one of (None, LOWER_AND_STRIP_PUNCTUATION, callable)
layer_utils.validate_string_arg(
standardize,
allowable_strings=(LOWER_AND_STRIP_PUNCTUATION),
layer_name="TextVectorization",
arg_name="standardize",
allow_none=True,
allow_callables=True)
# 'split' must be one of (None, SPLIT_ON_WHITESPACE, callable)
layer_utils.validate_string_arg(
split,
allowable_strings=(SPLIT_ON_WHITESPACE),
layer_name="TextVectorization",
arg_name="split",
allow_none=True,
allow_callables=True)
# 'output_mode' must be one of (None, INT, COUNT, BINARY, TFIDF)
layer_utils.validate_string_arg(
output_mode,
allowable_strings=(INT, COUNT, BINARY, TFIDF),
layer_name="TextVectorization",
arg_name="output_mode",
allow_none=True)
# 'ngrams' must be one of (None, int, tuple(int))
if not (ngrams is None or
isinstance(ngrams, int) or
isinstance(ngrams, tuple) and
all(isinstance(item, int) for item in ngrams)):
raise ValueError(("`ngrams` must be None, an integer, or a tuple of "
"integers. Got %s") % (ngrams,))
# 'output_sequence_length' must be one of (None, int) and is only
# set if output_mode is INT.
if (output_mode == INT and not (isinstance(output_sequence_length, int) or
(output_sequence_length is None))):
raise ValueError("`output_sequence_length` must be either None or an "
"integer when `output_mode` is 'int'. "
"Got %s" % output_sequence_length)
if output_mode != INT and output_sequence_length is not None:
raise ValueError("`output_sequence_length` must not be set if "
"`output_mode` is not 'int'.")
# If max_tokens is set, the value must be greater than 1 - otherwise we
# are creating a 0-element vocab, which doesn't make sense.
if max_tokens is not None and max_tokens < 1:
raise ValueError("max_tokens must be > 1.")
self._max_tokens = max_tokens
# In INT mode, the zero value is reserved for padding (per Keras standard
# padding approaches). In non-INT modes, there is no padding so we can set
# the OOV value to zero instead of one.
self._oov_value = 1 if output_mode == INT else 0
self._standardize = standardize
self._split = split
self._ngrams_arg = ngrams
if isinstance(ngrams, int):
self._ngrams = tuple(range(1, ngrams + 1))
else:
self._ngrams = ngrams
self._output_mode = output_mode
self._output_sequence_length = output_sequence_length
self._pad_to_max = pad_to_max_tokens
self._vocab_size = 0
self._called = False
super(TextVectorization, self).__init__(
combiner=None,
**kwargs)
base_preprocessing_layer._kpl_gauge.get_cell("V2").set("TextVectorization")
mask_token = "" if output_mode in [None, INT] else None
self._index_lookup_layer = self._get_index_lookup_class()(
max_tokens=max_tokens, mask_token=mask_token, vocabulary=vocabulary)
# If this layer is configured for string or integer output, we do not
# create a vectorization layer (as the output is not vectorized).
if self._output_mode in [None, INT]:
self._vectorize_layer = None
else:
if max_tokens is not None and self._pad_to_max:
max_elements = max_tokens
else:
max_elements = None
self._vectorize_layer = self._get_vectorization_class()(
max_tokens=max_elements, output_mode=self._output_mode)
# These are V1/V2 shim points. There are V1 implementations in the V1 class.
def _get_vectorization_class(self):
return category_encoding.CategoryEncoding
def _get_index_lookup_class(self):
return string_lookup.StringLookup
# End of V1/V2 shim points.
def _assert_same_type(self, expected_type, values, value_name):
if dtypes.as_dtype(expected_type) != dtypes.as_dtype(values.dtype):
raise RuntimeError("Expected %s type %s, got %s" %
(value_name, expected_type, values.dtype))
def _convert_to_ndarray(self, x):
return np.array(x) if isinstance(x, (list, tuple)) else x
def compute_output_shape(self, input_shape):
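    # Shape summary: "binary", "count" and "tf-idf" always yield a fixed
    # [batch_size, max_tokens] vector, while "int" follows the input shape,
    # adding or padding a time axis when splitting or a fixed
    # output_sequence_length applies.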
if self._output_mode != INT:
return tensor_shape.TensorShape([input_shape[0], self._max_tokens])
if self._output_mode == INT and self._split is None:
if len(input_shape) == 1:
input_shape = tuple(input_shape) + (1,)
return tensor_shape.TensorShape(input_shape)
if self._output_mode == INT and self._split is not None:
input_shape = list(input_shape)
if len(input_shape) == 1:
input_shape = input_shape + [self._output_sequence_length]
else:
input_shape[1] = self._output_sequence_length
return tensor_shape.TensorShape(input_shape)
def compute_output_signature(self, input_spec):
output_shape = self.compute_output_shape(input_spec.shape.as_list())
output_dtype = dtypes.int64 if self._output_mode == INT else K.floatx()
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
def adapt(self, data, reset_state=True):
"""Fits the state of the preprocessing layer to the dataset.
Overrides the default adapt method to apply relevant preprocessing to the
inputs before passing to the combiner.
Arguments:
data: The data to train on. It can be passed either as a tf.data Dataset,
as a NumPy array, a string tensor, or as a list of texts.
reset_state: Optional argument specifying whether to clear the state of
the layer at the start of the call to `adapt`. This must be True for
this layer, which does not support repeated calls to `adapt`.
"""
if not reset_state:
raise ValueError("TextVectorization does not support streaming adapts.")
# Build the layer explicitly with the original data shape instead of relying
# on an implicit call to `build` in the base layer's `adapt`, since
# preprocessing changes the input shape.
if isinstance(data, (list, tuple, np.ndarray)):
data = ops.convert_to_tensor_v2_with_dispatch(data)
if isinstance(data, ops.Tensor):
if data.shape.rank == 1:
data = array_ops.expand_dims(data, axis=-1)
self.build(data.shape)
preprocessed_inputs = self._preprocess(data)
elif isinstance(data, dataset_ops.DatasetV2):
# TODO(momernick): Replace this with a more V2-friendly API.
shape = dataset_ops.get_legacy_output_shapes(data)
if not isinstance(shape, tensor_shape.TensorShape):
raise ValueError("The dataset passed to 'adapt' must contain a single "
"tensor value.")
if shape.rank == 0:
data = data.map(lambda tensor: array_ops.expand_dims(tensor, 0))
shape = dataset_ops.get_legacy_output_shapes(data)
if shape.rank == 1:
data = data.map(lambda tensor: array_ops.expand_dims(tensor, -1))
self.build(dataset_ops.get_legacy_output_shapes(data))
preprocessed_inputs = data.map(self._preprocess)
else:
raise ValueError(
"adapt() requires a Dataset or an array as input, got {}".format(
type(data)))
self._index_lookup_layer.adapt(preprocessed_inputs)
if self._vectorize_layer:
if isinstance(data, ops.Tensor):
integer_data = self._index_lookup_layer(preprocessed_inputs)
else:
integer_data = preprocessed_inputs.map(self._index_lookup_layer)
self._vectorize_layer.adapt(integer_data)
def get_vocabulary(self):
return self._index_lookup_layer.get_vocabulary()
def get_config(self):
# This does not include the 'vocabulary' arg, since if the vocab was passed
# at init time it's now stored in variable state - we don't need to
# pull it off disk again.
config = {
"max_tokens": self._max_tokens,
"standardize": self._standardize,
"split": self._split,
"ngrams": self._ngrams_arg,
"output_mode": self._output_mode,
"output_sequence_length": self._output_sequence_length,
"pad_to_max_tokens": self._pad_to_max,
}
base_config = super(TextVectorization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def count_params(self):
# This method counts the number of scalars in the weights of this layer.
# Since this layer doesn't have any /actual/ weights (in that there's
# nothing in this layer that can be trained - we only use the weight
# abstraction for ease of saving!) we return 0.
return 0
def set_vocabulary(self,
vocab,
df_data=None,
oov_df_value=None):
"""Sets vocabulary (and optionally document frequency) data for this layer.
This method sets the vocabulary and DF data for this layer directly, instead
of analyzing a dataset through 'adapt'. It should be used whenever the vocab
(and optionally document frequency) information is already known. If
vocabulary data is already present in the layer, this method will replace
it.
Arguments:
vocab: An array of string tokens.
df_data: An array of document frequency data. Only necessary if the layer
output_mode is TFIDF.
oov_df_value: The document frequency of the OOV token. Only necessary if
output_mode is TFIDF.
Raises:
ValueError: If there are too many inputs, the inputs do not match, or
input data is missing.
RuntimeError: If the vocabulary cannot be set when this function is
        called. This happens in "binary", "count", and "tfidf" modes, when
        "pad_to_max_tokens" is False and the layer itself has already been
called.
"""
if self._output_mode != TFIDF and df_data is not None:
raise ValueError("df_data should only be set if output_mode is TFIDF. "
"output_mode is %s." % self._output_mode)
if (self._output_mode in [BINARY, COUNT, TFIDF] and self._called and
not self._pad_to_max):
raise RuntimeError(("When using TextVectorization in {mode} mode and "
"pad_to_max_tokens is False, the vocabulary cannot "
"be changed after the layer is "
"called.").format(mode=self._output_mode))
self._index_lookup_layer.set_vocabulary(vocab)
# When doing raw or integer output, we don't have a Vectorize layer to
# manage. In this case, we can return directly.
if self._output_mode in [None, INT]:
return
if not self._pad_to_max or self._max_tokens is None:
num_tokens = self._index_lookup_layer.vocab_size()
self._vectorize_layer.set_num_elements(num_tokens)
if self._output_mode == TFIDF:
if df_data is None:
raise ValueError("df_data must be set if output_mode is TFIDF")
if len(vocab) != len(df_data):
raise ValueError("df_data must be the same length as vocab. "
"len(df_data) is %s, len(vocab) is %s" %
(len(vocab), len(df_data)))
if oov_df_value is None:
raise ValueError("You must pass an oov_df_value when output_mode is "
"TFIDF.")
df_data = self._convert_to_ndarray(df_data)
if not isinstance(oov_df_value, np.ndarray):
oov_df_value = np.array([oov_df_value])
df_data = np.insert(df_data, 0, oov_df_value)
self._vectorize_layer.set_tfidf_data(df_data)
def build(self, input_shape):
    # We have to use 'and not ... == 1' here rather than 'and ... != 1':
    # comparing an undefined shape axis yields None instead of a boolean, so
    # '!= 1' would make the whole expression falsy when the shape is
    # undefined; 'not ... == 1' evaluates to True in that case, as required.
if self._split is not None:
if input_shape.ndims > 1 and not input_shape[-1] == 1: # pylint: disable=g-comparison-negation
raise RuntimeError(
"When using TextVectorization to tokenize strings, the innermost "
"dimension of the input array must be 1, got shape "
"{}".format(input_shape))
super(TextVectorization, self).build(input_shape)
def _set_state_variables(self, updates):
if not self.built:
raise RuntimeError("_set_state_variables() must be called after build().")
if self._output_mode == TFIDF:
self.set_vocabulary(
updates[_VOCAB_NAME],
updates[_IDF_NAME],
updates[_OOV_IDF_NAME])
else:
self.set_vocabulary(updates[_VOCAB_NAME])
def _preprocess(self, inputs):
if self._standardize == LOWER_AND_STRIP_PUNCTUATION:
if tf_utils.is_ragged(inputs):
lowercase_inputs = ragged_functional_ops.map_flat_values(
gen_string_ops.string_lower, inputs)
# Depending on configuration, we may never touch the non-data tensor
# in the ragged inputs tensor. If that is the case, and this is the
# only layer in the keras model, running it will throw an error.
# To get around this, we wrap the result in an identity.
lowercase_inputs = array_ops.identity(lowercase_inputs)
else:
lowercase_inputs = gen_string_ops.string_lower(inputs)
inputs = string_ops.regex_replace(lowercase_inputs, DEFAULT_STRIP_REGEX,
"")
elif callable(self._standardize):
inputs = self._standardize(inputs)
elif self._standardize is not None:
raise ValueError(("%s is not a supported standardization. "
"TextVectorization supports the following options "
"for `standardize`: None, "
"'lower_and_strip_punctuation', or a "
"Callable.") % self._standardize)
if self._split is not None:
# If we are splitting, we validate that the 1st axis is of dimension 1 and
# so can be squeezed out. We do this here instead of after splitting for
# performance reasons - it's more expensive to squeeze a ragged tensor.
if inputs.shape.ndims > 1:
inputs = array_ops.squeeze(inputs, axis=-1)
if self._split == SPLIT_ON_WHITESPACE:
# This treats multiple whitespaces as one whitespace, and strips leading
# and trailing whitespace.
inputs = ragged_string_ops.string_split_v2(inputs)
elif callable(self._split):
inputs = self._split(inputs)
else:
raise ValueError(
("%s is not a supported splitting."
"TextVectorization supports the following options "
"for `split`: None, 'whitespace', or a Callable.") % self._split)
# Note that 'inputs' here can be either ragged or dense depending on the
# configuration choices for this Layer. The strings.ngrams op, however, does
# support both ragged and dense inputs.
if self._ngrams is not None:
inputs = ragged_string_ops.ngrams(
inputs, ngram_width=self._ngrams, separator=" ")
return inputs
def call(self, inputs):
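    # Overall flow: standardize/split/ngram the inputs in _preprocess, map
    # tokens to vocabulary indices, then either return the integer indices
    # (optionally padded or trimmed to output_sequence_length) or hand off
    # to the vectorization layer for binary/count/tf-idf output.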
if isinstance(inputs, (list, tuple, np.ndarray)):
inputs = ops.convert_to_tensor_v2_with_dispatch(inputs)
self._called = True
inputs = self._preprocess(inputs)
# If we're not doing any output processing, return right away.
if self._output_mode is None:
return inputs
indexed_data = self._index_lookup_layer(inputs)
if self._output_mode == INT:
# Once we have the dense tensor, we can return it if we weren't given a
# fixed output sequence length. If we were, though, we have to dynamically
# choose whether to pad or trim it based on each tensor.
# We need to convert to dense if we have a ragged tensor.
if tf_utils.is_ragged(indexed_data):
dense_data = indexed_data.to_tensor(default_value=0)
else:
dense_data = indexed_data
if self._output_sequence_length is None:
return dense_data
else:
sequence_len = K.shape(dense_data)[1]
pad_amt = self._output_sequence_length - sequence_len
pad_fn = lambda: array_ops.pad(dense_data, [[0, 0], [0, pad_amt]])
slice_fn = lambda: dense_data[:, :self._output_sequence_length]
output_tensor = control_flow_ops.cond(
sequence_len < self._output_sequence_length,
true_fn=pad_fn,
false_fn=slice_fn)
output_shape = output_tensor.shape.as_list()
output_shape[-1] = self._output_sequence_length
output_tensor.set_shape(tensor_shape.TensorShape(output_shape))
return output_tensor
# If we're not returning integers here, we rely on the vectorization layer
# to create the output.
return self._vectorize_layer(indexed_data)
| apache-2.0 | 7,755,867,636,308,005,000 | 44.082822 | 101 | 0.666667 | false |
entropyx/callme | callme/proxy.py | 1 | 9608 | # Copyright (c) 2009-2014, Christian Haintz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of callme nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import socket
import time
import uuid
import kombu
from callme import base
from callme import exceptions as exc
from callme import protocol as pr
LOG = logging.getLogger(__name__)
REQUEST_TIMEOUT = 60
class Proxy(base.Base):
"""This Proxy class is used to handle the communication with the rpc
server.
:keyword server_id: default id of the Server (can be declared later
see :func:`use_server`)
:keyword amqp_host: the host of where the AMQP Broker is running
:keyword amqp_user: the username for the AMQP Broker
:keyword amqp_password: the password for the AMQP Broker
:keyword amqp_vhost: the virtual host of the AMQP Broker
:keyword amqp_port: the port of the AMQP Broker
:keyword ssl: use SSL connection for the AMQP Broker
:keyword timeout: default timeout for calls in seconds
:keyword durable: make all exchanges and queues durable
:keyword auto_delete: delete server queues after all connections are closed
not applicable for client queues
"""
def __init__(self,
server_exchange_name,
server_queue_name=None,
server_routing_key=None,
amqp_host='localhost',
amqp_user='guest',
amqp_password='guest',
amqp_vhost='/',
amqp_port=5672,
ssl=False,
timeout=REQUEST_TIMEOUT,
durable=False,
auto_delete=True,
):
super(Proxy, self).__init__(amqp_host, amqp_user, amqp_password,
amqp_vhost, amqp_port, ssl)
self._uuid = str(uuid.uuid4())
self._server_exchange_name = server_exchange_name
self._server_queue_name = server_queue_name
self._server_routing_key = server_routing_key
self._timeout = timeout
self._is_received = False
self._corr_id = None
self._response = None
self._exchange_name = 'client_{0}_ex_{1}'.format(self._server_exchange_name, self._uuid)
self._queue_name = 'client_{0}_queue_{1}'.format(self._server_queue_name, self._uuid) if self._server_queue_name else ''
self._durable = durable
self._auto_delete = auto_delete
# create queue
queue = self._make_queue(self._queue_name, None,
durable=self._durable,
auto_delete=True)
# create consumer
consumer = kombu.Consumer(channel=self._conn,
queues=queue,
callbacks=[self._on_response],
accept=['pickle'])
consumer.consume()
def use_server(self, exchange_name=None, queue_name=None, timeout=None):
"""Use the specified server and set an optional timeout for the method
call.
Typical use:
>> my_proxy.use_server('foo_exchange','foo.receive').a_remote_func()
:keyword exchange_name: the exchange_name where the call will be made
:keyword queue_name: the queue_name where the call will be made
:keyword timeout: set or overrides the call timeout in seconds
:rtype: return `self` to cascade further calls
"""
        if exchange_name is not None:
            self._server_exchange_name = exchange_name
        if queue_name is not None:
            self._server_queue_name = queue_name
if timeout is not None:
self._timeout = timeout
return self
def _on_response(self, response, message):
"""This method is automatically called when a response is incoming and
decides if it is the message we are waiting for - the message with the
result.
:param response: the body of the amqp message already deserialized
by kombu
:param message: the plain amqp kombu.message with additional
information
"""
LOG.debug("Got response: {0}".format(response))
try:
message.ack()
except Exception:
LOG.exception("Failed to acknowledge AMQP message.")
else:
LOG.debug("AMQP message acknowledged.")
# check response type
if not isinstance(response, pr.RpcResponse):
LOG.warning("Response is not a `RpcResponse` instance.")
return
# process response
try:
if self._corr_id == message.properties['correlation_id']:
self._response = response
self._is_received = True
except KeyError:
LOG.error("Message has no `correlation_id` property.")
def __request(self, func_name, func_args, func_kwargs):
"""The remote-method-call execution function.
:param func_name: name of the method that should be executed
:param func_args: arguments for the remote-method
:param func_kwargs: keyword arguments for the remote-method
:type func_name: string
:type func_args: list of parameters
:rtype: result of the method
"""
self._corr_id = str(uuid.uuid4())
request = pr.RpcRequest(func_name, func_args, func_kwargs)
LOG.debug("Publish request: {0}".format(request))
# publish request
with kombu.producers[self._conn].acquire(block=True) as producer:
            exchange = self._make_exchange(
                self._server_exchange_name,
                type='topic',
                durable=self._durable,
                auto_delete=self._auto_delete)
producer.publish(body=request,
serializer='pickle',
exchange=exchange,
reply_to=self._queue_name,
correlation_id=self._corr_id,
routing_key=self._server_routing_key)
# start waiting for the response
self._wait_for_result()
self._is_received = False
        # handle response
result = self._response.result
LOG.debug("Result: {!r}".format(result))
if self._response.is_exception:
raise result
return result
def _wait_for_result(self):
"""Waits for the result from the server, checks every second if
a timeout occurred. If a timeout occurred - the `RpcTimeout` exception
will be raised.
"""
start_time = time.time()
while not self._is_received:
try:
self._conn.drain_events(timeout=1)
except socket.timeout:
if self._timeout > 0:
if time.time() - start_time > self._timeout:
raise exc.RpcTimeout("RPC Request timeout")
def __getattr__(self, name):
"""This method is invoked, if a method is being called, which doesn't
exist on Proxy. It is used for RPC, to get the function which should
be called on the Server.
"""
# magic method dispatcher
LOG.debug("Recursion: {0}".format(name))
return _Method(self.__request, name)
# ===========================================================================
class _Method(object):
"""This class is used to realize remote-method-calls.
:param send: name of the function that should be executed on Proxy
:param name: name of the method which should be called on the Server
"""
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self._send = send
self._name = name
def __getattr__(self, name):
return _Method(self._send, "{0}.{1}".format(self._name, name))
def __call__(self, *args, **kw):
return self._send(self._name, args, kw)
# ===========================================================================
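# Editor's usage sketch (not in the original module): assumes a callme
# Server is listening on exchange 'fooserver_ex' with routing key
# 'fooserver' and exposes a remote `add`; all names are illustrative.
if __name__ == '__main__':
    proxy = Proxy('fooserver_ex', server_routing_key='fooserver',
                  amqp_host='localhost', timeout=10)
    # Attribute access returns a _Method; calling it publishes an RpcRequest
    # and blocks in _wait_for_result until the reply arrives or times out.
    print(proxy.use_server(timeout=30).add(1, 2))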
| bsd-3-clause | 4,423,005,917,173,876,700 | 38.216327 | 128 | 0.600125 | false |
etamponi/resilient-protocol | resilient/ensemble.py | 1 | 6786 | import hashlib
import numpy
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.tree.tree import DecisionTreeClassifier
from sklearn.utils.fixes import unique
from sklearn import preprocessing
from sklearn.utils.random import check_random_state
from resilient.logger import Logger
from resilient.selection_strategies import SelectBestPercent
from resilient.train_set_generators import RandomCentroidPDFTrainSetGenerator
from resilient.weighting_strategies import CentroidBasedWeightingStrategy
__author__ = 'Emanuele Tamponi <[email protected]>'
MAX_INT = numpy.iinfo(numpy.int32).max
class TrainingStrategy(BaseEstimator):
def __init__(self,
base_estimator=DecisionTreeClassifier(max_features='auto'),
train_set_generator=RandomCentroidPDFTrainSetGenerator(),
random_sample=None):
self.base_estimator = base_estimator
self.train_set_generator = train_set_generator
self.random_sample = random_sample
def train_estimators(self, n, inp, y, weighting_strategy, random_state):
classifiers = []
weight_generator = self.train_set_generator.get_sample_weights(
n, inp, y, random_state
)
for i, weights in enumerate(weight_generator):
if self.random_sample is not None:
ix = random_state.choice(
len(y),
size=int(self.random_sample*len(y)),
p=weights, replace=True
)
weights = numpy.bincount(ix, minlength=len(y))
s = weights.sum()
weights = numpy.array([float(w) / s for w in weights])
Logger.get().write("!Training estimator:", (i+1))
est = self._make_estimator(inp, y, weights, random_state)
weighting_strategy.add_estimator(est, inp, y, weights)
classifiers.append(est)
return classifiers
def _make_estimator(self, inp, y, sample_weights, random_state):
seed = random_state.randint(MAX_INT)
est = clone(self.base_estimator)
est.set_params(random_state=check_random_state(seed))
est.fit(inp, y, sample_weight=sample_weights)
return est
class ResilientEnsemble(BaseEstimator, ClassifierMixin):
def __init__(self,
pipeline=None,
n_estimators=10,
training_strategy=TrainingStrategy(),
weighting_strategy=CentroidBasedWeightingStrategy(),
selection_strategy=SelectBestPercent(),
multiply_by_weight=False,
use_prob=True,
random_state=None):
self.pipeline = pipeline
self.n_estimators = n_estimators
self.training_strategy = training_strategy
self.weighting_strategy = weighting_strategy
self.selection_strategy = selection_strategy
self.multiply_by_weight = multiply_by_weight
self.use_prob = use_prob
self.random_state = random_state
# Training time attributes
self.classes_ = None
self.n_classes_ = None
self.classifiers_ = None
self.precomputed_probs_ = None
self.precomputed_weights_ = None
self.random_state_ = None
def fit(self, inp, y):
self.precomputed_probs_ = None
self.precomputed_weights_ = None
self.classes_, y = unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
self.random_state_ = check_random_state(self.random_state)
if self.pipeline is not None:
inp = self.pipeline.fit_transform(inp)
self.weighting_strategy.prepare(inp, y)
self.classifiers_ = self.training_strategy.train_estimators(
self.n_estimators, inp, y,
self.weighting_strategy, self.random_state_
)
        # Reset to None because the call above uses self.predict internally
self.precomputed_probs_ = None
self.precomputed_weights_ = None
return self
def predict_proba(self, inp):
# inp is array-like, (N, D), one instance per row
# output is array-like, (N, n_classes_), each row sums to one
if self.precomputed_probs_ is None:
self._precompute(inp)
prob = numpy.zeros((len(inp), self.n_classes_))
for i in range(len(inp)):
active_indices = self.selection_strategy.get_indices(
self.precomputed_weights_[i], self.random_state_
)
prob[i] = self.precomputed_probs_[i][active_indices].sum(axis=0)
preprocessing.normalize(prob, norm='l1', copy=False)
return prob
def predict(self, inp):
# inp is array-like, (N, D), one instance per row
# output is array-like, N, one label per instance
if self.pipeline is not None:
inp = self.pipeline.transform(inp)
p = self.predict_proba(inp)
return self.classes_[numpy.argmax(p, axis=1)]
def _precompute(self, inp):
self.precomputed_probs_ = numpy.zeros(
(len(inp), len(self.classifiers_), self.n_classes_)
)
self.precomputed_weights_ = numpy.zeros(
(len(inp), len(self.classifiers_))
)
for i, x in enumerate(inp):
Logger.get().write(
"!Computing", len(inp), "probabilities and weights:", (i+1)
)
for j, cls in enumerate(self.classifiers_):
prob = cls.predict_proba(x)[0]
if not self.use_prob:
max_index = prob.argmax()
prob = numpy.zeros_like(prob)
prob[max_index] = 1
self.precomputed_probs_[i][j] = prob
self.precomputed_weights_[i] = (
self.weighting_strategy.weight_estimators(x)
)
if self.multiply_by_weight:
for j in range(len(self.classifiers_)):
self.precomputed_probs_[i][j] *= (
self.precomputed_weights_[i][j]
)
def get_directory(self):
current_state = self.random_state
current_selection = self.selection_strategy
self.random_state = None
self.selection_strategy = None
filename = hashlib.md5(str(self)).hexdigest()
self.random_state = current_state
self.selection_strategy = current_selection
return filename
def get_filename(self):
return self.get_directory() + "/ensemble"
def __eq__(self, other):
return isinstance(other, ResilientEnsemble) and (
self.get_directory() == other.get_directory()
)
def __hash__(self):
return hash(self.get_directory())
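# Editor's usage sketch (not part of the original file): fits the ensemble
# on a toy problem through its sklearn-style API; `make_classification` is
# a standard scikit-learn helper, everything else comes from this module.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    inp, y = make_classification(n_samples=100, n_features=5, random_state=1)
    ensemble = ResilientEnsemble(n_estimators=5, random_state=1)
    ensemble.fit(inp, y)
    print(ensemble.predict(inp[:3]))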
| gpl-2.0 | 4,599,996,119,543,902,700 | 37.338983 | 77 | 0.600796 | false |
andrewschaaf/pj-closure | js/goog/array.py | 1 | 3829 | #<pre>Copyright 2006 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.</pre>
# TODO the rest
from goog import bind, isString
ARRAY_PROTOTYPE_ = Array.prototype
def concat(var_args):
return ARRAY_PROTOTYPE_.concat.apply(ARRAY_PROTOTYPE_, arguments)
def forEach(arr, f, opt_obj):
#DIFF: goog runs this if-statement at load-time
if ARRAY_PROTOTYPE_.forEach:
# TODO assert(arr.length != None)
ARRAY_PROTOTYPE_.forEach.call(arr, f, opt_obj)
else:
arr2 = (arr.split('') if isString(arr) else arr)
for i in range(len(arr)):
if i in arr2:
f.call(opt_obj, arr2[i], i, arr)
def map(arr, f, opt_obj):
#DIFF: goog runs this if-statement at load-time
if ARRAY_PROTOTYPE_.map:
#TODO goog.asserts.assert(arr.length != null);
return ARRAY_PROTOTYPE_.map.call(arr, f, opt_obj)
else:
l = len(arr)
res = Array(l)
arr2 = (arr.split('') if isString(arr) else arr)
for i in range(l):
if i in arr2:
res[i] = f.call(opt_obj, arr2[i], i, arr)
return res
def reduce(arr, f, val, opt_obj):
if arr.reduce:
if opt_obj:
return arr.reduce(bind(f, opt_obj), val)
else:
return arr.reduce(f, val)
  rval = val
  # Renamed so the wrapper does not shadow (and recursively call) the
  # callback `f`; the rval assignment relies on the JS closure semantics
  # of the Python-to-JS dialect this module is written in.
  def accumulate(val, index):
    rval = f.call(opt_obj, rval, val, index, arr)
  forEach(arr, accumulate)
  return rval
def slice(arr, start, opt_end):
#goog.asserts.assert(arr.length != null);
# passing 1 arg to slice is not the same as passing 2 where the second is
# null or undefined (in that case the second argument is treated as 0).
# we could use slice on the arguments object and then use apply instead of
# testing the length
if arguments.length <= 2:
return ARRAY_PROTOTYPE_.slice.call(arr, start)
else:
return ARRAY_PROTOTYPE_.slice.call(arr, start, opt_end)
def splice(arr, index, howMany, var_args):
#goog.asserts.assert(arr.length != null)
return ARRAY_PROTOTYPE_.splice.apply(
arr, slice(arguments, 1))
def insertAt(arr, obj, opt_i):
splice(arr, opt_i, 0, obj)
def filter(arr, f, opt_obj):
if ARRAY_PROTOTYPE_.filter:
#goog.asserts.assert(arr.length != null);
return ARRAY_PROTOTYPE_.filter.call(arr, f, opt_obj)
else:
res = []
resLength = 0
arr2 = arr.split('') if isString(arr) else arr
for i in range(len(arr)):
if i in arr2:
val = arr2[i]
if f.call(opt_obj, val, i, arr):
          # JS semantics: res[resLength++] = val (assign, then increment).
          res[resLength] = val
          resLength += 1
return res
def indexOf(arr, obj, opt_fromIndex):
if ARRAY_PROTOTYPE_.indexOf:
#goog.asserts.assert(arr.length != null);
return ARRAY_PROTOTYPE_.indexOf.call(arr, obj, opt_fromIndex)
else:
fromIndex = (
0
if opt_fromIndex == None else
(
Math.max(0, arr.length + opt_fromIndex)
if opt_fromIndex < 0 else
opt_fromIndex))
if isString(arr):
# Array.prototype.indexOf uses === so only strings should be found.
if not isString(obj) or len(obj) != 1:
return -1
return arr.indexOf(obj, fromIndex)
for i in range(fromIndex, len(arr)):
if (i in arr) and (arr[i] == obj):
return i
return -1
| apache-2.0 | -2,837,759,687,464,881,000 | 25.226027 | 76 | 0.628362 | false |
ilastik/ilastik-0.5 | ilastik/modules/unsupervised_decomposition/core/unsupervisedMgr.py | 1 | 7290 | from ilastik.core.baseModuleMgr import BaseModuleDataItemMgr, BaseModuleMgr
import numpy
import traceback, sys
from ilastik.core import jobMachine
from PyQt4 import QtCore
import os
import algorithms
from ilastik.core.volume import DataAccessor
from ilastik.core.overlayMgr import OverlayItem
""" Import all algorithm plugins"""
pathext = os.path.dirname(__file__)
try:
for f in os.listdir(os.path.abspath(pathext + '/algorithms')):
module_name, ext = os.path.splitext(f) # Handles no-extension files, etc.
        if ext == '.py': # Important, ignore .pyc/other files.
module = __import__('ilastik.modules.unsupervised_decomposition.core.algorithms.' + module_name)
except Exception, e:
print e
traceback.print_exc()
pass
for i, c in enumerate(algorithms.unsupervisedDecompositionBase.UnsupervisedDecompositionBase.__subclasses__()):
print "Loaded unsupervised decomposition algorithm:", c.name
#*******************************************************************************
# U n s u p e r v i s e d I t e m M o d u l e M g r *
#*******************************************************************************
class UnsupervisedItemModuleMgr(BaseModuleDataItemMgr):
name = "Unsupervised_Decomposition"
def __init__(self, dataItemImage):
BaseModuleDataItemMgr.__init__(self, dataItemImage)
self.dataItemImage = dataItemImage
self.overlays = []
self.inputData = None
def setInputData(self, data):
self.inputData = data
#*******************************************************************************
# U n s u p e r v i s e d D e c o m p o s i t i o n M o d u l e M g r *
#*******************************************************************************
class UnsupervisedDecompositionModuleMgr(BaseModuleMgr):
name = "Unsupervised_Decomposition"
def __init__(self, dataMgr):
BaseModuleMgr.__init__(self, dataMgr)
self.dataMgr = dataMgr
self.unsupervisedMethod = algorithms.unsupervisedDecompositionPCA.UnsupervisedDecompositionPCA
if self.dataMgr.module["Unsupervised_Decomposition"] is None:
self.dataMgr.module["Unsupervised_Decomposition"] = self
def computeResults(self, inputOverlays):
self.decompThread = UnsupervisedDecompositionThread(self.dataMgr, inputOverlays, self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod)
self.decompThread.start()
return self.decompThread
def finalizeResults(self):
activeItem = self.dataMgr[self.dataMgr._activeImageNumber]
activeItem._dataVol.unsupervised = self.decompThread.result
#create overlays for unsupervised decomposition:
if self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname] is None:
data = self.decompThread.result[:,:,:,:,:]
myColor = OverlayItem.qrgb(0, 0, 0)
for o in range(0, data.shape[4]):
data2 = OverlayItem.normalizeForDisplay(data[:,:,:,:,o:(o+1)])
# for some strange reason we have to invert the data before displaying it
ov = OverlayItem(255 - data2, color = myColor, alpha = 1.0, colorTable = None, autoAdd = True, autoVisible = True)
self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname + " component %d" % (o+1)] = ov
# remove outdated overlays (like PCA components 5-10 if a decomposition with 4 components is done)
numOverlaysBefore = len(self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr.keys())
finished = False
while finished != True:
o = o + 1
# assumes consecutive numbering
key = "Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname + " component %d" % (o+1)
self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr.remove(key)
numOverlaysAfter = len(self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr.keys())
if(numOverlaysBefore == numOverlaysAfter):
finished = True
else:
numOverlaysBefore = numOverlaysAfter
else:
self.dataMgr[self.dataMgr._activeImageNumber].overlayMgr["Unsupervised/" + self.dataMgr.module["Unsupervised_Decomposition"].unsupervisedMethod.shortname]._data = DataAccessor(self.decompThread.result)
#*******************************************************************************
# U n s u p e r v i s e d D e c o m p o s i t i o n T h r e a d *
#*******************************************************************************
class UnsupervisedDecompositionThread(QtCore.QThread):
def __init__(self, dataMgr, overlays, unsupervisedMethod = algorithms.unsupervisedDecompositionPCA.UnsupervisedDecompositionPCA, unsupervisedMethodOptions = None):
QtCore.QThread.__init__(self, None)
self.reshapeToFeatures(overlays)
self.dataMgr = dataMgr
self.count = 0
self.numberOfJobs = 1
self.stopped = False
self.unsupervisedMethod = unsupervisedMethod
self.unsupervisedMethodOptions = unsupervisedMethodOptions
self.jobMachine = jobMachine.JobMachine()
self.result = []
def reshapeToFeatures(self, overlays):
# transform to feature matrix
# ...first find out how many columns and rows the feature matrix will have
numFeatures = 0
numPoints = overlays[0].shape[0] * overlays[0].shape[1] * overlays[0].shape[2] * overlays[0].shape[3]
for overlay in overlays:
numFeatures += overlay.shape[4]
# ... then copy the data
features = numpy.zeros((numPoints, numFeatures), dtype=numpy.float)
currFeature = 0
for overlay in overlays:
currData = overlay[:,:,:,:,:]
features[:, currFeature:currFeature+overlay.shape[4]] = currData.reshape(numPoints, (currData.shape[4]))
currFeature += currData.shape[4]
self.features = features
self.origshape = overlays[0].shape
def decompose(self):
# V contains the component spectra/scores, W contains the projected data
unsupervisedMethod = self.unsupervisedMethod()
V, W = unsupervisedMethod.decompose(self.features)
self.result = (W.T).reshape((self.origshape[0], self.origshape[1], self.origshape[2], self.origshape[3], W.shape[0]))
def run(self):
self.dataMgr.featureLock.acquire()
try:
jobs = []
job = jobMachine.IlastikJob(UnsupervisedDecompositionThread.decompose, [self])
jobs.append(job)
self.jobMachine.process(jobs)
self.dataMgr.featureLock.release()
except Exception, e:
print "######### Exception in UnsupervisedThread ##########"
print e
traceback.print_exc(file=sys.stdout)
self.dataMgr.featureLock.release()
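# Editor's sketch (not part of the original file): the overlay-to-feature-
# matrix reshaping performed in reshapeToFeatures above, shown standalone
# with plain numpy arrays; the shapes are illustrative only.
if __name__ == '__main__':
    overlays = [numpy.random.rand(2, 3, 4, 1, c) for c in (2, 3)]
    numPoints = 2 * 3 * 4 * 1
    features = numpy.hstack([o.reshape(numPoints, o.shape[4]) for o in overlays])
    print features.shape # (24, 5): one row per voxel, one column per channel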
| bsd-2-clause | -7,100,022,085,646,028,000 | 48.598639 | 213 | 0.610288 | false |
Jason-Gew/Python_modules | authenticate.py | 1 | 2754 | #!/usr/bin/env python
#
# authenticate.py module is created by Jason/Ge Wu
# Purpose: quickly set up and verify username & password
# for system or software access.
from getpass import getpass # Disable password display on console
import base64 # Obfuscation only; for real protection use e.g. a salted hash (see the sketch below)
encryp_pass = ""
def set_authentication(pass_length, set_timeout):
global encryp_pass
while set_timeout > 0:
select1 = raw_input("\nWould you like to setup a new Password for Login? (Y/n): ")
if select1 == 'Y' or select1 == 'y':
while set_timeout > 0:
buff1 = getpass(prompt = "\nPlease Enter your Password: ")
if not buff1.isspace():
buff2 = getpass(prompt = "Please Enter your Password again: ")
if buff1 == buff2:
if len(buff2) < pass_length:
print "-> Password must have {} characters or more!".format(pass_length)
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
continue
else:
encryp_pass = base64.b64encode(buff2)
print "\n ==== Password Setup Success ====\n"
del buff1, buff2
return True
else:
print "-> Password does not match! Please Try Again!\n"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
continue
else:
print "-> Invalid Password!\n"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
continue
elif select1 == 'N' or select1 == 'n':
      return False
else:
if set_timeout > 0:
print "-> Please enter \'Y\' or \'n\' character only!"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
else:
print "\nTime Out, please re-run the program and Try Carefully!\n"
exit(1)
def console_authenticate(set_timeout):
while set_timeout > 0:
buff = getpass(prompt = "\nPlease enter your Password: ")
encryp_buffer = base64.b64encode(buff)
if encryp_buffer == encryp_pass:
print "\n ==== Authentication Success ==== \n"
del buff, encryp_buffer
return True
elif buff == '':
print "-> Password cannot be empty!\n"
set_timeout -= 1
print "-> You have {} chance(s)...".format(set_timeout)
else:
set_timeout -= 1
if set_timeout > 0:
print "-> Invalid Password, Please Try Again!"
print "-> You still have {} chance(s)...".format(set_timeout)
else:
print "\n ==== Authentication Fail ==== \n"
return False
# For testing purpose...
if __name__ == "__main__":
if set_authentication(6,4):
if console_authenticate(3):
print "Done"
else:
print "Failed"
exit(1)
else:
print "No Authentication!"
exit(0)
| gpl-3.0 | 333,663,628,608,991,300 | 28.94382 | 84 | 0.603849 | false |
petervanderdoes/wger | wger/utils/fields.py | 1 | 1559 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import logging
# Third Party
from django.db import models
# wger
from wger.utils.widgets import (
Html5FormDateField,
Html5FormTimeField
)
logger = logging.getLogger(__name__)
class Html5TimeField(models.TimeField):
'''
Custom Time field that uses the Html5TimeInput widget
'''
def formfield(self, **kwargs):
'''
Use our custom field
'''
defaults = {'form_class': Html5FormTimeField}
defaults.update(kwargs)
return super(Html5TimeField, self).formfield(**defaults)
class Html5DateField(models.DateField):
'''
    Custom Date field that uses the Html5DateInput widget
'''
def formfield(self, **kwargs):
'''
Use our custom field
'''
defaults = {'form_class': Html5FormDateField}
defaults.update(kwargs)
return super(Html5DateField, self).formfield(**defaults)
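# Editor's usage sketch (not in the original module): the fields drop into a
# model like any DateField/TimeField; `WorkoutSession` is illustrative only.
# class WorkoutSession(models.Model):
#     date = Html5DateField()
#     time_start = Html5TimeField(null=True, blank=True)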
| agpl-3.0 | -8,020,619,199,044,156,000 | 26.839286 | 78 | 0.699808 | false |
uclouvain/osis | base/migrations/0054_scoresencoding.py | 1 | 2269 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-05-02 15:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('base', '0053_auto_20160529_2355'),
]
operations = [
migrations.RunSQL(
"""
DROP VIEW IF EXISTS app_scores_encoding;
CREATE OR REPLACE VIEW app_scores_encoding AS
SELECT row_number() OVER () as id,
base_programmanager.id as program_manager_id,
program_manager_person.id as pgm_manager_person_id,
base_offeryear.id as offer_year_id,
base_learningunityear.id as learning_unit_year_id,
count(base_examenrollment.id) as total_exam_enrollments,
sum(case when base_examenrollment.score_final is not null or base_examenrollment.justification_final is not null then 1 else 0 end) exam_enrollments_encoded
from base_examenrollment
join base_sessionexam on base_sessionexam.id = base_examenrollment.session_exam_id
join base_learningunityear on base_learningunityear.id = base_sessionexam.learning_unit_year_id
join base_offeryearcalendar on base_offeryearcalendar.id = base_sessionexam.offer_year_calendar_id
join base_learningunitenrollment on base_learningunitenrollment.id = base_examenrollment.learning_unit_enrollment_id
join base_offerenrollment on base_offerenrollment.id = base_learningunitenrollment.offer_enrollment_id
join base_offeryear on base_offeryear.id = base_offerenrollment.offer_year_id
join base_programmanager on base_programmanager.offer_year_id = base_offeryear.id
join base_person program_manager_person on program_manager_person.id = base_programmanager.person_id
where base_offeryearcalendar.start_date <= CURRENT_TIMESTAMP::date
and base_offeryearcalendar.end_date >= CURRENT_TIMESTAMP::date
group by
base_programmanager.id,
program_manager_person.id,
base_offeryear.id,
base_learningunityear.id
;
""",
elidable=True
),
]
| agpl-3.0 | 3,287,373,462,270,548,000 | 39.517857 | 172 | 0.654473 | false |
314r/joliebulle | joliebulle/view/base.py | 1 | 1815 | #joliebulle 3.6
#Copyright (C) 2010-2016 Pierre Tavares
#Copyright (C) 2012-2015 joliebulle's authors
#See AUTHORS file.
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 3
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from PyQt5 import QtGui
from base import ImportBase
from view.yeastview import *
import view.constants
def getFermentablesQtModel():
model = QtGui.QStandardItemModel()
for f in ImportBase().listeFermentables:
item = QtGui.QStandardItem(f.name)
item.setData(f, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
def getHopsQtModel():
model = QtGui.QStandardItemModel()
for h in ImportBase().listeHops :
item = QtGui.QStandardItem(h.name)
item.setData(h, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
def getMiscsQtModel():
model = QtGui.QStandardItemModel()
for m in ImportBase().listeMiscs:
item = QtGui.QStandardItem(m.name)
item.setData(m, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model
def getYeastsQtModel():
model = QtGui.QStandardItemModel()
for y in ImportBase().listeYeasts:
item = QtGui.QStandardItem(YeastView(y).yeastDetailDisplay())
item.setData(y, view.constants.MODEL_DATA_ROLE)
model.appendRow(item)
return model | gpl-3.0 | 553,826,155,992,883,500 | 32.018182 | 76 | 0.770799 | false |
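# Editor's sketch (not part of the original file): the four builders above
# differ only in the source list and the label; a generic helper could fold
# them together, e.g. getHopsQtModel() == _buildModel(ImportBase().listeHops).
def _buildModel(items, labelFn=lambda obj: obj.name):
    model = QtGui.QStandardItemModel()
    for obj in items:
        item = QtGui.QStandardItem(labelFn(obj))
        item.setData(obj, view.constants.MODEL_DATA_ROLE)
        model.appendRow(item)
    return model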
shawncaojob/LC | QUESTIONS/44_wildcard_matching.py | 1 | 1859 | # 44. Wildcard Matching My Submissions QuestionEditorial Solution
# Total Accepted: 59032 Total Submissions: 333961 Difficulty: Hard
# Implement wildcard pattern matching with support for '?' and '*'.
#
# '?' Matches any single character.
# '*' Matches any sequence of characters (including the empty sequence).
#
# The matching should cover the entire input string (not partial).
#
# The function prototype should be:
# bool isMatch(const char *s, const char *p)
#
# Some examples:
# isMatch("aa","a") false
# isMatch("aa","aa") true
# isMatch("aaa","aa") false
# isMatch("aa", "*") true
# isMatch("aa", "a*") true
# isMatch("ab", "?*") true
# isMatch("aab", "c*a*b") false
class Solution(object):
def isMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
print(s, p)
m, n = len(s), len(p)
dp = [ [ False for y in xrange(n + 1) ] for x in xrange(m + 1) ]
dp[0][0] = True
for j in xrange(1, n + 1):
if p[j-1] == "*" and dp[0][j-1]:
dp[0][j] = True
for i in xrange(1, m + 1):
for j in xrange(1, n + 1):
if (s[i-1] == p[j-1] or p[j-1] == "?") and dp[i-1][j-1]: #Cur char matching
dp[i][j] = True
if p[j-1] == "*":
if dp[i][j-1] or dp[i-1][j]: # Matching 0 or more
dp[i][j] = True
for row in dp:
print(row)
return dp[-1][-1]
if __name__ == "__main__":
print(Solution().isMatch("aa","a"))
print(Solution().isMatch("aa","aa"))
print(Solution().isMatch("aaa","aa"))
print(Solution().isMatch("aa", "*"))
print(Solution().isMatch("aa", "a*"))
print(Solution().isMatch("ab", "?*"))
print(Solution().isMatch("aab", "c*a*b"))
| gpl-3.0 | -199,543,660,837,520,030 | 30.508475 | 94 | 0.507262 | false |
caioariede/pyq | sizzle/match.py | 1 | 3323 | from .selector import Selector
class MatchEngine(object):
pseudo_fns = {}
selector_class = Selector
def __init__(self):
self.register_pseudo('not', self.pseudo_not)
self.register_pseudo('has', self.pseudo_has)
def register_pseudo(self, name, fn):
self.pseudo_fns[name] = fn
@staticmethod
def pseudo_not(matcher, node, value):
return not matcher.match_node(matcher.parse_selector(value)[0], node)
@staticmethod
def pseudo_has(matcher, node, value):
for node, body in matcher.iter_data([node]):
if body:
return any(
matcher.match_data(matcher.parse_selector(value)[0], body))
def parse_selector(self, selector):
return self.selector_class.parse(selector)
def match(self, selector, data):
selectors = self.parse_selector(selector)
nodeids = {}
for selector in selectors:
for node in self.match_data(selector, data):
nodeid = id(node)
if nodeid not in nodeids:
nodeids[nodeid] = None
yield node
def match_data(self, selector, data):
for node, body in self._iter_data(data):
match = self.match_node(selector, node)
if match:
next_selector = selector.next_selector
if next_selector:
if body:
for node in self.match_data(next_selector, body):
yield node
else:
yield node
if body and not selector.combinator == self.selector_class.CHILD:
for node in self.match_data(selector, body):
yield node
def match_node(self, selector, node):
match = all(self.match_rules(selector, node))
if match and selector.attrs:
match &= all(self.match_attrs(selector, node))
if match and selector.pseudos:
match &= all(self.match_pseudos(selector, node))
return match
def match_rules(self, selector, node):
if selector.typ:
yield self.match_type(selector.typ, node)
if selector.id_:
yield self.match_id(selector.id_, node)
def match_attrs(self, selector, node):
for a in selector.attrs:
lft, op, rgt = a
yield self.match_attr(lft, op, rgt, node)
def match_pseudos(self, selector, d):
for p in selector.pseudos:
name, value = p
if name not in self.pseudo_fns:
raise Exception('Selector not implemented: {}'.format(name))
yield self.pseudo_fns[name](self, d, value)
def _iter_data(self, data):
for tupl in self.iter_data(data):
if len(tupl) != 2:
raise Exception(
'The iter_data method must yield pair tuples containing '
'the node and its body (empty if not available)')
yield tupl
def match_type(self, typ, node):
raise NotImplementedError
def match_id(self, id_, node):
raise NotImplementedError
    def match_attr(self, lft, op, rgt, node):
raise NotImplementedError
def iter_data(self, data):
raise NotImplementedError
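# Editor's sketch (not part of the original module): a minimal concrete
# engine over nested dicts, assuming nodes of the form
# {'type': ..., 'id': ..., 'body': [children]}; the names are illustrative.
class DictMatchEngine(MatchEngine):
    def match_type(self, typ, node):
        return node.get('type') == typ
    def match_id(self, id_, node):
        return node.get('id') == id_
    def match_attr(self, lft, op, rgt, node):
        return op == '=' and node.get(lft) == rgt
    def iter_data(self, data):
        for node in data:
            yield node, node.get('body') or []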
| mit | 5,607,412,709,546,960,000 | 30.951923 | 79 | 0.563948 | false |
beyoungwoo/C_glibc_Sample | _Algorithm/ProjectEuler_python/euler_42.py | 1 | 2038 | #!/usr/bin/python -Wall
# -*- coding: utf-8 -*-
"""
<div id="content">
<div style="text-align:center;" class="print"><img src="images/print_page_logo.png" alt="projecteuler.net" style="border:none;" /></div>
<h2>Coded triangle numbers</h2><div id="problem_info" class="info"><h3>Problem 42</h3><span>Published on Friday, 25th April 2003, 06:00 pm; Solved by 46003; Difficulty rating: 5%</span></div>
<div class="problem_content" role="problem">
<p>The <i>n</i><sup>th</sup> term of the sequence of triangle numbers is given by, <i>t<sub>n</sub></i> = ½<i>n</i>(<i>n</i>+1); so the first ten triangle numbers are:</p>
<p style="text-align:center;">1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...</p>
<p>By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value. For example, the word value for SKY is 19 + 11 + 25 = 55 = <i>t</i><sub>10</sub>. If the word value is a triangle number then we shall call the word a triangle word.</p>
<p>Using <a href="project/resources/p042_words.txt">words.txt</a> (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?</p>
</div><br />
<br /></div>
"""
import re
tri=[ ]
for i in range (1, 50):
res = (i*(i+1)/2)
tri.append(res)
def is_triangle(num):
global tri
tri_len = len(tri)
for i in range(0, tri_len):
if (num == tri[i]):
return True
elif (num < tri[i]):
return False
return False
count = 0
fread = open("p42words.txt", "r")
for line in fread:
text = re.split("\"", line)
total_text = list(text)
len_t = len(total_text)
for i in range(0, len_t):
if total_text[i].startswith(','):
continue
ret = [ord(c) for c in total_text[i]]
len_ret = len(ret)
if (is_triangle(sum(ret) - (64 * len_ret)) == True):
count += 1
print total_text[i], sum(ret) - (64 * len_ret)
print "total=", count
#a = 'hi'
#print [ord(c) for c in a]
| gpl-3.0 | -5,037,118,179,064,823,000 | 37.45283 | 309 | 0.626104 | false |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/aio/operations/_hub_virtual_network_connections_operations.py | 1 | 8982 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class HubVirtualNetworkConnectionsOperations:
"""HubVirtualNetworkConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
virtual_hub_name: str,
connection_name: str,
**kwargs
) -> "_models.HubVirtualNetworkConnection":
"""Retrieves the details of a HubVirtualNetworkConnection.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:param connection_name: The name of the vpn connection.
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: HubVirtualNetworkConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_02_01.models.HubVirtualNetworkConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.HubVirtualNetworkConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
'connectionName': self._serialize.url("connection_name", connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('HubVirtualNetworkConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections/{connectionName}'} # type: ignore
def list(
self,
resource_group_name: str,
virtual_hub_name: str,
**kwargs
) -> AsyncIterable["_models.ListHubVirtualNetworkConnectionsResult"]:
"""Retrieves the details of all HubVirtualNetworkConnections.
:param resource_group_name: The resource group name of the VirtualHub.
:type resource_group_name: str
:param virtual_hub_name: The name of the VirtualHub.
:type virtual_hub_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListHubVirtualNetworkConnectionsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_02_01.models.ListHubVirtualNetworkConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListHubVirtualNetworkConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualHubName': self._serialize.url("virtual_hub_name", virtual_hub_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListHubVirtualNetworkConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.Error, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/hubVirtualNetworkConnections'} # type: ignore
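# Editor's usage sketch (not part of the generated file): these operations
# are normally reached through the aio NetworkManagementClient; the resource
# names below are illustrative only.
async def _show_connections(client):
    conn = await client.hub_virtual_network_connections.get(
        "my-rg", "my-hub", "my-connection")
    print(conn.name)
    async for c in client.hub_virtual_network_connections.list("my-rg", "my-hub"):
        print(c.name)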
| mit | -270,808,405,234,510,460 | 48.9 | 215 | 0.656647 | false |
Squishymedia/feedingdb | src/feeddb/feed/migrations/0058_muscleowl_emg_sono.py | 1 | 40048 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SonoSensor.muscle'
db.add_column(u'feed_sonosensor', 'muscle',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feed.MuscleOwl'], null=True),
keep_default=False)
# Adding field 'EmgSensor.muscle'
db.add_column(u'feed_emgsensor', 'muscle',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['feed.MuscleOwl'], null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SonoSensor.muscle'
db.delete_column(u'feed_sonosensor', 'muscle_id')
# Deleting field 'EmgSensor.muscle'
db.delete_column(u'feed_emgsensor', 'muscle_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'feed.ageunit': {
'Meta': {'ordering': "['label']", 'object_name': 'AgeUnit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ageunit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anatomicallocation': {
'Meta': {'ordering': "['label']", 'object_name': 'AnatomicalLocation'},
'category': ('django.db.models.fields.IntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anatomicallocation_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.anteriorposterioraxis': {
'Meta': {'object_name': 'AnteriorPosteriorAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'anteriorposterioraxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behavior': {
'Meta': {'ordering': "['label']", 'object_name': 'Behavior'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'behavior_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.behaviorowl': {
'Meta': {'object_name': 'BehaviorOwl'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.BehaviorOwl']", 'symmetrical': 'False'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.channel': {
'Meta': {'object_name': 'Channel'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channel_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'rate': ('django.db.models.fields.IntegerField', [], {}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.channellineup': {
'Meta': {'ordering': "['position']", 'object_name': 'ChannelLineup'},
'channel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Channel']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'channellineup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.depthaxis': {
'Meta': {'object_name': 'DepthAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'depthaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.developmentstage': {
'Meta': {'ordering': "['label']", 'object_name': 'DevelopmentStage'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'developmentstage_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.dorsalventralaxis': {
'Meta': {'object_name': 'DorsalVentralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'dorsalventralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.electrodetype': {
'Meta': {'ordering': "['label']", 'object_name': 'ElectrodeType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'electrodetype_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgchannel': {
'Meta': {'object_name': 'EmgChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'emg_amplification': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'emg_filtering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Emgfiltering']"}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.EmgSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.emgfiltering': {
'Meta': {'ordering': "['label']", 'object_name': 'Emgfiltering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'emgfiltering_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.emgsensor': {
'Meta': {'ordering': "['id']", 'object_name': 'EmgSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'electrode_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ElectrodeType']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.emgsetup': {
'Meta': {'object_name': 'EmgSetup', '_ormbases': [u'feed.Setup']},
'preamplifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.eventchannel': {
'Meta': {'object_name': 'EventChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'unit': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.eventsetup': {
'Meta': {'object_name': 'EventSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.experiment': {
'Meta': {'object_name': 'Experiment'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'experiment_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'impl_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'subj_age': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subj_ageunit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AgeUnit']", 'null': 'True', 'blank': 'True'}),
'subj_devstage': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DevelopmentStage']"}),
'subj_tooth': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_weight': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '19', 'decimal_places': '5', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']"}),
'subject_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.forcechannel': {
'Meta': {'object_name': 'ForceChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ForceSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.forcesensor': {
'Meta': {'object_name': 'ForceSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.forcesetup': {
'Meta': {'object_name': 'ForceSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.illustration': {
'Meta': {'object_name': 'Illustration'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'illustration_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']", 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Subject']", 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.kinematicschannel': {
'Meta': {'object_name': 'KinematicsChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.KinematicsSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.kinematicssensor': {
'Meta': {'object_name': 'KinematicsSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.kinematicssetup': {
'Meta': {'object_name': 'KinematicsSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.mediallateralaxis': {
'Meta': {'object_name': 'MedialLateralAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'mediallateralaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.muscleowl': {
'Meta': {'object_name': 'MuscleOwl'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'obo_definition': ('django.db.models.fields.TextField', [], {}),
'rdfs_comment': ('django.db.models.fields.TextField', [], {}),
'rdfs_subClassOf_ancestors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.MuscleOwl']", 'symmetrical': 'False'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '1500'})
},
u'feed.pressurechannel': {
'Meta': {'object_name': 'PressureChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.PressureSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.pressuresensor': {
'Meta': {'object_name': 'PressureSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.pressuresetup': {
'Meta': {'object_name': 'PressureSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.proximaldistalaxis': {
'Meta': {'object_name': 'ProximalDistalAxis'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'proximaldistalaxis_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.restraint': {
'Meta': {'ordering': "['label']", 'object_name': 'Restraint'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'restraint_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sensor': {
'Meta': {'object_name': 'Sensor'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sensor_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loc_ap': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnteriorPosteriorAxis']", 'null': 'True', 'blank': 'True'}),
'loc_dv': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DorsalVentralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_ml': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MedialLateralAxis']", 'null': 'True', 'blank': 'True'}),
'loc_pd': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.ProximalDistalAxis']", 'null': 'True', 'blank': 'True'}),
'loc_side': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Side']"}),
'location_freetext': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'setup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Setup']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.session': {
'Meta': {'ordering': "['position']", 'object_name': 'Session'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'channels': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['feed.Channel']", 'through': u"orm['feed.ChannelLineup']", 'symmetrical': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'session_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_anesthesia_sedation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_restraint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Restraint']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.setup': {
'Meta': {'object_name': 'Setup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'setup_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'experiment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Experiment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sampling_rate': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.side': {
'Meta': {'ordering': "['label']", 'object_name': 'Side'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'side_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.sonochannel': {
'Meta': {'object_name': 'SonoChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'crystal1': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals1_related'", 'to': u"orm['feed.SonoSensor']"}),
'crystal2': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'crystals2_related'", 'to': u"orm['feed.SonoSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']"})
},
u'feed.sonosensor': {
'Meta': {'object_name': 'SonoSensor', '_ormbases': [u'feed.Sensor']},
'axisdepth': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.DepthAxis']", 'null': 'True', 'blank': 'True'}),
'location_controlled': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.AnatomicalLocation']"}),
'muscle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.MuscleOwl']", 'null': 'True'}),
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.sonosetup': {
'Meta': {'object_name': 'SonoSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'}),
'sonomicrometer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'feed.strainchannel': {
'Meta': {'object_name': 'StrainChannel', '_ormbases': [u'feed.Channel']},
u'channel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Channel']", 'unique': 'True', 'primary_key': 'True'}),
'sensor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.StrainSensor']"}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Unit']", 'null': 'True'})
},
u'feed.strainsensor': {
'Meta': {'object_name': 'StrainSensor', '_ormbases': [u'feed.Sensor']},
u'sensor_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Sensor']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.strainsetup': {
'Meta': {'object_name': 'StrainSetup', '_ormbases': [u'feed.Setup']},
u'setup_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['feed.Setup']", 'unique': 'True', 'primary_key': 'True'})
},
u'feed.study': {
'Meta': {'ordering': "['title']", 'object_name': 'Study'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'approval_secured': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'study_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'funding_agency': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'resources': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.studyprivate': {
'Meta': {'object_name': 'StudyPrivate'},
'approval': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'studyprivate_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'funding': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lab': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'pi': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.subject': {
'Meta': {'object_name': 'Subject'},
'breed': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'subject_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'study': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Study']"}),
'taxon': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Taxon']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.taxon': {
'Meta': {'ordering': "['genus']", 'object_name': 'Taxon'},
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'taxon_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
u'feed.trial': {
'Meta': {'object_name': 'Trial'},
'accession': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'behavior_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'behavior_primary': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Behavior']"}),
'behavior_secondary': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bookkeeping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'trial_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'estimated_duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'food_property': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_size': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'food_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'session': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['feed.Session']"}),
'start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'subj_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'subj_treatment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {}),
'waveform_picture': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'feed.unit': {
'Meta': {'ordering': "['technique', 'label']", 'object_name': 'Unit'},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'unit_related'", 'null': 'True', 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'technique': ('django.db.models.fields.IntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
}
}
complete_apps = ['feed'] | gpl-3.0 | -4,522,338,344,012,879,000 | 81.40535 | 195 | 0.546519 | false |
alphagov/notifications-api | migrations/versions/0321_drop_postage_constraints.py | 1 | 2423 | """
Revision ID: 0321_drop_postage_constraints
Revises: 0320_optimise_notifications
Create Date: 2020-06-08 11:48:53.315768
"""
import os
from alembic import op
revision = '0321_drop_postage_constraints'
down_revision = '0320_optimise_notifications'
environment = os.environ['NOTIFY_ENVIRONMENT']
def upgrade():
if environment not in ["live", "production"]:
op.execute('ALTER TABLE notifications DROP CONSTRAINT IF EXISTS chk_notifications_postage_null')
op.execute('ALTER TABLE notification_history DROP CONSTRAINT IF EXISTS chk_notification_history_postage_null')
op.execute('ALTER TABLE templates DROP CONSTRAINT IF EXISTS chk_templates_postage')
op.execute('ALTER TABLE templates_history DROP CONSTRAINT IF EXISTS chk_templates_history_postage')
def downgrade():
# The downgrade command must not be run in production - it will lock the tables for a long time
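    # A lower-impact pattern (sketch, assuming PostgreSQL): add each CHECK
    # constraint as NOT VALID, which needs only a brief lock, then run
    # "ALTER TABLE ... VALIDATE CONSTRAINT ..." separately, since validation
    # scans existing rows without blocking writes.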
if environment not in ["live", "production"]:
op.execute("""
ALTER TABLE notifications ADD CONSTRAINT "chk_notifications_postage_null"
CHECK (
CASE WHEN notification_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE notification_history ADD CONSTRAINT "chk_notification_history_postage_null"
CHECK (
CASE WHEN notification_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates ADD CONSTRAINT "chk_templates_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
op.execute("""
ALTER TABLE templates_history ADD CONSTRAINT "chk_templates_history_postage"
CHECK (
CASE WHEN template_type = 'letter' THEN
postage is not null and postage in ('first', 'second')
ELSE
postage is null
END
)
""")
| mit | 3,433,414,074,737,906,700 | 34.115942 | 118 | 0.574907 | false |
jayc0b0/Projects | Python/Security/caesarCypher.py | 1 | 1545 | # Jacob Orner (jayc0b0)
# Caesar Cypher script
def main():
# Declare variables and take input
global alphabet
alphabet = ['a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l',
'm', 'n', 'o', 'p', 'q', 'r',
's', 't', 'u', 'v', 'w', 'x',
'y', 'z']
    choice = int(raw_input("Enter 1 to encode. 2 to decode.\n>> "))
if choice == 1:
encode()
elif choice == 2:
        decode()
else:
print "Invalid choice"
main()
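
def decode():
    # A minimal sketch for the "Implement decode()" TODO in main(), assuming
    # the same alphabet and shift conventions as encode(): decoding a Caesar
    # cypher just shifts each letter back by the same amount, modulo 26.
    message = raw_input("Enter a string to decode (letters only):\n>> ")
    cypherShift = int(raw_input("Enter the shift (1-25) used to encode:\n>> "))
    if not message.isalpha() or cypherShift < 1 or cypherShift > 25:
        print "Invalid input. Enter letters only and a shift from 1-25."
        main()
        return
    messageArray = list(message.lower())
    for i, letter in enumerate(messageArray):
        messageArray[i] = alphabet[(alphabet.index(letter) - cypherShift) % 26]
    print "Your decoded message is:"
    print "".join(messageArray)
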
def encode():
message = raw_input("Enter a string to encode (letters only):\n>> ")
    cypherShift = int(raw_input("Enter an integer from 1-25 for the shift:\n>> "))

    # Verify input
    if not message.isalpha():
        print "Please enter only letters into your message."
        main()
        return
    if cypherShift < 1 or cypherShift > 25:
        print "Invalid number. Please enter a valid shift value."
        main()
        return
    # Break the string into a list of letters, then shift each letter,
    # wrapping around the end of the alphabet with modulo 26.
    messageArray = list(message.lower())
    for i, letter in enumerate(messageArray):
        messageArray[i] = alphabet[(alphabet.index(letter) + cypherShift) % 26]
# Output cyphered text
    message = "".join(messageArray)
print "Your cyphered message is:"
print message
main()
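# Example (hypothetical run): encoding "abc" with a shift of 3 prints "def",
# and decoding "def" with the same shift recovers "abc".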
| mit | 5,803,349,331,164,338,000 | 26.105263 | 81 | 0.552104 | false |