| Column | Type | Range / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | sequence | lengths 0–112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (nullable) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 – 10.2M |
| authors | sequence | lengths 1–1 |
| author_id | string | lengths 1–132 |
dd362d139002a4217fdd1735daa4f34396aee423 | efceab61588936151e49bf9311fe6f949cdd81c8 | /context_utils/regression_analysis/contexts/feature_weights.py | 55efd2312a5d416e950262951e999744467a7568 | [] | no_license | GiovanniCassani/distributional_bootstrapping | c74b4dddcb91a083c8cc0c55263228bc1acff54c | 324176f659835a29cfd6859febb570e99c1bad31 | refs/heads/master | 2021-03-19T11:45:38.364181 | 2018-09-10T15:24:37 | 2018-09-10T15:24:37 | 113,855,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,348 | py | __author__ = 'GCassani'
"""Compute feature weights for the relevant contexts given an input co-occurrence vector space"""
import os
from collections import defaultdict
def compute_feature_weights(training_space, output_file):
"""
:param training_space: the path to a file containing the co-occurrence count matrix derived from the training
corpus
:param output_file: the path to a file where the weight of each context will be written
    :return weights: a dictionary mapping each context to four keys, one per weighting scheme: gain ratio
                     ('gr'), information gain ('ig'), chi-square ('x2'), and shared variance ('sv'). Each
                     key maps to the weight of the corresponding context under that scheme. All scores are
                     stored for later processing.
"""
weights = defaultdict(dict)
with open(training_space, 'r') as t:
first_line = t.readline()
n = len(first_line.strip().split("\t")) + 100
train_space = ' -f ' + training_space
out_file = ' -W ' + output_file
timbl_cmd = 'timbl -mN:I1 -N' + str(n) + train_space + out_file
print(timbl_cmd)
os.system(timbl_cmd)
with open(output_file, "r") as f:
gr, ig, x2, sv = [0, 0, 0, 0]
for line in f:
if line.strip() == '# gr':
gr, ig, x2, sv = [1, 0, 0, 0]
elif line.strip() == '# ig':
gr, ig, x2, sv = [0, 1, 0, 0]
elif line.strip() == '# x2':
gr, ig, x2, sv = [0, 0, 1, 0]
elif line.strip() == '# sv':
gr, ig, x2, sv = [0, 0, 0, 1]
if any([gr, ig, x2, sv]):
try:
feature, weight = line.strip().split("\t")
if gr:
weights[int(feature) - 2]['gr'] = float(weight)
elif ig:
weights[int(feature) - 2]['ig'] = float(weight)
elif x2:
weights[int(feature) - 2]['x2'] = float(weight)
elif sv:
weights[int(feature) - 2]['sv'] = float(weight)
except ValueError:
pass
return weights
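

# A minimal usage sketch (not part of the original module): the file paths and the
# gain-ratio cutoff below are illustrative assumptions, and running it requires
# TiMBL to be installed and on the PATH.
if __name__ == '__main__':
    weights = compute_feature_weights('cooccurrence_counts.txt', 'context_weights.txt')
    # e.g. keep contexts whose gain-ratio weight exceeds an arbitrary cutoff
    salient = [context for context, w in weights.items() if w.get('gr', 0) > 0.01]
    print(len(salient), 'salient contexts')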
| [
"[email protected]"
] | |
7ed3e17232f1e18ac0f0b32b4082aea884541ced | a1e6c25d701eacb0dd893802f7d3db316768dbc4 | /featureExtrator/feature_extractor.py | e461e884f6bcae25d455741367a851a7a26da379 | [] | no_license | zhengchengyy/BBDataProcessing | c04f5053266881116f1ab764041f7fd4901561ab | 5d4f98e4b4b7d7e98db2677d00b618d2bb6a74c8 | refs/heads/master | 2021-07-20T22:31:28.894304 | 2020-05-17T14:04:30 | 2020-05-17T14:04:30 | 165,455,028 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,672 | py | '''
Modified from the original feature_extractor module: the data is now processed by time
window instead of by number of samples.
All added or changed code is marked with a pair of ------ lines.
Feature extraction uses the observer pattern: the feature extractor (Subject) depends on
several feature-extraction modules (Observers). The extractor registers the modules, and
whenever its state changes (i.e. new data arrives) it notifies every module to update its
state (compute a new feature value).
'''
from abc import ABC, abstractmethod
from queue import Queue
class ProcessModule(ABC):
"""Observer的抽象类,表示处理数据的模块。
每一个继承ProcessModule的类都包含一个存储数据的队列queue。
继承该类需要重写processFullQueue方法。
"""
# 改变为时间存储后,构造函数改变为如下
# ------
def __init__(self, interval=1, rate=0.5, size=0):
"""构造方法中,参数中的interval表示每次特征提取的时间跨度,
rate表示间隔多长时间进行一次特征提取,
上述参数的单位均为秒
"""
if (isinstance(interval, float) or isinstance(interval, int)) \
and (isinstance(rate, float) or isinstance(rate, int)):
if interval <= 0 or rate <= 0 or rate > interval:
raise ModuleProcessException("Illegal rate or interval.")
else:
raise ModuleProcessException("Interval and rate both should be float or int.")
self.interval = interval
self.rate = rate
self.size = size
        # The sampling rate and the analysis window may both vary, so no maximum queue size is set
self.queue = Queue(maxsize=0)
super(ProcessModule, self).__init__()
# ------
@abstractmethod
def processFullQueue(self):
"""处理满队列中的所有元素,通常为统计值。需要返回值。"""
pass
def process(self, value):
"""Observer的update(),接收一个字典值{time:t,volt:v},将其添加到队列中,
如果队列中头尾的数据达到了interval定义的时间差,则进行处理,
并在处理后移除rate定义的时间差的数据。
"""
self.queue.put(value)
self.size += 1
if value['time'] - self.queue.queue[0]['time'] >= self.interval:
result = self.processFullQueue()
t = value['time']
while (value['time'] - self.queue.queue[0]['time']) > (self.interval - self.rate):
self.queue.get()
self.size -= 1
return result
class FeatureExtractor:
"""Subject提取特征值的类,该类需要配合ProcessModule使用。
FeatureExtractor中有一个用于存储ProcessModule的列表,使用register函数可以向列表中添加ProcessModule。
当FeatureExtractor接受到一个数据的时候,会让该列表中的所有PrcessModule接收这个数据并分别处理。
"""
def __init__(self):
self.modules = []
def register(self, processModule):
"""添加一个ProcessModule"""
self.modules.append(processModule)
def process(self, value):
"""Subject的notify(),接收一个value值,让self.modules中的每一个ProcessModule处理该值"""
result = {}
for module in self.modules:
output = module.process(value)
if (output != None):
result[module.FEATURE_NAME] = output
return result
def clear(self):
"""清理所有的ProcessModule"""
for module in self.modules:
module.clear()
class ModuleProcessException(Exception):
pass
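

# Minimal usage sketch (not in the original file): a toy Observer that averages the
# voltages in its window, driven by synthetic readings. The module and feature names
# here are hypothetical.
class AverageModule(ProcessModule):
    FEATURE_NAME = 'Average'

    def processFullQueue(self):
        # Mean voltage over the current window
        values = [item['volt'] for item in self.queue.queue]
        return sum(values) / len(values)


if __name__ == '__main__':
    extractor = FeatureExtractor()
    extractor.register(AverageModule(interval=1, rate=0.5))
    for i in range(40):
        features = extractor.process({'time': i * 0.1, 'volt': 3.3})
        if features:
            print(features)  # e.g. {'Average': 3.3}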
| [
"[email protected]"
] | |
c6d92dc424e95b6378c43eb99d934375630c943d | e1fe66628d58e66b25d910c6d2f173a7dfa74301 | /1. Edge AI Fundamentals with OpenVINO/4. Inference Engine/Workspaces/3. Integrate into an Edge App/solution/app-custom.py | 48e61b9d9b66578fc97dd81f97cebbf7727c6f5e | [
"MIT"
] | permissive | mmphego/Udacity-EdgeAI | 7c5443c4f19eaaf4f6eb44739f7e6413ba26e106 | 25af22f85772adc25ff9d5a59ba8a33a1e5551cd | refs/heads/master | 2022-12-31T15:39:07.077926 | 2020-10-20T11:45:58 | 2020-10-20T11:45:58 | 258,950,438 | 8 | 1 | MIT | 2020-05-25T14:38:30 | 2020-04-26T05:48:07 | Jupyter Notebook | UTF-8 | Python | false | false | 4,277 | py | import argparse
import cv2
from inference import Network
INPUT_STREAM = "test_video.mp4"
CPU_EXTENSION = "/opt/intel/openvino/deployment_tools/inference_engine/lib/intel64/libcpu_extension_sse4.so"
def get_args():
"""
Gets the arguments from the command line.
"""
parser = argparse.ArgumentParser("Run inference on an input video")
# -- Create the descriptions for the commands
m_desc = "The location of the model XML file"
i_desc = "The location of the input file"
d_desc = "The device name, if not 'CPU'"
### TODO: Add additional arguments and descriptions for:
### 1) Different confidence thresholds used to draw bounding boxes
### 2) The user choosing the color of the bounding boxes
c_desc = "The color of the bounding boxes to draw; RED, GREEN or BLUE"
ct_desc = "The confidence threshold to use with the bounding boxes"
# -- Add required and optional groups
parser._action_groups.pop()
required = parser.add_argument_group("required arguments")
optional = parser.add_argument_group("optional arguments")
# -- Create the arguments
required.add_argument("-m", help=m_desc, required=True)
optional.add_argument("-i", help=i_desc, default=INPUT_STREAM)
optional.add_argument("-d", help=d_desc, default="CPU")
optional.add_argument("-c", help=c_desc, default="BLUE")
optional.add_argument("-ct", help=ct_desc, default=0.5)
args = parser.parse_args()
return args
def convert_color(color_string):
"""
Get the BGR value of the desired bounding box color.
Defaults to Blue if an invalid color is given.
"""
colors = {"BLUE": (255, 0, 0), "GREEN": (0, 255, 0), "RED": (0, 0, 255)}
out_color = colors.get(color_string)
if out_color:
return out_color
else:
return colors["BLUE"]
def draw_boxes(frame, result, args, width, height):
"""
Draw bounding boxes onto the frame.
"""
for box in result[0][0]: # Output shape is 1x1x100x7
conf = box[2]
if conf >= args.ct:
xmin = int(box[3] * width)
ymin = int(box[4] * height)
xmax = int(box[5] * width)
ymax = int(box[6] * height)
cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), args.c, 1)
return frame
def infer_on_video(args):
# Convert the args for color and confidence
args.c = convert_color(args.c)
args.ct = float(args.ct)
### TODO: Initialize the Inference Engine
plugin = Network()
### TODO: Load the network model into the IE
plugin.load_model(args.m, args.d, CPU_EXTENSION)
net_input_shape = plugin.get_input_shape()
# Get and open video capture
cap = cv2.VideoCapture(args.i)
cap.open(args.i)
# Grab the shape of the input
width = int(cap.get(3))
height = int(cap.get(4))
# Create a video writer for the output video
# The second argument should be `cv2.VideoWriter_fourcc('M','J','P','G')`
# on Mac, and `0x00000021` on Linux
out = cv2.VideoWriter("out.mp4", 0x00000021, 30, (width, height))
# Process frames until the video ends, or process is exited
while cap.isOpened():
# Read the next frame
flag, frame = cap.read()
if not flag:
break
key_pressed = cv2.waitKey(60)
### TODO: Pre-process the frame
p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))
p_frame = p_frame.transpose((2, 0, 1))
p_frame = p_frame.reshape(1, *p_frame.shape)
### TODO: Perform inference on the frame
plugin.async_inference(p_frame)
### TODO: Get the output of inference
if plugin.wait() == 0:
result = plugin.extract_output()
### TODO: Update the frame to include detected bounding boxes
frame = draw_boxes(frame, result, args, width, height)
# Write out the frame
out.write(frame)
# Break if escape key pressed
if key_pressed == 27:
break
# Release the out writer, capture, and destroy any OpenCV windows
out.release()
cap.release()
cv2.destroyAllWindows()
def main():
args = get_args()
infer_on_video(args)
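

def _preprocess_demo():
    """Standalone sketch (not part of the original solution) of the HWC -> NCHW
    pre-processing used above; the network input shape is a hypothetical example."""
    import numpy as np
    net_input_shape = [1, 3, 320, 544]  # assumed model input (NCHW)
    frame = np.zeros((480, 640, 3), np.uint8)  # dummy BGR frame (HWC)
    p_frame = cv2.resize(frame, (net_input_shape[3], net_input_shape[2]))  # resize takes (w, h)
    p_frame = p_frame.transpose((2, 0, 1))  # channels first
    p_frame = p_frame.reshape(1, *p_frame.shape)  # add batch dimension
    assert list(p_frame.shape) == net_input_shape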
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
b62f7a5b91d0ff73e7acfc2c65e782f77896a901 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02989/s015774618.py | dc2669e094988c96002baa64ddc78876d7e41529 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | N = int(input())
d = list(map(int, input().split()))
d.sort()
# After sorting, any integer K with d[N//2 - 1] < K <= d[N//2] splits the values into
# two equal halves, so the answer is the gap between the two middle elements.
dmed1 = N // 2 - 1
dmed2 = N // 2
ans = d[dmed2] - d[dmed1]
print(ans) | [
"[email protected]"
] | |
6f195985f978c28718426a751fc71c876b85bbff | 5d38ca0abaa29063d1db7bf398760cf57536803f | /jobplus/handlers/__init__.py | e8e1b9235deb329442abc9f29722747c75902aa8 | [] | no_license | LouPlus/jobplus4-7 | a1fece372dcad7b4edd9ea56445f562134b01da3 | 3b1c9b1db76b77eb28aadd4d06b229a5215ef5f6 | refs/heads/master | 2021-05-03T16:40:17.334850 | 2018-02-27T14:04:13 | 2018-02-28T03:24:16 | 120,439,974 | 1 | 10 | null | 2018-02-28T03:24:17 | 2018-02-06T10:39:56 | HTML | UTF-8 | Python | false | false | 123 | py | from .front import front
from .company import company
from .user import user
from .admin import admin
from .job import job
| [
"[email protected]"
] | |
3cd47a7d7dd674e5aa584bc0ac1049be6c8bdf48 | b01646abacbef23719926477e9e1dfb42ac0f6a9 | /Rebrov/training/673K/673K_O088N12_Rebrov_lib_and_all_families/input.py | d9cd3c9eddcac08e009b6e6733f837aefa7982fc | [] | no_license | Tingchenlee/Test | 41b0fd782f4f611d2b93fda6b63e70956881db33 | 37313c3f594f94cdc64c35e17afed4ae32d3e4e6 | refs/heads/master | 2023-06-02T05:38:32.884356 | 2021-06-10T11:59:02 | 2021-06-10T11:59:02 | 349,764,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,308 | py | # Microkinetic model for ammonia oxidation
# E.V. Rebrov, M.H.J.M. de Croon, J.C. Schouten
# Development of the kinetic model of platinum catalyzed ammonia oxidation in a microreactor
# Chemical Engineering Journal 90 (2002) 61–76
database(
thermoLibraries=['surfaceThermoPt111', 'primaryThermoLibrary', 'thermo_DFT_CCSDTF12_BAC','DFT_QCI_thermo', 'GRI-Mech3.0-N', 'NitrogenCurran', 'primaryNS', 'CHON'],
reactionLibraries = ['Surface/CPOX_Pt/Deutschmann2006','Surface/Nitrogen','Surface/Rebrov_Pt111'],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = ['surface','default'],
kineticsEstimator = 'rate rules',
)
catalystProperties(
metal = 'Pt111'
)
generatedSpeciesConstraints(
allowed=['input species','seed mechanisms','reaction libraries'],
maximumNitrogenAtoms=2,
maximumOxygenAtoms=3,
)
# List of species
species(
label='X',
reactive=True,
structure=adjacencyList("1 X u0"),
)
species(
label='O2',
reactive=True,
structure=adjacencyList(
"""
multiplicity 3
1 O u1 p2 c0 {2,S}
2 O u1 p2 c0 {1,S}
"""),
)
species(
label='H2O',
reactive=True,
structure=SMILES("O"),
)
species(
label='N2',
reactive=True,
structure=SMILES("N#N"),
)
species(
label='NO',
reactive=True,
structure=adjacencyList(
"""
multiplicity 2
1 N u1 p1 c0 {2,D}
2 O u0 p2 c0 {1,D}
"""),
)
species(
label='NH3',
reactive=True,
structure=adjacencyList(
"""
1 N u0 p1 c0 {2,S} {3,S} {4,S}
2 H u0 p0 c0 {1,S}
3 H u0 p0 c0 {1,S}
4 H u0 p0 c0 {1,S}
"""),
)
species(
label='N2O',
reactive=True,
structure=adjacencyList(
"""
1 N u0 p2 c-1 {2,D}
2 N u0 p0 c+1 {1,D} {3,D}
3 O u0 p2 c0 {2,D}
"""),
)
species(
label='He',
reactive=False,
structure=adjacencyList(
"""
1 He u0 p1 c0
"""),
)
#-------------
#temperature from 523-673K
surfaceReactor(
temperature=(673,'K'),
initialPressure=(1.0, 'bar'),
nSims=12,
initialGasMoleFractions={
"NH3": 0.12,
"O2": 0.88,
"He": 0.0,
"NO":0.0,
"H2O":0.0,
"N2O":0.0,
"N2":0.0,
},
initialSurfaceCoverages={
"X": 1.0,
},
surfaceVolumeRatio=(2.8571428e4, 'm^-1'), #A/V = 280µm*π*9mm/140µm*140µm*π*9mm = 2.8571428e4^m-1
terminationConversion = {"NH3":0.99,},
#terminationTime=(10, 's'),
)
simulator( #default for surface reaction atol=1e-18,rtol=1e-12
    atol=1e-18,  # absolute tolerance is typically 1e-15 to 1e-25
    rtol=1e-12,  # relative tolerance is usually 1e-4 to 1e-8
)
model(
    toleranceKeepInEdge=0.01,  # recommended to be no larger than 10% of toleranceMoveToCore
    toleranceMoveToCore=0.1,
    toleranceInterruptSimulation=1e8,  # set equal to toleranceMoveToCore unless the advanced pruning feature
                                       # is desired; to always enable pruning, set a high value, e.g. 1e8
    maximumEdgeSpecies=5000,  # should be set below 200000
    minCoreSizeForPrune=50,  # default value
    #toleranceThermoKeepSpeciesInEdge=0.5,
    minSpeciesExistIterationsForPrune=2,  # default value = 2 iterations
)
options(
units='si',
saveRestartPeriod=None,
generateOutputHTML=True,
generatePlots=True,
saveEdgeSpecies=True,
saveSimulationProfiles=True,
) | [
"[email protected]"
] | |
3071ce5a9c9fdebf28175ab07ecb3091a84ba165 | edfa045d12b8efb65de20261ff80a86160298e44 | /checkout/migrations/0003_orderitem_order.py | 97c09cc1a4f31557128d3510eed3d113a9d9b7d2 | [
"MIT"
] | permissive | yusif763/Unistore-pro | 1d559a89bb71f3db8b5d1e89df64ed7113f00f2a | 41ad0fa209c79a201d3f6a7aa68ec0ace707dcad | refs/heads/main | 2023-04-24T02:50:30.085011 | 2021-04-29T11:00:11 | 2021-04-29T11:00:11 | 362,782,688 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | # Generated by Django 3.1.7 on 2021-04-13 09:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('checkout', '0002_auto_20210413_0746'),
]
operations = [
migrations.AddField(
model_name='orderitem',
name='order',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='orderwish', to='checkout.checkout', verbose_name='CheckOut'),
),
]
| [
"[email protected]"
] | |
631abf0d4fadcce442a27b89555a1f1cde35aa63 | a2e638cd0c124254e67963bda62c21351881ee75 | /Python modules/CBFETR_Category_Mapping.py | 28fa1fa72b9c7462a15142087f46f485831e5223 | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,583 | py | '''----------------------------------------------------------------------------------------------------------
MODULE : CBFETR_Category_Mapping
PROJECT : Cross Border Foreign Exchange Transaction Reporting
PURPOSE : This module contains the detailed money flow category mapping based on money flow type
and instrument type.
DEPARTMENT AND DESK : Operations
REQUESTER : CBFETR Project
DEVELOPER : Heinrich Cronje
CR NUMBER : 235281
-------------------------------------------------------------------------------------------------------------
HISTORY
=============================================================================================================
Date Change no Developer Description
-------------------------------------------------------------------------------------------------------------
2012-02-22 235281 Heinrich Cronje Initial Implementation
2013-08-17 CHNG0001209844 Heinrich Cronje BOPCUS 3 Upgrade
2015-12-04 MINT-456 Melusi Maseko Added a secondary category mapping to cater for enumerations
greater than 61, as stated in the AEF Browser
for "Enumeration enum(SettlementCashFlowType)"
-------------------------------------------------------------------------------------------------------------
DESCRIPTION OF MODULE:
Rows are the Money Flow Types with SettlementCashFlowType Enums.
Columns are the Instrument Type Enums.
Below are the Instrument Type and Enum breakdown.
CATEGORY_MAPPING
Instrument_Type Enum Instrument_Type Enum Instrument_Type Enum Instrument_Type Enum Instrument_Type Enum Instrument_Type Enum Instrument_Type Enum
Stock 1 Bill 11 Curr 21 Collateral 31 DualCurrBond 41 CFD 51 Commodity Variant 61
StockRight 2 CD 12 EquityIndex 22 SecurityLoan 32 MBS/ABS 42 VarianceSwap 52
Future/Forward 3 Blank 13 BondIndex 23 Repo/Reverse 33 UnKnown 43 Fund 53
Option 4 Deposit 14 RateIndex 24 BuySellback 34 CLN 44 Depositary Receipt 54
Warrant 5 FRA 15 Convertible 25 PriceIndex 35 CallAccount 45 FXOptionDatedFwd 55
LEPO 6 Swap 16 MultiOption 26 IndexLinkedBond 36 CashCollateral 46 Portfolio Swap 56
Bond 7 CurrSwap 17 MultiAsset 27 TotalReturnSwap 37 BasketRepo/Reverse 47 ETF 57
FRN 8 Cap 18 Combination 28 CreditDefaultSwap 38 CreditIndex 48 Fx Rate 58
PromisLoan 9 Floor 19 FreeDefCF 29 EquitySwap 39 IndexLinkedSwap 49 PriceSwap 59
Zero 10 Collar 20 FxSwap 30 Commodity 40 BasketSecurityLoan 50 Commodity Index 60
CATEGORY_MAPPING_2
Instrument_Type Enum
Commitment Fee 1
'''
CATEGORY_MAPPING = [['None 0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', 'None 0'],
['Premium 1', '601', '', '702', '701', '703', '', '603', '603', '', '603', '602', '', '', '', '275', '275', '275', '275', '275', '', '', '601', '', '', '603', '', '', '', '', '', '', '603', '603', '603', '', '603', '275', '', '', '', '603', '', '', '603', '', '', '603', '', '275', '', '', '275', '', '', '', '', '601', '', '', '', '', 'Premium 1'],
['Dividend 2', '301', '', '301', '', '301', '', '301', '', '', '', '301', '301', '', '301', '301', '301', '301', '301', '301', '', '', '301', '', '301', '301', '', '', '301', '301', '', '', '301', '301', '301', '', '', '301', '', '', '', '', '', '', '301', '', '', '301', '', '301', '', '301', '301', '', '', '301', '301', '301', '', '', '', '', 'Dividend 2'],
['Payment Premium 3', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Payment Premium 3'],
['Assignment Fee 4', '275', '', '275', '275', '275', '', '275', '275', '', '275', '275', '275', '', '275', '275', '275', '275', '275', '275', '', '275', '275', '', '275', '275', '', '', '275', '275', '', '', '275', '275', '275', '', '275', '275', '275', '', '', '275', '', '', '275', '', '', '275', '', '275', '', '275', '275', '', '', '', '275', '275', '', '', '', '', 'Assignment Fee 4'],
['Broker Fee 5', '307', '', '307', '307', '307', '', '307', '307', '', '307', '307', '307', '', '307', '307', '307', '307', '307', '307', '', '307', '307', '', '307', '307', '', '', '307', '307', '', '', '307', '307', '307', '', '307', '307', '307', '', '', '307', '', '', '307', '', '', '307', '', '307', '', '307', '307', '', '', '', '307', '275', '', '', '', '', 'Broker Fee 5'],
['Internal Fee 6', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '275', '', '', '', '', 'Internal Fee 6'],
['Extension Fee 7', '309', '', '309', '', '309', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '309', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '309', '', '', '', '', '309', '275', '', '', '', '', 'Extension Fee 7'],
['Termination Fee 8', '275', '', '275', '275', '275', '', '275', '275', '', '275', '275', '275', '', '275', '275', '275', '275', '275', '275', '', '275', '275', '', '275', '275', '', '', '275', '275', '', '', '275', '275', '275', '', '275', '275', '275', '', '', '275', '', '', '275', '', '', '275', '', '275', '', '275', '275', '', '', '275', '275', '275', '', '', '', '', 'Termination Fee 8'],
['Payment Cash 9', '601', '', '275', '701', '275', '', '275', '275', '', '275', '', '', '', '', '', '', '', '', '', '', '', '601', '', '', '275', '', '', '', '', '', '', '275', '275', '275', '', '275', '', '', '', '', '', '', '', '275', '', '', '275', '', '', '', '', '', '', '', '', '', '275', '', '', '', '', 'Payment Cash 9'],
['Fixed Amount 10', '601', '', '702', '', '703', '', '603', '603', '', '603', '602', '602', '', '602', '', '', '', '', '', '', '103', '601', '', '', '603', '', '', '', '', '', '', '603', '603', '603', '', '603', '', '', '', '', '603', '', '', '603', '', '', '603', '', '', '', '', '', '', '', '', '', '601', '', '', '', '', 'Fixed Amount 10'],
['Fixed Rate 11', '309', '', '309', '', '309', '', '309', '309', '', '309', '', '309', '', '309', '', '', '', '', '', '', '', '309', '', '', '309', '', '', '', '', '', '', '309', '309', '309', '', '309', '', '309', '', '', '309', '', '', '309', '', '', '309', '', '', '', '309', '', '', '', '', '309', '309', '', '', '', '', 'Fixed Rate 11'],
['Float Rate 12', '309', '', '309', '', '309', '', '309', '309', '', '309', '', '', '', '', '', '', '', '', '', '', '', '309', '', '', '309', '', '', '', '', '', '', '309', '309', '309', '', '309', '', '309', '', '', '309', '', '', '309', '', '', '309', '', '', '', '309', '', '', '', '', '309', '309', '', '', '', '', 'Float Rate 12'],
['Caplet 13', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Caplet 13'],
['Floorlet 14', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Floorlet 14'],
['Digital Caplet 15', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Digital Caplet 15'],
['Digital Floorlet 16', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Digital Floorlet 16'],
['Total Return 17', '601', '', '702', '', '703', '', '603', '603', '', '603', '', '', '', '', '', '', '', '', '', '', '', '601', '', '', '603', '', '', '', '', '', '', '603', '603', '603', '', '603', '', '', '', '', '603', '', '', '603', '', '', '603', '', '', '', '', '', '', '', '', '', '601', '', '', '', '', 'Total Return 17'],
['Credit Default 18', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Credit Default 18'],
['Call Fixed Rate 19', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Call Fixed Rate 19'],
['Call Float Rate 20', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Call Float Rate 20'],
['Redemption Amount 21', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Redemption Amount 21'],
['Zero Coupon Fixed 22', '601', '', '702', '', '703', '', '603', '603', '', '603', '', '', '', '', '', '', '', '', '', '', '', '601', '', '', '603', '', '', '', '', '', '', '603', '603', '603', '', '603', '', '', '', '', '603', '', '', '603', '', '', '603', '', '', '', '', '', '', '', '', '', '601', '', '', '', '', 'Zero Coupon Fixed 22'],
['Return 23', '601', '', '702', '', '703', '', '603', '603', '', '603', '', '', '', '', '', '', '', '', '', '', '', '601', '', '', '603', '', '', '', '', '', '', '603', '603', '603', '', '603', '', '', '', '', '603', '', '', '603', '', '', '603', '', '', '', '', '', '', '', '', '', '601', '', '', '', '', 'Return 23'],
['Exercise Cash 24', '309', '', '309', '', '309', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '309', '', '', '', '', '', '', '', '', '', '', '603', '603', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '309', '', '', '', '', '309', '309', '', '', '', '', 'Exercise Cash 24'],
['Security Nominal 25', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Security Nominal 25'],
['Stand Alone Payment 26', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Stand Alone Payment 26'],
['Fee 27', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Fee 27'],
['End Cash 28', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'End Cash 28'],
['Initial Margin 29', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Initial Margin 29'],
['Variation Margin 30', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Variation Margin 30'],
['Premium 2 31', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Premium 2 31'],
['Coupon 32', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Coupon 32'],
['Coupon transfer 33', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Coupon transfer 33'],
['End Security 34', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'End Security 34'],
['Aggregate Security 35', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Aggregate Security 35'],
['Aggregate Cash 36', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Aggregate Cash 36'],
['Fixed Rate Adjustable 37', '309', '', '309', '', '309', '', '309', '309', '', '309', '', '', '', '', '', '', '', '', '', '', '', '309', '', '', '309', '', '', '', '', '', '', '309', '309', '309', '', '309', '', '309', '', '', '309', '', '', '309', '', '', '309', '', '', '', '309', '', '', '', '', '309', '309', '', '', '', '', 'Fixed Rate Adjustable 37'],
['Cashflow Dividend 38', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Cashflow Dividend 38'],
['Redemption 39', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Redemption 39'],
['Payout 40', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Payout 40'],
['Dividend Transfer 41', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Dividend Transfer 41'],
['Call Fixed Rate Adjustable 42', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Call Fixed Rate Adjustable 42'],
['Interest Reinvestment 43', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Interest Reinvestment 43'],
['Fill Fee 44', '309', '', '309', '', '309', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '309', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '309', '', '', '', '', '309', '275', '', '', '', '', 'Fill Fee 44'],
['Commission 45', '307', '', '307', '307', '307', '', '307', '307', '', '307', '307', '307', '', '307', '307', '307', '307', '307', '307', '', '307', '307', '', '307', '307', '', '', '307', '307', '', '', '307', '307', '307', '', '307', '307', '307', '', '', '307', '', '', '307', '', '', '307', '', '307', '', '307', '307', '', '', '', '307', '275', '', '', '', '', 'Commission 45'],
['Allocation Fee 46', '275', '', '275', '275', '275', '', '275', '275', '', '275', '275', '275', '', '275', '275', '275', '275', '275', '275', '', '275', '275', '', '275', '275', '', '', '275', '275', '', '', '275', '275', '275', '', '275', '275', '275', '', '', '275', '', '', '275', '', '', '275', '', '275', '', '275', '275', '', '', '', '275', '275', '', '', '', '', 'Allocation Fee 46'],
['Interest Accrued 47', '309', '', '309', '309', '309', '', '309', '309', '', '309', '309', '309', '', '309', '', '', '', '', '', '', '309', '309', '', '309', '309', '', '', '309', '309', '', '', '309', '309', '309', '', '309', '', '309', '', '', '309', '', '', '309', '', '', '309', '', '', '', '309', '', '', '', '', '309', '309', '', '', '', '', 'Interest Accrued 47'],
['Fixed Rate Accretive 48', '309', '', '309', '', '309', '', '309', '309', '', '309', '', '', '', '', '', '', '', '', '', '', '', '309', '', '', '309', '', '', '', '', '', '', '309', '309', '309', '', '309', '', '309', '', '', '309', '', '', '309', '', '', '309', '', '', '', '309', '', '', '', '', '309', '309', '', '', '', '', 'Fixed Rate Accretive 48'],
['Position Total Return 49', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Position Total Return 49'],
['Recovery 50', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Recovery 50'],
['Fixed Price 51', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Fixed Price 51'],
['Float Price 52', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Float Price 52']
]
#A secondary category mapping was added to cater for enumerations greater than 61, as stated in the AEF Browser for "Enumeration enum(SettlementCashFlowType)"
CATEGORY_MAPPING_2 = [
['None 0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '60', '61', 'None 0'],
['Commitment Fee 4300', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', 'Commitment Fee 4300']
]
def get_Category_Mapping(e_num):
if e_num >= 1 and e_num <= 52:
return CATEGORY_MAPPING[e_num]
elif e_num == 4300: #Commitment Fee
return CATEGORY_MAPPING_2[1]
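

# Minimal lookup sketch (not in the original module); the enum values below are examples
# taken from the docstring above: cash-flow type 2 = 'Dividend', instrument column 7 = 'Bond'.
if __name__ == '__main__':
    row = get_Category_Mapping(2)  # the 'Dividend 2' row of CATEGORY_MAPPING
    print(row[0], '->', row[7])    # Dividend 2 -> 301 (category code for a Bond)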
'''
INSTRUMENT_PRODUCT_MAPPING = {
'BasketRepo/Reverse' : 'Bond',
'BasketSecurityLoan' : 'Shares',
'Bill' : 'Money',
'Bond' : 'Bond',
'BondIndex' : 'Bond',
'BuySellback' : 'Bond',
'CallAccount' : 'None',
'CashCollateral' : 'Money',
'Cap' : 'Interest/Loan',
'CD' : 'Money',
'CFD' : 'Shares',
'CLN' : 'N/A',
'Collateral' : 'Money',
'Combination' : 'Option or as Underlying',
'Collar' : 'None',
'Commodity' : 'Commodity (Agri)',
'Commodity Index' : 'None',
'Commodity Variant' : 'None',
'Convertible' : 'N/A',
'CreditDefaultSwap' : 'Bond or Credit',
'CreditIndex' : 'Bond or Credit',
'Curr' : 'FX',
'CurrSwap' : 'Interest/Loan',
'Deposit' : 'Money',
'Depositary Receipt' : 'Money',
'DualCurrBond' : 'Bond',
'EquityIndex' : 'Shares',
'EquitySwap' : 'Shares',
'ETF' : 'Shares or as Underlying',
'Floor' : 'Interest/Loan',
'FRA' : 'Interest/Loan',
'FreeDefCF' : 'N/A',
'FRN' : 'Bond',
'Fund' : 'Money',
'Future/Forward' : 'Shares or Commodities',
'Fx Rate' : 'N/A',
'FXOptionDatedFwd' : 'Option or as Underlying',
'FxSwap' : 'FX',
'IndexLinkedBond' : 'Bond',
'IndexLinkedSwap' : 'Interest/Loan',
'LEPO' : 'None',
'MBS/ABS' : 'Credit or Mortgages',
'MultiAsset' : 'None',
'MultiOption' : 'None',
'Option' : 'Option or as Underlying',
'Portfolio Swap' : 'Shares',
'PriceIndex' : 'Shares',
'PriceSwap' : 'None',
'PromisLoan' : 'Money',
'RateIndex' : 'Interest/Loan',
'Repo/Reverse' : 'Bond',
'SecurityLoan' : 'Shares',
'Stock' : 'Shares',
'StockRight' : 'None',
'Swap' : 'Interest/Loan',
'TotalReturnSwap' : 'Shares/Inward Listing',
'VarianceSwap' : 'Shares',
'Warrant' : 'Shares',
'Zero' : 'Bond'}
PRODUCT_CODE = {
'Bond' : '1',
'Bond or Credit' : '2',
'Commodity (Agri)' : '3',
'Credit or Mortgages' : '4',
'FX' : '5',
'Interest/Loan' : '6',
'Money' : '7',
'N/A' : '8',
'Option or as Underlying' : '9',
'Shares' : '10',
'Shares or as Underlying' : '11',
'Shares or Commodities' : '12',
'Shares/Inward Listing' : '13'
}
'''
| [
"[email protected]"
] | |
4936d190287dd5249daf34313e71e03c891daab6 | fb5d9f9b4ae3d7059d582ebb390916c2f9528852 | /util/__init__.py | 231dd87f66aa7d5f32d08714cbb4ea33acfaa90f | [] | no_license | tianxiaguixin002/Code-Implementation-of-Super-Resolution-ZOO | 32d4168f4d8d031968b7a601cf68b50730b15b06 | f6ccf309c7653a27173de5184d17bb5933baab14 | refs/heads/master | 2022-11-13T17:09:11.484532 | 2020-07-06T01:51:25 | 2020-07-06T01:51:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,476 | py | """This package includes a miscellaneous collection of useful helper functions."""
import os
def mkdirs(paths):
"""create empty directories if they don't exist
Parameters:
paths (str list) -- a list of directory paths
"""
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
"""create a single empty directory if it didn't exist
Parameters:
path (str) -- a single directory path
"""
if not os.path.exists(path):
os.makedirs(path)
def remove_pad_for_tensor(tensor, HR_GT_h_w, factor, LR_flag=True):
    """Center-crop a padded 4-D tensor back to the HR ground-truth size
    (divided by `factor` when LR_flag is True)."""
    assert len(tensor.shape) == 4
_, _, now_h, now_w = tensor.shape
des_h, des_w = HR_GT_h_w
assert des_h % factor == 0 and des_w % factor == 0
if LR_flag:
des_h = des_h // factor
des_w = des_w // factor
assert now_h >= des_h and now_w >= des_w
delta_h = now_h - des_h
delta_w = now_w - des_w
if LR_flag:
start_h = delta_h // 2
start_w = delta_w // 2
return tensor[..., start_h: start_h + des_h, start_w: start_w + des_w]
else:
assert delta_w % factor == 0 and delta_h % factor == 0
delta_h = delta_h // factor
delta_w = delta_w // factor
start_h = delta_h // 2
start_w = delta_w // 2
return tensor[..., start_h*factor: start_h*factor + des_h, start_w*factor: start_w*factor + des_w]
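

# Usage sketch (not in the original file; assumes PyTorch is installed): crop a padded
# LR tensor back to the ground-truth size divided by the scale factor.
if __name__ == '__main__':
    import torch
    hr_h_w = (128, 128)  # HR ground-truth (height, width)
    lr_padded = torch.zeros(1, 3, 36, 40)  # LR tensor padded beyond 128 / 4 = 32
    lr = remove_pad_for_tensor(lr_padded, hr_h_w, factor=4, LR_flag=True)
    print(lr.shape)  # torch.Size([1, 3, 32, 32])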
| [
"[email protected]"
] | |
38e065c61bb431a8fc4dd5b8d0a8130d39cb9dfd | 4ddf4fa6a4a499d64b23fb99d70a7bb3802fd1b0 | /exercises/flask_regression.py | 4fd8a87deed5370e70ec83805bf14ac80fa11aac | [] | no_license | biterbilen/MVML | 2b318b3883c00ed1908ef75924077e3aab639094 | 76a79ded26d09452234b7ae2b4809e47aa93df70 | refs/heads/master | 2023-01-13T10:04:10.269589 | 2020-11-16T18:55:19 | 2020-11-16T18:55:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 364 | py | import pickle
from flask import Flask, request, render_template
import pandas as pd
app = Flask(__name__)
with open('exercises/model.pkl', 'rb') as f:
model = pickle.load(f)
@app.route("/")
def index():
pass
@app.route("/result", methods=["POST"])
def predict():
new = pd.DataFrame({'X': [20]})
y = float(model.predict(new)[0])
return pass
| [
"[email protected]"
] | |
686a68dd2426c857f28b7069b29021b4b28d8624 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_monsters.py | e7f2bb60af124f30bf240705f5e9a379d84687ad | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.verbs._monster import _MONSTER
#class header
class _MONSTERS(_MONSTER, ):
def __init__(self,):
_MONSTER.__init__(self)
self.name = "MONSTERS"
self.specie = 'verbs'
self.basic = "monster"
self.jsondata = {}
| [
"[email protected]"
] | |
5cc5c7291b31f104c7c5edb376c186183d3c4f40 | efe8b839832350774ed563d3f18359d1df1b046a | /tests/test_documents.py | de20e541db7519fe2206b402d35d178eae7a8f7e | [] | no_license | torchbox/wagtailapi | ae3bd64beb4381e0a481e389962b7e2ca91bc269 | 0b8cf3e9beb1e66c26f4af1519a752b69f6b7471 | refs/heads/master | 2021-01-01T19:30:21.505800 | 2015-08-11T11:59:55 | 2015-08-11T11:59:55 | 29,594,101 | 8 | 6 | null | 2015-08-11T11:59:23 | 2015-01-21T14:45:16 | Python | UTF-8 | Python | false | false | 14,688 | py | import json
import unittest
import mock
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.conf import settings
from wagtail.wagtaildocs.models import Document
from wagtailapi import signal_handlers
from . import models
class TestDocumentListing(TestCase):
fixtures = ['wagtailapi_tests.json']
def get_response(self, **params):
return self.client.get(reverse('wagtailapi_v1_documents:listing'), params)
def get_document_id_list(self, content):
return [page['id'] for page in content['documents']]
# BASIC TESTS
def test_status_code(self):
response = self.get_response()
self.assertEqual(response.status_code, 200)
def test_content_type_header(self):
response = self.get_response()
self.assertEqual(response['Content-type'], 'application/json')
def test_valid_json(self):
response = self.get_response()
# Will crash if there's a problem
json.loads(response.content.decode('UTF-8'))
def test_meta_section_is_present(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
def test_total_count_is_present(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('total_count', content['meta'])
self.assertIsInstance(content['meta']['total_count'], int)
def test_documents_section_is_present(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('documents', content)
self.assertIsInstance(content['documents'], list)
def test_total_count(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], Document.objects.count())
# EXTRA FIELDS
def test_extra_fields_default(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
for document in content['documents']:
self.assertEqual(document.keys(), set(['id', 'title']))
def test_extra_fields(self):
response = self.get_response(fields='title,tags')
content = json.loads(response.content.decode('UTF-8'))
for document in content['documents']:
self.assertEqual(document.keys(), set(['id', 'title', 'tags']))
def test_extra_fields_tags(self):
response = self.get_response(fields='tags')
content = json.loads(response.content.decode('UTF-8'))
for document in content['documents']:
self.assertIsInstance(document['tags'], list)
def test_extra_fields_which_are_not_in_api_fields_gives_error(self):
response = self.get_response(fields='uploaded_by_user')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: uploaded_by_user"})
def test_extra_fields_unknown_field_gives_error(self):
response = self.get_response(fields='123,title,abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
# FILTERING
def test_filtering_exact_filter(self):
response = self.get_response(title='James Joyce')
content = json.loads(response.content.decode('UTF-8'))
document_id_list = self.get_document_id_list(content)
self.assertEqual(document_id_list, [2])
def test_filtering_tags(self):
Document.objects.get(id=3).tags.add('test')
response = self.get_response(tags='test')
content = json.loads(response.content.decode('UTF-8'))
document_id_list = self.get_document_id_list(content)
self.assertEqual(document_id_list, [3])
def test_filtering_unknown_field_gives_error(self):
response = self.get_response(not_a_field='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: not_a_field"})
# ORDERING
def test_ordering_default(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
document_id_list = self.get_document_id_list(content)
self.assertEqual(document_id_list, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
def test_ordering_by_title(self):
response = self.get_response(order='title')
content = json.loads(response.content.decode('UTF-8'))
document_id_list = self.get_document_id_list(content)
self.assertEqual(document_id_list, [3, 12, 10, 2, 7, 9, 8, 4, 1, 5, 11, 6])
def test_ordering_by_title_backwards(self):
response = self.get_response(order='-title')
content = json.loads(response.content.decode('UTF-8'))
document_id_list = self.get_document_id_list(content)
self.assertEqual(document_id_list, [6, 11, 5, 1, 4, 8, 9, 7, 2, 10, 12, 3])
def test_ordering_by_random(self):
response_1 = self.get_response(order='random')
content_1 = json.loads(response_1.content.decode('UTF-8'))
document_id_list_1 = self.get_document_id_list(content_1)
response_2 = self.get_response(order='random')
content_2 = json.loads(response_2.content.decode('UTF-8'))
document_id_list_2 = self.get_document_id_list(content_2)
self.assertNotEqual(document_id_list_1, document_id_list_2)
def test_ordering_by_random_backwards_gives_error(self):
response = self.get_response(order='-random')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "cannot order by 'random' (unknown field)"})
def test_ordering_by_random_with_offset_gives_error(self):
response = self.get_response(order='random', offset=10)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "random ordering with offset is not supported"})
def test_ordering_by_unknown_field_gives_error(self):
response = self.get_response(order='not_a_field')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "cannot order by 'not_a_field' (unknown field)"})
# LIMIT
def test_limit_only_two_results_returned(self):
response = self.get_response(limit=2)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(len(content['documents']), 2)
def test_limit_total_count(self):
response = self.get_response(limit=2)
content = json.loads(response.content.decode('UTF-8'))
# The total count must not be affected by "limit"
self.assertEqual(content['meta']['total_count'], Document.objects.count())
def test_limit_not_integer_gives_error(self):
response = self.get_response(limit='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit must be a positive integer"})
def test_limit_too_high_gives_error(self):
response = self.get_response(limit=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit cannot be higher than 20"})
@override_settings(WAGTAILAPI_LIMIT_MAX=10)
def test_limit_maximum_can_be_changed(self):
response = self.get_response(limit=20)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit cannot be higher than 10"})
@override_settings(WAGTAILAPI_LIMIT_MAX=2)
def test_limit_default_changes_with_max(self):
# The default limit is 20. If WAGTAILAPI_LIMIT_MAX is less than that,
# the default should change accordingly.
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(len(content['documents']), 2)
# OFFSET
def test_offset_5_usually_appears_5th_in_list(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
document_id_list = self.get_document_id_list(content)
self.assertEqual(document_id_list.index(5), 4)
def test_offset_5_moves_after_offset(self):
response = self.get_response(offset=4)
content = json.loads(response.content.decode('UTF-8'))
document_id_list = self.get_document_id_list(content)
self.assertEqual(document_id_list.index(5), 0)
def test_offset_total_count(self):
response = self.get_response(offset=10)
content = json.loads(response.content.decode('UTF-8'))
# The total count must not be affected by "offset"
self.assertEqual(content['meta']['total_count'], Document.objects.count())
def test_offset_not_integer_gives_error(self):
response = self.get_response(offset='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "offset must be a positive integer"})
# SEARCH
def test_search_for_james_joyce(self):
response = self.get_response(search='james')
content = json.loads(response.content.decode('UTF-8'))
document_id_list = self.get_document_id_list(content)
self.assertEqual(set(document_id_list), set([2]))
def test_search_when_ordering_gives_error(self):
response = self.get_response(search='james', order='title')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "ordering with a search query is not supported"})
@override_settings(WAGTAILAPI_SEARCH_ENABLED=False)
def test_search_when_disabled_gives_error(self):
response = self.get_response(search='james')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "search is disabled"})
def test_search_when_filtering_by_tag_gives_error(self):
response = self.get_response(search='james', tags='wagtail')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "filtering by tag with a search query is not supported"})
class TestDocumentDetail(TestCase):
fixtures = ['wagtailapi_tests.json']
def get_response(self, image_id, **params):
return self.client.get(reverse('wagtailapi_v1_documents:detail', args=(image_id, )), params)
def test_status_code(self):
response = self.get_response(1)
self.assertEqual(response.status_code, 200)
def test_content_type_header(self):
response = self.get_response(1)
self.assertEqual(response['Content-type'], 'application/json')
def test_valid_json(self):
response = self.get_response(1)
# Will crash if there's a problem
json.loads(response.content.decode('UTF-8'))
def test_id(self):
response = self.get_response(1)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('id', content)
self.assertEqual(content['id'], 1)
def test_meta(self):
response = self.get_response(1)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
def test_title(self):
response = self.get_response(1)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('title', content)
self.assertEqual(content['title'], "Wagtail by Mark Harkin")
def test_tags(self):
Document.objects.get(id=1).tags.add('hello')
Document.objects.get(id=1).tags.add('world')
response = self.get_response(1)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('tags', content)
self.assertEqual(content['tags'], ['hello', 'world'])
def test_download_url(self):
response = self.get_response(1)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('download_url', content['meta'])
self.assertEqual(content['meta']['download_url'], 'http://localhost/documents/1/wagtail_by_markyharky.jpg')
@override_settings(WAGTAILAPI_BASE_URL='http://api.example.com/')
def test_download_url_with_custom_base_url(self):
response = self.get_response(1)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('download_url', content['meta'])
self.assertEqual(content['meta']['download_url'], 'http://api.example.com/documents/1/wagtail_by_markyharky.jpg')
@override_settings(
INSTALLED_APPS=settings.INSTALLED_APPS + (
'wagtail.contrib.wagtailfrontendcache',
),
WAGTAILFRONTENDCACHE={
'varnish': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000',
},
},
WAGTAILAPI_BASE_URL='http://api.example.com',
)
@mock.patch('wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend.purge')
class TestDocumentCacheInvalidation(TestCase):
fixtures = ['wagtailapi_tests.json']
@classmethod
def setUpClass(cls):
signal_handlers.register_signal_handlers()
@classmethod
def tearDownClass(cls):
signal_handlers.unregister_signal_handlers()
def test_resave_document_purges(self, purge):
Document.objects.get(id=5).save()
purge.assert_any_call('http://api.example.com/api/v1/documents/5/')
def test_delete_document_purges(self, purge):
Document.objects.get(id=5).delete()
purge.assert_any_call('http://api.example.com/api/v1/documents/5/')
| [
"[email protected]"
] | |
90088be62e540370be33da0a6c2da6c4e57b429a | d3426a5d1bbecde0fe480e7af64a54bfdb8295eb | /students/migrations/0005_auto_20170802_1524.py | 24c508cf44b2fa93d7d609ad89d31875eb8a08a5 | [
"MIT"
] | permissive | pu6ki/elsyser | 5a3b83f25f236b4a4903180985f60ced98b3fb53 | 52261c93b58422b0e39cae656ae9409ea03a488d | refs/heads/master | 2021-01-12T18:06:18.375185 | 2017-12-10T18:18:34 | 2017-12-10T18:18:34 | 71,325,732 | 5 | 4 | MIT | 2017-12-10T18:18:35 | 2016-10-19T06:26:47 | Python | UTF-8 | Python | false | false | 739 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-08-02 12:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('students', '0004_auto_20170801_1243'),
]
operations = [
migrations.DeleteModel(
name='CustomUser',
),
migrations.AddField(
model_name='student',
name='activation_key',
field=models.CharField(blank=True, max_length=40, null=True),
),
migrations.AddField(
model_name='teacher',
name='activation_key',
field=models.CharField(blank=True, max_length=40, null=True),
),
]
| [
"[email protected]"
] | |
0d243a771485ed631550fd265ff1c7dd644c4b81 | d8af7c6372aff57012c80d3b8a9dfaab81499f71 | /AIDStudy/01-PythonBase/day07/exercise05.py | ab36bcf266a5024e045bea117f549cbbf81366f4 | [] | no_license | fanxiao168/pythonStudy | 4843c56019b8f997fd7fc566904a9e0162e9a541 | f94e2238d40c41ee54ff4184c500d659c6820c03 | refs/heads/master | 2021-02-04T20:54:10.850770 | 2020-05-28T08:55:35 | 2020-05-28T08:55:35 | 243,708,800 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 456 | py | # A 2-D list with 3 rows and 5 columns
list01 = [
[0, 1, 2, 3, 4],
[1, 28, 45, 6, 7],
[20, 7, 3, 65, 2]
]
# Print the elements of the second row
for item in list01[1]:
print(item, end=' ')
# Print the first column
print(list01[0][0])
print(list01[1][0])
print(list01[2][0])
for i in range(len(list01)):
print(list01[i][0])
# Print every element
for i in range(len(list01)):
for item in list01[i]:
print(item, end=' ')
print()
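# Editor's note -- an equivalent, more idiomatic pass over the whole grid,
# shown for illustration only (commented out so the exercise output is unchanged):
# for row in list01:
#     print(' '.join(str(item) for item in row))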
| [
"[email protected]"
] | |
ffaf6719d09a6304bc437aa6bcffe56cc27c6ecf | ed5629376d293b7dbda9f53ef1b57e38cd52d655 | /asciivalue.py | 82e9ca120867d894dfbe6162dab0195351cc8a12 | [] | no_license | sabariks/pythonpgm | eb46172b8ffd17b945f6ccd8241015c9874e37e7 | 1bb9264b6f219b69b9a782591d526fc7adb891cd | refs/heads/master | 2021-01-08T23:22:15.891688 | 2020-02-27T17:37:34 | 2020-02-27T17:37:34 | 242,174,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | s=input()
l=list(s)
total = 0  # use a name other than the built-in sum()
for i in range(0, len(s)):
    val = ord(l[i])  # code point of each character
    total += val
print(total)
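# Editor's note -- an equivalent one-liner, shown for illustration:
# print(sum(ord(ch) for ch in s))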
| [
"[email protected]"
] | |
46077844615c08090f6de524a45ad3b9f9d1e776 | e415e4cdab3d1cd04a4aa587f7ddc59e71977972 | /datetime/datetime.timedelta.py | 77ed0bd2af1f40780439d18c8c5f04973b36cd10 | [] | no_license | nixawk/hello-python3 | 8c3ebba577b39f545d4a67f3da9b8bb6122d12ea | e0680eb49d260c5e3f06f9690c558f95a851f87c | refs/heads/master | 2022-03-31T23:02:30.225702 | 2019-12-02T10:15:55 | 2019-12-02T10:15:55 | 84,066,942 | 5 | 7 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
class datetime.timedelta
A duration expressing the difference between two date, time,
or datetime instances to microsecond resolution.
"""
import datetime
def datetime_timedelta():
# def __new__(cls, days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0):
_timedelta = datetime.timedelta(
days=1,
seconds=0,
microseconds=0,
milliseconds=0,
minutes=0,
hours=0,
weeks=1
)
print(str(_timedelta)) # 8 days, 0:00:00
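    # Editor's addition (illustrative): a timedelta supports arithmetic with
    # datetimes and can report its total length in seconds.
    print(_timedelta.total_seconds())                  # 691200.0
    print(datetime.datetime(2020, 1, 1) + _timedelta)  # 2020-01-09 00:00:00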
if __name__ == '__main__':
datetime_timedelta()
# reference
# https://docs.python.org/3/library/datetime.html
| [
"[email protected]"
] | |
b7020ef6386d6ad70f6a34fd08ff52d6e5aac54a | bf4f5e90fff95800f3ab944efcdb9aace29be71d | /banshee-master/api/mms/balance/mall_balance.py | 2f573caaf12c82cd5229034b31716e1636ce01a9 | [] | no_license | tanlu01/testfan | c77c833d4e164a4786f20d7f28ffbb99cd6dcb2e | de8cf936dc7e80a9e2847fa47ae5c909729675b1 | refs/heads/master | 2023-04-20T17:12:57.515484 | 2021-05-10T15:53:15 | 2021-05-10T15:53:15 | 365,418,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,918 | py | from api.mms.mms_ import Mms
class MallBalance(Mms):
method = 'post'
api = '/api/mall/Balance'
data = {}
error_resp = {
'code': 400000,
'message': '没有可以购买的商品'
}
expected_schema = {
"$schema": "http://json-schema.org/draft-06/schema#",
"title": "expected_data",
"type": "object",
"required": ["code", "payload"],
"properties": {
"code": {
"type": "number"
},
"payload": {
"type": "object",
"required": ["id", "mall_id", "goods_payment_income", "goods_payment_expend", "shop_deposit_cash", "activity_deposit_cash", "activity_subsidy", "created_at", "update_at", "default_shop_deposit_cash", "goods_payment_withdrawing", "shop_deposit_withdrawing", "activity_deposit_withdrawing", "activity_subsidy_withdrawing", "goods_payment_freeze", "is_open", "loan_withdraw_times", "remain_withdraw_times", "activation_status", "sub_mch_state", "address", "need_annual_fee", "has_factory_info"],
"properties": {
"id": {
"type": "string"
}, "mall_id": {
"type": "string"
}, "goods_payment_income": {
"type": "string"
}, "goods_payment_expend": {
"type": "string"
}, "shop_deposit_cash": {
"type": "string"
}, "activity_deposit_cash": {
"type": "string"
}, "activity_subsidy": {
"type": "string"
}, "created_at": {
"type": "string"
}, "update_at": {
"type": "string"
}, "default_shop_deposit_cash": {
"type": "string"
}, "goods_payment_withdrawing": {
"type": "string"
}, "shop_deposit_withdrawing": {
"type": "string"
}, "activity_deposit_withdrawing": {
"type": "string"
}, "activity_subsidy_withdrawing": {
"type": "string"
}, "goods_payment_freeze": {
"type": "string"
}, "is_open": {
"type": "string"
},
"punishment":{
"type": "object",
"required": [],
"properties": {}
},
"activity_forbidden":{
"type": "object",
"required": [],
"properties": {}
},
"loan_withdraw_times": {
"type": "number"
}, "remain_withdraw_times": {
"type": "number"
}, "activation_status": {
"type": "string"
}, "sub_mch_state": {
"type": "object",
"required": ["status", "info"],
"properties": {
"status": {
"type": "string"
},
"info": {
"type": "string"
},
}
}, "address": {
"type": "number"
}, "need_annual_fee": {
"type": "boolean"
}, "has_factory_info": {
"type": "boolean"
},
}
}
}
}
| [
"[email protected]"
] | |
eb7268205ffc0e037565f404d2dc6e35a518804e | 5cfc22491d6c83e807b21883ce71fdb958a2c53f | /identify/trend.py | 826bf058e30afc7807df0bd377da5c8348ad60e1 | [] | no_license | Jeffin-Studios/stocktool | ce29824bfa663927eac1b17dad3ed7830dbedf1f | 96f4e96feb4de9a54028e4b5ef56bd8554f122eb | refs/heads/master | 2020-03-28T03:27:27.271479 | 2018-10-18T08:03:08 | 2018-10-18T08:03:08 | 147,646,775 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,466 | py |
# Rolling averages for both the stock and the market
from pandas_datareader import data
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
class stocktrend():
# Initialization requires a ticker symbol
def __init__(self, stock_name, start_date = None, end_date = None, draw_graph = False):
self.name = stock_name.upper()
self.start = start_date
self.end = end_date
self.graph = draw_graph
self.stock = data.DataReader(stock_name, 'yahoo', start_date, end_date)
# Basic Historical Plots and Basic Statistics
def plot_stock(self, stats=[], series = [], serieslabels = [], xlabel="Date", ylabel="Price"):
fig, ax = plt.subplots(figsize=(16,9))
fig.suptitle(self.name, fontsize=20)
for stat in stats:
ax.plot(self.stock[stat].index, self.stock[stat], label=stat)
        for i, ser in enumerate(series):  # "ser" avoids shadowing the pandas_datareader "data" import
            ax.plot(ser.index, ser, label=serieslabels[i])
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.legend()
plt.axhline(0, color='black')
plt.grid()
plt.show()
def rollingAverage(self, strict = False):
close_price = self.stock['Close']
if (strict):
# Getting all weekdays
all_weekdays = pd.date_range(start=self.start, end=self.end, freq='B')
close_price = close_price.reindex(all_weekdays)
close_price = close_price.fillna(method='ffill')
# weekly_roll
short_rolling_stock = close_price.rolling(window=5).mean()
medium_rolling_stock = close_price.rolling(window=20).mean()
long_rolling_stock = close_price.rolling(window=60).mean()
if (self.graph):
self.plot_stock(series=[close_price, short_rolling_stock, medium_rolling_stock, long_rolling_stock], serieslabels=["Closing Price", "5 days rolling", "20 days rolling", "60 days rolling"])
return (short_rolling_stock, medium_rolling_stock)
# Buy when this is at a zero or high positive slope
def daily_change(self):
if ('Adj. Close' not in self.stock.columns):
self.stock['Adj. Close'] = self.stock['Close']
self.stock['Adj. Open'] = self.stock['Open']
self.stock['y'] = self.stock['Adj. Close']
self.stock['Daily Change'] = self.stock['Adj. Close'] - self.stock['Adj. Open']
if (self.graph):
self.plot_stock(stats=['Daily Change'], ylabel="Change in Price")
def get_rsi(self, n=14):
prices = self.stock['Close']
dates = prices.index
deltas = np.diff(prices)
seed = deltas[:n+1]
up = seed[seed>=0].sum()/n
down = -seed[seed<0].sum()/n
rs = up/down
rsi = np.zeros_like(prices)
rsi[:n] = 100. - 100./(1.+rs)
for i in range(n, len(prices)):
delta = deltas[i-1] # cause the diff is 1 shorter
if delta>0:
upval = delta
downval = 0.
else:
upval = 0.
downval = -delta
up = (up*(n-1) + upval)/n
down = (down*(n-1) + downval)/n
rs = up/down
rsi[i] = 100. - 100./(1.+rs)
if (self.graph):
fig, ax = plt.subplots(figsize=(16,9))
fig.suptitle(self.name, fontsize=20)
ax.plot(dates, rsi, color = "purple", linewidth=1.5, label='RSI')
ax.axhline(70, color="red")
ax.axhline(30, color="green")
ax.fill_between(dates, rsi, 70, where=(rsi>=70), facecolor="red", edgecolor="red", alpha=0.5)
ax.fill_between(dates, rsi, 30, where=(rsi<=30), facecolor="green", edgecolor="green", alpha=0.5)
ax.set_yticks([30,70])
ax.legend()
ax.tick_params(axis='y')
ax.tick_params(axis='x')
ax.set_xlabel("Date")
ax.set_ylabel("Momentum")
ax.grid()
plt.show()
return rsi
######## Need to make this real time, to detect when climb starts and when dip starts
def fluctuation(self):
(short_rolling_stock, medium_rolling_stock) = self.rollingAverage()
self.stock["Fluctuation"] = short_rolling_stock - medium_rolling_stock
# Starts climbing when short term average passes long term average
# Starts dipping when short term average goes below long term average
### Code determines if change is at a zero, evaluates slope of the change, to see if its climbing or dipping, also concavity to make sure.
if (self.graph):
self.plot_stock(stats=['Fluctuation'], ylabel="Deviation From Average")
# return short_rolling_stock.std(ddof=0)
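    # Editor's sketch (not part of the original class): a sign change in the
    # "Fluctuation" column marks a short/medium moving-average crossover.
    # Assumes fluctuation() has already been called to populate the column.
    def crossover_signals(self):
        sign = np.sign(self.stock["Fluctuation"])
        # +2 where the short average crosses above the medium one (potential
        # start of a climb), -2 where it crosses below (potential dip).
        return sign.diff().fillna(0)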
    # How wildly stock prices fluctuate (look at daily changes)
def get_volatility(self):
return
if __name__ == "__main__":
start="2018-08-01"
end="2018-9-20"
tickers = ['CRON', 'ECOM', 'CLDR', 'HMI']
stock = stocktrend(tickers[3], start, end, draw_graph = True)
volatility = stock.fluctuation()
print(volatility)
stock.daily_change()
stock.get_rsi()
| [
"[email protected]"
] | |
0e361c2d25f1dc4f3d81ae0f99eff2116e37073d | eec0d71067c95a772e3fdeeadab242230cd2b31f | /mysite/settings.py | e3abb79a4fe1388ad0999909201edfd4ccbc3f9b | [] | no_license | amcmy/my-first-blog | d543ef051ab9decca1a5ff073c3bb40ef552f74a | 5bb9d7284633f31797e5cc83fcd3d14b9eed8960 | refs/heads/master | 2020-05-17T15:21:36.295933 | 2019-04-27T15:14:09 | 2019-04-27T15:14:09 | 183,788,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'p+#p6o^(-%rxrujd$8wda&+c%!9ejyotrr&)hc0mwd&j(iomen'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/London'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
8214a4f2f5d28141284cdcea9a1c77e386cbbf48 | cb4736c3db7288ca3868e319ace6c19364302d2f | /module_template/repeater.py | e453e33ab9530bcdf5872230c59496fffeb4f50f | [
"MIT"
] | permissive | rstms/repeat | 0928d9833463e5048071a2564916b1a2eb233fb6 | af9f1aa01cc0d5d5fd8e6ff688e7c84267b8600f | refs/heads/master | 2023-07-27T16:40:04.187344 | 2021-09-11T03:43:31 | 2021-09-11T03:43:31 | 399,602,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,784 | py | # -*- coding: utf-8 -*-
"""repeater object for repeat command line tool
This simple module contains a Repeater() class with a run() method that
does the work of the program.
Example:
To use the function open an input stream and an output stream in text
mode, then cal the run method specifying the number of times to repeat
the copy of the input.
To use it from Python code::
>>> form repeat import Repeater
>>> Repeater(sys.stdin, sys.stdout, 1).run()
The function operates in text mode and uses line buffering. It is designed
to be called by the click api.
"""
import io
import tempfile
import sys
from .exception import ParameterError
class Repeater():
"""Repeat stream input to output mutliple times"""
def __init__(self):
"""Context constructor"""
pass
def run(self, infile, outfile, count=1, prefix=False):
"""Copy stdin to stdout and a buffer, then output the buffer multiple times.
Arguments
infile (:obj: `File`): input file
outfile (:obj: `File`): output file
count (int): number of output repetitions
prefix (bool): begin output with line count
"""
if count < 0:
raise ParameterError('I refuse to repeat a negative number of times.')
# see if the stream is binary or text
if isinstance(infile, io.TextIOBase):
mode = 'w+'
else:
mode = 'wb+'
buf = tempfile.SpooledTemporaryFile(mode=mode)
        length = -1  # guard: with empty input the loop below never binds length
        for (length, line) in enumerate(infile):
            buf.write(line)
if prefix:
outfile.write(f'{length+1}\n')
for _ in range(0, count):
buf.seek(0)
for line in buf:
outfile.write(line)
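# Minimal usage sketch (editor's addition; the module is normally driven by a
# click-based CLI rather than executed directly):
if __name__ == '__main__':
    out = io.StringIO()
    Repeater().run(io.StringIO("hello\n"), out, count=2)
    assert out.getvalue() == "hello\nhello\n"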
| [
"[email protected]"
] | |
b2740e61d2cce9cc90e86aa07c9fe0c08ffeec19 | 6b9084d234c87d7597f97ec95808e13f599bf9a1 | /evaluation/evaluator/got10k/experiments/otb.py | 5b7df3f72c4a16a1caf4f72adc7cc90aefd73e4b | [] | no_license | LitingLin/ubiquitous-happiness | 4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc | aae2d764e136ca4a36c054212b361dd7e8b22cba | refs/heads/main | 2023-07-13T19:51:32.227633 | 2021-08-03T16:02:03 | 2021-08-03T16:02:03 | 316,664,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,526 | py | from __future__ import absolute_import, division, print_function
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import json
from PIL import Image
from ..datasets import OTB
from ..utils.metrics import rect_iou, center_error
from ..utils.viz import show_frame
class ExperimentOTB(object):
r"""Experiment pipeline and evaluation toolkit for OTB dataset.
Args:
root_dir (string): Root directory of OTB dataset.
version (integer or string): Specify the benchmark version, specify as one of
``2013``, ``2015``, ``tb50`` and ``tb100``. Default is ``2015``.
result_dir (string, optional): Directory for storing tracking
results. Default is ``./results``.
report_dir (string, optional): Directory for storing performance
evaluation results. Default is ``./reports``.
"""
def __init__(self, root_dir, version=2015,
result_dir='results', report_dir='reports'):
super(ExperimentOTB, self).__init__()
self.dataset = OTB(root_dir, version)
self.result_dir = os.path.join(result_dir, 'OTB' + str(version))
self.report_dir = os.path.join(report_dir, 'OTB' + str(version))
# as nbins_iou increases, the success score
# converges to the average overlap (AO)
self.nbins_iou = 21
self.nbins_ce = 51
def run(self, tracker, visualize=False):
print('Running tracker %s on %s...' % (
tracker.name, type(self.dataset).__name__))
# loop over the complete dataset
for s, (img_files, anno) in enumerate(self.dataset):
seq_name = self.dataset.seq_names[s]
print('--Sequence %d/%d: %s' % (s + 1, len(self.dataset), seq_name))
# skip if results exist
record_file = os.path.join(
self.result_dir, tracker.name, '%s.txt' % seq_name)
if os.path.exists(record_file):
print(' Found results, skipping', seq_name)
continue
# tracking loop
boxes, times = tracker.track(
img_files, anno[0, :], visualize=visualize)
assert len(boxes) == len(anno)
# record results
self._record(record_file, boxes, times)
def report(self, tracker_names, plot_curves=True):
assert isinstance(tracker_names, (list, tuple))
# assume tracker_names[0] is your tracker
report_dir = os.path.join(self.report_dir, tracker_names[0])
if not os.path.isdir(report_dir):
os.makedirs(report_dir)
report_file = os.path.join(report_dir, 'performance.json')
performance = {}
for name in tracker_names:
print('Evaluating', name)
seq_num = len(self.dataset)
succ_curve = np.zeros((seq_num, self.nbins_iou))
prec_curve = np.zeros((seq_num, self.nbins_ce))
speeds = np.zeros(seq_num)
performance.update({name: {
'overall': {},
'seq_wise': {}}})
for s, (_, anno) in enumerate(self.dataset):
seq_name = self.dataset.seq_names[s]
record_file = os.path.join(
self.result_dir, name, '%s.txt' % seq_name)
boxes = np.loadtxt(record_file, delimiter=',')
boxes[0] = anno[0]
if not (len(boxes) == len(anno)):
                    print('warning: %s annotations do not match boxes' % seq_name)
len_min = min(len(boxes),len(anno))
boxes = boxes[:len_min]
anno = anno[:len_min]
assert len(boxes) == len(anno)
ious, center_errors = self._calc_metrics(boxes, anno)
succ_curve[s], prec_curve[s] = self._calc_curves(ious, center_errors)
# calculate average tracking speed
time_file = os.path.join(
self.result_dir, name, 'times/%s_time.txt' % seq_name)
if os.path.isfile(time_file):
times = np.loadtxt(time_file)
times = times[times > 0]
if len(times) > 0:
speeds[s] = np.mean(1. / times)
# store sequence-wise performance
performance[name]['seq_wise'].update({seq_name: {
'success_curve': succ_curve[s].tolist(),
'precision_curve': prec_curve[s].tolist(),
'success_score': np.mean(succ_curve[s]),
'precision_score': prec_curve[s][20],
'success_rate': succ_curve[s][self.nbins_iou // 2],
'speed_fps': speeds[s] if speeds[s] > 0 else -1}})
succ_curve = np.mean(succ_curve, axis=0)
prec_curve = np.mean(prec_curve, axis=0)
succ_score = np.mean(succ_curve)
prec_score = prec_curve[20]
succ_rate = succ_curve[self.nbins_iou // 2]
if np.count_nonzero(speeds) > 0:
avg_speed = np.sum(speeds) / np.count_nonzero(speeds)
else:
avg_speed = -1
# store overall performance
performance[name]['overall'].update({
'success_curve': succ_curve.tolist(),
'precision_curve': prec_curve.tolist(),
'success_score': succ_score,
'precision_score': prec_score,
'success_rate': succ_rate,
'speed_fps': avg_speed})
# report the performance
with open(report_file, 'w') as f:
json.dump(performance, f, indent=4)
# plot precision and success curves
if plot_curves:
self.plot_curves(tracker_names)
return performance
def show(self, tracker_names, seq_names=None, play_speed=1):
if seq_names is None:
seq_names = self.dataset.seq_names
elif isinstance(seq_names, str):
seq_names = [seq_names]
assert isinstance(tracker_names, (list, tuple))
assert isinstance(seq_names, (list, tuple))
play_speed = int(round(play_speed))
assert play_speed > 0
for s, seq_name in enumerate(seq_names):
print('[%d/%d] Showing results on %s...' % (
s + 1, len(seq_names), seq_name))
# load all tracking results
records = {}
for name in tracker_names:
record_file = os.path.join(
self.result_dir, name, '%s.txt' % seq_name)
records[name] = np.loadtxt(record_file, delimiter=',')
# loop over the sequence and display results
img_files, anno = self.dataset[seq_name]
for f, img_file in enumerate(img_files):
if not f % play_speed == 0:
continue
image = Image.open(img_file)
boxes = [anno[f]] + [
records[name][f] for name in tracker_names]
show_frame(image, boxes,
legends=['GroundTruth'] + tracker_names,
colors=['w', 'r', 'g', 'b', 'c', 'm', 'y',
'orange', 'purple', 'brown', 'pink'])
def _record(self, record_file, boxes, times):
# record bounding boxes
record_dir = os.path.dirname(record_file)
if not os.path.isdir(record_dir):
os.makedirs(record_dir)
np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')
while not os.path.exists(record_file):
print('warning: recording failed, retrying...')
np.savetxt(record_file, boxes, fmt='%.3f', delimiter=',')
print(' Results recorded at', record_file)
# record running times
time_dir = os.path.join(record_dir, 'times')
if not os.path.isdir(time_dir):
os.makedirs(time_dir)
time_file = os.path.join(time_dir, os.path.basename(
record_file).replace('.txt', '_time.txt'))
np.savetxt(time_file, times, fmt='%.8f')
def _calc_metrics(self, boxes, anno):
# can be modified by children classes
ious = rect_iou(boxes, anno)
center_errors = center_error(boxes, anno)
return ious, center_errors
def _calc_curves(self, ious, center_errors):
ious = np.asarray(ious, float)[:, np.newaxis]
center_errors = np.asarray(center_errors, float)[:, np.newaxis]
thr_iou = np.linspace(0, 1, self.nbins_iou)[np.newaxis, :]
thr_ce = np.arange(0, self.nbins_ce)[np.newaxis, :]
bin_iou = np.greater(ious, thr_iou)
bin_ce = np.less_equal(center_errors, thr_ce)
succ_curve = np.mean(bin_iou, axis=0)
prec_curve = np.mean(bin_ce, axis=0)
return succ_curve, prec_curve
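    # Editor's note (illustrative): succ_curve[t] is the fraction of frames
    # whose IoU exceeds the t-th of nbins_iou thresholds in [0, 1], and
    # prec_curve[e] is the fraction whose center error is within e pixels;
    # the reported precision score is prec_curve[20], i.e. a 20-pixel threshold.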
def plot_curves(self, tracker_names):
# assume tracker_names[0] is your tracker
report_dir = os.path.join(self.report_dir, tracker_names[0])
assert os.path.exists(report_dir), \
            'No reports found. Run "report" first ' \
            'before plotting curves.'
report_file = os.path.join(report_dir, 'performance.json')
assert os.path.exists(report_file), \
            'No reports found. Run "report" first ' \
            'before plotting curves.'
# load pre-computed performance
with open(report_file) as f:
performance = json.load(f)
succ_file = os.path.join(report_dir, 'success_plots.png')
prec_file = os.path.join(report_dir, 'precision_plots.png')
key = 'overall'
# markers
markers = ['-', '--', '-.']
markers = [c + m for m in markers for c in [''] * 10]
# sort trackers by success score
tracker_names = list(performance.keys())
succ = [t[key]['success_score'] for t in performance.values()]
inds = np.argsort(succ)[::-1]
tracker_names = [tracker_names[i] for i in inds]
# plot success curves
thr_iou = np.linspace(0, 1, self.nbins_iou)
fig, ax = plt.subplots()
lines = []
legends = []
for i, name in enumerate(tracker_names):
line, = ax.plot(thr_iou,
performance[name][key]['success_curve'],
markers[i % len(markers)])
lines.append(line)
legends.append('%s: [%.3f]' % (name, performance[name][key]['success_score']))
matplotlib.rcParams.update({'font.size': 7.4})
legend = ax.legend(lines, legends, loc='center left',
bbox_to_anchor=(1, 0.5))
matplotlib.rcParams.update({'font.size': 9})
ax.set(xlabel='Overlap threshold',
ylabel='Success rate',
xlim=(0, 1), ylim=(0, 1),
title='Success plots of OPE')
ax.grid(True)
fig.tight_layout()
print('Saving success plots to', succ_file)
fig.savefig(succ_file,
bbox_extra_artists=(legend,),
bbox_inches='tight',
dpi=300)
# sort trackers by precision score
tracker_names = list(performance.keys())
prec = [t[key]['precision_score'] for t in performance.values()]
inds = np.argsort(prec)[::-1]
tracker_names = [tracker_names[i] for i in inds]
# plot precision curves
thr_ce = np.arange(0, self.nbins_ce)
fig, ax = plt.subplots()
lines = []
legends = []
for i, name in enumerate(tracker_names):
line, = ax.plot(thr_ce,
performance[name][key]['precision_curve'],
markers[i % len(markers)])
lines.append(line)
legends.append('%s: [%.3f]' % (name, performance[name][key]['precision_score']))
matplotlib.rcParams.update({'font.size': 7.4})
legend = ax.legend(lines, legends, loc='center left',
bbox_to_anchor=(1, 0.5))
matplotlib.rcParams.update({'font.size': 9})
ax.set(xlabel='Location error threshold',
ylabel='Precision',
xlim=(0, thr_ce.max()), ylim=(0, 1),
title='Precision plots of OPE')
ax.grid(True)
fig.tight_layout()
print('Saving precision plots to', prec_file)
fig.savefig(prec_file, dpi=300)
| [
"[email protected]"
] | |
4f57c4e198a2dc78dc00b90d05162dd65d57d004 | ebd1d1bbaa0fe30590e2c8c0d19a9d7eff180320 | /arp/scripts/xtalk_preview.py | 7110e508cd826977ff0fc86692a60f78449baab7 | [] | no_license | HERA-Team/hera_sandbox | 3093009c21e13a79bf3914d64b521b8fbc4bc733 | f9d292f4a91c0599947e3c013b48114b2097d76d | refs/heads/master | 2021-11-26T16:54:38.665721 | 2021-10-05T19:41:40 | 2021-10-05T19:41:40 | 95,712,482 | 2 | 6 | null | 2017-07-10T19:46:22 | 2017-06-28T21:17:39 | Python | UTF-8 | Python | false | false | 1,100 | py | #! /usr/bin/env python
import aipy as a, capo as C, pylab as p, numpy as n
import sys
CH0,CH1 = 16,182
fqs = n.linspace(.1,.2,203)
jy2T = C.pspec.jy2T(fqs)
fqs = fqs[CH0:CH1]
aa = a.cal.get_aa('psa6240_v003', n.array([.15]))
t,dat,flg = C.arp.get_dict_of_uv_data(sys.argv[1:], 'cross', 'I')
window = a.dsp.gen_window(fqs.size,'blackman-harris')
norm = n.fft.ifft(window)[0]
tau = n.fft.fftfreq(fqs.size, fqs[1]-fqs[0])
tau = n.fft.fftshift(tau)
#for filename in sys.argv[1:]:
# t,dat,flg = C.arp.get_dict_of_uv_data([filename], 'cross', 'I')
for bl in dat:
i,j = a.miriad.bl2ij(bl)
print i,j
for pol in dat[bl]:
d = n.sum(dat[bl][pol], axis=0) * jy2T
if (aa[i] - aa[j])[1] < 0: d = d.conj()
w = n.sum(n.logical_not(flg[bl][pol]).astype(n.int), axis=0)
d = n.where(w[CH0:CH1] > 0, d[CH0:CH1]/w[CH0:CH1], 0)
w = n.where(w > 0, 1, 0)
p.subplot(121); p.plot(fqs, d)
_d = n.fft.ifft(window*d) / norm
_d = n.fft.fftshift(_d)
p.subplot(122); p.plot(tau, n.abs(_d))
p.xlabel('Delay [ns]')
p.ylabel('Power [mK]')
p.show()
| [
"[email protected]"
] | |
b3bb756dcc5fa36b2ee79947713b1d3b50e1fdda | 03dddfda1c488f08ae4b7914e83dd96c24f1e3d7 | /meadow_mall/meadow_mall/apps/oauth/utils.py | 25c69391d72c9ec60ab021d7c100769fbc532cd7 | [] | no_license | zizle/MeadowMall | 0223ed134baf164eb71358e0c0d3c7a0fbf58782 | d5cc05e71a666724726bc324c1eb4841b828b284 | refs/heads/master | 2020-03-21T00:14:47.853180 | 2018-07-11T00:38:55 | 2018-07-11T00:38:55 | 137,884,601 | 0 | 0 | null | 2018-07-11T00:38:56 | 2018-06-19T11:50:28 | HTML | UTF-8 | Python | false | false | 3,856 | py | # _*_ coding:utf-8 _*_
# QQ登录辅助
from django.conf import settings
from urllib.parse import urlencode, parse_qs
from urllib.request import urlopen
from itsdangerous import TimedJSONWebSignatureSerializer as TJWSSerializer, BadData
import logging
import json
from .exceptions import OAuthQQAPIError
from . import constants
logger = logging.getLogger('django')
class OAuthQQ(object):
"""QQ认证登录辅助"""
def __init__(self, client_id=None, client_secret=None, redirect_uri=None, state=None):
self.state = state or settings.QQ_STATE
self.client_id = client_id if client_id else settings.QQ_CLIENT_ID
self.client_secret = client_secret if client_secret else settings.QQ_CLIENT_SECRET
self.redirect_uri = redirect_uri if redirect_uri else settings.QQ_REDIRECT_URI
def get_qq_login_url(self):
"""获取QQ登录的url"""
params = {
'response_type': 'code',
'client_id': self.client_id,
'redirect_uri': self.redirect_uri,
'state': self.state,
'scope': 'get_user_info',
}
url = 'https://graph.qq.com/oauth2.0/authorize?' + urlencode(params)
return url
def get_access_token(self, code):
"""获取access_token"""
params = {
'grant_type': 'authorization_code',
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
'redirect_uri': self.redirect_uri,
}
url = 'https://graph.qq.com/oauth2.0/token?' + urlencode(params)
try:
            # request the access_token from the QQ server
response = urlopen(url)
response_data = response.read().decode()
# access_token = FE04 ** ** ** ** CCE2 & expires_in = 7776000 & refresh_token = 88E4 *** ** ** ** BE14
            # parse_qs returns a dict of lists, e.g. {'access_token': ['xxx'], ...}
response_dict = parse_qs(response_data)
except Exception as e:
            logger.error('failed to fetch access_token: %s' % e)
            # raise the domain-specific error instead
raise OAuthQQAPIError
else:
access_token = response_dict.get('access_token')
return access_token[0]
def get_openid(self, access_token):
"""
        Fetch the openid
        :param access_token: required parameter for querying the openid from the QQ server
:return: openid
"""
url = 'https://graph.qq.com/oauth2.0/me?access_token=' + access_token
response = urlopen(url)
response_data = response.read().decode()
# callback({"client_id": "YOUR_APPID", "openid": "YOUR_OPENID"})\n;
try:
            # strip the JSONP wrapper "callback(...);" and parse the JSON payload
response_dict = json.loads(response_data[10:-4])
            print('response_dict while fetching openid:', response_dict)
except Exception as e:
data = parse_qs(response_data)
logger.error('code=%s msg=%s' % ((data.get('code'), data.get('msg'))))
raise OAuthQQAPIError
        # extract the openid
openid = response_dict.get('openid', None)
return openid
@staticmethod
def generate_access_token(openid):
"""
        Generate a signed token that embeds the openid
:return: token
"""
serializer = TJWSSerializer(settings.SECRET_KEY, expires_in=constants.SAVE_QQ_USER_TOKEN_EXPIRES)
token = serializer.dumps({"openid": openid})
return token.decode()
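    # Minimal flow sketch (editor's addition; the wiring is an assumption, the
    # method names come from this class):
    #   oauth = OAuthQQ(state='/')
    #   login_url = oauth.get_qq_login_url()           # 1. send the user to QQ
    #   access_token = oauth.get_access_token(code)    # 2. 'code' from the QQ callback
    #   openid = oauth.get_openid(access_token)        # 3. identify the QQ user
    #   token = OAuthQQ.generate_access_token(openid)  # 4. signed token for later steps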
@staticmethod
def check_save_user_token(token):
"""
        Verify a token produced by generate_access_token
:param token:
:return:
"""
serializer = TJWSSerializer(settings.SECRET_KEY, expires_in=constants.SAVE_QQ_USER_TOKEN_EXPIRES)
try:
data = serializer.loads(token)
except BadData:
return None
else:
return data.get('openid') | [
"[email protected]"
] | |
df21b265a6cacdff01901795d819235d4a0eb590 | f7e0780b4d73ebf6e50fe4053c01fd3cc4d6b227 | /auctioning_platform/shipping_infrastructure/shipping_infrastructure/repositories/address.py | 8ec2f8f1ec18b584e7ea6e8fffc9eacf0ddcf650 | [
"MIT"
] | permissive | Enforcer/clean-architecture | 78d663585f913c51a0460bcafa4af35515cdf549 | f0c1c0a8364996d309e7381b44933807529200b1 | refs/heads/master | 2023-02-20T01:40:24.653512 | 2022-08-02T20:59:03 | 2022-08-02T20:59:03 | 208,138,785 | 454 | 51 | MIT | 2023-02-16T01:31:26 | 2019-09-12T20:16:08 | Python | UTF-8 | Python | false | false | 589 | py | import uuid
import faker
from shipping import AddressRepository
from shipping.domain.entities import Address
from shipping.domain.value_objects import ConsigneeId
class FakeAddressRepository(AddressRepository):
def get(self, consignee_id: ConsigneeId) -> Address:
fake = faker.Faker()
return Address(
uuid=uuid.uuid4(),
street=fake.street_name(),
house_number=fake.building_number(),
city=fake.city(),
state=fake.state(),
zip_code=fake.zipcode(),
country=fake.country(),
)
| [
"[email protected]"
] | |
eb9f0a5bbe7e49980ce27a7e5656855d9bfe7f04 | 14427b4c73fef188791affb42c9fffe8e25b7dc1 | /tests/solr_tests/tests/admin.py | 082e19f81b3cb37860f748e658e952a736a542f1 | [
"BSD-3-Clause",
"MIT"
] | permissive | markng/django-haystack | 697ea05dd49980be43002e0fb6bf5c2b6357a015 | 78160bb2f530f7fadc0caf22f2f8babbac89ef32 | refs/heads/master | 2021-01-17T23:13:00.755184 | 2010-10-21T01:02:38 | 2010-10-21T01:02:38 | 1,010,851 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from haystack import backends
from core.models import MockModel
from solr_tests.tests.solr_backend import SolrMockModelSearchIndex, clear_solr_index
class SearchModelAdminTestCase(TestCase):
fixtures = ['bulk_data.json']
def setUp(self):
super(SearchModelAdminTestCase, self).setUp()
# With the models registered, you get the proper bits.
import haystack
from haystack.sites import SearchSite
# Stow.
self.old_debug = settings.DEBUG
settings.DEBUG = True
self.old_site = haystack.site
test_site = SearchSite()
test_site.register(MockModel, SolrMockModelSearchIndex)
haystack.site = test_site
# Wipe it clean.
clear_solr_index()
# Force indexing of the content.
mockmodel_index = test_site.get_index(MockModel)
mockmodel_index.update()
superuser = User.objects.create_superuser(
username='superuser',
password='password',
email='[email protected]',
)
def tearDown(self):
# Restore.
import haystack
haystack.site = self.old_site
settings.DEBUG = self.old_debug
super(SearchModelAdminTestCase, self).tearDown()
def test_usage(self):
backends.reset_search_queries()
self.assertEqual(len(backends.queries), 0)
self.assertEqual(self.client.login(username='superuser', password='password'), True)
# First, non-search behavior.
resp = self.client.get('/admin/core/mockmodel/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(backends.queries), 0)
self.assertEqual(resp.context['cl'].full_result_count, 23)
# Then search behavior.
resp = self.client.get('/admin/core/mockmodel/', data={'q': 'Haystack'})
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(backends.queries), 2)
self.assertEqual(resp.context['cl'].full_result_count, 7)
# Ensure they aren't search results.
self.assertEqual(isinstance(resp.context['cl'].result_list[0], MockModel), True)
self.assertEqual(resp.context['cl'].result_list[0].id, 17)
# Make sure only changelist is affected.
resp = self.client.get('/admin/core/mockmodel/1/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(len(backends.queries), 2)
self.assertEqual(resp.context['original'].id, 1)
| [
"[email protected]"
] | |
d6d19a882acc2888a4c56b24b29d7ea83b450ec4 | b2cfcacbd898f758a56d095f2140681934205d89 | /GeekShop_mentor/src_lesson_8/step_6(CBV_DetailView)/geekshop/geekshop/urls.py | f61dfb9bfdf3de46c576ad4a7bb2d44badcae26a | [] | no_license | AndreySperansky/Django_1 | 7d3be3ea2ede8e46d932fdae146ce4a7c4e300b4 | 0fec0a9a02b887fd8b45a5b763b7da5dc6377208 | refs/heads/master | 2022-12-15T19:56:23.611288 | 2020-09-21T17:40:40 | 2020-09-21T17:40:40 | 284,131,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | from django.conf.urls import url, include
from django.contrib import admin
import mainapp.views as mainapp
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^$', mainapp.main, name='main'),
url(r'^products/', include('mainapp.urls', namespace='products')),
url(r'^contact/', mainapp.contact, name='contact'),
url(r'^auth/', include('authapp.urls', namespace='auth')),
url(r'^basket/', include('basketapp.urls', namespace='basket')),
url(r'^admin/', include('adminapp.urls', namespace='admin')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
60f8ee32eec66d5a1aceb50eae7ede2d9486b5ac | 12068d6ebe90fcbd13c635f35c23ce7c73884b12 | /Lecture/Lecture_basic/TSP.py | bdc30261cdae927d3d5f5be96d23f11de1767206 | [] | no_license | jngcii/python-algorithm | 6ffb7a90eb0288cadc64bc3b2bf2d103455d2337 | e976b0033beeeb14217e7acc8d67f201a79184f2 | refs/heads/master | 2020-04-21T09:09:13.353113 | 2020-01-23T17:36:02 | 2020-01-23T17:36:02 | 169,439,831 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | def next_permutation(a): # 다음 순열 찾는 함수
    # From the back, find the first index i whose previous element is smaller.
    i = len(a) - 1
    while i > 0 and a[i] <= a[i-1]: i -= 1
    # Reached the front without finding one ======> e.g.) 987654321 is the last permutation.
    if i <= 0: return False
    # At this point a[i-1] < a[i] is guaranteed.
    # Within a[i:], find the smallest element that is still larger than a[i-1].
    # a[i:] is sorted in descending order (e.g. 87654), so scan from the back
    # and stop at the first element larger than a[i-1].
    j = len(a) - 1
    while a[j] <= a[i-1]: j -= 1
    # Swap that element with a[i-1].
    a[i-1], a[j] = a[j], a[i-1]
    # Even after the swap, a[i:] stays in descending order,
    # so reverse it back into ascending order.
    j = len(a) - 1
    while i < j:
        a[i], a[j] = a[j], a[i]
        i += 1
        j -= 1
    # A next permutation exists and has been produced in place.
    return True
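# Worked example (editor's sketch, not in the original file):
#   p = [0, 2, 1]; next_permutation(p)  # -> True,  p becomes [1, 0, 2]
#   q = [2, 1, 0]; next_permutation(q)  # -> False, q is the last permutation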
n = int(input()) # number of cities
d = list(range(n)) # label the cities 0 .. n-1
w = [list(map(int, input().split())) for _ in range(n)] # fare matrix: w[i][j] is the cost from city i to city j (0 = no road)
res = 1000000000000
while True: # common Python idiom: loop forever and break out explicitly
    ok = True
    s = 0  # reset the tour cost per permutation (keeping this outside the loop accumulated costs across tours)
    # Walk every edge of the current tour, starting with the closing edge from
    # the last city back to the first; a fare of 0 means the road is blocked.
    for k in range(-1, n-1):
        if w[d[k]][d[k+1]] == 0:
            ok = False
            break
        else:
            s += w[d[k]][d[k+1]]
    # The loop above finished without hitting a blocked road, so keep the cheaper cost.
    if ok: res = min(s, res)
    # Advance d to its next permutation; stop when there is none.
    if not next_permutation(d): break
    # Only tours starting at city 0 matter; stop as soon as d[0] moves past 0.
    if d[0] != 0: break
print(res)
| [
"[email protected]"
] | |
f04ac3ed7b2bb5e3752f0ef2d96c1cb9e22b29a2 | 1670af31dd78a82915b8cda16c181c0aa31a1c3a | /favs/urls.py | 4e213700c57ab1fa1133e8a7999e64f4f4711de4 | [
"CC-BY-3.0",
"MIT"
] | permissive | chyuting/dj4e-samples | ecd2eca6edc50402a55dee487c11f74ec873cb2d | bbe3dcd214789e4be96dd2460018524f8078b4e3 | refs/heads/master | 2020-09-15T18:12:47.472948 | 2019-11-22T20:28:57 | 2019-11-22T20:28:57 | 223,524,434 | 1 | 0 | MIT | 2019-11-23T03:24:03 | 2019-11-23T03:24:02 | null | UTF-8 | Python | false | false | 1,083 | py | from django.urls import path, reverse_lazy
from . import views
from django.views.generic import TemplateView
# In urls.py reverse_lazy('favs:all')
# In views.py class initialization reverse_lazy('favs:all')
# In views.py methods reverse('favs:all')
# In templates {% url 'favs:thing_update' thing.id %}
app_name='favs'
urlpatterns = [
path('', views.ThingListView.as_view(), name='all'),
path('thing/<int:pk>', views.ThingDetailView.as_view(), name='thing_detail'),
path('thing/create',
views.ThingCreateView.as_view(success_url=reverse_lazy('favs:all')), name='thing_create'),
path('thing/<int:pk>/update',
views.ThingUpdateView.as_view(success_url=reverse_lazy('favs:all')), name='thing_update'),
path('thing/<int:pk>/delete',
views.ThingDeleteView.as_view(success_url=reverse_lazy('favs:all')), name='thing_delete'),
path('thing/<int:pk>/favorite',
views.AddFavoriteView.as_view(), name='thing_favorite'),
path('thing/<int:pk>/unfavorite',
views.DeleteFavoriteView.as_view(), name='thing_unfavorite'),
]
| [
"[email protected]"
] | |
dbc327a8c84469e18f9c25854df9fec96a476708 | 0fb505765604b586c3a46e608fc23930f8501db5 | /venv/lib/python2.7/site-packages/django/contrib/staticfiles/apps.py | ae69667b7b99c779598188e2cff803417bb26aa0 | [
"MIT"
] | permissive | domenicosolazzo/practice-django | b05edecc302d97c97b7ce1de809ea46d59e2f0e6 | 44e74c973384c38bd71e7c8a1aacd1e10d6a6893 | refs/heads/master | 2021-08-19T15:36:22.732954 | 2015-01-22T18:42:14 | 2015-01-22T18:42:14 | 25,118,384 | 0 | 0 | MIT | 2021-06-10T19:50:51 | 2014-10-12T12:08:47 | Python | UTF-8 | Python | false | false | 206 | py | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class StaticFilesConfig(AppConfig):
name = 'django.contrib.staticfiles'
verbose_name = _("Static Files")
| [
"[email protected]"
] | |
7601589c18fcecc2f0a8da320cabeceadd1e1dbe | 9ef6d625945ecdebb476614a96a83a2139a92e9b | /nlstruct/datasets/quaero.py | 8d1aedb5e67c9965c45a5d4f2bc23add2d4fe78c | [
"MIT"
] | permissive | percevalw/nlstruct | f0cdf8ed86a32a5a96204b1c787eb35b1a4a804a | 7b5fa2230a555331e2e68b25fbb2b25e4c9404a0 | refs/heads/master | 2023-08-09T00:07:13.782518 | 2023-07-03T14:37:43 | 2023-07-03T14:37:43 | 229,176,303 | 13 | 7 | MIT | 2023-07-23T00:52:19 | 2019-12-20T02:38:34 | Python | UTF-8 | Python | false | false | 6,137 | py | import os
import random
import zipfile
from sklearn.datasets._base import RemoteFileMetadata
from nlstruct.datasets.brat import load_from_brat
from nlstruct.datasets.base import NetworkLoadMode, ensure_files, NormalizationDataset
class QUAERO(NormalizationDataset):
REMOTE_FILES = [
RemoteFileMetadata(
url="https://quaerofrenchmed.limsi.fr/QUAERO_FrenchMed_brat.zip",
checksum="2cf8b5715d938fdc1cd02be75c4eaccb5b8ee14f4148216b8f9b9e80b2445c10",
filename="QUAERO_FrenchMed_brat.zip")
]
def __init__(self, path, terminology=None, sources=("EMEA", "MEDLINE"), version="2016", val_split=None, seed=False, debug=False,
map_concepts=False, unmappable_concepts="raise", relabel_with_semantic_type=False, preprocess_fn=None):
assert version in ("2015", "2016")
if val_split is not None or seed is not False:
            assert version == "2015", "A validation split already exists for Quaero 2016; leave val_split=None and seed=False"
if not isinstance(sources, (tuple, list)):
sources = (sources,)
self.sources = sources = tuple(sources)
train_data, val_data, test_data = self.download_and_extract(path, version, sources, val_split, seed, debug)
super().__init__(
train_data=train_data,
val_data=val_data,
test_data=test_data,
terminology=terminology,
map_concepts=map_concepts,
unmappable_concepts=unmappable_concepts,
relabel_with_semantic_type=relabel_with_semantic_type,
preprocess_fn=preprocess_fn,
)
def download_and_extract(self, path, version, sources=("EMEA", "MEDLINE"), val_split=False, seed=False, debug=False):
"""
Loads the Quaero dataset
Parameters
----------
path: str
Location of the Quaero files
version: str
Version to load, either '2015' or '2016'
val_split: float
Will only be used if version is '2015' since no dev set was defined for this version
seed: int
Will only be used if version is '2015' since no dev set was defined for this version
sources: tuple of str
Which sources to load, ie EMEA, MEDLINE
Returns
-------
Dataset
"""
[file] = ensure_files(path, self.REMOTE_FILES, mode=NetworkLoadMode.AUTO)
zip_ref = zipfile.ZipFile(file, "r")
zip_ref.extractall(path)
zip_ref.close()
train_data = [
*[{**doc, "source": "EMEA", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/train/EMEA"))],
*[{**doc, "source": "MEDLINE", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/train/MEDLINE"))],
]
train_data = [doc for doc in train_data if doc["source"] in sources]
val_data = [
*[{**doc, "source": "EMEA", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/dev/EMEA"))],
*[{**doc, "source": "MEDLINE", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/dev/MEDLINE"))],
]
val_data = [doc for doc in val_data if doc["source"] in sources]
test_data = [
*[{**doc, "source": "EMEA", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/test/EMEA"))],
*[{**doc, "source": "MEDLINE", "entities": [{**entity, "concept": tuple(sorted(part.strip() for comment in entity["comments"]
for part in comment["comment"].strip().strip("+").split(" ")))} for entity in doc["entities"]]}
for doc in load_from_brat(os.path.join(path, "QUAERO_FrenchMed/corpus/test/MEDLINE"))],
]
test_data = [doc for doc in test_data if doc["source"] in sources]
if version == "2015":
if val_split:
shuffled_data = list(train_data)
if seed is not False:
random.Random(seed).shuffle(shuffled_data)
offset = val_split if isinstance(val_split, int) else int(val_split * len(shuffled_data))
val_data = shuffled_data[:offset]
train_data = shuffled_data[offset:]
else:
val_data = []
subset = slice(None) if not debug else slice(0, 50)
train_data = train_data[subset]
val_data = val_data[subset]
test_data = test_data # Never subset the test set, we don't want to give false hopes
return train_data, val_data, test_data
| [
"[email protected]"
] | |
1ecc6c19485b08f78b1da7819afdbb6fb6669109 | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/bin3/jupyter-kernelspec | e82d73045ac485f46b7e7879db1c57e894321d7b | [
"Python-2.0",
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 344 | #!/var/mobile/Containers/Data/Application/966C455F-0658-40E2-B076-EC684AFD0415/Library/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_client.kernelspecapp import KernelSpecApp
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(KernelSpecApp.launch_instance())
| [
"[email protected]"
] | ||
e49d78dfbaf2eab206ce9b59421933eb775a7f3e | 523f8f5febbbfeb6d42183f2bbeebc36f98eadb5 | /424.py | 25219c47f183e713f54738fc014afb70c66da0ff | [] | no_license | saleed/LeetCode | 655f82fdfcc3000400f49388e97fc0560f356af0 | 48b43999fb7e2ed82d922e1f64ac76f8fabe4baa | refs/heads/master | 2022-06-15T21:54:56.223204 | 2022-05-09T14:05:50 | 2022-05-09T14:05:50 | 209,430,056 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | class Solution(object):
def characterReplacement(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
        counts = [0] * 26  # per-letter frequencies inside the window (renamed from "dict", which shadowed the builtin)
        tail = 0
        res = 0
        for i in range(len(s)):
            counts[ord(s[i]) - ord('A')] += 1
            maxv = max(counts)
            # shrink the window while more than k characters would need replacing
            while i - tail + 1 - maxv > k:
                counts[ord(s[tail]) - ord('A')] -= 1
                tail += 1
                maxv = max(counts)
            if i - tail + 1 > res:
                res = i - tail + 1
        return res
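# Editor's note: classic sliding window -- a window is valid while
# (window length - count of its most frequent letter) <= k; max(counts)
# scans 26 entries, so the whole pass runs in O(26 * n).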
s = "AABABBA"
k = 1
print(ord('A'))
a=Solution()
print(a.characterReplacement(s,k))
| [
"[email protected]"
] | |
170f17debf8e0aa5216ed9e03bf1456f06c0cc04 | a7205bcd48196c1391d8c56414a1e20c39b52aa7 | /train_online.py | 2f18b62b4240057a1e6866d5f6aabb0846fd5164 | [] | no_license | CS-433/cs-433-project-2-fesenjoon | f9f58ef0caa54b08c6d59ffca0cbf2a08642ecb5 | cb0f7519901f16ae0cb1bb9b1ae8b89761a0b519 | refs/heads/master | 2021-04-18T19:56:06.737876 | 2020-12-17T13:42:15 | 2020-12-17T13:42:15 | 314,034,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,511 | py | import argparse
import json
from datetime import datetime
import os
import numpy as np
import torch
import models
import datasets
from utils import train_one_epoch, eval_on_dataloader
try:
from tensorboardX import SummaryWriter
except:
from torch.utils.tensorboard import SummaryWriter
def build_parser():
parser = argparse.ArgumentParser(description="""Trains models in the online setting described in the original paper.""")
parser.add_argument('--title', type=str)
parser.add_argument('--exp-dir', type=str, default=None)
parser.add_argument('--model', type=str, default='resnet18', choices=models.get_available_models())
# parser.add_argument('--dataset', type=str, default='cifar10', choices=datasets.get_available_datasets())
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--split-size', type=int, default=5000)
parser.add_argument('--random-seed', type=int, default=42)
    parser.add_argument('--convergence-epochs', type=int, default=5) # stop once the best metric has not improved for this many epochs (see the commented-out check below; the active rule stops at 99% train accuracy)
# parser.add_argument('--save-per-epoch', action='store_true', default=False)
parser.add_argument('--checkpoint', default=None)
parser.add_argument('--checkpoint-shrink', default=1.0, type=float)
parser.add_argument('--checkpoint-perturb', default=0.0, type=float)
return parser
def main(args):
print("Running with arguments:")
args_dict = {}
for key in vars(args):
if key == "default_function":
continue
args_dict[key] = getattr(args, key)
print(key, ": ", args_dict[key])
print("---")
experiment_time = datetime.now().strftime('%b%d_%H-%M-%S')
if args.exp_dir:
experiment_dir = args.exp_dir
else:
experiment_dir = os.path.join('exp', args.title, experiment_time)
os.makedirs(experiment_dir, exist_ok=True)
with open(os.path.join(experiment_dir, "config.json"), "w") as f:
json.dump(args_dict, f, indent=4, sort_keys=True, default=lambda x: x.__name__)
if torch.cuda.is_available():
device = torch.device('cuda:0')
print("CUDA Recognized")
else:
device = torch.device('cpu')
try:
summary_writer = SummaryWriter(logdir=experiment_dir)
except:
summary_writer = SummaryWriter(experiment_dir)
print("Starting Online Learning")
#Online learning setup
torch.manual_seed(args.random_seed)
np.random.seed(args.random_seed)
model = models.get_model(args.model).to(device)
criterion = torch.nn.CrossEntropyLoss()
loaders = datasets.get_dataset("online_with_val_cifar10", split_size=args.split_size)
number_of_samples_online = []
test_accuracies_online = []
training_times_online = []
epoch = 0
for i, train_loader in enumerate(loaders['train_loaders']):
t_start = datetime.now()
n_train = (i + 1) * args.split_size
number_of_samples_online.append(n_train)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
random_model = models.get_model(args.model).to(device)
with torch.no_grad():
for real_parameter, random_parameter in zip(model.parameters(), random_model.parameters()):
real_parameter.mul_(args.checkpoint_shrink).add_(random_parameter, alpha=args.checkpoint_perturb)
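        # Editor's note: this implements a shrink-and-perturb style warm start --
        # each weight becomes checkpoint_shrink * w + checkpoint_perturb * w_random,
        # interpolating between the previous solution and a fresh random init.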
train_accuracies = []
while True:
if epoch % 5 == 0:
print(f"Starting training in epoch {epoch + 1}")
train_loss, train_accuracy = train_one_epoch(device, model, optimizer, criterion, train_loader)
val_loss, val_accuracy = eval_on_dataloader(device, criterion, model, loaders['val_loader'])
test_loss, test_accuracy = eval_on_dataloader(device, criterion, model, loaders['test_loader'])
train_accuracies.append(train_accuracy)
epoch += 1
summary_writer.add_scalar("test_accuracy", test_accuracy, epoch)
summary_writer.add_scalar("test_loss", test_loss, epoch)
summary_writer.add_scalar("train_accuracy", train_accuracy, epoch)
summary_writer.add_scalar("train_loss", train_loss, epoch)
summary_writer.add_scalar("val_accuracy", val_accuracy, epoch)
summary_writer.add_scalar("val_loss", val_loss, epoch)
#if len(train_accuracies) >= args.convergence_epochs and \
# max(train_accuracies) not in train_accuracies[-args.convergence_epochs:]:
if train_accuracy >= 0.99:
print("Convergence condition met")
break
val_loss, val_accuracy = eval_on_dataloader(device, criterion, model, loaders['val_loader'])
test_loss, test_accuracy = eval_on_dataloader(device, criterion, model, loaders['test_loader'])
summary_writer.add_scalar("online_val_accuracy", val_accuracy, n_train)
summary_writer.add_scalar("online_val_loss", val_loss, n_train)
summary_writer.add_scalar("online_test_accuracy", test_accuracy, n_train)
summary_writer.add_scalar("online_test_loss", test_loss, n_train)
t_end = datetime.now()
training_time = (t_end - t_start).total_seconds()
training_times_online.append(training_time)
summary_writer.add_scalar("online_train_time", training_time, n_train)
summary_writer.close()
if __name__ == "__main__":
parser = build_parser()
args = parser.parse_args()
main(args) | [
"[email protected]"
] | |
0b01562f680ea36d596485130d22202338aa0262 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /9b4h6mK9CBMLwyGiY_6.py | 1abf1ae0a48f31137fb13996b1512d64f8d6b0d6 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | """
In this challenge, you have to find the distance between two points placed on
a Cartesian plane. Knowing the coordinates of both the points, you have to
apply the **Pythagorean theorem** to find the distance between them.

Given two dictionaries `a` and `b` being the two points coordinates ( **x**
and **y** ), implement a function that returns the distance between the
points, rounded to the nearest thousandth.
### Examples
get_distance({"x": -2, "y": 1}, {"x": 4, "y": 3}) ➞ 6.325
get_distance({"x": 0, "y": 0}, {"x": 1, "y": 1}) ➞ 1.414
get_distance({"x": 10, "y": -5}, {"x": 8, "y": 16}) ➞ 21.095
### Notes
* Take a look at the **Resources** tab if you need a refresher on the geometry related to this challenge.
* The "distance" is the shortest distance between the two points, or the straight line generated from `a` to `b`.
"""
def get_distance(a, b):
    # Read coordinates by key so the dicts may list "x" and "y" in any order.
    dx = a["x"] - b["x"]
    dy = a["y"] - b["y"]
    return round((dx ** 2 + dy ** 2) ** .5, 3)
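# Sanity checks taken from the examples in the problem statement above:
if __name__ == "__main__":
    assert get_distance({"x": -2, "y": 1}, {"x": 4, "y": 3}) == 6.325
    assert get_distance({"x": 0, "y": 0}, {"x": 1, "y": 1}) == 1.414
    assert get_distance({"x": 10, "y": -5}, {"x": 8, "y": 16}) == 21.095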
| [
"[email protected]"
] | |
ee12d3b89ca259032c8f090a344825f2320900a2 | 45cfc0bf573d0419ff4c2e9cc8d73256be9bf8bb | /lws/translate.py | 9a4d17f3e2092934a1d8e61e371afa9ac8f16708 | [
"MIT"
] | permissive | KEHANG/Life_Web_Services | c79c0f7df9aa78ca9efee79652c2fc8525076aae | 2f519d38692492088fb6ba80a648d2099828b07e | refs/heads/master | 2021-06-19T01:02:15.520716 | 2021-01-03T19:58:02 | 2021-01-03T19:58:02 | 149,108,564 | 0 | 0 | MIT | 2018-09-22T22:55:59 | 2018-09-17T10:27:50 | Python | UTF-8 | Python | false | false | 744 | py | import json
import requests
from flask_babel import _
from flask import current_app
def translate(text, source_language, dest_language):
if 'MS_TRANSLATOR_KEY' not in current_app.config or not current_app.config['MS_TRANSLATOR_KEY']:
return _('Error: the translation service is not configured.')
auth = {'Ocp-Apim-Subscription-Key': current_app.config['MS_TRANSLATOR_KEY']}
r = requests.get('https://api.microsofttranslator.com/v2/Ajax.svc'
'/Translate?text={}&from={}&to={}'.format(text, source_language, dest_language),
headers=auth)
if r.status_code != 200:
return _('Error: the translation service failed.')
return json.loads(r.content.decode('utf-8-sig')) | [
"[email protected]"
] | |
3130f019200ef90cb43098229e4eae8adf5a1006 | d08e6e7a2abcc7568fd3b9a6022c79091ebd0efa | /Logic/verify_no_duplicate_seqs_test.py | 7924d76700dab6cb85ace88791fd019b1f32ef3e | [] | no_license | hz-xmz/Kaggle-LANL | dd20b6951743fc068a866c799de93d9a89a2429d | 0236d3bcaeb2f3f4c960902d6c31ef0c49e749c4 | refs/heads/master | 2020-09-11T01:25:06.633524 | 2019-06-07T16:50:41 | 2019-06-07T16:50:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,214 | py | import numpy as np
import pandas as pd
data_folder = '/media/tom/cbd_drive/Kaggle/LANL/Data/'
if not 'test_combined' in locals():
test_combined = pd.read_csv(
data_folder + 'test_combined.csv').values.reshape(-1, 150000)
extreme_length = 10000
first_id = 0
def get_max_shared_seq_len(first_id, sec_id):
valid_ids = np.where(
start_test[sec_id] == end_test[first_id, extreme_length-1])[0]
longest_match = 0
while valid_ids.size:
longest_match += 1
valid_ids = valid_ids[np.logical_and(
valid_ids >= longest_match,
end_test[first_id, extreme_length-longest_match-1] == start_test[
sec_id, valid_ids-longest_match])]
return longest_match
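# Editor's note: valid_ids holds every offset in start_test[sec_id] whose
# prefix matches the current tail of end_test[first_id]; each loop pass
# extends the candidate overlap by one sample, so the number of passes equals
# the longest shared sequence length.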
#longest_match_sequence.max()
#longest_match_sequence.argmax()
#get_max_shared_seq_len(1418, 1232) # Longest sequence
num_test_files = test_combined.shape[0]
start_test = test_combined[:, :extreme_length]
end_test = test_combined[:, -extreme_length:]
longest_match_sequence = np.zeros((num_test_files, num_test_files))
for first_id in range(num_test_files):
print(first_id)
for sec_id in range(num_test_files):
longest_match_sequence[first_id, sec_id] = get_max_shared_seq_len(
first_id, sec_id)
| [
"[email protected]"
] | |
3a5d3b8bcd880c5bb11c80e625c2118f69744913 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/SjShObligationCall/YW_GGQQ_YWFSJHA_GOU_084.py | e0ca6b636bc265d2295b0d6957247055bbcce54d | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,913 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
import json
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/option/service")
from OptMainService import *
from OptQueryStkPriceQty import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from log import *
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/option/mysql")
from Opt_SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from QueryOrderErrorMsg import queryOrderErrorMsg
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import *
reload(sys)
sys.setdefaultencoding('utf-8')
class YW_GGQQ_YWFSJHA_GOU_084(xtp_test_case):
def setUp(self):
sql_transfer = Opt_SqlData_Transfer()
sql_transfer.transfer_fund_asset('YW_GGQQ_YWFSJHA_GOU_084')
clear_data_and_restart_sh()
Api.trade.Logout()
Api.trade.Login()
def test_YW_GGQQ_YWFSJHA_GOU_084(self):
        # Title: "Buy to close (obligation-side close): market order with the
        # remainder converted to a limit order - funds check (available funds
        # <= 0, and this order turns available funds positive)".
        title = '买平(义务方平仓):市价剩余转限价-验资(可用资金<=0且下单导致可用资金增加后变成正数)'
        # Expected values for the current test case.
        # Possible expected statuses: initial, unfilled, partially filled,
        # fully filled, partial-cancel reported, partially cancelled, reported
        # and pending cancel, cancelled, rejected, cancel-rejected, internal
        # cancel.
        # xtp_ID and cancel_xtpID default to 0 and normally stay unchanged.
case_goal = {
'期望状态': '全成',
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Order (entrust) parameter definition --------------------------------
        # Params: ticker, market, security type, security status, trading
        # status, side (B = buy, S = sell), expected status, Api.
stkparm = QueryStkPriceQty('10001318', '1', '*', '1', '0', 'C', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the test case fails.
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
logger.error('查询结果为False,错误原因: {0}'.format(
json.dumps(rs['测试错误原因'], encoding='UTF-8', ensure_ascii=False)))
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type':Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_OPTION'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'position_effect':Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_CLOSE'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_REVERSE_BEST_LIMIT'],
'price': stkparm['涨停价'],
'quantity': 1
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
if rs['用例测试结果']:
logger.warning('执行结果为{0}'.format(str(rs['用例测试结果'])))
else:
logger.warning('执行结果为{0},{1},{2}'.format(
str(rs['用例测试结果']), str(rs['用例错误源']),
json.dumps(rs['用例错误原因'], encoding='UTF-8', ensure_ascii=False)))
self.assertEqual(rs['用例测试结果'], True) # 4
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
96cfc8f20622043eeff69349f2ee5eda49f0f6a3 | 7769cb512623c8d3ba96c68556b2cea5547df5fd | /mmdet/ops/carafe/grad_check.py | 06820be2459c9766113796cbe34e76db7ae9c108 | [
"MIT"
] | permissive | JialeCao001/D2Det | 0e49f4c76e539d574e46b02f278242ca912c31ea | a76781ab624a1304f9c15679852a73b4b6770950 | refs/heads/master | 2022-12-05T01:00:08.498629 | 2020-09-04T11:33:26 | 2020-09-04T11:33:26 | 270,723,372 | 312 | 88 | MIT | 2020-07-08T23:53:23 | 2020-06-08T15:37:35 | Python | UTF-8 | Python | false | false | 2,118 | py | import os.path as osp
import sys
import mmcv
import torch
from torch.autograd import gradcheck
sys.path.append(osp.abspath(osp.join(__file__, '../../')))
from mmdet.ops.carafe import CARAFENAIVE # noqa: E402, isort:skip
from mmdet.ops.carafe import carafe_naive # noqa: E402, isort:skip
from mmdet.ops.carafe import carafe, CARAFE # noqa: E402, isort:skip
feat = torch.randn(2, 64, 3, 3, requires_grad=True, device='cuda:0').double()
mask = torch.randn(
2, 100, 6, 6, requires_grad=True, device='cuda:0').sigmoid().double()
print('Gradcheck for carafe...')
test = gradcheck(CARAFE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)
print(test)
print('Gradcheck for carafe naive...')
test = gradcheck(CARAFENAIVE(5, 4, 2), (feat, mask), atol=1e-4, eps=1e-4)
print(test)
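# Note: gradcheck above uses tiny double-precision tensors (float64 is needed
# to meet gradcheck's numerical tolerances); the benchmark below switches to
# realistic float32 sizes and times forward and backward passes separately.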
feat = torch.randn(
2, 1024, 100, 100, requires_grad=True, device='cuda:0').float()
mask = torch.randn(
2, 25, 200, 200, requires_grad=True, device='cuda:0').sigmoid().float()
loop_num = 500
time_forward = 0
time_backward = 0
bar = mmcv.ProgressBar(loop_num)
timer = mmcv.Timer()
for i in range(loop_num):
x = carafe(feat.clone(), mask.clone(), 5, 1, 2)
torch.cuda.synchronize()
time_forward += timer.since_last_check()
x.sum().backward(retain_graph=True)
torch.cuda.synchronize()
time_backward += timer.since_last_check()
bar.update()
print('\nCARAFE time forward: {} ms/iter | time backward: {} ms/iter'.format(
(time_forward + 1e-3) * 1e3 / loop_num,
(time_backward + 1e-3) * 1e3 / loop_num))
time_naive_forward = 0
time_naive_backward = 0
bar = mmcv.ProgressBar(loop_num)
timer = mmcv.Timer()
for i in range(loop_num):
x = carafe_naive(feat.clone(), mask.clone(), 5, 1, 2)
torch.cuda.synchronize()
time_naive_forward += timer.since_last_check()
x.sum().backward(retain_graph=True)
torch.cuda.synchronize()
time_naive_backward += timer.since_last_check()
bar.update()
print('\nCARAFE naive time forward: {} ms/iter | time backward: {} ms/iter'.
format((time_naive_forward + 1e-3) * 1e3 / loop_num,
(time_naive_backward + 1e-3) * 1e3 / loop_num))
| [
"[email protected]"
] | |
6f925f97b7f04bf835e0d8185ead4532b0c99e7b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_orbs.py | 66df141052b56b070130b15e4bf528655d9c8b3a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py |
from xai.brain.wordbase.nouns._orb import _ORB
# class header
class _ORBS(_ORB):
def __init__(self,):
_ORB.__init__(self)
self.name = "ORBS"
self.specie = 'nouns'
self.basic = "orb"
self.jsondata = {}
| [
"[email protected]"
] | |
61886c57f514349c383865d61b354268510431e7 | 350db570521d3fc43f07df645addb9d6e648c17e | /1367_Linked_List_in_Binary_Tree/solution_test.py | 3b9df505ae99770b2edb20625a60761479c57dfc | [] | no_license | benjaminhuanghuang/ben-leetcode | 2efcc9185459a1dd881c6e2ded96c42c5715560a | a2cd0dc5e098080df87c4fb57d16877d21ca47a3 | refs/heads/master | 2022-12-10T02:30:06.744566 | 2022-11-27T04:06:52 | 2022-11-27T04:06:52 | 236,252,145 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 387 | py |
'''
1367. Linked List in Binary Tree
Level: Medium
https://leetcode.com/problems/linked-list-in-binary-tree
'''
import unittest
class TestSum(unittest.TestCase):
def test_sum(self):
self.assertEqual(sum([1, 2, 3]), 6, "Should be 6")
def test_sum_tuple(self):
        self.assertEqual(sum((1, 2, 3)), 6, "Should be 6")  # was (1, 2, 2), which sums to 5 and failed
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
29789f0b401ecebc3aa6c2b52e67296ed970cdb5 | d785e993ed65049c82607a1482b45bddb2a03dda | /nano2017/cfg2018/GluGluToWWToMNEN_cfg.py | 8ec3225a1e614e4a444d24817082712992c4f196 | [] | no_license | PKUHEPEWK/ssww | eec02ad7650014646e1bcb0e8787cf1514aaceca | a507a289935b51b8abf819b1b4b05476a05720dc | refs/heads/master | 2020-05-14T04:15:35.474981 | 2019-06-28T23:48:15 | 2019-06-28T23:48:15 | 181,696,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,364 | py | from WMCore.Configuration import Configuration
from CRABClient.UserUtilities import config, getUsernameFromSiteDB
config = Configuration()
config.section_("General")
config.General.requestName = 'GluGluToWWToMNEN_2018'
config.General.transferLogs = False
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.psetName = 'PSet.py'
config.JobType.scriptExe = 'crab_script_2018.sh'
config.JobType.inputFiles = ['crab_script_2018.py','ssww_keep_and_drop_2018.txt','ssww_output_branch_selection_2018.txt','haddnano.py'] #hadd nano will not be needed once nano tools are in cmssw
config.JobType.sendPythonFolder = True
config.section_("Data")
config.Data.inputDataset = '/GluGluToWWToMNEN_TuneCP5_13TeV_MCFM701_pythia8/RunIIAutumn18NanoAODv4-Nano14Dec2018_102X_upgrade2018_realistic_v16-v1/NANOAODSIM'
#config.Data.inputDBS = 'phys03'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
#config.Data.splitting = 'EventAwareLumiBased'
config.Data.unitsPerJob = 20
config.Data.totalUnits = -1
config.Data.outLFNDirBase ='/store/user/%s/nano2018_v0' % (getUsernameFromSiteDB())
config.Data.publication = False
config.Data.outputDatasetTag = 'GluGluToWWToMNEN_2018'
config.section_("Site")
config.Site.storageSite = "T2_CN_Beijing"
#config.Site.storageSite = "T2_CH_CERN"
#config.section_("User")
#config.User.voGroup = 'dcms'
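# Illustrative submission (assumes a standard CMS grid environment with the
# CRAB3 client set up):
#   crab submit -c GluGluToWWToMNEN_cfg.py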
| [
"[email protected]"
] | |
d8ffdcf84d2027052d36f9dd6c7668b3dec09237 | 17fb5e4cdcf8e557bd0ab8606dfd88074dc4d525 | /ticket_26758/tests.py | 0c5f0bcdd0c36962169c74cd5c324dbae1b70edd | [] | no_license | charettes/django-ticketing | 0b17c85afa049d1b73db244e1199798feb9a4b73 | 78ed6a345e760ea46434690e9385ae4d26fc2810 | refs/heads/master | 2021-01-17T06:38:35.337305 | 2016-06-15T02:33:38 | 2016-06-15T02:33:38 | 45,122,368 | 0 | 1 | null | 2016-02-09T20:21:48 | 2015-10-28T15:30:59 | Python | UTF-8 | Python | false | false | 291 | py | from django.test import TestCase
from django.db.models import Count
from .models import Company
class FooTests(TestCase):
def test_foo(self):
qs = Company.objects.annotate(ticketcount=Count('srservice')).exclude(ticketcount=0).order_by('-ticketcount')
print(list(qs))
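        # Exercises Django ticket #26758: annotate() with Count, exclude() on
        # the aggregate, and order_by() on it, all in a single queryset.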
| [
"[email protected]"
] | |
768bbacc62ddda437780f7a4469c365ecc15cccc | a31c21bcc4486fd44dd2c5b7f364e8f0320f7dd3 | /mlsh/lab/lab/envs/mujoco/ant_obstaclesgen.py | 9d1881775735ec9b7f26b3bb7e0f778226fe12c2 | [
"MIT"
] | permissive | SynthAI/SynthAI | 0cb409a4f5eb309dfc6a22d21ac78447af075a33 | 4e28fdf2ffd0eaefc0d23049106609421c9290b0 | refs/heads/master | 2020-03-19T12:49:07.246339 | 2018-06-07T23:27:51 | 2018-06-07T23:27:51 | 136,542,297 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,907 | py | import numpy as np
from lab import utils
from lab.envs.mujoco import mujoco_env
class AntObstaclesGenEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
self.count = 0
self.realgoal = 0
mujoco_env.MujocoEnv.__init__(self, 'ant_obstacles_gen.xml', 5)
utils.EzPickle.__init__(self)
self.randomizeCorrect()
def randomizeCorrect(self):
        self.realgoal = self.np_random.choice([0, 1, 2, 3, 4, 5, 6, 7, 8])
        # NOTE: the random draw above is immediately overridden below, so the
        # goal is effectively pinned to configuration 6 (goal at (40, -24)).
        self.realgoal = 6
def _step(self, a):
self.count += 1
if self.count % 200 == 0:
n_qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
n_qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
n_qpos[:2] = self.data.qpos[:2,0]
n_qpos[-11:] = self.data.qpos[-11:,0]
self.set_state(n_qpos, n_qvel)
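            # Every 200 steps: re-noise the pose while preserving the ant's x/y
            # position and the last 11 qpos entries (maze layout + goal state).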
goal = np.array([8, 24])
if self.realgoal == 0:
goal = np.array([8, 24])
if self.realgoal == 1:
goal = np.array([8, -24])
if self.realgoal == 2:
goal = np.array([24, 24])
if self.realgoal == 3:
goal = np.array([24, -24])
if self.realgoal == 4:
goal = np.array([48, 0])
if self.realgoal == 5:
goal = np.array([40, 24])
if self.realgoal == 6:
goal = np.array([40, -24])
if self.realgoal == 7:
goal = np.array([32, 16])
if self.realgoal == 8:
goal = np.array([32, -16])
# reward = -np.sum(np.square(self.data.qpos[:2,0] - goal)) / 100000
xposbefore = self.data.qpos[0,0]
yposbefore = self.data.qpos[1,0]
self.do_simulation(a, self.frame_skip)
xposafter = self.data.qpos[0,0]
yposafter = self.data.qpos[1,0]
if xposbefore < goal[0]:
forward_reward = (xposafter - xposbefore)/self.dt
else:
forward_reward = -1*(xposafter - xposbefore)/self.dt
if yposbefore < goal[1]:
forward_reward += (yposafter - yposbefore)/self.dt
else:
forward_reward += -1*(yposafter - yposbefore)/self.dt
ctrl_cost = .1 * np.square(a).sum()
reward = forward_reward - ctrl_cost
# print(reward)
done = False
ob = self._get_obs()
return ob, reward, done, {}
def _get_obs(self):
return np.concatenate([
self.data.qpos.flat[:-11],
self.data.qvel.flat[:-11],
# self.data.qpos.flat,
# self.data.qvel.flat,
])
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
# self.realgoal = 4
if self.realgoal == 0:
qpos[-11:] = np.array([80,0,0,80,0,0,0,0,0, 8, 24])
if self.realgoal == 1:
qpos[-11:] = np.array([0,0,0,80,0,0,80,0,0, 8, -24])
if self.realgoal == 2:
qpos[-11:] = np.array([0,80,0,80,80,0,0,0,0, 24, 24])
if self.realgoal == 3:
qpos[-11:] = np.array([0,0,0,80,80,0,0,80,0, 24, -24])
if self.realgoal == 4:
qpos[-11:] = np.array([0,0,0,80,80,80,0,0,0, 48, 0])
if self.realgoal == 5:
qpos[-11:] = np.array([0,0,80,80,80,80,0,0,0, 40, 24])
if self.realgoal == 6:
qpos[-11:] = np.array([0,0,0,80,80,80,0,0,80, 40, -24])
if self.realgoal == 7:
qpos[-11:] = np.array([80,80,0,80,0,0,0,0,0, 32, 16])
if self.realgoal == 8:
qpos[-11:] = np.array([0,0,0,80,0,0,80,80,0, 32, -16])
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.6
| [
"[email protected]"
] | |
97b5d2a5a50dabf5fe78820858521fad8fd97b92 | 09f0505f3ac1dccaf301c1e363423f38768cc3cc | /r_DailyProgrammer/Intermediate/C206/__init__.py | 962d83bb28a1ec6a068d32fc41ec4bee75110cbb | [] | no_license | Awesome-Austin/PythonPractice | 02212292b92814016d062f0fec1c990ebde21fe7 | 9a717f91d41122be6393f9fcd1a648c5e62314b3 | refs/heads/master | 2023-06-21T11:43:59.366064 | 2021-07-29T23:33:00 | 2021-07-29T23:33:00 | 270,854,302 | 0 | 0 | null | 2020-08-11T20:47:10 | 2020-06-08T23:24:09 | Python | UTF-8 | Python | false | false | 71 | py | #! python3
from r_DailyProgrammer.Intermediate.C206.main import main
| [
"{ID}+{username}@users.noreply.github.com"
] | {ID}+{username}@users.noreply.github.com |
fa896bba72962dd3bdd298ba034e34b8bfd8937f | 7822e658e88f3f948732e6e3e588ca4b2eb5662a | /diapos/programas/saltos-de-linea.py | 5d9a5581bc6352fd09c497dfa19303a498591b69 | [] | no_license | carlos2020Lp/progra-utfsm | 632b910e96c17b9f9bb3d28329e70de8aff64570 | a0231d62837c54d4eb8bbf00bb1b84484efc1af2 | refs/heads/master | 2021-05-28T06:00:35.711630 | 2015-02-05T02:19:18 | 2015-02-05T02:19:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | >>> a = 'casa\narbol\npatio'
>>> a
'casa\narbol\npatio'
>>> print a
casa
arbol
patio
>>> len('a\nb\nc')
5
| [
"[email protected]"
] | |
55a0831d34cbfef8d1eeff1fafad9e2e71e5f77b | ea378480ba678eb123ef826e3ca0c3eb8f4e538f | /paused/02.DXF write/dxfwrite-1.1.0/tests/test_abstract_entity.py | d9db54a5c2ba688be3762d19b23babb1cd18b1fc | [] | no_license | msarch/py | 67235643666b1ed762d418263f7eed3966d3f522 | dcd25e633a87cdb3710e90224e5387d3516c1cd3 | refs/heads/master | 2021-01-01T05:21:58.175043 | 2017-05-25T08:15:26 | 2017-05-25T08:15:26 | 87,453,820 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,296 | py | #!/usr/bin/env python
#coding:utf-8
# Created: 15.11.2010
# Copyright (C) 2010, Manfred Moitzi
# License: GPLv3
__author__ = "mozman <[email protected]>"
try:
# Python 2.6 and earlier need the unittest2 package
# try: pip install unittest2
# or download source from: http://pypi.python.org/pypi/unittest2
import unittest2 as unittest
except ImportError:
import unittest
from dxfwrite.entities import _Entity, Line
class MockEntity(_Entity):
DXF_ENTITY_NAME = Line.DXF_ENTITY_NAME
DXF_ATTRIBUTES = Line.DXF_ATTRIBUTES
class TestEntity(unittest.TestCase):
def test_init(self):
e = MockEntity()
self.assertEqual(e['layer'], '0')
def test_init_with_kwargs(self):
e = MockEntity(layer='1')
self.assertEqual(e['layer'], '1')
def test_set_get_attribute(self):
e = MockEntity()
e['layer'] = '1'
self.assertEqual(e['layer'], '1')
def test_get_attribute_error(self):
e = MockEntity()
with self.assertRaises(KeyError):
result = e['mozman']
def test_set_attribute_error(self):
e = MockEntity()
with self.assertRaises(KeyError):
e['mozman'] = 'test'
if __name__ == '__main__':
unittest.main() | [
"[email protected]"
] | |
238d011d53f467d54bd37298772793a292644147 | d87483a2c0b50ed97c1515d49d62c6e9feaddbe0 | /.history/buy_top_fc_20210203213703.py | da47d36dfa2e2de55eb512db4b137d0e379b7d01 | [
"MIT"
] | permissive | HopperKremer/hoptrader | 0d36b6e33922414003cf689fb81f924da076a54b | 406793c10bc888648290fd15c7c2af62cf8c6c67 | refs/heads/main | 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,567 | py | # Buy top tickers from Financhill
import requests
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import tda
import os, sys
import time
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config # stored in parent directory for security
from selenium import webdriver
import json
token_path = 'token'
DRIVER_PATH = "/home/hopper/chromedriver"
driver = webdriver.Chrome(DRIVER_PATH)
redirect_uri = "https://localhost"
try:
c = auth.client_from_token_file(token_path, config.api_key)
except FileNotFoundError:
c = auth.client_from_login_flow(
driver, config.api_key, redirect_uri, token_path
)
#All this scraping code works
driver.get("https://financhill.com/screen/stock-score")
time.sleep(2)
driver.find_element_by_css_selector(
'span[data-sort-name="stock_score_normalized"]'
).click()
time.sleep(2)
tickers = driver.find_elements_by_tag_name("td")
i = 0
# Each screener row apparently spans ten <td> cells; the ones of interest are
# [0]: Ticker, [1]: Share Price, [2]: Rating, [3]: Score,
# [4]: Rating Change Date, [5]: Price Change % - hence the stride of 10 below,
# which walks the first four rows while i < 40.
while i < 40:
ticker = str(tickers[i].text)
print(ticker)
# How many dollars of each stock to buy:
desired_dollar_amount = 1000
num_shares = round(desired_dollar_amount/float(tickers[i+1].text))
print(num_shares)
# order = equity_buy_market(ticker, 1)
# r = c.place_order(config.tda_acct_num, order)
# time.sleep(2)
# print(r.status_code)
i += 10
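# Hypothetical sizing sketch (mirrors the commented-out order code above, but
# uses the computed num_shares instead of a hard-coded quantity of 1):
# order = equity_buy_market(ticker, num_shares)
# r = c.place_order(config.tda_acct_num, order)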
driver.quit() | [
"[email protected]"
] | |
c315767ae48f4b9f82aeda1f355e3cd3dc81471b | 7ddded3d38469cd3238a702d7d62cf816cb0d5d5 | /cmsplugin_cascade/segmentation/mixins.py | 92dd20dd334d71023b13787c88506ee2f5282004 | [
"MIT"
] | permissive | sayonetech/djangocms-cascade | 8e249ab83bba97dad27aee468a2a6fce0eb58f3b | 699d645cefae926d32879fbc6837693082f84c78 | refs/heads/master | 2022-05-25T02:31:58.059789 | 2020-04-25T07:33:29 | 2020-04-25T07:33:29 | 259,930,919 | 0 | 0 | MIT | 2020-04-29T13:20:54 | 2020-04-29T13:20:53 | null | UTF-8 | Python | false | false | 7,488 | py | from django import VERSION as DJANGO_VERSION
from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.http import HttpResponse, HttpResponseBadRequest
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _, ungettext
from django.utils.html import format_html
from cms.constants import REFRESH_PAGE
class SegmentPluginModelMixin(object):
"""
TODO: whenever cmsplugin_cascade drops support for django-CMS < 3.4, this mixin class
shall be added to the plugin rather than to the model
"""
def get_context_override(self, request):
"""
Return a dictionary to override the request context object during evaluation with
alternative values. Normally this is an empty dict. However, when a staff user overrides
the segmentation, then update the context with this returned dict.
"""
return {}
def render_plugin(self, context=None, placeholder=None, admin=False, processors=None):
context.update(self.get_context_override(context['request']))
content = super().render_plugin(context, placeholder, admin, processors)
context.pop()
return content
class EmulateUserModelMixin(SegmentPluginModelMixin):
UserModel = get_user_model()
def get_context_override(self, request):
"""
Override the request object with an emulated user.
"""
context_override = super().get_context_override(request)
try:
if request.user.is_staff:
user = self.UserModel.objects.get(pk=request.session['emulate_user_id'])
context_override.update(user=user)
except (self.UserModel.DoesNotExist, KeyError):
pass
return context_override
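# Minimal combination sketch (MySegmentPluginModel is hypothetical; the
# concrete Cascade model base class is not shown in this file):
#
#     class MySegmentPluginModel(EmulateUserModelMixin, CascadeModelBase):
#         pass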
class EmulateUserAdminMixin(object):
UserModel = get_user_model()
@staticmethod
def populate_toolbar(segmentation_menu, request):
active = 'emulate_user_id' in request.session
segmentation_menu.add_sideframe_item(
_("Emulate User"),
url=reverse('admin:emulate-users'),
active=active,
)
segmentation_menu.add_ajax_item(
_("Clear emulations"),
action=reverse('admin:clear-emulations'),
on_success=REFRESH_PAGE,
disabled=not active,
)
def get_urls(self):
return [
url(r'^emulate_users/$', self.admin_site.admin_view(self.emulate_users), name='emulate-users'),
url(r'^emulate_user/(?P<user_id>\d+)/$', self.admin_site.admin_view(self.emulate_user), name='emulate-user'),
url(r'^clear_emulations/$', self.admin_site.admin_view(self.clear_emulations), name='clear-emulations'),
] + super().get_urls()
def emulate_user(self, request, user_id):
try:
request.session['emulate_user_id'] = int(user_id)
return HttpResponse('OK')
        except TypeError as err:
            return HttpResponseBadRequest(str(err))  # exceptions have no .message on Python 3
def emulate_users(self, request):
"""
The list view
"""
def display_as_link(obj):
try:
identifier = getattr(user_model_admin, list_display_link)(obj)
except AttributeError:
identifier = admin.utils.lookup_field(list_display_link, obj, model_admin=self)[2]
emulate_user_id = request.session.get('emulate_user_id')
if emulate_user_id == obj.id:
return format_html('<strong>{}</strong>', identifier)
fmtargs = {
'href': reverse('admin:emulate-user', kwargs={'user_id': obj.id}),
'identifier': identifier,
}
return format_html('<a href="{href}" class="emulate-user">{identifier}</a>', **fmtargs)
opts = self.UserModel._meta
app_label = opts.app_label
user_model_admin = self.admin_site._registry[self.UserModel]
request._lookup_model = self.UserModel
list_display_links = user_model_admin.get_list_display_links(request, user_model_admin.list_display)
# replace first entry in list_display_links by customized method display_as_link
list_display_link = list_display_links[0]
try:
list_display = list(user_model_admin.segmentation_list_display)
except AttributeError:
list_display = list(user_model_admin.list_display)
list_display.remove(list_display_link)
list_display.insert(0, 'display_as_link')
display_as_link.allow_tags = True # TODO: presumably not required anymore since Django-1.9
try:
display_as_link.short_description = user_model_admin.identifier.short_description
except AttributeError:
display_as_link.short_description = admin.utils.label_for_field(list_display_link, self.UserModel)
self.display_as_link = display_as_link
ChangeList = self.get_changelist(request)
if DJANGO_VERSION < (2, 1):
cl = ChangeList(request, self.UserModel, list_display,
(None,), # disable list_display_links in ChangeList, instead override that field
user_model_admin.list_filter,
user_model_admin.date_hierarchy, user_model_admin.search_fields,
user_model_admin.list_select_related, user_model_admin.list_per_page,
user_model_admin.list_max_show_all,
(), # disable list_editable
self)
else:
cl = ChangeList(request, self.UserModel, list_display,
(None,), # disable list_display_links in ChangeList, instead override that field
user_model_admin.list_filter,
user_model_admin.date_hierarchy, user_model_admin.search_fields,
user_model_admin.list_select_related, user_model_admin.list_per_page,
user_model_admin.list_max_show_all,
(), # disable list_editable
self,
None)
cl.formset = None
selection_note_all = ungettext('%(total_count)s selected',
'All %(total_count)s selected', cl.result_count)
context = {
'module_name': str(opts.verbose_name_plural),
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
'selection_note_all': selection_note_all % {'total_count': cl.result_count},
'title': _("Select %(user_model)s to emulate") % {'user_model': opts.verbose_name},
'is_popup': cl.is_popup,
'cl': cl,
'media': self.media,
'has_add_permission': False,
'opts': cl.opts,
'app_label': app_label,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
'actions_selection_counter': self.actions_selection_counter,
'preserved_filters': self.get_preserved_filters(request),
}
return TemplateResponse(request, self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.model_name),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context)
def clear_emulations(self, request):
request.session.pop('emulate_user_id', None)
return HttpResponse('OK')
| [
"[email protected]"
] | |
b86fb3e8323c7fac53da50672674d2d083ec8e83 | c137d7fb6eaa1c1900a63b8dae6b027176a98b6f | /MxShop/apps/user_operation/migrations/0002_auto_20191215_1324.py | 67ebf35770168b3b304df8aed21406cfe8fb758c | [] | no_license | LasterSmithKim/vuedjango | 22220414ad2f928f0a0df1a0e68c9083e90c1cc7 | 4a5b7fee4dd3f2d31255d7dc9188ea977a75db29 | refs/heads/master | 2022-12-10T19:52:25.014956 | 2019-12-23T16:23:01 | 2019-12-23T16:23:01 | 225,315,491 | 0 | 0 | null | 2022-11-22T04:52:05 | 2019-12-02T07:47:12 | JavaScript | UTF-8 | Python | false | false | 495 | py | # Generated by Django 2.2.7 on 2019-12-15 13:24
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('goods', '0003_auto_20191201_0823'),
('user_operation', '0001_initial'),
]
operations = [
migrations.AlterUniqueTogether(
name='userfav',
unique_together={('user', 'goods')},
),
]
| [
"[email protected]"
] | |
dac682aff595a8ac60201e3480935c04e5b62c3b | 83cf642504313b6ef6527dda52158a6698c24efe | /scripts/addons/fd_scripting_tools/autocompletion/suggestions/dynamic/_bpy_fake/__private__/obstacle.py | 7598560cf2b91178e54ad5d8d3e0f5402501e10f | [] | no_license | PyrokinesisStudio/Fluid-Designer-Scripts | a4c40b871e8d27b0d76a8025c804d5a41d09128f | 23f6fca7123df545f0c91bf4617f4de7d9c12e6b | refs/heads/master | 2021-06-07T15:11:27.144473 | 2016-11-08T03:02:37 | 2016-11-08T03:02:37 | 113,630,627 | 1 | 0 | null | 2017-12-09T00:55:58 | 2017-12-09T00:55:58 | null | UTF-8 | Python | false | false | 500 | py | from . struct import Struct
from . bpy_struct import bpy_struct
import mathutils
class Obstacle(bpy_struct):
@property
def rna_type(self):
'''(Struct) RNA type definition'''
return Struct()
@property
def name(self):
'''(String) Unique name used in the code and scripting'''
return str()
@property
def bp_name(self):
'''(String)'''
return str()
@property
def base_point(self):
'''(String)'''
return str() | [
"[email protected]"
] | |
358c3dc0cf6ca85640bf9dce63f0bc8db277b92d | c5291e50a3c72c885922378573a0ad423fcedf05 | /EasyPay/billpay/controllers/transaction.py | 5d51ce3230840967ef71d5b124a63aff15748a5b | [] | no_license | raghurammanyam/django-projects | bcc3ed6285882af437a2995514cef33760fb063e | dd20ae354f7f111a0176a1cc047c099bd23e9f05 | refs/heads/master | 2022-12-12T19:22:31.698114 | 2018-12-09T09:41:45 | 2018-12-09T09:41:45 | 137,443,359 | 0 | 0 | null | 2022-11-22T03:01:07 | 2018-06-15T05:08:15 | Python | UTF-8 | Python | false | false | 1,353 | py | from rest_framework.views import APIView
from rest_framework.response import Response
from django.core import serializers
from django.http import JsonResponse
from billpay.models import transactions
from billpay.serializers.transactionserializer import transactionSerializer,gettransactionSerializer
from django.http import Http404
from rest_framework import status
from django.conf import settings
import logging
logger = logging.getLogger('billpay.transaction')
class addtransaction(APIView):
def get(self,request,*args,**kwargs):
try:
            queryset = transactions.objects.all()
            serializer = gettransactionSerializer(queryset, many=True)
return JsonResponse({"success":True,"data":serializer.data})
except transactions.DoesNotExist:
raise Http404
def post(self,request,format=None):
try:
serializer=transactionSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
logger.info(serializer.data)
                return JsonResponse({'data': serializer.data, 'message': 'transaction added successfully'})
logger.error(serializer.errors)
return JsonResponse({"message":serializer.errors})
except Http404:
return JsonResponse({"success":False,"message":"transaction not added"})
| [
"[email protected]"
] | |
3e2c4eec3c3fd761e5f94ad65d35d6609cc1d30a | c41edf53089b1ee466ea578aa74f0c1e9b95a8b3 | /aqi_v3.0.py | 04b339479b8e885f186fe65eb4e7373bb5804cc2 | [] | no_license | gjw199513/aqidemo | fb4e81e6d1f6b40c14bbcc5401ce57eac11beb2b | 64dc64def274916aa513890cb24e18777518c375 | refs/heads/master | 2021-05-11T12:33:20.256133 | 2018-01-16T09:17:49 | 2018-01-16T09:17:49 | 117,661,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 884 | py | # -*- coding:utf-8 -*-
__author__ = 'gjw'
__time__ = '2018/1/16 0016 上午 9:33'
# AQI (Air Quality Index) computation
# Read a JSON file of city records and export them as CSV sorted by AQI
import json
import csv
def process_json_file(filepath):
"""
解码json文件
:param filepath:
:return:
"""
f = open(filepath, mode='r', encoding='utf-8')
city_list = json.load(f)
return city_list
def main():
"""
    Main entry point: load the city list, sort it by AQI, write aqi.csv.
    :return: None
"""
filepath = input("请输入json文件名称:")
city_list = process_json_file(filepath)
city_list.sort(key=lambda city: city['aqi'])
    lines = []
    # header row (column names)
    lines.append(list(city_list[0].keys()))
    for city in city_list:
        lines.append(list(city.values()))
    with open('aqi.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        for line in lines:
            writer.writerow(line)
if __name__ == '__main__':
main()
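# Expected input shape (hypothetical sample - the real file ships with the
# aqidemo project): a JSON array of city records, e.g.
# [{"city_name": "...", "aqi": 53, ...}, ...]; each record must carry an
# "aqi" key for the sort in main().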
| [
"[email protected]"
] | |
55bd0b4a233bda1faa5458145b221dc1f947400a | e7c3d2b1fd7702b950e31beed752dd5db2d127bd | /code/pythagorean_tree/sol_395.py | a058d0ed47da7c5635ddf92ee8c14e841b46c257 | [
"Apache-2.0"
] | permissive | Ved005/project-euler-solutions | bbadfc681f5ba4b5de7809c60eb313897d27acfd | 56bf6a282730ed4b9b875fa081cf4509d9939d98 | refs/heads/master | 2021-09-25T08:58:32.797677 | 2018-10-20T05:40:58 | 2018-10-20T05:40:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,396 | py |
# -*- coding: utf-8 -*-
'''
File name: code\pythagorean_tree\sol_395.py
Author: Vaidic Joshi
Date created: Oct 20, 2018
Python Version: 3.x
'''
# Solution to Project Euler Problem #395 :: Pythagorean tree
#
# For more information see:
# https://projecteuler.net/problem=395
# Problem Statement
'''
The Pythagorean tree is a fractal generated by the following procedure:
Start with a unit square. Then, calling one of the sides its base (in the animation, the bottom side is the base):
Attach a right triangle to the side opposite the base, with the hypotenuse coinciding with that side and with the sides in a 3-4-5 ratio. Note that the smaller side of the triangle must be on the 'right' side with respect to the base (see animation).
Attach a square to each leg of the right triangle, with one of its sides coinciding with that leg.
Repeat this procedure for both squares, considering as their bases the sides touching the triangle.
The resulting figure, after an infinite number of iterations, is the Pythagorean tree.
It can be shown that there exists at least one rectangle, whose sides are parallel to the largest square of the Pythagorean tree, which encloses the Pythagorean tree completely.
Find the smallest area possible for such a bounding rectangle, and give your answer rounded to 10 decimal places.
'''
# Solution
# Solution Approach
'''
'''
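# A possible approach (sketch only, no implementation provided here): represent
# every square by the similarity transform that maps the unit square onto it;
# the two children are fixed transforms of the parent (scales 4/5 and 3/5 plus
# the rotations of the 3-4-5 triangle). Expand the tree recursively, pruning a
# branch once its maximal reachable extent can no longer enlarge the running
# bounding box, and report width * height rounded to 10 decimal places.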
| [
"[email protected]"
] | |
d1655810249067814fa279b9d239f50bee8f2737 | 795df757ef84073c3adaf552d5f4b79fcb111bad | /r8lib/r8_besj0.py | fe91ea0ac6c7d9fc8c82a57a9db0e31023d84ff0 | [] | no_license | tnakaicode/jburkardt-python | 02cb2f9ba817abf158fc93203eb17bf1cb3a5008 | 1a63f7664e47d6b81c07f2261b44f472adc4274d | refs/heads/master | 2022-05-21T04:41:37.611658 | 2022-04-09T03:31:00 | 2022-04-09T03:31:00 | 243,854,197 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,405 | py | #! /usr/bin/env python
#
def r8_b0mp ( x ):
#*****************************************************************************80
#
## R8_B0MP evaluates the modulus and phase for the Bessel J0 and Y0 functions.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 26 April 2016
#
# Author:
#
# Original FORTRAN77 version by Wayne Fullerton.
# Python version by John Burkardt.
#
# Reference:
#
# Wayne Fullerton,
# Portable Special Function Routines,
# in Portability of Numerical Software,
# edited by Wayne Cowell,
# Lecture Notes in Computer Science, Volume 57,
# Springer 1977,
# ISBN: 978-3-540-08446-4,
# LC: QA297.W65.
#
# Parameters:
#
# Input, real X, the argument.
#
# Output, real AMPL, THETA, the modulus and phase.
#
import numpy as np
from r8_csevl import r8_csevl
from r8_inits import r8_inits
from machine import r8_mach
from sys import exit
bm0cs = np.array ( [ \
+0.9211656246827742712573767730182E-01, \
-0.1050590997271905102480716371755E-02, \
+0.1470159840768759754056392850952E-04, \
-0.5058557606038554223347929327702E-06, \
+0.2787254538632444176630356137881E-07, \
-0.2062363611780914802618841018973E-08, \
+0.1870214313138879675138172596261E-09, \
-0.1969330971135636200241730777825E-10, \
+0.2325973793999275444012508818052E-11, \
-0.3009520344938250272851224734482E-12, \
+0.4194521333850669181471206768646E-13, \
-0.6219449312188445825973267429564E-14, \
+0.9718260411336068469601765885269E-15, \
-0.1588478585701075207366635966937E-15, \
+0.2700072193671308890086217324458E-16, \
-0.4750092365234008992477504786773E-17, \
+0.8615128162604370873191703746560E-18, \
-0.1605608686956144815745602703359E-18, \
+0.3066513987314482975188539801599E-19, \
-0.5987764223193956430696505617066E-20, \
+0.1192971253748248306489069841066E-20, \
-0.2420969142044805489484682581333E-21, \
+0.4996751760510616453371002879999E-22, \
-0.1047493639351158510095040511999E-22, \
+0.2227786843797468101048183466666E-23, \
-0.4801813239398162862370542933333E-24, \
+0.1047962723470959956476996266666E-24, \
-0.2313858165678615325101260800000E-25, \
+0.5164823088462674211635199999999E-26, \
-0.1164691191850065389525401599999E-26, \
+0.2651788486043319282958336000000E-27, \
-0.6092559503825728497691306666666E-28, \
+0.1411804686144259308038826666666E-28, \
-0.3298094961231737245750613333333E-29, \
+0.7763931143074065031714133333333E-30, \
-0.1841031343661458478421333333333E-30, \
+0.4395880138594310737100799999999E-31 ] )
bm02cs = np.array ( [ \
+0.9500415145228381369330861335560E-01, \
-0.3801864682365670991748081566851E-03, \
+0.2258339301031481192951829927224E-05, \
-0.3895725802372228764730621412605E-07, \
+0.1246886416512081697930990529725E-08, \
-0.6065949022102503779803835058387E-10, \
+0.4008461651421746991015275971045E-11, \
-0.3350998183398094218467298794574E-12, \
+0.3377119716517417367063264341996E-13, \
-0.3964585901635012700569356295823E-14, \
+0.5286111503883857217387939744735E-15, \
-0.7852519083450852313654640243493E-16, \
+0.1280300573386682201011634073449E-16, \
-0.2263996296391429776287099244884E-17, \
+0.4300496929656790388646410290477E-18, \
-0.8705749805132587079747535451455E-19, \
+0.1865862713962095141181442772050E-19, \
-0.4210482486093065457345086972301E-20, \
+0.9956676964228400991581627417842E-21, \
-0.2457357442805313359605921478547E-21, \
+0.6307692160762031568087353707059E-22, \
-0.1678773691440740142693331172388E-22, \
+0.4620259064673904433770878136087E-23, \
-0.1311782266860308732237693402496E-23, \
+0.3834087564116302827747922440276E-24, \
-0.1151459324077741271072613293576E-24, \
+0.3547210007523338523076971345213E-25, \
-0.1119218385815004646264355942176E-25, \
+0.3611879427629837831698404994257E-26, \
-0.1190687765913333150092641762463E-26, \
+0.4005094059403968131802476449536E-27, \
-0.1373169422452212390595193916017E-27, \
+0.4794199088742531585996491526437E-28, \
-0.1702965627624109584006994476452E-28, \
+0.6149512428936330071503575161324E-29, \
-0.2255766896581828349944300237242E-29, \
+0.8399707509294299486061658353200E-30, \
-0.3172997595562602355567423936152E-30, \
+0.1215205298881298554583333026514E-30, \
-0.4715852749754438693013210568045E-31 ] )
bt02cs = np.array ( [ \
-0.24548295213424597462050467249324, \
+0.12544121039084615780785331778299E-02, \
-0.31253950414871522854973446709571E-04, \
+0.14709778249940831164453426969314E-05, \
-0.99543488937950033643468850351158E-07, \
+0.85493166733203041247578711397751E-08, \
-0.86989759526554334557985512179192E-09, \
+0.10052099533559791084540101082153E-09, \
-0.12828230601708892903483623685544E-10, \
+0.17731700781805131705655750451023E-11, \
-0.26174574569485577488636284180925E-12, \
+0.40828351389972059621966481221103E-13, \
-0.66751668239742720054606749554261E-14, \
+0.11365761393071629448392469549951E-14, \
-0.20051189620647160250559266412117E-15, \
+0.36497978794766269635720591464106E-16, \
-0.68309637564582303169355843788800E-17, \
+0.13107583145670756620057104267946E-17, \
-0.25723363101850607778757130649599E-18, \
+0.51521657441863959925267780949333E-19, \
-0.10513017563758802637940741461333E-19, \
+0.21820381991194813847301084501333E-20, \
-0.46004701210362160577225905493333E-21, \
+0.98407006925466818520953651199999E-22, \
-0.21334038035728375844735986346666E-22, \
+0.46831036423973365296066286933333E-23, \
-0.10400213691985747236513382399999E-23, \
+0.23349105677301510051777740800000E-24, \
-0.52956825323318615788049749333333E-25, \
+0.12126341952959756829196287999999E-25, \
-0.28018897082289428760275626666666E-26, \
+0.65292678987012873342593706666666E-27, \
-0.15337980061873346427835733333333E-27, \
+0.36305884306364536682359466666666E-28, \
-0.86560755713629122479172266666666E-29, \
+0.20779909972536284571238399999999E-29, \
-0.50211170221417221674325333333333E-30, \
+0.12208360279441714184191999999999E-30, \
-0.29860056267039913454250666666666E-31 ] )
bth0cs = np.array ( [ \
-0.24901780862128936717709793789967, \
+0.48550299609623749241048615535485E-03, \
-0.54511837345017204950656273563505E-05, \
+0.13558673059405964054377445929903E-06, \
-0.55691398902227626227583218414920E-08, \
+0.32609031824994335304004205719468E-09, \
-0.24918807862461341125237903877993E-10, \
+0.23449377420882520554352413564891E-11, \
-0.26096534444310387762177574766136E-12, \
+0.33353140420097395105869955014923E-13, \
-0.47890000440572684646750770557409E-14, \
+0.75956178436192215972642568545248E-15, \
-0.13131556016891440382773397487633E-15, \
+0.24483618345240857495426820738355E-16, \
-0.48805729810618777683256761918331E-17, \
+0.10327285029786316149223756361204E-17, \
-0.23057633815057217157004744527025E-18, \
+0.54044443001892693993017108483765E-19, \
-0.13240695194366572724155032882385E-19, \
+0.33780795621371970203424792124722E-20, \
-0.89457629157111779003026926292299E-21, \
+0.24519906889219317090899908651405E-21, \
-0.69388422876866318680139933157657E-22, \
+0.20228278714890138392946303337791E-22, \
-0.60628500002335483105794195371764E-23, \
+0.18649748964037635381823788396270E-23, \
-0.58783732384849894560245036530867E-24, \
+0.18958591447999563485531179503513E-24, \
-0.62481979372258858959291620728565E-25, \
+0.21017901684551024686638633529074E-25, \
-0.72084300935209253690813933992446E-26, \
+0.25181363892474240867156405976746E-26, \
-0.89518042258785778806143945953643E-27, \
+0.32357237479762298533256235868587E-27, \
-0.11883010519855353657047144113796E-27, \
+0.44306286907358104820579231941731E-28, \
-0.16761009648834829495792010135681E-28, \
+0.64292946921207466972532393966088E-29, \
-0.24992261166978652421207213682763E-29, \
+0.98399794299521955672828260355318E-30, \
-0.39220375242408016397989131626158E-30, \
+0.15818107030056522138590618845692E-30, \
-0.64525506144890715944344098365426E-31, \
+0.26611111369199356137177018346367E-31 ] )
eta = 0.1 * r8_mach ( 3 )
nbm0 = r8_inits ( bm0cs, 37, eta )
nbt02 = r8_inits ( bt02cs, 39, eta )
nbm02 = r8_inits ( bm02cs, 40, eta )
nbth0 = r8_inits ( bth0cs, 44, eta )
xmax = 1.0 / r8_mach ( 4 )
if ( x < 4.0 ):
print ( '' )
print ( 'R8_B0MP - Fatal error!' )
print ( ' X < 4.' )
exit ( 'R8_B0MP - Fatal error!' )
elif ( x <= 8.0 ):
z = ( 128.0 / x / x - 5.0 ) / 3.0
ampl = ( 0.75 + r8_csevl ( z, bm0cs, nbm0 ) ) / np.sqrt ( x )
theta = x - 0.25 * np.pi + r8_csevl ( z, bt02cs, nbt02 ) / x
else:
z = 128.0 / x / x - 1.0
ampl = ( 0.75 + r8_csevl ( z, bm02cs, nbm02) ) / np.sqrt ( x )
theta = x - 0.25 * np.pi + r8_csevl ( z, bth0cs, nbth0 ) / x
return ampl, theta
def r8_besj0 ( x ):
#*****************************************************************************80
#
## R8_BESJ0 evaluates the Bessel function J of order 0 of an R8 argument.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 26 April 2016
#
# Author:
#
# Original FORTRAN77 version by Wayne Fullerton.
# Python version by John Burkardt.
#
# Reference:
#
# Wayne Fullerton,
# Portable Special Function Routines,
# in Portability of Numerical Software,
# edited by Wayne Cowell,
# Lecture Notes in Computer Science, Volume 57,
# Springer 1977,
# ISBN: 978-3-540-08446-4,
# LC: QA297.W65.
#
# Parameters:
#
# Input, real X, the argument.
#
# Output, real VALUE, the Bessel function J of order 0 of X.
#
import numpy as np
from r8_csevl import r8_csevl
from r8_inits import r8_inits
from machine import r8_mach
bj0cs = np.array ( [ \
+0.10025416196893913701073127264074, \
-0.66522300776440513177678757831124, \
+0.24898370349828131370460468726680, \
-0.33252723170035769653884341503854E-01, \
+0.23114179304694015462904924117729E-02, \
-0.99112774199508092339048519336549E-04, \
+0.28916708643998808884733903747078E-05, \
-0.61210858663032635057818407481516E-07, \
+0.98386507938567841324768748636415E-09, \
-0.12423551597301765145515897006836E-10, \
+0.12654336302559045797915827210363E-12, \
-0.10619456495287244546914817512959E-14, \
+0.74706210758024567437098915584000E-17, \
-0.44697032274412780547627007999999E-19, \
+0.23024281584337436200523093333333E-21, \
-0.10319144794166698148522666666666E-23, \
+0.40608178274873322700800000000000E-26, \
-0.14143836005240913919999999999999E-28, \
+0.43910905496698880000000000000000E-31 ] )
ntj0 = r8_inits ( bj0cs, 19, 0.1 * r8_mach ( 3 ) )
xsml = np.sqrt ( 4.0 * r8_mach ( 3 ) )
y = abs ( x )
if ( y <= xsml ):
value = 1.0
elif ( y <= 4.0 ):
value = r8_csevl ( 0.125 * y * y - 1.0, bj0cs, ntj0 )
else:
ampl, theta = r8_b0mp ( y )
value = ampl * np.cos ( theta )
return value
def r8_besj0_test ( ):
#*****************************************************************************80
#
## R8_BESJ0_TEST tests R8_BESJ0.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 26 April 2016
#
# Author:
#
# John Burkardt
#
import platform
from bessel_j0_values import bessel_j0_values
print ( '' )
print ( 'R8_BESJ0_TEST:' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' R8_BESJ0 evaluates the Bessel J0(x) function' )
print ( '' )
print ( ' X BESJ0(X) R8_BESJ0(X) Diff' )
print ( '' )
n_data = 0
while ( True ):
n_data, x, fx1 = bessel_j0_values ( n_data )
if ( n_data == 0 ):
break
fx2 = r8_besj0 ( x )
print ( ' %14.4g %14.6g %14.6g %14.6g' % ( x, fx1, fx2, abs ( fx1 - fx2 ) ) )
#
# Terminate.
#
print ( '' )
print ( 'R8_BESJ0_TEST:' )
print ( ' Normal end of execution.' )
return
def r8_besy0 ( x ):
#*****************************************************************************80
#
## R8_BESY0 evaluates the Bessel function Y of order 0 of an R8 argument.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 26 April 2016
#
# Author:
#
# Original FORTRAN77 version by Wayne Fullerton.
# Python version by John Burkardt.
#
# Reference:
#
# Wayne Fullerton,
# Portable Special Function Routines,
# in Portability of Numerical Software,
# edited by Wayne Cowell,
# Lecture Notes in Computer Science, Volume 57,
# Springer 1977,
# ISBN: 978-3-540-08446-4,
# LC: QA297.W65.
#
# Parameters:
#
# Input, real X, the argument.
#
# Output, real VALUE, the Bessel function Y of order 0 of X.
#
import numpy as np
from r8_csevl import r8_csevl
from r8_inits import r8_inits
from machine import r8_mach
from sys import exit
alnhaf = -0.69314718055994530941723212145818
twodpi = 0.636619772367581343075535053490057
by0cs = np.array ( [ \
-0.1127783939286557321793980546028E-01, \
-0.1283452375604203460480884531838, \
-0.1043788479979424936581762276618, \
+0.2366274918396969540924159264613E-01, \
-0.2090391647700486239196223950342E-02, \
+0.1039754539390572520999246576381E-03, \
-0.3369747162423972096718775345037E-05, \
+0.7729384267670667158521367216371E-07, \
-0.1324976772664259591443476068964E-08, \
+0.1764823261540452792100389363158E-10, \
-0.1881055071580196200602823012069E-12, \
+0.1641865485366149502792237185749E-14, \
-0.1195659438604606085745991006720E-16, \
+0.7377296297440185842494112426666E-19, \
-0.3906843476710437330740906666666E-21, \
+0.1795503664436157949829120000000E-23, \
-0.7229627125448010478933333333333E-26, \
+0.2571727931635168597333333333333E-28, \
-0.8141268814163694933333333333333E-31 ] )
nty0 = r8_inits ( by0cs, 19, 0.1 * r8_mach ( 3 ) )
xsml = np.sqrt ( 4.0 * r8_mach ( 3 ) )
if ( x <= 0.0 ):
print ( '' )
print ( 'R8_BESY0 - Fatal error!' )
print ( ' X <= 0.' )
exit ( 'R8_BESY0 - Fatal error!' )
elif ( x <= xsml ):
y = 0.0
value = twodpi * ( alnhaf + np.log ( x ) ) * r8_besj0 ( x ) \
+ 0.375 + r8_csevl ( 0.125 * y - 1.0, by0cs, nty0 )
elif ( x <= 4.0 ):
y = x * x
value = twodpi * ( alnhaf + np.log ( x ) ) * r8_besj0 ( x ) \
+ 0.375 + r8_csevl ( 0.125 * y - 1.0, by0cs, nty0 )
else:
ampl, theta = r8_b0mp ( x )
value = ampl * np.sin ( theta )
return value
def r8_besy0_test ( ):
#*****************************************************************************80
#
## R8_BESY0_TEST tests R8_BESY0.
#
# Licensing:
#
# This code is distributed under the GNU LGPL license.
#
# Modified:
#
# 26 April 2016
#
# Author:
#
# John Burkardt
#
import platform
from bessel_y0_values import bessel_y0_values
print ( '' )
print ( 'R8_BESY0_TEST:' )
print ( ' Python version: %s' % ( platform.python_version ( ) ) )
print ( ' R8_BESY0 evaluates the Bessel Y0(X) function.' )
print ( '' )
print ( ' X BESY0(X) R8_BESY0(X) Diff' )
print ( '' )
n_data = 0
while ( True ):
n_data, x, fx1 = bessel_y0_values ( n_data )
if ( n_data == 0 ):
break
fx2 = r8_besy0 ( x )
print ( ' %14.4g %14.6g %14.6g %14.6g' % ( x, fx1, fx2, abs ( fx1 - fx2 ) ) )
#
# Terminate.
#
print ( '' )
print ( 'R8_BESY0_TEST:' )
print ( ' Normal end of execution.' )
return
if ( __name__ == '__main__' ):
from timestamp import timestamp
timestamp ( )
r8_besj0_test ( )
r8_besy0_test ( )
timestamp ( )
| [
"[email protected]"
] | |
4921921057dceaa1a35447e580f62d0b41e5898e | e6590826c10648c472743c20b898655ec0ef3ce5 | /7.14.py | 1417073e3f79cdfa391f4ccd257ba21c5e5cc211 | [] | no_license | suddencode/pythontutor_2018 | a770fbf476af049dc8f04c8b0f81cce7922a63c4 | 767cdd0d980be290a613ebda455a49daad1a7902 | refs/heads/master | 2020-03-24T00:15:07.744889 | 2019-02-11T20:17:42 | 2019-02-11T20:17:42 | 142,281,968 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 131 | py | a = [int(s) for s in input().split()]
c = ''
for f in range(len(a)):
if a.count(a[f]) == 1:
c += str(a[f]) + ' '
print(c)
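# Example (hypothetical run): for the input "1 2 2 3", only 1 and 3 occur
# exactly once, so the program prints "1 3 " (with a trailing space).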
| [
"[email protected]"
] | |
5c740df3c278cbd5a651616a8c6362d2a129e5df | 0a46b027e8e610b8784cb35dbad8dd07914573a8 | /scripts/venv/lib/python2.7/site-packages/cogent/struct/selection.py | 03f31e74ff518727fac35b01df2868657cf41af5 | [
"MIT"
] | permissive | sauloal/cnidaria | bb492fb90a0948751789938d9ec64677052073c3 | fe6f8c8dfed86d39c80f2804a753c05bb2e485b4 | refs/heads/master | 2021-01-17T13:43:17.307182 | 2016-10-05T14:14:46 | 2016-10-05T14:14:46 | 33,726,643 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,229 | py | """Contains functions to select and group structural entities."""
from cogent.core.entity import StructureHolder, ModelHolder, ChainHolder, \
ResidueHolder, AtomHolder, HIERARCHY
__author__ = "Marcin Cieslik"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Marcin Cieslik"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Marcin Cieslik"
__email__ = "[email protected]"
__status__ = "Development"
def select(entities, level, *args, **kwargs):
"""Shorthand for ``einput`` and subsequent ``selectChildren``. Returns
Returns a ``Holder`` instance. The "name" can be specified.
Additional arguments and keyworded arguments are passed to the
``selectChildren`` method of the holder instance.
"""
try:
name = kwargs.pop('name')
except KeyError:
name = 'select'
holder = einput(entities, level)
selection = holder.selectChildren(*args, **kwargs)
try:
holder = einput(selection.values(), level, name)
except ValueError:
raise ValueError('No entities have been selected')
return holder
def einput(entities, level, name=None):
"""Creates a ``XyzHolder`` instance of entities at the specified level. Where
Xyz is 'Structure', 'Model', 'Chain', Residue' or 'Atom'.
Arguments:
- entities: ``Entity`` instance or sequence of entities.
- level: one of 'H', 'S', 'M', 'C', 'R', 'A'
- name: optional name of the ``XyzHolder`` instance.
"""
# Keep it bug-free
all = {}
index = HIERARCHY.index(level)
for entity in entities: # __iter__ override in Entity
if index > HIERARCHY.index(entity.level): # call for children
all.update(get_children(entity, level))
elif index < HIERARCHY.index(entity.level): # call for parents
all.update(get_parent(entity, level))
else:
all.update({entity.getFull_id():entity}) # call for self
higher_level = HIERARCHY[index - 1] # one up;)
if all:
name = name or higher_level
if higher_level == 'C':
holder = ResidueHolder(name, all)
elif higher_level == 'R':
holder = AtomHolder(name, all)
elif higher_level == 'M':
holder = ChainHolder(name, all)
elif higher_level == 'S':
holder = ModelHolder(name, all)
elif higher_level == 'H':
holder = StructureHolder(name, all)
else:
raise ValueError, "einput got no input entites."
holder.setSort_tuple()
return holder
def get_children(entity, level):
"""Return unique entities of lower or equal level
Arguments:
- entity: any ``Entity`` instance.
- level: one of 'H', 'S', 'M', 'C', 'R', 'A'
"""
entity.setTable()
return entity.table[level]
def get_parent(entity, level):
"""Returns unique entities of higher level.
Arguments:
- entity: any ``Entity`` instance.
- level: one of 'H', 'S', 'M', 'C', 'R', 'A'
"""
parent = entity.getParent(level) # get the correct parent
return {parent.getFull_id(): parent}
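# Minimal usage sketch (assumes `structure` is a cogent Entity instance):
#   atoms = einput([structure], 'A')        # gather all atoms in an AtomHolder
#   residues = einput(atoms.values(), 'R')  # regroup the same atoms by residue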
| [
"[email protected]"
] | |
653912fc3bb836925fa3ad1cc2c80b096d3ce413 | c36d9d70cbb257b2ce9a214bcf38f8091e8fe9b7 | /1480_running_sum_of_1d_array.py | 2963386c35e2ba41f6b22c1535d379e20a42c43a | [] | no_license | zdadadaz/coding_practice | 3452e4fc8f4a79cb98d0d4ea06ce0bcae85f96a0 | 5ed070f22f4bc29777ee5cbb01bb9583726d8799 | refs/heads/master | 2021-06-23T17:52:40.149982 | 2021-05-03T22:31:23 | 2021-05-03T22:31:23 | 226,006,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | class Solution:
def runningSum(self, nums: List[int]) -> List[int]:
n = len(nums)
if n == 1:
return nums
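        # in-place prefix sum: after the loop, nums[i] == nums[0] + ... + nums[i]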
for i in range(1,n):
nums[i] += nums[i-1]
return nums | [
"[email protected]"
] | |
1cbbbcd8c445f2dc867f77a6fe853653e4a2819d | 0b69a011c9ffee099841c140be95ed93c704fb07 | /problemsets/Codeforces/Python/A586.py | d7228d5b1012ec2539b03decd80c92a6c9012979 | [
"Apache-2.0"
] | permissive | juarezpaulino/coderemite | 4bd03f4f2780eb6013f07c396ba16aa7dbbceea8 | a4649d3f3a89d234457032d14a6646b3af339ac1 | refs/heads/main | 2023-01-31T11:35:19.779668 | 2020-12-18T01:33:46 | 2020-12-18T01:33:46 | 320,931,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | """
*
* Author: Juarez Paulino(coderemite)
* Email: [email protected]
*
"""
import re
input()
print(len(''.join(re.split('00+',input().replace(' ','').strip('0'))))) | [
"[email protected]"
] | |
91cfed98353755db93342aea8a5adbe298724ef7 | 602ad0ee13e66f8d6de9a5a30cba599e7f98eb9b | /examples/text-classification/jcx/run_glue.py | 782b8f481a3be7a72e7832e8a8e2c6fb22b18f3b | [
"Apache-2.0"
] | permissive | pohanchi/MixtureOfExpert | 0df35d23f4e8329a32390765f52841b92f5db5d8 | b6c0663fdedea717c7956c43a0ace6d78a085542 | refs/heads/master | 2023-08-18T10:03:15.695659 | 2020-05-29T14:35:13 | 2020-05-29T14:35:13 | 266,769,844 | 0 | 1 | Apache-2.0 | 2023-09-06T17:33:07 | 2020-05-25T12:10:52 | Python | UTF-8 | Python | false | false | 8,637 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa, Albert, XLM-RoBERTa)."""
import dataclasses
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
BertForSequenceClassification,
AlbertForSequenceClassification,
AutoTokenizer,
EvalPrediction,
GlueDataset,
GlueDataTrainingArguments,
HfArgumentParser,
Trainer,
TrainingArguments,
glue_compute_metrics,
glue_output_modes,
glue_tasks_num_labels,
set_seed,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, GlueDataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
training_args.local_rank,
training_args.device,
training_args.n_gpu,
bool(training_args.local_rank != -1),
training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed)
try:
num_labels = glue_tasks_num_labels[data_args.task_name]
output_mode = glue_output_modes[data_args.task_name]
except KeyError:
raise ValueError("Task not found: %s" % (data_args.task_name))
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if training_args.albert:
config = AutoConfig.from_pretrained(
'albert-base-v2',
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
else:
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
)
config.pretrained = training_args.pretrained
config.rand_nonatt = training_args.rand_nonatt
config.full_att = training_args.full_att
config.synthesizer = training_args.synthesizer
config.mix = training_args.mix
config.all_rand = training_args.all_rand
config.hand_crafted = training_args.hand_crafted
if training_args.pretrained:
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
)
# if training_args.synthesizer:
# if training_args.full_att:
# for i in range(12):
# tmp_key = model.bert.encoder.layer[i].attention.self.key.weight
# tmp_query = model.bert.encoder.layer[i].attention.self.query.weight
# tmp_key = tmp_key[:, :]
# import pdb;pdb.set_trace()
else:
if training_args.albert:
model = AlbertForSequenceClassification(config=config)
else:
model = BertForSequenceClassification(config=config)
# model = BertForSequenceClassification.from_pretrained('out/lsh/test_ialsh-8/checkpoint-1000', config=
# Get datasets
train_dataset = (
GlueDataset(data_args, tokenizer=tokenizer, local_rank=training_args.local_rank)
if training_args.do_train
else None
)
eval_dataset = (
GlueDataset(data_args, tokenizer=tokenizer, local_rank=training_args.local_rank, evaluate=True)
if training_args.do_eval
else None
)
def compute_metrics(p: EvalPrediction) -> Dict:
if output_mode == "classification":
preds = np.argmax(p.predictions, axis=1)
elif output_mode == "regression":
preds = np.squeeze(p.predictions)
return glue_compute_metrics(data_args.task_name, preds, p.label_ids)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
)
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir)
# Evaluation
results = {}
if training_args.do_eval and training_args.local_rank in [-1, 0]:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
mnli_mm_data_args = dataclasses.replace(data_args, task_name="mnli-mm")
eval_datasets.append(
GlueDataset(mnli_mm_data_args, tokenizer=tokenizer, local_rank=training_args.local_rank, evaluate=True)
)
for eval_dataset in eval_datasets:
result = trainer.evaluate(eval_dataset=eval_dataset)
output_eval_file = os.path.join(
training_args.output_dir, f"eval_results_{eval_dataset.args.task_name}.txt"
)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(eval_dataset.args.task_name))
for key, value in result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
results.update(result)
return results
if __name__ == "__main__":
main()
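# A plausible invocation, assuming the usual GLUE fine-tuning arguments plus
# the custom attention flags parsed above (script name, paths and task name
# are illustrative, not taken from this repository):
#
#   python run_glue.py \
#       --model_name_or_path bert-base-uncased \
#       --task_name MRPC --data_dir ./glue_data/MRPC \
#       --do_train --do_eval \
#       --output_dir ./out/mrpc --overwrite_output_dir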
| [
"[email protected]"
] | |
9bf48177a873f6126e4cfa4661fed22742055db9 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_116/795.py | 219e317d76df5272a163058483e9e3d06bc08a8e | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py | #!/usr/bin/python2
def won(player):
return rows(player) or cols(player) or diag1(player) or diag2(player)
def rows(player):
for i in range(0,4):
if row(i,player):
return True
return False
def cols(player):
for j in range(0,4):
if col(j,player):
return True
return False
def row(i, player):
for j in range(0,4):
if board[i][j] not in [player,"T"]:
return False
return True
def col(j, player):
for i in range(0,4):
if board[i][j] not in [player,"T"]:
return False
return True
def diag1(player):
for i in range(0,4):
if board[i][i] not in [player,"T"]:
return False
return True
def diag2(player):
for j in range(0,4):
i = 3-j
if board[i][j] not in [player,"T"]:
return False
return True
def evaluate(board):
if won("X"):
return "X won"
if won("O"):
return "O won"
for i in range(0,4):
for j in range(0,4):
if board[i][j] == ".":
return "Game has not completed"
return "Draw"
lines = open("A-large.in").readlines()
num_cases = int(lines[0])
x = 1
y = 4
for n in range(0,num_cases):
board = lines[x:y+1]
#print board
print "Case #{0}: {1}".format(n+1, evaluate(board))
x += 5
y += 5
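# Worked example of the "T" (joker) rule: on the board below, evaluate(board)
# returns "X won", because every cell in the first row is either "X" or "T".
#
#   XXXT
#   OO..
#   ....
#   ....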
| [
"[email protected]"
] | |
85f5a86b401a45080844f34bc6d9b786c830a113 | 71c7683331a9037fda7254b3a7b1ffddd6a4c4c8 | /Phys/BsJPsiKst/python/BsJPsiKst/syst.py | 64b1b8e87dad10cd219df5f4cd53e134b168dbff | [] | no_license | pseyfert-cern-gitlab-backup/Urania | edc58ba4271089e55900f8bb4a5909e9e9c12d35 | 1b1c353ed5f1b45b3605990f60f49881b9785efd | refs/heads/master | 2021-05-18T13:33:22.732970 | 2017-12-15T14:42:04 | 2017-12-15T14:42:04 | 251,259,622 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from math import *
eps = 1.011
f = 0.951
k = 1.063
def err(s1,s2,v):
a1 = abs(s1-v)
a2 = abs(s2-v)
return max(a1,a2)
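# err() quotes a symmetric systematic uncertainty: the larger absolute
# deviation of the two shifted results s1, s2 from the nominal value v,
# e.g. err(1.02, 0.97, 1.00) -> 0.03 (up to float rounding).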
| [
"[email protected]"
] | |
bb64dbc2667d5b007272f60222fdce55f6a7ba45 | 21f98d8bb31264c94e7a98fb8eb806d7f5bd396e | /Binary Search/081. Search in Rotated Sorted Array II.py | 996738cbc12be3cb5963f0b2d735bfec29253141 | [] | no_license | mcfair/Algo | e1500d862a685e598ab85e8ed5b68170632fdfd0 | 051e2a9f6c918907cc8b665353c46042e7674e66 | refs/heads/master | 2021-10-31T01:25:46.917428 | 2021-10-08T04:52:44 | 2021-10-08T04:52:44 | 141,460,584 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 875 | py | """
When the pivot value is duplicated so that nums[0] == nums[-1], the O(log n) solution for LC33 no longer applies; trailing duplicates of the pivot must be stripped first, making the worst case O(n).
"""
class Solution(object):
def search(self, nums, target):
if not nums: return False
if target == nums[0]: return True
pivot = nums[0]
l, r = 0, len(nums)
#this while loop has worst case O(n)
while r>0 and nums[r-1] == pivot:
r-=1
while l<r:
mid = (l+r)//2
compare = nums[mid]
if (target < pivot) ^ (compare < pivot):
if target < pivot:
compare = float('-inf')
else:
compare = float('inf')
if compare == target:
return True
if compare < target:
l = mid + 1
else:
r = mid
return False
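# Hypothetical quick checks (not part of the LeetCode harness):
if __name__ == '__main__':
    sol = Solution()
    assert sol.search([2, 5, 6, 0, 0, 1, 2], 0)
    assert not sol.search([2, 5, 6, 0, 0, 1, 2], 3)
    assert sol.search([1, 0, 1, 1, 1], 0)  # duplicated pivot forces the O(n) trim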
| [
"[email protected]"
] | |
c381edbdc210fa7e13c9646b2572c064b2fb4914 | 9b68d23d4409af5921a51e2d91344090996787e5 | /main_backup20190512.py | ce5655b0b8aebe49a859eeb899f1dea4d9abaf20 | [] | no_license | sasakishun/segm | 48381b77b9a876c44edfbcdbb4a2ef19096b48f4 | d48158243853d737b7056815108aef4e0ac5b6ca | refs/heads/master | 2020-05-05T10:23:12.673182 | 2019-05-14T07:46:07 | 2019-05-14T07:46:07 | 179,943,924 | 0 | 0 | null | 2019-04-07T09:31:08 | 2019-04-07T09:31:08 | null | UTF-8 | Python | false | false | 7,122 | py | from model import Model
from load_data import Datagen, plot_data
import tensorflow as tf
from util import plot_segm_map, calc_iou
import numpy as np
import networkx as nx
import adjacency
import matplotlib.pyplot as plt
import scipy
import math
np.set_printoptions(threshold=np.inf)
G = nx.Graph()
nodes = np.array(list(range(32 * 32)))
G.add_nodes_from(nodes)
# Generate the full list of edges for the grid-shaped graph
edges = []
grid = adjacency.grid_points([32, 32])
for _grid in grid:
edges.append((_grid[0], _grid[1]))
edges.append((_grid[1], _grid[0]))
# Add the grid edges to the graph
G.add_edges_from(edges)
"""
# Display the graph
pos = nx.spring_layout(G)
nx.draw_networkx(G, pos, with_labels=True)
plt.axis("off")
plt.show()
"""
# Prepare the matrices needed for graph convolution
A = nx.adjacency_matrix(G).astype("float32")
D = nx.laplacian_matrix(G).astype("float32") + A
for i in range(G.number_of_nodes()):
D[i, i] = 1 / math.sqrt(D[i, i])
A_chil_ = D.dot(A.dot(D))
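# A_chil_ is the symmetrically normalized adjacency D^(-1/2) A D^(-1/2):
# laplacian_matrix() returns L = D - A, so L + A recovers the degree matrix.
# Note this variant omits the self-loop term (A + I) used in the original
# GCN paper.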
# Helper to convert a scipy.sparse matrix to a tf.SparseTensor
def convert_sparse_matrix_to_sparse_tensor(X):
coo = X.tocoo()
indices = np.mat([coo.row, coo.col]).transpose()
return tf.SparseTensor(indices, coo.data, coo.shape)
A_chil = convert_sparse_matrix_to_sparse_tensor(A_chil_)
# Function that builds one GCN layer
def GCN_layer(A, layer_input, W, activation):
if activation is None:
return tf.matmul(tf.sparse.matmul(A, layer_input), W)
else:
return activation(tf.matmul(tf.sparse.matmul(A, layer_input), W))
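# Shape sketch: with A of shape (N, N), layer_input of shape (N, F_in) and W
# of shape (F_in, F_out), one propagation step H' = activation(A @ H @ W)
# yields (N, F_out); here N = 32 * 32 grid nodes.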
d = 1  # dimensionality of the final output
hidden_size = 8  # output size of the first layer
learning_rate = 1e-3  # learning rate
# Model definition
X = tf.placeholder(tf.float32, shape=[G.number_of_nodes(), 256])
# _segm_map = tf.sparse_placeholder(tf.float32)
_segm_map = tf.placeholder(tf.float32, shape=[32*32, 1])
# _segm_map = convert_sparse_matrix_to_sparse_tensor(_segm_map)
# _segm_map = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[32*32, 1])
W_1 = tf.Variable(tf.random_normal([256, hidden_size]), dtype=tf.float32)
W_2 = tf.Variable(tf.random_normal([hidden_size, d]), dtype=tf.float32)
L1 = GCN_layer(A_chil, X, W_1, tf.nn.relu)
L2 = GCN_layer(A_chil, L1, W_2, None)
print("W_1:{}".format(tf.shape(W_1)))
print("W_2:{}".format(tf.shape(W_2)))
print("A_chil:{}".format(tf.shape(A_chil)))
print("L1:{}".format(tf.shape(L1)))
print("L2:{}".format(tf.shape(L2)))
A_rec = tf.sigmoid(tf.matmul(L2, tf.transpose(L2)))
# loss = tf.nn.l2_loss(tf.sparse.add(-1 * A_rec, A_chil))
# L2 = tf.sparse.to_dense(L2)
loss = tf.nn.l2_loss(tf.add(-1 * L2, _segm_map))
# loss = tf.nn.l2_loss(tf.sparse.add(-1 * L2, _segm_map))
# loss = tf.transpose(loss)
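# tf.nn.l2_loss(t) computes sum(t ** 2) / 2, so this is half the squared
# error between the predicted per-node map L2 and the target segmentation map.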
train = tf.train.AdamOptimizer(learning_rate).minimize(loss)
batch_size = 1
dg = Datagen('data/mnist', 'data/cifar')
data, segm_map = dg.sample(batch_size, norm=False)
# Training section
epoch = 10000
# x = np.identity(G.number_of_nodes(), dtype="float32")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
loss_list = list()
for e in range(epoch):
        # Train on one sampled batch
data_batch, segm_map_batch = dg.sample(batch_size, norm=False)
data_batch = data_batch.reshape([3, 1024])[0]
data_batch = np.array(data_batch, dtype=np.int64)
data_batch = np.identity(256)[data_batch]
# print("#{} databatch:{}".format(e, data_batch))
x = data_batch.reshape([G.number_of_nodes(), 256])
segm_map_batch = np.array(segm_map_batch, dtype=np.int64)
# print("segm_map_batch.shape:{}".format(segm_map_batch.shape))
"""
indices = [[], []]
values = []
for i in range(segm_map_batch[0].shape[0]):
for j in range(segm_map_batch[0].shape[1]):
if segm_map_batch[0, i, j] != 0:
indices[0].append(i)
indices[1].append(j)
values.append(segm_map_batch[0, i, j])
print("indices:{}".format(indices))
print("values:{}".format(values))
indices = np.array([[i for i in indices[0]],
[i for i in indices[1]]], dtype=np.int64)
values = np.array([i for i in values], dtype=np.float32)
shape = np.array([32 * 32, 1], dtype=np.int64)
"""
segm_map_batch = segm_map_batch.reshape([32 * 32, 1])
# segm_map_batch = scipy.sparse.lil_matrix(segm_map_batch)
# print("x:{}".format(x.shape))
# print("L1 in sess:{}".format(sess.run(tf.shape(L1), feed_dict={X: x})))
# print("L2 in sess:{}".format(sess.run(tf.shape(L2), feed_dict={X: x})))
# print("A_rec in sess:{}".format(sess.run(tf.shape(A_rec), feed_dict={X: x})))
# print("segm_map_batch:{}".format(segm_map_batch.shape))
tloss, _ = sess.run([loss, train], feed_dict={X: x, _segm_map: segm_map_batch})
# segm_map_batch})
loss_list.append(tloss)
print("#{} loss:{}".format(e, tloss))
if e % 100 == 0:
test_loss, segm_map_pred = sess.run([loss, L2],
feed_dict={X: data_batch, _segm_map: segm_map_batch})
print("shapes input:{} output:{} target:{}".format(np.shape(data_batch), np.shape(segm_map_batch), np.shape(segm_map_pred)))
plot_segm_map(np.squeeze(data_batch), np.squeeze(segm_map_batch), np.squeeze(segm_map_pred))
"""
        # Output the training results
if (e + 1) % 100 == 0:
emb = sess.run(L2, feed_dict={X: x})
fig, ax = plt.subplots()
for i in range(G.number_of_nodes()):
ax.scatter(emb[i][0], emb[i][1], color=color[i])
plt.title("epoch" + str(e + 1))
plt.show()
plt.title("epoch" + str(e + 1))
nx.draw_networkx(G, pos=emb, node_color=color)
plt.show()
"""
"""
batch_size = 64
dropout = 0.7
dg = Datagen('data/mnist', 'data/cifar')
data, segm_map = dg.sample(batch_size)
model = Model(batch_size, dropout)
num_iter = 500
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for iter in range(num_iter):
data_batch, segm_map_batch = dg.sample(batch_size)
train_loss, _ = sess.run([model.total_loss, model.train_step],
feed_dict={model.image: data_batch, model.segm_map: segm_map_batch})
if iter % 50 == 0:
data_batch, segm_map_batch = dg.sample(batch_size, dataset='test')
test_loss, segm_map_pred = sess.run([model.total_loss, model.h4],
feed_dict={model.image: data_batch, model.segm_map: segm_map_batch})
print('iter %5i/%5i loss is %5.3f and mIOU %5.3f' % (
iter, num_iter, test_loss, calc_iou(segm_map_batch, segm_map_pred)))
# Final run
data_batch, segm_map_batch = dg.sample(batch_size, dataset='test')
test_loss, segm_map_pred = sess.run([model.total_loss, model.h4],
feed_dict={model.image: data_batch, model.segm_map: segm_map_batch})
plot_segm_map(data_batch, segm_map_batch, segm_map_pred)
"""
| [
"[email protected]"
] | |
7ef60b70a910574209907c3a9e4d6c0dc73d5b45 | 67b0d2d2e1e3b2b9fba4cfc14adc31f503bb0b91 | /AirConditioningV2/filters.py | 54b56a16c2e40a45057929cf6936b15a8652dbdb | [] | no_license | Hk4Fun/qtstudy | 02a5059555462f5e7fe626632e351f4af69206f6 | 7b38853e29c8e055f9db2828c34815000158bf28 | refs/heads/master | 2020-03-14T11:29:32.501540 | 2018-06-16T07:48:35 | 2018-06-16T07:48:35 | 131,591,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | __author__ = 'Hk4Fun'
__date__ = '2018/5/15 17:24'
import time
import datetime
from PyQt5.QtCore import QDate
from AirConditioningV2.settings import *
def mapWindSpeed_c2w(wind_speed):
return {LOW_WIND: '低风', MID_WIND: '中风', HIGH_WIND: '高风'}[wind_speed]
def mapWindSpeed_w2c(wind_speed):
return {'低风': LOW_WIND, '中风': MID_WIND, '高风': HIGH_WIND}[wind_speed]
def mapMode_c2w(mode):
return {COLD_MODE: '制冷', WARM_MODE: '制热'}[mode]
def mapUserLevel_c2w(userLevel):
return {USER_NORMAL: '普通用户', USER_VIP: 'VIP'}[userLevel]
def timeFormat(timeStamp):
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timeStamp))
def isSettle(orderId):
return '未结帐' if orderId == '0' else orderId
def mapDiscount(userLevel):
return {USER_NORMAL: NORMAL_DISCOUNT, USER_VIP: VIP_DISCOUNT}[userLevel]
def discountFormat(discount):
if discount == 1:
return '无'
return str(discount * 100) + '%'
def durationFormat(start, end): # start,end -- timestamp
start = datetime.datetime.fromtimestamp(start).replace(microsecond=0) # microsecond=0忽略毫秒数
end = datetime.datetime.fromtimestamp(end).replace(microsecond=0)
return str(end - start)
def isEqDate(date, queryDate):
t = datetime.datetime.strptime(queryDate, "%Y-%m-%d %H:%M:%S")
return date == QDate(t.year, t.month, t.day)
| [
"[email protected]"
] | |
029838e1292b4def0204d31ffebfc8890bbee7bc | 8906e04870524f190a11f3eb3caf8fe377ab3a24 | /Chapter14/Chapter_14/multiprocessing_env.py | 9c110a17857d5a7f8028ddace2f37a2e3e7c1954 | [
"MIT"
] | permissive | PacktPublishing/Hands-On-Reinforcement-Learning-for-Games | 8719c086c8410a2da2b4fb9852b029a4c8f67f60 | 609d63ee5389b80b760a17f7f43abe632d99a9bb | refs/heads/master | 2023-02-08T19:35:30.005167 | 2023-01-30T09:09:07 | 2023-01-30T09:09:07 | 231,567,217 | 54 | 32 | MIT | 2022-04-21T06:47:24 | 2020-01-03T10:43:21 | Python | UTF-8 | Python | false | false | 4,886 | py | #This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
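# Minimal usage sketch (assumes the `gym` package and a registered env id;
# each entry of env_fns must be a picklable callable, which is what the
# CloudpickleWrapper above makes possible):
#
#   import gym
#   def make_env():
#       return gym.make("CartPole-v1")
#   envs = SubprocVecEnv([make_env for _ in range(4)])
#   obs = envs.reset()                      # stacked, shape (4, obs_dim)
#   actions = [envs.action_space.sample() for _ in range(len(envs))]
#   obs, rews, dones, infos = envs.step(actions)
#   envs.close()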
| [
"[email protected]"
] | |
ab4b676c0492c9481f64943f47707bf9d6e58515 | 7453911cee47edd9414ecfc66d189dc578f7e421 | /src/greentest/3.10/test_socket.py | 211fd8c02da0a4141e85fa1d26444939d2e52c1e | [
"Python-2.0",
"MIT"
] | permissive | gevent/gevent | f20eca1852098e47f32eb062db646acfead36e71 | 6b22af0fa8eb2efa89fce36c35808948c67352b0 | refs/heads/master | 2023-08-31T19:27:29.410236 | 2023-08-31T10:26:35 | 2023-08-31T10:26:35 | 5,801,666 | 4,981 | 866 | NOASSERTION | 2023-09-13T14:16:59 | 2012-09-13T22:03:03 | Python | UTF-8 | Python | false | false | 252,000 | py | import unittest
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = socket_helper.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
if not hasattr(socket, 'IOCTL_VM_SOCKETS_GET_LOCAL_CID'):
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_j1939():
"""Check whether CAN J1939 sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
def _have_socket_bluetooth():
"""Check whether AF_BLUETOOTH sockets are supported on this host."""
try:
# RFCOMM is supported by all platforms with bluetooth support. Windows
# does not support omitting the protocol.
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
except (AttributeError, OSError):
return False
else:
s.close()
return True
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_CAN_J1939 = _have_socket_can_j1939()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
HAVE_SOCKET_UDPLITE = hasattr(socket, "IPPROTO_UDPLITE")
HAVE_SOCKET_BLUETOOTH = _have_socket_bluetooth()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = socket_helper.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPLITETest(SocketUDPTest):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
self.port = socket_helper.bind_port(self.serv)
class ThreadSafeCleanupTestCase:
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ip link set up vcan0
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
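    # e.g. struct.pack(can_frame_fmt, 0x123, 2, b"ab") yields a 16-byte
    # frame: 4-byte id, 1-byte dlc, 3 pad bytes, 8 data bytes (b"ab" is
    # null-padded to 8).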
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
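    # Adding (size % 8) pad bytes only lands on an 8-byte boundary when
    # size % 8 is 0 or 4; that holds for this format on both 32-bit
    # (36 -> 40) and LP64 (56, no padding needed) ABIs.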
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = socket_helper.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.setUp = self._setUp
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = threading_helper.wait_threads_exit()
self.wait_threads.__enter__()
self.addCleanup(self.wait_threads.__exit__, None, None, None)
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
def raise_queued_exception():
if self.queue.qsize():
raise self.queue.get()
self.addCleanup(raise_queued_exception)
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
self.addCleanup(self.done.wait)
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class ThreadedUDPLITESocketTest(SocketUDPLITETest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPLITETest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
socket_helper.bind_unix_socket(sock, path)
self.addCleanup(os_helper.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
socket_helper.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class UDPLITETestBase(InetTestBase):
"""Base class for UDPLITE-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = socket_helper.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
class UDPLITE6TestBase(Inet6TestBase):
"""Base class for UDPLITE-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
support.gc_collect() # For PyPy or other GCs.
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test3542SocketOptions(self):
# Ref. issue #35569 and https://tools.ietf.org/html/rfc3542
opts = {
'IPV6_CHECKSUM',
'IPV6_DONTFRAG',
'IPV6_DSTOPTS',
'IPV6_HOPLIMIT',
'IPV6_HOPOPTS',
'IPV6_NEXTHOP',
'IPV6_PATHMTU',
'IPV6_PKTINFO',
'IPV6_RECVDSTOPTS',
'IPV6_RECVHOPLIMIT',
'IPV6_RECVHOPOPTS',
'IPV6_RECVPATHMTU',
'IPV6_RECVPKTINFO',
'IPV6_RECVRTHDR',
'IPV6_RECVTCLASS',
'IPV6_RTHDR',
'IPV6_RTHDRDSTOPTS',
'IPV6_RTHDR_TYPE_0',
'IPV6_TCLASS',
'IPV6_USE_MIN_MTU',
}
for opt in opts:
self.assertTrue(
hasattr(socket, opt), f"Missing RFC3542 socket option '{opt}'"
)
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test socket_helper.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [socket_helper.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS, may successfully resolve these
# IPs.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = (
l_bad_values +
[_testcapi.INT_MIN-1, _testcapi.INT_MAX+1] +
[1 << 16, _testcapi.INT_MAX]
)
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = socket_helper.find_unused_port()
try:
sock.bind(("0.0.0.0", port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
name = sock.getsockname()
        # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # A new socket should start with SO_REUSEADDR disabled (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        self.assertEqual(reuse, 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
        self.assertNotEqual(reuse, 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = socket_helper.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = socket_helper.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if socket_helper.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
        # Issue 17269: test workaround for an OS X platform bug (segfault)
if hasattr(socket, 'AI_NUMERICSERV'):
try:
                # The arguments here are undefined and the call may succeed
                # or fail.  All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with socket_helper.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
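        # This exercises sendall()'s EINTR handling: an interrupted
        # sendall() must either propagate the exception raised by its
        # signal handler (raising_handler) or carry on and honor its
        # timeout (ok_handler), regardless of the clobbered errno.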
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(TimeoutError, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
        # With an open socket file object, the ResourceWarning is only
        # emitted once the file object itself is dereferenced, even though
        # the socket reference goes away first
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
encoding = None if "b" in mode else "utf-8"
with sock.makefile(mode, encoding=encoding) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(socket_helper.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (socket_helper.HOSTv6, 0, -10))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
def test_getfqdn_filter_localhost(self):
self.assertEqual(socket.getfqdn(), socket.getfqdn("0.0.0.0"))
self.assertEqual(socket.getfqdn(), socket.getfqdn("::"))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        sys.platform == 'win32',
        'Numeric scope id does not work or is undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(sys.platform == 'win32',
                         'Numeric scope id does not work or is undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
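        # SOCK_NONBLOCK/SOCK_CLOEXEC are creation-time modifier bits (0 on
        # platforms that lack them); socket.type should report the plain
        # SOCK_STREAM and stay stable across blocking-mode changes.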
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
        # To do this we fool socket.socket into believing it already has an
        # open fd: on that path it doesn't actually verify the family and
        # type, it simply populates the socket object from them.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
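        # SOCK_NONBLOCK and SOCK_CLOEXEC are excluded above because they
        # are high-valued modifier bits, not real socket kinds, and would
        # skew the max() used to pick an unknown type.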
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
# some OS like macOS ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if socket_helper.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
# flags
socket.CAN_BCM_SETTIMER
socket.CAN_BCM_STARTTIMER
socket.CAN_BCM_TX_COUNTEVT
socket.CAN_BCM_TX_ANNOUNCE
socket.CAN_BCM_TX_CP_CAN_ID
socket.CAN_BCM_RX_FILTER_ID
socket.CAN_BCM_RX_CHECK_DLC
socket.CAN_BCM_RX_NO_AUTOTIMER
socket.CAN_BCM_RX_ANNOUNCE_RESUME
socket.CAN_BCM_TX_RESET_MULTI_IDX
socket.CAN_BCM_RX_RTR_FRAME
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
address = ('', )
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
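        # A BCM message is a bcm_msg_head structure followed immediately by
        # `nframes` CAN frames.  Roughly (from <linux/can/bcm.h>):
        #
        #   struct bcm_msg_head {
        #       __u32 opcode;          /* command */
        #       __u32 flags;
        #       __u32 count;
        #       struct bcm_timeval ival1, ival2;
        #       canid_t can_id;
        #       __u32 nframes;
        #       struct can_frame frames[0];
        #   };
        #
        # Here a single TX_SEND operation carries one 16-byte frame.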
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_CAN_J1939, 'CAN J1939 required for this test.')
class J1939Test(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testJ1939Constants(self):
socket.CAN_J1939
socket.J1939_MAX_UNICAST_ADDR
socket.J1939_IDLE_ADDR
socket.J1939_NO_ADDR
socket.J1939_NO_NAME
socket.J1939_PGN_REQUEST
socket.J1939_PGN_ADDRESS_CLAIMED
socket.J1939_PGN_ADDRESS_COMMANDED
socket.J1939_PGN_PDU1_MAX
socket.J1939_PGN_MAX
socket.J1939_NO_PGN
# J1939 socket options
socket.SO_J1939_FILTER
socket.SO_J1939_PROMISC
socket.SO_J1939_SEND_PRIO
socket.SO_J1939_ERRQUEUE
socket.SCM_J1939_DEST_ADDR
socket.SCM_J1939_DEST_NAME
socket.SCM_J1939_PRIO
socket.SCM_J1939_ERRQUEUE
socket.J1939_NLA_PAD
socket.J1939_NLA_BYTES_ACKED
socket.J1939_EE_INFO_NONE
socket.J1939_EE_INFO_TX_ABORT
socket.J1939_FILTER_MAX
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testCreateJ1939Socket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
pass
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
addr = self.interface, socket.J1939_NO_NAME, socket.J1939_NO_PGN, socket.J1939_NO_ADDR
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
socket_helper.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, socket_helper.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH,
'Bluetooth sockets required for this test.')
class BasicBluetoothTest(unittest.TestCase):
def testBluetoothConstants(self):
socket.BDADDR_ANY
socket.BDADDR_LOCAL
socket.AF_BLUETOOTH
socket.BTPROTO_RFCOMM
if sys.platform != "win32":
socket.BTPROTO_HCI
socket.SOL_HCI
socket.BTPROTO_L2CAP
if not sys.platform.startswith("freebsd"):
socket.BTPROTO_SCO
def testCreateRfcommSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support L2CAP sockets")
def testCreateL2capSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support HCI sockets")
def testCreateHciSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
pass
@unittest.skipIf(sys.platform == "win32" or sys.platform.startswith("freebsd"),
"windows and freebsd do not support SCO sockets")
def testCreateScoSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
pass
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
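        # shutdown()'s "how" argument is a C int; values past INT_MAX, or
        # wrapped past UINT_MAX, must raise OverflowError instead of being
        # silently truncated.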
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class BasicUDPLITETest(ThreadedUDPLITESocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPLITESocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDPLITE
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDPLITE
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = support.LOOPBACK_TIMEOUT
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
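        # For example, the connectionless subclasses set the defaults to
        # ([], [], 0, self.serv_addr), so sendmsgToServer([MSG]) expands
        # to cli_sock.sendmsg([MSG], [], 0, self.serv_addr).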
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
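    # recvmsg_into() returns (nbytes, ancdata, msg_flags, address); the
    # override below copies the filled prefix of the buffer so callers
    # still see recvmsg()'s (data, ancdata, msg_flags, address) shape.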
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except TimeoutError:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
            # bpo-33937: the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("TimeoutError not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(TimeoutError,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
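# A minimal sketch (not part of the test suite) of how the macros tested
# above are used when sizing an ancillary-data buffer for fd passing:
# CMSG_LEN() gives the exact header-plus-payload length stored in a
# cmsghdr, while CMSG_SPACE() adds trailing padding and is the portable
# per-message contribution to a receive buffer.  Helper name hypothetical.
def _demo_cmsg_buffer_sizes(num_fds=2):
    import array
    import socket
    if not (hasattr(socket, "CMSG_LEN") and hasattr(socket, "CMSG_SPACE")):
        return None
    payload = num_fds * array.array("i").itemsize
    return {"cmsg_len": socket.CMSG_LEN(payload),
            "ancbufsize": socket.CMSG_SPACE(payload)}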
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
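# Illustrative sketch (not part of the test suite) of the fd-passing
# pattern SCMRightsTest exercises above: pack descriptors into an "i"
# array for SCM_RIGHTS, and on receipt keep only whole integers, as
# closeRecvmsgFDs() does.  Helper name is hypothetical; the caller owns
# (and must close) the returned descriptors.
def _demo_pass_fds(fds_to_send):
    import array
    import socket
    if not (hasattr(socket, "AF_UNIX") and hasattr(socket, "SCM_RIGHTS")
            and hasattr(socket.socket, "sendmsg")):
        return None
    a, b = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        a.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                            array.array("i", fds_to_send))])
        msg, ancdata, flags, addr = b.recvmsg(1, 10240)
        received = array.array("i")
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == socket.SOL_SOCKET
                    and cmsg_type == socket.SCM_RIGHTS):
                # Discard any trailing partial integer.
                received.frombytes(
                    cmsg_data[:len(cmsg_data)
                              - (len(cmsg_data) % received.itemsize)])
        return list(received)
    finally:
        a.close()
        b.close()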
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
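# Illustrative sketch (not part of the test suite) of the RFC 3542
# pattern used above: enable IPV6_RECVHOPLIMIT on the receiving socket,
# then read the hop limit back out of the ancillary data as a single C
# int.  The helper name is hypothetical and error handling is omitted.
def _demo_recv_hop_limit(sock, bufsize=1024):
    import array
    import socket
    if not (hasattr(socket, "IPPROTO_IPV6")
            and hasattr(socket, "IPV6_RECVHOPLIMIT")
            and hasattr(socket, "IPV6_HOPLIMIT")):
        return None
    sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
    msg, ancdata, flags, addr = sock.recvmsg(bufsize, 10240)
    for cmsg_level, cmsg_type, cmsg_data in ancdata:
        if (cmsg_level == socket.IPPROTO_IPV6
                and cmsg_type == socket.IPV6_HOPLIMIT):
            a = array.array("i")
            a.frombytes(cmsg_data)
            return msg, a[0]
    return msg, None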
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITETestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPLITETest(SendmsgConnectionlessTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPLITETest(RecvmsgTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPLITETest(RecvmsgIntoTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITE6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITE6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDPLITE6Test(SendmsgConnectionlessTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDPLITE6Test(RecvmsgTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDPLITE6Test(RecvmsgIntoTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDPLITE6Test(RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDPLITE6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
            super().testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
            super().testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase:
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
# Timeout for socket operations
timeout = support.LOOPBACK_TIMEOUT
    # Provide a setAlarm() method to schedule delivery of SIGALRM after
    # a given number of seconds (or cancel it if zero), along with an
    # alarm_time value appropriate to the mechanism.  Use setitimer()
    # if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError,
        # propagated from the SIGALRM handler installed in setUp(), when
        # interrupted by a signal.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
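# Illustrative sketch (not part of the test suite) of the interruption
# technique the class above uses: install a SIGALRM handler that raises,
# schedule the alarm with setitimer(), and a blocking recv() on a socket
# with a timeout is interrupted mid-call, propagating the handler's
# exception.  Unix-only; the helper name is hypothetical.
def _demo_interrupted_recv():
    import signal
    import socket
    if not hasattr(signal, "setitimer"):
        return None
    def handler(signum, frame):
        raise ZeroDivisionError  # mirrors the lambda in setUp() above
    old = signal.signal(signal.SIGALRM, handler)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(10)
    try:
        signal.setitimer(signal.ITIMER_REAL, 0.05)
        try:
            sock.recv(1024)       # blocks until the signal arrives
        except ZeroDivisionError:
            return "interrupted"  # exception propagated from the handler
        return "not interrupted"
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)
        signal.signal(signal.SIGALRM, old)
        sock.close()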
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError, propagated from the SIGALRM handler, when
        # interrupted by a signal.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here, as Python's wrapper for sendto()
        # doesn't allow a zero-length one; POSIX requires the address to
        # be ignored anyway, since the socket is connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
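# A minimal sketch (not part of the test suite) of the socketpair()
# behaviour checked above: the call returns two already-connected
# SOCK_STREAM sockets, so data written on one end is readable on the
# other with no bind/listen/accept dance.  Helper name is hypothetical.
def _demo_socketpair_roundtrip():
    import socket
    a, b = socket.socketpair()
    try:
        a.sendall(b"ping")
        return b.recv(1024)  # b"ping"
    finally:
        a.close()
        b.close()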
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
        self.assertEqual(sock.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
# When a Python socket has a non-zero timeout, it's switched
# internally to a non-blocking mode. Later, sock.sendall(),
# sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EAGAIN on all socket operations.  That's
            # how timeouts are enforced (see the illustrative
            # _demo_timeout_vs_nonblocking() sketch after this class).
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
def testInheritFlagsBlocking(self):
        # bpo-7995: when accept() is called on a listening socket with a
        # timeout and the default timeout is None, the resulting socket
        # must be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
        # bpo-7995: when accept() is called on a listening socket with a
        # timeout and the default timeout is not None, the resulting
        # socket must inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(False)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], support.LONG_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(False)
        # the client didn't send data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], support.LONG_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the client has sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
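# Illustrative sketch (not part of the test suite) of the invariant
# assert_sock_timeout() checks above: the socket reports itself blocking
# at the Python level whenever its timeout is non-zero (including None),
# while the underlying fd is put in non-blocking mode whenever any
# timeout is set.  Helper name is hypothetical; Unix-only (uses fcntl).
def _demo_timeout_vs_nonblocking():
    import os
    import socket
    try:
        import fcntl
    except ImportError:
        return None
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        results = []
        for timeout in (None, 0.0, 5.0):
            sock.settimeout(timeout)
            flags = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
            results.append((timeout, sock.getblocking(),
                            bool(flags & os.O_NONBLOCK)))
        # e.g. [(None, True, False), (0.0, False, True), (5.0, True, True)]
        return results
    finally:
        sock.close()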
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(TimeoutError, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
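# Illustrative sketch (not part of the test suite) of the makefile()
# relationship the class above tests: the file object wraps the socket
# without owning it, so the connection stays usable until both the file
# object and the socket itself are closed.  Helper name is hypothetical.
def _demo_makefile_read():
    import socket
    a, b = socket.socketpair()
    try:
        wfile = a.makefile("wb")
        rfile = b.makefile("rb")
        wfile.write(b"line one\n")
        wfile.flush()
        line = rfile.readline()  # b"line one\n"
        wfile.close()
        rfile.close()
        return line
    finally:
        a.close()
        b.close()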
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
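    # Sketch for illustration (mirrors the behavior the tests below verify):
    # the makefile object reads the socket's fd directly, so blocking mode
    # is a property of the socket itself:
    #     conn.setblocking(False)
    #     f = conn.makefile('rb', 0)
    #     f.read(1)   # -> None when no data is ready, instead of blocking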
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise TimeoutError('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = socket_helper.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = socket_helper.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = socket_helper.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
try:
socket.create_connection((HOST, 1234))
except TimeoutError:
pass
except OSError as exc:
if socket_helper.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
raise
else:
self.fail('TimeoutError not raised')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = socket_helper.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(TimeoutError, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except TimeoutError:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except TimeoutError:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except TimeoutError:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class UDPLITETimeoutTest(SocketUDPLITETest):
def testUDPLITETimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (UDPLITE)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except TimeoutError:
self.fail("caught timeout instead of error (UDPLITE)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDPLITE)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
self.assertIs(socket.error, OSError)
self.assertIs(socket.timeout, TimeoutError)
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
def testAutobind(self):
# Check that binding to an empty string binds to an available address
# in the abstract namespace as specified in unix(7) "Autobind feature".
abstract_address = b"^\0[0-9a-f]{5}"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind("")
self.assertRegex(s1.getsockname(), abstract_address)
# Each socket is bound to a different abstract address.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.bind("")
self.assertRegex(s2.getsockname(), abstract_address)
self.assertNotEqual(s1.getsockname(), s2.getsockname())
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
socket_helper.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(os_helper.TESTFN)
self.bind(self.sock, path)
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(os_helper.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(os_helper.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if os_helper.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(os_helper.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipIf(sys.platform == 'linux', 'Linux specific test')
def testEmptyAddress(self):
# Test that binding empty address fails.
self.assertRaises(OSError, self.sock.bind, "")
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules", encoding="utf-8")
except (FileNotFoundError, IsADirectoryError, PermissionError):
# It's ok if the file does not exist, is a directory or if we
# have not the permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
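                # e.g. (sketch): after s.settimeout(2.0), s.getblocking() is
                # True even though fcntl(F_GETFL) still reports O_NONBLOCK.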
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
s.setblocking(False)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = support.LOOPBACK_TIMEOUT
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(os_helper.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(os_helper.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
os_helper.unlink(os_helper.TESTFN)
def accept_conn(self):
self.serv.settimeout(support.LONG_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = os_helper.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(os_helper.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(os_helper.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(TimeoutError, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
time.sleep(1)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(os_helper.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(os_helper.TESTFN, encoding="utf-8") as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(os_helper.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
class TestMacOSTCPFlags(unittest.TestCase):
def test_tcp_keepalive(self):
self.assertTrue(socket.TCP_KEEPALIVE)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
def test_address(self):
port = socket_helper.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
                self.assertEqual(s.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
class CreateServerFunctionalTest(unittest.TestCase):
timeout = support.LOOPBACK_TIMEOUT
def echo_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
with conn:
event.wait(self.timeout)
msg = conn.recv(1024)
if not msg:
return
conn.sendall(msg)
event = threading.Event()
sock.settimeout(self.timeout)
thread = threading.Thread(target=run, args=(sock, ))
thread.start()
self.addCleanup(thread.join, self.timeout)
event.set()
def echo_client(self, addr, family):
with socket.socket(family=family) as sock:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
def test_tcp4(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port)) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_tcp6(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port),
family=socket.AF_INET6) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
# --- dual stack tests
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v4(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v6(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
@requireAttrs(socket, "send_fds")
@requireAttrs(socket, "recv_fds")
@requireAttrs(socket, "AF_UNIX")
class SendRecvFdsTests(unittest.TestCase):
def testSendAndRecvFds(self):
def close_pipes(pipes):
for fd1, fd2 in pipes:
os.close(fd1)
os.close(fd2)
def close_fds(fds):
for fd in fds:
os.close(fd)
# send 10 file descriptors
pipes = [os.pipe() for _ in range(10)]
self.addCleanup(close_pipes, pipes)
fds = [rfd for rfd, wfd in pipes]
# use a UNIX socket pair to exchange file descriptors locally
sock1, sock2 = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
with sock1, sock2:
socket.send_fds(sock1, [MSG], fds)
# request more data and file descriptors than expected
msg, fds2, flags, addr = socket.recv_fds(sock2, len(MSG) * 2, len(fds) * 2)
self.addCleanup(close_fds, fds2)
self.assertEqual(msg, MSG)
self.assertEqual(len(fds2), len(fds))
self.assertEqual(flags, 0)
# don't test addr
# test that file descriptors are connected
for index, fds in enumerate(pipes):
rfd, wfd = fds
os.write(wfd, str(index).encode())
for index, rfd in enumerate(fds2):
data = os.read(rfd, 100)
self.assertEqual(data, str(index).encode())
def setUpModule():
thread_info = threading_helper.threading_setup()
unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
74b490218935aa0cdad3e8d7ddf4d99da22c41ec | 2542edca28a456aa5c217be10419e56fc581fa4e | /dataread.py | ad68b69900a80d898c714dc5dd7e53ff56578f0a | [] | no_license | neeshuagr/profile_match_score | 16bfef57c38e6993148175bf40c29acae6e7e9a1 | 77f4f8035746f075b8232e466907f18aa3eef641 | refs/heads/master | 2020-04-29T08:22:35.568600 | 2019-03-16T15:28:11 | 2019-03-16T15:28:11 | 175,984,719 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,041 | py |
#!/usr/bin/python3.4
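"""Read source documents of various formats, extract their text, and load
the results into MongoDB."""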
import datareadfiletypes
import config
import filemanager
import utility
import dcrnlp
import custom
import datetime
import dictionaries
import pyodbc
import dbmanager
def route_dataread(filepaths):
data_read_count = int(utility.read_from_file(
config.ConfigManager().ExecutioncountFile, 'r'))
file_read_count = 0
file_path_count = 0
configdocs = custom.retrieve_data_from_DB(int(config.ConfigManager().MongoDBPort), config.ConfigManager(
).DataCollectionDB, config.ConfigManager().ConfigCollection)
docid_count = int(configdocs[0]['docid_count'])
connection = dbmanager.mongoDB_connection(
int(config.ConfigManager().MongoDBPort))
utility.write_to_file(config.ConfigManager().LogFile,
'a', 'dataread running')
for filepath in filepaths:
data_text = ''
try:
file_path_count += 1
print('File number: ' + str(file_path_count))
print('Processing file..' + filepath)
if filepath[-4:].lower() == ".txt":
data_text = datareadfiletypes.read_text_text(
filepath, data_text)
elif filepath[-4:].lower() == ".pdf":
data_text = datareadfiletypes.read_pdf_text(
filepath, data_text)
elif filepath[-5:].lower() == ".docx":
data_text = datareadfiletypes.read_docx_text(
filepath, data_text)
elif filepath[-4:].lower() == ".doc":
data_text = datareadfiletypes.read_doc_text(
filepath, data_text)
elif filepath[-4:].lower() == ".xls":
# data_text = datareadfiletypes.read_excel_text(
# filepath, data_text)
docid_count = custom.process_excel_rowdata(
filepath, docid_count)
elif filepath[-5:].lower() == ".xlsx":
# data_text = datareadfiletypes.read_excel_text(
# filepath, data_text)
docid_count = custom.process_excel_rowdata(
filepath, docid_count)
elif filepath[-4:].lower() == ".csv":
data_text = datareadfiletypes.read_csv_text(
filepath, data_text)
elif filepath[-4:].lower() == ".odt":
data_text = datareadfiletypes.read_odt_text(
filepath, data_text)
elif filepath[-4:].lower() == ".xml":
docid_count = custom.process_xml_data(filepath, docid_count)
if not data_text == '':
docid_count += 1
file_read_count += 1
# dcrnlp.extract_nounphrases_sentences(data_text)
noun_phrases = ''
dictionaries.DataProperties['description'] = data_text
dictionaries.DataProperties['nounPhrases'] = noun_phrases
dictionaries.DataProperties[
'documentType'] = utility.filefolder_from_filepath(filepath)
dictionaries.DataProperties[
'dataSource'] = config.ConfigManager().Misc # config.ConfigManager().JobPortal
dictionaries.DataProperties['doc_id'] = docid_count
dictionaries.DataProperties[
'documentTitle'] = utility.filename_from_filepath(filepath)
dictionaries.DataProperties['documentDesc'] = (
dictionaries.DataProperties['description'])[0:200]
jsonfordatastore = custom.prepare_json_for_datastore(
dictionaries.DataProperties)
jsonfordatastore_deserialized = utility.jsonstring_deserialize(
jsonfordatastore)
custom.insert_data_to_DB(
jsonfordatastore_deserialized, connection)
phrases_file_data = custom.prepare_phrases_file_data(
noun_phrases, data_read_count, file_read_count)
utility.write_to_file(
config.ConfigManager().PhraseFile, 'a', phrases_file_data)
except BaseException as ex:
exception_message = '\n' + 'Exception:' + \
str(datetime.datetime.now()) + '\n'
exception_message += 'File: ' + filepath + '\n'
exception_message += '\n' + str(ex) + '\n'
exception_message += '-' * 100
utility.write_to_file(
config.ConfigManager().LogFile, 'a', exception_message)
data_read_count += 1
utility.write_to_file(config.ConfigManager(
).ExecutioncountFile, 'w', str(data_read_count))
dictionaries.UpdateTemplateWhere['_id'] = configdocs[0]['_id']
dictionaries.UpdateTemplateSet['docid_count'] = docid_count
dictionaries.DBSet['$set'] = dictionaries.UpdateTemplateSet
custom.update_data_to_Db_noupsert(int(config.ConfigManager().MongoDBPort), config.ConfigManager().DataCollectionDB, config.ConfigManager(
).ConfigCollection, dictionaries.UpdateTemplateWhere, dictionaries.DBSet, connection)
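

# Optional refactoring sketch (comments only; assumes `import os`): the
# extension dispatch above could use a lookup table with the same readers:
#
#     READERS = {
#         '.txt': datareadfiletypes.read_text_text,
#         '.pdf': datareadfiletypes.read_pdf_text,
#         '.docx': datareadfiletypes.read_docx_text,
#         '.doc': datareadfiletypes.read_doc_text,
#         '.csv': datareadfiletypes.read_csv_text,
#         '.odt': datareadfiletypes.read_odt_text,
#     }
#     reader = READERS.get(os.path.splitext(filepath)[1].lower())
#     if reader is not None:
#         data_text = reader(filepath, data_text)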
if __name__ == "__main__":
file_paths = []
directory_list = []
directory_list = utility.string_to_array(
config.ConfigManager().DirectoryList, ',', directory_list)
file_paths = filemanager.directory_iterate(directory_list)
route_dataread(file_paths)
# utility.archive_content(
# file_paths, config.ConfigManager().ArchiveDirectory)
#connection = dbmanager.mongoDB_connection(int(config.ConfigManager().MongoDBPort))
# configdocs = custom.retrieve_data_from_DB(int(config.ConfigManager(
#).MongoDBPort), config.ConfigManager().DataCollectionDB, config.ConfigManager().ConfigCollection)
#docid_count = int(configdocs[0]['docid_count'])
# docid_count = custom.data_from_DB(config.ConfigManager().STConnStr, config.ConfigManager(
#).STJobQueryId, config.ConfigManager().JobDetails, config.ConfigManager().ST, docid_count)
# docid_count = custom.data_from_DB(config.ConfigManager().STConnStr, config.ConfigManager(
#).STCandidateQueryId, config.ConfigManager().CandidateDetails, config.ConfigManager().ST, docid_count)
# docid_count = custom.data_from_DB(config.ConfigManager().XchangeConnStr, config.ConfigManager(
#).XchangeJobQueryId, config.ConfigManager().JobDetails, config.ConfigManager().Xchange, docid_count)
# docid_count = custom.data_from_DB(config.ConfigManager().XchangeConnStr, config.ConfigManager(
#).XchangeCandidateQueryId, config.ConfigManager().CandidateDetails, config.ConfigManager().Xchange, docid_count)
#dictionaries.UpdateTemplateWhere['_id'] = configdocs[0]['_id']
#dictionaries.UpdateTemplateSet['docid_count'] = docid_count
#dictionaries.DBSet['$set'] = dictionaries.UpdateTemplateSet
# custom.update_data_to_Db_noupsert(int(config.ConfigManager().MongoDBPort), config.ConfigManager().DataCollectionDB,
# config.ConfigManager().ConfigCollection,
# dictionaries.UpdateTemplateWhere, dictionaries.DBSet, connection)
24f8c5f6b848ba541a0f03a6241859851b481ca3 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_386/ch92_2019_10_02_18_01_55_591754.py | c842af9ea317fe2aab5bd222878e1be5c6911078 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py |
d = {}
d['felipe'] = "lacombe"
d['ohara'] = "shiba"


def simplifica_dict(dicionario):
    # Assumed intent: flatten the dict into [key1, value1, key2, value2, ...]
    dict_lista = []
    for key, value in dicionario.items():
        dict_lista.append(key)
        dict_lista.append(value)
    print(dict_lista)
    for i in dict_lista:
        print(i)
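

# Example call (illustrative):
simplifica_dict(d)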
f1c03590686e9456fadcef9380c4e2b0a2217a08 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03555/s893735211.py | 5ea9b484f2e65e0cb9fc168972602094b06ef1ed | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py |
c1 = input()
c2 = input()
rc1 = c1[::-1]
rc2 = c2[::-1]
if c2 == rc1 and c1 == rc2:
print('YES')
else:
    print('NO')
f408e84b593fd962f105e3137ee4041a8f4baee6 | ea4e262f3dc18a089895fef08bedefc60b66e373 | /supervised_learning/0x01-classification/4-neuron.py | 5ceefc3c0dea7f50a83093a29231b625e4e999eb | [] | no_license | d1sd41n/holbertonschool-machine_learning | 777899d4914e315883ba0c887d891c0c8ab01c8a | 5f86dee95f4d1c32014d0d74a368f342ff3ce6f7 | refs/heads/main | 2023-07-17T09:22:36.257702 | 2021-08-27T03:44:24 | 2021-08-27T03:44:24 | 317,399,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,257 | py |
#!/usr/bin/env python3
"""[summary]
Raises:
TypeError: [description]
ValueError: [description]
"""
import numpy as np
class Neuron:
"""[summary]
"""
def __init__(self, nx):
"""[summary]
Args:
nx ([type]): [description]
Raises:
TypeError: [description]
ValueError: [description]
"""
if not isinstance(nx, int):
raise TypeError("nx must be an integer")
if nx < 1:
raise ValueError("nx must be a positive integer")
self.__W = np.random.normal(size=(1, nx))
self.__b = 0
self.__A = 0
@property
def W(self):
"""[summary]
Returns:
[type]: [description]
"""
return self.__W
@property
def b(self):
"""[summary]
Returns:
[type]: [description]
"""
return self.__b
@property
def A(self):
"""[summary]
Returns:
[type]: [description]
"""
return self.__A
def forward_prop(self, X):
"""[summary]
Args:
X ([type]): [description]
Returns:
[type]: [description]
"""
Z = np.matmul(self.__W, X) + self.__b
self.__A = self.sigmoid(Z)
return self.__A
def sigmoid(self, Z):
"""[summary]
Args:
Z ([type]): [description]
Returns:
[type]: [description]
"""
return 1 / (1 + np.exp(-Z))
def cost(self, Y, A):
"""[summary]
Args:
Y ([type]): [description]
A ([type]): [description]
Returns:
[type]: [description]
"""
m = Y.shape[1]
cost = -1 * (1 / m) * np.sum(Y * np.log(A) +
(1 - Y) * np.log(1.0000001 - A))
return cost
def evaluate(self, X, Y):
"""[summary]
Args:
X ([type]): [description]
Y ([type]): [description]
Returns:
[type]: [description]
"""
A = self.forward_prop(X)
Y_hat = np.where(A >= 0.5, 1, 0)
cost = self.cost(Y, A)
return Y_hat, cost
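# Minimal usage sketch (illustrative only, not part of the original exercise;
# assumes numpy arrays X of shape (nx, m) and binary labels Y of shape (1, m)):
#     neuron = Neuron(X.shape[0])
#     predictions, cost = neuron.evaluate(X, Y)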
| [
"[email protected]"
] | |
11afe53b2ef3fabde5fbadc485b0b55c8afb203a | b5ad428abd2dd1f541a2d442e2246e98653ecfd4 | /tensorflow/examples/speech_commands/freeze.py | c61e564463dfbbf113d2ca0ad4501a46b4b4664f | [
"Apache-2.0"
] | permissive | Yasserrahim/tensorflow | 3e6ff4e59b4312c6fbb14d39f887a828dd3be0a0 | 6f2672744a91d78a5a4a289b65805ffd0c14ffe8 | refs/heads/master | 2020-06-27T19:52:53.555660 | 2019-08-01T09:58:01 | 2019-08-01T10:04:13 | 200,027,678 | 1 | 0 | Apache-2.0 | 2019-08-01T10:12:46 | 2019-08-01T10:12:45 | null | UTF-8 | Python | false | false | 9,273 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Converts a trained checkpoint into a frozen model for mobile inference.
Once you've trained a model using the `train.py` script, you can use this tool
to convert it into a binary GraphDef file that can be loaded into the Android,
iOS, or Raspberry Pi example code. Here's an example of how to run it:
bazel run tensorflow/examples/speech_commands/freeze -- \
--sample_rate=16000 --dct_coefficient_count=40 --window_size_ms=20 \
--window_stride_ms=10 --clip_duration_ms=1000 \
--model_architecture=conv \
--start_checkpoint=/tmp/speech_commands_train/conv.ckpt-1300 \
--output_file=/tmp/my_frozen_graph.pb
One thing to watch out for is that you need to pass in the same arguments for
`sample_rate` and other command line variables here as you did for the training
script.
The resulting graph has an input for WAV-encoded data named 'wav_data', one for
raw PCM data (as floats in the range -1.0 to 1.0) called 'decoded_sample_data',
and the output is called 'labels_softmax'.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os.path
import sys
import tensorflow as tf
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
import input_data
import models
from tensorflow.python.framework import graph_util
# If it's available, load the specialized feature generator. If this doesn't
# work, try building with bazel instead of running the Python script directly.
# bazel run tensorflow/examples/speech_commands:freeze_graph
try:
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op # pylint:disable=g-import-not-at-top
except ImportError:
frontend_op = None
FLAGS = None
def create_inference_graph(wanted_words, sample_rate, clip_duration_ms,
clip_stride_ms, window_size_ms, window_stride_ms,
feature_bin_count, model_architecture, preprocess):
"""Creates an audio model with the nodes needed for inference.
Uses the supplied arguments to create a model, and inserts the input and
output nodes that are needed to use the graph for inference.
Args:
wanted_words: Comma-separated list of the words we're trying to recognize.
sample_rate: How many samples per second are in the input audio files.
clip_duration_ms: How many samples to analyze for the audio pattern.
clip_stride_ms: How often to run recognition. Useful for models with cache.
window_size_ms: Time slice duration to estimate frequencies from.
window_stride_ms: How far apart time slices should be.
feature_bin_count: Number of frequency bands to analyze.
model_architecture: Name of the kind of model to generate.
preprocess: How the spectrogram is processed to produce features, for
example 'mfcc', 'average', or 'micro'.
Raises:
Exception: If the preprocessing mode isn't recognized.
"""
words_list = input_data.prepare_words_list(wanted_words.split(','))
model_settings = models.prepare_model_settings(
len(words_list), sample_rate, clip_duration_ms, window_size_ms,
window_stride_ms, feature_bin_count, preprocess)
runtime_settings = {'clip_stride_ms': clip_stride_ms}
wav_data_placeholder = tf.placeholder(tf.string, [], name='wav_data')
decoded_sample_data = contrib_audio.decode_wav(
wav_data_placeholder,
desired_channels=1,
desired_samples=model_settings['desired_samples'],
name='decoded_sample_data')
spectrogram = contrib_audio.audio_spectrogram(
decoded_sample_data.audio,
window_size=model_settings['window_size_samples'],
stride=model_settings['window_stride_samples'],
magnitude_squared=True)
if preprocess == 'average':
fingerprint_input = tf.nn.pool(
tf.expand_dims(spectrogram, -1),
window_shape=[1, model_settings['average_window_width']],
strides=[1, model_settings['average_window_width']],
pooling_type='AVG',
padding='SAME')
elif preprocess == 'mfcc':
fingerprint_input = contrib_audio.mfcc(
spectrogram,
sample_rate,
dct_coefficient_count=model_settings['fingerprint_width'])
elif preprocess == 'micro':
if not frontend_op:
raise Exception(
'Micro frontend op is currently not available when running TensorFlow'
' directly from Python, you need to build and run through Bazel, for'
' example'
' `bazel run tensorflow/examples/speech_commands:freeze_graph`')
sample_rate = model_settings['sample_rate']
window_size_ms = (model_settings['window_size_samples'] *
1000) / sample_rate
window_step_ms = (model_settings['window_stride_samples'] *
1000) / sample_rate
int16_input = tf.cast(
tf.multiply(decoded_sample_data.audio, 32767), tf.int16)
micro_frontend = frontend_op.audio_microfrontend(
int16_input,
sample_rate=sample_rate,
window_size=window_size_ms,
window_step=window_step_ms,
num_channels=model_settings['fingerprint_width'],
out_scale=1,
out_type=tf.float32)
fingerprint_input = tf.multiply(micro_frontend, (10.0 / 256.0))
else:
raise Exception('Unknown preprocess mode "%s" (should be "mfcc",'
' "average", or "micro")' % (preprocess))
fingerprint_size = model_settings['fingerprint_size']
reshaped_input = tf.reshape(fingerprint_input, [-1, fingerprint_size])
logits = models.create_model(
reshaped_input, model_settings, model_architecture, is_training=False,
runtime_settings=runtime_settings)
# Create an output to use for inference.
tf.nn.softmax(logits, name='labels_softmax')
def main(_):
# Create the model and load its weights.
sess = tf.InteractiveSession()
create_inference_graph(
FLAGS.wanted_words, FLAGS.sample_rate, FLAGS.clip_duration_ms,
FLAGS.clip_stride_ms, FLAGS.window_size_ms, FLAGS.window_stride_ms,
FLAGS.feature_bin_count, FLAGS.model_architecture, FLAGS.preprocess)
if FLAGS.quantize:
tf.contrib.quantize.create_eval_graph()
models.load_variables_from_checkpoint(sess, FLAGS.start_checkpoint)
# Turn all the variables into inline constants inside the graph and save it.
frozen_graph_def = graph_util.convert_variables_to_constants(
sess, sess.graph_def, ['labels_softmax'])
tf.train.write_graph(
frozen_graph_def,
os.path.dirname(FLAGS.output_file),
os.path.basename(FLAGS.output_file),
as_text=False)
tf.logging.info('Saved frozen graph to %s', FLAGS.output_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--sample_rate',
type=int,
default=16000,
help='Expected sample rate of the wavs',)
parser.add_argument(
'--clip_duration_ms',
type=int,
default=1000,
help='Expected duration in milliseconds of the wavs',)
parser.add_argument(
'--clip_stride_ms',
type=int,
default=30,
help='How often to run recognition. Useful for models with cache.',)
parser.add_argument(
'--window_size_ms',
type=float,
default=30.0,
help='How long each spectrogram timeslice is',)
parser.add_argument(
'--window_stride_ms',
type=float,
default=10.0,
help='How long the stride is between spectrogram timeslices',)
parser.add_argument(
'--feature_bin_count',
type=int,
default=40,
help='How many bins to use for the MFCC fingerprint',
)
parser.add_argument(
'--start_checkpoint',
type=str,
default='',
help='If specified, restore this pretrained model before any training.')
parser.add_argument(
'--model_architecture',
type=str,
default='conv',
help='What model architecture to use')
parser.add_argument(
'--wanted_words',
type=str,
default='yes,no,up,down,left,right,on,off,stop,go',
help='Words to use (others will be added to an unknown label)',)
parser.add_argument(
'--output_file', type=str, help='Where to save the frozen graph.')
parser.add_argument(
'--quantize',
type=bool,
default=False,
help='Whether to train the model for eight-bit deployment')
parser.add_argument(
'--preprocess',
type=str,
default='mfcc',
      help='Spectrogram processing mode. Can be "mfcc", "average", or "micro"')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| [
"[email protected]"
] | |
cb884614e8d42df912c3aefa67005b33d068d5f6 | 73758dde83d1a1823c103e1a4ba71e7c95168f71 | /nsd2003/py01/day02/login2.py | 4eefdabd2111bbcb78f2b762df349d7530f21edb | [] | no_license | tonggh220/md_5_nsd_notes | 07ffdee7c23963a7a461f2a2340143b0e97bd9e1 | a58a021ad4c7fbdf7df327424dc518f4044c5116 | refs/heads/master | 2023-07-02T01:34:38.798929 | 2021-05-12T08:48:40 | 2021-05-12T08:48:40 | 393,885,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | import getpass
username = input('username: ')
password = getpass.getpass('password: ')
if username == 'bob' and password == '123456':
    print('\033[32;1mLogin successful\033[0m')
else:
    print('\033[31;1mLogin failed\033[0m')
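# \033[32;1m and \033[31;1m are ANSI escape codes for bold green and bold red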
| [
"[email protected]"
] | |
11852eaac17d838c7e11ed135d73821c3fb707bd | 8985adb377e92b4764d1b138b5c23c92fab44244 | /tests/test_open_file.py | 380fbd698fba2697e062433f5e6d758c6238ff2a | [
"MIT"
] | permissive | vuillaut/pyeventio | b8f2b4ecabb9b2c94d1e3f7d651b8c5ef83e4a5f | f7fd0f3546099e34fdab1ccb3f43f5bc515f2143 | refs/heads/master | 2020-04-11T04:44:59.932722 | 2019-01-18T07:51:59 | 2019-01-18T07:51:59 | 161,524,392 | 0 | 0 | null | 2018-12-12T17:45:57 | 2018-12-12T17:45:56 | null | UTF-8 | Python | false | false | 885 | py | import eventio
from os import path
import pkg_resources
def test_is_install_folder_a_directory():
dir_ = path.dirname(eventio.__file__)
assert path.isdir(dir_)
def test_can_open_file():
testfile = 'tests/resources/one_shower.dat'
eventio.EventIOFile(testfile)
def test_file_is_iterable():
testfile = 'tests/resources/one_shower.dat'
f = eventio.EventIOFile(testfile)
for event in f:
pass
def test_file_has_correct_types():
testfile = 'tests/resources/one_shower.dat'
f = eventio.EventIOFile(testfile)
types = [o.header.type for o in f]
assert types == [1200, 1212, 1201, 1202, 1203, 1204, 1209, 1210]
def test_types_gzipped():
testfile = 'tests/resources/one_shower.dat'
f = eventio.EventIOFile(testfile)
types = [o.header.type for o in f]
assert types == [1200, 1212, 1201, 1202, 1203, 1204, 1209, 1210]
| [
"[email protected]"
] | |
05329d547998d9406a4a55e93a03a512da6d1f66 | 494e3fbbdff5cf6edb087f3103ad5f15acbc174f | /company/migrations/0005_employee_checkedin.py | 6dd66f7f62dbe154d700daa4addec76dd04d087a | [] | no_license | TalentoUnicamp/my | 1209048acdedbb916b8ae8ec80761d09f6ad7754 | 3d87a33cd282d97dbbbd5f62658f231456f12765 | refs/heads/master | 2020-03-23T21:12:58.316033 | 2018-08-14T06:11:36 | 2018-08-14T06:11:36 | 142,090,262 | 11 | 0 | null | 2018-08-17T05:13:26 | 2018-07-24T01:53:23 | JavaScript | UTF-8 | Python | false | false | 394 | py | # Generated by Django 2.0.3 on 2018-08-12 02:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('company', '0004_company_access_level'),
]
operations = [
migrations.AddField(
model_name='employee',
name='checkedin',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
3fbbfde9112c75c2fabddbc9853ce5a774fe49d7 | c89768bbaa7849e03bc19454c84a63b12e4a97eb | /tools/upgrade/commands/targets_to_configuration.py | cf51670c642a7bf7fcb1d60da45769cd77571942 | [
"MIT"
] | permissive | shannonzhu/pyre-check | c86be8c272a78d637bbcf86cc9fb505c443d1691 | bd49c24cacb36797aed6f0d9e27742a12ff7e0a3 | refs/heads/master | 2022-11-06T06:54:57.418180 | 2020-06-15T23:14:21 | 2020-06-15T23:16:20 | 272,584,459 | 0 | 0 | null | 2020-06-16T01:49:08 | 2020-06-16T01:49:07 | null | UTF-8 | Python | false | false | 12,970 | py | # Copyright (c) 2016-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import subprocess
from pathlib import Path
from typing import List, Optional
import libcst
from typing_extensions import Final
from ..configuration import Configuration
from ..errors import Errors, PartialErrorSuppression
from ..filesystem import (
LocalMode,
add_local_mode,
find_directories,
find_files,
find_targets,
get_filesystem,
remove_non_pyre_ignores,
)
from ..repository import Repository
from .command import CommandArguments, ErrorSuppressingCommand
from .strict_default import StrictDefault
LOG: logging.Logger = logging.getLogger(__name__)
class TargetPyreRemover(libcst.CSTTransformer):
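    # Rewrites each BUCK target call: when the target type-checks with Pyre
    # (check_types is true and the options do not select mypy), the
    # typing-related fields are dropped so the new Pyre configuration can
    # take over type checking.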
def leave_Call(
self, original_node: libcst.Call, updated_node: libcst.Call
) -> libcst.Call:
check_types = False
uses_pyre = True
updated_fields = []
for field in original_node.args:
name = field.keyword
value = field.value
if not name:
continue
name = name.value
if name == "check_types":
if isinstance(value, libcst.Name):
check_types = check_types or value.value.lower() == "true"
elif name == "check_types_options":
if isinstance(value, libcst.SimpleString):
uses_pyre = uses_pyre and "mypy" not in value.value.lower()
elif name not in ["typing", "typing_options"]:
updated_fields.append(field)
if check_types and uses_pyre:
return updated_node.with_changes(args=updated_fields)
return updated_node
class TargetsToConfiguration(ErrorSuppressingCommand):
def __init__(
self,
command_arguments: CommandArguments,
*,
repository: Repository,
subdirectory: Optional[str],
glob: int,
fixme_threshold: int,
no_commit: bool,
submit: bool,
pyre_only: bool,
strict: bool,
) -> None:
super().__init__(command_arguments, repository)
self._subdirectory: Final[Optional[str]] = subdirectory
self._glob: int = glob
self._fixme_threshold: int = fixme_threshold
self._no_commit: bool = no_commit
self._submit: bool = submit
self._pyre_only: bool = pyre_only
self._strict: bool = strict
@staticmethod
def from_arguments(
arguments: argparse.Namespace, repository: Repository
) -> "TargetsToConfiguration":
command_arguments = CommandArguments.from_arguments(arguments)
return TargetsToConfiguration(
command_arguments,
repository=repository,
subdirectory=arguments.subdirectory,
glob=arguments.glob,
fixme_threshold=arguments.fixme_threshold,
no_commit=arguments.no_commit,
submit=arguments.submit,
pyre_only=arguments.pyre_only,
strict=arguments.strict,
)
@classmethod
def add_arguments(cls, parser: argparse.ArgumentParser) -> None:
super(TargetsToConfiguration, cls).add_arguments(parser)
parser.set_defaults(command=cls.from_arguments)
parser.add_argument(
"--subdirectory", help="Only upgrade TARGETS files within this directory."
)
parser.add_argument(
"--glob",
type=int,
help="Use a toplevel glob target instead of listing individual targets. \
Fall back to individual targets if errors per file ever hits given \
threshold.",
)
parser.add_argument(
"--fixme-threshold",
type=int,
help="Ignore all errors in a file if fixme count exceeds threshold.",
)
parser.add_argument(
"--strict",
action="store_true",
help="Turn on default strict mode if any targets were strict.",
)
parser.add_argument(
"--pyre-only",
action="store_true",
help="Only convert pyre targets to configuration.",
)
parser.add_argument(
"--no-commit", action="store_true", help="Keep changes in working state."
)
parser.add_argument("--submit", action="store_true", help=argparse.SUPPRESS)
def remove_target_typing_fields(self, files: List[Path]) -> None:
LOG.info("Removing typing options from %s targets files", len(files))
if self._pyre_only and not self._glob:
for path in files:
targets_file = Path(path)
source = targets_file.read_text()
output = libcst.parse_module(source).visit(TargetPyreRemover()).code
targets_file.write_text(output)
else:
typing_options_regex = [
r"typing \?=.*",
r"check_types \?=.*",
r"check_types_options \?=.*",
r"typing_options \?=.*",
]
remove_typing_fields_command = [
"sed",
"-i",
"/" + r"\|".join(typing_options_regex) + "/d",
] + [str(file) for file in files]
subprocess.run(remove_typing_fields_command)
def convert_directory(self, directory: Path) -> None:
all_targets = find_targets(directory, pyre_only=self._pyre_only)
if not all_targets:
LOG.warning("No configuration created because no targets found.")
return
if self._glob:
new_targets = ["//" + str(directory) + "/..."]
targets_files = [
directory / path
for path in get_filesystem().list(
str(directory), patterns=[r"**/TARGETS"]
)
]
else:
new_targets = []
targets_files = []
for path, targets in all_targets.items():
targets_files.append(Path(path))
new_targets += [
"//" + path.replace("/TARGETS", "") + ":" + target.name
for target in targets
]
apply_strict = self._strict and any(target.strict for target in targets)
configuration_path = directory / ".pyre_configuration.local"
if configuration_path.exists():
LOG.warning(
"Pyre project already exists at %s.\n\
Amending targets to existing configuration.",
configuration_path,
)
with open(configuration_path) as configuration_file:
configuration = Configuration(
configuration_path, json.load(configuration_file)
)
configuration.add_targets(new_targets)
configuration.deduplicate_targets()
configuration.write()
else:
LOG.info("Creating local configuration at %s.", configuration_path)
configuration_contents = {"targets": new_targets}
configuration = Configuration(configuration_path, configuration_contents)
configuration.write()
# Add newly created configuration files to version control
self._repository.add_paths([configuration_path])
# Remove all type-related target settings
self.remove_target_typing_fields(targets_files)
if not self._pyre_only:
remove_non_pyre_ignores(directory)
all_errors = configuration.get_errors()
error_threshold = self._fixme_threshold
glob_threshold = self._glob
for path, errors in all_errors:
errors = list(errors)
error_count = len(errors)
if glob_threshold and error_count > glob_threshold:
# Fall back to non-glob codemod.
LOG.info(
"Exceeding error threshold of %d; falling back to listing "
"individual targets.",
glob_threshold,
)
self._repository.revert_all(remove_untracked=True)
self._glob = None
return self.run()
if error_threshold and error_count > error_threshold:
LOG.info(
"%d errors found in `%s`. Adding file-level ignore.",
error_count,
path,
)
add_local_mode(path, LocalMode.IGNORE)
else:
try:
self._suppress_errors(Errors(errors))
except PartialErrorSuppression:
LOG.warning(f"Could not suppress all errors in {path}")
LOG.info("Run with --unsafe to force suppression anyway.")
self._repository.revert_all(remove_untracked=True)
if apply_strict:
LOG.info(
"Some targets were running strict type checking. "
"Adding strict setting to configuration."
)
strict_codemod = StrictDefault(
self._command_arguments,
repository=self._repository,
local_configuration=directory,
remove_strict_headers=True,
fixme_threshold=0,
)
strict_codemod.run()
# Lint and re-run pyre once to resolve most formatting issues
if self._lint:
if self._repository.format():
errors = configuration.get_errors(should_clean=False)
try:
self._suppress_errors(errors)
except PartialErrorSuppression:
LOG.warning(f"Could not suppress all errors in {path}")
LOG.info("Run with --unsafe to force suppression anyway.")
self._repository.revert_all(remove_untracked=True)
def run(self) -> None:
# TODO(T62926437): Basic integration testing.
subdirectory = self._subdirectory
subdirectory = Path(subdirectory) if subdirectory else Path.cwd()
LOG.info(
"Converting typecheck targets to pyre configurations in `%s`", subdirectory
)
configuration_directories = self._gather_directories(subdirectory)
converted = []
for directory in configuration_directories:
if all(
str(directory).startswith(str(converted_directory)) is False
for converted_directory in converted
):
self.convert_directory(directory)
converted.append(directory)
summary = self._repository.MIGRATION_SUMMARY
glob = self._glob
if glob:
summary += (
f"\n\nConfiguration target automatically expanded to include "
f"all subtargets, expanding type coverage while introducing "
f"no more than {glob} fixmes per file."
)
title = f"Convert type check targets in {subdirectory} to use configuration"
self._repository.submit_changes(
commit=(not self._no_commit),
submit=self._submit,
title=title,
summary=summary,
set_dependencies=False,
)
def _gather_directories(self, subdirectory: Path) -> List[Path]:
configurations = find_files(subdirectory, ".pyre_configuration.local")
configuration_directories = [
configuration.replace("/.pyre_configuration.local", "")
for configuration in configurations
]
sorted_directories = sorted(
(directory.split("/") for directory in configuration_directories),
key=lambda directory: (len(directory), directory),
)
if len(configuration_directories) == 0:
configuration_directories = [str(subdirectory)]
else:
# Fill in missing coverage
missing_directories = []
current_depth = len(str(subdirectory).split("/"))
for directory in sorted_directories:
if len(directory) <= current_depth:
continue
all_subdirectories = find_directories(
Path("/".join(directory[0:current_depth]))
)
for subdirectory in all_subdirectories:
if all(
not configuration_directory.startswith(str(subdirectory))
for configuration_directory in configuration_directories
):
missing_directories.append(subdirectory)
current_depth += 1
configuration_directories.extend(missing_directories)
return [Path(directory) for directory in configuration_directories]
| [
"[email protected]"
] | |
a03f8b20320b2b0cd2a0c8a40c42c197b83f1048 | 18ad97292b34a679b8dea8a85090541c5bbf6174 | /chess.py | 0fa35ed4de237177a9e5fefa36517a277692e1e0 | [] | no_license | Jyotirm0y/kattis | b941044e39dc36d169450480fc33fd33bd2e0f8e | 2b9c1819ba29419bbea3db2e8ad7851155abbb3a | refs/heads/master | 2023-05-31T21:11:38.350044 | 2021-06-12T08:21:47 | 2021-06-12T08:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,184 | py | n = int(input())
r = 'ABCDEFGH'
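# diagonal (bishop-style) movement on an 8x8 board: 0 moves if already there,
# 1 move if the squares share a diagonal, 2 moves via an intermediate square
# if they share a colour (|dx| + |dy| even), otherwise impossible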
for _ in range(n):
s = input().split()
a = r.index(s[0])
b = int(s[1])-1
x = r.index(s[2])
y = int(s[3])-1
m = abs(x-a)
n = abs(y-b)
if a == x and b == y:
print(0,s[0],s[1])
elif m == n:
print(1, s[0],s[1], s[2],s[3])
elif (m+n)%2 != 0:
print("Impossible")
else:
if m > n:
# move horizontally
extra = (m-n)//2
extraplus = extra + n
if x > a: # move to right
if max(b,y)+extra < 8:
# up first
if y <= b:
print(2, s[0],s[1], r[a+extra],(max(b,y)+extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[a+extraplus],(max(b,y)+extra+1), s[2],s[3])
elif min(b,y)-extra >= 0:
# down first
if y >= b:
print(2, s[0],s[1], r[a+extra],(min(b,y)-extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[a+extraplus],(min(b,y)-extra+1), s[2],s[3])
elif x < a: # move to left
if max(b,y)+extra < 8:
# up first
if y <= b:
print(2, s[0],s[1], r[a-extra],(max(b,y)+extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[a-extraplus],(max(b,y)+extra+1), s[2],s[3])
elif min(b,y)-extra >= 0:
# down first
if y >= b:
print(2, s[0],s[1], r[a-extra],(min(b,y)-extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[a-extraplus],(min(b,y)-extra+1), s[2],s[3])
elif m < n:
# move vertically
extra = (n-m)//2
extraplus = extra + m
if y > b: # move up
if max(a,x)+extra < 8:
# right first
if x <= a:
print(2, s[0],s[1], r[max(a,x)+extra],(b+extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[max(a,x)+extra],(b+extraplus+1), s[2],s[3])
elif min(a,x)-extra >= 0:
# left first
if x >= a:
print(2, s[0],s[1], r[min(a,x)-extra],(b+extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[min(a,x)-extra],(b+extraplus+1), s[2],s[3])
elif y < b: # move down
if max(a,x)+extra < 8:
# right first
if x <= a:
print(2, s[0],s[1], r[max(a,x)+extra],(b-extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[max(a,x)+extra],(b-extraplus+1), s[2],s[3])
elif min(a,x)-extra >= 0:
# left first
if x >= a:
print(2, s[0],s[1], r[min(a,x)-extra],(b-extra+1), s[2],s[3])
else:
print(2, s[0],s[1], r[min(a,x)-extra],(b-extraplus+1), s[2],s[3])
| [
"[email protected]"
] | |
2d0753b8960fa3c7bb7014d2317bc0555cd17999 | fcbe21026e7ae483c535f6eb38ffbfaaa2aa06c2 | /.history/main_20210412094009.py | 7efdf4db3a5b77a3a63e5eb7f311427f5aca19c3 | [] | no_license | robertstrauss/discordAIchatbot | 8e4c85920f73d49daeb1394afbd1ce8baffbb97b | 8347e6a591d352ace1b8fe4c4629e831763eb0ba | refs/heads/master | 2023-04-03T12:55:07.641825 | 2021-04-13T17:54:57 | 2021-04-13T17:54:57 | 357,641,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,858 | py | import discord
import re
import requests
import random
import time
import asyncio
import torch
import pandas as pd
client = discord.Client()
guildID = 755473404643115170
guild = None
channelID = 831188849946394654
talkchannel = None
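# assumed module-level state for the work-in-progress message collector
collecting = False  # whether on_message should log incoming messages
data = pd.DataFrame(columns=['content', 'time', 'author'])  # shared message log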
# async def pingean():
# while True:
# await asyncio.sleep(pinginterval)
# print(pinging)
# if (pinging):
# role = random.choice(ean.roles)
# await pingchannel.send('{}'.format(role.mention))
# def startpingean():
# global pinging
# pinging = True
# def stoppingean():
# global pinging
# pinging = False
@client.event
async def on_ready():
    global guild, talkchannel
print('We have logged in as {0.user}'.format(client))
guild = client.get_guild(guildID)
    talkchannel = client.get_channel(channelID)
@client.event
async def on_message(message):
    global collecting, data
if message.author == client.user:
return
if collecting:
data = data.append({'content': message.content,
'time': message.created_at,
'author': message.author.name}, ignore_index=True)
async def collectmessages(channel):
data = pd.DataFrame(columns=['content', 'time', 'author'])
async for msg in channel.history(limit=100000):
        if msg.author != client.user:
data = data.append({'content': msg.content,
'time': msg.created_at,
'author': msg.author.name}, ignore_index=True)
            if len(data) >= 100000:  # stop once the history limit is reached
break
data.to_csv('messagelogs/{}'.format(channel.name))
with open('token.txt', 'r') as tokentxt:
    # asyncio.get_event_loop().create_task(pingean())  # pingean is commented out above
client.run(tokentxt.read())
| [
"[email protected]"
] | |
7199acf0e62fd850cfe1c8e99807a92710e8e2f2 | 854d0673d18cf1db557d2b9b27c248dd879ba28a | /test/test1.py | 3c51d8306463a0078a64f3dbd5b8dc13fba22485 | [] | no_license | Miao1127/code-charpter3 | 51e141c0e463f1ea63f371a498d967b520f59853 | 313dae0b53f1f68fb7ce713ac3eab7e1a2d1b001 | refs/heads/master | 2023-07-15T21:27:22.688910 | 2021-08-23T01:13:59 | 2021-08-23T01:13:59 | 398,937,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 381 | py | # _*_ coding:utf-8 _*_
# Developer: 103 Zhongshan detachment - Miao Runlong
# Created: 2019/12/16 20:50
# File name: test1.py
# IDE: PyCharm
# Purpose: test global variables
import time
import sys
def show():
global a
print(a)
a = 3
def s():
global a
print(a)
a = 5
if __name__ == '__main__':
a = 1
show()
print(a)
s()
print(a)
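    # expected output: 1, 3, 3, 5 (each call prints the current global value
    # before rebinding it)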
| [
"Miao@DESKTOP-AJA95IE"
] | Miao@DESKTOP-AJA95IE |
f3112fea3c598842c6ede97199b3d47c04c3e63e | 5608a9cd3bec8cab1c3f9d7f42896107b78593cc | /tests/unit/cfngin/hooks/docker/image/test_build.py | e62a412af66a44ed3b39391f56e1849bf7f3de60 | [
"Apache-2.0"
] | permissive | troyready/runway | cdee6d94f42173c8aa0bd414620b68be36a510aa | 4fd299961a4b73df39e14f4f19a7236f7be17dd8 | refs/heads/master | 2021-06-18T16:05:30.712211 | 2021-01-14T01:44:32 | 2021-01-14T01:44:32 | 151,314,626 | 0 | 0 | Apache-2.0 | 2018-10-02T19:55:09 | 2018-10-02T19:55:08 | null | UTF-8 | Python | false | false | 8,506 | py | """Test runway.cfngin.hooks.docker.image._build."""
# pylint: disable=no-self-use,protected-access
import sys
from copy import deepcopy
from typing import TYPE_CHECKING
import pytest
from docker.models.images import Image
from mock import MagicMock
from runway.cfngin.hooks.docker.data_models import (
DockerImage,
ElasticContainerRegistryRepository,
)
from runway.cfngin.hooks.docker.hook_data import DockerHookData
from runway.cfngin.hooks.docker.image import build
from runway.cfngin.hooks.docker.image._build import (
DockerImageBuildApiOptions,
ImageBuildArgs,
)
from .....mock_docker.fake_api import FAKE_IMAGE_ID
if sys.version_info.major > 2:
from pathlib import Path # pylint: disable=E
else:
from pathlib2 import Path # type: ignore pylint: disable=E
if TYPE_CHECKING:
from pytest_mock import MockerFixture
from .....factories import MockCFNginContext
MODULE = "runway.cfngin.hooks.docker.image._build"
def test_build(cfngin_context, mocker, tmp_path):
# type: ("MockCFNginContext", "MockerFixture", Path) -> None
"""Test build."""
(tmp_path / "Dockerfile").touch()
mock_image = MagicMock(
spec=Image, id=FAKE_IMAGE_ID, tags=MagicMock(return_value=["latest"])
)
mock_logs = [{"stream": "log message\n"}, {"not-stream": "no log"}]
mock_client = MagicMock(
images=MagicMock(build=MagicMock(return_value=(mock_image, mock_logs)))
)
args = ImageBuildArgs(path=tmp_path)
mocker.patch.object(ImageBuildArgs, "parse_obj", return_value=args)
mocker.patch.object(DockerHookData, "client", mock_client)
docker_hook_data = DockerHookData()
mock_from_cfngin_context = mocker.patch.object(
DockerHookData, "from_cfngin_context", return_value=docker_hook_data
)
mock_update_context = mocker.patch.object(
DockerHookData, "update_context", return_value=docker_hook_data
)
cfngin_context.hook_data["docker"] = docker_hook_data
assert build(context=cfngin_context, **args.dict()) == docker_hook_data
mock_from_cfngin_context.assert_called_once_with(cfngin_context)
mock_client.images.build.assert_called_once_with(
path=str(args.path), **args.docker.dict()
)
mock_image.tag.assert_called_once_with(None, tag="latest")
mock_image.reload.assert_called_once()
assert isinstance(docker_hook_data.image, DockerImage)
assert docker_hook_data.image.image == mock_image
mock_update_context.assert_called_once_with(cfngin_context)
class TestDockerImageBuildApiOptions(object):
"""Test runway.cfngin.hooks.docker.image._build.DockerImageBuildApiOptions."""
def test_init(self): # type: () -> None
"""Test init."""
args = {
"buildargs": {"key": "val"},
"custom_context": True,
"extra_hosts": {"host": "local"},
"forcerm": True,
"isolation": "yes",
"network_mode": "host",
"nocache": True,
"platform": "x86",
"pull": True,
"rm": False,
"squash": True,
"tag": "latest",
"target": "dev",
"timeout": 3,
"use_config_proxy": True,
}
obj = DockerImageBuildApiOptions(**deepcopy(args))
assert obj.dict() == args
def test_init_default(self): # type: () -> None
"""Test init default."""
obj = DockerImageBuildApiOptions()
assert obj.buildargs == {}
assert obj.custom_context is False
assert not obj.extra_hosts
assert obj.forcerm is False
assert not obj.isolation
assert not obj.network_mode
assert obj.nocache is False
assert not obj.platform
assert obj.pull is False
assert obj.rm is True
assert obj.squash is False
assert not obj.tag
assert not obj.target
assert not obj.timeout
assert obj.use_config_proxy is False
class TestImageBuildArgs(object):
"""Test runway.cfngin.hooks.docker.image._build.ImageBuildArgs."""
def test_determine_repo(self):
"""Test determine_repo."""
assert (
ImageBuildArgs.determine_repo(
context=None, ecr_repo={"key": "val"}, repo="something"
)
== "something"
)
def test_determine_repo_ecr(self, mocker):
# type: ("MockerFixture") -> None
"""Test determine_repo ecr."""
repo = ElasticContainerRegistryRepository(
account_id="123456012", aws_region="us-east-1", repo_name="test"
)
mocker.patch(
MODULE + ".ElasticContainerRegistryRepository",
parse_obj=MagicMock(return_value=repo),
)
assert (
ImageBuildArgs.determine_repo(
context=None,
ecr_repo={
"repo_name": repo.name,
"account_id": repo.registry.account_id,
"aws_region": repo.registry.region,
},
repo=None,
)
== repo.fqn
)
def test_init(self, mocker, tmp_path):
# type: ("MockerFixture", Path) -> None
"""Test init."""
args = {
"docker": {"pull": True},
"dockerfile": "./dir/Dockerfile",
"ecr_repo": {"name": "test"},
"path": tmp_path,
"repo": "ecr",
"tags": ["oldest"],
}
context = MagicMock()
mock_validate_dockerfile = mocker.patch.object(
ImageBuildArgs, "_validate_dockerfile", return_value=args["dockerfile"]
)
mock_determine_repo = mocker.patch.object(
ImageBuildArgs, "determine_repo", return_value="repo"
)
obj = ImageBuildArgs.parse_obj(args, context=context)
assert obj.path == args["path"]
mock_validate_dockerfile.assert_called_once_with(tmp_path, args["dockerfile"])
assert obj.dockerfile == args["dockerfile"]
mock_determine_repo.assert_called_once_with(
context=context, ecr_repo=args["ecr_repo"], repo=args["repo"]
)
assert obj.repo == mock_determine_repo.return_value
assert obj.tags == args["tags"]
assert isinstance(obj.docker, DockerImageBuildApiOptions)
assert obj.docker.tag == mock_determine_repo.return_value
def test_init_default(self, mocker):
# type: ("MockerFixture") -> None
"""Test init default values."""
context = MagicMock()
mock_validate_dockerfile = mocker.patch.object(
ImageBuildArgs, "_validate_dockerfile", return_value="./Dockerfile"
)
obj = ImageBuildArgs(context=context)
assert obj.path == Path.cwd()
mock_validate_dockerfile.assert_called_once_with(Path.cwd(), "./Dockerfile")
assert obj.dockerfile == mock_validate_dockerfile.return_value
assert not obj.repo
assert obj.tags == ["latest"]
assert isinstance(obj.docker, DockerImageBuildApiOptions)
def test_validate_dockerfile(self, tmp_path):
# type: (Path) -> None
"""Test _validate_dockerfile."""
(tmp_path / "Dockerfile").touch()
assert (
ImageBuildArgs._validate_dockerfile(tmp_path, "./Dockerfile")
== "./Dockerfile"
)
def test_validate_dockerfile_does_not_exist(self, tmp_path):
# type: (Path) -> None
"""Test _validate_dockerfile does not exist."""
with pytest.raises(ValueError) as excinfo:
ImageBuildArgs._validate_dockerfile(tmp_path, "./Dockerfile")
assert str(excinfo.value).startswith("Dockerfile does not exist at path")
def test_validate_dockerfile_path_is_dockerfile(self, tmp_path):
# type: (Path) -> None
"""Test _validate_dockerfile does not exist."""
path = tmp_path / "Dockerfile"
path.touch()
with pytest.raises(ValueError) as excinfo:
ImageBuildArgs._validate_dockerfile(path, "./Dockerfile")
assert str(excinfo.value).startswith(
"ImageBuildArgs.path should not reference the Dockerfile directly"
)
def test_validate_dockerfile_path_is_zipfile(self, tmp_path):
# type: (Path) -> None
"""Test _validate_dockerfile path is zipfile."""
path = tmp_path / "something.zip"
path.touch()
assert (
ImageBuildArgs._validate_dockerfile(path, "./Dockerfile") == "./Dockerfile"
)
| [
"[email protected]"
] | |
df98aec7cbcf6664ef07942ff574d25f1dc989c7 | 599913671c29ca3c427514fa6cb22260d1420d35 | /st2actions/st2actions/runners/remote_script_runner.py | d0fc84e70bce2ff066dd85ba069a41d4e6435b31 | [
"Apache-2.0"
] | permissive | grengojbo/st2 | 2c9642c157f95b57a95175b04d17de7a312164cf | 24956136df165a8b8ac629ba6be1f6de0d77385a | refs/heads/master | 2020-12-30T21:42:57.664608 | 2015-08-20T14:23:56 | 2015-08-20T14:23:56 | 39,152,248 | 0 | 0 | null | 2015-07-15T17:58:43 | 2015-07-15T17:58:42 | null | UTF-8 | Python | false | false | 3,618 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from oslo_config import cfg
from st2common import log as logging
from st2actions.runners.fabric_runner import BaseFabricRunner
from st2actions.runners.fabric_runner import RUNNER_REMOTE_DIR
from st2common.models.system.action import FabricRemoteScriptAction
__all__ = [
'get_runner',
'RemoteScriptRunner'
]
LOG = logging.getLogger(__name__)
def get_runner():
return RemoteScriptRunner(str(uuid.uuid4()))
class RemoteScriptRunner(BaseFabricRunner):
def run(self, action_parameters):
remote_action = self._get_remote_action(action_parameters)
LOG.debug('Will execute remote_action : %s.', str(remote_action))
result = self._run(remote_action)
LOG.debug('Executed remote_action: %s. Result is : %s.', remote_action, result)
status = self._get_result_status(result, cfg.CONF.ssh_runner.allow_partial_failure)
return (status, result, None)
def _get_remote_action(self, action_parameters):
# remote script actions without entry_point don't make sense, user probably wanted to use
# "run-remote" action
if not self.entry_point:
msg = ('Action "%s" is missing entry_point attribute. Perhaps wanted to use '
'"run-remote" runner?')
raise Exception(msg % (self.action_name))
script_local_path_abs = self.entry_point
pos_args, named_args = self._get_script_args(action_parameters)
named_args = self._transform_named_args(named_args)
env_vars = self._get_env_vars()
remote_dir = self.runner_parameters.get(RUNNER_REMOTE_DIR,
cfg.CONF.ssh_runner.remote_dir)
remote_dir = os.path.join(remote_dir, self.liveaction_id)
return FabricRemoteScriptAction(self.action_name,
str(self.liveaction_id),
script_local_path_abs,
self.libs_dir_path,
named_args=named_args,
positional_args=pos_args,
env_vars=env_vars,
on_behalf_user=self._on_behalf_user,
user=self._username,
password=self._password,
private_key=self._private_key,
remote_dir=remote_dir,
hosts=self._hosts,
parallel=self._parallel,
sudo=self._sudo,
timeout=self._timeout,
cwd=self._cwd)
| [
"[email protected]"
] | |
b2ea215b35210f9d73283dae43957dc12d943e32 | 29345337bf86edc938f3b5652702d551bfc3f11a | /python/src/main/python/pyalink/alink/tests/examples/from_docs/test_softmaxpredictstreamop.py | ed44324ed0eac0718b830e65b094dc71dad0a64e | [
"Apache-2.0"
] | permissive | vacaly/Alink | 32b71ac4572ae3509d343e3d1ff31a4da2321b6d | edb543ee05260a1dd314b11384d918fa1622d9c1 | refs/heads/master | 2023-07-21T03:29:07.612507 | 2023-07-12T12:41:31 | 2023-07-12T12:41:31 | 283,079,072 | 0 | 0 | Apache-2.0 | 2020-07-28T02:46:14 | 2020-07-28T02:46:13 | null | UTF-8 | Python | false | false | 953 | py | import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestSoftmaxPredictStreamOp(unittest.TestCase):
def test_softmaxpredictstreamop(self):
df_data = pd.DataFrame([
[2, 1, 1],
[3, 2, 1],
[4, 3, 2],
[2, 4, 1],
[2, 2, 1],
[4, 3, 2],
[1, 2, 1],
[5, 3, 3]
])
batchData = BatchOperator.fromDataframe(df_data, schemaStr='f0 int, f1 int, label int')
dataTest = StreamOperator.fromDataframe(df_data, schemaStr='f0 int, f1 int, label int')
colnames = ["f0","f1"]
lr = SoftmaxTrainBatchOp().setFeatureCols(colnames).setLabelCol("label")
model = batchData.link(lr)
predictor = SoftmaxPredictStreamOp(model).setPredictionCol("pred")
predictor.linkFrom(dataTest).print()
StreamOperator.execute()
pass | [
"[email protected]"
] | |
6b9f62e6d1a35e9191701aaa09cff2627943e80f | 618c43d7bbed328f3a33320b3d50ad0f5817a08f | /tutorgame/tutorgame/wsgi.py | f21e83699306093d510f1520a515e3b3caf91f31 | [] | no_license | tomaccosheep/capstone-draft-8 | fc2546d7d8de272f9dc1bf84bedad88ec5d9c615 | 8bc08bade514a612b3911f487aba519ec9f07975 | refs/heads/master | 2021-05-13T11:46:29.497414 | 2018-02-06T00:17:08 | 2018-02-06T00:17:08 | 117,140,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 396 | py | """
WSGI config for tutorgame project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tutorgame.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
20d782976fa64be463fcafcdcad085eb498f3c78 | 3034e86347c71bf7e7af9e5f7aa44ab5ad61e14b | /pweb/day06/queue2.py | 1e303bf16b69ae88861efeb393b81111e60f2ba4 | [] | no_license | jason12360/AID1803 | bda039b82f43d6609aa8028b0d9598f2037c23d5 | f0c54a3a2f06881b3523fba7501ab085cceae75d | refs/heads/master | 2020-03-17T00:43:42.541761 | 2018-06-29T10:07:44 | 2018-06-29T10:07:44 | 133,127,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 293 | py | from multiprocessing import Process,Queue
import time
# create the message queue shared by the two child processes
q = Queue()
def fun1():
    time.sleep(1)
    q.put('message from process 1')
def fun2():
    time.sleep(2)
    print('received message:', q.get())
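# Queue is process-safe: fun2 blocks in q.get() until fun1 puts the message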
p1 = Process(target = fun1)
p2 = Process(target = fun2)
p1.start()
p2.start()
p1.join()
p2.join() | [
"[email protected]"
] | |
3f84cd518baad07c427aacb915704490f4665028 | 452e376385cf9294c7f416748e353ddbfeb8460c | /migrations/versions/d2d0eb2fe55a_create_info_type_table.py | 41952b64c1710a34e5225043c0bb4c6c68964741 | [] | no_license | huozhihui/atuiguang | e7576de0acc53998a67669f577cb371f386a001d | 7e6db16746e6124cd95a2cf71d063db67a9b0d1f | refs/heads/master | 2021-01-20T08:37:50.479023 | 2017-09-05T14:28:46 | 2017-09-05T14:28:46 | 101,567,814 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | """create info_type table
Revision ID: d2d0eb2fe55a
Revises: 818a0a098c23
Create Date: 2017-09-02 18:54:31.644588
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd2d0eb2fe55a'
down_revision = '818a0a098c23'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('info_types',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(length=30), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('info_types')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
ac4dd52f471a4c7c45164cef50da92fa29647ebb | abc422f58ad053bcbb6653ba15b66e46d220a199 | /serial_scripts/floatingip/__init__.py | a8287683071acc598f539ff0ee825069cba920a6 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | tungstenfabric/tf-test | d3efff59bca931b614d0008260b2c0881d1fc009 | 4b9eca7eb182e5530223131ecab09d3bdf366407 | refs/heads/master | 2023-02-26T19:14:34.345423 | 2023-01-11T08:45:18 | 2023-01-11T10:37:25 | 265,231,958 | 8 | 22 | null | 2023-02-08T00:53:29 | 2020-05-19T11:46:12 | Python | UTF-8 | Python | false | false | 10 | py | """FIP"""
| [
"[email protected]"
] | |
0eee740e8053dfaa728e96765f202ca5904ce08d | 37c3b81ad127c9e3cc26fa9168fda82460ca9bda | /Baekjoon/boj_2628.py | 3860cbf720e66de2d4735521eb108e758f043ee3 | [] | no_license | potomatoo/TIL | 5d85b69fdaed68966db7cfe2a565b7c64ed3e816 | 395dc190fa13e5ed036e1e3c7d9e0bc2e1ee4d6c | refs/heads/master | 2021-07-08T16:19:40.410097 | 2021-04-19T02:33:40 | 2021-04-19T02:33:40 | 238,872,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | w, h = map(int,input().split())
n = int(input())
garo = []
sero = []
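# garo: horizontal cut positions, sero: vertical cut positions; the largest
# remaining piece is the product of the widest gaps between adjacent cuts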
for i in range(n):
where, idx = map(int,input().split())
if where == 0:
garo.append(idx)
else:
sero.append(idx)
garo.append(0)
sero.append(0)
garo.append(h)
sero.append(w)
garo.sort()
sero.sort()
ls1 = []
ls2 = []
for i in range(len(garo)-1):
ls1.append(garo[i+1] - garo[i])
for i in range(len(sero)-1):
ls2.append(sero[i+1] - sero[i])
print(max(ls1)*max(ls2)) | [
"[email protected]"
] | |
c76607201a54c4b15b585cebd9ac427bc7e11dd0 | af32cdbbf31d52d01753ddfa5e813e851411403b | /tests/job/test_databaseproperties.py | 5b4cbb469a7497b64814c7ed06d2148275bafbf3 | [
"BSD-3-Clause"
] | permissive | pmrv/pyiron_base | 3e72298df7790f4c3b84d78c990b5d7e12e9016b | af1729708a8226575ca2c84f574e7cb046b7f7cd | refs/heads/master | 2022-12-25T03:15:09.270651 | 2020-09-13T13:56:40 | 2020-09-13T13:56:40 | 294,116,811 | 0 | 0 | BSD-3-Clause | 2020-09-09T13:10:31 | 2020-09-09T13:10:31 | null | UTF-8 | Python | false | false | 5,610 | py | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import unittest
import datetime
import os
from pyiron_base.project.generic import Project
from pyiron_base.job.core import DatabaseProperties
class TestDatabaseProperties(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.database_entry = {
"id": 150,
"parentid": None,
"masterid": None,
"projectpath": "/Users/jan/PyIron_data/projects/",
"project": "2019-02-14-database-properties/test/",
"job": "vasp",
"subjob": "/vasp",
"chemicalformula": "Fe2",
"status": "finished",
"hamilton": "Vasp",
"hamversion": "5.4",
"username": "pyiron",
"computer": "[email protected]#1",
"timestart": datetime.datetime(2019, 2, 14, 8, 4, 7, 248427),
"timestop": datetime.datetime(2019, 2, 14, 8, 4, 8, 366365),
"totalcputime": 1.0,
}
cls.database_property = DatabaseProperties(job_dict=cls.database_entry)
def test_properties(self):
self.assertEqual(self.database_property.id, 150)
self.assertEqual(self.database_property.parentid, None)
self.assertEqual(self.database_property.masterid, None)
self.assertEqual(
self.database_property.projectpath, "/Users/jan/PyIron_data/projects/"
)
self.assertEqual(
self.database_property.project, "2019-02-14-database-properties/test/"
)
self.assertEqual(self.database_property.job, "vasp")
self.assertEqual(self.database_property.subjob, "/vasp")
self.assertEqual(self.database_property.chemicalformula, "Fe2")
self.assertEqual(self.database_property.status, "finished")
self.assertEqual(self.database_property.hamilton, "Vasp")
self.assertEqual(self.database_property.hamversion, "5.4")
self.assertEqual(self.database_property.username, "pyiron")
self.assertEqual(
self.database_property.computer, "[email protected]#1"
)
self.assertEqual(
self.database_property.timestart,
datetime.datetime(2019, 2, 14, 8, 4, 7, 248427),
)
self.assertEqual(
self.database_property.timestop,
datetime.datetime(2019, 2, 14, 8, 4, 8, 366365),
)
self.assertEqual(self.database_property.totalcputime, 1.0)
def test_dir(self):
self.assertEqual(
sorted(list(self.database_entry.keys())),
sorted(dir(self.database_property)),
)
def test_bool(self):
self.assertTrue(bool(self.database_property))
self.assertFalse(bool(DatabaseProperties()))
with self.assertRaises(AttributeError):
_ = DatabaseProperties().job
class DatabasePropertyIntegration(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.file_location = os.path.dirname(os.path.abspath(__file__))
cls.project = Project(os.path.join(cls.file_location, "database_prop"))
cls.ham = cls.project.create_job('ScriptJob', "job_test_run")
cls.ham.save()
@classmethod
def tearDownClass(cls):
project = Project(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "database_prop")
)
ham = project.load(project.get_job_ids()[0])
ham.remove()
project.remove(enable=True)
def test_properties(self):
job_db_entry_dict = self.ham.project.db.get_item_by_id(self.ham.job_id)
self.assertIsNotNone(job_db_entry_dict)
self.assertEqual(self.ham.database_entry.id, job_db_entry_dict["id"])
self.assertEqual(
self.ham.database_entry.parentid, job_db_entry_dict["parentid"]
)
self.assertEqual(
self.ham.database_entry.masterid, job_db_entry_dict["masterid"]
)
self.assertEqual(self.ham.database_entry.projectpath, self.project.root_path)
self.assertEqual(self.ham.database_entry.project, self.project.project_path)
self.assertEqual(self.ham.database_entry.job, "job_test_run")
self.assertEqual(self.ham.database_entry.subjob, "/job_test_run")
self.assertEqual(self.ham.database_entry.status, "created")
self.assertEqual(self.ham.database_entry.hamilton, 'Script')
self.assertEqual(self.ham.database_entry.hamversion, "0.1")
self.assertEqual(self.ham.database_entry.username, "pyiron")
def test_inspect_job(self):
job_inspect = self.project.inspect(self.ham.job_name)
self.assertIsNotNone(job_inspect)
self.assertEqual(job_inspect.database_entry.parentid, None)
self.assertEqual(job_inspect.database_entry.masterid, None)
self.assertEqual(job_inspect.database_entry.projectpath, self.project.root_path)
self.assertEqual(job_inspect.database_entry.project, self.project.project_path)
self.assertEqual(job_inspect.database_entry.job, "job_test_run")
self.assertEqual(job_inspect.database_entry.subjob, "/job_test_run")
self.assertEqual(job_inspect.database_entry.status, "created")
self.assertEqual(job_inspect.database_entry.hamilton, 'Script')
self.assertEqual(job_inspect.database_entry.hamversion, "0.1")
self.assertEqual(job_inspect.database_entry.username, "pyiron")
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
40607e61fa5779815495a4023b1f384053fd8c9b | 5e1edfaad0b6857393b7a54505a6340f8205772a | /H2TauTau/cfgPython/et/subs2018/et_2018_data.py | 22fcfb3afb39c2ee35851500a2453d9f34b75e65 | [] | no_license | lucastorterotot/cmgtools-lite | ae3b042c008316c7a6253c81fac0ca472adb973d | 849832f279316a6c965b91ffa910f953bde33dc2 | refs/heads/htt_10_4_0_v1 | 2021-06-03T07:31:26.460244 | 2020-02-28T13:11:50 | 2020-03-19T16:03:10 | 128,908,833 | 0 | 2 | null | 2020-04-21T11:21:39 | 2018-04-10T09:35:11 | Python | UTF-8 | Python | false | false | 17,572 | py | import os
import re
import ROOT
import PhysicsTools.HeppyCore.framework.config as cfg
from PhysicsTools.HeppyCore.framework.config import printComps
from PhysicsTools.HeppyCore.framework.heppy_loop import getHeppyOption
import logging
logging.shutdown()
# reload(logging)
logging.basicConfig(level=logging.WARNING)
from PhysicsTools.HeppyCore.framework.event import Event
Event.print_patterns = ['*taus*', '*muons*', '*electrons*', 'veto_*',
'*dileptons_*', '*jets*']
###############
# Options
###############
# Get all heppy options; set via "-o production" or "-o production=True"
# production = True: run on batch; production = False: run locally
test = getHeppyOption('test', False)
syncntuple = getHeppyOption('syncntuple', False)
data = getHeppyOption('data', False) # set later
embedded = getHeppyOption('embedded', False) # set later
if embedded:
data = True
add_sys = getHeppyOption('add_sys', True)
reapplyJEC = getHeppyOption('reapplyJEC', True)
samples_name = getHeppyOption('samples_name', 'data_single_electron') # options : DY, TTbar, generic_background, data_tau, data_single_muon, data_single_electron, embedded_tt, embedded_mt, embedded_et, sm_higgs, mssm_signals, mc_higgs_susy_bb_amcatnlo
AAA = getHeppyOption('AAA', 'Lyon') # options : global, Lyon
from CMGTools.RootTools.samples.ComponentCreator import ComponentCreator
if AAA == 'Lyon':
ComponentCreator.useLyonAAA = True
else:
ComponentCreator.useAAA = True
if 'data' in samples_name:
data = True
elif 'embedded' in samples_name:
    data = True
    embedded = True
else:
    data = False
    embedded = False
###############
# Components
###############
from CMGTools.H2TauTau.heppy.sequence.common import samples_lists
from CMGTools.RootTools.utils.splitFactor import splitFactor
from CMGTools.H2TauTau.proto.samples.fall17.triggers_tauEle import mc_triggers, mc_triggerfilters, embed_triggers, embed_triggerfilters
from CMGTools.H2TauTau.proto.samples.fall17.triggers_tauEle import data_triggers, data_triggerfilters
selectedComponents = samples_lists[samples_name]
# subset_selections = ['']
# selectedComponents_ = []
# for subset_selection in subset_selections:
# selectedComponents_ += [comp for comp in selectedComponents if subset_selection in comp.name]
# selectedComponents = selectedComponents_
n_events_per_job = 1e5
for sample in selectedComponents:
if data:
sample.triggers = data_triggers
sample.triggerobjects = data_triggerfilters
if embedded:
sample.triggerobjects = embed_triggerfilters
else:
sample.triggers = mc_triggers
sample.triggerobjects = mc_triggerfilters
sample.splitFactor = splitFactor(sample, n_events_per_job)
sample.channel = 'et'
if test:
cache = True
selectedComponents = [selectedComponents[0]]
for comp in selectedComponents:
comp.files = comp.files[:1]
comp.splitFactor = 1
comp.fineSplitFactor = 1
# comp.files = ['file1.root']
events_to_pick = []
#KIT's skimming function
def skim_KIT(event):
flags = [
'Flag_goodVertices',
'Flag_globalTightHalo2016Filter',
'Flag_globalSuperTightHalo2016Filter',
'Flag_HBHENoiseFilter',
'Flag_HBHENoiseIsoFilter',
'Flag_EcalDeadCellTriggerPrimitiveFilter',
'Flag_BadPFMuonFilter',
'Flag_BadChargedCandidateFilter',
'Flag_eeBadScFilter',
'Flag_ecalBadCalibFilter']
if embedded or data:
flags = ['Flag_goodVertices','Flag_globalSuperTightHalo2016Filter','Flag_HBHENoiseFilter','Flag_HBHENoiseIsoFilter','Flag_EcalDeadCellTriggerPrimitiveFilter','Flag_BadPFMuonFilter','Flag_BadChargedCandidateFilter','Flag_eeBadScFilter','Flag_ecalBadCalibFilter']
ids = [
'againstElectronVLooseMVA6',
'againstMuonLoose3',
'byVLooseIsolationMVArun2017v2DBoldDMwLT2017']
return all([getattr(event,x)==1 for x in flags]) and\
event.veto_third_lepton_electrons_passed and\
event.veto_third_lepton_muons_passed and\
all([event.dileptons_sorted[0].leg2().tauID(x) for x in ids])
from CMGTools.H2TauTau.heppy.sequence.common import debugger
condition = None # lambda event : len(event.sel_taus)>2
###############
# Analyzers
###############
from CMGTools.H2TauTau.heppy.analyzers.Selector import Selector
def select_tau(tau):
return tau.pt() >= 23 and \
abs(tau.eta()) <= 2.3 and \
abs(tau.leadChargedHadrCand().dz()) < 0.2 and \
tau.tauID('decayModeFinding') > 0.5 and \
abs(tau.charge()) == 1. and \
tau.tauID('byVVLooseIsolationMVArun2017v2DBoldDMwLT2017')
sel_taus = cfg.Analyzer(
Selector,
'sel_taus',
output = 'sel_taus',
src = 'taus',
filter_func = select_tau
)
from CMGTools.H2TauTau.heppy.analyzers.EventFilter import EventFilter
one_tau = cfg.Analyzer(
EventFilter,
'one_tau',
src = 'sel_taus',
filter_func = lambda x : len(x)>0
)
def select_electron(electron):
return electron.pt() >= 25 and \
abs(electron.eta()) <= 2.1 and \
abs(electron.dxy()) < 0.045 and \
abs(electron.dz()) < 0.2 and \
electron.passConversionVeto() and \
electron.gsfTrack().hitPattern().numberOfLostHits(ROOT.reco.HitPattern.MISSING_INNER_HITS) <= 1 and \
electron.id_passes("mvaEleID-Fall17-noIso-V2","wp90")
sel_electrons = cfg.Analyzer(
Selector,
'sel_electrons',
output = 'sel_electrons',
src = 'electrons',
filter_func = select_electron
)
one_electron = cfg.Analyzer(
EventFilter,
'one_electron',
src = 'sel_electrons',
filter_func = lambda x : len(x)>0
)
# dilepton veto ==============================================================
def select_electron_dilepton_veto(electron):
return electron.pt() > 15 and \
abs(electron.eta()) < 2.5 and \
electron.id_passes('cutBasedElectronID-Fall17-94X-V2', 'veto') and \
abs(electron.dxy()) < 0.045 and \
abs(electron.dz()) < 0.2 and \
electron.iso_htt() < 0.3
sel_electrons_dilepton_veto = cfg.Analyzer(
Selector,
'dileptonveto_electrons',
output = 'sel_electrons_dilepton_veto',
src = 'electrons',
filter_func = select_electron_dilepton_veto
)
from CMGTools.H2TauTau.heppy.analyzers.DiLeptonVeto import DiLeptonVeto
dilepton_veto = cfg.Analyzer(
DiLeptonVeto,
output = 'veto_dilepton_passed',
src = 'sel_electrons_dilepton_veto',
drmin = 0.15
)
# ele tau pair ================================================================
from CMGTools.H2TauTau.heppy.analyzers.DiLeptonAnalyzer import DiLeptonAnalyzer
dilepton = cfg.Analyzer(
DiLeptonAnalyzer,
output = 'dileptons',
l1 = 'sel_electrons',
l2 = 'sel_taus',
dr_min = 0.5
)
from CMGTools.H2TauTau.heppy.analyzers.Sorter import Sorter
dilepton_sorted = cfg.Analyzer(
Sorter,
output = 'dileptons_sorted',
src = 'dileptons',
# sort by ele iso, ele pT, tau iso, tau pT
metric = lambda dl: (dl.leg1().iso_htt(),
-dl.leg1().pt(),
-dl.leg2().tauID('byIsolationMVArun2017v2DBoldDMwLTraw2017'),
-dl.leg2().pt()),
reverse = False
)
sequence_dilepton = cfg.Sequence([
sel_taus,
one_tau,
sel_electrons,
one_electron,
sel_electrons_dilepton_veto,
dilepton_veto,
dilepton,
dilepton_sorted,
])
# weights ================================================================
# id weights
from CMGTools.H2TauTau.heppy.analyzers.TauIDWeighter import TauIDWeighter
tauidweighter_general = cfg.Analyzer(
TauIDWeighter,
'TauIDWeighter_general',
taus = lambda event: [event.dileptons_sorted[0].leg2()]
)
tauidweighter = cfg.Analyzer(
TauIDWeighter,
'TauIDWeighter',
taus = lambda event: [event.dileptons_sorted[0].leg2()],
WPs = {'JetToTau':'Tight', # dummy, no weights for jet fakes
'TauID':'Tight',
'MuToTaufake':'Loose',
'EToTaufake':'VLoose'}
)
ws_ele_idiso_vars_dict = {'e_pt':lambda ele:ele.pt(),
'e_eta':lambda ele:ele.eta()}
ws_ele_idiso_func_dict = {'id':'e_id90_kit_ratio',
'iso':'e_iso_kit_ratio',
'trk':'e_trk_ratio'}
from CMGTools.H2TauTau.heppy.analyzers.LeptonsWeighter import LeptonsWeighter
eleidisoweighter = cfg.Analyzer(
LeptonsWeighter,
'EleIDisoWeighter',
workspace_path = '$CMSSW_BASE/src/CMGTools/H2TauTau/data/htt_scalefactors_2017_v2.root',
legs = lambda event: [event.dileptons_sorted[0].leg1()],
leg1_vars_dict = ws_ele_idiso_vars_dict,
leg1_func_dict = ws_ele_idiso_func_dict
)
# trigger weights
ws_ele_vars_dict = {'e_pt':lambda ele:ele.pt(),
'e_eta':lambda ele:ele.eta()}
ws_tau_vars_dict = {'t_pt':lambda tau:tau.pt(),
't_eta':lambda tau:tau.eta(),
't_phi':lambda tau:tau.phi()}
ws_ele_func_dict = {'e':'e_trg27_trg32_trg35_kit_ratio',
'et':'e_trg_EleTau_Ele24Leg_desy_ratio'}
ws_tau_func_dict = {'et':'e_trg24_ratio'}
from CMGTools.H2TauTau.heppy.analyzers.TriggerWeighter import TriggerWeighter
triggerweighter = cfg.Analyzer(
TriggerWeighter,
'TriggerWeighter',
workspace_path = '$CMSSW_BASE/src/CMGTools/H2TauTau/data/htt_scalefactors_2017_v2.root',
legs = lambda event: [event.dileptons_sorted[0].leg1(),event.dileptons_sorted[0].leg2()],
leg1_vars_dict = ws_ele_vars_dict,
leg2_vars_dict = ws_tau_vars_dict,
leg1_func_dict = ws_ele_func_dict,
leg2_func_dict = ws_tau_func_dict
)
# from CMGTools.H2TauTau.heppy.analyzers.FakeFactorAnalyzer import FakeFactorAnalyzer
# fakefactor = cfg.Analyzer(
# FakeFactorAnalyzer,
# 'FakeFactorAnalyzer',
# channel = 'et',
# filepath = '$CMSSW_BASE/src/HTTutilities/Jet2TauFakes/data/MSSM2016/20170628_medium/{}/{}/fakeFactors_20170628_medium.root',
# met = 'pfmet'
# )
# recoil correction =======================================================
wpat = re.compile(r'W\d?Jet.*')
for comp in selectedComponents:
if any(x in comp.name for x in ['ZZ','WZ','VV','WW','T_','TBar_']):
comp.recoil_correct = False
match = wpat.match(comp.name)
if any(x in comp.name for x in ['DY','Higgs']) or not (match is None):
comp.recoil_correct = True
comp.METSysFile = 'HTT-utilities/RecoilCorrections/data/PFMEtSys_2017.root'
if any(x in comp.name for x in ['TT']):
comp.recoil_correct = False
# embedded ================================================================
from CMGTools.H2TauTau.heppy.analyzers.EmbeddedAnalyzer import EmbeddedAnalyzer
embedded_ana = cfg.Analyzer(
EmbeddedAnalyzer,
name = 'EmbeddedAnalyzer',
channel = 'et'
)
# ntuple ================================================================
if syncntuple:
skim_func = lambda x: True
else:
    skim_func = skim_KIT
from CMGTools.H2TauTau.heppy.analyzers.NtupleProducer import NtupleProducer
from CMGTools.H2TauTau.heppy.ntuple.ntuple_variables import eletau as event_content_eletau
ntuple = cfg.Analyzer(
NtupleProducer,
name = 'NtupleProducer',
outputfile = 'events.root',
treename = 'events',
event_content = event_content_eletau,
skim_func = skim_func
)
from CMGTools.H2TauTau.heppy.sequence.common import sequence_beforedil, sequence_afterdil, trigger, met_filters, trigger_match, httgenana
sequence = sequence_beforedil
sequence.extend( sequence_dilepton )
sequence.extend( sequence_afterdil )
if embedded:
sequence.append(embedded_ana)
# if data:
# sequence.append(fakefactor)
sequence.append(tauidweighter_general)
sequence.append(tauidweighter)
sequence.append(eleidisoweighter)
sequence.append(triggerweighter)
sequence.append(ntuple)
# if embedded:
# sequence = [x for x in sequence if x.name not in ['JSONAnalyzer']]
if events_to_pick:
from CMGTools.H2TauTau.htt_ntuple_base_cff import eventSelector
eventSelector.toSelect = events_to_pick
sequence.insert(0, eventSelector)
# the following is declared in case this cfg is used in input to the
# heppy.py script
from PhysicsTools.HeppyCore.framework.eventsfwlite import Events
config = cfg.Config(components=selectedComponents,
sequence=sequence,
services=[],
events_class=Events
)
printComps(config.components, True)
### systematics
from CMGTools.H2TauTau.heppy.analyzers.Calibrator import Calibrator
import copy
nominal = config
configs = {'nominal':nominal}
up_down = ['up','down']
### top pT reweighting
# def config_top_pT_reweighting(up_or_down):
# new_config = copy.deepcopy(nominal)
# for cfg in new_config.sequence:
# if cfg.name == 'httgenana':
# cfg.top_systematic = up_or_down
# return new_config
# if samples_name=='TTbar':
# for up_or_down in up_down:
# configs['top_pT_reweighting_{}'.format(up_or_down)] = config_top_pT_reweighting(up_or_down)
### DY pT reweighting
# def config_DY_pT_reweighting(up_or_down):
# new_config = copy.deepcopy(nominal)
# for cfg in new_config.sequence:
# if cfg.name == 'httgenana':
# cfg.DY_systematic = up_or_down
# return new_config
# if samples_name=='DY':
# for up_or_down in up_down:
# configs['DY_pT_reweighting_{}'.format(up_or_down)] = config_DY_pT_reweighting(up_or_down)
### MET recoil
def config_METrecoil(response_or_resolution, up_or_down):
equivalency_dict = {'response':0,
'resolution':1,
'up':0,
'down':1}
response_or_resolution = equivalency_dict[response_or_resolution]
up_or_down = equivalency_dict[up_or_down]
new_config = copy.deepcopy(nominal)
for cfg in new_config.sequence:
if cfg.name == 'PFMetana':
cfg.METSys = [response_or_resolution, up_or_down]
return new_config
response_or_resolution = ['response','resolution']
if not data:
for sys in response_or_resolution:
for up_or_down in up_down:
configs['METrecoil_{}_{}'.format(sys,up_or_down)] = config_METrecoil(sys, up_or_down)
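# Each systematic variant is a deep copy of the nominal configuration with a
# single analyzer setting changed. For instance (illustrative):
#
#     cfg_up = configs['METrecoil_response_up']
#
# is identical to `nominal` except that its PFMetana analyzer carries
# METSys = [0, 0] (response, up).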
### MET unclustered uncertainty
from CMGTools.H2TauTau.heppy.sequence.common import pfmetana
def config_METunclustered(up_or_down):
new_config = copy.deepcopy(nominal)
for cfg in new_config.sequence:
if cfg.name == 'PFMetana':
cfg.unclustered_sys = up_or_down
return new_config
if not data:
for up_or_down in up_down:
configs['METunclustered_{}'.format(up_or_down)] = config_METunclustered(up_or_down)
### tau energy scale
from CMGTools.H2TauTau.heppy.sequence.common import tauenergyscale
def config_TauEnergyScale(dm_name, gm_name, up_or_down):
tau_energyscale_ana_index = nominal.sequence.index(tauenergyscale)
new_config = copy.deepcopy(nominal)
tau_calibrator = cfg.Analyzer(
Calibrator,
src = 'taus',
calibrator_factor_func = lambda x: getattr(x,'TES_{}_{}_{}'.format(gm_name,dm_name,up_or_down),1.)
)
new_config.sequence.insert(tau_energyscale_ana_index+1, tau_calibrator)
return new_config
TES = [['HadronicTau','1prong0pi0'],
['HadronicTau','1prong1pi0'],
['HadronicTau','3prong0pi0'],
['HadronicTau','3prong1pi0'],
['promptMuon','1prong0pi0'],
['promptEle','1prong0pi0'],
['promptEle','1prong1pi0']]
TES_embed = [['HadronicTau','1prong0pi0'],
['HadronicTau','1prong1pi0'],
['HadronicTau','3prong0pi0']]
if (not data):
for gm_name, dm_name in TES:
configs['TES_{}_{}_up'.format(gm_name, dm_name)] = config_TauEnergyScale(dm_name, gm_name, 'up')
configs['TES_{}_{}_down'.format(gm_name, dm_name)] = config_TauEnergyScale(dm_name, gm_name, 'down')
elif (data and embedded):
for gm_name, dm_name in TES_embed:
configs['TES_{}_{}_up'.format(gm_name, dm_name)] = config_TauEnergyScale(dm_name, gm_name, 'up')
configs['TES_{}_{}_down'.format(gm_name, dm_name)] = config_TauEnergyScale(dm_name, gm_name, 'down')
### Jet energy scale
from CMGTools.H2TauTau.heppy.sequence.common import jets
def config_JetEnergyScale(group, up_or_down):
jets_ana_index = nominal.sequence.index(jets)
new_config = copy.deepcopy(nominal)
jet_calibrator = cfg.Analyzer(
Calibrator,
src = 'jets',
calibrator_factor_func = lambda x: getattr(x,"corr_{}_JEC_{}".format(group,up_or_down), 1./x.rawFactor()) * x.rawFactor()
)
new_config.sequence.insert(jets_ana_index+1, jet_calibrator)
return new_config
JES = ['CMS_scale_j_eta0to5_13TeV',
'CMS_scale_j_eta0to3_13TeV',
'CMS_scale_j_eta3to5_13TeV',
'CMS_scale_j_RelativeBal_13TeV',
'CMS_scale_j_RelativeSample_13TeV']
if not data:
for source in JES:
configs['{}_up'.format(source)] = config_JetEnergyScale(source,'up')
configs['{}_down'.format(source)] = config_JetEnergyScale(source,'down')
### BTagging
from CMGTools.H2TauTau.heppy.sequence.common import btagger
def config_Btagging(up_or_down):
new_config = copy.deepcopy(nominal)
for cfg in new_config.sequence:
if cfg.name == 'btagger':
cfg.sys = up_or_down
return new_config
for up_or_down in up_down:
configs['Btagging_{}'.format(up_or_down)] = config_Btagging(up_or_down)
configs = {'nominal':configs['nominal']}
print(configs)
| [ "[email protected]" ] | |
3fb2cae71bf50676b6228b742a5f2bc1b3d9856b | 3012e5a0f34dd54fbac568c70506826973192ce1 | /pylib/lines.py | 0568e2c4370b658c9fb453b1094d7360c2fd0918 | [] | no_license | metatab-packages/civicknowledge.com-osm-demosearch | 89999227bda7bae91259c10bd651f220ae35c52f | d4ecb7775662a50413c848c3ae5a901b147ef532 | refs/heads/master | 2023-05-14T12:39:25.328559 | 2021-06-08T13:52:39 | 2021-06-08T13:52:39 | 334,572,037 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,999 | py | """
"""
from itertools import chain
from pathlib import Path
import fiona # Import first to work around an import bug
import geopandas as gpd
import numpy as np
import pandas as pd
import shapely
from demosearch import FileCache
from demosearch.util import run_mp
from shapely.wkt import loads as loads_wkt
from tqdm.notebook import tqdm
from .util import get_cache
tqdm.pandas()
import logging
lines_logger = logging.getLogger(__name__)
class LPError(Exception):
pass
hw_type = {
'residential': 'r',
'primary': '1',
'secondary': '2',
'tertiary': '3',
'motorway': 'm',
'motorway_link ': 'l',
'trunk': 't'
}
# Process each of the separate files, then
# write them back out for later recombination
#
# Write out the lines files into chunks so we can run it in multiple
# processes
def estimate_lines(fp):
"""Estimate the number of lines in a very long line-oriented file"""
lengths = []
means = []
sz = Path(fp).stat().st_size
mean = 1
std = 1
ln = 1
    tq = tqdm(total=6000)  # Should take fewer than 6K lines to get an estimate
with fp.open() as f:
while True:
l = f.readline()
            if not l or (len(lengths) > 1000 and std < 2):
return int(sz / mean)
lengths.append(len(l))
mean = np.mean(lengths).round(0)
means.append(mean)
std = np.std(means[-500:]).round(0)
tq.update(1)
tq.set_description(f"Est #lines {int(sz / mean)}")
ln += 1
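# Illustrative usage (file path is hypothetical):
#
#     approx = estimate_lines(Path('big_file.csv'))
#
# The loop stops once the running mean of sampled line lengths stabilises
# (std of the last 500 means drops below 2), so only a few thousand lines
# are read regardless of file size.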
def split_lines(pkg, limit=None):
cache = get_cache(pkg)
try:
# Returned the cached keys if this is already done
return cache.get('splits/splits_keys')
except KeyError:
pass
fp = pkg.reference('lines').resolved_url.fspath
try:
approx_lines = cache.config['lines_file_size']
except KeyError:
approx_lines = estimate_lines(fp)
cache.config['lines_file_size'] = approx_lines
chunksize = 10000
total = int(approx_lines / chunksize)
splits = []
with pd.read_csv(fp, chunksize=chunksize, low_memory=False) as reader:
for i, df in tqdm(enumerate(reader), total=total, desc='Split file'):
if limit and i > limit:
break
key = f'splits/{i}'
if not cache.exists(key):
cache.put_df(key, df)
splits.append(key)
cache.put('splits/splits_keys', splits)
return splits
def ro_key(rec_key):
return f"recombine/{Path(rec_key).name}"
def f_run_overlay(cache_dir, key, okey):
cache = FileCache(cache_dir)
if cache.exists(okey):
return okey
t = cache.get_df(key)
utm = cache.get_df('utm_grid')
t = t[t.highway.isin(list(hw_type.keys()))]
t['highway'] = t.highway.replace(hw_type) # Cuts file size by 100M
t['geometry'] = t.geometry.apply(shapely.wkt.loads)
if len(t) == 0:
return None
gdf = gpd.GeoDataFrame(t, crs=4326)
try:
t = gpd.overlay(gdf, utm)
try:
cache.put_df(okey, t)
except:
if cache.exists(okey):
                cache.delete(okey)
raise
except IndexError as e:
raise LPError(f"Failed for {key} gdf:{len(gdf)} hashes:{len(utm)}: {e}")
return okey
def run_overlay(pkg, splits, force=False):
cache = get_cache(pkg)
if not force:
try:
# Returned the cached keys if this is already done
recombine_keys = cache.get('recombine/recombine_keys')
if len(recombine_keys) == len(splits):
return recombine_keys
except KeyError:
pass
tasks = [[cache.root, e, ro_key(e)] for e in splits]
recombine_keys = run_mp(f_run_overlay, tasks, desc='Overlay Geohash')
cache.put('recombine/recombine_keys', recombine_keys)
return list(filter(bool, recombine_keys))
def f_simplify_lines(cache_dir, key):
cache = FileCache(cache_dir)
if not key:
return []
try:
df = cache.get_df(key)
except EOFError as e:
raise LPError(f"Failed to load key {key}: {e}")
except AttributeError as e:
raise LPError(f"Failed to load key {key}: {e}")
okeys = []
for idx, g in df.groupby('epsg'):
_, fn = key.split('/')
okey = f'simplified/{idx}/{fn}'
if not cache.exists(okey):
geometry = g.to_crs(epsg=idx).geometry \
.simplify(20, False) \
.to_crs(4326) \
.apply(lambda e: shapely.wkt.dumps(e, rounding_precision=0))
g = pd.DataFrame(g).assign(geometry=geometry)
cache.put_df(okey, g)
okeys.append(okey)
return okeys
def simplify_lines(pkg, recombine_keys):
cache = get_cache(pkg)
try:
# Returned the cached keys if this is already done
return cache.get('simplified/simplified_keys')
except KeyError:
pass
simplified_keys = run_mp(f_simplify_lines, [(cache.root, e) for e in recombine_keys],
desc='Simplify')
simplified_keys = list(chain(*simplified_keys))
cache.put('simplified/simplified_keys', simplified_keys)
return simplified_keys
def write_files(pkg, simplified_keys):
pkg_root = Path(pkg.path).parent
cache = FileCache(pkg_root.joinpath('data', 'cache'))
f1 = pkg_root.joinpath('data', 'residential_roads.csv')
f2 = pkg_root.joinpath('data', 'nonres_roads.csv')
if f1.exists() and f2.exists():
lines_logger.info('Both roads files exists, not writing')
return
t = pd.concat([cache.get_df(e) for e in simplified_keys])
t = t[['zone', 'epsg', 'us_state','cus_state', 'highway', 'geometry']]
residential_roads = t[t.highway == 'r']
nonres_roads = t[t.highway != 'r']
if not f1.exists():
residential_roads.to_csv(f1, index=False)
if not f2.exists():
nonres_roads.to_csv(f2, index=False)
def build_lines(pkg):
cache = open_cache(pkg)
lines_logger.info('Split the input file')
splits = split_lines(pkg)
lines_logger.info(f' {len(splits)} splits keys')
lines_logger.info('Run the overlay process')
    recombine_keys = run_overlay(pkg, splits)
print(f' {len(recombine_keys)} recombine keys')
if False:
lines_logger.info('Simplify lines')
simplified_keys = simplify_lines(pkg, recombine_keys)
lines_logger.info(f' {len(simplified_keys)} simplified keys')
else:
simplified_keys = recombine_keys
lines_logger.info('Write the roads files')
write_files(pkg, simplified_keys)
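# Minimal driver sketch (hypothetical; assumes `pkg` is an opened metapack
# package and that metapack provides open_package()):
#
#     import metapack as mp
#     build_lines(mp.open_package('metadata.csv'))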
def open_cache(pkg):
cache = get_cache(pkg)
if not cache.exists('hashes'):
hashes = pkg.reference('us_geohashes').geoframe()
cache.put_df('hashes', hashes)
if not cache.exists('utm_grid'):
utm_grid = pkg.reference('utm_grid').geoframe()
cache.put_df('utm_grid', utm_grid)
    return cache
"[email protected]"
] | |
1049de780f9f5d1abb00009e1767374fd69fa854 | 1b5404b8099de74d4e39e0a41b1d04c61defa8d4 | /fractals/pythagoras_tree.py | 9aaeb7710c9cda5161272fbed91510d7491c7690 | [] | no_license | ipeterov/random-stuff | 5d07bdcfdcb145d06ed89095f2ad34b70ff0f0bd | dbb38d42331f636919fd149b23783e02ee2c9afb | refs/heads/master | 2023-05-14T00:41:51.122251 | 2023-05-04T12:10:26 | 2023-05-04T12:10:26 | 206,028,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,292 | py | import math
from types import SimpleNamespace
import pygame
from abstract_drawer import AbstractDrawer
SIZE = WIDTH, HEIGHT = 500, 500
class PythagorasTreeDrawer(AbstractDrawer):
DEFAULT_PARAMS = {
'angle': 45,
'starting_size': 100,
'max_depth': 2,
}
PARAMS_SCHEMA = {
'angle': float,
'starting_size': float,
'max_depth': int,
}
@staticmethod
def counterclockwise_rotate(points, anchor_index, angle):
rad_angle = math.radians(-angle)
anchor = points[anchor_index]
new_points = []
for point in points:
if point == anchor:
new_points.append(point)
continue
anc_point = point - anchor
new_point = anchor + pygame.math.Vector2(
anc_point.x * math.cos(rad_angle) - anc_point.y * math.sin(rad_angle),
anc_point.x * math.sin(rad_angle) + anc_point.y * math.cos(rad_angle),
)
new_points.append(new_point)
return new_points
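    # Worked example (illustrative): with anchor (0, 0) and angle=90, the
    # point (1, 0) maps to (0, -1). The angle is negated above because
    # pygame's y axis points down, so a counterclockwise turn on screen is
    # a clockwise rotation in standard math coordinates.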
def draw_square(self, start, size, lean, angle):
assert lean in ('left', 'right')
if lean == 'left':
left, bottom = start
anchor_index = 0
else:
left, bottom = start[0] - size, start[1]
anchor_index = 1
angle *= -1
points = [
pygame.math.Vector2(left, bottom),
pygame.math.Vector2(left + size, bottom),
pygame.math.Vector2(left + size, bottom - size),
pygame.math.Vector2(left, bottom - size),
]
points = self.counterclockwise_rotate(points, anchor_index, angle)
pygame.draw.polygon(
self.screen,
(255, 255, 255),
points,
)
square = SimpleNamespace()
square.points = points
square.size = size
square.angle = angle
return square
def draw_small_squares(self, big_square, depth):
angle = self.params['angle']
rad_angle = math.radians(angle)
left_square = self.draw_square(
big_square.points[-1],
math.cos(rad_angle) * big_square.size,
lean='left',
angle=big_square.angle + angle,
)
right_square = self.draw_square(
big_square.points[-2],
math.sin(rad_angle) * big_square.size,
lean='right',
angle=90 - angle - big_square.angle,
)
if depth < self.params['max_depth']:
self.draw_small_squares(left_square, depth + 1)
self.draw_small_squares(right_square, depth + 1)
def _get_default_start(self, width, height):
return width / 2 - self.params['starting_size'] / 2, height
def _draw(self, start):
starting_square = self.draw_square(
start,
self.params['starting_size'],
lean='left',
angle=0,
)
self.draw_small_squares(
starting_square,
depth=1,
)
if __name__ == '__main__':
pygame.init()
screen = pygame.display.set_mode(SIZE)
drawer = PythagorasTreeDrawer(screen)
drawer.draw()
pygame.display.flip()
input('Press any key to quit... ')
| [ "[email protected]" ] | |
09e4f881f8f065510e9a5f1375a9ab2d0bc39cd9 | 1aa99ce0775508c3f2bbe321874c24957da5e880 | /python/pyspark/sql/tests/test_group.py | 6de1b8ea0b3cea5cfba394a2910164a781d09d46 | ["CDDL-1.1", "CC0-1.0", "Apache-2.0", "BSD-3-Clause", "MPL-1.1", "LicenseRef-scancode-public-domain", "BSD-2-Clause", "Python-2.0", "CDDL-1.0", "LicenseRef-scancode-unknown-license-reference", "MIT", "LicenseRef-scancode-other-copyleft", "CPL-1.0", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "CC-BY-SA-3.0", "CC-PDDC", "LicenseRef-scancode-unicode", "LicenseRef-scancode-generic-cla", "GPL-2.0-only", "LicenseRef-scancode-free-unknown", "LGPL-2.0-or-later", "EPL-1.0", "Classpath-exception-2.0", "GCC-exception-3.1", "NAIST-2003", "LicenseRef-scancode-unknown"] | permissive | zzvara/spark-dynamic | c06a8b885646d9e611cdca3591824fcf0fa0ccc2 | 00b4a8644ca89789af1fa47fa6ed871ad902154e | refs/heads/master | 2022-12-14T12:57:53.236482 | 2019-05-24T13:40:12 | 2019-05-27T15:31:03 | 96,672,852 | 3 | 0 | Apache-2.0 | 2022-11-16T11:37:30 | 2017-07-09T09:03:45 | Scala | UTF-8 | Python | false | false | 1,818 | py |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql import Row
from pyspark.testing.sqlutils import ReusedSQLTestCase
class GroupTests(ReusedSQLTestCase):
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approx_count_distinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
if __name__ == "__main__":
import unittest
from pyspark.sql.tests.test_group import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| [ "[email protected]" ] | |
86a788c889be2a5e5552a4c61cb4670d20fc02dd | 07c1d8eb58b34a8c17c6a9deef73094b6e077b4d | /action/action_buddy_contact_progress.py | 5720d3f3b06bc1d21dbf391caf352666d52bc449 | [] | no_license | robot-nan/GameLogParse | 061f8d0448c5945bec61b55380d9f2cd883defcf | 151f5dd167b106640cd178373a59b2458e43d80e | refs/heads/master | 2021-11-07T21:27:22.354060 | 2015-09-23T15:32:32 | 2015-09-23T15:32:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | # -*- coding:utf-8 -*-
"""
Buddy system: reward for buddy-contact progress
"""
from action import action_base
from util import game_define
def log(user, add_free_draw, item_str):
"""
    Output the log entry
"""
action = game_define.EVENT_ACTION_REWARD_BUDDY_CONTACT
cur_free_draw = user.player.get_free_draw_material()
log_lst = action_base.log_base(user)
log_lst.append(str(action))
log_lst.append(str(add_free_draw))
log_lst.append(str(cur_free_draw))
log_lst.append(str(item_str))
log_str = '$$'.join(log_lst)
return log_str
def parse(log_part_lst):
"""
    Parse a log entry
"""
result = dict()
result['action'] = int(log_part_lst[0])
result['add_free_draw'] = int(log_part_lst[1])
result['cur_free_draw'] = int(log_part_lst[2])
result['add_item_list'] = action_base.list_parse(log_part_lst[3])
result['old_free_draw'] = result['cur_free_draw'] - result['add_free_draw']
    return result
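# Illustrative round trip (all values hypothetical; the real action id comes
# from game_define and the item-string format from action_base.list_parse):
#
#     parse(['1234', '5', '12', '1001:2'])
#     # -> {'action': 1234, 'add_free_draw': 5, 'cur_free_draw': 12,
#     #     'add_item_list': ..., 'old_free_draw': 7}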
"[email protected]"
] | |
033abe55ea3aeb335fead36f85e46744aa5f66d3 | 87119ec9cea61be175f2a1f16f0e37d060cde9af | /django/django_celery/myproject/celery_config/apps.py | d21110a7e22dfd38681d3edc22dc2af5737e15e9 | [] | no_license | atkins126/sample_nullpobug | bce9c1bf2a31921ac665a18dc2a62be3bdef493e | b2ba65f42f717f0ceb2cf14fe28e90c460bfde87 | refs/heads/master | 2023-02-16T11:37:05.290069 | 2021-01-18T14:43:40 | 2021-01-18T14:43:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | from django.apps import AppConfig
class CeleryConfig(AppConfig):
name = 'celery_config'
def ready(self):
from myproject.celery_app import get_celery_app
get_celery_app()
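# Typical wiring (assumed; not shown in this snippet): listing
# 'celery_config' in INSTALLED_APPS makes Django call CeleryConfig.ready()
# at startup, which instantiates the Celery app exactly once.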
| [ "[email protected]" ] | |
a7b7f440ee7dbc2c11a1270096228f3417cc1960 | 35dbd536a17d7127a1dd1c70a2903ea0a94a84c2 | /src/sentry/services/hybrid_cloud/lost_password_hash/impl.py | e4a57fc261e32c357c600b8110580aea07c1721a | ["Apache-2.0", "BUSL-1.1"] | permissive | nagyist/sentry | efb3ef642bd0431990ca08c8296217dabf86a3bf | d9dd4f382f96b5c4576b64cbf015db651556c18b | refs/heads/master | 2023-09-04T02:55:37.223029 | 2023-01-09T15:09:44 | 2023-01-09T15:09:44 | 48,165,782 | 0 | 0 | BSD-3-Clause | 2022-12-16T19:13:54 | 2015-12-17T09:42:42 | Python | UTF-8 | Python | false | false | 1,070 | py |
import datetime
from sentry.models import LostPasswordHash
from sentry.services.hybrid_cloud.lost_password_hash import (
APILostPasswordHash,
LostPasswordHashService,
)
class DatabaseLostPasswordHashService(LostPasswordHashService):
def get_or_create(
self,
user_id: int,
) -> APILostPasswordHash:
# NOTE(mattrobenolt): Some security people suggest we invalidate
# existing password hashes, but this opens up the possibility
# of a DoS vector where then password resets are continually
# requested, thus preventing someone from actually resetting
# their password.
# See: https://github.com/getsentry/sentry/pull/17299
password_hash, created = LostPasswordHash.objects.get_or_create(user_id=user_id)
if not password_hash.is_valid():
password_hash.date_added = datetime.datetime.now()
password_hash.set_hash()
password_hash.save()
return self.serialize_lostpasswordhash(password_hash)
def close(self) -> None:
pass
| [
"[email protected]"
] | |
1d300458ff0cf33fd7c879e31a0162efaa72ae1c | e1efc8e0b0e4629dea61504fbc816c0527691bd9 | /6.redis/redis7-数据类型内部编码规则.py | d206eee2aea9fb4c23d71e8adf6c329ce0a4e930 | [] | no_license | xiongmengmeng/xmind-technology | 2bb67a0bf92cfd660cac01f8ab3a2454423ccba5 | e2fdb6987ef805a65f0a4feb52d84383853f4b77 | refs/heads/main | 2023-07-31T07:10:29.868120 | 2021-09-11T08:18:17 | 2021-09-11T08:18:17 | 307,636,242 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,377 | py | import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="redis"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("hash内部编码规则")
r2=s2.getRootTopic()
r2.setTitle("hash内部编码规则")
content={
'内部编码方式':[
'ziplist',
'dict'
],
'ziplist':[
'紧凑的编码格式,牺牲了部分读取性能以换取极高的空间利用率,适合在元素少时使用',
{'结构':[
{'zlbytes':[
'4字节',
'记录压缩列表总共占用的字节数'
]},
{'zltail':[
'4字节',
'定位list的末尾节点',
'最后一项(entry)在ziplist中的偏移字节数',
'方便尾端快速地执行push或pop操作'
]},
{'zllen':[
'2字节',
'记录ziplist中数据项(entry)的个数'
]},
{'zlentry':[
'存放真实数据,长度不定',
{'4部分':[
'prerawlen:前一个entry的数据长度',
'len:entry中数据的长度)',
'data:真实数据存储',
]}
]},
{'zlend':[
'1字节',
'结束标记,值固定为255'
]}
]}
],
'dict':[
'散列表,redis中哈希表称为dict',
'O(1)时间复杂度的赋值取值',
{'dict':[
{'type':[
]},
{'privdata':[
]},
{'ht[2]':[
'采用双哈希表,用来扩容',
{'dictht[0]':[
'table:数组,数组的节点为dictEntry',
'size:数组长度',
'sizemask:数组长度-1',
'used:已存节点'
]}
]},
{'rehashidx':[
]}
]},
{'dictEntry':[
'key',
'value',
'next'
]}
],
'ziplist转为dict的条件':[
{'hash-max-ziplist-entries=512':[
'ziplist元素个数超过512'
]},
{'hash-max-ziplist-value=64':[
'单个元素大小超过64byte'
]}
]
}
# Build the xmind tree
xmind.build(content,r2)
# Save the xmind file
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
"[email protected]"
] | |
e3dcddf28d9cfb6ce8a4fe41d148715f88134be0 | ffc3cf8a1ed64a86a8e92f738a2bf4b10afc63ac | /chainercv/transforms/image/random_rotate.py | 64977ab8c4718cf763aefa775923ffee291960c9 | ["MIT"] | permissive | lichnak/chainercv | 04be206b5bda2e601a86b6e7a8684ba1b03698ee | 845a600dd8722788233f2b8e5085a44790bb2be4 | refs/heads/master | 2020-05-23T04:47:12.531295 | 2017-03-12T11:36:58 | 2017-03-12T11:36:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 779 | py |
import numpy as np
def random_rotate(img, return_rotation=False):
"""Randomly rotate images by 90, 180, 270 or 360 degrees.
Args:
        img (numpy.ndarray): An image array in CHW format that is
            rotated.
        return_rotation (bool): If :obj:`True`, the amount of rotation is
            also returned.
Returns:
If :obj:`return_rotation = True`, return tuple of the transformed
array and an integer that represents number of times the array
is rotated by 90 degrees.
If :obj:`return_rotation = False`, return the transformed array
only.
"""
k = np.random.randint(4)
img = np.transpose(img, axes=(1, 2, 0))
img = np.rot90(img, k)
img = np.transpose(img, axes=(2, 0, 1))
if return_rotation:
return img, k
else:
return img
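# Minimal usage sketch (assumes a CHW-ordered array, per ChainerCV's image
# convention; shapes are illustrative):
#
#     img = np.zeros((3, 32, 48), dtype=np.float32)
#     out, k = random_rotate(img, return_rotation=True)
#     # out.shape is (3, 32, 48) for k in {0, 2} and (3, 48, 32) otherwise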
| [
"[email protected]"
] | |
e8eba9bfb063ea480def35a81277112987b8a7f4 | 364c65b9c3848d91b8dc15c9ec6f4bd596dae20b | /notifico/views/projects/__init__.py | 54eede340020679d5d96129578b9bca8d9257fdf | [
"MIT"
] | permissive | lhunath/notifico | e5eb90f2d2314cd5c176418c63165a33e5080245 | 59f72c46fa5af94e27c2f512b56fd45e6c432acc | refs/heads/master | 2021-01-16T23:11:26.883271 | 2014-10-02T15:30:29 | 2014-10-02T15:30:29 | 26,810,184 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,194 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""__init__.py
Project related views, such as project creation and details.
"""
from functools import wraps
from flask import (
Blueprint,
render_template,
g,
redirect,
url_for,
abort,
request
)
from flask.ext import wtf
from notifico import db, user_required
from notifico.models import User, Project, Hook, Channel
from notifico.services.hooks import HookService
projects = Blueprint('projects', __name__, template_folder='templates')
class ProjectDetailsForm(wtf.Form):
name = wtf.TextField('Project Name', validators=[
wtf.Required(),
wtf.Length(1, 50),
wtf.Regexp(r'^[a-zA-Z0-9_\-\.]*$', message=(
'Project name must only contain a to z, 0 to 9, dashes'
' and underscores.'
))
])
public = wtf.BooleanField('Public', validators=[
], default=True)
website = wtf.TextField('Project URL', validators=[
wtf.Optional(),
wtf.Length(max=1024),
wtf.validators.URL()
])
class HookDetailsForm(wtf.Form):
service_id = wtf.SelectField('Service', validators=[
wtf.Required()
], coerce=int)
class PasswordConfirmForm(wtf.Form):
password = wtf.PasswordField('Password', validators=[
wtf.Required()
])
def validate_password(form, field):
if not User.login(g.user.username, field.data):
raise wtf.ValidationError('Your password is incorrect.')
class ChannelDetailsForm(wtf.Form):
channel = wtf.TextField('Channel', validators=[
wtf.Required(),
wtf.Length(min=1, max=80)
])
host = wtf.TextField('Host', validators=[
wtf.Required(),
wtf.Length(min=1, max=255)
], default='chat.freenode.net')
port = wtf.IntegerField('Port', validators=[
        wtf.NumberRange(1024, 65535)
], default=6667)
ssl = wtf.BooleanField('Use SSL', default=False)
public = wtf.BooleanField('Public', default=True, description=(
'Allow others to see that this channel exists.'
))
def project_action(f):
"""
A decorator for views which act on a project. The function
should take two kwargs, `u` (the username) and `p` (the project name),
which will be resolved and replaced, or a 404 will be raised if either
could not be found.
"""
@wraps(f)
def _wrapped(*args, **kwargs):
u = User.by_username(kwargs.pop('u'))
if not u:
# No such user exists.
return abort(404)
p = Project.by_name_and_owner(kwargs.pop('p'), u)
if not p:
# Project doesn't exist (404 Not Found)
return abort(404)
kwargs['p'] = p
kwargs['u'] = u
return f(*args, **kwargs)
return _wrapped
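# Example (illustrative) of a view using the decorator above:
#
#     @projects.route('/<u>/<p>/example')
#     @project_action
#     def example_view(u, p):
#         ...  # u is a resolved User, p a resolved Project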
@projects.route('/<u>/')
def dashboard(u):
"""
Display an overview of all the user's projects with summary
statistics.
"""
u = User.by_username(u)
if not u:
# No such user exists.
return abort(404)
is_owner = (g.user and g.user.id == u.id)
# Get all projects by decending creation date.
projects = (
u.projects
.order_by(False)
.order_by(Project.created.desc())
)
if not is_owner:
# If this isn't the users own page, only
# display public projects.
projects = projects.filter_by(public=True)
return render_template('dashboard.html',
user=u,
is_owner=is_owner,
projects=projects,
page_title='Notifico! - {u.username}\'s Projects'.format(
u=u
)
)
@projects.route('/new', methods=['GET', 'POST'])
@user_required
def new():
"""
Create a new project.
"""
form = ProjectDetailsForm()
if form.validate_on_submit():
p = Project.by_name_and_owner(form.name.data, g.user)
if p:
form.name.errors = [
wtf.ValidationError('Project name must be unique.')
]
else:
p = Project.new(
form.name.data,
public=form.public.data,
website=form.website.data
)
p.full_name = '{0}/{1}'.format(g.user.username, p.name)
g.user.projects.append(p)
db.session.add(p)
if p.public:
# New public projects get added to #commits by default.
c = Channel.new(
'#commits',
'chat.freenode.net',
6667,
ssl=False,
public=True
)
p.channels.append(c)
db.session.commit()
return redirect(url_for('.details', u=g.user.username, p=p.name))
return render_template('new_project.html', form=form)
@projects.route('/<u>/<p>/edit', methods=['GET', 'POST'])
@user_required
@project_action
def edit_project(u, p):
"""
Edit an existing project.
"""
if p.owner.id != g.user.id:
# Project isn't public and the viewer isn't the project owner.
# (403 Forbidden)
return abort(403)
form = ProjectDetailsForm(obj=p)
if form.validate_on_submit():
old_p = Project.by_name_and_owner(form.name.data, g.user)
if old_p and old_p.id != p.id:
form.name.errors = [
wtf.ValidationError('Project name must be unique.')
]
else:
p.name = form.name.data
p.website = form.website.data
p.public = form.public.data
p.full_name = '{0}/{1}'.format(g.user.username, p.name)
db.session.commit()
return redirect(url_for('.dashboard', u=u.username))
return render_template('edit_project.html',
project=p,
form=form
)
@projects.route('/<u>/<p>/delete', methods=['GET', 'POST'])
@user_required
@project_action
def delete_project(u, p):
"""
Delete an existing project.
"""
if p.owner.id != g.user.id:
# Project isn't public and the viewer isn't the project owner.
# (403 Forbidden)
return abort(403)
if request.method == 'POST' and request.form.get('do') == 'd':
db.session.delete(p)
db.session.commit()
return redirect(url_for('.dashboard', u=u.username))
return render_template('delete_project.html', project=p)
@projects.route('/<u>/<p>')
@project_action
def details(u, p):
"""
Show the details for an existing project.
"""
if not p.can_see(g.user):
return redirect(url_for('public.landing'))
can_modify = p.can_modify(g.user)
visible_channels = p.channels
if not can_modify:
visible_channels = visible_channels.filter_by(public=True)
return render_template(
'project_details.html',
project=p,
user=u,
visible_channels=visible_channels,
can_modify=can_modify,
page_title='Notifico! - {u.username}/{p.name}'.format(
u=u,
p=p
)
)
@projects.route('/<u>/<p>/hook/new', defaults={'sid': 10}, methods=[
'GET', 'POST'])
@projects.route('/<u>/<p>/hook/new/<int:sid>', methods=['GET', 'POST'])
@user_required
@project_action
def new_hook(u, p, sid):
if p.owner.id != g.user.id:
# Project isn't public and the viewer isn't the project owner.
# (403 Forbidden)
return abort(403)
hook = HookService.services.get(sid)
form = hook.form()
if form:
form = form()
if form and hook.validate(form, request):
h = Hook.new(sid, config=hook.pack_form(form))
p.hooks.append(h)
db.session.add(h)
db.session.commit()
return redirect(url_for('.details', p=p.name, u=u.username))
elif form is None and request.method == 'POST':
h = Hook.new(sid)
p.hooks.append(h)
db.session.add(h)
db.session.commit()
return redirect(url_for('.details', p=p.name, u=u.username))
return render_template('new_hook.html',
project=p,
services=HookService.services,
service=hook,
form=form
)
@projects.route('/<u>/<p>/hook/edit/<int:hid>', methods=['GET', 'POST'])
@user_required
@project_action
def edit_hook(u, p, hid):
if p.owner.id != g.user.id:
return abort(403)
h = Hook.query.get(hid)
if h is None:
# You can't edit a hook that doesn't exist!
return abort(404)
if h.project.owner.id != g.user.id:
# You can't edit a hook that isn't yours!
return abort(403)
hook_service = h.hook()
form = hook_service.form()
if form:
form = form()
if form and hook_service.validate(form, request):
h.config = hook_service.pack_form(form)
db.session.add(h)
db.session.commit()
return redirect(url_for('.details', p=p.name, u=u.username))
elif form is None and request.method == 'POST':
db.session.add(h)
db.session.commit()
return redirect(url_for('.details', p=p.name, u=u.username))
elif form:
hook_service.load_form(form, h.config)
return render_template('edit_hook.html',
project=p,
services=HookService.services,
service=hook_service,
form=form
)
@projects.route('/h/<int:pid>/<key>', methods=['GET', 'POST'])
def hook_receive(pid, key):
h = Hook.query.filter_by(key=key, project_id=pid).first()
if not h or not h.project:
# The hook being pushed to doesn't exist, has been deleted,
# or is a leftover from a project cull (which destroyed the project
# but not the hooks associated with it).
return abort(404)
# Increment the hooks message_count....
Hook.query.filter_by(id=h.id).update({
Hook.message_count: Hook.message_count + 1
})
# ... and the project-wide message_count.
Project.query.filter_by(id=h.project.id).update({
Project.message_count: Project.message_count + 1
})
hook = HookService.services.get(h.service_id)
if hook is None:
# TODO: This should be logged somewhere.
return ''
hook._request(h.project.owner, request, h)
db.session.commit()
return ''
@projects.route('/<u>/<p>/hook/delete/<int:hid>', methods=['GET', 'POST'])
@user_required
@project_action
def delete_hook(u, p, hid):
"""
Delete an existing service hook.
"""
h = Hook.query.get(hid)
if not h:
# Project doesn't exist (404 Not Found)
return abort(404)
if p.owner.id != g.user.id or h.project.id != p.id:
# Project isn't public and the viewer isn't the project owner.
# (403 Forbidden)
return abort(403)
if request.method == 'POST' and request.form.get('do') == 'd':
p.hooks.remove(h)
db.session.delete(h)
db.session.commit()
return redirect(url_for('.details', p=p.name, u=u.username))
return render_template('delete_hook.html',
project=p,
hook=h
)
@projects.route('/<u>/<p>/channel/new', methods=['GET', 'POST'])
@user_required
@project_action
def new_channel(u, p):
if p.owner.id != g.user.id:
# Project isn't public and the viewer isn't the project owner.
# (403 Forbidden)
return abort(403)
form = ChannelDetailsForm()
if form.validate_on_submit():
host = form.host.data.strip().lower()
channel = form.channel.data.strip().lower()
# Make sure this isn't a duplicate channel before we create it.
c = Channel.query.filter_by(
host=host,
channel=channel,
project_id=p.id
).first()
if not c:
c = Channel.new(
channel,
host,
port=form.port.data,
ssl=form.ssl.data,
public=form.public.data
)
p.channels.append(c)
db.session.add(c)
db.session.commit()
return redirect(url_for('.details', p=p.name, u=u.username))
else:
form.channel.errors = [wtf.ValidationError(
'You cannot have a project in the same channel twice.'
)]
return render_template('new_channel.html',
project=p,
form=form
)
@projects.route('/<u>/<p>/channel/delete/<int:cid>', methods=['GET', 'POST'])
@user_required
@project_action
def delete_channel(u, p, cid):
"""
Delete an existing service hook.
"""
c = Channel.query.filter_by(
id=cid,
project_id=p.id
).first()
if not c:
# Project or channel doesn't exist (404 Not Found)
return abort(404)
if c.project.owner.id != g.user.id or c.project.id != p.id:
# Project isn't public and the viewer isn't the project owner.
# (403 Forbidden)
return abort(403)
if request.method == 'POST' and request.form.get('do') == 'd':
c.project.channels.remove(c)
db.session.delete(c)
db.session.commit()
return redirect(url_for('.details', p=p.name, u=u.username))
return render_template('delete_channel.html',
project=c.project,
channel=c
)
| [
"[email protected]"
] | |
8409d6a53f38fc061c1540e3850f503ef0e11bcf | f2befaae3840bafd181cc712108e3b64caf2696f | /app/portal/horizon/openstack_dashboard/dashboards/f5services/f5ltm/urls.py | 9b38daded614453d6d6e7ac86e6e03ce719522b2 | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | F5Networks/f5-adcaas-openstack | 17d5c408d421dcfe542002e1f850b2d9f29f1663 | 02bd8a606215c0fa08b926bac1b092b5e8b278df | refs/heads/master | 2023-08-28T12:09:54.972191 | 2022-08-12T02:03:43 | 2022-08-12T02:03:43 | 164,592,273 | 4 | 23 | Apache-2.0 | 2022-08-12T02:03:44 | 2019-01-08T07:40:35 | Python | UTF-8 | Python | false | false | 723 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.f5services.f5ltm import views
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
]
| [
"[email protected]"
] | |
c932bfbb92d2084d65f33c7bad9db63a939d9d64 | 81db4221ab007659d4f117c9320c28c00b0902a7 | /python_repos.py | 02755831cd6da4502e4b411b6b87f57e56f6fc1d | [] | no_license | thewchan/python_data_viz | 69f8c0a502a9ec5ee44b78e1d2db19c8f4fdf9ba | 3226bd133a72fc11e8ee09775665a509b6a5efba | refs/heads/master | 2022-04-16T13:48:55.610209 | 2020-04-14T16:15:13 | 2020-04-14T16:15:13 | 255,659,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 862 | py | import requests
# Make an API call and store the response.
url = 'https://api.github.com/search/repositories?q=language:python&sort=stars'
headers = {'Accept': 'application/vnd.github.v3+json'}
r = requests.get(url, headers=headers)
print(f"Status code: {r.status_code}")
# Store API response in a variable.
response_dict = r.json()
print(f"Total repositories: {response_dict['total_count']}")
# Explore information about the repositories.
repo_dicts = response_dict['items']
print(f"Repositories returned {len(repo_dicts)}")
print("\nSelected information about each repository:")
for repo_dict in repo_dicts:
print(f"Name: {repo_dict['name']}")
print(f"Owner {repo_dict['owner']['login']}")
print(f"Stars: {repo_dict['stargazers_count']}")
print(f"Repository: {repo_dict['html_url']}")
print(f"Description: {repo_dict['description']}")
| [ "[email protected]" ] | |
86053fa3163ebbc4322bb8e014523f01fd9291b8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_266/ch59_2019_03_29_21_49_10_038358.py | 8cb109d5a7ec70aed36067d83e642eb2d9a75f9e | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | def conta_a(palavra):
i=0
contador = 0
while i<len(palavra):
if palavra[i] == 'a':
contador+=1
i+=1
    return contador
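# Example (illustrative): conta_a('banana') returns 3.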
"[email protected]"
] |