_IMAGE_FEATURES=["Hue" "Brightness" "Saturation"]<line_sep> |
import pytest


@pytest.fixture
def ENCODE3_award(testapp):
    item = {
        'name': 'ABC1234',
        'rfa': 'ENCODE3',
        'project': 'ENCODE',
        'title': 'A Generic ENCODE3 Award'
    }
    return testapp.post_json('/award', item, status=201).json['@graph'][0]


@pytest.fixture
def award_a():
    return {
        'name': 'ENCODE2',
    }


@pytest.fixture
def award_1(award_a):
    item = award_a.copy()
    item.update({
        'schema_version': '1',
        'rfa': 'ENCODE2'
    })
    return item


@pytest.fixture
def award_2(award_1):
    item = award_1.copy()
    item.update({
        'schema_version': '3',
        'viewing_group': 'ENCODE',
    })
    return item


@pytest.fixture
def award_5(award_2):
    item = award_2.copy()
    item.update({
        'schema_version': '6',
        'viewing_group': 'ENCODE',
    })
    return item


@pytest.fixture
def award(testapp):
    item = {
        'name': 'encode3-award',
        'rfa': 'ENCODE3',
        'project': 'ENCODE',
        'title': 'A Generic ENCODE3 Award',
        'viewing_group': 'ENCODE3',
    }
    return testapp.post_json('/award', item).json['@graph'][0]


@pytest.fixture
def award_modERN(testapp):
    item = {
        'name': 'modERN-award',
        'rfa': 'modERN',
        'project': 'modERN',
        'title': 'A Generic modERN Award',
        'viewing_group': 'ENCODE3',
    }
    return testapp.post_json('/award', item).json['@graph'][0]


@pytest.fixture
def remc_award(testapp):
    item = {
        'name': 'remc-award',
        'rfa': 'GGR',
        'project': 'GGR',
        'title': 'A Generic REMC Award',
        'viewing_group': 'REMC',
    }
    return testapp.post_json('/award', item).json['@graph'][0]


@pytest.fixture
def encode2_award(testapp):
    # upgrade/shared.py ENCODE2_AWARDS
    item = {
        'uuid': '1a4d6443-8e29-4b4a-99dd-f93e72d42418',
        'name': 'encode2-award',
        'rfa': 'ENCODE2',
        'project': 'ENCODE',
        'title': 'A Generic ENCODE2 Award',
        'viewing_group': 'ENCODE3',
    }
    return testapp.post_json('/award', item).json['@graph'][0]


@pytest.fixture
def encode4_award(testapp):
    item = {
        'name': 'encode4-award',
        'rfa': 'ENCODE4',
        'project': 'ENCODE',
        'title': 'A Generic ENCODE4 Award',
        'viewing_group': 'ENCODE4',
        'component': 'mapping',
    }
    return testapp.post_json('/award', item).json['@graph'][0]


@pytest.fixture
def award_encode4(testapp):
    item = {
        'name': 'encode4-award',
        'rfa': 'ENCODE4',
        'project': 'ENCODE',
        'title': 'A Generic ENCODE4 Award',
        'viewing_group': 'ENCODE4',
    }
    return testapp.post_json('/award', item).json['@graph'][0]


@pytest.fixture
def roadmap_award(testapp):
    item = {
        'name': 'roadmap-award',
        'rfa': 'Roadmap',
        'project': 'Roadmap',
        'title': 'A Generic Roadmap Award',
        'viewing_group': 'REMC',
    }
    return testapp.post_json('/award', item).json['@graph'][0]


@pytest.fixture
def award_8(award_1):
    item = award_1.copy()
    item.update({
        'schema_version': '8',
        'viewing_group': 'ENCODE',
    })
    return item

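# A minimal sketch of how the chained fixtures above compose in a test.
# `test_award_2_upgrade_fields` is a hypothetical test, not part of the file:
# requesting `award_2` resolves `award_1` and `award_a` transitively.
def test_award_2_upgrade_fields(award_2):
    assert award_2['schema_version'] == '3'      # set by award_2
    assert award_2['viewing_group'] == 'ENCODE'  # set by award_2
    assert award_2['rfa'] == 'ENCODE2'           # inherited from award_1
    assert award_2['name'] == 'ENCODE2'          # inherited from award_a
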
# Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import

from digits import test_utils


def test_caffe_imports():
    test_utils.skipIfNotFramework('caffe')
    import numpy  # noqa
    import google.protobuf  # noqa

import falcon
import json
import os
import hashlib


class ServerUtils(object):
    @staticmethod
    def get_file_location(filename):
        dirname = os.path.dirname(__file__)
        relpath = os.path.join(dirname, '../builder/latest/output', filename)
        return os.path.abspath(relpath)

    @staticmethod
    def get_file_dir(filename):
        return os.path.dirname(ServerUtils.get_file_location(filename))

    @staticmethod
    def get_my_rsa_key():
        path_to_key = os.path.join(os.environ['HOME'], '.ssh/id_rsa.pub')
        return open(path_to_key).read().strip()


class DeviceConfigResource(object):
    def md5(self, fname):
        hash_md5 = hashlib.md5()
        with open(fname, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_md5.update(chunk)
        return hash_md5.hexdigest()

    def get_filedescriptor(self, filename):
        return {
            'url': "http://%s/images/global/%s" % (os.environ['CATTLEPI_LOCALAPI'], filename),
            'md5sum': self.md5(ServerUtils.get_file_location(filename))
        }

    def on_get(self, req, resp, deviceid):
        resp.status = falcon.HTTP_200
        body = {
            'initfs': self.get_filedescriptor('initramfs.tgz'),
            'rootfs': self.get_filedescriptor('rootfs.sqsh'),
            'bootcode': '',
            'usercode': '',
            'config': {
                'ssh': {
                    'pi': {
                        'authorized_keys': [ServerUtils.get_my_rsa_key()]
                    }
                }
            }
        }
        resp.body = json.dumps(body)


class TrackAllResource(object):
    def on_get(self, req, resp):
        resp.status = falcon.HTTP_200
        resp.body = "Ok: dummy response"


class TrackResource(object):
    def on_get(self, req, resp):
        resp.status = falcon.HTTP_200
        resp.body = "Ok: dummy response"


app = falcon.API()
app.add_route('/boot/{deviceid}/config', DeviceConfigResource())
app.add_route('/track', TrackAllResource())
app.add_route('/track/{deviceid}', TrackResource())
app.add_static_route('/images/global', ServerUtils.get_file_dir('initramfs.tgz'))

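# A hedged usage sketch for the API above: assuming the app is served by a
# WSGI server (e.g. `gunicorn server:app`) on localhost:8000, a device would
# fetch its boot config like this. `requests`, the module name, and the port
# are assumptions for illustration, not part of the project.
import requests

resp = requests.get('http://localhost:8000/boot/my-device-id/config')
config = resp.json()
print(config['initfs']['url'], config['initfs']['md5sum'])
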
from typing import List


class Solution:
    def minPathSum(self, grid: List[List[int]]) -> int:
        # Dynamic programming over a single rolling row: dp[j] holds the
        # minimum path sum to reach cell (i, j) in the row being processed.
        m, n = len(grid), len(grid[0])
        dp = grid[0][:]
        for i in range(1, n):
            dp[i] += dp[i - 1]
        for i in range(1, m):
            for j in range(n):
                if j > 0:
                    dp[j] = grid[i][j] + min(dp[j], dp[j - 1])
                else:
                    dp[j] = grid[i][j] + dp[j]
        return dp[-1]

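# A quick sanity check of the row-DP above, using a standard 3x3 example grid
# (illustrative values): the cheapest path 1 -> 3 -> 1 -> 1 -> 1 sums to 7.
assert Solution().minPathSum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7
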
import sys
import os
import asyncio

from lbry.blob.blob_manager import BlobManager
from lbry.blob_exchange.server import BlobServer
from lbry.schema.address import decode_address
from lbry.extras.daemon.storage import SQLiteStorage


async def main(address: str):
    try:
        decode_address(address)
    except:
        print(f"'{address}' is not a valid lbrycrd address")
        return 1

    loop = asyncio.get_running_loop()
    storage = SQLiteStorage(os.path.expanduser("~/.lbrynet/lbrynet.sqlite"))
    await storage.open()
    blob_manager = BlobManager(loop, os.path.expanduser("~/.lbrynet/blobfiles"), storage)
    await blob_manager.setup()

    server = await loop.create_server(
        lambda: BlobServer(loop, blob_manager, address),
        '0.0.0.0', 4444)
    try:
        async with server:
            await server.serve_forever()
    finally:
        await storage.close()


if __name__ == "__main__":
    asyncio.run(main(sys.argv[1]))

import numpy as np

from scipy.integrate import odeint   # Orbit propagation
from scipy.optimize import fsolve    # For solving TDoA

from sgp4.api import Satrec
from astropy import units as u
from astropy.time import Time
from astropy.coordinates import EarthLocation, ITRS, ICRS, TEME, CartesianDifferential, CartesianRepresentation

from orbitdeterminator.doppler.utils.constants import *


def range_range_rate(x_sat: np.ndarray, x_obs: np.ndarray):
    """ Get range and slant range rate (radial relative velocity component).
        Vectorized.

    Args:
        x_sat (np.ndarray): satellite location (pos, vel).
        x_obs (np.ndarray): observer location (pos, vel).
    Returns:
        r (np.ndarray): range.
        rr (np.ndarray): range rate (slant range rate).
    """
    if len(x_obs.shape) == 2:      # Single observer (6, n)
        einsum_format = 'ij,ij->j'
        d = x_sat - x_obs          # Difference
    elif len(x_obs.shape) == 3:    # Multiple observers (6, n, n_obs)
        einsum_format = 'ijk,ijk->jk'
        d = np.repeat(np.expand_dims(x_sat, axis=2), x_obs.shape[2], axis=2) - x_obs  # Difference

    r = np.linalg.norm(d[0:3, ], axis=0)             # Range
    l = d[0:3, ] / np.linalg.norm(d[0:3, ], axis=0)  # Range unit vectors
    rr = np.einsum(einsum_format, d[3:6, ], l)       # Radial range rate

    return r.T, rr.T


def doppler_shift(x_sat: np.ndarray, x_obs: np.ndarray, f_ref: float, c: float):
    """ Get Doppler shift value for the given satellite and observer vectors.
        Vectorized.

    Args:
        x_sat (np.ndarray): satellite location (pos, vel).
        x_obs (np.ndarray): observer location (pos, vel).
        f_ref (float): reference frequency.
        c (float): propagation speed.
    Returns:
        df (np.ndarray): frequency shift df relative to the reference frequency.
    """
    _, rv = range_range_rate(x_sat, x_obs)
    df = rv / c * f_ref

    return df


# Orbit derivative
def orbdyn_2body(x: np.ndarray, t: float, mu: float = 3.986004418e14):
    """ Orbital (x, y, z, x_dot, y_dot, z_dot) state vector derivative.

    Args:
        x (np.ndarray): state vector.
        t (float): time.
    Returns:
        dxdt (np.ndarray): state vector time derivative.
    """
    r = np.linalg.norm(x[0:3, ], axis=0)
    dxdt = np.zeros(x.shape)
    dxdt[0:3, ] = x[3:6, ]
    dxdt[3:6, ] = -(mu / r**3) * x[0:3, ]

    return dxdt


def orbdyn_2body_stm(x: np.ndarray, t: float, mu: float = 3.986004418e14):
    """ Orbital (x, y, z, x_dot, y_dot, z_dot) state vector and state transition
        matrix derivative. Phi_dot = A * Phi.

    Args:
        x (np.ndarray): state vector and flattened state transition matrix [x, Phi(:)].
            Size (6 + 6*6,): (42,).
        t (float): time.
    Returns:
        dxdt (np.ndarray): state vector and state transition matrix time derivative.
    """
    dxdt = np.zeros(x.shape)
    r = np.linalg.norm(x[0:3, ], axis=0)
    dxdt[0:3, ] = x[3:6, ]
    dxdt[3:6, ] = (-mu / r**3) * x[0:3, ]

    A = get_matrix_A(x[0:3, ], mu=mu)      # (6, 6, n)

    if len(x.shape) == 1:
        Phi = x[6:, ].reshape((6, 6))      # (6, 6)
        Phi_dot = np.matmul(A, Phi)
        dxdt[6:, ] = Phi_dot.reshape((36))
    else:
        Phi = x[6:, ].reshape((6, 6, -1))  # (6, 6, n)
        Phi_dot = np.einsum('ijl,jkl->ikl', A, Phi)
        dxdt[6:, ] = Phi_dot.reshape((36, -1))

    return dxdt


def get_matrix_A(x: np.ndarray, mu: float = 3.986004418e14):
    """ Get A matrix (orbital x_dot = A*x). Vectorized.

    Args:
        x (np.ndarray): orbital state vector (Cartesian).
        mu (float): standard gravitational parameter. Defaults to 3.986e14 m^3/s^2.
    Returns:
        A (np.ndarray): A matrix. Size (x_dim, x_dim): (6, 6).
    """
    r = np.linalg.norm(x[0:3, ], axis=0)
    aa = -mu / r**3
    b = 3 * mu / r**5

    AA = np.array([
        [aa + b * x[0, ]**2,   b * x[0, ] * x[1, ], b * x[0, ] * x[2, ]],
        [b * x[0, ] * x[1, ],  aa + b * x[1, ]**2,  b * x[1, ] * x[2, ]],
        [b * x[0, ] * x[2, ],  b * x[1, ] * x[2, ], aa + b * x[2, ]**2],
    ])

    A_z = np.zeros(AA.shape)   # Zero parts of the A matrix
    A_e = np.zeros(AA.shape)   # Eye (upper right)
    i = np.arange(AA.shape[0])
    A_e[i, i, ] = 1

    A = np.concatenate([
        np.concatenate([A_z, A_e], axis=1),
        np.concatenate([AA, A_z], axis=1),
    ], axis=0)

    return A


def f_obs_range_rate(x_sat: np.ndarray, x_obs: np.ndarray):
    """ Observation function for range rate.

    Args:
        x_sat (np.ndarray): set of satellite positions.
        x_obs (np.ndarray): set of observer positions.
    Returns:
        rr (np.ndarray): range rate. Size (z_dim, n): (1, n).
        H (np.ndarray): partial of radial range rate w.r.t. state vector.
            Size (z_dim, x_dim, n): (1, 6, n).
    """
    _, rr = range_range_rate(x_sat, x_obs)
    H = get_matrix_range_rate_H(x_sat, x_obs)

    if len(x_obs.shape) == 2:
        rr = np.expand_dims(rr, axis=0)

    return rr, H


def f_obs_x_sat(x_sat: np.ndarray, x_obs: np.ndarray = None):
    """ Observation function for the full state vector,
        e.g. a GPS measurement. Used for debugging.

    Args:
        x_sat (np.ndarray): set of satellite positions.
    Returns:
        x_sat (np.ndarray): satellite state vector.
        H (np.ndarray): observation matrix (identity).
    """
    H = np.expand_dims(np.eye(x_sat.shape[0]), axis=2)
    H = np.repeat(H, x_sat.shape[1], axis=2)

    return x_sat, H


def get_matrix_range_rate_H(x_sat: np.ndarray, x_obs: np.ndarray):
    """ Obtain the measurement Jacobian for range rate measurements. Vectorized.

    Args:
        x_sat (np.ndarray): set of satellite positions.
        x_obs (np.ndarray): set of observer positions.
    Returns:
        H (np.ndarray): partial of radial range rate w.r.t. state vector.
            Size (z_dim, x_dim, n): (1, 6, n).
    """
    if len(x_obs.shape) == 2:      # Single observer (6, n)
        einsum_format = 'ij,ij->j'
        d = x_sat - x_obs          # Difference
    elif len(x_obs.shape) == 3:    # Multiple observers (6, n, n_obs)
        einsum_format = 'ijk,ijk->jk'
        d = np.repeat(np.expand_dims(x_sat, axis=2), x_obs.shape[2], axis=2) - x_obs  # Difference

    r = np.linalg.norm(d[0:3, ], axis=0)  # Range
    d_r = d / r                           # Temporary variable

    H = d_r[[3, 4, 5, 0, 1, 2], ]
    r_dot_v = np.einsum(einsum_format, d[0:3, ], d[3:6])  # Dot product of position and velocity
    H[0:3, :] -= (d[0:3, ] * r_dot_v) / r**3

    if len(x_obs.shape) == 2:      # Single observer (6, n)
        H = np.expand_dims(H, axis=0)
    elif len(x_obs.shape) == 3:    # Multiple observers (6, n, n_obs)
        H = np.transpose(H, (2, 0, 1))

    return H  # Transpose before return (H is a single-row matrix)


def tdoa_objective_function(vars, *data):
    """ Objective function for solving Time Difference of Arrival (TDoA).
        0 = C * (TDoA + tau) - || x_sat - x_obs ||

    Args:
        vars (tuple): a tuple of unknowns - xyz satellite position and time offset
            (x, y, z, t).
        data (tuple): additional arguments - observer positions and TDoA measurements
            (x_obs, tdoa).
    Returns:
        (tuple): tuple of objective function values.
    """
    x, y, z, tau = vars
    x_sat = np.array([[x], [y], [z]], dtype=np.float64)
    x_obs, tdoa = data

    r = C * (tdoa + tau) - np.linalg.norm(x_obs - x_sat, axis=0)

    return (r.item(0), r.item(1), r.item(2), r.item(3))


def get_tdoa_simulated(x_sat: np.ndarray, x_obs: np.ndarray, flag_tof: bool = False):
    """ Get simulated Time Difference of Arrival measurements.
        TODO: Take into account time of flight; right now it is instantaneous.
        TODO: Flip range and tdoa array dimensions to be (n_measurements, n_stations).

    Args:
        x_sat (np.ndarray): set of satellite state vectors.
        x_obs (np.ndarray): set of observer positions.
        flag_tof (bool): flag whether to simulate using time of flight
            (not currently implemented).
    Returns:
        tdoa (np.ndarray): set of simulated TDoA measurements.
        tof (np.ndarray): set of simulated times of flight between the observers
            and the satellite.
    """
    if flag_tof:
        assert False, "Time of flight not implemented!"
    else:
        r, _ = range_range_rate(x_sat, x_obs)
        tof = r / C
        tdoa = tof - tof[0, :]

    return tdoa, tof


def get_tdoa_simulated_r(r: np.ndarray):
    """ Same as get_tdoa_simulated, but takes only range as an argument.
        TODO: Flip range and tdoa array dimensions to be (n_measurements, n_stations).

    Args:
        r (np.ndarray): set of observed ranges per station (n_stations, n_measurements).
    Returns:
        tdoa (np.ndarray): set of simulated TDoA measurements.
        tof (np.ndarray): set of simulated times of flight between the observers
            and the satellite.
    """
    tof = r / C
    tdoa = tof - tof[0, :]

    return tdoa, tof


def solve_tdoa(tdoa: np.ndarray, x_obs: np.ndarray):
    """ Solve Time Difference of Arrival (TDoA) measurements for position.

    Args:
        tdoa (np.ndarray): array of TDoA measurements. TODO: Array dimensions.
            The TDoA array must include the time difference for the reference
            station, even if it is zero.
        x_obs (np.ndarray): array of observer positions (6, n, n_obs).
    Returns:
        p_sat (np.ndarray): array of multilaterated satellite positions.
        tau (np.ndarray): array of time offsets for the reference station.
    """
    n = x_obs.shape[1]

    p_sat = np.zeros((3, n))
    tau = np.zeros(n)
    x_obs_mean = np.mean(x_obs, axis=2)

    for i in range(n):
        # Initial guess: slightly offset mean observer position, small time offset
        vars_0 = [x_obs_mean[0, i] * 1.01, x_obs_mean[1, i] * 1.01,
                  x_obs_mean[2, i] * 1.01, 5e-3]
        data = (x_obs[0:3, i, :], tdoa[:, i])
        result = fsolve(tdoa_objective_function, vars_0, args=data)
        p_sat[:, i] = result[0:3]
        tau[i] = result[3]

    return p_sat, tau


def verify_sat_orbital(x_sat: np.ndarray, range_pos: np.ndarray, range_vel: np.ndarray):
    """ Verifies whether the given state vectors represent a valid orbital state.
        This function is used to eliminate possible states that violate orbital
        constraints.

    Args:
        x_sat (np.ndarray): set of satellite positions.
        range_pos (np.ndarray): set of valid position vector norms.
        range_vel (np.ndarray): set of valid velocity vector norms.
    Returns:
        x_sat_ok (np.ndarray): set of satellite positions.
        x_mask (np.ndarray): boolean array indicating the validity of each
            satellite vector.
    """
    r = np.linalg.norm(x_sat[0:3, ], axis=0)  # Norm of the position
    v = np.linalg.norm(x_sat[3:6, ], axis=0)  # Norm of the velocity

    r_mask = (r >= range_pos[0]) & (r <= range_pos[1])
    v_mask = (v >= range_vel[0]) & (v <= range_vel[1])
    x_mask = r_mask & v_mask

    x_sat_ok = x_sat[:, x_mask]

    return x_sat_ok, x_mask


def verify_sat_observer(x_sat: np.ndarray, x_obs: np.ndarray, range_range: np.ndarray):
    """ Verifies whether the satellite is within the valid range from the observer.
        This function is used to eliminate possible states that violate
        satellite-observer constraints.

    Args:
        x_sat (np.ndarray): set of satellite positions.
        x_obs (np.ndarray): set of observer positions.
        range_range (np.ndarray): valid range interval (min, max).
    Returns:
        x_sat_ok (np.ndarray): set of satellite positions.
        x_mask (np.ndarray): boolean array indicating the validity of each
            satellite vector.
    """
    r, _ = range_range_rate(x_sat, x_obs)
    x_mask = (r >= range_range[0]) & (r <= range_range[1])
    x_sat_ok = x_sat[:, x_mask]

    return x_sat_ok, x_mask


def herrick_gibbs(p_sat: np.ndarray, t: np.ndarray, angle_checks=True):
    """ Herrick-Gibbs Initial Orbit Determination method. Takes three positional
        observations and corresponding timestamps and outputs a full state vector
        estimate (position and velocity) for the middle measurement.
        Reference: Vallado - Fundamentals of Astrodynamics and Applications,
        4th ed., p. 461, 7.5.2 Herrick-Gibbs.

    Args:
        p_sat (np.ndarray): set of satellite positions. Three close positions
            are required for the method to work.
        t (np.ndarray): observation times.
        angle_checks (bool): flag whether or not to perform angle checks between
            position vectors.
    Returns:
        x_sat_1 (np.ndarray): estimated satellite state (position + velocity)
            for the second (middle) observation.
        error (str or None): error message if a sanity check failed, otherwise None.
    """
    error = None
    tolerance_angle = 10.0 / 180.0 * np.pi
    r = np.linalg.norm(p_sat, axis=0)  # Magnitudes of the observed positions

    # Sanity checks
    if angle_checks:
        p = np.cross(p_sat[:, 1], p_sat[:, 2])
        p_n = p / np.linalg.norm(p)
        x_sat_1n = p_sat[:, 0] / r[0]

        # Check whether the vectors are coplanar
        if np.abs(np.dot(x_sat_1n, p_n)) > tolerance_angle:
            error = f"Error: not coplanar {np.abs(np.dot(x_sat_1n, p_n))} > {tolerance_angle}"

        # Calculate angles between the position vectors
        theta_01 = np.arccos(np.dot(p_sat[:, 0], p_sat[:, 1])
                             / (np.linalg.norm(p_sat[:, 0]) * np.linalg.norm(p_sat[:, 1])))
        theta_12 = np.arccos(np.dot(p_sat[:, 1], p_sat[:, 2])
                             / (np.linalg.norm(p_sat[:, 1]) * np.linalg.norm(p_sat[:, 2])))

        if min(theta_01, theta_12) > tolerance_angle:
            error = f"Error: angles {min(theta_01, theta_12)} > {tolerance_angle}"

    # Herrick-Gibbs Initial Orbit Determination
    dt_10, dt_20, dt_21 = t[1] - t[0], t[2] - t[0], t[2] - t[1]

    term = np.array([
        -dt_21 * (1.0 / (dt_10 * dt_20)) + MU / (12.0 * r[0]**3),
        (dt_21 - dt_10) * (1.0 / (dt_10 * dt_21)) + MU / (12.0 * r[1]**3),
        dt_10 * (1.0 / (dt_21 * dt_20)) + MU / (12.0 * r[2]**3),
    ])

    v_sat_1 = np.sum(term * p_sat, axis=1)
    x_sat_1 = np.concatenate([p_sat[:, 1], v_sat_1])

    return x_sat_1, error


def batch(x_0: np.ndarray, P_bar_0: np.ndarray, R: np.ndarray,
          z: np.ndarray, t: np.ndarray, x_obs: np.ndarray, f_obs,
          tolerance: float = 1e-8, max_iterations: int = 1000):
    """ Batch estimation algorithm.
        Reference: Tapley, Schutz, Born - Statistical Orbit Determination,
        Chapter 4.6, pp. 196-197 - Computational Algorithm for the Batch Processor.

    Args:
        x_0 (np.ndarray): initial state vector, shape (x_dim, 1).
        P_bar_0 (np.ndarray): initial uncertainty, shape (x_dim, x_dim).
        R (np.ndarray): measurement uncertainty, shape (z_dim, z_dim).
        z (np.ndarray): array of measurements, shape (z_dim, n).
        t (np.ndarray): array of time deltas, shape (n,).
        x_obs (np.ndarray): array of observer positions (x_dim, n).
        f_obs: observation function.
        tolerance (float): convergence tolerance.
        max_iterations (int): maximum number of iterations.
    Returns:
        x_0 (np.ndarray): new estimate for the initial state vector.
        output (dict): number of iterations performed and a singularity flag.
    """
    n = z.shape[1]

    Phi_0 = np.eye(x_0.shape[0])   # Initial state transition matrix
    x_hat_0 = np.zeros(x_0.shape)  # Nominal trajectory update
    x_bar_0 = np.zeros(x_0.shape)  # A priori estimate

    W = np.linalg.inv(R)
    W_vec = np.repeat(np.expand_dims(W, axis=2), n, axis=2)

    error = 1
    i = 0
    singular = False

    while np.abs(error) > tolerance and i < max_iterations:
        i += 1

        # Check whether the initial uncertainty has been set
        if np.count_nonzero(P_bar_0) == 0:
            L = np.zeros((x_0.shape[0], x_0.shape[0]))
        else:
            L = np.linalg.inv(P_bar_0)

        N = L.dot(x_bar_0)

        # Propagate; flatten the STM and append it to the state vector
        x_Phi = np.transpose(odeint(orbdyn_2body_stm,
                                    np.concatenate([x_0.squeeze(), Phi_0.flatten()]),
                                    t, args=(MU,)))
        X = x_Phi[0:6, ]
        Phi = x_Phi[6:, ].reshape((x_0.shape[0], x_0.shape[0], t.shape[0]))

        # Calculate projected observations (projected measurements and H_tilde)
        y, H_t = f_obs(X, x_obs)
        dy = np.expand_dims(z - y, axis=1)

        # Calculate H
        H_k = np.einsum('ijl,jkl->ikl', H_t, Phi)
        H_kt = np.transpose(H_k, axes=(1, 0, 2))

        # Batch update
        L += np.einsum('ijl,jkl,kml->im', H_kt, W_vec, H_k)
        N += np.einsum('ijl,jkl,kml->im', H_kt, W_vec, dy)

        temp = np.copy(x_hat_0)

        try:
            x_hat_0 = np.linalg.inv(L).dot(N)
        except np.linalg.LinAlgError:
            print("Singular matrix exception.")
            singular = True
            break

        x_0 += x_hat_0
        x_bar_0 -= x_hat_0
        error = np.abs(np.linalg.norm(temp - x_hat_0))

        np.set_printoptions(precision=2)

    output = {'num_it': i, 'singular': singular}

    return x_0, output

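# A minimal sketch of how the range/range-rate and Doppler utilities compose.
# Toy state vectors; `c_prop` is a local stand-in for the star-imported
# propagation-speed constant, and the 435 MHz beacon frequency is illustrative.
if __name__ == "__main__":
    c_prop = 299792458.0  # m/s

    # One satellite and one observer, stacked as (6, n) columns of (pos, vel)
    x_sat_demo = np.array([[7.0e6], [0.0], [0.0], [1.0e3], [7.5e3], [0.0]])
    x_obs_demo = np.array([[6.371e6], [0.0], [0.0], [0.0], [0.0], [0.0]])

    r_demo, rr_demo = range_range_rate(x_sat_demo, x_obs_demo)
    df_demo = doppler_shift(x_sat_demo, x_obs_demo, f_ref=435e6, c=c_prop)
    print(r_demo, rr_demo, df_demo)  # ~629 km range, 1 km/s radial, ~1.45 kHz shift
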
"""simd float32vec"""<def_stmt>get_data <block_start><return>[1.9 1.8 1.7 0.6 0.99 0.88 0.77 0.66]<block_end><def_stmt>main ## the translator knows this is a float32vec because there are more than 4 elements
<block_start>x=y=z=w=22/7<line_sep>a=numpy.array([1.1 1.2 1.3 0.4 x y z w] dtype=numpy.float32)<line_sep>## in this case the translator is not sure what the length of `u` is, so it defaults
## to using a float32vec.
u=get_data()<line_sep>b=numpy.array(u dtype=numpy.float32)<line_sep>c=a+b<line_sep>print(c)<line_sep>TestError(c[0]<eq>3.0)<line_sep>TestError(c[1]<eq>3.0)<line_sep>TestError(c[2]<eq>3.0)<line_sep>TestError(c[3]<eq>1.0)<block_end> |
<import_stmt>unittest<import_stmt>onnxruntime# noqa
<import_stmt>torch<import_stmt>numpy<as>np<import_stmt>io<import_stmt>itertools<import_stmt>copy<import_from_stmt>torch.nn.utils rnn<as>rnn_utils<import_from_stmt>model_defs.lstm_flattening_result LstmFlatteningResult<import_from_stmt>model_defs.rnn_model_with_packed_sequence RnnModelWithPackedSequence<import_from_stmt>test_pytorch_common skipIfUnsupportedMinOpsetVersion disableScriptTest skipIfUnsupportedOpsetVersion skipIfNoLapack skipIfUnsupportedMaxOpsetVersion skipIfONNXShapeInference <import_from_stmt>test_pytorch_common BATCH_SIZE<import_from_stmt>test_pytorch_common RNN_BATCH_SIZE RNN_SEQUENCE_LENGTH RNN_INPUT_SIZE RNN_HIDDEN_SIZE<import_from_stmt>typing List<import_stmt>model_defs.word_language_model<as>word_language_model<import_stmt>torchvision<import_stmt>onnx<def_stmt>to_numpy tensor<block_start><if_stmt>tensor.requires_grad<block_start><return>tensor.detach().cpu().numpy()<block_end><else_stmt><block_start><return>tensor.cpu().numpy()<block_end><block_end><def_stmt>convert_to_onnx model input=<none> opset_version=9 example_outputs=<none> do_constant_folding=<true> keep_initializers_as_inputs=<true> dynamic_axes=<none> input_names=<none> output_names=<none> fixed_batch_size=<false> training=<none> onnx_shape_inference=<false> use_new_jit_passes=<false># export the model to ONNX
<block_start>f=io.BytesIO()<line_sep>input_copy=copy.deepcopy(input)<line_sep>torch.onnx._export(model input_copy f opset_version=opset_version example_outputs=example_outputs do_constant_folding=do_constant_folding keep_initializers_as_inputs=keep_initializers_as_inputs dynamic_axes=dynamic_axes input_names=input_names output_names=output_names fixed_batch_size=fixed_batch_size training=training onnx_shape_inference=onnx_shape_inference use_new_jit_passes=use_new_jit_passes)<line_sep># compute onnxruntime output prediction
ort_sess=onnxruntime.InferenceSession(f.getvalue())<line_sep><return>ort_sess<block_end><def_stmt>run_ort ort_sess input<block_start>input_copy=copy.deepcopy(input)<line_sep>input,_=torch.jit._flatten(input_copy)<line_sep>inputs=list(map(to_numpy input))<line_sep>ort_inputs=dict((ort_sess.get_inputs()[i].name input)<for>i,input enumerate(inputs))<line_sep>ort_outs=ort_sess.run(<none> ort_inputs)<line_sep><return>ort_outs<block_end><def_stmt>ort_compare_with_pytorch ort_outs output rtol atol<block_start>output,_=torch.jit._flatten(output)<line_sep>outputs=list(map(to_numpy output))<line_sep># compare onnxruntime and PyTorch results
<assert_stmt>len(outputs)<eq>len(ort_outs) "number of outputs differ"<line_sep># compare onnxruntime and PyTorch results
[np.testing.assert_allclose(out ort_out rtol=rtol atol=atol)<for>out,ort_out zip(outputs ort_outs)]<block_end><def_stmt>run_model_test self model batch_size=2 state_dict=<none> input=<none> use_gpu=<true> rtol=0.001 atol=1e-7 example_outputs=<none> do_constant_folding=<true> dynamic_axes=<none> test_with_inputs=<none> input_names=<none> output_names=<none> fixed_batch_size=<false><block_start>model.eval()<if_stmt>input<is><none><block_start>input=torch.randn(batch_size 3 224 224 requires_grad=<true>)<block_end><with_stmt>torch.no_grad()<block_start><if_stmt>isinstance(input torch.Tensor)<block_start>input=(input )<block_end># In-place operators will update input tensor data as well.
# Thus inputs are replicated before every forward call.
input_copy=copy.deepcopy(input)<line_sep>output=model(*input_copy)<if_stmt>isinstance(output torch.Tensor)<block_start>output=(output )<block_end>ort_sess=convert_to_onnx(model input=input opset_version=self.opset_version example_outputs=output do_constant_folding=do_constant_folding keep_initializers_as_inputs=self.keep_initializers_as_inputs dynamic_axes=dynamic_axes input_names=input_names output_names=output_names fixed_batch_size=fixed_batch_size training=<none> onnx_shape_inference=self.onnx_shape_inference use_new_jit_passes=self.use_new_jit_passes)<line_sep>ort_outs=run_ort(ort_sess input)<line_sep>ort_compare_with_pytorch(ort_outs output rtol atol)<line_sep># if additional test inputs are provided run the onnx
# model with these inputs and check the outputs
<if_stmt>test_with_inputs<is><not><none><block_start><for_stmt>test_input test_with_inputs<block_start><if_stmt>isinstance(test_input torch.Tensor)<block_start>test_input=(test_input )<block_end>test_input_copy=copy.deepcopy(test_input)<line_sep>output=model(*test_input_copy)<if_stmt>isinstance(output torch.Tensor)<block_start>output=(output )<block_end>ort_outs=run_ort(ort_sess test_input)<line_sep>ort_compare_with_pytorch(ort_outs output rtol atol)<block_end><block_end><block_end><block_end><class_stmt>TestONNXRuntime(unittest.TestCase)<block_start><import_from_stmt>torch.onnx.symbolic_helper _export_onnx_opset_version<line_sep>opset_version=_export_onnx_opset_version<line_sep>keep_initializers_as_inputs=<true># For IR version 3 type export.
use_new_jit_passes=<false># For testing main code-path
onnx_shape_inference=<false><def_stmt>setUp self<block_start>torch.manual_seed(0)<line_sep>onnxruntime.set_seed(0)<if_stmt>torch.cuda.is_available()<block_start>torch.cuda.manual_seed_all(0)<block_end>np.random.seed(seed=0)<line_sep>self.is_script_test_enabled=<true><block_end><def_stmt>run_test self model input rtol=1e-3 atol=1e-7 do_constant_folding=<true> batch_size=2 use_gpu=<true> dynamic_axes=<none> test_with_inputs=<none> input_names=<none> output_names=<none> fixed_batch_size=<false><block_start><def_stmt>_run_test m<block_start><return>run_model_test(self m batch_size=batch_size input=input use_gpu=use_gpu rtol=rtol atol=atol do_constant_folding=do_constant_folding dynamic_axes=dynamic_axes test_with_inputs=test_with_inputs input_names=input_names output_names=output_names fixed_batch_size=fixed_batch_size)<block_end><if_stmt>self.is_script_test_enabled<and>self.use_new_jit_passes<block_start>script_model=torch.jit.script(model)<line_sep>_run_test(script_model)<block_end>_run_test(model)<block_end><def_stmt>run_model_test_with_external_data self model input rtol=0.001 atol=1e-7 example_outputs=<none> do_constant_folding=<true> dynamic_axes=<none> input_names=<none> output_names=<none> ort_optim_on=<true><block_start><import_stmt>os<import_stmt>tempfile<line_sep>model.eval()<with_stmt>torch.no_grad()<block_start><if_stmt>isinstance(input torch.Tensor)<block_start>input=(input )<block_end># In-place operators will update input tensor data as well.
# Thus inputs are replicated before every forward call.
input_copy=copy.deepcopy(input)<line_sep>output=model(*input_copy)<if_stmt>isinstance(output torch.Tensor)<block_start>output=(output )<block_end># export the model to ONNX
<with_stmt>tempfile.TemporaryDirectory()<as>tmpdirname<block_start>model_file_name=os.path.join(tmpdirname 'model.onnx')<line_sep>input_copy=copy.deepcopy(input)<line_sep>torch.onnx.export(model input_copy model_file_name opset_version=self.opset_version example_outputs=output verbose=<false> do_constant_folding=do_constant_folding keep_initializers_as_inputs=self.keep_initializers_as_inputs dynamic_axes=dynamic_axes input_names=input_names output_names=output_names use_external_data_format=<true>)<line_sep># compute onnxruntime output prediction
ort_sess_opt=onnxruntime.SessionOptions()<line_sep>ort_sess_opt.graph_optimization_level=onnxruntime.GraphOptimizationLevel.ORT_ENABLE_EXTENDED<if>ort_optim_on<else>onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL<line_sep>ort_sess=onnxruntime.InferenceSession(model_file_name sess_options=ort_sess_opt)<line_sep>input_copy=copy.deepcopy(input)<line_sep>ort_outs=run_ort(ort_sess input_copy)<line_sep>ort_compare_with_pytorch(ort_outs output rtol atol)<block_end><block_end><block_end>@skipIfUnsupportedMinOpsetVersion(9)# Because external data format was released with Opset 9.
<def_stmt>test_embedding_model_with_external_data self<block_start><class_stmt>LargeModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(LargeModel self).__init__()<line_sep>dim=15<line_sep>n=4<times>100<line_sep>self.emb=torch.nn.Embedding(n dim)<line_sep>self.lin1=torch.nn.Linear(dim 1)<line_sep>self.seq=torch.nn.Sequential(self.emb self.lin1 )<block_end><def_stmt>forward self input<block_start><return>self.seq(input)<block_end><block_end>model=LargeModel()<line_sep>x=torch.tensor([2] dtype=torch.long)<line_sep>self.run_model_test_with_external_data(model x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)# Because external data format was released with Opset 9.
<def_stmt>test_mobilenet_v2_with_external_data self<block_start>model=torchvision.models.mobilenet_v2(pretrained=<true>)<line_sep>x=torch.randn(2 3 224 224 requires_grad=<true>)<line_sep># We are turning off Onnx Runtime optimization off in this test,
# because external data format is not supported to in ORT optimizer.
# Once that support is added, we can set ort_optim_on=True (default).
self.run_model_test_with_external_data(model x rtol=1e-3 atol=1e-5 ort_optim_on=<false>)<block_end>@skipIfUnsupportedMinOpsetVersion(9)# Because external data format was released with Opset 9.
<def_stmt>test_attribute_with_external_data self<block_start><class_stmt>LargeModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x+torch.ones(2 1024)<block_end><block_end>x=torch.randn(2 1)<line_sep>self.run_model_test_with_external_data(LargeModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)# Because external data format was released with Opset 9.
@unittest.skip("Enable this once large model with subgraph is supported in ORT")<def_stmt>test_subgraph_with_external_data self<block_start><class_stmt>LargeModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><for_stmt>i range(x.size(0))<block_start>x=x+torch.ones(2 1024)<block_end><return>x<block_end><block_end>x=torch.randn(2 1)<line_sep>self.run_model_test_with_external_data(torch.jit.script(LargeModel()) x)<block_end><def_stmt>test_fuse_conv_bn1d self<block_start><class_stmt>Fuse(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Fuse self).__init__()<line_sep>self.conv=torch.nn.Conv1d(16 33 3 stride=2)<line_sep>self.bn=torch.nn.BatchNorm1d(33)<block_end><def_stmt>forward self x<block_start>out=self.conv(x)<line_sep><return>self.bn(out)<block_end><block_end>model=Fuse()<line_sep>x=torch.randn(20 16 50 requires_grad=<true>)<line_sep>self.run_test(model (x ))<block_end><def_stmt>test_fuse_conv_bn2d self<block_start><class_stmt>Fuse(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Fuse self).__init__()<line_sep>self.conv=torch.nn.Conv2d(3 2 kernel_size=1 stride=2 padding=3 bias=<false>)<line_sep>self.bn=torch.nn.BatchNorm2d(2)<block_end><def_stmt>forward self x<block_start>out=self.conv(x)<line_sep><return>self.bn(out)<block_end><block_end>model=Fuse()<line_sep>x=torch.randn(2 3 2 2 requires_grad=<true>)<line_sep>self.run_test(model (x ))<block_end><def_stmt>test_fuse_conv_bn3d self<block_start><class_stmt>Fuse(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Fuse self).__init__()<line_sep>self.conv=torch.nn.Conv3d(3 2 (3 5 2) stride=(2 1 1) padding=(3 2 0) bias=<false>)<line_sep>self.bn=torch.nn.BatchNorm3d(2)<block_end><def_stmt>forward self x<block_start>out=self.conv(x)<line_sep><return>self.bn(out)<block_end><block_end>model=Fuse()<line_sep>x=torch.randn(2 3 10 50 100 requires_grad=<true>)<line_sep>self.run_test(model (x ) rtol=1e-3 atol=1e-6)<block_end><def_stmt>test_reshape_constant_fold self<block_start><class_stmt>Reshape(torch.nn.Module)<block_start><def_stmt>__init__ self <block_start>super(Reshape self).__init__()<line_sep>self.register_buffer("weight" torch.ones(5))<block_end><def_stmt>forward self x<block_start>scale_1=self.weight.reshape(1 -1 1 1)<line_sep><return>x<times>scale_1<block_end><block_end>x=torch.randn(4 5)<line_sep>self.run_test(Reshape() (x ) rtol=1e-3 atol=1e-5)<block_end><def_stmt>run_word_language_model self model_name<block_start>ntokens=50<line_sep>emsize=5<line_sep>nhid=5<line_sep>nlayers=5<line_sep>dropout=0.2<line_sep>tied=<false><line_sep>batchsize=5<line_sep>model=word_language_model.RNNModel(model_name ntokens emsize nhid nlayers dropout tied batchsize)<line_sep>x=torch.arange(0 ntokens).long().view(-1 batchsize)<line_sep># Only support CPU version, since tracer is not working in GPU RNN.
self.run_test(model (x model.hidden))<block_end>@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()# Faster RCNN model is not scriptable
<def_stmt>test_faster_rcnn self<block_start>model=torchvision.models.detection.faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=<true> min_size=200 max_size=300)<line_sep>model.eval()<line_sep>x=torch.randn(2 3 200 300 requires_grad=<true>)<line_sep>self.run_test(model (x ) rtol=1e-3 atol=1e-5)<block_end><def_stmt>get_image_from_url self url<block_start><import_stmt>os<import_from_stmt>urllib.parse urlsplit<import_from_stmt>urllib request<import_from_stmt>PIL Image<import_from_stmt>torchvision transforms<import_from_stmt>torch._utils_internal get_writable_path<line_sep>filename=os.path.basename(urlsplit(url)[2])<line_sep>data_dir=get_writable_path(os.path.join(os.path.dirname(__file__)))<line_sep>path=os.path.join(data_dir filename)<line_sep>data=request.urlopen(url timeout=15).read()<with_stmt>open(path 'wb')<as>f<block_start>f.write(data)<block_end>image=Image.open(path).convert("RGB")<line_sep>image=image.resize((300 200) Image.BILINEAR)<line_sep>to_tensor=transforms.ToTensor()<line_sep><return>to_tensor(image)<block_end><def_stmt>get_test_images self<block_start>image_url="http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg"<line_sep>image=self.get_image_from_url(url=image_url)<line_sep>images=[image]<line_sep><return>images<block_end>@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()<def_stmt>test_mask_rcnn self<block_start>model=torchvision.models.detection.mask_rcnn.maskrcnn_resnet50_fpn(pretrained=<true> min_size=200 max_size=300)<line_sep>images=self.get_test_images()<line_sep>self.run_test(model (images ) rtol=1e-3 atol=1e-5)<block_end>@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()<def_stmt>test_keypoint_rcnn self<block_start>model=torchvision.models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(pretrained=<true> min_size=200 max_size=300)<line_sep>images=self.get_test_images()<line_sep>self.run_test(model (images ) rtol=1e-3 atol=1e-5)<block_end>@disableScriptTest()<def_stmt>test_word_language_model_RNN_TANH self<block_start>self.run_word_language_model("RNN_TANH")<block_end>@disableScriptTest()<def_stmt>test_word_language_model_RNN_RELU self<block_start>self.run_word_language_model("RNN_RELU")<block_end>@disableScriptTest()<def_stmt>test_word_language_model_LSTM self<block_start>self.run_word_language_model("LSTM")<block_end>@disableScriptTest()<def_stmt>test_word_language_model_GRU self<block_start>self.run_word_language_model("GRU")<block_end><def_stmt>test_index_1d self<block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[0]<block_end><block_end>m1=torch.randn(3 4 5 6 7)<line_sep>self.run_test(MyModel() m1)<block_end><def_stmt>test_index_2d_1dimslice self<block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[0:1 :]<block_end><block_end>m1=torch.randn(3 4 5 6 7)<line_sep>self.run_test(MyModel() m1)<block_end><def_stmt>test_index_2d_sliceint self<block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[1 :]<block_end><block_end>m1=torch.randn(3 4 5 6 7)<line_sep>self.run_test(MyModel() m1)<block_end><def_stmt>test_index_2d_neg_slice self<block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[0:-1 :]<block_end><block_end>m1=torch.randn(3 4 5 6 7)<line_sep>self.run_test(MyModel() m1)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_index_mask 
self<block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[torch.tensor([0 1 0] dtype=torch.uint8)]<block_end><block_end>m1=torch.randn(3 4 5 6 7)<line_sep>self.run_test(MyModel() m1)<class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[torch.tensor([0 1 0] dtype=torch.bool)]<block_end><block_end>m1=torch.randn(3 4 5 6 7)<line_sep>self.run_test(MyModel() m1)<block_end>@disableScriptTest()<def_stmt>test_dict self<block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self x_in<block_start>x_out={}<line_sep>x_out["test_key_out"]=torch.add(x_in[list(x_in.keys())[0]] list(x_in.keys())[0])<line_sep><return>x_out<block_end><block_end>x={torch.tensor(1.):torch.randn(1 2 3)}<line_sep>self.run_test(MyModel() (x ))<block_end>@disableScriptTest()<def_stmt>test_dict_str self<block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self x_in<block_start>x_out={}<line_sep>x_out["test_key_out"]=torch.add(x_in["test_key_in"] 2.)<line_sep><return>x_out<block_end><block_end>x={"test_key_in":torch.randn(1 2 3)}<line_sep>self.run_test(MyModel() (x ))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_cste_script self<block_start><class_stmt>MyModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>torch.zeros(x.size(0)) torch.ones((x.size(1) x.size(0)) dtype=torch.int64)<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(MyModel() x)<block_end><def_stmt>test_scalar_tensor self<block_start><class_stmt>test(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.scalar_tensor(input.size(0)) torch.scalar_tensor(input.size(1) dtype=torch.int64)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.randn(7 8 9)<line_sep>model=test()<line_sep>self.run_test(model x test_with_inputs=[y] input_names=['input_1'] dynamic_axes={'input_1':[0 1 2]})<block_end><def_stmt>test_tensor self<block_start><class_stmt>ScalarInputModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>torch.tensor(input.shape[1])<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(ScalarInputModel() x)<class_stmt>TensorInputModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>torch.tensor([input.shape[0] input.shape[1]])<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(TensorInputModel() x)<class_stmt>FloatInputModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>torch.tensor([float(input)])<block_end><block_end>x=torch.randn(1)<line_sep>self.run_test(FloatInputModel() x)<class_stmt>InputWithDtypeModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>torch.tensor(input.shape[1] dtype=torch.long)<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(InputWithDtypeModel() x)<class_stmt>MixedInputModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>torch.tensor([input.shape[0] int(input)])<block_end><block_end>x=torch.randn(1)<line_sep>self.run_test(MixedInputModel() x)<block_end><def_stmt>test_hardtanh self<block_start>model=torch.nn.Hardtanh(-1.5 2.5)<line_sep>x=torch.arange(-5 5).to(dtype=torch.float32)<line_sep>self.run_test(model 
x)<block_end><def_stmt>test_hardtanh_script_with_default_values self<block_start><class_stmt>MyModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>torch.nn.functional.hardtanh(x)<block_end><block_end>x=torch.arange(-5 5).to(dtype=torch.float32)<line_sep>self.run_test(MyModel() x)<block_end><def_stmt>test_clamp self<block_start><class_stmt>ClampModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.clamp(-0.5 0.5)<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(ClampModel() x)<class_stmt>ClampMinModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.clamp(min=-0.5)<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(ClampMinModel() x)<class_stmt>ClampMaxModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.clamp(max=0.5)<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(ClampMaxModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_clamp_dyn self<block_start><class_stmt>ClampMaxModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>x.clamp(<none> x.size(0))<block_end><block_end>x=torch.arange(16).view(4 4).float()<line_sep>self.run_test(ClampMaxModel() x)<class_stmt>ClampMinModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>x.clamp(x.size(0) <none>)<block_end><block_end>x=torch.arange(16).view(4 4).float()<line_sep>self.run_test(ClampMinModel() x)<class_stmt>ClampMinMaxModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>x.clamp(x.size(0) x.size(1))<block_end><block_end>x=torch.arange(16).view(2 8).float()<line_sep>self.run_test(ClampMinMaxModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_full_trace self<block_start><class_stmt>FullModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.full((3 4) x dtype=torch.long)<block_end><block_end>x=torch.tensor(12)<line_sep>self.run_test(FullModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_full_script self<block_start><class_stmt>FullModelScripting(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>torch.full((3 4) x dtype=torch.long)<block_end><block_end>x=torch.tensor(12)<line_sep>self.run_test(FullModelScripting() x)<block_end><def_stmt>test_fuse_addmm self<block_start><class_stmt>AddmmModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.mm(x x)+x<block_end><block_end>x=torch.ones(3 3)<line_sep>self.run_test(AddmmModel() x)<block_end><def_stmt>test_maxpool self<block_start>model=torch.nn.MaxPool1d(2 stride=1)<line_sep>x=torch.randn(20 16 50)<line_sep>self.run_test(model x)<block_end><def_stmt>test_conv self<block_start><class_stmt>TraceModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(TraceModel self).__init__()<line_sep>self.conv1=torch.nn.Conv1d(16 33 3 stride=2)<line_sep>self.conv2=torch.nn.Conv2d(16 33 (3 5) stride=(2 1) padding=(4 2) dilation=(3 1))<line_sep>self.conv3=torch.nn.Conv3d(16 33 (3 5 2) stride=(2 1 1) padding=(4 2 0))<block_end><def_stmt>forward self input1 input2 input3<block_start><return>self.conv1(input1) self.conv2(input2) self.conv3(input3)<block_end><block_end><class_stmt>ScriptModel(torch.jit.ScriptModule)<block_start><def_stmt>__init__ self<block_start>super(ScriptModel 
self).__init__()<line_sep>self.conv1=torch.nn.Conv1d(16 33 3 stride=2)<line_sep>self.conv2=torch.nn.Conv2d(16 33 (3 5) stride=(2 1) padding=(4 2) dilation=(3 1))<line_sep>self.conv3=torch.nn.Conv3d(16 33 (3 5 2) stride=(2 1 1) padding=(4 2 0))<block_end>@torch.jit.script_method<def_stmt>forward self input1 input2 input3<block_start><return>self.conv1(input1) self.conv2(input2) self.conv3(input3)<block_end><block_end>x1=torch.randn(20 16 50)<line_sep>x2=torch.randn(20 16 50 100)<line_sep>x3=torch.randn(20 16 10 50 100)<line_sep>self.run_test(TraceModel() (x1 x2 x3) atol=10e-5)<line_sep>self.run_test(ScriptModel() (x1 x2 x3) atol=10e-5)<block_end><def_stmt>test_conv_shape_inference self<block_start><class_stmt>Model(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Model self).__init__()<line_sep>self.conv2=torch.nn.Conv2d(16 33 (3 5) stride=(2 1) padding=(4 2) dilation=(3 1))<block_end><def_stmt>forward self input<block_start><return>self.conv2(input)+2<block_end><block_end>x=torch.randn(20 16 50 100)<line_sep>self.run_test(Model() x atol=10e-5 input_names=['x'] dynamic_axes={'x':[0]})<block_end><def_stmt>test_conv_transpose self<block_start><class_stmt>TraceModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(TraceModel self).__init__()<line_sep>self.conv1=torch.nn.ConvTranspose1d(16 33 3 stride=2)<line_sep>self.conv2=torch.nn.ConvTranspose2d(16 33 (3 5) stride=(2 1) padding=(4 2) dilation=(3 1))<line_sep>self.conv3=torch.nn.ConvTranspose3d(16 33 (3 5 2) stride=(2 1 1) padding=(4 2 0))<block_end><def_stmt>forward self input1 input2 input3<block_start><return>self.conv1(input1) self.conv2(input2) self.conv3(input3)<block_end><block_end><class_stmt>ScriptModel(torch.jit.ScriptModule)<block_start><def_stmt>__init__ self<block_start>super(ScriptModel self).__init__()<line_sep>self.conv1=torch.nn.ConvTranspose1d(16 33 3 stride=2)<line_sep>self.conv2=torch.nn.ConvTranspose2d(16 33 (3 5) stride=(2 1) padding=(4 2) dilation=(3 1))<line_sep>self.conv3=torch.nn.ConvTranspose3d(16 33 (3 5 2) stride=(2 1 1) padding=(4 2 0))<block_end>@torch.jit.script_method<def_stmt>forward self input1 input2 input3<block_start><return>self.conv1(input1) self.conv2(input2) self.conv3(input3)<block_end><block_end>x1=torch.randn(20 16 50)<line_sep>x2=torch.randn(20 16 50 100)<line_sep>x3=torch.randn(20 16 10 50 100)<line_sep>self.run_test(TraceModel() (x1 x2 x3) atol=10e-5)<line_sep>self.run_test(ScriptModel() (x1 x2 x3) atol=10e-5)<block_end># Conversion of Transpose depends on input shape to be known.
# The following test only works when onnx shape inference is enabled.
@skipIfONNXShapeInference(<false>)<def_stmt>test_transpose_infer_shape self<block_start><class_stmt>TransposeModule(torch.jit.ScriptModule)<block_start><def_stmt>__init__ self<block_start>super(TransposeModule self).__init__()<line_sep>self.conv=torch.nn.Conv2d(3 1 3 stride=2)<block_end>@torch.jit.script_method<def_stmt>forward self x<block_start>x=self.conv(x)<line_sep><return>x.transpose(0 1)<block_end><block_end>x=torch.randn(32 3 64 64)<line_sep>self.run_test(TransposeModule() x)<block_end><def_stmt>squeeze_model_tests self d x1 x2<block_start><class_stmt>Squeeze(torch.nn.Module)<block_start><def_stmt>__init__ self d<block_start>super(Squeeze self).__init__()<line_sep>self.d=d<block_end><def_stmt>forward self x<block_start><if_stmt>self.d<is><not><none><block_start><return>torch.squeeze(x dim=self.d)<block_end><else_stmt><block_start><return>torch.squeeze(x)<block_end><block_end><block_end>x2=[]<if>x2<is><none><else>[x2]<line_sep>self.run_test(Squeeze(d) x1 input_names=['input'] dynamic_axes={'input':{0:'0' 1:'1' 2:'2'}} test_with_inputs=x2)<block_end><def_stmt>test_squeeze_without_no_op self<block_start>x=torch.randn(2 1 4)<line_sep>self.squeeze_model_tests(1 x <none>)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_squeeze self<block_start>x_squeeze=torch.randn(2 1 4)<line_sep>x_noop=torch.randn(2 2 3)<line_sep>self.squeeze_model_tests(1 x_squeeze x_noop)<block_end><def_stmt>test_squeeze_neg_without_no_op self<block_start>x=torch.randn(2 1 4)<line_sep>self.squeeze_model_tests(-2 x <none>)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_squeeze_neg self<block_start>x_squeeze=torch.randn(2 1 4)<line_sep>x_noop=torch.randn(2 2 3)<line_sep>self.squeeze_model_tests(-2 x_squeeze x_noop)<block_end><def_stmt>test_squeeze_all_dims self<block_start>x_squeeze=torch.randn(2 1 4)<line_sep>x_noop=torch.randn(2 2 3)<line_sep>self.squeeze_model_tests(<none> x_squeeze x_noop)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_squeeze_no_op self<block_start>x_noop=torch.randn(2 1 4)<line_sep>x_squeeze=torch.randn(2 2 1)<line_sep>self.squeeze_model_tests(2 x_noop x_squeeze)<block_end><def_stmt>test_squeeze_no_op_without_additional_inputs self<block_start>x_noop=torch.randn(2 1 4)<line_sep>self.squeeze_model_tests(2 x_noop <none>)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_squeeze_runtime_dim self<block_start><class_stmt>Squeeze(torch.nn.Module)<block_start><def_stmt>forward self d1 d2<block_start>t=torch.zeros(d1[0] d2[0])<line_sep><return>t.squeeze(0)<block_end><block_end>d1=torch.tensor([1])<line_sep>d3=torch.tensor([3])<line_sep>d4=torch.tensor([4])<line_sep>self.run_test(Squeeze() (d1 d4) test_with_inputs=[(d3 d4)])<line_sep>self.run_test(Squeeze() (d3 d4) test_with_inputs=[(d1 d3)])<block_end><def_stmt>test_unsqueeze self<block_start><class_stmt>Unsqueeze(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.unsqueeze(x dim=-2)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(Unsqueeze() x)<block_end><def_stmt>test_maxpool_default_stride self<block_start><class_stmt>MaxPoolModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.nn.functional.max_pool2d(x 2)<block_end><block_end>model=MaxPoolModel()<line_sep>x=torch.randn(10 20 16 50)<line_sep>self.run_test(model x)<block_end>@skipIfUnsupportedMinOpsetVersion(8)<def_stmt>test_maxpool_adaptive self<block_start>model=torch.nn.AdaptiveMaxPool1d((5) return_indices=<false>)<line_sep>x=torch.randn(20 16 50 
requires_grad=<true>)<line_sep>self.run_test(model x)<block_end><def_stmt>test_maxpool_2d self<block_start>model=torch.nn.MaxPool2d(5 padding=(1 2))<line_sep>x=torch.randn(1 20 16 50 requires_grad=<true>)<line_sep>self.run_test(model x)<block_end><def_stmt>test_maxpool_1d_ceil self<block_start>model=torch.nn.MaxPool1d(3 2 ceil_mode=<true>)<line_sep>x=torch.randn(20 16 50)<line_sep>self.run_test(model x)<block_end><def_stmt>test_maxpool_2d_ceil self<block_start>model=torch.nn.MaxPool2d(3 2 ceil_mode=<true>)<line_sep>x=torch.randn(20 16 50 32)<line_sep>self.run_test(model x)<block_end><def_stmt>test_maxpool_3d_ceil self<block_start>model=torch.nn.MaxPool3d(3 2 ceil_mode=<true>)<line_sep>x=torch.randn(20 16 50 44 31)<line_sep>self.run_test(model x)<block_end>@skipIfUnsupportedMinOpsetVersion(8)@disableScriptTest()# Functional module not scriptable
<def_stmt>test_maxpool_with_indices self<block_start>model=torch.nn.MaxPool1d(2 stride=1 return_indices=<true>)<line_sep>x=torch.randn(20 16 50)<line_sep>self.run_test(model x)<block_end>@skipIfUnsupportedMinOpsetVersion(10)<def_stmt>test_maxpool_dilation self<block_start>model=torch.nn.MaxPool1d(2 stride=1 dilation=2)<line_sep>x=torch.randn(20 16 50)<line_sep>self.run_test(model x)<block_end><def_stmt>test_avgpool_default_stride self<block_start><class_stmt>AvgPoolModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.nn.functional.avg_pool2d(x 2)<block_end><block_end>model=AvgPoolModel()<line_sep>x=torch.randn(10 20 16 50)<line_sep>self.run_test(model x)<block_end><def_stmt>test_avgpool self<block_start>model=torch.nn.AvgPool1d(2 stride=1)<line_sep>x=torch.randn(20 16 50)<line_sep>self.run_test(model x)<block_end><def_stmt>test_avgpool_1d_ceil self<block_start>model=torch.nn.AvgPool1d(3 2 ceil_mode=<true>)<line_sep>x=torch.randn(1 1 7)<line_sep>self.run_test(model x)<block_end><def_stmt>test_avgpool_2d_ceil self<block_start>model=torch.nn.AvgPool2d(3 2 ceil_mode=<true>)<line_sep>x=torch.randn(20 16 50 32)<line_sep>self.run_test(model x)<block_end><def_stmt>test_avgpool_3d_ceil self<block_start>model=torch.nn.AvgPool3d(3 2 ceil_mode=<true>)<line_sep>x=torch.randn(20 16 50 44 31)<line_sep>self.run_test(model x)<block_end><def_stmt>test_arithmetic self<block_start><class_stmt>ArithmeticModule(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>x=x+2<line_sep>x=x-4<line_sep>x=x<times>6<line_sep>x=x/8<line_sep><return>x<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(ArithmeticModule() x)<block_end># In scripting the first transpose node do not carry shape and dtype info.
# The following test only works when onnx shape inference is enabled.
@skipIfONNXShapeInference(<false>)<def_stmt>test_arithmetic_infer_dtype self<block_start><class_stmt>ArithmeticModule(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start>x=x.t()<line_sep>x=x+2<line_sep>x=x-4<line_sep>x=x<times>6<line_sep>x=x/8<line_sep><return>x<block_end><block_end>x=torch.randn(2 3)<line_sep>self.run_test(ArithmeticModule() x)<block_end><def_stmt>test_floor_div self<block_start><class_stmt>FloorDivModule(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start><return>x<floordiv>3 x<floordiv>2. x.to(dtype=torch.float64)<floordiv>3 x.to(dtype=torch.float64)<floordiv>2. x.to(dtype=torch.int64)<floordiv>3 x.to(dtype=torch.int64)<floordiv>2. x<floordiv>(y+1.).to(dtype=torch.int64) x<floordiv>y x.to(dtype=torch.float64)<floordiv>y.to(dtype=torch.int64) x.to(dtype=torch.float64)<floordiv>y.to(dtype=torch.float64) x.to(dtype=torch.int64)<floordiv>y.to(dtype=torch.int64) x.to(dtype=torch.int64)<floordiv>y<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.arange(1 2<times>3<times>4+1).reshape(2 3 4)<line_sep>self.run_test(FloorDivModule() (x y))<block_end><def_stmt>test_floor_div_script self<block_start><class_stmt>FloorDivModule(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x y<block_start><return>x<floordiv>3 x<floordiv>2. x<floordiv>y<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.randn(2 3 4)<line_sep>self.run_test(FloorDivModule() (x y))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_floordiv self<block_start><class_stmt>FloordivModule(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.new_zeros(x.size(2)<floordiv>x.size(1))<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(FloordivModule() (x ))<block_end><def_stmt>test_div self<block_start><class_stmt>DivModule(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start><return>x/y torch.true_divide(x y)<block_end><block_end>x=torch.randn(2 3 4).to(torch.int)<line_sep>y=torch.arange(1 2<times>3<times>4+1).reshape(2 3 4).to(torch.int)<line_sep>self.run_test(DivModule() (x y))<line_sep>self.run_test(DivModule() (x.float() y.float()))<block_end># Note: div cannot (generally) be exported via scripting
# since its type promotion logic is dependent on knowing the scalar types
# of the input tensors. That is, the ONNX graph is dependent on the
# data type of the inputs. This makes it appropriate for tracing only.
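# For example (an illustrative sketch, not exercised by this suite):
#   torch.jit.trace(DivModule(), (x_int, y_int)) bakes a cast to the default
#   scalar type into the graph before the Div, while tracing the same module
#   with float inputs records a plain Div -- two different ONNX graphs.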
<def_stmt>test_div_promotion_trace self<block_start><class_stmt>DivModule(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start><return>x/y torch.true_divide(x y)<block_end><block_end>x=torch.randn(2 3 4).to(torch.int)<line_sep>y=torch.arange(1 2<times>3<times>4+1).reshape(2 3 4).to(torch.int)<line_sep>prev_default=torch.get_default_dtype()<line_sep>torch.set_default_dtype(torch.float)<line_sep>self.run_test(torch.jit.trace(DivModule() (x y)) (x y))<line_sep>torch.set_default_dtype(torch.double)<line_sep>self.run_test(torch.jit.trace(DivModule() (x y)) (x y))<line_sep>torch.set_default_dtype(prev_default)<block_end># In scripting x, y do not carry shape and dtype info.
# The following test only works when onnx shape inference is enabled.
@skipIfONNXShapeInference(<false>)<def_stmt>test_div_promotion_script self<block_start><class_stmt>DivModule(torch.nn.Module)<block_start><def_stmt>forward self x y# Add transpose to hide shape/type information
# Otherwise shape and type are still available from the input.
<block_start>x=x.transpose(1 2)<line_sep>y=y.transpose(1 2)<line_sep><return>x/y torch.true_divide(x y)<block_end><block_end>x=torch.randn(2 3 4).to(torch.int)<line_sep>y=torch.arange(1 2<times>3<times>4+1).reshape(2 3 4).to(torch.int)<line_sep>prev_default=torch.get_default_dtype()<line_sep># 1. x,y are int, and output is float.
# This can be handled by the default case, where both are cast to float.
# It works even if type of x, y are unknown.
torch.set_default_dtype(torch.float)<line_sep>self.run_test(torch.jit.script(DivModule()) (x y))<line_sep># 2. x,y are int, and output is double.
# This can be handled by the default case, where both are cast to double.
# It works even if type of x, y are unknown.
torch.set_default_dtype(torch.double)<line_sep>self.run_test(torch.jit.script(DivModule()) (x y))<line_sep># 3. x is int, y is double, and output is double.
# This can only be handled when both type of x and y are known.
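# (i.e. the exporter must insert an explicit cast of x to double rather than
# cast both inputs to the default scalar type, which is only possible when
# x's scalar type is known.)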
torch.set_default_dtype(prev_default)<line_sep>x=torch.randn(2 3 4).to(torch.int)<line_sep>y=torch.arange(1 2<times>3<times>4+1).reshape(2 3 4).to(torch.double)<line_sep>self.run_test(torch.jit.script(DivModule()) (x y))<block_end><def_stmt>test_slice_trace self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x[0:1]<block_end><block_end>x=torch.randn(3)<line_sep>self.run_test(MyModule() x)<block_end><def_stmt>test_slice_neg self<block_start><class_stmt>NegSlice(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x[-1:]<block_end><block_end>x=torch.randn(3 4 5)<line_sep>self.run_test(NegSlice() x)<block_end><def_stmt>test_slice_neg_large self<block_start><class_stmt>NegSlice(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x[: : -3:-1 : -1]<block_end><block_end>x=torch.randn(3 4 5 6 7)<line_sep>self.run_test(NegSlice() x)<block_end><def_stmt>test_slice_neg_large_negone self<block_start><class_stmt>NegSlice(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x[: : : : -1]<block_end><block_end>x=torch.randn(3 4 5 6 7)<line_sep>self.run_test(NegSlice() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_slice_with_input_index self<block_start><class_stmt>InputIndexSlice(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start>x[:y.size(0) 0 :]=y<line_sep><return>x<block_end><block_end>x=torch.zeros((56 6 256))<line_sep>y=torch.rand((22 256))<line_sep>self.run_test(InputIndexSlice() (x y))<block_end>@skipIfUnsupportedMinOpsetVersion(10)@disableScriptTest()# scripting tuple/list append
<def_stmt>test_slice_dynamic self<block_start><class_stmt>DynamicSliceExportMod(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>results=[]<for_stmt>i range(4)<block_start>results.append(x[:x.size(0)-i i:x.size(2) i:3])<block_end><return>tuple(results)<block_end><block_end>x=torch.rand(5 5 5)<line_sep>y=torch.randn(6 7 8)<line_sep>self.run_test(DynamicSliceExportMod() x test_with_inputs=[y] input_names=['input_1'] output_names=['output_1'] dynamic_axes={'input_1':[0 1 2] 'output_1':[0 1 2]})<block_end>@skipIfUnsupportedMinOpsetVersion(10)<def_stmt>test_slice_dynamic_script self<block_start><class_stmt>DynamicSliceModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>x[1:x.size(1)]<block_end><block_end>x=torch.rand(1 2)<line_sep>self.run_test(DynamicSliceModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(10)<def_stmt>test_slice_dynamic_shape_script self<block_start><class_stmt>DynamicSliceModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.new_zeros(x.shape[1:x.size(2)])<block_end><block_end>x=torch.rand(1 2 3 4)<line_sep>self.run_test(DynamicSliceModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(10)@disableScriptTest()# scripting tuple/list append
<def_stmt>test_slice_dynamic_to_end self<block_start><class_stmt>DynamicSliceExportMod(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>results=[]<for_stmt>i range(4)<block_start>results.append(x[: i: x.size(2)-5])<block_end><return>tuple(results)<block_end><block_end>x=torch.rand(5 5 5)<line_sep>self.run_test(DynamicSliceExportMod() x dynamic_axes={'input_1':[0 1 2] 'output_1':[0 1 2]})<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_arange_dynamic self<block_start><class_stmt>ArangeModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.arange(input.shape[0]) torch.arange(12) torch.arange(start=input.shape[0] end=input.shape[0]+5)<block_end><block_end>x=torch.randn(5 3 2)<line_sep>y=torch.randn(8 3 2)<line_sep>self.run_test(ArangeModel() x test_with_inputs=[y] input_names=['input_1'] output_names=['output_1' 'output_2' 'output_3'] dynamic_axes={'input_1':[0] 'output_1':[0]})<line_sep>self.run_test(torch.jit.script(ArangeModel()) x test_with_inputs=[y] input_names=['input_1'] output_names=['output_1' 'output_2' 'output_3'] dynamic_axes={'input_1':[0] 'output_1':[0]})<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_dynamic_arange_out self<block_start><class_stmt>ArangeOutModel(torch.nn.Module)<block_start><def_stmt>forward self end<block_start>out_t=torch.tensor([1] dtype=torch.int64)<line_sep><return>torch.arange(end out=out_t)<block_end><block_end>x=torch.tensor(8)<line_sep>self.run_test(ArangeOutModel() (x))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_dynamic_arange_start_out self<block_start><class_stmt>ArangeStartOutModel(torch.nn.Module)<block_start><def_stmt>forward self start end<block_start>out_t=torch.tensor([1] dtype=torch.int64)<line_sep><return>torch.arange(start.size(0) end out=out_t)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.tensor(8)<line_sep>self.run_test(ArangeStartOutModel() (x y))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_arange self<block_start><class_stmt>ArangeModel(torch.nn.Module)<block_start><def_stmt>forward self start end<block_start><return>torch.arange(start.size(0) end 1.5 dtype=torch.int64)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.tensor(8.5 dtype=torch.float)<line_sep>self.run_test(ArangeModel() (x y))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_arange_out self<block_start><class_stmt>ArangeOutModel(torch.nn.Module)<block_start><def_stmt>forward self end<block_start>out_t=torch.tensor([1] dtype=torch.float)<line_sep><return>torch.arange(end out=out_t)<block_end><block_end>x=torch.tensor(8.5 dtype=torch.float)<line_sep>self.run_test(ArangeOutModel() (x))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_arange_start_out self<block_start><class_stmt>ArangeStartOutModel(torch.nn.Module)<block_start><def_stmt>forward self start end<block_start>out_t=torch.tensor([1] dtype=torch.float)<line_sep><return>torch.arange(start.size(0) end out=out_t)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.tensor(8.5 dtype=torch.float)<line_sep>self.run_test(ArangeStartOutModel() (x y))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_arange_no_type self<block_start><class_stmt>ArangeModel(torch.nn.Module)<block_start><def_stmt>forward self end<block_start><return>torch.arange(end) torch.arange(0 end)<block_end><block_end>x=torch.tensor(6.2 dtype=torch.float)<line_sep>self.run_test(ArangeModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_size 
self<block_start><class_stmt>SizeModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.arange(input.size(0)) torch.arange(input.size(-1)) torch.ones(input.shape)<block_end><block_end>x=torch.randn(5 3 2)<line_sep>self.run_test(SizeModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()# x.stride() not scriptable
<def_stmt>test_as_strided self<block_start><class_stmt>Model(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>chunk_size=list(x.size())<line_sep>chunk_size[1]=chunk_size[1]<times>2-1<line_sep>chunk_stride=list(x.stride())<line_sep>chunk_stride[1]=chunk_stride[1]<floordiv>2<line_sep><return>x.as_strided((3 3 3) (1 4 2) storage_offset=2) x.as_strided(chunk_size chunk_stride)<block_end><block_end>x=torch.randn(5 8 7)<line_sep>self.run_test(Model() x)<block_end>@disableScriptTest()# Ellipses followed by tensor indexing not scriptable
<def_stmt>test_tensor_index_advanced_indexing_ellipsis self<block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[<ellipsis> torch.tensor([2 1]) torch.tensor([0 3])]<block_end><block_end>m1=torch.randn(3 4 5 6 7)<line_sep>self.run_test(MyModel() (m1 ))<block_end><def_stmt>test_tensor_index_advanced_indexing self<block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[: torch.tensor([[0 2] [1 1]]) : torch.tensor([2 1]) torch.tensor([0 3])]<block_end><block_end>m1=torch.randn(3 4 5 6 7)<line_sep>self.run_test(MyModel() (m1 ))<class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[: torch.tensor([0 2]) <none> 2:4 torch.tensor([[1 3] [4 0]])]<block_end><block_end>self.run_test(MyModel() (m1 ))<class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[: torch.tensor([0 2]) torch.tensor([1]) 2:4 torch.tensor([[1] [4]])]<block_end><block_end>self.run_test(MyModel() (m1 ))<block_end><def_stmt>test_tensor_index_advanced_indexing_consecutive self<block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input[: torch.tensor([0 2]) torch.tensor([[1 3] [4 0]]) <none>]<block_end><block_end>m1=torch.randn(3 4 5 6 7)<line_sep>self.run_test(MyModel() (m1 ))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_index_put self<block_start><class_stmt>IndexPutModel(torch.nn.Module)<block_start><def_stmt>forward self x ind update<block_start>x[ind]=update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4)<line_sep>ind=torch.tensor([1] dtype=torch.long)<line_sep>update=torch.ones(4)<line_sep>self.run_test(IndexPutModel() (x ind update))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_index_put_accumulate self<block_start><class_stmt>IndexPutModel(torch.nn.Module)<block_start><def_stmt>forward self x ind update<block_start><return>x.index_put((ind ) update accumulate=<true>)<block_end><block_end>x=torch.randn(3 4)<line_sep>ind=torch.tensor([2] dtype=torch.long)<line_sep>update=torch.ones(4)<line_sep>self.run_test(IndexPutModel() (x ind update))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_index_put_slice_index self<block_start><class_stmt>IndexPutModel(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[1:2 1:3 torch.tensor([1])]<augadd>update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5)<line_sep>update=torch.tensor([10 15]).view(1 2 1)<line_sep>self.run_test(IndexPutModel() (x update))<class_stmt>IndexPutModel2(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[torch.tensor([0 2]) torch.tensor([1 2])]<augadd>update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5)<line_sep>update=torch.randn(2 5)<line_sep>self.run_test(IndexPutModel2() (x update))<class_stmt>IndexPutModel3(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[torch.tensor([0 2]) 1:2]<augadd>update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5)<line_sep>update=torch.tensor([10 15]).view(2 1 1)<line_sep>self.run_test(IndexPutModel3() (x update))<class_stmt>IndexPutModel4(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[torch.tensor([0 2]) 2]<augadd>update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5)<line_sep>update=torch.tensor([10 15]).view(2 1)<line_sep>self.run_test(IndexPutModel4() (x 
update))<class_stmt>IndexPutModel5(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[1:3 torch.tensor([0 2]) 2]<augadd>update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5)<line_sep>update=torch.tensor([10 15]).view(2 1)<line_sep>self.run_test(IndexPutModel5() (x update))<class_stmt>IndexPutModel6(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[1:3 0]=update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5)<line_sep>update=torch.arange(2<times>5).to(torch.float).view(2 5)<line_sep>self.run_test(IndexPutModel6() (x update))<class_stmt>IndexPutModel7(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[1: 0]=update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5)<line_sep>update=torch.arange(2<times>5).to(torch.float).view(2 5)<line_sep>self.run_test(IndexPutModel7() (x update))<class_stmt>IndexPutModel8(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[:3 0]=update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5)<line_sep>update=torch.arange(3<times>5).to(torch.float).view(3 5)<line_sep>self.run_test(IndexPutModel8() (x update))<block_end>@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()# Ellipses followed by tensor indexing not scriptable
<def_stmt>test_index_put_ellipsis self<block_start><class_stmt>IndexPutModel(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[<ellipsis> torch.tensor([2 1 3]) 2:4]<augadd>update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5 6 7)<line_sep>update=torch.randn(3 1 1 3 2)<line_sep>self.run_test(IndexPutModel() (x update))<class_stmt>IndexPutModel2(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[2 <ellipsis> torch.tensor([2 1 3]) 2:4]<augadd>update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5 6 7)<line_sep>update=torch.randn(4 1 3 2)<line_sep>self.run_test(IndexPutModel2() (x update))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_copy_ self<block_start><class_stmt>CopyModel(torch.nn.Module)<block_start><def_stmt>forward self x data<block_start>x[1:3]=data<line_sep><return>x<block_end><block_end>x=torch.randn(3 4)<line_sep>update=torch.randn(2 4)<line_sep>self.run_test(CopyModel() (x update))<line_sep># mixed slice and select
<class_stmt>CopyModel2(torch.nn.Module)<block_start><def_stmt>forward self x data<block_start>x[1:3 0]=data<line_sep><return>x<block_end><block_end>x=torch.randn(3 4)<line_sep>update=torch.tensor([0] dtype=torch.float32)<line_sep>self.run_test(CopyModel2() (x update))<line_sep>update=torch.tensor([2 3] dtype=torch.float32)<line_sep>self.run_test(CopyModel2() (x update))<line_sep>update=torch.randn(2)<line_sep>self.run_test(CopyModel2() (x update))<class_stmt>CopyModel3(torch.nn.Module)<block_start><def_stmt>forward self x data<block_start>x[1 1:3]=data<line_sep><return>x<block_end><block_end>x=torch.randn(3 4)<line_sep>update=torch.tensor([0] dtype=torch.float32)<line_sep>self.run_test(CopyModel3() (x update))<line_sep>update=torch.tensor([2 3] dtype=torch.float32)<line_sep>self.run_test(CopyModel3() (x update))<line_sep>update=torch.randn(2)<line_sep>self.run_test(CopyModel3() (x update))<class_stmt>CopyModel4(torch.nn.Module)<block_start><def_stmt>forward self x ind data<block_start>x[ind]=data<line_sep><return>x<block_end><block_end>x=torch.randn(3 4)<line_sep>ind=torch.tensor(2)<line_sep>data=torch.randn(4)<line_sep>self.run_test(CopyModel4() (x ind data))<block_end>@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()# Model not scriptable (output with shape doesn't match the broadcast shape)
<def_stmt>test_copy_tracing self<block_start><class_stmt>CopyModel(torch.nn.Module)<block_start><def_stmt>forward self x data<block_start>x[1 1:3]=data<line_sep><return>x<block_end><block_end>x=torch.randn(3 4)<line_sep>update=torch.randn(1 2)<line_sep>self.run_test(CopyModel() (x update))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_copy_ellipsis self<block_start><class_stmt>CopyModel(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[<ellipsis> 1]=update<line_sep><return>x<block_end><block_end>x=torch.randn(2 3 4)<line_sep>update=torch.ones(1)<line_sep>self.run_test(CopyModel() (x update))<line_sep>x=torch.randn(2 3 4 5 6)<line_sep>update=torch.ones(1)<line_sep>self.run_test(CopyModel() (x update))<block_end>@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()# Missing input size (with ellipsis indexing)
<def_stmt>test_copy_ellipsis_tracing self<block_start><class_stmt>CopyModel(torch.nn.Module)<block_start><def_stmt>forward self x update<block_start>x[2 <ellipsis> 1:3]=update<line_sep><return>x<block_end><block_end>x=torch.randn(3 4 5 6)<line_sep>update=torch.ones(1)<line_sep>self.run_test(CopyModel() (x update))<block_end>@skipIfUnsupportedMinOpsetVersion(10)<def_stmt>test_flip self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.flip(x dims=[0])<block_end><block_end>x=torch.tensor(np.arange(6.0).reshape(2 3))<line_sep>self.run_test(MyModule() x)<block_end><def_stmt>test_random self<block_start><class_stmt>RandN(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.mul(x (torch.randn(2 3 4)+x).size(0))<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(RandN() x)<class_stmt>Rand(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.mul(x (torch.rand(2 3 4)+x).size(0))<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(Rand() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()# symbolic update for randn
<def_stmt>test_random_dynamic_size self<block_start><class_stmt>RandN(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.mul(x torch.randn(x.size()).size(1))<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(RandN() x)<class_stmt>Rand(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.mul(x torch.rand(x.size()).size(1))<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(Rand() x)<block_end><def_stmt>test_random_like self<block_start><class_stmt>RandNLike(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.mul(x torch.randn_like(x).size(0))<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(RandNLike() x)<line_sep>self.run_test(torch.jit.script(RandNLike()) x)<class_stmt>RandLike(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.mul(x torch.rand_like(x).size(0))<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(RandLike() x)<line_sep>self.run_test(torch.jit.script(RandLike()) x)<block_end><def_stmt>test_random_like_dtype self<block_start><class_stmt>RandNLike(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.mul(x.to(torch.double) torch.randn_like(x dtype=torch.double).size(0))<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(RandNLike() x)<class_stmt>RandLike(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.mul(x.to(torch.double) torch.rand_like(x dtype=torch.double).size(0))<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(RandLike() x)<block_end><def_stmt>_interpolate self x mode use_size is_upsample align_corners=<false><block_start><class_stmt>MyModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>scale=2.3<if>is_upsample<else>0.5<if_stmt>len(x.size())<eq>3<block_start>scale_array=2.3<block_end><if_stmt>len(x.size())<eq>4<block_start>scale_array=[2.3 5.1]<block_end><if_stmt>len(x.size())<eq>5<block_start>scale_array=[3.3 2.3 5.1]<block_end><if_stmt>use_size<block_start>size_array=[int(float(v)<times>scale)<for>v x.size()[2:]]<if_stmt>align_corners<block_start><return>torch.nn.functional.interpolate(x mode=mode size=size_array[0] align_corners=<true>) torch.nn.functional.interpolate(x mode=mode size=size_array align_corners=<true>)<block_end><return>torch.nn.functional.interpolate(x mode=mode size=size_array[0]) torch.nn.functional.interpolate(x mode=mode size=size_array)<block_end><if_stmt>align_corners<block_start><return>torch.nn.functional.interpolate(x mode=mode scale_factor=scale align_corners=<true> recompute_scale_factor=<false>) torch.nn.functional.interpolate(x mode=mode scale_factor=scale_array align_corners=<true> recompute_scale_factor=<false>)<block_end><return>torch.nn.functional.interpolate(x mode=mode scale_factor=scale recompute_scale_factor=<false>) torch.nn.functional.interpolate(x mode=mode scale_factor=scale_array recompute_scale_factor=<false>)<block_end><block_end>self.run_test(MyModel() x)<block_end><def_stmt>_interpolate_script self x mode use_size is_upsample align_corners=<false><block_start><class_stmt>MyModel(torch.jit.ScriptModule)<block_start>__constants__=['mode' 'use_size' 'is_upsample' 'size' 'scale' 'size_array' 'scale_array' 'align_corners']<def_stmt>__init__ self mode use_size is_upsample align_corners<block_start>super(MyModel 
self).__init__()<line_sep>self.mode=mode<line_sep>self.use_size=use_size<line_sep>self.is_upsample=is_upsample<line_sep>self.align_corners=align_corners<line_sep>self.scale=2.0<if>self.is_upsample<else>0.5<line_sep>self.size=24<if>self.is_upsample<else>2<if_stmt>x.dim()<eq>3<block_start>self.scale_array=[2.3]<line_sep>self.size_array=[16]<block_end><elif_stmt>x.dim()<eq>4<block_start>self.scale_array=[2.3 3.1]<line_sep>self.size_array=[16 32]<block_end><else_stmt><block_start>self.scale_array=[2.3 3.1 4.6]<line_sep>self.size_array=[16 32 64]<block_end><block_end>@torch.jit.script_method<def_stmt>forward self x<block_start><if_stmt>self.use_size<block_start><if_stmt>self.align_corners<block_start><return>torch.nn.functional.interpolate(x mode=self.mode size=self.size align_corners=<true>) torch.nn.functional.interpolate(x mode=self.mode size=self.size_array align_corners=<true>)<block_end><return>torch.nn.functional.interpolate(x mode=self.mode size=self.size) torch.nn.functional.interpolate(x mode=self.mode size=self.size_array)<block_end><if_stmt>self.align_corners<block_start><return>torch.nn.functional.interpolate(x mode=self.mode scale_factor=self.scale recompute_scale_factor=<false>) torch.nn.functional.interpolate(x mode=self.mode scale_factor=self.scale_array recompute_scale_factor=<false>)<block_end><return>torch.nn.functional.interpolate(x mode=self.mode scale_factor=self.scale recompute_scale_factor=<false>) torch.nn.functional.interpolate(x mode=self.mode scale_factor=self.scale_array recompute_scale_factor=<false>)<block_end><block_end>model=MyModel(mode use_size is_upsample align_corners)<line_sep>self.run_test(model x atol=1e-6)<block_end><def_stmt>_interpolate_tests self is_upsample# - cubic mode is not supported for opsets below 11;
# - linear mode does not match for opsets below 11;
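# (ONNX Resize only gained cubic support and the coordinate_transformation_mode
# attribute needed to match PyTorch's linear behavior in opset 11.)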
<block_start>modes=["nearest" "linear" "bicubic"]<if_stmt>self.opset_version<l>11<block_start>modes=["nearest"]<block_end>x=[torch.randn(1 2 6 requires_grad=<true>) torch.randn(1 2 4 6 requires_grad=<true>) torch.randn(1 2 4 4 6 requires_grad=<true>)]<for_stmt>mode modes<block_start><for_stmt>xi x<block_start>mode_i=mode<line_sep># TODO: enable bicubic downsample when ORT precision loss fixed
<if_stmt>mode<eq>"bicubic"<and>xi.dim()<ne>4<block_start><continue><block_end><elif_stmt>mode<eq>"linear"<block_start><if_stmt>xi.dim()<eq>3# TODO : enable when linear mode is implemented for 1d inputs in ORT
<block_start><continue><block_end><elif_stmt>xi.dim()<eq>4<block_start>mode_i="bilinear"<block_end><elif_stmt>xi.dim()<eq>5# TODO: enable when linear mode is implemented for 3d inputs in ORT
<block_start>mode_i="trilinear"<line_sep><continue><block_end><block_end>self._interpolate(xi mode_i <true> is_upsample)<line_sep># test with align_corners if supported
<if_stmt>mode<ne>'nearest'<block_start>self._interpolate(xi mode_i <true> is_upsample <true>)<line_sep>self._interpolate_script(xi mode_i <true> is_upsample <true>)<block_end># the following cases require dynamic sizes/scales,
# which are not supported for opset_version < 9
<if_stmt>self.opset_version<ge>9<block_start>self._interpolate_script(xi mode_i <true> is_upsample)<line_sep>self._interpolate(xi mode_i <false> is_upsample)<line_sep># test with align_corners if supported
<if_stmt>mode<ne>'nearest'<block_start>self._interpolate(xi mode_i <false> is_upsample <true>)<line_sep>self._interpolate_script(xi mode_i <false> is_upsample <true>)<block_end>self._interpolate_script(xi mode_i <false> is_upsample)<block_end><block_end><block_end><block_end>@disableScriptTest()<def_stmt>test_interpolate_upsample self<block_start>self._interpolate_tests(<true>)<block_end>@disableScriptTest()@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_interpolate_function_substitution self<block_start><class_stmt>ScriptModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>torch.nn.functional.interpolate(x mode="nearest" scale_factor=2.)<block_end><block_end><class_stmt>ScriptModule(torch.jit.ScriptModule)<block_start><def_stmt>__init__ self<block_start>super(ScriptModule self).__init__()<line_sep>self.submodule=ScriptModel()<block_end>@torch.jit.script_method<def_stmt>forward self input<block_start><return>self.submodule(input)<block_end><block_end>x=torch.randn(1 2 4 4 6)<line_sep>self.run_test(ScriptModule() (x ))<line_sep>@torch.jit.script<def_stmt>script_method x<block_start><return>torch.nn.functional.interpolate(x mode="nearest" scale_factor=2.)<block_end><class_stmt>TracingModule(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>script_method(x)<block_end><block_end>self.run_test(TracingModule() (x ))<block_end>@skipIfUnsupportedMinOpsetVersion(10)@disableScriptTest()<def_stmt>test_interpolate_downsample self<block_start>self._interpolate_tests(<false>)<block_end>@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()<def_stmt>test_interpolate_no_shape self<block_start><class_stmt>MyModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x y<block_start>x=torch.add(x x)<line_sep>out1=torch.nn.functional.interpolate(x mode="bilinear" size=(16 16) align_corners=<false>)<line_sep>out2=torch.nn.functional.interpolate(x mode="nearest" size=(int(y.size(0)) int(y.size(1))))<line_sep><return>out1 out2<block_end><block_end>x=torch.randn(1 2 4 4 requires_grad=<true>)<line_sep>y=torch.randn(16 16 requires_grad=<true>)<line_sep>self.run_test(MyModel() (x y))<block_end><def_stmt>test_interpolate_adaptive_pooling_error self<block_start>x=torch.randn(1 2 6 requires_grad=<true>)<with_stmt>self.assertRaises(RuntimeError)<as>cm<block_start>self._interpolate(x "area" <true> <true>)<block_end><with_stmt>self.assertRaises(RuntimeError)<as>cm<block_start>self._interpolate(x "area" <false> <true>)<block_end><block_end><def_stmt>test_groupnorm self<block_start>model=torch.nn.GroupNorm(3 6 0.002)<line_sep>x=torch.randn(4 6 180 180 180)<line_sep>self.run_test(model x)<line_sep>model=torch.nn.GroupNorm(1 6 0.002)<line_sep>x=torch.randn(4 6 180 180)<line_sep>self.run_test(model x)<line_sep>model=torch.nn.GroupNorm(6 6 0.002)<line_sep>x=torch.randn(4 6 180 180)<line_sep>self.run_test(model x)<block_end>@disableScriptTest()<def_stmt>test_groupnorm_noaffine self<block_start>model=torch.nn.GroupNorm(4 8 0.002 affine=<false>)<line_sep>x=torch.randn(3 8 224 224)<line_sep>self.run_test(model x)<line_sep>model=torch.nn.GroupNorm(1 6 0.002 affine=<false>)<line_sep>x=torch.randn(4 6 180 180)<line_sep>self.run_test(model x)<line_sep>model=torch.nn.GroupNorm(6 6 0.002 affine=<false>)<line_sep>x=torch.randn(4 6 180 180)<line_sep>self.run_test(model x)<block_end><def_stmt>test_std self<block_start><class_stmt>StandardDeviation(torch.nn.Module)<block_start><def_stmt>forward self 
input<block_start><return>torch.std(input unbiased=<false>)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>model=StandardDeviation()<line_sep>self.run_test(model x)<block_end><def_stmt>test_pow self<block_start><class_stmt>PowModule(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start><return>x.pow(y)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.randn(2 3 4)<line_sep>self.run_test(PowModule() (x y))<line_sep>x=torch.randint(10 (2 3 4))<line_sep>y=torch.randint(10 (2 3 4)).to(dtype=torch.int32)<line_sep>self.run_test(PowModule() (x y))<line_sep>x=torch.randint(10 (2 3 4))<line_sep>y=torch.randint(10 (2 3 4))<line_sep>self.run_test(PowModule() (x y))<line_sep>x=torch.randn(2 3 4).to(dtype=torch.float64)<line_sep>y=torch.randint(10 (2 3 4))<line_sep>self.run_test(PowModule() (x y))<block_end><def_stmt>test_std_along_dims self<block_start><class_stmt>StandardDeviation(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.std(input dim=(0 1) unbiased=<false>)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>model=StandardDeviation()<line_sep>self.run_test(model x)<block_end><def_stmt>test_std_keepdim self<block_start><class_stmt>StandardDeviation(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.std(input dim=(0 1) unbiased=<false> keepdim=<true>)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>model=StandardDeviation()<line_sep>self.run_test(model x)<block_end><def_stmt>test_bitshift self<block_start><class_stmt>BitshiftModel(torch.nn.Module)<block_start><def_stmt>forward self input input2<block_start><return>input<rshift>1 input<lshift>3.1 input2<rshift>torch.tensor([1 2]) input2<lshift>4.2<block_end><block_end>input=torch.arange(24 dtype=torch.float32).reshape(3 4 2)<line_sep>input2=torch.arange(24 dtype=torch.int64).reshape(3 4 2)<line_sep>self.run_test(BitshiftModel() (input input2))<block_end><def_stmt>test_bitshift_other_fp self<block_start><class_stmt>BitshiftModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input<lshift>2.4<block_end><block_end>input=torch.arange(24 dtype=torch.int64).reshape(3 4 2)<line_sep>self.run_test(BitshiftModel() input)<block_end># uint8 not implemented in ORT for Mul used in
# exporting bitshift for opset_version < 10
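# (on older opsets the shift is lowered to a Mul/Div by a power of two; a
# native BitShift op only exists from opset 11, hence the minimum opset below)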
@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_bitshift_uint8 self<block_start><class_stmt>BitshiftModel(torch.nn.Module)<block_start><def_stmt>forward self input input2<block_start><return>input<rshift>1 input<lshift>3. input2<rshift>torch.tensor([1 2] dtype=torch.uint8) input2<lshift>4.<block_end><block_end>input=torch.arange(24 dtype=torch.uint8).reshape(3 4 2)<line_sep>input2=torch.arange(24 dtype=torch.uint8).reshape(3 4 2)<line_sep>self.run_test(BitshiftModel() (input input2))<block_end><def_stmt>test_narrow self<block_start><class_stmt>NarrowModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.narrow(input 0 0 2)<block_end><block_end>x=torch.randn(3 3 requires_grad=<true>)<line_sep>self.run_test(NarrowModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_narrow_dynamic self<block_start><class_stmt>NarrowModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.narrow(input 0 0 input.shape[0]-1)<block_end><block_end>x=torch.randn(3 3 requires_grad=<true>)<line_sep>self.run_test(NarrowModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_index_fill self<block_start><class_stmt>IndexFillModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>index=torch.tensor([2 0])<line_sep><return>input.index_fill(2 index -1)<block_end><block_end>x=torch.randn(3 4 5 requires_grad=<true>)<line_sep>self.run_test(IndexFillModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_index_copy self<block_start><class_stmt>IndexCopyModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>index=torch.tensor([2 0])<line_sep>source=torch.ones(3 2 5)<line_sep><return>input.index_copy(1 index source)<block_end><block_end>x=torch.randn(3 4 5 requires_grad=<true>)<line_sep>self.run_test(IndexCopyModel() x)<block_end><def_stmt>test_select self<block_start><class_stmt>Select(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x[: 1]<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(Select() x)<block_end><def_stmt>test_select_negative_index self<block_start><class_stmt>Select(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x[: -1]<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(Select() x)<block_end># TODO: enable for opset 10 once the ONNXRuntime version is updated
<def_stmt>test_index_select_constant_scaler_index self<block_start><class_stmt>IndexSelectScalerIndexModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>index=2<line_sep><return>torch.index_select(x 1 torch.tensor(index))<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(IndexSelectScalerIndexModel() x)<block_end><def_stmt>test_index_select_scaler_index self<block_start><class_stmt>IndexSelectScalerIndexModel(torch.nn.Module)<block_start><def_stmt>__init__ self index_base<block_start>super(IndexSelectScalerIndexModel self).__init__()<line_sep>self.index_base=torch.tensor(index_base)<block_end><def_stmt>forward self x index_offset<block_start>index=self.index_base+index_offset<line_sep><return>torch.index_select(x 1 index)<block_end><block_end>x=torch.randn(3 4)<line_sep>offset=2<line_sep>index_offset=torch.tensor(offset)<line_sep>base=1<line_sep>self.run_test(IndexSelectScalerIndexModel(base) (x index_offset))<block_end><def_stmt>test_take self<block_start><class_stmt>TakeModel(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start><return>torch.take(x y)<block_end><block_end>x=torch.randn(6 4 3 3)<line_sep>y=torch.tensor([4 1 7 15 63])<line_sep>self.run_test(TakeModel() (x y))<block_end><def_stmt>test_topk self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.topk(x 3)<block_end><block_end>x=torch.arange(1. 6. requires_grad=<true>)<line_sep>self.run_test(MyModule() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_topk_smallest_unsorted self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>forward self x k# When sorted=False, the order of elements in the output tensors
# is not expected to match between PyTorch and ORT.
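# (Hence the unsorted values are re-sorted below before being compared.)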
<block_start>topk_unsorted=torch.topk(x k largest=<false> sorted=<false>)<line_sep>topk_sorted=torch.topk(x k largest=<false> sorted=<true>)<line_sep><return>topk_sorted torch.sort(topk_unsorted.values).values<block_end><block_end>x=torch.arange(1. 6. requires_grad=<true>)<line_sep>k=torch.tensor(3)<line_sep>self.run_test(MyModule() (x k))<block_end>@skipIfUnsupportedMinOpsetVersion(10)<def_stmt>test_topk_script self<block_start><class_stmt>MyModuleDynamic(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x k<block_start><return>torch.topk(x k)<block_end><block_end>x=torch.arange(1. 6. requires_grad=<true>)<line_sep>k=torch.tensor(3)<line_sep>self.run_test(MyModuleDynamic() [x k])<block_end>@skipIfUnsupportedOpsetVersion([7])<def_stmt>test_normalize self<block_start><class_stmt>Model(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.nn.functional.normalize(x)<block_end><block_end>x=torch.randn(3 3)<line_sep>self.run_test(Model() x)<block_end><def_stmt>test_layer_norm self<block_start>model=torch.nn.LayerNorm([10 10])<line_sep>x=torch.randn(20 5 10 10)<line_sep>self.run_test(model x)<block_end><def_stmt>test_batchnorm1d self<block_start>x=torch.randn(10 10)<line_sep>model=torch.nn.BatchNorm1d(10 affine=<true>)<line_sep>self.run_test(model x)<line_sep>x=torch.randn(10 10 128)<line_sep>self.run_test(model x)<block_end><def_stmt>test_batchnorm1d_noaffine self<block_start>x=torch.randn(10 10)<line_sep>model=torch.nn.BatchNorm1d(10 affine=<false>)<line_sep>self.run_test(model x)<line_sep>x=torch.randn(10 10 128)<line_sep>self.run_test(model x)<block_end><def_stmt>test_batchnorm2d self<block_start>x=torch.randn(10 3 128 128)<line_sep>model=torch.nn.BatchNorm2d(3 affine=<true>)<line_sep>self.run_test(model x)<block_end><def_stmt>test_batchnorm2d_noaffine self<block_start>x=torch.randn(10 3 128 128)<line_sep>model=torch.nn.BatchNorm2d(3 affine=<false>)<line_sep>self.run_test(model x)<block_end><def_stmt>test_batchnorm3d self<block_start>x=torch.randn(10 3 128 128 128)<line_sep>model=torch.nn.BatchNorm3d(3 affine=<true>)<line_sep>self.run_test(model x)<block_end><def_stmt>test_batchnorm3d_noaffine self<block_start>x=torch.randn(10 3 128 128 128)<line_sep>model=torch.nn.BatchNorm3d(3 affine=<false>)<line_sep>self.run_test(model x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_scatter_with_scalar self<block_start><class_stmt>ScatterModel(torch.nn.Module)<block_start><def_stmt>forward self input indices<block_start>values=1.0<line_sep><return>input.scatter(1 indices values)<block_end><block_end>input=torch.tensor([[0. 0. 0.] [0. 0. 0.] [0. 0. 0.]] dtype=torch.float64)<line_sep>indices=torch.tensor([[1 0] [0 1] [0 1]] dtype=torch.int64)<line_sep>self.run_test(ScatterModel() input=(input indices))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_scatter_with_scalar_different_types self# Tests the case when scalar src (updates values) type is different
# from self type. Happens only with scalar src - PyTorch does not
# allow this when src is a tensor.
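# (Here the Python float scalar, a double, is scattered into a float32 tensor,
# so the export has to reconcile the scalar with the tensor's dtype.)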
<block_start><class_stmt>ScatterModel(torch.nn.Module)<block_start><def_stmt>forward self input indices<block_start>values=1.0<line_sep><return>input.scatter(1 indices values)<block_end><block_end>input=torch.tensor([[0. 0. 0.] [0. 0. 0.] [0. 0. 0.]] dtype=torch.float32)<line_sep>indices=torch.tensor([[1 0] [0 1] [0 1]] dtype=torch.int64)<line_sep>self.run_test(ScatterModel() input=(input indices))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_scatter self<block_start><class_stmt>ScatterModel(torch.nn.Module)<block_start><def_stmt>forward self input indices values<block_start><return>input.scatter(1 indices values)<block_end><block_end>input=torch.tensor([[0. 0. 0.] [0. 0. 0.] [0. 0. 0.]])<line_sep>indices=torch.tensor([[1 0] [0 1] [0 1]] dtype=torch.int64)<line_sep>values=torch.tensor([[1.0 1.1] [2.0 2.1] [3.0 3.1]])<line_sep>self.run_test(ScatterModel() input=(input indices values))<line_sep>input=torch.tensor([[0.0 0.0 0.0] [0.0 0.0 0.0] [0.0 0.0 0.0]])<line_sep>indices=torch.tensor([[1 0] [0 2] [0 1]] dtype=torch.int64)<line_sep>values=torch.tensor([[1.0 1.1] [2.0 2.1] [3.0 3.1]])<line_sep>self.run_test(ScatterModel() (input indices values))<line_sep>input=torch.zeros(3 4 5 6)<line_sep>indices=torch.tensor([[1 0] [0 2] [0 1]] dtype=torch.int64)<line_sep>indices=indices.view(3 2 1 1).expand(3 2 5 6)<line_sep>values=torch.arange(3<times>2<times>5<times>6 dtype=torch.float32).view(3 2 5 6)<line_sep>self.run_test(ScatterModel() (input indices values))<line_sep>input=torch.zeros(3 4 2)<line_sep>indices=torch.tensor([[[1 0] [0 2]] [[1 1] [0 1]] [[2 1] [2 2]]])<line_sep>values=torch.arange(3<times>2<times>2 dtype=torch.float32).view(3 2 2)<line_sep>self.run_test(ScatterModel() (input indices values))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_scatter_add self<block_start><class_stmt>ScatterModel(torch.nn.Module)<block_start><def_stmt>forward self input indices values<block_start><return>input.scatter_add(1 indices values)<block_end><block_end>input=torch.tensor([[0.0 0.0 0.0] [0.0 0.0 0.0] [0.0 0.0 0.0]])<line_sep>indices=torch.tensor([[1 0] [0 1] [0 1]] dtype=torch.int64)<line_sep>values=torch.tensor([[1.0 1.1] [2.0 2.1] [3.0 3.1]])<line_sep>self.run_test(ScatterModel() input=(input indices values))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_one_hot self<block_start><class_stmt>OneHot(torch.nn.Module)<block_start><def_stmt>__init__ self num_classes<block_start>super().__init__()<line_sep>self.num_classes=num_classes<block_end><def_stmt>forward self x<block_start><return>torch.nn.functional.one_hot(x self.num_classes)<block_end><block_end>x=torch.arange(10)<line_sep>self.run_test(OneHot(15) (x))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_gather self<block_start><class_stmt>GatherModel(torch.nn.Module)<block_start><def_stmt>forward self input indices<block_start><return>input.gather(1 indices)<block_end><block_end>input=torch.tensor([[1. 2. 3.] [4. 5. 6.] [7. 8. 
9.]])<line_sep>indices=torch.tensor([[1 0] [0 1] [0 1]] dtype=torch.int64)<line_sep>self.run_test(GatherModel() input=(input indices))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_expand self<block_start><class_stmt>ExpandModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input.expand(2 3 -1)<block_end><block_end>input=torch.randn(2 1 4)<line_sep>self.run_test(ExpandModel() input=(input))<class_stmt>ExpandInferDimModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input.expand(-1 input.size(0))<block_end><block_end>input=torch.randn(3 1)<line_sep>self.run_test(ExpandInferDimModel() input=(input))<class_stmt>ExpandTensorSizeModel(torch.nn.Module)<block_start><def_stmt>forward self input size<block_start><return>input.expand(size)<block_end><block_end>input=torch.randn(3 )<line_sep>size=torch.tensor(-1)<line_sep>self.run_test(ExpandTensorSizeModel() input=(input size))<block_end><def_stmt>test_multinomial self<block_start><class_stmt>Multinomial(torch.nn.Module)<block_start><def_stmt>forward self weight<block_start><return>torch.multinomial(weight 3 replacement=<true>)<block_end><block_end><class_stmt>MultinomialNoReplacement(torch.nn.Module)<block_start><def_stmt>forward self weight<block_start><return>torch.multinomial(weight 1)<block_end><block_end>weight=torch.tensor([[0 10 0 0] [0 0 100 0]] dtype=torch.float)<line_sep>self.run_test(Multinomial() (weight ))<line_sep>self.run_test(MultinomialNoReplacement() (weight ))<block_end><def_stmt>_test_reduced_ops self op<block_start><class_stmt>ReducedOpModule(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>op(input dim=-1)<block_end><block_end><if_stmt>op<ne>torch.mean# torch.mean only supports float types
<block_start>x=torch.randint(10 (4 4) dtype=torch.uint8)<line_sep>self.run_test(ReducedOpModule() x)<line_sep>x=torch.randint(10 (4 4) dtype=torch.int8)<line_sep>self.run_test(ReducedOpModule() x)<line_sep>x=torch.randint(10 (4 4) dtype=torch.int16)<line_sep>self.run_test(ReducedOpModule() x)<line_sep>x=torch.randint(10 (4 4) dtype=torch.int32)<line_sep>self.run_test(ReducedOpModule() x)<line_sep>x=torch.randint(10 (4 4) dtype=torch.int64)<line_sep>self.run_test(ReducedOpModule() x)<block_end># torch.mean only supports float types
# ORT does not support ReduceProd for double inputs
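# (so the double input below is only exercised for torch.sum)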
<if_stmt>op<ne>torch.prod<and>op<ne>torch.mean<block_start>x=torch.randn(4 5 dtype=torch.double)<line_sep>self.run_test(ReducedOpModule() x)<block_end><if_stmt>op<ne>torch.prod# torch.prod not implemented for Half
<block_start>x=torch.randn(4 4 dtype=torch.half)<line_sep>self.run_test(ReducedOpModule() x)<block_end>x=torch.randn(4 5 dtype=torch.float)<line_sep>self.run_test(ReducedOpModule() x)<block_end><def_stmt>test_reduced_sum self<block_start><return>self._test_reduced_ops(op=torch.sum)<block_end><def_stmt>test_reduced_mean self<block_start><return>self._test_reduced_ops(op=torch.mean)<block_end><def_stmt>test_reduced_prod self<block_start><return>self._test_reduced_ops(op=torch.prod)<block_end><def_stmt>test_reduced_min_max self<block_start><class_stmt>ReducedMinMaxModule(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.min(input dim=-1)[0] torch.max(input dim=0)[0]<block_end><block_end>x=torch.randint(10 (4 4) dtype=torch.int32)<line_sep>self.run_test(ReducedMinMaxModule() x)<line_sep>x=torch.randint(10 (4 4) dtype=torch.int64)<line_sep>self.run_test(ReducedMinMaxModule() x)<line_sep>x=torch.randn(4 5 dtype=torch.float)<line_sep>self.run_test(ReducedMinMaxModule() x)<block_end><def_stmt>test_reduce_log_sum_exp self<block_start><class_stmt>ReduceLogSumExpModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>a=torch.logsumexp(input dim=0)<line_sep>b=torch.logsumexp(input dim=(0 1))<line_sep><return>a+b<block_end><block_end>x=torch.randn(4 4 requires_grad=<true>)<line_sep>self.run_test(ReduceLogSumExpModel() x)<block_end><def_stmt>test_softmax self<block_start><for_stmt>i range(-4 3)<block_start>model=torch.nn.Softmax(dim=i)<line_sep>input=torch.randn(3 4 5 6)<line_sep>self.run_test(model input)<class_stmt>SoftmaxUnknownRank(torch.nn.Module)<block_start><def_stmt>__init__ self i<block_start>super().__init__()<line_sep>self.softmax=torch.nn.Softmax(dim=i)<block_end><def_stmt>forward self x<block_start><return>self.softmax(x.reshape(3 4 5 6))<block_end><block_end>model=torch.jit.script(SoftmaxUnknownRank(i))<line_sep>self.run_test(model input)<block_end><block_end><def_stmt>test_softmax_large_values self<block_start>input=torch.tensor([[-1e12 -1e12 -1e12] [1e12 0.0 -5.0] [3.0 4.0 5.0]])<for_stmt>i range(-2 1)<block_start>model=torch.nn.Softmax(dim=i)<line_sep>self.run_test(model input)<class_stmt>SoftmaxUnknownRank(torch.nn.Module)<block_start><def_stmt>__init__ self i<block_start>super().__init__()<line_sep>self.softmax=torch.nn.Softmax(dim=i)<block_end><def_stmt>forward self x<block_start><return>self.softmax(x.reshape(3 3))<block_end><block_end>model=torch.jit.script(SoftmaxUnknownRank(i))<line_sep>self.run_test(model input)<block_end><block_end><def_stmt>test_logsoftmax self<block_start><for_stmt>i range(7)[2:]<block_start>model=torch.nn.LogSoftmax(dim=i-1)<line_sep>dims=[2]<times>(i-2)+[3 4]<line_sep>input=torch.ones(*dims requires_grad=<true>)<line_sep>self.run_test(model input)<block_end><block_end><def_stmt>test_logsoftmax_dim self<block_start><for_stmt>i range(-4 3)<block_start>model=torch.nn.LogSoftmax(dim=i)<line_sep>input=torch.randn(3 4 5 6)<line_sep>self.run_test(model input)<block_end><block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()# scripting prim_dtype
<def_stmt>test_lstm_no_hidden self<block_start><class_stmt>LSTMModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.rnn=torch.nn.LSTM(input_size=16 hidden_size=16)<block_end><def_stmt>forward self x<block_start><return>self.rnn(x)<block_end><block_end>input=torch.randn((10 16 16))<line_sep>self.run_test(LSTMModel() (input ))<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()<def_stmt>test_lstm self<block_start>model=torch.nn.LSTM(RNN_INPUT_SIZE RNN_HIDDEN_SIZE 1 bidirectional=<false>)<line_sep>input=torch.randn(RNN_SEQUENCE_LENGTH BATCH_SIZE RNN_INPUT_SIZE)<line_sep>h0=torch.randn(1 BATCH_SIZE RNN_HIDDEN_SIZE)<line_sep>c0=torch.randn(1 BATCH_SIZE RNN_HIDDEN_SIZE)<line_sep>self.run_test(model (input (h0 c0)))<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()<def_stmt>test_lstm_default_init_state self<block_start>model=torch.nn.LSTM(RNN_INPUT_SIZE RNN_HIDDEN_SIZE 1 bidirectional=<false>)<line_sep>input=torch.randn(RNN_SEQUENCE_LENGTH BATCH_SIZE RNN_INPUT_SIZE)<line_sep>self.run_test(model input)<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()# LSTMModel not scriptable
<def_stmt>test_lstm_fixed_batch_size self<block_start><class_stmt>LSTMModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(LSTMModel self).__init__()<line_sep>self.lstm=torch.nn.LSTM(RNN_INPUT_SIZE RNN_HIDDEN_SIZE 1 bidirectional=<false>)<block_end><def_stmt>forward self input<block_start>batch_size=input.size()[1]<line_sep>h0_np=np.ones([1 batch_size RNN_HIDDEN_SIZE]).astype(np.float32)<line_sep>c0_np=np.ones([1 batch_size RNN_HIDDEN_SIZE]).astype(np.float32)<line_sep>h0=torch.from_numpy(h0_np)<line_sep>c0=torch.from_numpy(c0_np)<line_sep><return>self.lstm(input (h0 c0))<block_end><block_end>input=torch.randn(RNN_SEQUENCE_LENGTH BATCH_SIZE RNN_INPUT_SIZE)<line_sep># verify with different input of same batch size
input2=torch.randn(RNN_SEQUENCE_LENGTH BATCH_SIZE RNN_INPUT_SIZE)<line_sep>self.run_test(LSTMModel() input fixed_batch_size=<true> test_with_inputs=[input2])<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()<def_stmt>test_lstm_post_fix_init_state self<block_start><class_stmt>LSTMModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(LSTMModel self).__init__()<line_sep>self.lstm=torch.nn.LSTM(RNN_INPUT_SIZE RNN_HIDDEN_SIZE 1 bidirectional=<false>)<block_end><def_stmt>forward self input<block_start>batch_size=input.size()[1]<line_sep>h0_np=np.ones([1 batch_size RNN_HIDDEN_SIZE]).astype(np.float32)<line_sep>c0_np=np.ones([1 batch_size RNN_HIDDEN_SIZE]).astype(np.float32)<line_sep>h0=torch.from_numpy(h0_np)<line_sep>c0=torch.from_numpy(c0_np)<line_sep><return>self.lstm(input (h0 c0))<block_end><block_end>model=LSTMModel()<line_sep>input=torch.randn(RNN_SEQUENCE_LENGTH 1 RNN_INPUT_SIZE)<line_sep># verify with different input of different batch size
input2=torch.randn(RNN_SEQUENCE_LENGTH BATCH_SIZE RNN_INPUT_SIZE)<line_sep>self.run_test(model input dynamic_axes={'input':{0:'seq' 1:'batch'}} test_with_inputs=[input2])<block_end>@disableScriptTest()<def_stmt>test_lstm_constant_folding self<block_start><class_stmt>LstmNet(torch.nn.Module)<block_start><def_stmt>__init__ self input_size hidden_size num_layers bidirectional<block_start>super(LstmNet self).__init__()<line_sep>self.lstm=torch.nn.LSTM(input_size hidden_size num_layers bidirectional=bidirectional)<block_end><def_stmt>forward self input initial_state<block_start><return>self.lstm(input initial_state)<block_end><block_end><def_stmt>get_LstmNet_model_and_inputs input_size hidden_size num_layers batch_size seq_len bidirectional<block_start>num_directions=2<if>bidirectional<else>1<line_sep>model=LstmNet(input_size hidden_size num_layers bidirectional)<line_sep>input=torch.randn(seq_len batch_size input_size)<line_sep>h0=torch.randn(num_layers<times>num_directions batch_size hidden_size)<line_sep>c0=torch.randn(num_layers<times>num_directions batch_size hidden_size)<line_sep><return>model (input (h0 c0))<block_end>batch_size1=3<line_sep>model1,input1=get_LstmNet_model_and_inputs(7 3 2 batch_size1 5 <true>)<line_sep>self.run_test(model1 input1 do_constant_folding=<true>)<line_sep>batch_size2=4<line_sep>model2,input2=get_LstmNet_model_and_inputs(5 4 3 batch_size2 7 <false>)<line_sep>self.run_test(model2 input2 do_constant_folding=<true>)<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()<def_stmt>test_lstm_no_bias self<block_start><class_stmt>LstmNet(torch.nn.Module)<block_start><def_stmt>__init__ self num_layers bidirectional<block_start>super(LstmNet self).__init__()<line_sep>self.lstm=torch.nn.LSTM(RNN_INPUT_SIZE RNN_HIDDEN_SIZE num_layers bias=<false> bidirectional=bidirectional)<block_end><def_stmt>forward self input initial_state<block_start><return>self.lstm(input initial_state)<block_end><block_end><def_stmt>get_LstmNet_model_and_inputs num_layers bidirectional<block_start>input=torch.randn(RNN_SEQUENCE_LENGTH BATCH_SIZE RNN_INPUT_SIZE)<line_sep>num_directions=2<if>bidirectional<else>1<line_sep>model=LstmNet(num_layers bidirectional)<line_sep>h0=torch.randn(num_layers<times>num_directions BATCH_SIZE RNN_HIDDEN_SIZE)<line_sep>c0=torch.randn(num_layers<times>num_directions BATCH_SIZE RNN_HIDDEN_SIZE)<line_sep><return>model (input (h0 c0))<block_end>num_layers=[1 1 2 3]<line_sep>bidirectional=[<true> <false> <true> <false>]<line_sep>models_and_inputs=[get_LstmNet_model_and_inputs(n b)<for>n,b zip(num_layers bidirectional)]<for_stmt>model,input models_and_inputs<block_start>self.run_test(model input)<block_end><block_end>@disableScriptTest()<def_stmt>test_rnn_no_bias self<block_start><def_stmt>make_model layers packed_sequence<block_start>batch_first=<true><if>packed_sequence<eq>2<else><false><line_sep>model=torch.nn.RNN(RNN_INPUT_SIZE RNN_HIDDEN_SIZE layers bidirectional=<false> batch_first=batch_first bias=<false>)<if_stmt>packed_sequence<eq>1<block_start>model=RnnModelWithPackedSequence(model <false>)<block_end><if_stmt>packed_sequence<eq>2<block_start>model=RnnModelWithPackedSequence(model <true>)<block_end><return>model<block_end><def_stmt>make_input batch_size layers packed_sequence<block_start>batch_first=<true><if>packed_sequence<eq>2<else><false><line_sep>seq_lengths=np.random.randint(1 RNN_SEQUENCE_LENGTH+1 size=batch_size)<line_sep>seq_lengths=list(reversed(sorted(map(int seq_lengths))))<line_sep>inputs=[torch.randn(l RNN_INPUT_SIZE)<for>l 
seq_lengths]<line_sep>inputs=rnn_utils.pad_sequence(inputs batch_first=batch_first)<line_sep>inputs=[inputs]<line_sep>h0=torch.randn(layers batch_size RNN_HIDDEN_SIZE)<line_sep>inputs.append(h0)<if_stmt>packed_sequence<ne>0<block_start>inputs.append(torch.IntTensor(seq_lengths))<block_end><if_stmt>len(inputs)<eq>1<block_start>input=inputs[0]<block_end><else_stmt><block_start>input=tuple(inputs)<block_end><return>input<block_end>layers=[1 3 1 3 1 3]<line_sep>packed_sequence=[0 0 1 1 2 2]<line_sep>models=[make_model(l p)<for>l,p zip(layers packed_sequence)]<line_sep>inputs=[make_input(RNN_BATCH_SIZE l p)<for>l,p zip(layers packed_sequence)]<for_stmt>model,input zip(models inputs)<block_start>self.run_test(model input batch_size=RNN_BATCH_SIZE)<block_end><block_end><def_stmt>test_gru_no_bias self<block_start><class_stmt>GruNet(torch.nn.Module)<block_start><def_stmt>__init__ self input_size hidden_size num_layers bidirectional<block_start>super(GruNet self).__init__()<line_sep>self.mygru=torch.nn.GRU(input_size hidden_size num_layers bidirectional=bidirectional bias=<false>)<block_end><def_stmt>forward self input initial_state<block_start>out=self.mygru(input initial_state)<line_sep><return>out<block_end><block_end><def_stmt>get_GruNet_model_and_inputs input_size hidden_size num_layers batch_size seq_len bidirectional<block_start>num_directions=2<if>bidirectional<else>1<line_sep>model=GruNet(input_size hidden_size num_layers bidirectional)<line_sep>input=torch.randn(seq_len batch_size input_size)<line_sep>h0=torch.randn(num_layers<times>num_directions batch_size hidden_size)<line_sep><return>model (input h0)<block_end>input_size=[7 5]<line_sep>hidden_size=[3 4]<line_sep>num_layers=[2 3]<line_sep>batch_size=[3 4]<line_sep>seq_len=[5 7]<line_sep>bidirectional=[<true> <false>]<line_sep>models_and_inputs=[get_GruNet_model_and_inputs(i h n b s bi)<for>i,h,n,b,s,bi zip(input_size hidden_size num_layers batch_size seq_len bidirectional)]<for_stmt>model,input models_and_inputs<block_start>self.run_test(model input do_constant_folding=<true>)<block_end><block_end><def_stmt>test_gru_constant_folding self<block_start><class_stmt>GruNet(torch.nn.Module)<block_start><def_stmt>__init__ self input_size hidden_size num_layers bidirectional<block_start>super(GruNet self).__init__()<line_sep>self.mygru=torch.nn.GRU(input_size hidden_size num_layers bidirectional=bidirectional)<block_end><def_stmt>forward self input initial_state<block_start>out=self.mygru(input initial_state)<line_sep><return>out<block_end><block_end><def_stmt>get_GruNet_model_and_inputs input_size hidden_size num_layers batch_size seq_len bidirectional<block_start>num_directions=2<if>bidirectional<else>1<line_sep>model=GruNet(input_size hidden_size num_layers bidirectional)<line_sep>input=torch.randn(seq_len batch_size input_size)<line_sep>h0=torch.randn(num_layers<times>num_directions batch_size hidden_size)<line_sep><return>model (input h0)<block_end>batch_size1=3<line_sep>model1,input1=get_GruNet_model_and_inputs(7 3 2 batch_size1 5 <true>)<line_sep>self.run_test(model1 input1 do_constant_folding=<true>)<line_sep>batch_size2=4<line_sep>model2,input2=get_GruNet_model_and_inputs(5 4 3 batch_size2 7 <false>)<line_sep>self.run_test(model2 input2 do_constant_folding=<true>)<block_end>@skipIfUnsupportedMinOpsetVersion(8)<def_stmt>test_max_tensors self<block_start><class_stmt>MaxModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>torch.max(input 
other)<block_end><block_end>model=MaxModel()<line_sep>x=torch.randn(4 4 requires_grad=<true>)<line_sep>y=torch.randn(4 1 requires_grad=<true>)<line_sep>self.run_test(model (x y))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_arange_end self<block_start><class_stmt>ArangeScript(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self a<block_start><return>torch.arange(a.size(0) dtype=torch.float).view(-1 1)+a<block_end><block_end>x=torch.randn(3 4 requires_grad=<true>)<line_sep>outputs=ArangeScript()(x)<line_sep>self.run_test(ArangeScript() x)<class_stmt>ArangeModel(torch.nn.Module)<block_start><def_stmt>forward self a<block_start><return>torch.arange(a.size(0) dtype=torch.float).view(-1 1)+a<block_end><block_end>self.run_test(ArangeModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_arange_end_notype self<block_start><class_stmt>ArangeScript(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self a<block_start><return>torch.arange(a.size(0))<block_end><block_end>x=torch.randn(3 4 requires_grad=<true>)<line_sep>outputs=ArangeScript()(x)<line_sep>self.run_test(ArangeScript() x)<class_stmt>ArangeModel(torch.nn.Module)<block_start><def_stmt>forward self a<block_start><return>torch.arange(a.size(0))<block_end><block_end>self.run_test(ArangeModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_arange_start_end self<block_start><class_stmt>ArangeScript(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self a<block_start><return>torch.arange(2 a.size(0)+2 dtype=torch.float).view(-1 1)+a<block_end><block_end>x=torch.randn(3 4 requires_grad=<true>)<line_sep>self.run_test(ArangeScript() x)<class_stmt>ArangeModel(torch.nn.Module)<block_start><def_stmt>forward self a<block_start><return>torch.arange(2 a.size(0)+2 dtype=torch.float).view(-1 1)+a<block_end><block_end>self.run_test(ArangeModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_arange_start_end_notype self<block_start><class_stmt>ArangeScript(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self a<block_start><return>torch.arange(2.7 a.size(0)+2).view(-1 1)+a<block_end><block_end>x=torch.randn(3 4 requires_grad=<true>)<line_sep>self.run_test(ArangeScript() x)<class_stmt>ArangeModel(torch.nn.Module)<block_start><def_stmt>forward self a<block_start><return>torch.arange(2.7 a.size(0)+2).view(-1 1)+a<block_end><block_end>self.run_test(ArangeModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_arange_start_end_step self<block_start><class_stmt>ArangeScript(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self a<block_start><return>torch.arange(2 a.size(0)<times>a.size(1)+2 a.size(1) dtype=torch.float).view(-1 1)+a<block_end><block_end>x=torch.randn(3 4 requires_grad=<true>)<line_sep>self.run_test(ArangeScript() x)<class_stmt>ArangeModel(torch.nn.Module)<block_start><def_stmt>forward self a<block_start><return>torch.arange(2 a.size(0)<times>a.size(1)+2 a.size(1) dtype=torch.float).view(-1 1)+a<block_end><block_end>self.run_test(ArangeModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_arange_start_end_step_notype self<block_start><class_stmt>ArangeScript(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self a<block_start><return>torch.arange(2.7 a.size(0)<times>a.size(1)+2 a.size(1)).view(-1 1)+a<block_end><block_end>x=torch.randn(3 4 
requires_grad=<true>)<line_sep>self.run_test(ArangeScript() x)<class_stmt>ArangeModel(torch.nn.Module)<block_start><def_stmt>forward self a<block_start><return>torch.arange(2.7 a.size(0)<times>a.size(1)+2 a.size(1)).view(-1 1)+a<block_end><block_end>self.run_test(ArangeModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test__dim_arange self<block_start><class_stmt>DimArange(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch._dim_arange(input 1)<block_end><block_end>x=torch.ones(5 6)<line_sep>self.run_test(DimArange() x)<block_end><def_stmt>_test_compare_ops self model num_inputs<block_start>x_float=torch.randn(1 2 3 4 requires_grad=<true>)<line_sep>x_int=torch.randint(10 (3 4) dtype=torch.int32)<if_stmt>num_inputs<g>1<block_start>y_float=torch.randn(1 2 3 4 requires_grad=<true>)<line_sep>y_int=torch.randint(10 (3 4) dtype=torch.int32)<line_sep>self.run_test(model (x_float y_float))<line_sep>self.run_test(model (x_float y_int))<line_sep>self.run_test(model (x_int y_float))<line_sep>self.run_test(model (x_int y_int))<block_end><else_stmt><block_start>self.run_test(model x_float)<line_sep>self.run_test(model x_int)<block_end><block_end><def_stmt>test_gt self<block_start><class_stmt>GreaterModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>input<g>other<block_end><block_end>self._test_compare_ops(GreaterModel() 2)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_ge self<block_start><class_stmt>GreaterOrEqualModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>input<ge>other<block_end><block_end>self._test_compare_ops(GreaterOrEqualModel() 2)<block_end><def_stmt>test_gt_scalar self<block_start><class_stmt>GreaterModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input<g>1<block_end><block_end>self._test_compare_ops(GreaterModel() 1)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_ge_scalar self<block_start><class_stmt>GreaterOrEqualModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input<ge>1<block_end><block_end>self._test_compare_ops(GreaterOrEqualModel() 1)<block_end><def_stmt>test_lt self<block_start><class_stmt>LessModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>input<l>other<block_end><block_end>self._test_compare_ops(LessModel() 2)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_le self<block_start><class_stmt>LessOrEqualModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>input<le>other<block_end><block_end>self._test_compare_ops(LessOrEqualModel() 2)<block_end><def_stmt>test_lt_scalar self<block_start><class_stmt>LessModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input<l>1<block_end><block_end>self._test_compare_ops(LessModel() 1)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_le_scalar self<block_start><class_stmt>LessOrEqualModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input<le>1<block_end><block_end>self._test_compare_ops(LessOrEqualModel() 1)<block_end><def_stmt>test_matmul self<block_start><class_stmt>MatmulModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>torch.matmul(input other)<block_end><block_end>x=torch.randn(3 4 requires_grad=<true>)<line_sep>y=torch.randn(4 5 requires_grad=<true>)<line_sep>self.run_test(MatmulModel() (x
y))<line_sep>x=torch.randint(10 (3 4))<line_sep>y=torch.randint(10 (4 5))<line_sep>self.run_test(MatmulModel() (x y))<block_end><def_stmt>test_matmul_batch self<block_start><class_stmt>MatmulModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>torch.matmul(input other)<block_end><block_end>x=torch.randn(2 3 4 requires_grad=<true>)<line_sep>y=torch.randn(2 4 5 requires_grad=<true>)<line_sep>self.run_test(MatmulModel() (x y))<line_sep>x=torch.randint(10 (2 3 4))<line_sep>y=torch.randint(10 (2 4 5))<line_sep>self.run_test(MatmulModel() (x y))<block_end><def_stmt>_argmin_argmax_model self input<block_start><class_stmt>ArgminArgmaxModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.argmin(input) torch.argmax(input) torch.argmin(input keepdim=<true>) torch.argmax(input keepdim=<true>)<block_end><block_end>self.run_test(ArgminArgmaxModel() input)<block_end><def_stmt>test_argmin_argmax self<block_start>input=torch.randn(7 3 5)<line_sep>self._argmin_argmax_model(input)<block_end># Argmin and Argmax with "select_last_index" are not supported before opset 12
# "select_last_index" was added in opset 12 to deal with the corner case where the
# same value appears multiple times in the tensor
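# e.g. for [1., 1., 2.] the minimum ties at indices 0 and 1: ONNX ArgMin with
# select_last_index=1 reports index 1, while the default reports index 0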
@skipIfUnsupportedMinOpsetVersion(12)<def_stmt>test_argmin_argmax_select_last_index self<block_start>input=torch.tensor([[1. 2. 3.] [1. 1. 2.]])<line_sep>self._argmin_argmax_model(input)<line_sep>input=torch.ones(7 3 5)<line_sep>self._argmin_argmax_model(input)<block_end><def_stmt>test_repeat self<block_start><class_stmt>RepeatModel(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start>x2=x.repeat(y.shape[0] 1)<line_sep>y1=y.view(-1 1)<line_sep><return>x2+y1<block_end><block_end>x=torch.tensor([1 2 3])<line_sep>y=torch.tensor([4 5 8 9])<line_sep>self.run_test(RepeatModel() (x y))<block_end><def_stmt>test_view self<block_start><class_stmt>ViewModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>input.view(4 24)<block_end><block_end>x=torch.randint(10 (4 2 3 4) dtype=torch.int32)<line_sep>self.run_test(ViewModel() x)<block_end><def_stmt>test_view_dynamic self<block_start><class_stmt>ViewModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>input.view(other.shape)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>shape=torch.randn(6 4)<line_sep>self.run_test(ViewModel() (x shape))<block_end><def_stmt>test_view_dynamic_zero_dim self<block_start><class_stmt>ViewModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>input=input.view(-1 2)<line_sep><return>input.view(1 -1)<block_end><block_end>x=torch.ones(2)<line_sep>another_x=torch.empty((0 ))<line_sep>self.run_test(ViewModel() x test_with_inputs=[another_x] input_names=['input_1'] dynamic_axes={'input_1':[0 ]})<block_end><def_stmt>test_view_as self<block_start><class_stmt>ViewModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>input.view_as(other)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.randn(6 4)<line_sep>self.run_test(ViewModel() (x y))<block_end>@disableScriptTest()# ONNX Shape inference failure in if/else block for Gemm
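# weight_norm reparameterizes the weight as w = g * v / ||v||, so the exported
# graph contains the normalization ops (plus a Gemm for the Linear case)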
<def_stmt>test_weight_norm self<block_start>model=torch.nn.utils.weight_norm(torch.nn.Linear(5 10) dim=1)<line_sep>x=torch.randn(3 4 5 requires_grad=<true>)<line_sep>self.run_test(model x)<line_sep>model=torch.nn.utils.weight_norm(torch.nn.Conv1d(1 1 3))<line_sep>x=torch.randn(1 1 5 requires_grad=<true>)<line_sep>self.run_test(model x)<line_sep>model=torch.nn.utils.weight_norm(torch.nn.Conv1d(1 1 3) dim=-2)<line_sep>x=torch.randn(1 1 5 requires_grad=<true>)<line_sep>self.run_test(model x)<line_sep>model=torch.nn.utils.weight_norm(torch.nn.Conv1d(3 6 3) name='weight')<line_sep>x=torch.randn(3 3 5 requires_grad=<true>)<line_sep>self.run_test(model x)<block_end>@disableScriptTest()# ONNX Shape inference failure in if/else block for Gemm
<def_stmt>test_weight_norm_nodim self<block_start>model=torch.nn.utils.weight_norm(torch.nn.Linear(5 10) dim=<none>)<line_sep>x=torch.randn(3 4 5 requires_grad=<true>)<line_sep>self.run_test(model x)<block_end><def_stmt>test_flatten self<block_start><class_stmt>FlattenModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.flatten(input)<block_end><block_end>x=torch.randint(10 (1 2 3 4))<line_sep>self.run_test(FlattenModel() x)<block_end><def_stmt>test_flatten2d self<block_start><class_stmt>FlattenModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.flatten(input 1)<block_end><block_end>x=torch.randint(10 (1 2 3 4))<line_sep>self.run_test(FlattenModel() x)<block_end><def_stmt>test_flatten2d_neg self<block_start><class_stmt>FlattenModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.flatten(x 1 -1) torch.flatten(x 0 -2) torch.flatten(x 1 -2)<block_end><block_end>x=torch.randint(10 (1 2 3 4))<line_sep>self.run_test(FlattenModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_flatten_dynamic_axes self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.flatten(x start_dim=2 end_dim=3)<block_end><block_end>batch_size=3<line_sep>x=torch.randn(batch_size 5 4 5)<line_sep>y=torch.randn(5 5 4 5)<line_sep>model=MyModule()<line_sep>self.run_test(model x test_with_inputs=[y] input_names=['input'] output_names=['output'] dynamic_axes={'input':{0:'batch_size'} 'output':{0:'batch_size'}})<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_getitem self<block_start><class_stmt>GetItemModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x y z ind# this will create prim::ListConstruct(x, y, z) + aten::__getitem__
<block_start>arr=[x y z]<line_sep><return>arr[ind]<block_end><block_end>x=torch.randn(3 4 5)<line_sep>y=torch.randn(1 4 5)<line_sep>z=torch.randn(2 4 5)<line_sep>ind=torch.tensor(1 dtype=torch.long)<line_sep>self.run_test(GetItemModel() (x y z ind))<line_sep>ind=torch.tensor(-2 dtype=torch.long)<line_sep>self.run_test(GetItemModel() (x y z ind))<block_end><def_stmt>test_unbind self<block_start><class_stmt>UnbindModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>_,out,_=input.unbind()<line_sep><return>out<block_end><block_end>x=torch.randn(3 4 5)<line_sep>self.run_test(UnbindModel() x)<class_stmt>UnbindModel2(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>_,out,_,_=input.unbind(1)<line_sep><return>out<block_end><block_end>x=torch.randn(3 4 5)<line_sep>self.run_test(UnbindModel2() x)<class_stmt>UnbindModel3(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>_,out,_,_=input.unbind(-2)<line_sep><return>out<block_end><block_end>x=torch.randn(3 4 5)<line_sep>self.run_test(UnbindModel3() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_len self<block_start><class_stmt>LenModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>len(input.unbind())+input<block_end><block_end>x=torch.randn(4 5)<line_sep>self.run_test(LenModel() x input_names=['input'] dynamic_axes={'input':{0:'seq'}} test_with_inputs=(torch.randn(5 5) ))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_len_list self<block_start><class_stmt>LenListModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>torch.ones(len(input.shape))<block_end><block_end>x=torch.randn(4 5)<line_sep>self.run_test(LenListModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_unbind_dynamic self<block_start><class_stmt>UnbindModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>input.unbind()[1]<block_end><block_end>x=torch.randn(3 4 5)<line_sep>self.run_test(UnbindModel() x)<class_stmt>UnbindModel2(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>input.unbind(-1)[1]<block_end><block_end>x=torch.randn(3 4 5)<line_sep>self.run_test(UnbindModel2() x)<block_end><def_stmt>test_split self<block_start><class_stmt>SplitModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>out1,out2,out3=input.split([2 1 2])<line_sep><return>out1 out2 out3<block_end><block_end>x=torch.randn(5 4 3)<line_sep>self.run_test(SplitModel() x)<class_stmt>SplitModel2(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>out1,out2,out3=input.split([2 1 1] -2)<line_sep><return>out1 out2 out3<block_end><block_end>x=torch.randn(5 4 3)<line_sep>self.run_test(SplitModel2() x)<class_stmt>SplitModel3(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>out1,out2,out3=input.split([2 1 2])<line_sep><return>out3 out1<block_end><block_end>x=torch.randn(5 4 3)<line_sep>self.run_test(torch.jit.script(SplitModel3()) x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()<def_stmt>test_split_size_as_list self<block_start><class_stmt>SplitModel(torch.nn.Module)<block_start><def_stmt>forward self input split_sizes:List[int]<block_start>out=[]<line_sep>split_list:List[torch.Tensor]=input.split(split_sizes)<for_stmt>ob split_list<block_start>out.append(ob)<block_end><return>torch.cat(out 
dim=0)<block_end><block_end>x=torch.randn(6 4 3)<line_sep>split_sizes=[torch.tensor(2) torch.tensor(4)]<line_sep>self.run_test(SplitModel() (x split_sizes))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_split_size_with_slice self<block_start><class_stmt>SplitModule(torch.nn.Module)<block_start><def_stmt>forward self x y t<block_start>splits=(x.size(1) y.size(1))<line_sep>out,out2=torch.split(t splits dim=1)<line_sep><return>out out2<block_end><block_end>x=torch.randn(2 3)<line_sep>y=torch.randn(2 4)<line_sep>t=torch.randn(2 7)<line_sep>self.run_test(SplitModule() (x y t))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_split_dynamic self<block_start><class_stmt>SplitModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>input.split(2)[1]<block_end><block_end>x=torch.randn(5 4 3)<line_sep>self.run_test(SplitModel() x)<class_stmt>SplitModel2(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>input.split(2 -3)[1]<block_end><block_end>x=torch.randn(5 4 3)<line_sep>self.run_test(SplitModel2() x)<block_end><def_stmt>test_concat self<block_start><class_stmt>ConcatModel(torch.nn.Module)<block_start><def_stmt>forward self x y z<block_start><return>torch.cat((x y z))<block_end><block_end>x=torch.randn(3 4 5)<line_sep>y=torch.randn(1 4 5)<line_sep>z=torch.randn(2 4 5)<line_sep>self.run_test(ConcatModel() (x y z))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_concat_dynamic self<block_start><class_stmt>ConcatDynamicModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>torch.cat(x.unbind())<block_end><block_end>x=torch.randn(4 5 6)<line_sep>self.run_test(ConcatDynamicModel() x)<block_end><def_stmt>test_stack self<block_start><class_stmt>StackModel(torch.nn.Module)<block_start><def_stmt>forward self x y z<block_start><return>torch.stack((x y z) 1)<block_end><block_end>x=torch.randn(3 4 5)<line_sep>y=torch.randn(3 4 5)<line_sep>z=torch.randn(3 4 5)<line_sep>self.run_test(StackModel() (x y z))<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_stack_dynamic self<block_start><class_stmt>StackDynamicModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>torch.stack(x.unbind() 1)<block_end><block_end>x=torch.randn(4 5 6)<line_sep>self.run_test(StackDynamicModel() x)<block_end><def_stmt>test_loop_dynamic self<block_start><class_stmt>LoopModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><for_stmt>i range(x.size(2))<block_start>x=x+i<block_end><return>x<block_end><block_end>model=LoopModel()<line_sep>inputs=torch.zeros(1 2 3 dtype=torch.long)<line_sep>self.run_test(model inputs)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_loop_nested self<block_start><class_stmt>NestedLoopsModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><for_stmt>i range(5)<block_start>a=0<while_stmt>a<l>4<block_start>a<augadd>1<block_end>x=x+a<block_end><return>x<block_end><block_end>model=NestedLoopsModel()<line_sep>inputs=torch.zeros(1 2 3 dtype=torch.long)<line_sep>self.run_test(model inputs)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_loop_with_list self<block_start><class_stmt>ListLoopModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self 
x<block_start>res=[]<line_sep>res1=[]<line_sep>arr=x.split([3 4 1 1 2 3 2] 0)<line_sep>res2=torch.zeros(3 4 dtype=torch.long)<line_sep>res3=[]<line_sep>res4=[]<for_stmt>i range(len(arr))<block_start>res=res.append(arr[i].sum(0 <false>))<line_sep>res1=res1.append(arr[-1-i].sum(0 <false>))<line_sep>res2<augadd>1<line_sep>res3=res3+[arr[i].sum(0 <false>)]<line_sep>res4<augadd>[arr[-1-i].sum(0 <false>)]<block_end><return>torch.stack(res) torch.stack(res1) res2 torch.stack(res3) torch.stack(res4)<block_end><block_end>model=ListLoopModel()<line_sep>inputs=torch.randn(16)<line_sep>self.run_test(model inputs)<block_end>@skipIfONNXShapeInference(<false>)@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_loop_transpose self<block_start><class_stmt>LoopModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>res=torch.zeros_like(x[0])<for_stmt>i range(x.size(0))<block_start>res<augadd>x[0].transpose(0 1)<block_end><return>res<block_end><block_end>model=torch.jit.script(LoopModel())<line_sep>x=torch.randn(5 3 3)<line_sep>self.run_test(model x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_list self<block_start><class_stmt>ListModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start>tensors=x.unbind()<line_sep>res=[]<line_sep>res.append(tensors[0])<line_sep>res.append(tensors[1])<line_sep>res.pop(1)<line_sep>res.insert(0 tensors[1])<line_sep>res.append(tensors[2])<line_sep>res<augadd>[tensors[3] tensors[4]]<line_sep>res=res+[tensors[5]]<line_sep><return>torch.ones(len(res))<block_end><block_end>model=ListModel()<line_sep>inputs=torch.randn(16 1)<line_sep>self.run_test(model inputs)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_tensor_factories self<block_start><class_stmt>TensorFactory(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.zeros(x.size())+torch.ones(x.size())<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(TensorFactory() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_tensor_factories_script self<block_start><class_stmt>TensorFactory(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start><return>torch.zeros(x.shape dtype=torch.float)+torch.ones(x.shape dtype=torch.float)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(TensorFactory() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_tensor_like_factories_script self<block_start><class_stmt>TensorFactory(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start>zeros=torch.zeros_like(x dtype=torch.float layout=torch.strided device=torch.device('cpu'))<line_sep>ones=torch.ones_like(x dtype=torch.float layout=torch.strided device=torch.device('cpu'))<line_sep><return>zeros+ones<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(TensorFactory() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_eye self<block_start><class_stmt>TensorFactory(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.eye(x.size()[1] 3) torch.eye(4 4 dtype=torch.long) torch.eye(x.size()[1] 2 dtype=torch.long)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>another_x=torch.randn(5 6 7)<line_sep>self.run_test(TensorFactory() x test_with_inputs=[another_x] input_names=['input_1'] dynamic_axes={'input_1':[0 1 2]})<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_inplace_zero 
self<block_start><class_stmt>Zero_(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.zero_() x<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(Zero_() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_new_zeros self<block_start><class_stmt>Zero_(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.new_zeros(x.shape[1:2]) x.new_zeros(x.shape[2:] dtype=torch.long)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(Zero_() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_list_pass self<block_start><class_stmt>Slice(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start><return>x.new_zeros(x.shape[2:]+y.shape[1:])<block_end><block_end>x=torch.randn(2 3 4 5)<line_sep>y=torch.randn(1 2 3 4)<line_sep>self.run_test(Slice() (x y))<class_stmt>Size(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start><return>x.new_zeros(x.shape+y.shape)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.randn(1 2 3)<line_sep>self.run_test(Size() (x y))<class_stmt>Array(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start>arr1=[x.shape[0] x.shape[1] 2]<line_sep>arr2=[y.shape[0] y.shape[1]]<line_sep><return>x.new_zeros(arr1+arr2)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.randn(1 2 3)<line_sep>self.run_test(Array() (x y))<class_stmt>List(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start>l1=list(x.shape)<line_sep>l2=list(y.shape)<line_sep><return>x.new_zeros(l1+l2)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.randn(1 2 3)<line_sep>self.run_test(List() (x y))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_new_empty self<block_start><class_stmt>Empty(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.new_empty(x.shape[0]).fill_(0) x.new_empty(x.shape[0] dtype=torch.long)<times>0<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(Empty() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_new_full self<block_start><class_stmt>Full(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.new_full(x.shape[1:2] 5) x.new_full(x.shape[0:1] 1.3 dtype=torch.long)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(Full() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_inplace_list self<block_start><class_stmt>Arithmetic(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x y<block_start><return>torch.cat([x.add_(3) y.fill_(0)])<block_end><block_end>x=torch.randn(2 3)<line_sep>y=torch.randn(2 3)<line_sep>self.run_test(Arithmetic() (x y))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_inplace_fill self<block_start><class_stmt>Fill_(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.fill_(3) x<block_end><block_end>x=torch.randn(2 3 4)<line_sep>self.run_test(Fill_() x)<block_end><def_stmt>test_inplace_arithmetic self<block_start><class_stmt>Arithmetic(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x y<block_start>x.add_(3)<line_sep>y.mul_(x)<line_sep><return>x y<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.randn(2 3 4)<line_sep>self.run_test(Arithmetic() (x y))<block_end>@disableScriptTest()<def_stmt>test_sort self<block_start><class_stmt>SortModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>out=[]<line_sep><for_stmt>i range(-2 2)<block_start>out.append(torch.sort(x dim=i
descending=<true>))<block_end><return>out<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(SortModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()<def_stmt>test_sort_ascending self<block_start><class_stmt>SortModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>out=[]<for_stmt>i range(-2 2)<block_start>out.append(torch.sort(x dim=i descending=<false>))<block_end><return>out<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(SortModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_masked_fill self<block_start><class_stmt>MaskedFillModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>mask=torch.tensor([[0 0 1] [1 1 0]] dtype=torch.uint8)<line_sep><return>x.masked_fill(mask 2)<block_end><block_end>x=torch.zeros(4 2 3 requires_grad=<true>)<line_sep>self.run_test(MaskedFillModel() x)<class_stmt>MaskedFillModel2(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.masked_fill(x<g>3 -1)<block_end><block_end>x=torch.arange(16).view(2 2 4).to(torch.float32)<line_sep>self.run_test(MaskedFillModel2() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_masked_fill_inplace self<block_start><class_stmt>MaskedFillModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start>mask=torch.tensor([[0 0 1] [1 1 0]] dtype=torch.uint8)<line_sep>x.masked_fill_(mask 2)<line_sep><return>x<block_end><block_end>x=torch.zeros(4 2 3 requires_grad=<true>)<line_sep>self.run_test(MaskedFillModel() x)<class_stmt>MaskedFillModel2(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self x<block_start>x.masked_fill_(x<g>3 -1)<line_sep><return>x<block_end><block_end>x=torch.arange(16).view(2 2 4).to(torch.float32)<line_sep>self.run_test(MaskedFillModel2() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_masked_scatter self<block_start><class_stmt>MaskedScatterModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.masked_scatter(x x.ge(0.5) torch.ones(100 100)<times>5)<block_end><block_end>x=torch.randn(3 4 5 requires_grad=<true>)<line_sep>self.run_test(MaskedScatterModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_masked_select self<block_start><class_stmt>MaskedSelectModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.masked_select(x x.ge(0.5))<block_end><block_end>x=torch.randn(3 4 5 requires_grad=<true>)<line_sep>self.run_test(MaskedSelectModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_pixel_shuffle self<block_start><class_stmt>PixelShuffle(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.pixel_shuffle(x upscale_factor=2)<block_end><block_end>x=torch.randn(2 16 4 3 requires_grad=<true>)<line_sep>self.run_test(PixelShuffle() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_scalar_type self<block_start><class_stmt>ArithmeticModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.size(0)<times>2<times>x<block_end><block_end>x=torch.ones(2 3 dtype=torch.float32)<line_sep>self.run_test(ArithmeticModel() x)<class_stmt>ReciprocalModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.reciprocal(x)<block_end><block_end>x=torch.tensor([2.0 4.0] dtype=torch.double)<line_sep>self.run_test(ReciprocalModel() x)<class_stmt>ComparisonModel(torch.nn.Module)<block_start><def_stmt>forward self x 
y<block_start>a=torch.tensor([12.0])<line_sep><return>x.lt(1.5)&y.le(2)&x.le(1) x.gt(y) x.lt(y) a.ge(x.size(0))<block_end><block_end>x=torch.ones(2 3 dtype=torch.int32)<line_sep>y=torch.ones(2 3 dtype=torch.float32)<line_sep>self.run_test(ComparisonModel() (x y))<class_stmt>MatMulModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>(torch.mm(x x)+x+torch.mm(x x)+x)<block_end><block_end>x=torch.ones(3 3)<line_sep>self.run_test(MatMulModel() x)<class_stmt>AddMMModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.mm(x x)+x<block_end><block_end>x=torch.ones(3 3)<line_sep>self.run_test(AddMMModel() x)<class_stmt>FullModel(torch.nn.Module)# add is used for exporting full
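# (presumably the symbolic builds zeros(sizes) + x when the fill value is a
# runtime tensor, since it cannot be folded into a Constant attribute)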
<block_start><def_stmt>forward self x<block_start><return>torch.full((3 4) x)<block_end><block_end>x=torch.tensor(12.)<line_sep>self.run_test(FullModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()# dtype mismatch
<def_stmt>test_full_like self<block_start><class_stmt>FullLikeModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.full_like(x 4)<block_end><block_end>x=torch.tensor(12)<line_sep>self.run_test(FullLikeModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()# dtype mismatch
<def_stmt>test_full_like_value self<block_start><class_stmt>FullLikeModel(torch.nn.Module)<block_start><def_stmt>forward self x y<block_start>out=y+2<line_sep><return>torch.full_like(x out)<block_end><block_end>x=torch.tensor(12)<line_sep>y=torch.tensor(2)<line_sep>self.run_test(FullLikeModel() (x y))<block_end><def_stmt>test_l1_norm self<block_start><class_stmt>NormModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.norm(x p=1 dim=-1 keepdim=<false>)<block_end><block_end>x=torch.randn(4 2 3 requires_grad=<true>)<line_sep>self.run_test(NormModel() x)<block_end><def_stmt>test_l2_norm self<block_start><class_stmt>NormModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.norm(x p=2 dim=-2 keepdim=<false>)<block_end><block_end>x=torch.randn(4 2 3 requires_grad=<true>)<line_sep>self.run_test(NormModel() x)<block_end><def_stmt>test_frobenius_norm self<block_start><class_stmt>NormModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.norm(x p="fro" dim=0 keepdim=<false>)<block_end><block_end>x=torch.randn(4 2 3 requires_grad=<true>)<line_sep>self.run_test(NormModel() x)<block_end><def_stmt>test_frobenius_norm_keepdim self<block_start><class_stmt>NormModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.norm(x p="fro" dim=(0 1) keepdim=<true>)<block_end><block_end>x=torch.randn(4 2 3 requires_grad=<true>)<line_sep>self.run_test(NormModel() x)<block_end><def_stmt>test_unfold self<block_start><class_stmt>UnfoldModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.unfold(dimension=2 size=2 step=2)<block_end><block_end>x=torch.randn(4 2 3 requires_grad=<true>)<line_sep>self.run_test(UnfoldModel() x)<block_end>@skipIfONNXShapeInference(<false>)<def_stmt>test_unfold_infer_shape self<block_start><class_stmt>UnfoldModule(torch.jit.ScriptModule)<block_start><def_stmt>__init__ self<block_start>super(UnfoldModule self).__init__()<line_sep>self.conv=torch.nn.Conv1d(3 1 3 stride=2)<block_end>@torch.jit.script_method<def_stmt>forward self x<block_start>x=self.conv(x)<line_sep><return>x.unfold(dimension=2 size=2 step=2)<block_end><block_end>x=torch.randn(32 3 64)<line_sep>self.run_test(UnfoldModule() x)<block_end><def_stmt>test_remainder self<block_start><class_stmt>RemainderModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>torch.remainder(input other)<block_end><block_end>x=torch.randn(4 2 3)<line_sep>y=torch.randn(1 2 1)<line_sep>self.run_test(RemainderModel() (x y))<block_end><def_stmt>test_remainder_scalar self<block_start><class_stmt>RemainderModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.remainder(input 2.55)<block_end><block_end>x=torch.randint(10 (2 3))<line_sep>self.run_test(RemainderModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(10)<def_stmt>test_fmod self<block_start><class_stmt>FModModel(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>torch.fmod(input other)<block_end><block_end>x=torch.randn(4 2 3)<line_sep>y=torch.randn(1 2 1)<line_sep>self.run_test(FModModel() (x y))<block_end>@skipIfUnsupportedMinOpsetVersion(10)<def_stmt>test_fmod_scalar self<block_start><class_stmt>FModModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.fmod(input 2.55)<block_end><block_end>x=torch.randint(10 (2 3))<line_sep>self.run_test(FModModel() 
x)<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_gelu self<block_start><class_stmt>GeluModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.nn.functional.gelu(x)<block_end><block_end>x=torch.randn(2 4 5 6 requires_grad=<true>)<line_sep>self.run_test(GeluModel() x)<block_end><def_stmt>test_add_inplace self<block_start><class_stmt>InplaceAddModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>x<augadd>12<line_sep><return>x<block_end><block_end>x=torch.randn(4 2 3 requires_grad=<true>)<line_sep>self.run_test(InplaceAddModel() x)<block_end><def_stmt>test_rsqrt self<block_start><class_stmt>RsqrtModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.rsqrt()<block_end><block_end>x=torch.randn(4 2 3 requires_grad=<true> dtype=torch.float64)<line_sep>self.run_test(RsqrtModel() x)<block_end><def_stmt>test_rsqrt_zeros self<block_start><class_stmt>RsqrtModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>x.rsqrt()<block_end><block_end>x=torch.zeros(4 2 3 requires_grad=<true> dtype=torch.float64)<line_sep>self.run_test(RsqrtModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_unique self<block_start><class_stmt>UniqueModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.unique(x sorted=<true> return_inverse=<false> return_counts=<true>)<block_end><block_end>x=torch.tensor([1 3 2 3] dtype=torch.long)<line_sep>self.run_test(UniqueModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_unique_along_dim self<block_start><class_stmt>UniqueModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.unique(x dim=0 sorted=<true> return_inverse=<true> return_counts=<false>)<block_end><block_end>x=torch.tensor([1 3 2 3] dtype=torch.long)<line_sep>self.run_test(UniqueModel() x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_cumsum self<block_start><class_stmt>CumSum(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.cumsum(input dim=0)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>model=CumSum()<line_sep>self.run_test(model x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_cumsum_with_cast self<block_start><class_stmt>CumSum(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.cumsum(input dim=0 dtype=torch.float32)<block_end><block_end>model=CumSum()<line_sep>x=torch.tensor([2 3 4] dtype=torch.int32)<line_sep>self.run_test(model x)<line_sep>x=torch.tensor([<false> <true> <true>])<line_sep>self.run_test(model x)<block_end>@disableScriptTest()# error in propagate as assign input shape
@skipIfUnsupportedMinOpsetVersion(10)@skipIfUnsupportedOpsetVersion([12])# Due to ONNX Loop shape inference issue
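# embedding_bag with offsets appears to be exported using an ONNX Loop over
# the bags, which is what hits the Loop shape inference issue noted above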
<def_stmt>test_embedding_bag self<block_start>model=torch.nn.EmbeddingBag(10 5 mode='sum' scale_grad_by_freq=<true>)<line_sep>input=torch.randint(10 (7 ))<line_sep>offset=torch.tensor([0 2 5 6])<line_sep>self.run_test(model (input offset))<line_sep>model=torch.nn.EmbeddingBag(10 5 mode='sum' include_last_offset=<true>)<line_sep>input=torch.randint(10 (7 ))<line_sep>offset=torch.tensor([0 2 5 6])<line_sep>self.run_test(model (input offset))<line_sep>model=torch.nn.EmbeddingBag(10 5 mode='max')<line_sep>input=torch.randint(10 (7 5))<line_sep>self.run_test(model (input))<block_end>@disableScriptTest()# scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(10)@skipIfUnsupportedOpsetVersion([12])# Due to ONNX Loop shape inference issue
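# per_sample_weights scales each looked-up embedding before the reduction;
# PyTorch only supports it together with mode='sum'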
<def_stmt>test_embedding_bag_1d_per_sample_weights self<block_start><class_stmt>EmbeddingModel(torch.nn.Module)<block_start><def_stmt>forward self embedding_matrix input offset weights<block_start><return>torch.nn.functional.embedding_bag(input embedding_matrix offsets=offset mode='sum' per_sample_weights=weights)<block_end><block_end>model=EmbeddingModel()<line_sep>x=torch.randint(7 (6 ))<line_sep>w=torch.randn(6 )<line_sep>offset=torch.tensor([0 2 5])<line_sep>embedding_matrix=torch.rand(10 15)<line_sep>self.run_test(model (embedding_matrix x offset w))<block_end>@disableScriptTest()# scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(10)@skipIfUnsupportedOpsetVersion([12])# Due to ONNX Loop shape inference issue
<def_stmt>test_embedding_bag_2d_per_sample_weights self<block_start><class_stmt>EmbeddingModel(torch.nn.Module)<block_start><def_stmt>forward self embedding_matrix input weights<block_start><return>torch.nn.functional.embedding_bag(input embedding_matrix mode='sum' per_sample_weights=weights)<block_end><block_end>embedding_matrix=torch.rand(10 15)<line_sep>model=EmbeddingModel()<line_sep>x=torch.randint(7 (2 3))<line_sep>w=torch.randn(2 3)<line_sep>self.run_test(model (embedding_matrix x w))<block_end>@disableScriptTest()# scripting prim::Uninitialized, prim::dtype, prim::unchecked_cast
@skipIfUnsupportedMinOpsetVersion(11)@unittest.skip("Due to ONNX Loop shape inference issue.")<def_stmt>test_embedding_bag_dynamic_input self<block_start><class_stmt>EmbeddingModel1D(torch.nn.Module)<block_start><def_stmt>forward self embedding_matrix input weights offsets<block_start><return>torch.nn.functional.embedding_bag(input embedding_matrix offsets=offsets mode='sum' per_sample_weights=weights)<block_end><block_end>model=EmbeddingModel1D()<line_sep>x=torch.randint(7 (6 ))<line_sep>w=torch.randn(6 )<line_sep>offsets=torch.tensor([0 2 5] dtype=torch.long)<line_sep>embedding_matrix=torch.rand(10 15)<line_sep>x2=torch.randint(7 (2 ))<line_sep>w2=torch.randn(2 )<line_sep>embedding_matrix2=torch.rand(12 25)<line_sep>offsets2=torch.tensor([0 ] dtype=torch.long)<line_sep>self.run_test(model (embedding_matrix x w offsets) test_with_inputs=[(embedding_matrix2 x2 w2 offsets2)] input_names=['embedding_matrix' 'x' 'offsets' 'w'] dynamic_axes={'embedding_matrix':[0 1] 'x':[0] 'offsets':[0] 'w':[0]})<class_stmt>EmbeddingModel2D(torch.nn.Module)<block_start><def_stmt>forward self embedding_matrix input weights<block_start><return>torch.nn.functional.embedding_bag(input embedding_matrix mode='sum' per_sample_weights=weights)<block_end><block_end>model=EmbeddingModel2D()<line_sep>x=torch.randint(7 (2 3))<line_sep>w=torch.randn(2 3)<line_sep>embedding_matrix=torch.rand(10 15)<line_sep>x2=torch.randint(7 (3 5))<line_sep>w2=torch.randn(3 5)<line_sep>embedding_matrix2=torch.rand(12 25)<line_sep>self.run_test(model (embedding_matrix x w) test_with_inputs=[(embedding_matrix2 x2 w2)] input_names=['embedding_matrix' 'x' 'w'] dynamic_axes={'embedding_matrix':[0 1] 'x':[0 1] 'w':[0 1]})<block_end>@skipIfUnsupportedMinOpsetVersion(8)<def_stmt>test_meshgrid self<block_start><class_stmt>Meshgrid(torch.nn.Module)<block_start><def_stmt>forward self x y z<block_start>output1,output2,output3=torch.meshgrid(x y z)<line_sep><return>output1 output2 output3<block_end><block_end>x=torch.randn(3 requires_grad=<true>)<line_sep>y=torch.zeros(4 requires_grad=<true>)<line_sep>z=torch.randn(5 requires_grad=<true>)<line_sep>self.run_test(Meshgrid() (x y z))<block_end>@skipIfUnsupportedMinOpsetVersion(8)<def_stmt>test_meshgrid_scalar self<block_start><class_stmt>Meshgrid(torch.nn.Module)<block_start><def_stmt>forward self x y z<block_start>output1,output2,output3=torch.meshgrid(x y z)<line_sep><return>output1 output2 output3<block_end><block_end>x=torch.ones(3 requires_grad=<true>)<line_sep>y=torch.zeros(4 requires_grad=<true>)<line_sep>z=torch.tensor(2.0)<line_sep>self.run_test(Meshgrid() (x y z))<block_end><def_stmt>test_baddbmm self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>forward self input batch1 batch2<block_start><return>torch.baddbmm(input batch1 batch2 alpha=torch.tensor(5) beta=3.5)<block_end><block_end>x=torch.randn(10 3 5)<line_sep>batch1=torch.randn(10 3 4)<line_sep>batch2=torch.randn(10 4 5)<line_sep>model=MyModule()<line_sep>self.run_test(model (x batch1 batch2))<block_end><def_stmt>test_baddbmm_dynamic self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>forward self input batch1 batch2 alpha beta<block_start><return>torch.baddbmm(input batch1 batch2 alpha=alpha beta=beta)<block_end><block_end>x=torch.randn(10 3 5)<line_sep>batch1=torch.randn(10 3 4)<line_sep>batch2=torch.randn(10 4 5)<line_sep>alpha=torch.tensor(5)<line_sep>beta=torch.tensor(3.5)<line_sep>model=MyModule()<line_sep>self.run_test(model (x batch1 batch2 alpha 
beta))<block_end><def_stmt>test_numel self<block_start><class_stmt>MyModule(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>input.numel()<times>input<block_end><block_end>x=torch.randn(2 3 5)<line_sep>model=MyModule()<line_sep>self.run_test(model (x ))<block_end><def_stmt>test_numel_empty self<block_start><class_stmt>MyModule(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start><return>input.numel()<times>input<block_end><block_end>x=torch.randn(0)<line_sep>model=MyModule()<line_sep>self.run_test(model (x ))<block_end><def_stmt>test_cast_to self<block_start><class_stmt>MyModule(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input other<block_start><return>input.to(other)+other<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.tensor([1] dtype=torch.int64)<line_sep>model=MyModule()<line_sep>self.run_test(model (x y))<block_end><def_stmt>test_cast_to_bool self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>torch.cat((input.to(other) other) 0)<block_end><block_end>x=torch.randn(2 3 4)<line_sep>y=torch.zeros([2 3 4] dtype=torch.bool)<line_sep>model=MyModule()<line_sep>self.run_test(model (x y))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_ones_bool self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>true=torch.ones(input.shape dtype=torch.bool)<line_sep><return>input.to(true)&true<block_end><block_end>x=torch.randn(2 3 4)<line_sep>model=MyModule()<line_sep>self.run_test(model x)<block_end><def_stmt>test_log self<block_start><class_stmt>Log(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.log(input)<block_end><block_end>x=torch.rand(2 3 4)<line_sep>model=Log()<line_sep>self.run_test(model x)<block_end><def_stmt>test_log1p self<block_start><class_stmt>Log1p(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.log1p(input)<block_end><block_end>x=torch.rand(2 3 4)<line_sep>model=Log1p()<line_sep>self.run_test(model x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_round self<block_start><class_stmt>Round(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.round(x)<block_end><block_end>x=torch.tensor([0.9920 -1.0362 -1.5000 3.5000] requires_grad=<true>)<line_sep>self.run_test(Round() x)<block_end><def_stmt>test_constant_pad self<block_start>model=torch.nn.ConstantPad1d(2 3.5)<line_sep>x=torch.randn(2 4 4)<line_sep>self.run_test(model x)<line_sep>model=torch.nn.ConstantPad2d((3 0 2 1) 3.5)<line_sep>x=torch.randn(2 2 4 4)<line_sep>self.run_test(model x)<block_end># Dynamic padding is added in opset 11
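# (opset 11 changed Pad to take the pad sizes as an input tensor rather than
# an attribute, which is what makes runtime-computed padding exportable)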
@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()# Functional module not scriptable
<def_stmt>test_pad_types self# Test for different pad integer types
<block_start><class_stmt>Pad(torch.nn.Module)<block_start><def_stmt>forward self x pad<block_start><return>torch.nn.functional.pad(x pad)<block_end><block_end>x=torch.randn(2 2 4 4)<line_sep>y=pad=(torch.tensor(2 dtype=torch.int32) torch.tensor(4 dtype=torch.int32))<line_sep>self.run_test(Pad() (x y))<line_sep>y=pad=(torch.tensor(2 dtype=torch.int64) torch.tensor(4 dtype=torch.int64))<line_sep>self.run_test(Pad() (x y))<block_end>@skipIfUnsupportedMaxOpsetVersion(10)<def_stmt>test_unsupported_pad self<block_start><class_stmt>Pad(torch.nn.Module)<block_start><def_stmt>forward self x pad<block_start><return>torch.nn.functional.pad(x pad)<block_end><block_end><def_stmt>run <block_start>x=torch.randn(2 2 4 4)<line_sep>y=pad=(torch.tensor(2 dtype=torch.int32) torch.tensor(4 dtype=torch.int32))<line_sep>p=Pad()<line_sep>f=io.BytesIO()<line_sep>torch.onnx._export(p (x y) f)<block_end><with_stmt>self.assertRaises(RuntimeError)<as>cm<block_start>run()<block_end>the_exception=cm.exception<line_sep>self.assertEqual('Unsupported: ONNX export of Pad in opset 9. The sizes of the padding must be constant. '+'Please try opset version 11.' the_exception.args[0])<block_end>@disableScriptTest()# export prim::Uninitialized
<def_stmt>test_reflection_pad self<block_start>model=torch.nn.ReflectionPad1d(2)<line_sep>x=torch.randn(2 4 4)<line_sep>self.run_test(model x)<line_sep>model=torch.nn.ReflectionPad2d((3 0 2 1))<line_sep>x=torch.randn(2 2 4 4)<line_sep>self.run_test(model x)<block_end>@disableScriptTest()# export prim::Uninitialized
<def_stmt>test_replication_pad self<block_start>model=torch.nn.ReplicationPad1d(2)<line_sep>x=torch.randn(2 4 4)<line_sep>self.run_test(model x)<line_sep>model=torch.nn.ReplicationPad2d((3 0 2 1))<line_sep>x=torch.randn(2 2 4 4)<line_sep>self.run_test(model x)<block_end>@skipIfUnsupportedMinOpsetVersion(11)@disableScriptTest()# export prim::Uninitialized
<def_stmt>test_im2col self<block_start><class_stmt>Unfold(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.nn.functional.unfold(input kernel_size=(10 15) dilation=2 padding=5 stride=3) torch.nn.functional.unfold(input kernel_size=(2 2) dilation=1 padding=0 stride=3) torch.nn.functional.unfold(input kernel_size=(1 1) dilation=5 padding=2 stride=3)<block_end><block_end>x=torch.rand(1 1 200 100)<line_sep>self.run_test(Unfold() x)<block_end>@skipIfNoLapack@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_det self<block_start><class_stmt>Det(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.det(x)<block_end><block_end>x=torch.randn(2 3 5 5)<line_sep>self.run_test(Det() x)<block_end># This test checks that the output scalar type in the ONNX graph is not null
# https://github.com/pytorch/pytorch/issues/28607
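# center_slice_helper below slices with an offset computed at runtime from the
# input shape, so the exported Slice output must still carry a concrete type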
@skipIfUnsupportedMinOpsetVersion(10)<def_stmt>test_trace_script self<block_start>@torch.jit.script<def_stmt>center_slice_helper input h_offset<block_start><return>input[: h_offset:]<block_end><class_stmt>CenterCrop(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>center_slice_helper(input torch.tensor(input.shape[1]-1))<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(CenterCrop() x)<block_end>@skipIfNoLapack@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_logdet self<block_start><class_stmt>LogDet(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.logdet(x)<block_end><block_end>x=torch.randn(2 3 5 5)<line_sep>self.run_test(LogDet() x)<block_end><def_stmt>test_dim self<block_start><class_stmt>DimModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start>out=input<times>2<line_sep>out<augmul>out.dim()<line_sep><return>out<block_end><block_end>empty_input=torch.randn(0 requires_grad=<true>)<line_sep>multi_dim_input=torch.randn(1 2 3 requires_grad=<true>)<line_sep>self.run_test(DimModel() empty_input)<line_sep>self.run_test(DimModel() multi_dim_input)<block_end>@skipIfUnsupportedMinOpsetVersion(12)@disableScriptTest()# variable number of inputs not scriptable
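# equations covered below: '...ii->...i' (batch diagonal), 'bij,bjk->bik'
# (batched matmul), 'i,i' (inner product) and 'ij->ji' (transpose)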
<def_stmt>test_einsum self<block_start><class_stmt>EinsumModelBatchDiagonal(torch.nn.Module)<block_start><def_stmt>forward self *tensor_list<block_start>eqn='...ii ->...i'<line_sep><return>torch.einsum(eqn *tensor_list)<block_end><block_end>x=torch.randn(3 5 5)<line_sep>self.run_test(EinsumModelBatchDiagonal() input=(x ))<class_stmt>EinsumModelBatchMatmul(torch.nn.Module)<block_start><def_stmt>forward self *tensor_list<block_start>eqn='bij, bjk -> bik'<line_sep><return>torch.einsum(eqn *tensor_list)<block_end><block_end>x=torch.randn(5 2 3)<line_sep>y=torch.randn(5 3 4)<line_sep>self.run_test(EinsumModelBatchMatmul() input=(x y))<class_stmt>EinsumModelInnerProd(torch.nn.Module)<block_start><def_stmt>forward self *tensor_list<block_start>eqn='i,i'<line_sep><return>torch.einsum(eqn *tensor_list)<block_end><block_end>x=torch.randn(5)<line_sep>y=torch.randn(5)<line_sep>self.run_test(EinsumModelInnerProd() input=(x y))<class_stmt>EinsumModelTranspose(torch.nn.Module)<block_start><def_stmt>forward self *tensor_list<block_start>eqn='ij->ji'<line_sep><return>torch.einsum(eqn *tensor_list)<block_end><block_end>x=torch.randn(3 4)<line_sep>self.run_test(EinsumModelTranspose() input=(x ))<block_end>@skipIfUnsupportedMinOpsetVersion(12)@disableScriptTest()# shape/type inference
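# CrossEntropyLoss is LogSoftmax followed by NLLLoss; opset 12 is the minimum
# here since ONNX only gained a SoftmaxCrossEntropyLoss op in opset 12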
<def_stmt>test_crossentropyloss self<block_start><for_stmt>ignore_index [-100 1]<block_start>x=torch.randn(3 5)<line_sep>y=torch.empty(3 dtype=torch.long).random_(5)<line_sep>y[y<eq>1]=ignore_index<line_sep>self._crossentropyloss(x y ignore_index)<line_sep>x=torch.randn(3 5 2)<line_sep>y=torch.empty(3 2 dtype=torch.long).random_(5)<line_sep>y[y<eq>1]=ignore_index<line_sep>self._crossentropyloss(x y ignore_index)<line_sep>x=torch.randn(3 5 2 7)<line_sep>y=torch.empty(3 2 7 dtype=torch.long).random_(5)<line_sep>y[y<eq>1]=ignore_index<line_sep>self._crossentropyloss(x y ignore_index)<block_end><block_end><def_stmt>_crossentropyloss self x y ignore_index<block_start><class_stmt>CrossEntropyLossNone(torch.nn.Module)<block_start><def_stmt>__init__ self ignore_index<block_start>super(CrossEntropyLossNone self).__init__()<if_stmt>ignore_index<eq>-100<block_start>self.loss=torch.nn.CrossEntropyLoss(reduction='none')<block_end><else_stmt><block_start>self.loss=torch.nn.CrossEntropyLoss(reduction='none' ignore_index=ignore_index)<block_end><block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(CrossEntropyLossNone(ignore_index) input=(x y))<class_stmt>CrossEntropyLossNoneWeight(torch.nn.Module)<block_start><def_stmt>__init__ self ignore_index<block_start>super(CrossEntropyLossNoneWeight self).__init__()<if_stmt>ignore_index<eq>-100<block_start>self.loss=torch.nn.CrossEntropyLoss(reduction='none' weight=torch.randn(5))<block_end><else_stmt><block_start>self.loss=torch.nn.CrossEntropyLoss(reduction='none' weight=torch.randn(5) ignore_index=ignore_index)<block_end><block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(CrossEntropyLossNoneWeight(ignore_index) input=(x y))<class_stmt>CrossEntropyLossSum(torch.nn.Module)<block_start><def_stmt>__init__ self ignore_index<block_start>super(CrossEntropyLossSum self).__init__()<if_stmt>ignore_index<eq>-100<block_start>self.loss=torch.nn.CrossEntropyLoss(reduction='sum')<block_end><else_stmt><block_start>self.loss=torch.nn.CrossEntropyLoss(reduction='sum' ignore_index=ignore_index)<block_end><block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(CrossEntropyLossSum(ignore_index) input=(x y))<class_stmt>CrossEntropyLossSumWeight(torch.nn.Module)<block_start><def_stmt>__init__ self ignore_index<block_start>super(CrossEntropyLossSumWeight self).__init__()<if_stmt>ignore_index<eq>-100<block_start>self.loss=torch.nn.CrossEntropyLoss(reduction='sum' weight=torch.randn(5))<block_end><else_stmt><block_start>self.loss=torch.nn.CrossEntropyLoss(reduction='sum' weight=torch.randn(5) ignore_index=ignore_index)<block_end><block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(CrossEntropyLossSumWeight(ignore_index) input=(x y))<class_stmt>CrossEntropyLossMean(torch.nn.Module)<block_start><def_stmt>__init__ self ignore_index<block_start>super(CrossEntropyLossMean self).__init__()<if_stmt>ignore_index<eq>-100<block_start>self.loss=torch.nn.CrossEntropyLoss()<block_end><else_stmt><block_start>self.loss=torch.nn.CrossEntropyLoss(ignore_index=ignore_index)<block_end><block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(CrossEntropyLossMean(ignore_index) input=(x 
y))<class_stmt>CrossEntropyLossMeanWeight(torch.nn.Module)<block_start><def_stmt>__init__ self ignore_index<block_start>super(CrossEntropyLossMeanWeight self).__init__()<if_stmt>ignore_index<eq>-100<block_start>self.loss=torch.nn.CrossEntropyLoss(weight=torch.randn(5))<block_end><else_stmt><block_start>self.loss=torch.nn.CrossEntropyLoss(weight=torch.randn(5) ignore_index=ignore_index)<block_end><block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(CrossEntropyLossMeanWeight(ignore_index) input=(x y))<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()# Output dtype mismatch
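# note: with log_target=True the target is expected to already be in log
# space, so both log_target variants are exercised below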
<def_stmt>test_kldiv_loss self<block_start>x=torch.randn(5)<line_sep>y=torch.randn(5)<line_sep>self._kldiv_loss(x y)<line_sep>x=torch.randn(2 3 5)<line_sep>y=torch.randn(2 3 5)<line_sep>self._kldiv_loss(x y)<line_sep>x=torch.randn(2 3 5 7)<line_sep>y=torch.randn(2 3 5 7)<line_sep>self._kldiv_loss(x y)<block_end><def_stmt>_kldiv_loss self x y<block_start><class_stmt>KLDivLossNone(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(KLDivLossNone self).__init__()<line_sep>self.loss=torch.nn.KLDivLoss(reduction='none' log_target=<true>)<block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(KLDivLossNone() input=(x y))<class_stmt>KLDivLossMean(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(KLDivLossMean self).__init__()<line_sep>self.loss=torch.nn.KLDivLoss(reduction='mean' log_target=<false>)<block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(KLDivLossMean() input=(x y))<class_stmt>KLDivLossSum(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(KLDivLossSum self).__init__()<line_sep>self.loss=torch.nn.KLDivLoss(reduction='sum' log_target=<true>)<block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(KLDivLossSum() input=(x y))<class_stmt>KLDivLossBatchMean(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(KLDivLossBatchMean self).__init__()<line_sep>self.loss=torch.nn.KLDivLoss(reduction='batchmean' log_target=<false>)<block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(KLDivLossBatchMean() input=(x y))<class_stmt>KLDivLossMiniBatchMean(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(KLDivLossMiniBatchMean self).__init__()<line_sep>self.loss=torch.nn.KLDivLoss(reduction='batchmean' size_average=<false> log_target=<true>)<block_end><def_stmt>forward self input target<block_start><return>self.loss(input target)<block_end><block_end>self.run_test(KLDivLossMiniBatchMean() input=(x y))<block_end>@skipIfUnsupportedMinOpsetVersion(12)@disableScriptTest()# shape/type inference
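# targets equal to ignore_index contribute zero loss and are excluded from the
# 'mean' denominator; ONNX added NegativeLogLikelihoodLoss in opset 12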
<def_stmt>test_nllloss self<block_start><class_stmt>NLLModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(NLLModel self).__init__()<line_sep>self.loss=torch.nn.NLLLoss(reduction='none')<line_sep>self.m=torch.nn.LogSoftmax(dim=1)<block_end><def_stmt>forward self input target<block_start>output=self.loss(self.m(2<times>input) target)<line_sep><return>output<block_end><block_end>N,C=5 4<line_sep>input=torch.randn(N 16)<line_sep>target=torch.empty(N dtype=torch.long).random_(0 C)<line_sep># using test data containing default ignore_index=-100
target[target<eq>1]=-100<line_sep>self.run_test(NLLModel() (input target))<block_end>@skipIfUnsupportedMinOpsetVersion(12)@disableScriptTest()# shape/type inference
<def_stmt>test_nllloss_2d_none self<block_start><class_stmt>NLLModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(NLLModel self).__init__()<line_sep>self.loss=torch.nn.NLLLoss(reduction='none')<line_sep>self.conv=torch.nn.Conv2d(16 C (3 3))<line_sep>self.m=torch.nn.LogSoftmax(dim=1)<block_end><def_stmt>forward self input target<block_start>output=self.loss(self.m(self.conv(input)) target)<line_sep><return>output<block_end><block_end>N,C=5 4<line_sep>input=torch.randn(N 16 10 10)<line_sep>target=torch.empty(N 8 8 dtype=torch.long).random_(0 C)<line_sep># using test data containing default ignore_index=-100
target[target<eq>1]=-100<line_sep>self.run_test(NLLModel() (input target))<block_end>@skipIfUnsupportedMinOpsetVersion(12)@disableScriptTest()# shape/type inference
<def_stmt>test_nllloss_2d_mean self<block_start><class_stmt>NLLModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(NLLModel self).__init__()<line_sep>self.loss=torch.nn.NLLLoss(reduction='mean')<line_sep>self.conv=torch.nn.Conv2d(16 C (3 3))<line_sep>self.m=torch.nn.LogSoftmax(dim=1)<block_end><def_stmt>forward self input target<block_start>output=self.loss(self.m(self.conv(input)) target)<line_sep><return>output<block_end><block_end>N,C=5 4<line_sep>input=torch.randn(N 16 10 10)<line_sep>target=torch.empty(N 8 8 dtype=torch.long).random_(0 C)<line_sep># using test data containing default ignore_index=-100
target[target<eq>1]=-100<line_sep>self.run_test(NLLModel() (input target))<block_end>@skipIfUnsupportedMinOpsetVersion(12)@disableScriptTest()# shape/type inference
<def_stmt>test_nllloss_2d_sum self<block_start><class_stmt>NLLModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(NLLModel self).__init__()<line_sep>self.loss=torch.nn.NLLLoss(reduction='sum')<line_sep>self.conv=torch.nn.Conv2d(16 C (3 3))<line_sep>self.m=torch.nn.LogSoftmax(dim=1)<block_end><def_stmt>forward self input target<block_start>output=self.loss(self.m(self.conv(input)) target)<line_sep><return>output<block_end><block_end>N,C=5 4<line_sep>input=torch.randn(N 16 10 10)<line_sep>target=torch.empty(N 8 8 dtype=torch.long).random_(0 C)<line_sep># using test data containing default ignore_index=-100
target[target<eq>1]=-100<line_sep>self.run_test(NLLModel() (input target))<block_end>@skipIfUnsupportedMinOpsetVersion(12)@disableScriptTest()# shape/type inference
<def_stmt>test_nllloss_2d_mean_weights self<block_start><class_stmt>NLLModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(NLLModel self).__init__()<line_sep>self.loss=torch.nn.NLLLoss(reduction='mean' weight=torch.randn(C))<line_sep>self.conv=torch.nn.Conv2d(16 C (3 3))<line_sep>self.m=torch.nn.LogSoftmax(dim=1)<block_end><def_stmt>forward self input target<block_start>output=self.loss(self.m(self.conv(input)) target)<line_sep><return>output<block_end><block_end>N,C=5 4<line_sep>input=torch.randn(N 16 10 10)<line_sep>target=torch.empty(N 8 8 dtype=torch.long).random_(0 C)<line_sep># using test data containing default ignore_index=-100
target[target<eq>1]=-100<line_sep>self.run_test(NLLModel() (input target))<block_end>@skipIfUnsupportedMinOpsetVersion(12)@disableScriptTest()# shape/type inference
<def_stmt>test_nllloss_2d_mean_ignore_index self<block_start><class_stmt>NLLModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(NLLModel self).__init__()<line_sep>self.loss=torch.nn.NLLLoss(reduction='mean' ignore_index=1)<line_sep>self.conv=torch.nn.Conv2d(16 C (3 3))<line_sep>self.m=torch.nn.LogSoftmax(dim=1)<block_end><def_stmt>forward self input target<block_start>output=self.loss(self.m(self.conv(input)) target)<line_sep><return>output<block_end><block_end>N,C=5 4<line_sep>input=torch.randn(N 16 10 10)<line_sep>target=torch.empty(N 8 8 dtype=torch.long).random_(0 C)<line_sep>self.run_test(NLLModel() (input target))<block_end>@skipIfUnsupportedMinOpsetVersion(12)@disableScriptTest()# shape/type inference
<def_stmt>test_nllloss_2d_mean_ignore_index_weights self<block_start><class_stmt>NLLModel(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(NLLModel self).__init__()<line_sep>self.loss=torch.nn.NLLLoss(reduction='mean' weight=torch.randn(C) ignore_index=1)<line_sep>self.conv=torch.nn.Conv2d(16 C (3 3))<line_sep>self.m=torch.nn.LogSoftmax(dim=1)<block_end><def_stmt>forward self input target<block_start>output=self.loss(self.m(self.conv(input)) target)<line_sep><return>output<block_end><block_end>N,C=5 4<line_sep>input=torch.randn(N 16 10 10)<line_sep>target=torch.empty(N 8 8 dtype=torch.long).random_(0 C)<line_sep>self.run_test(NLLModel() (input target))<block_end><def_stmt>test_torch_mm self<block_start><class_stmt>M(torch.nn.Module)<block_start><def_stmt>forward self mat1 mat2<block_start>mm=torch.mm(mat1 mat2)<line_sep><return>mm<block_end><block_end>mat1=torch.randn(2 3)<line_sep>mat2=torch.randn(3 3)<line_sep>self.run_test(M() input=(mat1 mat2))<block_end>@skipIfUnsupportedMinOpsetVersion(9)# Because where op is not supported for opset < 9.
<def_stmt>test_where_with_bool_tensor self<block_start><class_stmt>M(torch.nn.Module)<block_start><def_stmt>forward self mat1 mat2<block_start>out=torch.where(mat1<g>0 mat1 mat2)<line_sep><return>out<block_end><block_end>mat1=torch.randn(2 3)<line_sep>mat2=torch.ones(2 3)<line_sep>self.run_test(M() input=(mat1 mat2))<block_end>@skipIfUnsupportedMinOpsetVersion(9)# Because where op is not supported for opset < 9.
<def_stmt>test_where_with_byte_tensor self<block_start><class_stmt>M(torch.nn.Module)<block_start><def_stmt>forward self cond mat1 mat2<block_start>out=torch.where(cond mat1 mat2)<line_sep><return>out<block_end><block_end>cond=torch.ones(2 3 dtype=torch.uint8)<line_sep>cond[1 2]=0<line_sep>mat1=torch.randn(2 3)<line_sep>mat2=torch.ones(2 3)<line_sep>self.run_test(M() input=(cond mat1 mat2))<block_end><def_stmt>test_dropout self<block_start><class_stmt>M(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(M self).__init__()<line_sep>self.dropout=torch.nn.Dropout(0.3)<block_end><def_stmt>forward self x<block_start>dropout=self.dropout(x)<line_sep><return>dropout<block_end><block_end>x=torch.randn(10 3 53)<line_sep>self.run_test(M() (x))<block_end><def_stmt>test_shape_constant_fold self<block_start><class_stmt>ShapeModule(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(ShapeModule self).__init__()<line_sep>self.register_buffer("weight" torch.ones(5))<block_end><def_stmt>forward self x<block_start>shape=self.weight.shape[0]<line_sep><return>x+shape<block_end><block_end>x=torch.randn(2 5)<line_sep>self.run_test(ShapeModule() (x ) rtol=1e-3 atol=1e-5)<block_end>@skipIfUnsupportedMinOpsetVersion(12)<def_stmt>test_celu self<block_start><class_stmt>Celu(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Celu self).__init__()<line_sep>self.celu=torch.nn.CELU(alpha=1.0)<block_end><def_stmt>forward self input<block_start><return>self.celu(input)<block_end><block_end>input=torch.randn(2)<line_sep>self.run_test(Celu() (input ))<block_end>@skipIfUnsupportedMinOpsetVersion(12)<def_stmt>test_celu_default self<block_start><class_stmt>Celu(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Celu self).__init__()<line_sep>self.celu=torch.nn.CELU()<block_end><def_stmt>forward self input<block_start><return>self.celu(input)<block_end><block_end>input=torch.randn(2)<line_sep>self.run_test(Celu() (input ))<block_end>@skipIfUnsupportedMinOpsetVersion(12)<def_stmt>test_celu_alpha self<block_start><class_stmt>Celu(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Celu self).__init__()<line_sep>self.celu=torch.nn.CELU(alpha=2.)<block_end><def_stmt>forward self input<block_start><return>self.celu(input)<block_end><block_end>input=torch.randn(2)<line_sep>self.run_test(Celu() (input ))<block_end>@skipIfUnsupportedMinOpsetVersion(12)<def_stmt>test_celu_cast self<block_start><class_stmt>Celu(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Celu self).__init__()<line_sep>self.celu=torch.nn.CELU()<block_end><def_stmt>forward self input<block_start><return>self.celu(input)<block_end><block_end>input=torch.randn(2 5 7 dtype=torch.float64)<line_sep>self.run_test(Celu() (input ))<block_end>@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>test_where self<block_start><class_stmt>Model(torch.nn.Module)<block_start><def_stmt>forward self cond input other<block_start><return>torch.where(cond input other)<block_end><block_end>x=torch.randint(0 1 (2 3 4) dtype=torch.bool)<line_sep>y=torch.randn(2 1 4)<line_sep>z=torch.ones(2 3 1)<line_sep>self.run_test(Model() (x y z))<block_end>@skipIfUnsupportedMinOpsetVersion(9)@disableScriptTest()# symbolic update needed for unbind: ONNX export of unbind with dynamic number of outputs
<def_stmt>test_where_condition self<block_start><class_stmt>Model1(torch.nn.Module)<block_start><def_stmt>forward self input<block_start><return>torch.stack(torch.where(input<g>0.5) dim=1)<block_end><block_end>x=torch.randint(0 2 (2 3 4) dtype=bool)<line_sep>self.run_test(Model1() (x))<class_stmt>Model2(torch.nn.Module)<block_start><def_stmt>forward self input other<block_start><return>torch.stack(torch.where(input<g>other) dim=1)<block_end><block_end>x=torch.randint(0 1 (2 3 4) dtype=bool)<line_sep>y=torch.randint(1 2 (2 3 4) dtype=bool)<line_sep>self.run_test(Model2() (x y))<block_end><def_stmt>test_empty_branch self<block_start><class_stmt>EmptyBranchModel(torch.jit.ScriptModule)<block_start>@torch.jit.script_method<def_stmt>forward self input<block_start>out=input+1<if_stmt>out.dim()<g>2<block_start><if_stmt>out.dim()<g>3<block_start>out<augadd>3<block_end><else_stmt><block_start><pass><block_end><block_end><else_stmt><block_start><pass><block_end><return>out<block_end><block_end>x=torch.randn(1 2 3 requires_grad=<true>)<line_sep>self.run_test(EmptyBranchModel() x)<block_end>@skipIfONNXShapeInference(<false>)@skipIfUnsupportedMinOpsetVersion(11)<def_stmt>test_if_transpose self<block_start><class_stmt>IfModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start>x=x.transpose(0 1)<if_stmt>x.size(0)<eq>2<block_start><return>x.transpose(0 1)<block_end><else_stmt><block_start><return>x<block_end><block_end><block_end>x=torch.randn(2 3)<line_sep>self.run_test(torch.jit.script(IfModel()) x output_names=['output_1'] dynamic_axes={'output_1':[0 1]})<block_end><def_stmt>test_onnx_proto_checker self<block_start><class_stmt>Model(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Model self).__init__()<block_end><def_stmt>forward self x<block_start><return>2<times>x<block_end><block_end>x=torch.randn(1 2 3 requires_grad=<true>)<line_sep>f=io.BytesIO()<line_sep>torch.onnx._export(Model() x f)<line_sep>model=onnx.load(f)<line_sep>model.ir_version=0<def_stmt>check_proto <block_start>torch._C._check_onnx_proto(model.SerializeToString())<block_end>self.assertRaises(RuntimeError check_proto)<block_end>@disableScriptTest()# dtype mismatch
<def_stmt>test_split_tensor_scalar self<block_start><class_stmt>SplitModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.split(x x.size(1))<block_end><block_end>x=torch.randn(1 2 3 requires_grad=<true>)<line_sep>self.run_test(SplitModel() x)<block_end><def_stmt>test_split_tensor_multi self<block_start><class_stmt>SplitModel(torch.nn.Module)<block_start><def_stmt>forward self x<block_start><return>torch.split(x torch.ones(3))<block_end><block_end>x=torch.randn(1 2 3 requires_grad=<true>)<def_stmt>run_model <block_start>SplitModel(x)<block_end>self.assertRaises(TypeError run_model)<block_end><def_stmt>_dispatch_rnn_test self name *args **kwargs<block_start><if_stmt>name<eq>'elman'<block_start>self._elman_rnn_test(*args **kwargs)<block_end><if_stmt>name<eq>'lstm'<block_start>self._lstm_test(*args **kwargs)<block_end><if_stmt>name<eq>'gru'<block_start>self._gru_test(*args **kwargs)<block_end><block_end><def_stmt>_elman_rnn_test self layers nonlinearity bidirectional initial_state packed_sequence dropout<block_start>batch_first=<true><if>packed_sequence<eq>2<else><false><line_sep>model=torch.nn.RNN(RNN_INPUT_SIZE RNN_HIDDEN_SIZE layers nonlinearity=nonlinearity bidirectional=bidirectional dropout=dropout batch_first=batch_first)<if_stmt>packed_sequence<eq>1<block_start>model=RnnModelWithPackedSequence(model <false>)<block_end><if_stmt>packed_sequence<eq>2<block_start>model=RnnModelWithPackedSequence(model <true>)<block_end><def_stmt>make_input batch_size<block_start>seq_lengths=np.random.randint(1 RNN_SEQUENCE_LENGTH+1 size=batch_size)<line_sep>seq_lengths=list(reversed(sorted(map(int seq_lengths))))<line_sep>inputs=[torch.randn(l RNN_INPUT_SIZE)<for>l seq_lengths]<line_sep>inputs=rnn_utils.pad_sequence(inputs batch_first=batch_first)<line_sep>inputs=[inputs]<line_sep>directions=2<if>bidirectional<else>1<if_stmt>initial_state<block_start>h0=torch.randn(directions<times>layers batch_size RNN_HIDDEN_SIZE)<line_sep>inputs.append(h0)<block_end><if_stmt>packed_sequence<ne>0<block_start>inputs.append(torch.IntTensor(seq_lengths))<block_end><if_stmt>len(inputs)<eq>1<block_start>input=inputs[0]<block_end><else_stmt><block_start>input=tuple(inputs)<block_end><return>input<block_end>input=make_input(RNN_BATCH_SIZE)<line_sep>self.run_test(model input batch_size=RNN_BATCH_SIZE)<line_sep># test that the model still runs with a different batch size
other_input=make_input(RNN_BATCH_SIZE+1)<line_sep>self.run_test(model other_input batch_size=RNN_BATCH_SIZE+1)<block_end><def_stmt>_lstm_test self layers bidirectional initial_state packed_sequence dropout<block_start>batch_first=<true><if>packed_sequence<eq>2<else><false><line_sep>model=LstmFlatteningResult(RNN_INPUT_SIZE RNN_HIDDEN_SIZE layers bidirectional=bidirectional dropout=dropout batch_first=batch_first)<if_stmt>packed_sequence<eq>1<block_start>model=RnnModelWithPackedSequence(model <false>)<block_end><if_stmt>packed_sequence<eq>2<block_start>model=RnnModelWithPackedSequence(model <true>)<block_end><def_stmt>make_input batch_size<block_start>seq_lengths=np.random.randint(1 RNN_SEQUENCE_LENGTH+1 size=batch_size)<line_sep>seq_lengths=list(reversed(sorted(map(int seq_lengths))))<line_sep>inputs=[torch.randn(l RNN_INPUT_SIZE)<for>l seq_lengths]<line_sep>inputs=rnn_utils.pad_sequence(inputs batch_first=batch_first)<line_sep>inputs=[inputs]<line_sep>directions=2<if>bidirectional<else>1<if_stmt>initial_state<block_start>h0=torch.randn(directions<times>layers batch_size RNN_HIDDEN_SIZE)<line_sep>c0=torch.randn(directions<times>layers batch_size RNN_HIDDEN_SIZE)<line_sep>inputs.append((h0 c0))<block_end><if_stmt>packed_sequence<ne>0<block_start>inputs.append(torch.IntTensor(seq_lengths))<block_end><if_stmt>len(inputs)<eq>1<block_start>input=inputs[0]<block_end><else_stmt><block_start>input=tuple(inputs)<block_end><return>input<block_end>input=make_input(RNN_BATCH_SIZE)<line_sep>self.run_test(model input batch_size=RNN_BATCH_SIZE)<line_sep># test that the model still runs with a different batch size
other_input=make_input(RNN_BATCH_SIZE+1)<line_sep>self.run_test(model other_input batch_size=RNN_BATCH_SIZE+1)<block_end><def_stmt>_gru_test self layers bidirectional initial_state packed_sequence dropout<block_start>batch_first=<true><if>packed_sequence<eq>2<else><false><line_sep>model=torch.nn.GRU(RNN_INPUT_SIZE RNN_HIDDEN_SIZE layers bidirectional=bidirectional dropout=dropout batch_first=batch_first)<if_stmt>packed_sequence<eq>1<block_start>model=RnnModelWithPackedSequence(model <false>)<block_end><if_stmt>packed_sequence<eq>2<block_start>model=RnnModelWithPackedSequence(model <true>)<block_end><def_stmt>make_input batch_size<block_start>seq_lengths=np.random.randint(1 RNN_SEQUENCE_LENGTH+1 size=batch_size)<line_sep>seq_lengths=list(reversed(sorted(map(int seq_lengths))))<line_sep>inputs=[torch.randn(l RNN_INPUT_SIZE)<for>l seq_lengths]<line_sep>inputs=rnn_utils.pad_sequence(inputs batch_first=batch_first)<line_sep>inputs=[inputs]<line_sep>directions=2<if>bidirectional<else>1<if_stmt>initial_state<block_start>h0=torch.randn(directions<times>layers batch_size RNN_HIDDEN_SIZE)<line_sep>inputs.append(h0)<block_end><if_stmt>packed_sequence<ne>0<block_start>inputs.append(torch.IntTensor(seq_lengths))<block_end><if_stmt>len(inputs)<eq>1<block_start>input=inputs[0]<block_end><else_stmt><block_start>input=tuple(inputs)<block_end><return>input<block_end>input=make_input(RNN_BATCH_SIZE)<line_sep>self.run_test(model input batch_size=RNN_BATCH_SIZE)<line_sep># test that the model still runs with a different batch size
other_input=make_input(RNN_BATCH_SIZE+1)<line_sep>self.run_test(model other_input batch_size=RNN_BATCH_SIZE+1)<block_end>@skipIfUnsupportedMinOpsetVersion(10)<def_stmt>test_fake_quantize_per_tensor self<block_start><class_stmt>FakeQuantizePerTensorModel(torch.nn.Module)<block_start><def_stmt>forward self input<block_start>scale=1./127<line_sep>zero_point=0<line_sep>quant_min=-128<line_sep>quant_max=127<line_sep><return>torch.fake_quantize_per_tensor_affine(input scale zero_point quant_min quant_max)<block_end><block_end>x=torch.randn(6 4 3 3)<line_sep>self.run_test(FakeQuantizePerTensorModel() (x))<block_end>@skipIfUnsupportedMinOpsetVersion(12)<def_stmt>test_dropout_training self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(MyModule self).__init__()<line_sep>self.dropout=torch.nn.Dropout(0.4)<block_end><def_stmt>forward self x<block_start>dropout=self.dropout(x)<line_sep><return>dropout<block_end><block_end>model=MyModule()<line_sep>x=torch.randn(10)<line_sep>model.train()<line_sep>ort_sess=convert_to_onnx(model input=(x ) opset_version=self.opset_version training=torch.onnx.TrainingMode.TRAINING)<line_sep>ort_outs=run_ort(ort_sess input=(x ))<assert_stmt><not>torch.all(torch.eq(x torch.from_numpy(ort_outs[0])))<block_end>@skipIfUnsupportedMinOpsetVersion(12)<def_stmt>test_dropout_training_zero self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(MyModule self).__init__()<line_sep>self.dropout=torch.nn.Dropout(0.5)<block_end><def_stmt>forward self x<block_start>dropout=self.dropout(x)<line_sep><return>dropout<block_end><block_end>model=MyModule()<line_sep># ensure there are no zeros in the input
x=torch.randn(10 3 128 128)<line_sep>y=x.numpy()<line_sep>y_mask=np.where(y<eq>0 1 y)<line_sep>input=torch.from_numpy(y_mask)<line_sep>nb_elements=torch.numel(input)<line_sep>model.train()<line_sep>ort_sess=convert_to_onnx(model input=(x ) opset_version=self.opset_version training=torch.onnx.TrainingMode.TRAINING)<line_sep>ort_outs=run_ort(ort_sess input=(x ))<line_sep>y=model(input)<line_sep>output=y.cpu().numpy()<line_sep>ort_mask=np.where(ort_outs[0]<ne>0 1 0)<line_sep>pyt_mask=np.where(output<ne>0 1 0)<line_sep>ratio_pytorch=np.sum(pyt_mask)/nb_elements<line_sep>ratio_ort=np.sum(ort_mask)/nb_elements<line_sep>np.testing.assert_allclose(ratio_pytorch ratio_ort rtol=0.01 atol=0.01)<block_end><def_stmt>test_conv_bn self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(MyModule self).__init__()<line_sep>self.conv=torch.nn.Conv2d(3 16 kernel_size=1 stride=2 padding=3 bias=<true>)<line_sep>self.bn=torch.nn.BatchNorm2d(16 affine=<true>)<block_end><def_stmt>forward self x<block_start>x=self.conv(x)<line_sep>bn=self.bn(x)<line_sep><return>bn<block_end><block_end>model=MyModule()<line_sep>x=torch.randn(10 3 128 128)<line_sep>ort_sess1=convert_to_onnx(model input=(x ) opset_version=self.opset_version training=torch.onnx.TrainingMode.TRAINING)<line_sep>ort_outs1=run_ort(ort_sess1 input=(x ))<line_sep>ort_sess2=convert_to_onnx(model input=(x ) opset_version=self.opset_version training=torch.onnx.TrainingMode.EVAL)<line_sep>ort_outs2=run_ort(ort_sess2 input=(x ))<line_sep>[np.testing.assert_allclose(ort_out1 ort_out2 atol=1e-7 rtol=0.001)<for>ort_out1,ort_out2 zip(ort_outs1 ort_outs2)]<block_end><def_stmt>test_multiple_conv_bn self<block_start><class_stmt>MyModule(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(MyModule self).__init__()<line_sep>self.conv1=torch.nn.Conv2d(3 64 kernel_size=7 stride=2 padding=3 bias=<false>)<line_sep>self.conv2=torch.nn.Conv2d(64 2 kernel_size=1 stride=1 padding=0 bias=<false>)<line_sep>self.conv3=torch.nn.Conv2d(2 2 kernel_size=3 stride=1 padding=1 bias=<false>)<line_sep>self.bn=torch.nn.BatchNorm2d(64)<line_sep>self.bn2=torch.nn.BatchNorm2d(2)<line_sep>self.relu=torch.nn.ReLU(inplace=<true>)<line_sep>self.maxpool=torch.nn.MaxPool2d(kernel_size=3 stride=2 padding=1)<block_end><def_stmt>forward self x<block_start>x=self.conv1(x)<line_sep>x=self.bn(x)<line_sep>x=self.relu(x)<line_sep>x=self.maxpool(x)<line_sep>x=self.conv2(x)<line_sep>x=self.bn2(x)<line_sep>x=self.relu(x)<line_sep>x=self.conv3(x)<line_sep>x=self.bn2(x)<line_sep>x=self.relu(x)<line_sep><return>x<block_end><block_end>model=MyModule()<line_sep>x=torch.randn(2 3 224 224)<line_sep>ort_sess1=convert_to_onnx(model input=(x ) opset_version=self.opset_version training=torch.onnx.TrainingMode.TRAINING)<line_sep>ort_outs1=run_ort(ort_sess1 input=(x ))<line_sep>ort_sess2=convert_to_onnx(model input=(x ) opset_version=self.opset_version training=torch.onnx.TrainingMode.EVAL)<line_sep>ort_outs2=run_ort(ort_sess2 input=(x ))<line_sep>[np.testing.assert_allclose(ort_out1 ort_out2 atol=1e-7 rtol=0.001)<for>ort_out1,ort_out2 zip(ort_outs1 ort_outs2)]<block_end><block_end><def_stmt>make_test name base layer bidirectional initial_state variable_length dropout **extra_kwargs<block_start>test_name=str('_'.join(['test' name layer[1] bidirectional[1] initial_state[1] variable_length[1] dropout[1]]))<line_sep># Cannot export with older opsets because of 'ConstantFill' op
# ConstantFill was a temp op removed at opset 8. This is no longer supported by onnxruntime
@disableScriptTest()# Test code not scriptable
@skipIfUnsupportedMinOpsetVersion(9)<def_stmt>f self<block_start>self._dispatch_rnn_test(base layers=layer[0] bidirectional=bidirectional[0] initial_state=initial_state[0] packed_sequence=variable_length[0] dropout=dropout[0] **extra_kwargs)<block_end>f.__name__=test_name<line_sep>setattr(TestONNXRuntime f.__name__ f)<block_end><def_stmt>setup_rnn_tests <block_start>layers_opts=[(1 'unilayer') (3 'trilayer')]<line_sep>bidirectional_opts=[(<false> 'forward') (<true> 'bidirectional')]<line_sep>initial_state_opts=[(<true> 'with_initial_state') (<false> 'no_initial_state')]<line_sep>variable_length_opts=[(0 'without_sequence_lengths') (1 'with_variable_length_sequences') (2 'with_batch_first_sequence_lengths')]<line_sep>dropout_opts=[(0.2 'with_dropout') (0.0 'without_dropout')]<line_sep>test_count=0<for_stmt>(layer bidirectional initial_state variable_length dropout) itertools.product(layers_opts bidirectional_opts initial_state_opts variable_length_opts dropout_opts )<block_start><for_stmt>base,name,extra_kwargs (('elman' 'elman_relu' {'nonlinearity':u'relu'}) ('elman' 'elman_tanh' {'nonlinearity':u'tanh'}) ('lstm' 'lstm' {}) ('gru' 'gru' {}))<block_start>make_test(name base layer bidirectional initial_state variable_length dropout **extra_kwargs)<line_sep>test_count<augadd>1<block_end><block_end># sanity check that a representative example does exist
TestONNXRuntime.test_gru_trilayer_forward_with_initial_state_without_sequence_lengths_with_dropout<line_sep># make sure no one accidentally disables all the tests without
# noticing
<if_stmt>test_count<ne>192<block_start><raise>ValueError('Expected 192 tests but found {}'.format(test_count))<block_end><block_end>setup_rnn_tests()<line_sep># opset 7 tests
TestONNXRuntime_opset7=type(str("TestONNXRuntime_opset7") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ opset_version=7))<line_sep># opset 8 tests
TestONNXRuntime_opset8=type(str("TestONNXRuntime_opset8") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ opset_version=8))<line_sep># opset 10 tests
TestONNXRuntime_opset10=type(str("TestONNXRuntime_opset10") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ opset_version=10))<line_sep># opset 11 tests
TestONNXRuntime_opset11=type(str("TestONNXRuntime_opset11") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ opset_version=11))<line_sep># opset 12 tests
TestONNXRuntime_opset12=type(str("TestONNXRuntime_opset12") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ opset_version=12))<line_sep># opset 9 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset9_IRv4=type(str("TestONNXRuntime_opset9_IRv4") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ keep_initializers_as_inputs=<false>))<line_sep># opset 10 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset10_IRv4=type(str("TestONNXRuntime_opset10_IRv4") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ opset_version=10 keep_initializers_as_inputs=<false>))<line_sep># opset 11 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset11_IRv4=type(str("TestONNXRuntime_opset11_IRv4") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ opset_version=11 keep_initializers_as_inputs=<false>))<line_sep># opset 12 tests, with keep_initializers_as_inputs=False for
# IR version 4 style export.
TestONNXRuntime_opset12_IRv4=type(str("TestONNXRuntime_opset12_IRv4") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ opset_version=12 keep_initializers_as_inputs=<false>))<line_sep># opset 9 tests, with use_new_jit_passes=True for using new jit API,
# and with keep_initializers_as_inputs=False for IR version 4 style export.
TestONNXRuntime_opset9_IRv4_new_jit_API=type(str("TestONNXRuntime_opset9_IRv4_new_jit_API") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ keep_initializers_as_inputs=<false> use_new_jit_passes=<true> onnx_shape_inference=<true>))<line_sep># opset 12 tests, with use_new_jit_passes=True for using new jit API,
# and keep_initializers_as_inputs=False for IR version 4 style export.
TestONNXRuntime_opset12_IRv4_new_jit_API=type(str("TestONNXRuntime_opset12_IRv4_new_jit_API") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ opset_version=12 keep_initializers_as_inputs=<false> use_new_jit_passes=<true> onnx_shape_inference=<true>))<line_sep># opset 12 tests, with _onnx_shape_inference=True.
TestONNXRuntime_opset12_onnx_shape_inference=type(str("TestONNXRuntime_opset12_onnx_shape_inference") (unittest.TestCase ) dict(TestONNXRuntime.__dict__ opset_version=12 onnx_shape_inference=<true>))<if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
# -*- coding: utf-8 -*-
"""Add feature for allowing access to deprecated webhook endpoints."""<import_from_stmt>django.db migrations<line_sep>FEATURE_ID='allow_deprecated_webhooks'<def_stmt>forward_add_feature apps schema_editor<block_start>Feature=apps.get_model('projects' 'Feature')<line_sep>Feature.objects.create(feature_id=FEATURE_ID default_true=<true> )<block_end><def_stmt>reverse_add_feature apps schema_editor<block_start>Feature=apps.get_model('projects' 'Feature')<line_sep>Feature.objects.filter(feature_id=FEATURE_ID).delete()<block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('projects' '0020_add-api-project-proxy') ]<line_sep>operations=[migrations.RunPython(forward_add_feature reverse_add_feature) ]<block_end> |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# @time : 2019/11/12 16:45
# @author : Mo
# @function:
<import_from_stmt>keras_textclassification train<line_sep>train(graph='TextCNN' # required; algorithm name, one of "ALBERT","BERT","XLNET","FASTTEXT","TEXTCNN","CHARCNN",
# "TEXTRNN","RCNN","DCNN","DPCNN","VDCNN","CRNN","DEEPMOJI",
# "SELFATTENTION", "HAN","CAPSULE","TRANSFORMER"
label=17 # required; number of classes, must be the same for the training and test sets
path_train_data=<none> # required; training data file in csv format, must contain a 'label,ques' header line, see keras_textclassification/data
path_dev_data=<none> # required; test data file in csv format, must contain a 'label,ques' header line, see keras_textclassification/data
rate=1 # optional; proportion of the training data to use
hyper_parameters=<none>)<line_sep># optional; hyperparameters in json format, the default embedding is 'char','random'
|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases for merge_header_definitions module."""<import_stmt>unittest<import_from_stmt>pysam libcbcf<import_from_stmt>apache_beam.testing.test_pipeline TestPipeline<import_from_stmt>apache_beam.testing.util assert_that<import_from_stmt>apache_beam.testing.util equal_to<import_from_stmt>apache_beam.transforms Create<import_from_stmt>gcp_variant_transforms.beam_io vcf_header_io<import_from_stmt>gcp_variant_transforms.transforms merge_header_definitions<import_from_stmt>gcp_variant_transforms.libs.vcf_header_definitions_merger Definition<import_from_stmt>gcp_variant_transforms.libs.vcf_header_definitions_merger VcfHeaderDefinitions<class_stmt>MergeHeadersTest(unittest.TestCase)<block_start><def_stmt>_get_header_from_lines self lines file_path<block_start>header=libcbcf.VariantHeader()<for_stmt>line lines[:-1]<block_start>header.add_line(line)<block_end><return>vcf_header_io.VcfHeader(infos=header.info filters=header.filters alts=header.alts formats=header.formats contigs=header.contigs file_path=file_path)<block_end><def_stmt>test_merge_header_definitions_one_header self<block_start>lines=['##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n' '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']<line_sep>headers=self._get_header_from_lines(lines 'file1')<line_sep>pipeline=TestPipeline()<line_sep>merged_definitions=(pipeline|Create([headers])|'MergeDefinitions'<rshift>merge_header_definitions.MergeDefinitions())<line_sep>expected=VcfHeaderDefinitions()<line_sep>expected._infos={'NS':{Definition(1 'Integer'):['file1']}}<line_sep>assert_that(merged_definitions equal_to([expected]))<line_sep>pipeline.run()<block_end><def_stmt>test_merge_header_definitions_two_conflicting_headers self<block_start>lines_1=['##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n' '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']<line_sep>lines_2=['##INFO=<ID=NS,Number=1,Type=Float,Description="Number samples">\n' '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n']<line_sep>headers_1=self._get_header_from_lines(lines_1 'file1')<line_sep>headers_2=self._get_header_from_lines(lines_2 'file2')<line_sep>pipeline=TestPipeline()<line_sep>merged_definitions=(pipeline|Create([headers_1 headers_2])|'MergeDefinitions'<rshift>merge_header_definitions.MergeDefinitions())<line_sep>expected=VcfHeaderDefinitions()<line_sep>expected._infos={'NS':{Definition(1 'Integer'):['file1'] Definition(1 'Float'):['file2']}}<line_sep>assert_that(merged_definitions equal_to([expected]))<line_sep>pipeline.run()<block_end><def_stmt>test_merge_header_definitions_no_conflicting_headers self<block_start>lines_1=['##FORMAT=<ID=NS,Number=1,Type=Float,Description="Number samples">\n' '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']<line_sep>lines_2=['##FORMAT=<ID=DP,Number=2,Type=Float,Description="Total Depth">\n' '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n']<line_sep>headers_1=self._get_header_from_lines(lines_1 'file1')<line_sep>headers_2=self._get_header_from_lines(lines_2 'file2')<line_sep>pipeline=TestPipeline()<line_sep>merged_definitions=(pipeline|Create([headers_1 headers_2])|'MergeDefinitions'<rshift>merge_header_definitions.MergeDefinitions())<line_sep>expected=VcfHeaderDefinitions()<line_sep>expected._formats={'NS':{Definition(1 'Float'):['file1']} 'DP':{Definition(2 'Float'):['file2']}}<line_sep>assert_that(merged_definitions 
equal_to([expected]))<line_sep>pipeline.run()<block_end><def_stmt>test_merge_header_definitions_same_id_in_info_and_format_headers self<block_start>lines_1=['##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n' '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']<line_sep>lines_2=['##FORMAT=<ID=NS,Number=1,Type=Float,Description="Number samples">\n' '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n']<line_sep>headers_1=self._get_header_from_lines(lines_1 'file1')<line_sep>headers_2=self._get_header_from_lines(lines_2 'file2')<line_sep>pipeline=TestPipeline()<line_sep>merged_definitions=(pipeline|Create([headers_1 headers_2])|'MergeDefinitions'<rshift>merge_header_definitions.MergeDefinitions())<line_sep>expected=VcfHeaderDefinitions()<line_sep>expected._infos={'NS':{Definition(1 'Integer'):['file1']}}<line_sep>expected._formats={'NS':{Definition(1 'Float'):['file2']}}<line_sep>assert_that(merged_definitions equal_to([expected]))<line_sep>pipeline.run()<block_end><def_stmt>test_merge_header_definitions_save_five_copies self<block_start>lines_1=['##INFO=<ID=NS,Number=1,Type=Float,Description="Number samples">\n' '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample1 Sample2\n']<line_sep>lines_2=['##INFO=<ID=NS,Number=1,Type=Integer,Description="Number samples">\n' '#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT Sample3\n']<line_sep>file_names=['file1' 'file2' 'file3' 'file4' 'file5' 'file6']<line_sep>headers=[]<for_stmt>file_name file_names<block_start>headers.append(self._get_header_from_lines(lines_1 file_name))<block_end>headers.append(self._get_header_from_lines(lines_2 'file7'))<line_sep>pipeline=TestPipeline()<line_sep>merged_definitions=(pipeline|Create(headers reshuffle=<false>)|'MergeDefinitions'<rshift>merge_header_definitions.MergeDefinitions())<line_sep>expected=VcfHeaderDefinitions()<line_sep>expected._infos={'NS':{Definition(1 'Float'):['file1' 'file2' 'file3' 'file4' 'file5'] Definition(1 'Integer'):['file7']}}<line_sep>assert_that(merged_definitions equal_to([expected]))<line_sep>pipeline.run()<block_end><block_end> |
<import_stmt>inspect<import_stmt>os<import_stmt>napkin<def_stmt>generate_markdown_file title src_path<block_start>name,_=os.path.splitext(os.path.basename(src_path))<line_sep>src_file=name+'.py'<line_sep>napkin.generate(output_format='plantuml_png' output_dir='../images')<line_sep>text="""# {title}
The following examples are auto-generated by
[demo/{src_file}](demo/{src_file})
""".format(title=title src_file=src_file)<for_stmt>diagram napkin._collected_seq_diagrams<block_start>text<augadd>"""## {name}

```python
{src}
```
""".format(src=inspect.getsource(diagram.sd_func) name=diagram.name image_file=diagram.name.replace(' ' '%20')+'.png')<block_end>md_file='../{}.md'.format(name.upper())<with_stmt>open(md_file 'wt')<as>f<block_start>f.write(text)<block_end>print('MD file generated : {}'.format(md_file))<block_end> |
"""
Tests for the draft_kings.output.schema module.
Most tests center on serializing / deserializing output objects using the marshmallow library.
"""<line_sep> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_stmt>django_extensions.db.fields<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[migrations.swappable_dependency(settings.AUTH_USER_MODEL) ]<line_sep>operations=[migrations.CreateModel(name="Conference" fields=[("id" models.AutoField(verbose_name="ID" serialize=<false> auto_created=<true> primary_key=<true> ) ) ("created_at" models.DateTimeField(auto_now_add=<true> verbose_name="Created At") ) ("modified_at" models.DateTimeField(auto_now=<true> verbose_name="Last Modified At") ) ("name" models.CharField(max_length=255 verbose_name="Conference Name") ) ("slug" django_extensions.db.fields.AutoSlugField(editable=<false> populate_from=("name" ) max_length=255 blank=<true> unique=<true> ) ) ("description" models.TextField(default="")) ("start_date" models.DateField(verbose_name="Start Date")) ("end_date" models.DateField(verbose_name="End Date")) ("status" models.PositiveSmallIntegerField(verbose_name="Current Status" choices=[(1 b"Accepting Call for Proposals") (2 b"Closed for Proposals") (3 b"Accepting Votes") (4 b"Schedule Published") ] ) ) ("deleted" models.BooleanField(default=<false> verbose_name="Is Deleted?") ) ("created_by" models.ForeignKey(related_name="created_conference_set" verbose_name="Created By" blank=<true> on_delete=models.deletion.CASCADE to=settings.AUTH_USER_MODEL null=<true> ) ) ("modified_by" models.ForeignKey(related_name="updated_conference_set" verbose_name="Modified By" blank=<true> on_delete=models.deletion.CASCADE to=settings.AUTH_USER_MODEL null=<true> ) ) ] options={"abstract":<false>} bases=(models.Model ) ) migrations.CreateModel(name="ConferenceModerator" fields=[("id" models.AutoField(verbose_name="ID" serialize=<false> auto_created=<true> primary_key=<true> ) ) ("created_at" models.DateTimeField(auto_now_add=<true> verbose_name="Created At") ) ("modified_at" models.DateTimeField(auto_now=<true> verbose_name="Last Modified At") ) ("active" models.BooleanField(default=<true> verbose_name="Is Active?") ) ("conference" models.ForeignKey(to="conferences.Conference" on_delete=models.deletion.CASCADE ) ) ("created_by" models.ForeignKey(related_name="created_conferencemoderator_set" verbose_name="Created By" blank=<true> on_delete=models.deletion.CASCADE to=settings.AUTH_USER_MODEL null=<true> ) ) ("moderator" models.ForeignKey(to=settings.AUTH_USER_MODEL on_delete=models.deletion.CASCADE ) ) ("modified_by" models.ForeignKey(related_name="updated_conferencemoderator_set" verbose_name="Modified By" blank=<true> on_delete=models.deletion.CASCADE to=settings.AUTH_USER_MODEL null=<true> ) ) ] options={"abstract":<false>} bases=(models.Model ) ) migrations.CreateModel(name="ConferenceProposalReviewer" fields=[("id" models.AutoField(verbose_name="ID" serialize=<false> auto_created=<true> primary_key=<true> ) ) ("created_at" models.DateTimeField(auto_now_add=<true> verbose_name="Created At") ) ("modified_at" models.DateTimeField(auto_now=<true> verbose_name="Last Modified At") ) ("active" models.BooleanField(default=<true> verbose_name="Is Active?") ) ("conference" models.ForeignKey(to="conferences.Conference" on_delete=models.deletion.CASCADE ) ) ("created_by" models.ForeignKey(related_name="created_conferenceproposalreviewer_set" verbose_name="Created By" blank=<true> on_delete=models.deletion.CASCADE to=settings.AUTH_USER_MODEL null=<true> ) ) ("modified_by" 
models.ForeignKey(related_name="updated_conferenceproposalreviewer_set" verbose_name="Modified By" blank=<true> on_delete=models.deletion.CASCADE to=settings.AUTH_USER_MODEL null=<true> ) ) ("reviewer" models.ForeignKey(to=settings.AUTH_USER_MODEL on_delete=models.deletion.CASCADE ) ) ] options={} bases=(models.Model ) ) migrations.AlterUniqueTogether(name="conferenceproposalreviewer" unique_together=set([("conference" "reviewer")]) ) ]<block_end> |
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2017 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_future_stmt> absolute_import<import_from_stmt>..types *<import_from_stmt>..util serialize<import_from_stmt>..util deserialize<class_stmt>WebhookStats(object)<block_start>"""Smartsheet WebhookStats data model."""<def_stmt>__init__ self props=<none> base_obj=<none><block_start>"""Initialize the WebhookStats model."""<line_sep>self._base=<none><if_stmt>base_obj<is><not><none><block_start>self._base=base_obj<block_end>self._last_callback_attempt=Timestamp()<line_sep>self._last_callback_attempt_retry_count=Number()<line_sep>self._last_successful_callback=Timestamp()<if_stmt>props<block_start>deserialize(self props)<block_end># requests package Response object
self.request_response=<none><line_sep>self.__initialized=<true><block_end>@property<def_stmt>last_callback_attempt self<block_start><return>self._last_callback_attempt.value<block_end>@last_callback_attempt.setter<def_stmt>last_callback_attempt self value<block_start>self._last_callback_attempt.value=value<block_end>@property<def_stmt>last_callback_attempt_retry_count self<block_start><return>self._last_callback_attempt_retry_count.value<block_end>@last_callback_attempt_retry_count.setter<def_stmt>last_callback_attempt_retry_count self value<block_start>self._last_callback_attempt_retry_count.value=value<block_end>@property<def_stmt>last_successful_callback self<block_start><return>self._last_successful_callback.value<block_end>@last_successful_callback.setter<def_stmt>last_successful_callback self value<block_start>self._last_successful_callback.value=value<block_end><def_stmt>to_dict self<block_start><return>serialize(self)<block_end><def_stmt>to_json self<block_start><return>json.dumps(self.to_dict())<block_end><def_stmt>__str__ self<block_start><return>self.to_json()<block_end><block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>DQM.EcalMonitorTasks.TimingTask_cfi ecalTimingTask<import_from_stmt>DQM.EcalMonitorClient.IntegrityClient_cfi ecalIntegrityClient<line_sep>minChannelEntries=1<line_sep>minTowerEntries=3<line_sep>toleranceMean=2.<line_sep>toleranceRMS=6.<line_sep>minChannelEntriesFwd=8<line_sep>minTowerEntriesFwd=24<line_sep>toleranceMeanFwd=6.<line_sep>toleranceRMSFwd=12.<line_sep>tailPopulThreshold=0.4<line_sep>timeWindow=25.<line_sep>ecalTimingClient=cms.untracked.PSet(params=cms.untracked.PSet(minChannelEntries=cms.untracked.int32(minChannelEntries) minTowerEntries=cms.untracked.int32(minTowerEntries) toleranceMean=cms.untracked.double(toleranceMean) toleranceRMS=cms.untracked.double(toleranceRMS) minChannelEntriesFwd=cms.untracked.int32(minChannelEntriesFwd) minTowerEntriesFwd=cms.untracked.int32(minTowerEntriesFwd) toleranceMeanFwd=cms.untracked.double(toleranceMeanFwd) toleranceRMSFwd=cms.untracked.double(toleranceRMSFwd) tailPopulThreshold=cms.untracked.double(tailPopulThreshold)) sources=cms.untracked.PSet(TimeAllMap=ecalTimingTask.MEs.TimeAllMap TimeMap=ecalTimingTask.MEs.TimeMap TimeMapByLS=ecalTimingTask.MEs.TimeMapByLS ChStatus=ecalIntegrityClient.MEs.ChStatus) MEs=cms.untracked.PSet(RMSAll=cms.untracked.PSet(path=cms.untracked.string('%(subdet)s/%(prefix)sSummaryClient/%(prefix)sTMT%(suffix)s timing rms 1D summary') kind=cms.untracked.string('TH1F') otype=cms.untracked.string('Ecal3P') xaxis=cms.untracked.PSet(high=cms.untracked.double(10.0) nbins=cms.untracked.int32(100) low=cms.untracked.double(0.0) title=cms.untracked.string('time (ns)')) btype=cms.untracked.string('User') description=cms.untracked.string('Distribution of per-channel timing RMS. Channels with entries less than '+str(minChannelEntries)+' are not considered.')) ProjEta=cms.untracked.PSet(path=cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing projection eta%(suffix)s') kind=cms.untracked.string('TProfile') yaxis=cms.untracked.PSet(title=cms.untracked.string('time (ns)')) otype=cms.untracked.string('Ecal3P') btype=cms.untracked.string('ProjEta') description=cms.untracked.string('Projection of per-channel mean timing. Channels with entries less than '+str(minChannelEntries)+' are not considered.')) FwdBkwdDiff=cms.untracked.PSet(path=cms.untracked.string('%(subdet)s/%(prefix)sTimingTask/%(prefix)sTMT timing %(prefix)s+ - %(prefix)s-') kind=cms.untracked.string('TH1F') otype=cms.untracked.string('Ecal2P') xaxis=cms.untracked.PSet(high=cms.untracked.double(5.0) nbins=cms.untracked.int32(100) low=cms.untracked.double(-5.0) title=cms.untracked.string('time (ns)')) btype=cms.untracked.string('User') description=cms.untracked.string('Forward-backward asymmetry of per-channel mean timing. Channels with entries less than '+str(minChannelEntries)+' are not considered.')) FwdvBkwd=cms.untracked.PSet(kind=cms.untracked.string('TH2F') yaxis=cms.untracked.PSet(high=cms.untracked.double(timeWindow) nbins=cms.untracked.int32(50) low=cms.untracked.double(-timeWindow) title=cms.untracked.string('time (ns)')) otype=cms.untracked.string('Ecal2P') xaxis=cms.untracked.PSet(high=cms.untracked.double(timeWindow) nbins=cms.untracked.int32(50) low=cms.untracked.double(-timeWindow)) btype=cms.untracked.string('User') path=cms.untracked.string('%(subdet)s/%(prefix)sTimingTask/%(prefix)sTMT timing %(prefix)s+ vs %(prefix)s-') description=cms.untracked.string('Forward-backward correlation of per-channel mean timing. 
Channels with entries less than '+str(minChannelEntries)+' are not considered.')) ProjPhi=cms.untracked.PSet(path=cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing projection phi%(suffix)s') kind=cms.untracked.string('TProfile') yaxis=cms.untracked.PSet(title=cms.untracked.string('time (ns)')) otype=cms.untracked.string('Ecal3P') btype=cms.untracked.string('ProjPhi') description=cms.untracked.string('Projection of per-channel mean timing. Channels with entries less than '+str(minChannelEntries)+' are not considered.')) MeanSM=cms.untracked.PSet(kind=cms.untracked.string('TH1F') yaxis=cms.untracked.PSet(title=cms.untracked.string('time (ns)')) otype=cms.untracked.string('SM') xaxis=cms.untracked.PSet(high=cms.untracked.double(timeWindow) nbins=cms.untracked.int32(100) low=cms.untracked.double(-timeWindow)) btype=cms.untracked.string('User') path=cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing mean %(sm)s') description=cms.untracked.string('Distribution of per-channel timing mean. Channels with entries less than '+str(minChannelEntries)+' are not considered.')) RMSMap=cms.untracked.PSet(path=cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing rms %(sm)s') kind=cms.untracked.string('TH2F') zaxis=cms.untracked.PSet(title=cms.untracked.string('rms (ns)')) otype=cms.untracked.string('SM') btype=cms.untracked.string('Crystal') description=cms.untracked.string('2D distribution of per-channel timing RMS. Channels with entries less than '+str(minChannelEntries)+' are not considered.')) QualitySummary=cms.untracked.PSet(path=cms.untracked.string('%(subdet)s/%(prefix)sSummaryClient/%(prefix)sTMT%(suffix)s timing quality summary') kind=cms.untracked.string('TH2F') otype=cms.untracked.string('Ecal3P') btype=cms.untracked.string('SuperCrystal') description=cms.untracked.string('Summary of the timing data quality. A 5x5 tower is red if the mean timing of the tower is off by more than '+str(toleranceMean)+' or RMS is greater than '+str(toleranceRMS)+' ('+str(toleranceMeanFwd)+' and '+str(toleranceRMSFwd)+' in forward region). Towers with total entries less than '+str(minTowerEntries)+' are not subject to this evaluation. Since 5x5 tower timings are calculated with a tighter time-window than per-channel timings, a tower can additionally become red if its the sum of per-channel timing histogram entries is greater than per-tower histogram entries by factor '+str(1./(1.-tailPopulThreshold))+' (significant fraction of events fall outside the tight time-window).')) Quality=cms.untracked.PSet(path=cms.untracked.string('%(subdet)s/%(prefix)sTimingClient/%(prefix)sTMT timing quality %(sm)s') kind=cms.untracked.string('TH2F') otype=cms.untracked.string('SM') btype=cms.untracked.string('Crystal') description=cms.untracked.string('Summary of the timing data quality. A channel is red if its mean timing is off by more than '+str(toleranceMean)+' or RMS is greater than '+str(toleranceRMS)+'. 
Channels with entries less than '+str(minChannelEntries)+' are not considered.')) MeanAll=cms.untracked.PSet(path=cms.untracked.string('%(subdet)s/%(prefix)sSummaryClient/%(prefix)sTMT%(suffix)s timing mean 1D summary') kind=cms.untracked.string('TH1F') otype=cms.untracked.string('Ecal3P') xaxis=cms.untracked.PSet(high=cms.untracked.double(timeWindow) nbins=cms.untracked.int32(100) low=cms.untracked.double(-timeWindow) title=cms.untracked.string('time (ns)')) btype=cms.untracked.string('User') description=cms.untracked.string('Distribution of per-channel timing mean. Channels with entries less than '+str(minChannelEntries)+' are not considered.')) TrendMean=cms.untracked.PSet(path=cms.untracked.string('Ecal/Trends/TimingClient %(prefix)s timing mean') kind=cms.untracked.string('TProfile') otype=cms.untracked.string('Ecal2P') btype=cms.untracked.string('Trend') description=cms.untracked.string('Trend of timing mean. Plots simple average of all channel timing means at each lumisection.')) TrendRMS=cms.untracked.PSet(path=cms.untracked.string('Ecal/Trends/TimingClient %(prefix)s timing rms') kind=cms.untracked.string('TProfile') otype=cms.untracked.string('Ecal2P') btype=cms.untracked.string('Trend') description=cms.untracked.string('Trend of timing rms. Plots simple average of all channel timing rms at each lumisection.'))))<line_sep> |
<import_stmt>macropy.activate<import_stmt>JeevesLib<import_from_stmt>smt.Z3 *<import_stmt>unittest<import_from_stmt>Auction AuctionContext Bid User<import_stmt>JeevesLib<class_stmt>TestAuction(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>JeevesLib.init()<line_sep>self.aliceUser=User(0)<line_sep>self.bobUser=User(1)<line_sep>self.claireUser=User(2)<block_end><def_stmt>testOwnerCanSee self<block_start>policy=<lambda>oc:<false><line_sep>aliceBid=Bid(3 self.aliceUser policy)<line_sep>ctxt0=AuctionContext(self.aliceUser 0 [])<line_sep>self.assertEqual(3 JeevesLib.concretize(ctxt0 aliceBid.value))<line_sep>ctxt1=AuctionContext(self.bobUser 0 [])<line_sep>self.assertEqual(-1 JeevesLib.concretize(ctxt1 aliceBid.value))<block_end><def_stmt>testTimeSensitiveRelease self<block_start>auctionEndTime=10<line_sep>policy=<lambda>oc:oc.time<g>auctionEndTime<line_sep>aliceBid=Bid(3 self.aliceUser policy)<line_sep>self.assertEqual(3 JeevesLib.concretize(AuctionContext(self.bobUser 11 []) aliceBid.value))<line_sep>self.assertEqual(-1 JeevesLib.concretize(AuctionContext(self.bobUser 10 []) aliceBid.value))<block_end><def_stmt>testSealedAuction self# Function that returns true if the context contains a bid from the given
# user.
<block_start><def_stmt>hasBidFromUser ctxt u<block_start><return>JeevesLib.jhasElt(ctxt.bids <lambda>b:b.owner<eq>u)<block_end>allUsers=[self.aliceUser self.bobUser self.claireUser]<line_sep>policy=<lambda>oc:reduce(<lambda>acc c:JeevesLib.jand(<lambda>:hasBidFromUser(oc c) <lambda>:acc) allUsers)<line_sep>aliceBid=Bid(3 self.aliceUser policy)<line_sep>bobBid=Bid(4 self.bobUser policy)<line_sep>claireBid=Bid(5 self.claireUser policy)<line_sep>self.assertEqual(-1 JeevesLib.concretize(AuctionContext(self.bobUser 11 [aliceBid]) aliceBid.value))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
"""Automated logging support for common scientific libraries."""<line_sep> |
"""
Stand-alone, GUI-free index builder for Leo's full text search system::
python leoftsindex.py <file1> <file2> <file3>...
If the file name starts with @ it's assumed to be a simple
text file listing files to be indexed.
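For example (list file name and paths shown are illustrative)::
python leoftsindex.py @outlines.txt
where outlines.txt holds one .leo path per line::
/home/user/notes.leo
/home/user/projects.leo#Projects-->Active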
If <file> does not contain '#' it's assumed to be a .leo file
and is indexed directly.
If <file> does contain '#' it's assumed to be a .leo file
containing a list of .leo files to index, with the list in
the node indicated by the UNL after the #, e.g.::
path/to/myfile.leo#Lists-->List of outlines
In the latter case, if the node identified by the UNL has children,
the list of files to scan is built from the first line of the body
of each child node of the identified node (works well with bookmarks.py).
If the node identified by the UNL does not have children, the
node's body is assumed to be a simple text listing of paths to .leo files.
.. note::
It may be necessary to quote the "file" on the command line,
as the '#' may be interpreted as a comment delimiter::
python leoftsindex.py "workbook.leo#Links"
"""<import_stmt>sys<line_sep># add folder containing 'leo' folder to path
# sys.path.append("/home/tbrown/Package/leo/bzr/leo.repo/trunk")
<import_stmt>leo.core.leoBridge<as>leoBridge<import_stmt>leo.plugins.leofts<as>leofts<line_sep>controller=leoBridge.controller(gui='nullGui' loadPlugins=<false> # True: attempt to load plugins.
readSettings=<false> # True: read standard settings files.
silent=<false> # True: don't print signon messages.
verbose=<false>)<line_sep>g=controller.globals()<line_sep># list of "files" to process
files=sys.argv[1:]<line_sep># set up leofts
leofts.set_leo(g)<line_sep>g._gnxcache=leofts.GnxCache()<line_sep>fts=leofts.get_fts()<line_sep>fn2c={}# cache to avoid loading same outline twice
done=set()# outlines scanned, to avoid repetition
todo=list(files)<while_stmt>todo<block_start>item=todo.pop(0)<line_sep>print("INDEX: %s"%item)<if_stmt>'#'<in>item<block_start>fn,node=item.split('#' 1)<block_end><else_stmt><block_start>fn,node=item <none><block_end><if_stmt>node<block_start>c=fn2c.setdefault(fn controller.openLeoFile(fn))<line_sep>found,dummy,p=g.recursiveUNLSearch(node.split('-->') c)<if_stmt><not>found<block_start>print("Could not find '%s'"%item)<line_sep><break><block_end><if_stmt><not>p<block_start>p=c.p<block_end><if_stmt>p.hasChildren()# use file named in first node of each child
<block_start>files=[chl.b.strip().split('\n' 1)[0].strip()<for>chl p.children()]<block_end><else_stmt># use all files listed in body
<block_start>files=[i.strip()<for>i p.b.strip().split('\n')]<block_end><block_end><elif_stmt>fn.startswith('@')<block_start>todo.extend(open(fn[1:]).read().strip().split('\n'))<line_sep>files=[]<block_end><else_stmt><block_start>files=[fn]<block_end><for_stmt>fn files# file names may still have '#' if taken from a node list
<block_start>real_name=fn.split('#' 1)[0]<if_stmt>real_name<in>done<block_start><continue><block_end>done.add(real_name)<if_stmt>len(files)<ne>1<block_start>print(" FILE: %s"%real_name)<block_end>c=fn2c.setdefault(real_name controller.openLeoFile(fn))<line_sep>fts.drop_document(real_name)<line_sep>fts.index_nodes(c)<block_end><block_end> |
# python3
# pylint: disable=g-bad-file-header
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for bsuite.utils.wrapper."""<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_from_stmt>bsuite environments<import_from_stmt>bsuite.environments catch<import_from_stmt>bsuite.utils wrappers<import_stmt>dm_env<import_from_stmt>dm_env specs<import_from_stmt>dm_env test_utils<import_stmt>mock<import_stmt>numpy<as>np<class_stmt>FakeEnvironment(environments.Environment)<block_start>"""An environment that returns pre-determined rewards and observations."""<def_stmt>__init__ self time_steps<block_start>"""Initializes a new FakeEnvironment.
Args:
time_steps: A sequence of time step namedtuples. This could represent
one episode, or several. This class just repeatedly plays through the
sequence and doesn't inspect the contents.
"""<line_sep>super().__init__()<line_sep>self.bsuite_num_episodes=1000<line_sep>self._time_steps=time_steps<line_sep>obs=np.asarray(self._time_steps[0].observation)<line_sep>self._observation_spec=specs.Array(shape=obs.shape dtype=obs.dtype)<line_sep>self._step_index=0<line_sep>self._reset_next_step=<true><block_end><def_stmt>reset self<block_start>self._reset_next_step=<false><line_sep>self._step_index=0<line_sep><return>self._time_steps[0]<block_end><def_stmt>step self action<block_start><del_stmt>action<if_stmt>self._reset_next_step<block_start><return>self.reset()<block_end>self._step_index<augadd>1<line_sep>self._step_index<augmod>len(self._time_steps)<line_sep><return>self._time_steps[self._step_index]<block_end><def_stmt>_reset self<block_start><raise>NotImplementedError<block_end><def_stmt>_step self action:int<block_start><raise>NotImplementedError<block_end><def_stmt>observation_spec self<block_start><return>self._observation_spec<block_end><def_stmt>action_spec self<block_start><return>specs.Array(shape=() dtype=np.int32)<block_end><def_stmt>bsuite_info self<block_start><return>{}<block_end><block_end><class_stmt>WrapperTest(absltest.TestCase)<block_start><def_stmt>test_wrapper self<block_start>"""Tests that the wrapper computes and logs the correct data."""<line_sep>mock_logger=mock.MagicMock()<line_sep>mock_logger.write=mock.MagicMock()<line_sep># Make a fake environment that cycles through these time steps.
timesteps=[dm_env.restart([]) dm_env.transition(1 []) dm_env.transition(2 []) dm_env.termination(3 []) ]<line_sep>expected_episode_return=6<line_sep>fake_env=FakeEnvironment(timesteps)<line_sep>env=wrappers.Logging(env=fake_env logger=mock_logger log_every=<true>)# pytype: disable=wrong-arg-types
num_episodes=5<for_stmt>_ range(num_episodes)<block_start>timestep=env.reset()<while_stmt><not>timestep.last()<block_start>timestep=env.step(action=0)<block_end><block_end># We count the number of transitions, hence the -1.
expected_episode_length=len(timesteps)-1<line_sep>expected_calls=[]<for_stmt>i range(1 num_episodes+1)<block_start>expected_calls.append(mock.call(dict(steps=expected_episode_length<times>i episode=i total_return=expected_episode_return<times>i episode_len=expected_episode_length episode_return=expected_episode_return )))<block_end>mock_logger.write.assert_has_calls(expected_calls)<block_end><def_stmt>test_unwrap self<block_start>raw_env=FakeEnvironment([dm_env.restart([])])<line_sep>scale_env=wrappers.RewardScale(raw_env reward_scale=1.)<line_sep>noise_env=wrappers.RewardNoise(scale_env noise_scale=1.)<line_sep>logging_env=wrappers.Logging(noise_env logger=<none>)# pytype: disable=wrong-arg-types
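# raw_env is expected to reach back through the RewardNoise and RewardScale
# wrappers to the original FakeEnvironment (checked by the identity assertion below).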
unwrapped=logging_env.raw_env<line_sep>self.assertEqual(id(raw_env) id(unwrapped))<block_end><block_end><class_stmt>ImageObservationTest(parameterized.TestCase)<block_start>@parameterized.parameters(((84 84 4) np.array([1 2])) ((70 90) np.array([[1 0 2 3]])) )<def_stmt>test_to_image self shape observation<block_start>image=wrappers.to_image(shape observation)<line_sep>self.assertEqual(image.shape shape)<line_sep>self.assertCountEqual(np.unique(image) np.unique(observation))<block_end><block_end><class_stmt>ImageWrapperCatchTest(test_utils.EnvironmentTestMixin absltest.TestCase)<block_start><def_stmt>make_object_under_test self<block_start>env=catch.Catch()<line_sep><return>wrappers.ImageObservation(env (84 84 4))<block_end><def_stmt>make_action_sequence self<block_start>actions=[0 1 2]<line_sep>rng=np.random.RandomState(42)<for_stmt>_ range(100)<block_start><yield>rng.choice(actions)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end> |
# ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/security-key-spaces/problem
# Difficulty: Easy
# Max Score: 10
# Language: Python
# ========================
# Solution
# ========================
num=input()<line_sep>e=int(input())<line_sep>print(''.join([str((int(i)+e)%10)<for>i num]))<line_sep>
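# Worked example (inputs are illustrative): for num = "1460" and e = 9, each
# digit is shifted by 9 modulo 10, giving the new key "0359".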
"""
ShakeDrop-ResNet for CIFAR/SVHN, implemented in Gluon.
Original paper: 'ShakeDrop Regularization for Deep Residual Learning,' https://arxiv.org/abs/1802.02375.
"""<line_sep>__all__=['CIFARShakeDropResNet' 'shakedropresnet20_cifar10' 'shakedropresnet20_cifar100' 'shakedropresnet20_svhn']<import_stmt>os<import_stmt>numpy<as>np<import_stmt>mxnet<as>mx<import_from_stmt>mxnet cpu<import_from_stmt>mxnet.gluon nn HybridBlock<import_from_stmt>.common conv1x1_block conv3x3_block<import_from_stmt>.resnet ResBlock ResBottleneck<class_stmt>ShakeDrop(mx.autograd.Function)<block_start>"""
ShakeDrop function.
Parameters:
----------
p : float
ShakeDrop specific probability (of life) for Bernoulli random variable.
"""<def_stmt>__init__ self p<block_start>super(ShakeDrop self).__init__()<line_sep>self.p=p<block_end><def_stmt>forward self x<block_start><if_stmt>mx.autograd.is_training()<block_start>b=np.random.binomial(n=1 p=self.p)<line_sep>alpha=mx.nd.random.uniform_like(x.slice(begin=(<none> 0 0 0) end=(<none> 1 1 1)) low=-1.0 high=1.0)<line_sep>y=mx.nd.broadcast_mul(b+alpha-b<times>alpha x)<line_sep>self.save_for_backward(b)<block_end><else_stmt><block_start>y=self.p<times>x<block_end><return>y<block_end><def_stmt>backward self dy<block_start>b,=self.saved_tensors<line_sep>beta=mx.nd.random.uniform_like(dy.slice(begin=(<none> 0 0 0) end=(<none> 1 1 1)) low=0.0 high=1.0)<line_sep><return>mx.nd.broadcast_mul(b+beta-b<times>beta dy)<block_end><block_end><class_stmt>ShakeDropResUnit(HybridBlock)<block_start>"""
ShakeDrop-ResNet unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
bn_use_global_stats : bool
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
life_prob : float
Residual branch life probability.
"""<def_stmt>__init__ self in_channels out_channels strides bn_use_global_stats bottleneck life_prob **kwargs<block_start>super(ShakeDropResUnit self).__init__(**kwargs)<line_sep>self.life_prob=life_prob<line_sep>self.resize_identity=(in_channels<ne>out_channels)<or>(strides<ne>1)<line_sep>body_class=ResBottleneck<if>bottleneck<else>ResBlock<with_stmt>self.name_scope()<block_start>self.body=body_class(in_channels=in_channels out_channels=out_channels strides=strides bn_use_global_stats=bn_use_global_stats)<if_stmt>self.resize_identity<block_start>self.identity_conv=conv1x1_block(in_channels=in_channels out_channels=out_channels strides=strides bn_use_global_stats=bn_use_global_stats activation=<none>)<block_end>self.activ=nn.Activation("relu")<line_sep># self.shake_drop = ShakeDrop(self.life_prob)
<block_end><block_end><def_stmt>hybrid_forward self F x<block_start><if_stmt>self.resize_identity<block_start>identity=self.identity_conv(x)<block_end><else_stmt><block_start>identity=x<block_end>x=self.body(x)<line_sep>x=ShakeDrop(self.life_prob)(x)+identity<line_sep># x = self.shake_drop(x) + identity
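# What the ShakeDrop call above does (a summary of the Function defined
# earlier in this file): in training the branch output is scaled by
# (b + alpha - b * alpha), where b ~ Bernoulli(life_prob) gates the branch
# and alpha ~ U(-1, 1) perturbs it when b = 0; at inference the branch is
# scaled by life_prob. The backward pass substitutes an independent
# beta ~ U(0, 1) for alpha.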
x=self.activ(x)<line_sep><return>x<block_end><block_end><class_stmt>CIFARShakeDropResNet(HybridBlock)<block_start>"""
ShakeDrop-ResNet model for CIFAR from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
life_probs : list of float
Residual branch life probability for each unit.
bn_use_global_stats : bool, default False
Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.
Useful for fine-tuning.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (32, 32)
Spatial size of the expected input image.
classes : int, default 10
Number of classification classes.
"""<def_stmt>__init__ self channels init_block_channels bottleneck life_probs bn_use_global_stats=<false> in_channels=3 in_size=(32 32) classes=10 **kwargs<block_start>super(CIFARShakeDropResNet self).__init__(**kwargs)<line_sep>self.in_size=in_size<line_sep>self.classes=classes<with_stmt>self.name_scope()<block_start>self.features=nn.HybridSequential(prefix="")<line_sep>self.features.add(conv3x3_block(in_channels=in_channels out_channels=init_block_channels bn_use_global_stats=bn_use_global_stats))<line_sep>in_channels=init_block_channels<line_sep>k=0<for_stmt>i,channels_per_stage enumerate(channels)<block_start>stage=nn.HybridSequential(prefix="stage{}_".format(i+1))<with_stmt>stage.name_scope()<block_start><for_stmt>j,out_channels enumerate(channels_per_stage)<block_start>strides=2<if>(j<eq>0)<and>(i<ne>0)<else>1<line_sep>stage.add(ShakeDropResUnit(in_channels=in_channels out_channels=out_channels strides=strides bn_use_global_stats=bn_use_global_stats bottleneck=bottleneck life_prob=life_probs[k]))<line_sep>in_channels=out_channels<line_sep>k<augadd>1<block_end><block_end>self.features.add(stage)<block_end>self.features.add(nn.AvgPool2D(pool_size=8 strides=1))<line_sep>self.output=nn.HybridSequential(prefix="")<line_sep>self.output.add(nn.Flatten())<line_sep>self.output.add(nn.Dense(units=classes in_units=in_channels))<block_end><block_end><def_stmt>hybrid_forward self F x<block_start>x=self.features(x)<line_sep>x=self.output(x)<line_sep><return>x<block_end><block_end><def_stmt>get_shakedropresnet_cifar classes blocks bottleneck model_name=<none> pretrained=<false> ctx=cpu() root=os.path.join("~" ".mxnet" "models") **kwargs<block_start>"""
Create ShakeDrop-ResNet model for CIFAR with specific parameters.
Parameters:
----------
classes : int
Number of classification classes.
blocks : int
Number of blocks.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""<assert_stmt>(classes<in>[10 100])<if_stmt>bottleneck<block_start><assert_stmt>((blocks-2)%9<eq>0)<line_sep>layers=[(blocks-2)<floordiv>9]<times>3<block_end><else_stmt><block_start><assert_stmt>((blocks-2)%6<eq>0)<line_sep>layers=[(blocks-2)<floordiv>6]<times>3<block_end>init_block_channels=16<line_sep>channels_per_layers=[16 32 64]<line_sep>channels=[[ci]<times>li<for>(ci li) zip(channels_per_layers layers)]<if_stmt>bottleneck<block_start>channels=[[cij<times>4<for>cij ci]<for>ci channels]<block_end>total_layers=sum(layers)<line_sep>final_death_prob=0.5<line_sep>life_probs=[1.0-float(i+1)/float(total_layers)<times>final_death_prob<for>i range(total_layers)]<line_sep>net=CIFARShakeDropResNet(channels=channels init_block_channels=init_block_channels bottleneck=bottleneck life_probs=life_probs classes=classes **kwargs)<if_stmt>pretrained<block_start><if_stmt>(model_name<is><none>)<or>(<not>model_name)<block_start><raise>ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")<block_end><import_from_stmt>.model_store get_model_file<line_sep>net.load_parameters(filename=get_model_file(model_name=model_name local_model_store_dir_path=root) ctx=ctx)<block_end><return>net<block_end><def_stmt>shakedropresnet20_cifar10 classes=10 **kwargs<block_start>"""
ShakeDrop-ResNet-20 model for CIFAR-10 from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""<line_sep><return>get_shakedropresnet_cifar(classes=classes blocks=20 bottleneck=<false> model_name="shakedropresnet20_cifar10" **kwargs)<block_end><def_stmt>shakedropresnet20_cifar100 classes=100 **kwargs<block_start>"""
ShakeDrop-ResNet-20 model for CIFAR-100 from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 100
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""<line_sep><return>get_shakedropresnet_cifar(classes=classes blocks=20 bottleneck=<false> model_name="shakedropresnet20_cifar100" **kwargs)<block_end><def_stmt>shakedropresnet20_svhn classes=10 **kwargs<block_start>"""
ShakeDrop-ResNet-20 model for SVHN from 'ShakeDrop Regularization for Deep Residual Learning,'
https://arxiv.org/abs/1802.02375.
Parameters:
----------
classes : int, default 10
Number of classification classes.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""<line_sep><return>get_shakedropresnet_cifar(classes=classes blocks=20 bottleneck=<false> model_name="shakedropresnet20_svhn" **kwargs)<block_end><def_stmt>_test <block_start><import_stmt>numpy<as>np<import_stmt>mxnet<as>mx<line_sep>pretrained=<false><line_sep>models=[(shakedropresnet20_cifar10 10) (shakedropresnet20_cifar100 100) (shakedropresnet20_svhn 10) ]<for_stmt>model,classes models<block_start>net=model(pretrained=pretrained)<line_sep>ctx=mx.cpu()<if_stmt><not>pretrained<block_start>net.initialize(ctx=ctx)<block_end># net.hybridize()
net_params=net.collect_params()<line_sep>weight_count=0<for_stmt>param net_params.values()<block_start><if_stmt>(param.shape<is><none>)<or>(<not>param._differentiable)<block_start><continue><block_end>weight_count<augadd>np.prod(param.shape)<block_end>print("m={}, {}".format(model.__name__ weight_count))<assert_stmt>(model<ne>shakedropresnet20_cifar10<or>weight_count<eq>272474)<assert_stmt>(model<ne>shakedropresnet20_cifar100<or>weight_count<eq>278324)<assert_stmt>(model<ne>shakedropresnet20_svhn<or>weight_count<eq>272474)<line_sep>x=mx.nd.zeros((14 3 32 32) ctx=ctx)<line_sep># y = net(x)
<with_stmt>mx.autograd.record()<block_start>y=net(x)<line_sep>y.backward()<block_end><assert_stmt>(y.shape<eq>(14 classes))<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>_test()<block_end> |
# Generated by Django 3.2.7 on 2021-10-17 07:36
<import_stmt>django.db.models.deletion<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('x509_pki' '0009_auto_20211017_0921') ]<line_sep>operations=[migrations.RemoveField(model_name='keystore' name='crl' ) migrations.CreateModel(name='CrlStore' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('crl' models.TextField(blank=<true> null=<true> verbose_name='Serialized CRL certificate')) ('certificate' models.OneToOneField(on_delete=django.db.models.deletion.CASCADE to='x509_pki.certificate')) ] ) ]<block_end> |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
<import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401
<import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>LogAnalyticsEmBridgeSummaryReport(object)<block_start>"""
Log-Analytics EM Bridge counts summary.
"""<def_stmt>__init__ self **kwargs<block_start>"""
Initializes a new LogAnalyticsEmBridgeSummaryReport object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this LogAnalyticsEmBridgeSummaryReport.
:type compartment_id: str
:param active_em_bridge_count:
The value to assign to the active_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
:type active_em_bridge_count: int
:param creating_em_bridge_count:
The value to assign to the creating_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
:type creating_em_bridge_count: int
:param needs_attention_em_bridge_count:
The value to assign to the needs_attention_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
:type needs_attention_em_bridge_count: int
:param deleted_em_bridge_count:
The value to assign to the deleted_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
:type deleted_em_bridge_count: int
:param total_em_bridge_count:
The value to assign to the total_em_bridge_count property of this LogAnalyticsEmBridgeSummaryReport.
:type total_em_bridge_count: int
"""<line_sep>self.swagger_types={'compartment_id':'str' 'active_em_bridge_count':'int' 'creating_em_bridge_count':'int' 'needs_attention_em_bridge_count':'int' 'deleted_em_bridge_count':'int' 'total_em_bridge_count':'int'}<line_sep>self.attribute_map={'compartment_id':'compartmentId' 'active_em_bridge_count':'activeEmBridgeCount' 'creating_em_bridge_count':'creatingEmBridgeCount' 'needs_attention_em_bridge_count':'needsAttentionEmBridgeCount' 'deleted_em_bridge_count':'deletedEmBridgeCount' 'total_em_bridge_count':'totalEmBridgeCount'}<line_sep>self._compartment_id=<none><line_sep>self._active_em_bridge_count=<none><line_sep>self._creating_em_bridge_count=<none><line_sep>self._needs_attention_em_bridge_count=<none><line_sep>self._deleted_em_bridge_count=<none><line_sep>self._total_em_bridge_count=<none><block_end>@property<def_stmt>compartment_id self<block_start>"""
**[Required]** Gets the compartment_id of this LogAnalyticsEmBridgeSummaryReport.
Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this LogAnalyticsEmBridgeSummaryReport.
:rtype: str
"""<line_sep><return>self._compartment_id<block_end>@compartment_id.setter<def_stmt>compartment_id self compartment_id<block_start>"""
Sets the compartment_id of this LogAnalyticsEmBridgeSummaryReport.
Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this LogAnalyticsEmBridgeSummaryReport.
:type: str
"""<line_sep>self._compartment_id=compartment_id<block_end>@property<def_stmt>active_em_bridge_count self<block_start>"""
**[Required]** Gets the active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Total number of ACTIVE enterprise manager bridges.
:return: The active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:rtype: int
"""<line_sep><return>self._active_em_bridge_count<block_end>@active_em_bridge_count.setter<def_stmt>active_em_bridge_count self active_em_bridge_count<block_start>"""
Sets the active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Total number of ACTIVE enterprise manager bridges.
:param active_em_bridge_count: The active_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:type: int
"""<line_sep>self._active_em_bridge_count=active_em_bridge_count<block_end>@property<def_stmt>creating_em_bridge_count self<block_start>"""
**[Required]** Gets the creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in CREATING state.
:return: The creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:rtype: int
"""<line_sep><return>self._creating_em_bridge_count<block_end>@creating_em_bridge_count.setter<def_stmt>creating_em_bridge_count self creating_em_bridge_count<block_start>"""
Sets the creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in CREATING state.
:param creating_em_bridge_count: The creating_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:type: int
"""<line_sep>self._creating_em_bridge_count=creating_em_bridge_count<block_end>@property<def_stmt>needs_attention_em_bridge_count self<block_start>"""
**[Required]** Gets the needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in NEEDS_ATTENTION state.
:return: The needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:rtype: int
"""<line_sep><return>self._needs_attention_em_bridge_count<block_end>@needs_attention_em_bridge_count.setter<def_stmt>needs_attention_em_bridge_count self needs_attention_em_bridge_count<block_start>"""
Sets the needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in NEEDS_ATTENTION state.
:param needs_attention_em_bridge_count: The needs_attention_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:type: int
"""<line_sep>self._needs_attention_em_bridge_count=needs_attention_em_bridge_count<block_end>@property<def_stmt>deleted_em_bridge_count self<block_start>"""
**[Required]** Gets the deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in DELETED state.
:return: The deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:rtype: int
"""<line_sep><return>self._deleted_em_bridge_count<block_end>@deleted_em_bridge_count.setter<def_stmt>deleted_em_bridge_count self deleted_em_bridge_count<block_start>"""
Sets the deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Number of enterprise manager bridges in DELETED state.
:param deleted_em_bridge_count: The deleted_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:type: int
"""<line_sep>self._deleted_em_bridge_count=deleted_em_bridge_count<block_end>@property<def_stmt>total_em_bridge_count self<block_start>"""
**[Required]** Gets the total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Total number of enterprise manager bridges.
:return: The total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:rtype: int
"""<line_sep><return>self._total_em_bridge_count<block_end>@total_em_bridge_count.setter<def_stmt>total_em_bridge_count self total_em_bridge_count<block_start>"""
Sets the total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
Total number of enterprise manager bridges.
:param total_em_bridge_count: The total_em_bridge_count of this LogAnalyticsEmBridgeSummaryReport.
:type: int
"""<line_sep>self._total_em_bridge_count=total_em_bridge_count<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end> |
"""Commandline interaction with LSF schedulers.
"""<import_stmt>re<import_stmt>subprocess<line_sep>_jobid_pat=re.compile("Job <(?P<jobid>\d+)> is")<def_stmt>submit_job scheduler_args command<block_start>"""Submit a job to the scheduler, returning the supplied job ID.
"""<line_sep>cl=["bsub"]+scheduler_args+command<line_sep>status=subprocess.check_output(cl)<line_sep>match=_jobid_pat.search(status)<line_sep><return>match.groups("jobid")[0]<block_end><def_stmt>stop_job jobid<block_start>cl=["bkill" jobid]<line_sep>subprocess.check_call(cl)<block_end><def_stmt>are_running jobids<block_start>"""Check if all of the submitted job IDs are running.
"""<line_sep>run_info=subprocess.check_output(["bjobs"])<line_sep>running=[]<for_stmt>parts (l.split()<for>l run_info.split("\n")<if>l.strip())<block_start><if_stmt>len(parts)<ge>3<block_start>pid,_,status=parts[:3]<if_stmt>status.lower()<in>["run"]<block_start>running.append(pid)<block_end><block_end><block_end>want_running=set(running).intersection(set(jobids))<line_sep><return>len(want_running)<eq>len(jobids)<block_end> |
"""Cross-reference object definition.
"""<import_stmt>typing<import_stmt>fastobo<import_from_stmt>.utils.meta roundrepr typechecked<line_sep>__all__=["Xref"]<line_sep>@roundrepr<class_stmt>Xref(object)<block_start>"""A cross-reference to another document or resource.
Cross-references (xrefs for short) can be used to back-up definitions of
entities, synonyms, or to link ontological entities to other resources
they may have been derived from. Although originally intended to provide
links to databases, cross-references in OBO ontologies gained additional
purposes, such as helping for header macros expansion, or being used to
alias external relationships with local unprefixed IDs.
The OBO format version 1.4 expects references to be proper OBO identifiers
that can be translated to actual IRIs, which is a breaking change from the
previous format. Therefore, cross-references are encouraged to be given as
plain IRIs or as prefixed IDs using an ID from the IDspace mapping defined
in the header.
Example:
A cross-reference in the Mammalian Phenotype ontology linking a term
to some related Web resource:
>>> mp = pronto.Ontology.from_obo_library("mp.obo")
>>> mp["MP:0030151"].name
'abnormal buccinator muscle morphology'
>>> mp["MP:0030151"].xrefs
frozenset({Xref('https://en.wikipedia.org/wiki/Buccinator_muscle')})
Caution:
`Xref` instances compare only using their identifiers; this means it
is not possible to have several cross-references with the same
identifier and different descriptions in the same set.
Todo:
Make sure to resolve header macros for xrefs expansion (such as
``treat-xrefs-as-is_a``) when creating an ontology, or provide a
method on `~pronto.Ontology` doing so when called.
"""<line_sep>id:str<line_sep>description:typing.Optional[str]<line_sep>__slots__=("__weakref__" "id" "description")# noqa: E0602
@typechecked()<def_stmt>__init__ self id:str description:typing.Optional[str]=<none><block_start>"""Create a new cross-reference.
Arguments:
id (str): the identifier of the cross-reference, either as a URL,
a prefixed identifier, or an unprefixed identifier.
description (str or None): a human-readable description of the
cross-reference, if any.
"""<line_sep># check the id is valid using fastobo
<if_stmt><not>fastobo.id.is_valid(id)<block_start><raise>ValueError("invalid identifier: {}".format(id))<block_end>self.id:str=id<line_sep>self.description=description<block_end><def_stmt>__eq__ self other:object<arrow>bool<block_start><if_stmt>isinstance(other Xref)<block_start><return>self.id<eq>other.id<block_end><return><false><block_end><def_stmt>__gt__ self other:object<arrow>bool<block_start><if_stmt>isinstance(other Xref)<block_start><return>self.id<g>other.id<block_end><return>NotImplemented<block_end><def_stmt>__ge__ self other:object<arrow>bool<block_start><if_stmt>isinstance(other Xref)<block_start><return>self.id<ge>other.id<block_end><return>NotImplemented<block_end><def_stmt>__lt__ self other:object<arrow>bool<block_start><if_stmt>isinstance(other Xref)<block_start><return>self.id<l>other.id<block_end><return>NotImplemented<block_end><def_stmt>__le__ self other:object<arrow>bool<block_start><if_stmt>isinstance(other Xref)<block_start><return>self.id<le>other.id<block_end><return>NotImplemented<block_end><def_stmt>__hash__ self<block_start><return>hash(self.id)<block_end><block_end> |
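# Example (the identifier is illustrative): constructing a cross-reference by
# hand; the id must pass fastobo's identifier validation or ValueError is raised.
#
#     xref = Xref("PMID:26158728", description="supporting publication")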
<import_from_stmt>blesuite.pybt.roles LECentral LEPeripheral<import_from_stmt>blesuite.pybt.core Connection<import_from_stmt>blesuite.pybt.gatt UUID AttributeDatabase Server<import_from_stmt>blesuite.pybt.gap GAP<import_from_stmt>blesuite.gatt_procedures gatt_procedure_write_handle gatt_procedure_write_handle_async gatt_procedure_read_handle gatt_procedure_read_handle_async gatt_procedure_read_uuid gatt_procedure_read_uuid_async gatt_procedure_discover_primary_services gatt_procedure_discover_secondary_services gatt_procedure_discover_characteristics gatt_procedure_discover_includes gatt_procedure_discover_descriptors gatt_procedure_prepare_write_handle gatt_procedure_prepare_write_handle_async gatt_procedure_execute_write gatt_procedure_execute_write_async gatt_procedure_write_command_handle gatt_procedure_read_multiple_handles gatt_procedure_read_multiple_handles_async gatt_procedure_read_blob_handle gatt_procedure_read_blob_handle_async<import_from_stmt>blesuite.smart_scan blesuite_smart_scan<import_from_stmt>blesuite.entities.gatt_device BLEDevice<import_from_stmt>blesuite.event_handler BTEventHandler<import_stmt>logging<import_stmt>gevent<import_stmt>os<line_sep>logger=logging.getLogger(__name__)<line_sep>logger.addHandler(logging.NullHandler())<line_sep>ROLE_CENTRAL=0x00<line_sep>ROLE_PERIPHERAL=0x01<line_sep>PUBLIC_DEVICE_ADDRESS=0x00<line_sep>RANDOM_DEVICE_ADDRESS=0x01<class_stmt>BLEConnection(object)<block_start>"""
BLEConnection is used to represent a connection between the BLEConnection manager
and a BLE device. This object is commonly returned to the user to represent a connection and is passed
to further BLEConnectionManager functions to interact with the connections.
:param address: The address of the peer BLEDevice that the HCI device is connected to.
:param address_type: The address type of the peer BLEDevice [public = 0x00 | random = 0x01]
:param connection_handle: The connection handle used to interact with the associated peer BLE device.
:type address: str
:type address_type: int
:type connection_handle: int
"""<def_stmt>__init__ self address address_type connection_handle=<none><block_start>self.address=address<line_sep>self.address_type=address_type<line_sep>self.connection_handle=connection_handle<line_sep>self.interval_min=<none><line_sep>self.interval_max=<none><line_sep>self.mtu=23<block_end># default as per spec
<def_stmt>__repr__ self<block_start><return>'<{} address={}, type={}>'.format(self.__class__.__name__ self.address {0:"public" 1:"random"}.get(self.address_type "Unknown"))<block_end><block_end><class_stmt>BLEConnectionManager(object)<block_start>"""
BLEConnectionManager is used to manage connections to Bluetooth Low Energy Devices.
The connection manager is associated with an HCI device, such as a Bluetooth USB adapter,
and is responsible for creating the BLE stack and providing a user-friendly interface for
interacting with the BLE stack in order to send and receive packets.
:param adapter: BTLE adapter on host machine to use for connection (defaults to first found adapter). If left blank, the host's default adapter is used.
:param role: Type of role to create for the HCI device [central | peripheral]
:param our_address_type: Type of address for our Bluetooth Adapter. [public | random] (default: "public"). Note: We currently only support static random addresses, not resolvable or non-resolvable private addresses.
:param random_address: If our address type is set to random, supply a random address or one will be randomly generated ("AA:BB:CC:DD:EE:FF") (default: None)
:param psm: Specific PSM (default: 0)
:param mtu: Specific MTU (default: 23 as per spec BLUETOOTH SPECIFICATION Version 4.2 [Vol 3, Part G] 5.2.1)
:param gatt_server: GATT Server from pybt. Used to assign a custom blesuite.pybt.gatt Server object as the server for a peripheral. Alternatively, by default if the peripheral role is configured, a GATT Server object will be created with no services or characteristics that the user can add to through BLEConnectionManager class methods.
:param event_handler: BTEventHandler class instance that will be called when packets are received by the blesuite.pybt.core packet routing class (SocketHandler).
:param att_operation_event_hook: ATT operation hook functions triggered when the ATT server receives an ATT request
:param att_security_event_hook: ATT security hook functions triggered when the ATT server receives an ATT request and security checks are made
:type att_security_event_hook: blesuite.event_handler.ATTSecurityHook
:type att_operation_event_hook: blesuite.event_handler.ATTEventHook
:type adapter: int
:type role: str
:type our_address_type: str
:type random_address: str
:type psm: int
:type mtu: int
:type gatt_server: Server
:type event_handler: BTEventHandler
"""<def_stmt>__init__ self adapter role our_address_type="public" random_address=<none> psm=0 mtu=23 gatt_server=<none> event_handler=<none> att_operation_event_hook=<none> att_security_event_hook=<none><block_start>self.role_name=role<line_sep>self.adapter=adapter<line_sep>self.requester=<none><line_sep>self.responses=[]<line_sep>self.response_counter=0<line_sep>self.psm=psm<line_sep>self.mtu=mtu<line_sep>self.gatt_server=gatt_server<line_sep>self.event_handler=event_handler<line_sep>self.att_operation_event_hook=att_operation_event_hook<line_sep>self.att_security_event_hook=att_security_event_hook<line_sep>self.address=<none><line_sep>self.our_address_type_name=our_address_type<if_stmt>self.our_address_type_name.lower()<eq>"random"<block_start>self.our_address_type=RANDOM_DEVICE_ADDRESS<block_end><else_stmt><block_start>self.our_address_type=PUBLIC_DEVICE_ADDRESS<block_end><if_stmt>self.our_address_type<eq>RANDOM_DEVICE_ADDRESS<and>random_address<is><none><block_start>self.random_address=':'.join(map(<lambda>x:x.encode('hex') os.urandom(6)))<block_end><elif_stmt>self.our_address_type<eq>RANDOM_DEVICE_ADDRESS<block_start>self.random_address=random_address<block_end><else_stmt><block_start>self.random_address=<none><block_end>self.central=<none><line_sep>self.stack_connection=<none><line_sep>self.connections=[]<if_stmt>role<is>'central'<block_start>logger.debug("creating central")<line_sep>self._create_central()<line_sep>logger.debug("creating PyBT connection")<line_sep>self._create_stack_connection(ROLE_CENTRAL)<line_sep>logger.debug("creating listeners")<line_sep>self._start_listeners()<block_end><elif_stmt>role<is>'peripheral'<block_start>logger.debug("creating peripheral role")<line_sep>self._create_peripheral()<line_sep>logger.debug("creating PyBT connection")<line_sep>self._create_stack_connection(ROLE_PERIPHERAL)<line_sep>logger.debug("creating listeners")<line_sep>self._start_listeners()<block_end><else_stmt><block_start>logger.error("Unknown role: %s"%role)<line_sep><raise>RuntimeError("Unknown role: %s"%role)<block_end>self.address=self.role.stack.addr<block_end><def_stmt>__enter__ self<block_start><return>self<block_end><def_stmt>__del__ self<block_start><if_stmt>self.stack_connection<is><not><none><block_start><for_stmt>connection self.connections<block_start><if_stmt>self.stack_connection.is_connected(connection.connection_handle)<block_start>self.stack_connection.disconnect(connection.connection_handle 0x16)<block_end><block_end>self.stack_connection.destroy()<line_sep>self.stack_connection=<none><block_end><block_end><def_stmt>__exit__ self exc_type exc_val exc_tb<block_start>logger.debug("Exiting bleConnectionManager. 
exc_type:%s exc_val:%s exc_tb:%s"%(exc_type exc_val exc_tb))<if_stmt>self.stack_connection<is><not><none><block_start>self.stack_connection.destroy()<line_sep>self.stack_connection=<none><block_end><if_stmt>self.role<is><not><none><block_start>self.role.destroy()<line_sep>self.role=<none><block_end><block_end><def_stmt>_create_central self<block_start><if_stmt>self.adapter<is><none><block_start>self.role=LECentral(address_type=self.our_address_type random=self.random_address att_operation_event_hook=self.att_operation_event_hook)<block_end><else_stmt><block_start>self.role=LECentral(adapter=self.adapter address_type=self.our_address_type random=self.random_address att_operation_event_hook=self.att_operation_event_hook)<block_end><block_end><def_stmt>_create_peripheral self<block_start><if_stmt>self.gatt_server<is><none><block_start>self.attribute_db=AttributeDatabase(event_handler=self.att_security_event_hook)<line_sep>self.gatt_server=Server(self.attribute_db)<line_sep>self.gatt_server.set_mtu(self.mtu)<block_end><if_stmt>self.adapter<is><none><block_start>self.role=LEPeripheral(self.gatt_server mtu=self.mtu address_type=self.our_address_type random=self.random_address att_operation_event_hook=self.att_operation_event_hook)<block_end><else_stmt><block_start>self.role=LEPeripheral(self.gatt_server adapter=self.adapter mtu=self.mtu address_type=self.our_address_type random=self.random_address att_operation_event_hook=self.att_operation_event_hook)<block_end><block_end><def_stmt>_create_stack_connection self role_type<block_start><if_stmt>self.event_handler<is><none><block_start>self.event_handler=BTEventHandler(self)<block_end>self.stack_connection=Connection(self.role role_type self.event_handler)<block_end><def_stmt>_start_listeners self<block_start>self.stack_connection.start()<block_end><def_stmt>get_address self<block_start>""" Get the address of the HCI device represented by the BLEConnectionManager.
:return: The HCI device address
:rtype: str
"""<line_sep><return>self.address<block_end><def_stmt>get_discovered_devices self<block_start>"""
Get a dictionary of address seen during a scan and the associated advertising data.
:return: Dictionary of seen addresses and advertising data
:rtype: dict {"<address>":(<addressTypeInt>, "<advertisingData>")}
"""<line_sep><return>self.stack_connection.seen<block_end><def_stmt>set_event_handler self event_class<block_start>"""
Set the BTEventHandler for the pybt.core.SocketHandler class that will trigger when a Bluetooth Event
is received by the stack.
:param event_class: Event handler class instance.
:type event_class: BTEventHandler
:return: Success state
:rtype: bool
"""<line_sep>logger.debug("Trying to set event handler")<line_sep>self.event_handler=event_class<if_stmt>self.stack_connection.socket_handler<is><not><none><block_start>logger.debug("Stack connection found, setting event handler")<line_sep>self.stack_connection.set_event_handler(event_class)<line_sep><return><true><block_end><return><false><block_end><def_stmt>set_att_operation_hook self event_class<block_start>"""
Set the ATTEventHook for the pybt.att.AttributeProtocol class that will trigger when an ATT operation
against the ATT database running locally is received.
:param event_class: ATT event class hook instance.
:type event_class: ATTEventHook
:return: Success state
:rtype: bool
"""<line_sep>logger.debug("Trying to set ATT operation hook")<line_sep>self.att_operation_event_hook=event_class<line_sep>self.role.att.event_handler=self.att_operation_event_hook<line_sep><return><true><block_end><def_stmt>set_att_security_hook self event_class<block_start>"""
Set the ATTSecurityHook for the pybt.gatt.AttributeDatabase class that will trigger when a security
check against an ATT operation acting on the ATT database occurs. These checks cover encryption,
authentication, and authorization.
:param event_class: ATT security event hook class instance.
:type event_class: ATTSecurityHook
:return: Success state
:rtype: bool
"""<line_sep>logger.debug("Trying to set ATT security hook")<line_sep>self.att_security_event_hook=event_class<if_stmt>self.gatt_server<is><none><block_start>logger.debug("No GATT server running, setting security hook failed.")<line_sep><return><false><block_end>self.gatt_server.db.att_security_hooks=self.att_security_event_hook<line_sep><return><true><block_end><def_stmt>is_connected self connection<block_start>""" Return whether the specified connection is connected to the peer device.
:return: Return connection status
:rtype: bool
"""<line_sep><return>self.stack_connection.is_connected(connection.connection_handle)<block_end><def_stmt>init_connection self address address_type<block_start>"""
Create BLEConnection object that represents the host's connection to a BLE peripheral.
:param address: BD_ADDR of target BLE Peripheral
:param address_type: Address type of target BLE Peripheral [public | random]
:type address: string
:type address_type: string
:return: Return BLEConnection object that is used in any communication function.
:rtype: BLEConnection
"""<line_sep>address=address.upper()<if_stmt>address_type<eq>"public"<block_start>address_type=PUBLIC_DEVICE_ADDRESS<block_end><elif_stmt>address_type<eq>"private"<block_start>address_type=RANDOM_DEVICE_ADDRESS<block_end>ble_connection=BLEConnection(address address_type)<line_sep>self.connections.append(ble_connection)<line_sep><return>ble_connection<block_end><def_stmt>get_bleconnection_from_connection_handle self connection_handle<block_start>"""
Lookup a BLEConnection based on a supplied connection handle value.
:param connection_handle: Connection handle used to look up an existing BLEConnection
:type connection_handle: int
:return: BLEConnection or None
:rtype: BLEConnection or None
"""<for_stmt>connection self.connections<block_start><if_stmt>connection.connection_handle<is><not><none><and>connection.connection_handle<eq>connection_handle<block_start><return>connection<block_end><block_end><return><none><block_end><def_stmt>connect self ble_connection timeout=15<block_start>"""
Initiate a connection with a peer BLEDevice.
:param ble_connection: BLEConnection that represents the connection between our HCI device and the peer
:type ble_connection: BLEConnection
:param timeout: Connection timeout in seconds (default: 15)
:type timeout: int
:return: Connected status
:rtype: bool
"""<import_stmt>time<line_sep>start=time.time()<if_stmt><not>self.stack_connection.is_connected(ble_connection.connection_handle)<block_start>request=self.stack_connection.connect(ble_connection.connection_handle ble_connection.address kind=ble_connection.address_type)<while_stmt><not>request.has_response()<block_start><if_stmt>timeout<is><not><none><and>time.time()-start<ge>timeout<block_start>logger.debug("Connection failed: Connection timeout reached.")<line_sep><return><false><block_end>logger.debug("Is not connected")<line_sep>gevent.sleep(1)<block_end>ble_connection.connection_handle=request.response.conn_handle<line_sep>logger.debug("Connected")<line_sep><return><true><block_end><block_end><def_stmt>disconnect self connection reason=0x16<block_start>"""
Disconnect from a peer BLE device.
:param connection: BLEConnection to disconnect
:type connection: BLEConnection
:param reason: The reason for the disconnection (default: 0x16 - Connection terminated by local host). Reasons defined in BLUETOOTH SPECIFICATION Version 5.0 | Vol 2, Part E page 777
:type reason: int
"""<line_sep>self.stack_connection.disconnect(connection.connection_handle reason)<block_end><def_stmt>pair self ble_connection timeout=15<block_start>"""
Initiate pairing with a peer BLE device. This method is blocking and will wait
until a paired connection is received, pairing fails, or the timeout is reached.
If custom pairing request parameters are required, configure
the parameters prior to calling this function.
:param ble_connection: The BLEConnection to initiate pairing on
:type ble_connection: BLEConnection
:param timeout: Pairing timeout in seconds (default: 15)
:type timeout: int
:return: Pairing status
:rtype: bool
"""<import_stmt>time<line_sep>self.initiate_pairing(ble_connection)<line_sep>start=time.time()<while_stmt><not>self.role.smp.get_connection_encryption_status(ble_connection.connection_handle)<block_start><if_stmt>self.role.smp.did_pairing_fail(ble_connection.address)<block_start>logger.debug("Pairing Failed")<line_sep><return><false><block_end><if_stmt>timeout<is><not><none><and>time.time()-start<ge>timeout<block_start><return><false><block_end>logger.debug("Pairing in progress. Pairing Failed: %s "%self.role.smp.did_pairing_fail(ble_connection.address))<line_sep>gevent.sleep(1)<block_end>logger.debug("Paired")<line_sep><return><true><block_end><def_stmt>initiate_pairing self ble_connection<block_start>"""
Send pairing request to peer device. This is meant as an asynchronous way for a user to initiate pairing
and manage the connection while waiting for the pairing process to complete. Use BLEConnectionManager.pair
for a synchronous pairing procedure.
:param ble_connection: The BLEConnection to initiate pairing on
:type ble_connection: BLEConnection
:return:
:rtype:
"""<if_stmt><not>self.is_connected(ble_connection)<block_start>self.connect(ble_connection)<block_end>self.role.smp.send_pairing_request(ble_connection.address ble_connection.connection_handle)<block_end><def_stmt>is_pairing_in_progress self ble_connection<block_start>"""
Retrieve pairing status of BLEConnection
:param ble_connection: The BLEConnection to view the pairing status of
:type ble_connection: BLEConnection
:return: Status of BLE pairing
:rtype: bool
"""<line_sep><return>self.role.smp.is_pairing_in_progress(ble_connection.address)<block_end><def_stmt>did_pairing_fail self ble_connection<block_start>"""
Lookup whether a pairing failed status was triggered
:param ble_connection: The BLEConnection to check for a pairing failure
:type ble_connection: BLEConnection
:return: Pairing failure status (True means failure was triggered)
:rtype: bool
"""<line_sep><return>self.role.smp.did_pairing_fail(ble_connection.address)<block_end><def_stmt>is_connection_encrypted self ble_connection<block_start>"""
Retrieve BLEConnection encryption status
:param ble_connection: The BLEConnection to check the encryption status of
:type ble_connection: BLEConnection
:return: Encryption status
:rtype: bool
"""<line_sep><return>self.role.smp.get_connection_encryption_status(ble_connection.connection_handle)<block_end><def_stmt>resume_connection_encryption self ble_connection<block_start>"""
Initiate BLEConnection encryption with encryption keys present in the Security Manager's LongTermKeyDatabase.
Encryption key look-up is done based on the address of the peer device's address.
:param ble_connection: The BLEConnection to resume encryption on
:type ble_connection: BLEConnection
:return: Result of encryption initiation with existing keys (True if encryption initiation was successfully start, False if encryption keys were not found)
:rtype: bool
"""<line_sep>result=self.role.smp.initiate_encryption_with_existing_keys(ble_connection.address ble_connection.address_type ble_connection.connection_handle self.address self.our_address_type self.role)<line_sep><return>result<block_end><def_stmt>get_security_manager_long_term_key_database self<block_start>"""
Retrieve the LongTermKeyDatabase from the Security Manager
:return: LongTermKeyDatabase from the Security Manager
:rtype: blesuite.pybt.sm.LongTermKeyDatabase
"""<line_sep><return>self.role.smp.long_term_key_db<block_end><def_stmt>add_key_to_security_manager_long_term_key_database self address address_type ltk ediv rand irk csrk security_mode security_level<block_start>"""
Add an entry to the LongTermKeyDatabase that will be used for encryption key lookups when encryption
on a BLEConnection is initiated
:param address: Address of peer device (byte form, big-endian)
:type address: str
:param address_type: Address type of peer device
:type address_type: int
:param ltk: Long term key for peer (big-endian)
:type ltk: str
:param ediv: EDIV for peer. Required for LE Legacy encryption resumption
:type ediv: int
:param rand: Encryption Random for peer (big-endian). Required for LE Legacy encryption resumption
:type rand: str
:param irk: IRK for peer (big-endian)
:type irk: str
:param csrk: CSRK for peer
:type csrk: str
:param security_mode: Security mode associated with encryption keys. This mode will be applied to a connection encrypted with these keys.
:type security_mode: int
:param security_level: Security level associated with encryption keys. This level will be applied to a connection encrypted with these keys.
:type security_level: int
:return:
:rtype:
"""<line_sep>self.role.smp.long_term_key_db.add_long_term_key_entry(address address_type ltk ediv rand irk csrk security_mode security_level)<block_end><def_stmt>export_security_manager_long_term_key_database_for_storage self<block_start>"""
Export Security Manager LongTermKeyDatabase as a list of dictionary containing BLE
encryption properties (LTK, EDIV, random,
CSRK, IRK, security mode, security level) with integers and hex encoded strings
:return: LongTermKeyDatabase as a list of dictionaries with integers and hex encoded strings (user-friendly exportable version)
:rtype: dict
"""<line_sep>ltk_db=self.role.smp.long_term_key_db.get_long_term_key_database()<for_stmt>entry ltk_db<block_start>temp=entry['address']<if_stmt>temp<is><not><none><block_start>temp=temp.encode('hex')<block_end>entry['address']=temp<line_sep>temp=entry['ltk']<if_stmt>temp<is><not><none><block_start>temp=temp.encode('hex')<block_end>entry['ltk']=temp<line_sep>temp=entry['rand']<if_stmt>temp<is><not><none><block_start>temp=temp.encode('hex')<block_end>entry['rand']=temp<line_sep>temp=entry['irk']<if_stmt>temp<is><not><none><block_start>temp=temp.encode('hex')<block_end>entry['irk']=temp<line_sep>temp=entry['csrk']<if_stmt>temp<is><not><none><block_start>temp=temp.encode('hex')<block_end>entry['csrk']=temp<block_end><return>ltk_db<block_end><def_stmt>import_long_term_key_database_to_security_manager self long_term_key_database<block_start>"""
Import LongTermKeyDatabase and apply it to the Security Manager. Import database format is identical
to the LongTermKeyDatabase export format with integers and hex encoded strings. The function will perform
some input validation to ensure proper encoding and value types.
:param long_term_key_database: List of dictionaries of LongTermKeyDatabase entries with integers and hex encoded strings
:type long_term_key_database: list of dict
:return:
:rtype:
"""<import_stmt>blesuite.utils.validators<as>validator<for_stmt>entry long_term_key_database<block_start>keys=entry.keys()<if_stmt>'address'<in>keys<block_start>peer_address=entry['address'].decode('hex')<block_end><else_stmt><block_start>peer_address="00"<times>6<block_end><if_stmt>'address_type'<in>keys<block_start>peer_address_type=entry['address_type']<block_end><else_stmt><block_start>peer_address_type=0<block_end><if_stmt>'ltk'<in>keys<block_start>ltk=validator.validate_ltk(entry['ltk']).decode('hex')<block_end><else_stmt><block_start><raise>validator.InvalidSMLTK(<none>)<block_end><if_stmt>'ediv'<in>keys<block_start>ediv=entry['ediv']<block_end><else_stmt><block_start>ediv=0<block_end><if_stmt>'rand'<in>keys<block_start>rand=validator.validate_rand(entry['rand']).decode('hex')<block_end><else_stmt><block_start>rand='\x00'<times>8<block_end><if_stmt>'irk'<in>keys<block_start>irk=validator.validate_irk(entry['irk']).decode('hex')<block_end><else_stmt><block_start>irk='\x00'<times>16<block_end><if_stmt>'csrk'<in>keys<block_start>csrk=validator.validate_csrk(entry['csrk']).decode('hex')<block_end><else_stmt><block_start>csrk='\x00'<times>16<block_end><if_stmt>'security_mode'<in>keys<block_start>mode=entry['security_mode']<block_end><else_stmt><block_start>mode=1<block_end><if_stmt>'security_level'<in>keys<block_start>level=entry['security_level']<block_end><else_stmt><block_start>level=1<block_end>mode,level=validator.validate_att_security_mode(mode level)<line_sep>self.role.smp.long_term_key_db.add_long_term_key_entry(peer_address peer_address_type ltk ediv rand irk csrk mode level)<block_end><block_end><def_stmt>get_security_manager_protocol_default_pairing_parameters self<block_start>"""
Get the default pairing parameters that will be applied to Security Managers by default.
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
(Security Managers are created per BLE connection and can be modified independently)
:return: {io_cap, oob, mitm, bond, lesc, keypress, ct2, rfu, max_key_size, initiator_key_distribution, responder_key_distribution}
:rtype: dict
"""<line_sep><return>self.role.smp.get_default_pairing_parameters()<block_end><def_stmt>set_security_manager_protocol_default_pairing_parameters self default_io_cap=0x03 default_oob=0x00 default_mitm=0x00 default_bond=0x01 default_lesc=0x00 default_keypress=0x00 default_ct2=0x01 default_rfu=0x00 default_max_key_size=16 default_initiator_key_distribution=0x01 default_responder_key_distribution=0x01<block_start>"""
Set the default pairing parameters that will be applied to Security Managers by default.
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
(Security Managers are created per BLE connection and can be modified independently)
:param default_io_cap: IO Capabilities (default: 0x03 - No Input, No Output)
:type default_io_cap: int
:param default_oob: Out-of-band Data present and available (default: 0x00)
:type default_oob: int
:param default_mitm: Request man-in-the-middle pairing protections (default: 0x01)
:type default_mitm: int
:param default_bond: Request bonding (default: 0x01)
:type default_bond: int
:param default_lesc: LE Secure Connections supported (default: 0x00)
:type default_lesc: int
:param default_keypress: Keypress notifications (default: 0x00)
:type default_keypress: int
:param default_ct2: CT2 (default: 0x01)
:type default_ct2: int
:param default_rfu: Reserved for future use bits (default: 0x00)
:type default_rfu: int
:param default_max_key_size: Max encryption key size (default: 16)
:type default_max_key_size: int
:param default_initiator_key_distribution: Requested keys to be sent by the initiator (central) (default: 0x01)
:type default_initiator_key_distribution: int
:param default_responder_key_distribution: Requested keys to be sent by the responder (peripheral) (default: 0x01)
:type default_responder_key_distribution: int
:return:
:rtype:
"""<line_sep>self.role.smp.set_default_pairing_parameters(default_io_cap default_oob default_mitm default_bond default_lesc default_keypress default_ct2 default_rfu default_max_key_size default_initiator_key_distribution default_responder_key_distribution)<block_end><def_stmt>get_security_manager_protocol_pairing_parameters_for_connection self ble_connection<block_start>"""
Get the default pairing parameters for the Security Manager associated with a BLEConnection (based on the
peer address).
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
:param ble_connection: BLEConnection to modify Security Manager pairing parameters of
:type ble_connection: BLEConnection
:return: {io_cap, oob, mitm, bond, lesc, keypress, ct2, rfu, max_key_size, initiator_key_distribution, responder_key_distribution}
:rtype: dict
"""<line_sep><return>self.role.smp.get_pairing_parameters_for_connection(ble_connection.address)<block_end><def_stmt>set_security_manager_protocol_pairing_parameters_for_connection self ble_connection io_cap=0x03 oob=0x00 mitm=0x00 bond=0x01 lesc=0x00 keypress=0x0 ct2=0x01 rfu=0x00 max_key_size=16 initiator_key_distribution=0x01 responder_key_distribution=0x01<block_start>"""
Set the default pairing parameters for the Security Manager associated with a BLEConnection (based on the
peer address).
The pairing parameters are used by the devices to determine the type of pairing to use, the temporary key
sharing method (association model), and which keys will be exchanged when pairing is complete (if any).
See BLUETOOTH SPECIFICATION Version 5.0 | Vol 3, Part H
page 2340 - 2342 for more details.
:param ble_connection: BLEConnection to modify Security Manager pairing parameters of
:type ble_connection: BLEConnection
:param io_cap: IO Capabilities (default: 0x03 - No Input, No Output)
:type io_cap: int
:param oob: Out-of-band Data present and available (default: 0x00)
:type oob: int
:param mitm: Request man-in-the-middle pairing protections (default: 0x01)
:type mitm: int
:param bond: Request bonding (default: 0x01)
:type bond: int
:param lesc: LE Secure Connections supported (default: 0x00)
:type lesc: int
:param keypress: Keypress notifications (default: 0x00)
:type keypress: int
:param ct2: CT2 (default: 0x01)
:type ct2: int
:param rfu: Reserved for future use bits (default: 0x00)
:type rfu: int
:param max_key_size: Max encryption key size (default: 16)
:type max_key_size: int
:param initiator_key_distribution: Requested keys to be sent by the initiator (central) (default: 0x01)
:type initiator_key_distribution: int
:param responder_key_distribution: Requested keys to be sent by the responder (peripheral) (default: 0x01)
:type responder_key_distribution: int
:return: Success status of pairing parameter configuration (False is returned if BLEConnection does not have a valid connection or a security manager set)
:rtype: bool
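
Example (illustrative sketch; ``manager`` stands for this BLEConnectionManager
instance and ``connection`` for an established BLEConnection - both names are
assumptions, not part of the API)::

    # request a Just Works pairing (no I/O, no MITM) with bonding and a full-size key
    manager.set_security_manager_protocol_pairing_parameters_for_connection(
        connection, io_cap=0x03, mitm=0x00, bond=0x01, max_key_size=16)
    params = manager.get_security_manager_protocol_pairing_parameters_for_connection(connection)
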
"""<line_sep><return>self.role.smp.set_pairing_parameters_for_connection(ble_connection.address io_cap oob mitm bond lesc keypress ct2 rfu max_key_size initiator_key_distribution responder_key_distribution)<block_end><def_stmt>decode_gap_data self data<block_start>"""
Decode GAP data into GAP class object
:param data: GAP binary data
:type data: str
:return: GAP object containing the GAP data that has been parsed
:rtype: blesuite.pybt.gap.GAP
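
Example (illustrative; the byte string below is a hand-built payload holding a
Flags field (type 0x01) and a Shortened Local Name (type 0x08) of "test")::

    gap = manager.decode_gap_data("\x02\x01\x06\x05\x08test")
    print(manager.generate_gap_data_dict(gap))
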
"""<line_sep>gap=GAP()<try_stmt><block_start>gap.decode(data)<block_end><except_stmt>Exception<as>e<block_start><if_stmt>"Data too short"<in>str(e)<block_start>logger.debug("Data too short, leaving off malformed data")<block_end><else_stmt><block_start><raise>e<block_end><block_end><return>gap<block_end><def_stmt>generate_gap_data_dict self gap<block_start>"""
Generates a dictionary of user-friendly strings that describe the GAP data in the supplied GAP object.
:param gap: GAP object to retrieve data from
:type gap: blesuite.pybt.gap.GAP
:return: Dictionary of readable strings that represent the GAP data stored in the object
:rtype: dict
"""<line_sep><return>gap.gap_dict()<block_end># Scanning/Discovery Functions
<def_stmt>scan self timeout<block_start>"""
Carry out a BLE scan for the specified timeout and return the discovered devices.
:param timeout: Scan timeout in milliseconds (the scan loop compares elapsed time in ms)
:type timeout: int
:return: Discovered devices
:rtype: dict
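
Example (illustrative; ``manager`` is an assumed BLEConnectionManager instance,
and the returned dictionary is assumed to be keyed by peer address)::

    discovered = manager.scan(10000)  # roughly a 10 second scan
    for address in discovered:
        print(address, discovered[address])
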
"""<import_stmt>time<line_sep>self.start_scan()<line_sep>start=time.time()<times>1000<line_sep>logger.debug("Starting sleep loop")<line_sep># comparing time in ms
<while_stmt>((time.time()<times>1000)-start)<l>timeout<block_start>logger.debug("Scanning...")<line_sep>gevent.sleep(1)<block_end>self.stop_scan()<line_sep>logger.debug("Done scanning!")<line_sep>discovered_devices=self.get_discovered_devices()<line_sep><return>discovered_devices<block_end><def_stmt>start_scan self<block_start>"""
Enable scanning on HCI device.
:return:
:rtype:
"""<line_sep>self.stack_connection.scan("on")<block_end><def_stmt>stop_scan self<block_start>"""
Stop scanning on HCI device
:return:
:rtype:
"""<line_sep>self.stack_connection.scan("off")<block_end><def_stmt>advertise_and_wait_for_connection self<block_start>"""
Begin advertising with the HCI device and wait for a connection to be established.
:return: Status of connection with a peer device and the BLEConnection
:rtype: tuple - bool, (BLEConnection | None)
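
Example (illustrative)::

    success, connection = manager.advertise_and_wait_for_connection()
    if success:
        print("central connected:", connection.address)
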
"""<line_sep>self.start_advertising()<while_stmt>self.is_advertising()<block_start>gevent.sleep(1)<block_end><if_stmt>len(self.stack_connection.connection_statuses.keys())<g>0<block_start>connection_handle=self.stack_connection.connection_statuses.keys()[0]<line_sep>peer_address=self.stack_connection.peer_addresses_by_connection_handle[connection_handle]<line_sep>peer_address_type=self.stack_connection.connected_addr_type_by_connection_handle[connection_handle]<line_sep><return><true> BLEConnection(peer_address peer_address_type connection_handle=connection_handle)<block_end><else_stmt><block_start>logger.error("Advertising stopped and no connections are present. Something went wrong.")<line_sep><return><false> <none><block_end><block_end><def_stmt>start_advertising self<block_start>"""
Enable advertising on HCI device.
:return:
:rtype:
"""<line_sep>self.stack_connection.start_advertising()<block_end><def_stmt>stop_advertising self<block_start>"""
Disable advertising on HCI device.
:return:
:rtype:
"""<line_sep>self.stack_connection.stop_advertising()<block_end><def_stmt>is_advertising self<block_start>"""
Retrieve advertising status of HCI device.
:return: Status of advertising
:rtype: bool
"""<line_sep><return>self.stack_connection.is_advertising()<block_end><def_stmt>set_advertising_data self data<block_start>"""
Set advertising data.
:param data: Data to include in advertising packets
:type data: str
:return:
:rtype:
"""<line_sep>self.stack_connection.set_advertising_data(data)<block_end><def_stmt>set_scan_response_data self data<block_start>"""
Set scan response data.
:param data: Data to return when a scan packet is received.
:type data: str
:return:
:rtype:
"""<line_sep>self.stack_connection.set_scan_response_data(data)<block_end><def_stmt>set_advertising_parameters self advertisement_type channel_map interval_min interval_max destination_addr destination_addr_type<block_start>"""
Set advertising parameters. See: BLUETOOTH SPECIFICATION Version 5.0 | Vol 2, Part E page 1251
:param advertisement_type: Advertising packet type (see blesuite.utils.GAP_ADV_TYPES)
:type advertisement_type: int
:param channel_map: Bit field that indicates the advertising channels to use. (Channel 37 - 0x01, Channel 38 - 0x02, Channel 39 - 0x04, all channels - 0x07)
:type channel_map: int
:param interval_min: Minimum advertising interval for undirected and low duty cycle directed advertising. (Range 0x0020 - 0x4000, default 0x0800 or 1.28 seconds. Time conversion = interval * 0.625ms)
:type interval_min: int
:param interval_max: Maximum advertising interval for undirected and low duty cycle directed advertising. (Range 0x0020 - 0x4000, default 0x0800 or 1.28 seconds. Time conversion = interval * 0.625ms)
:type interval_max: int
:param destination_addr: Destination address for directed advertising (set to 00:00:00:00:00:00 if using undirected advertising)
:type destination_addr: str
:param destination_addr_type: Destination address type (set to 0x00 if using undirected advertising)
:type destination_addr_type: int
:return:
:rtype:
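
Example (illustrative; undirected connectable advertising on all three channels
at the spec default interval, 0x0800 * 0.625 ms = 1.28 s)::

    manager.set_advertising_parameters(0x00, 0x07, 0x0800, 0x0800,
                                       "00:00:00:00:00:00", 0x00)
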
"""<line_sep>self.stack_connection.set_advertising_parameters(advertisement_type channel_map interval_min interval_max destination_addr destination_addr_type)<block_end><def_stmt>set_local_name self name enforce_null_termination=<true><block_start>"""
Set the local name of the HCI device. (Bluetooth Spec says the value needs to be null terminated. If it is
intended to write a string that is not null terminated, then set the enforcement flag to False).
:param name: Local name to write to HCI device
:type name: str
:param enforce_null_termination: Flag to enforce null termination (default: True)
:type enforce_null_termination: bool
:return:
:rtype:
"""<if_stmt>enforce_null_termination<block_start><if_stmt>len(name)<ne>248<block_start>padding=248-len(name)<line_sep>name=name+('\0'<times>padding)<block_end><block_end>self.stack_connection.set_local_name(name)<block_end><def_stmt>get_gatt_server self<block_start>"""
Retrieve the GATT server for the BLEConnectionManager instance.
:return: GATT Server
:rtype: blesuite.pybt.gatt.Server
"""<line_sep><return>self.gatt_server<block_end><def_stmt>set_server_mtu self mtu<block_start>"""
Configures the MTU (max transmission unit) on the GATT server and ATT class instance. MTU is used
to restrict the size of data the stack returns in ATT packets. Note: The MTU used by the class
is determined by the MTUs exchanged by both connected BLE devices (uses the minimum value of the
exchanged MTUs).
:param mtu: MTU size in bytes (Bluetooth Spec default is 23 bytes)
:type mtu: int
:return:
:rtype:
"""<line_sep>self.mtu=mtu<line_sep>self.role.att.set_mtu(mtu)<block_end><def_stmt>get_server_mtu self<block_start>"""
Returns the MTU size from the GATT server.
:return: GATT server MTU (bytes)
:rtype: int
"""<if_stmt>self.role.att.gatt_server<is><not><none><block_start><return>self.role.att.gatt_server.mtu<block_end><block_end><def_stmt>initialize_gatt_server_from_ble_device self ble_device use_handles_from_ble_device=<false><block_start>"""
Initializes the GATT server based on a supplied BLEDevice entity. All services, includes, characteristics,
and descriptors are retrieved from the BLEDevice entity and added to the GATT server using the
properties and permissions configured in the BLEDevice object.
:param ble_device: BLEDevice object to replicate with the GATT server
:type ble_device: BLEDevice
:param use_handles_from_ble_device: Flag to indicate that the GATT server should use the attribute handles specified in each BLE entity within the BLEDevice. If set to False (default), the GATT server will automatically assign handles in the order that entities are added to the server.
:type use_handles_from_ble_device: bool
:return:
:rtype:
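
Example (illustrative sketch that mirrors a scanned peer's GATT database onto
the local server; ``connection`` is an assumed established BLEConnection)::

    device = manager.gatt_discover_primary_services(connection)
    device = manager.gatt_discover_characteristics(connection, device)
    device = manager.gatt_discover_descriptors(connection, device)
    manager.initialize_gatt_server_from_ble_device(device, use_handles_from_ble_device=True)
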
"""<import_from_stmt>pybt.gatt GATTService GATTCharacteristic GATTCharacteristicDescriptorDeclaration GATTInclude UUID<if_stmt>self.gatt_server<is><none><block_start>att_db=AttributeDatabase()<line_sep>self.gatt_server=Server(att_db)<line_sep>self.gatt_server.set_mtu(self.mtu)<block_end><for_stmt>service ble_device.get_services()<block_start>gatt_service=GATTService(UUID(service.attribute_type) UUID(service.uuid))<line_sep>gatt_service.start=service.start<line_sep>gatt_service.end=service.end<line_sep>gatt_service.handle=service.start<for_stmt>incl service.get_includes()<block_start>include_1=GATTInclude(incl.included_service_att_handle incl.included_service_end_group_handle UUID(incl.included_service_uuid) incl.include_definition_attribute_properties incl.include_definition_attribute_read_permission incl.include_definition_attribute_write_permission incl.include_definition_attribute_require_authorization)<line_sep>include_1.handle=incl.handle<line_sep>gatt_service.add_include(include_1)<block_end><for_stmt>characteristic service.get_characteristics()# create general characteristic (note: this method doesn't apply permissions and properties to the
# characteristic declaration descriptor)
<block_start>characteristic_1=GATTCharacteristic(characteristic.value characteristic.gatt_properties UUID(characteristic.uuid) characteristic.characteristic_value_attribute_properties characteristic.characteristic_value_attribute_read_permission characteristic.characteristic_value_attribute_write_permission characteristic.characteristic_value_attribute_require_authorization)<line_sep># update characteristic declaration descriptor with configured permissions and authz
characteristic_1.declaration.attribute_properties=characteristic.characteristic_definition_attribute_properties<line_sep>characteristic_1.declaration.attribute_read_permission=characteristic.characteristic_definition_attribute_read_permission<line_sep>characteristic_1.declaration.attribute_write_permission=characteristic.characteristic_definition_attribute_write_permission<line_sep>characteristic_1.declaration.require_authorization=characteristic.characteristic_definition_attribute_require_authorization<line_sep>characteristic_1.declaration.handle=characteristic.handle<line_sep>characteristic_1.declaration.value_attribute_handle=characteristic.value_handle<line_sep>characteristic_1.value_declaration.handle=characteristic.value_handle<for_stmt>descriptor characteristic.get_descriptors()# characteristic declaration is already created when we created the characteristic attribute
<block_start><if_stmt>descriptor.type<eq>0x2803<block_start><continue><block_end>descriptor_1=GATTCharacteristicDescriptorDeclaration(UUID(descriptor.uuid) descriptor.value descriptor.characteristic_descriptor_attribute_properties descriptor.characteristic_descriptor_attribute_read_permission descriptor.characteristic_descriptor_attribute_write_permission descriptor.characteristic_descriptor_attribute_require_authorization)<line_sep>descriptor_1.handle=descriptor.handle<line_sep>characteristic_1.add_descriptor(descriptor_1)<block_end>gatt_service.add_characteristic(characteristic_1)<block_end>self.gatt_server.add_service(gatt_service)<block_end>self.gatt_server.refresh_database(calculate_handles=(<not>use_handles_from_ble_device))<block_end><def_stmt>set_extended_inquiry_response self fec_required=0 formatted_eir_data=<none><block_start>"""
Set the extended inquiry response on the HCI device.
:param fec_required: FEC required (default: 0)
:type fec_required: int
:param formatted_eir_data: Formatted extended inquiry response data (default: None)
:type formatted_eir_data: str
:return:
:rtype:
"""<line_sep>self.stack_connection.set_eir_response(fec_required=fec_required formatted_eir_data=formatted_eir_data)<block_end><def_stmt>read_remote_used_features self connection<block_start>"""
Issues a read remote used features command to the connected peer device.
:param connection: BLEConnection of target connection
:type connection: BLEConnection
:return:
:rtype:
"""<line_sep>self.stack_connection.read_remote_used_features(connection.connection_handle)<line_sep><return><block_end># ATT Packets / GATT Procedures
<def_stmt>exchange_mtu self connection mtu timeout=15<times>1000<block_start>"""
Sends Exchange MTU packet using the supplied BLEConnection object
and returns a GATTRequest object containing the request or any received errors.
Synchronous method. Note: Sending this packet as a peripheral will not
change the MTU configured on the GATT server.
:param connection: BLEConnection with connection to target device
:param mtu: Desired MTU (bytes)
:param timeout: Timeout for exchange MTU response (in milliseconds)
:type connection: BLEConnection
:type mtu: int
:rtype: blesuite.pybt.core.GATTRequest
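
Example (illustrative; 247 bytes is a commonly requested value on 4.2+ stacks)::

    request = manager.exchange_mtu(connection, 247)
    if not request.has_error() and request.has_response():
        print("peer accepted, connection.mtu is now", connection.mtu)
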
"""<line_sep>request=self.stack_connection.exchange_mtu_sync(mtu connection.connection_handle timeout=timeout)<if_stmt>request.has_error()<block_start>logger.debug("Exchange MTU Response Error")<block_end><else_stmt><block_start>logger.debug("Exchange MTU Response Data(str): %s"%request.response.data)<block_end><if_stmt><not>request.has_error()<and>request.has_response()<block_start>connection.mtu=mtu<block_end><return>request<block_end><def_stmt>gatt_discover_primary_services self connection device=<none><block_start>"""
Discover primary GATT services of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default), a new BLEDevice object with the discovered entities will be created and returned.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""<if_stmt>device<is><none><block_start>device=BLEDevice(connection.address)<block_end><return>gatt_procedure_discover_primary_services(self connection device)<block_end><def_stmt>gatt_discover_secondary_services self connection device=<none><block_start>"""
Discover secondary GATT services of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default), a new BLEDevice object with the discovered entities will be created and returned.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""<if_stmt>device<is><none><block_start>device=BLEDevice(connection.address)<block_end><return>gatt_procedure_discover_secondary_services(self connection device)<block_end><def_stmt>gatt_discover_characteristics self connection device=<none><block_start>"""
Discover GATT characteristics of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default), a new BLEDevice object with the discovered entities will be created and returned.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""<if_stmt>device<is><none><block_start>device=BLEDevice(connection.address)<block_end><return>gatt_procedure_discover_characteristics(self connection device)<block_end><def_stmt>gatt_discover_includes self connection device=<none><block_start>"""
Discover GATT service includes of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default), a new BLEDevice object with the discovered entities will be created and returned.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""<if_stmt>device<is><none><block_start>device=BLEDevice(connection.address)<block_end><return>gatt_procedure_discover_includes(self connection device)<block_end><def_stmt>gatt_discover_descriptors self connection device<block_start>"""
Discover GATT characteristic descriptors of a peer GATT server and populate (or generate) a BLEDevice object
with the discovered entities.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. Unlike the other discovery helpers, a BLEDevice must be supplied here; no default instance is created.
:type device: BLEDevice
:return: Populated BLEDevice
:rtype: BLEDevice
"""<line_sep><return>gatt_procedure_discover_descriptors(self connection device)<block_end><def_stmt>smart_scan self connection device=<none> look_for_device_info=<true> attempt_desc_read=<false> timeout=15<times>1000<block_start>"""
Initiate a BLE Smart Scan, which is an all-inclusive way to scan a BLE peripheral for all
services, includes, characteristics, and descriptors. The scan can also attempt to read from each
attribute handle discovered during the scan (regardless of GATT properties returned by the server) in
order to quickly view data exposed by the device.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param device: BLEDevice to populate. If None is supplied (default), a new BLEDevice object with the discovered entities will be created and returned.
:type device: BLEDevice
:param look_for_device_info: Flag to indicate that the scan should also query several basic types of device information based on UUIDs defined by the Bluetooth Special Interest Group (default: True)
:type look_for_device_info: bool
:param attempt_desc_read: Flag to indicate the scan should attempt to read from each attribute discovered during the scan (default: False). Note: This may significantly slow down the scan. If the target peripheral disconnects, the scan will attempt to reconnect to the server.
:type attempt_desc_read: bool
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: Populated BLEDevice
:rtype: BLEDevice
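
Example (illustrative)::

    device = manager.smart_scan(connection, attempt_desc_read=True)
    for service in device.get_services():
        print(hex(service.start), hex(service.end), service.uuid)
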
"""<if_stmt>device<is><none><block_start>device=BLEDevice(connection.address)<block_end><return>blesuite_smart_scan(self connection device look_for_device_info=look_for_device_info attempt_desc_read=attempt_desc_read timeout=timeout)<block_end><def_stmt>gatt_write_handle self connection handle data timeout=15<times>1000<block_start>"""
Send an ATT Write request to the peer device associated with the supplied BLEConnection, attribute
handle, and data. This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
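
Example (illustrative; handle 0x000b is an assumed writable characteristic
value handle, not a real fixture)::

    request = manager.gatt_write_handle(connection, 0x000b, "\x01\x00")
    if request.has_error():
        print("write rejected by peer")
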
"""<line_sep><return>gatt_procedure_write_handle(self.stack_connection connection.connection_handle handle data timeout=timeout)<block_end><def_stmt>gatt_write_handle_async self connection handle data timeout=15<times>1000<block_start>"""
Send an ATT Write request to the peer device associated with the supplied BLEConnection, attribute
handle, and data. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_write_handle_async(self.stack_connection connection.connection_handle handle data timeout=timeout)<block_end><def_stmt>gatt_write_command_handle self connection handle data<block_start>"""
Send an ATT Write Command request to the peer device associated with the supplied BLEConnection, attribute
handle, and data. This is an asynchronous call that will send the request to the peer device. No GATTRequest
will be generated since this command should not ever receive a response from the peer.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
"""<line_sep>gatt_procedure_write_command_handle(self.stack_connection connection.connection_handle handle data)<block_end><def_stmt>gatt_prepare_write_handle self connection handle data offset timeout=15<times>1000<block_start>"""
Send an ATT Prepare Write request to the peer device associated with the supplied BLEConnection, attribute
handle, offset, and data. This is a synchronous call that will wait for either a successful
response, error response, or the specified timeout (milliseconds) to be reached.
Note: Prepare write is used in conjunction with execute write to write a large set of data.
The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param offset: Offset to write the data
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
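
Example (illustrative long write of an assumed 36-byte ``value`` split into two
18-byte fragments, committed by a follow-up execute write)::

    manager.gatt_prepare_write_handle(connection, 0x000b, value[:18], 0)
    manager.gatt_prepare_write_handle(connection, 0x000b, value[18:], 18)
    manager.gatt_execute_write(connection, 0x01)  # 0x01 - write pending values
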
"""<line_sep><return>gatt_procedure_prepare_write_handle(self.stack_connection connection.connection_handle handle data offset timeout=timeout)<block_end><def_stmt>gatt_prepare_write_handle_async self connection handle data offset timeout=15<times>1000<block_start>"""
Send an ATT Prepare Write request to the peer device associated with the supplied BLEConnection, attribute
handle, offset, and data. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
Note: Prepare write is used in conjunction with execute write to write a large set of data.
The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param data: Data to place in ATT write request.
:type data: str
:param offset: Offset to write the data
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_prepare_write_handle_async(self.stack_connection connection.connection_handle handle data offset timeout=timeout)<block_end><def_stmt>gatt_execute_write self connection flags timeout=15<times>1000<block_start>"""
Send an ATT Execute Write request to the peer device associated with the supplied BLEConnection and flag.
This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
Note: Execute write is used in conjunction with prepare write
to write a large set of data. The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param flags: Specifies which execute write operation should be performed (0x00 - Cancel all prepared writes, 0x01 - Immediately write all pending prepared values).
:type flags: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_execute_write(self.stack_connection connection.connection_handle flags timeout=timeout)<block_end><def_stmt>gatt_execute_write_async self connection flags timeout=15<times>1000<block_start>"""
Send an ATT Execute Write request to the peer device associated with the supplied BLEConnection and flag.
This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
Note: Execute write is used in conjunction with prepare write
to write a large set of data. The user will send a series of prepare
write requests with data and the correct offsets to set a large value for a write operation. An execute
write request will then be issued to carry out the write. (Permission / Auth checks should happen on the
prepare write request).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param flags: Specifies which execute write operation should be performed (0x00 - Cancel all prepared writes, 0x01 - Immediately write all pending prepared values).
:type flags: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_execute_write_async(self.stack_connection connection.connection_handle flags timeout=timeout)<block_end><def_stmt>gatt_read_handle self connection handle timeout=15<times>1000<block_start>"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and attribute
handle. This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_read_handle(self.stack_connection connection.connection_handle handle timeout=timeout)<block_end><def_stmt>gatt_read_handle_async self connection handle timeout=15<times>1000<block_start>"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and attribute
handle. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_read_handle_async(self.stack_connection connection.connection_handle handle timeout=timeout)<block_end><def_stmt>gatt_read_multiple_handles self connection handles timeout=15<times>1000<block_start>"""
Send an ATT Read Multiple request to the peer device associated with the supplied BLEConnection and
a set of attribute handles.
This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handles: A list of attribute handles for target attributes (0x01 - 0xFFFF)
:type handles: list of int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_read_multiple_handles(self.stack_connection connection.connection_handle handles timeout=timeout)<block_end><def_stmt>gatt_read_multiple_handles_async self connection handles timeout=15<times>1000<block_start>"""
Send an ATT Read Multiple request to the peer device associated with the supplied BLEConnection and
a set of attribute handles.
This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handles: A list of attribute handles for target attributes (0x01 - 0xFFFF)
:type handles: list of int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_read_multiple_handles_async(self.stack_connection connection.connection_handle handles timeout=timeout)<block_end><def_stmt>gatt_read_blob_handle self connection handle offset timeout=15<times>1000<block_start>"""
Send an ATT Blob Read request to the peer device associated with the supplied BLEConnection, attribute
handle, and an offset. This is a synchronous call that will wait for either a successful
response, error response, or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param offset: Offset to begin reading attribute value
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_read_blob_handle(self.stack_connection connection.connection_handle handle offset timeout=timeout)<block_end><def_stmt>gatt_read_blob_handle_async self connection handle offset timeout=15<times>1000<block_start>"""
Send an ATT Blob Read request to the peer device associated with the supplied BLEConnection, attribute
handle, and an offset. This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param handle: Attribute handle of target attribute (0x01 - 0xFFFF)
:type handle: int
:param offset: Offset to begin reading attribute value
:type offset: int
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_read_blob_handle_async(self.stack_connection connection.connection_handle handle offset timeout=timeout)<block_end><def_stmt>gatt_read_uuid self connection uuid timeout=15<times>1000<block_start>"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and GATT UUID.
This is a synchronous call that will wait for either a successful response, error response,
or the specified timeout (milliseconds) to be reached.
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param uuid: UUID of target GATT entity (16-bit and 128-bit UUIDs are accepted)
:type uuid: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
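
Example (illustrative; 0x2a00 is the Bluetooth SIG assigned Device Name
characteristic UUID)::

    request = manager.gatt_read_uuid(connection, "2a00")
    if request.has_response():
        print(request.response.data)
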
"""<line_sep><return>gatt_procedure_read_uuid(self.stack_connection connection.connection_handle UUID(uuid) timeout=timeout)<block_end><def_stmt>gatt_read_uuid_async self connection uuid timeout=15<times>1000<block_start>"""
Send an ATT Read request to the peer device associated with the supplied BLEConnection and GATT UUID.
This is an asynchronous call that will send the request to the peer device and
return a GATTRequest object that can be monitored for a GATTResponse or GATTError (either through a valid
peer response, peer error response, or timeout error triggering).
:param connection: BLEConnection with the connected GATT server
:type connection: BLEConnection
:param uuid: UUID of target GATT entity (16-bit and 128-bit UUIDs are accepted)
:type uuid: str
:param timeout: Request timeout (milliseconds)
:type timeout: int
:return: GATTRequest that contains the GATTResponse or GATTError result
:rtype: blesuite.pybt.core.GATTRequest
"""<line_sep><return>gatt_procedure_read_uuid_async(self.stack_connection connection.connection_handle UUID(uuid) timeout=timeout)<block_end><def_stmt>att_send_raw self connection body<block_start>"""
Sends a raw ATT packet using the supplied BLEConnection object
and data supplied. The function does not apply a standard ATT header to the supplied body, but L2CAP
and HCI encapsulation is handled.
Note: Valid ATT packets can be constructed using
packets defined in scapy.layers.bluetooth
or using random data for fuzzing.
:param connection: BLEConnection to target device
:param body: ATT request body
:rtype: GATTRequest
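
Example (illustrative; builds a standard ATT Read Request for handle 0x0003
with scapy, as the note above suggests)::

    from scapy.layers.bluetooth import ATT_Hdr, ATT_Read_Request
    body = str(ATT_Hdr() / ATT_Read_Request(gatt_handle=0x0003))
    request = manager.att_send_raw(connection, body)
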
"""<line_sep>request=self.stack_connection.send_raw_att(body connection.connection_handle)<line_sep><return>request<block_end><def_stmt>l2cap_send_raw self connection body<block_start>"""
Sends a raw L2CAP packet using the supplied BLEConnection object
and data supplied. The function does not apply a standard L2CAP header to the user supplied value,
but HCI encapsulation is applied.
Note: Valid L2CAP packets can be constructed using packets defined in scapy.layers.bluetooth
or using random data for fuzzing.
:param connection: BLEConnection to target device
:param body: L2CAP request body
:rtype: GATTRequest
"""<line_sep>request=self.stack_connection.send_raw_l2cap(body connection.connection_handle)<line_sep><return>request<block_end><block_end> |
<import_from_stmt>indy_node.server.upgrade_log UpgradeLog<import_from_stmt>indy_node.test waits<import_from_stmt>indy_node.test.upgrade.helper checkUpgradeScheduled sdk_ensure_upgrade_sent<import_from_stmt>plenum.common.constants VERSION<import_from_stmt>plenum.common.messages.node_messages Propagate<import_from_stmt>plenum.common.request Request<import_from_stmt>plenum.test.delayers req_delay ppgDelay<import_from_stmt>plenum.test.test_node getNonPrimaryReplicas<def_stmt>test_forced_upgrade_handled_once_if_request_received_after_propagate looper nodeSet sdk_pool_handle sdk_wallet_trustee validUpgradeExpForceTrue<block_start>"""
Verifies that POOL_UPGRADE force=true request is handled one time in case
the node commits the transaction to the ledger but during the 3PC-process
receives the request directly from the client after a PROPAGATE from some
other node
"""<line_sep>slow_node=getNonPrimaryReplicas(nodeSet instId=0)[-1].node<line_sep>slow_node.clientIbStasher.delay(req_delay())<line_sep>slow_node.nodeIbStasher.delay(ppgDelay(sender_filter='Beta'))<line_sep>slow_node.nodeIbStasher.delay(ppgDelay(sender_filter='Gamma'))<line_sep>original_process_propagate=slow_node.nodeMsgRouter.routes[Propagate]<line_sep>original_process_request=slow_node.clientMsgRouter.routes[Request]<def_stmt>patched_process_propagate msg:Propagate frm:str<block_start>original_process_propagate(msg frm)<line_sep>slow_node.clientIbStasher.reset_delays_and_process_delayeds()<line_sep>slow_node.nodeMsgRouter.routes[Propagate]=original_process_propagate<block_end><def_stmt>patched_process_request request:Request frm:str<block_start>original_process_request(request frm)<line_sep>slow_node.nodeIbStasher.reset_delays_and_process_delayeds()<line_sep>slow_node.clientMsgRouter.routes[Request]=original_process_request<block_end>slow_node.nodeMsgRouter.routes[Propagate]=patched_process_propagate<line_sep>slow_node.clientMsgRouter.routes[Request]=patched_process_request<line_sep>init_len=len(list(slow_node.upgrader._actionLog))<line_sep>sdk_ensure_upgrade_sent(looper sdk_pool_handle sdk_wallet_trustee validUpgradeExpForceTrue)<line_sep>looper.runFor(waits.expectedUpgradeScheduled())<line_sep>checkUpgradeScheduled([slow_node] validUpgradeExpForceTrue[VERSION])<if_stmt>init_len<eq>0# first upgrade - should be only one scheduled
<block_start><assert_stmt>len(list(slow_node.upgrader._actionLog))<eq>1<block_end><else_stmt># one upgrade were already scheduled. we should cancel it and schedule new one
# so action log should be increased by 2
<block_start><assert_stmt>len(list(slow_node.upgrader._actionLog))<eq>init_len+2<block_end><assert_stmt>slow_node.upgrader._actionLog.last_event.ev_type<eq>UpgradeLog.Events.scheduled<block_end> |
###############################################################################
# WordTokenizer
<import_from_stmt>nimbusml Pipeline FileDataStream<import_from_stmt>nimbusml.datasets get_dataset<import_from_stmt>nimbusml.preprocessing.text WordTokenizer<line_sep># data input (as a FileDataStream)
path=get_dataset("wiki_detox_train").as_filepath()<line_sep>data=FileDataStream.read_csv(path sep='\t')<line_sep>print(data.head())<line_sep># Sentiment SentimentText
# 0 1 ==RUDE== Dude, you are rude upload that carl p...
# 1 1 == OK! == IM GOING TO VANDALIZE WILD ONES WIK...
# 2 1 Stop trolling, zapatancas, calling me a liar m...
# 3 1 ==You're cool== You seem like a really cool g...
# 4 1 ::::: Why are you threatening me? I'm not bein...
tokenize=WordTokenizer(char_array_term_separators=[" "])<lshift>{'wt':'SentimentText'}<line_sep>pipeline=Pipeline([tokenize])<line_sep>tokenize.fit(data)<line_sep>y=tokenize.transform(data)<line_sep>print(y.drop(labels='SentimentText' axis=1).head())<line_sep># Sentiment wt.000 wt.001 wt.002 wt.003 wt.004 wt.005 ... wt.366 wt.367 wt.368 wt.369 wt.370 wt.371 wt.372
# 0 1 ==RUDE== Dude, you are rude upload ... None None None None None None None
# 1 1 == OK! == IM GOING TO ... None None None None None None None
# 2 1 Stop trolling, zapatancas, calling me a ... None None None None None None None
# 3 1 ==You're cool== You seem like a ... None None None None None None None
# 4 1 ::::: Why are you threatening me? ... None None None None None None None
|
<import_stmt>hmac<import_from_stmt>http cookies<import_stmt>json<import_from_stmt>typing Callable TYPE_CHECKING<import_from_stmt>urllib.parse urlencode<import_stmt>thor<import_from_stmt>thor.http HttpClient get_header<import_from_stmt>thor.http.error HttpError<import_from_stmt>redbot.resource HttpResource<import_from_stmt>redbot.type RawHeaderListType<line_sep>token_client=HttpClient()<line_sep>token_client.idle_timeout=30<line_sep>token_client.connect_timeout=10<line_sep>token_client.read_timeout=10<line_sep>token_client.max_server_conn=30<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>redbot.webui RedWebUi<block_end># pylint: disable=cyclic-import,unused-import
<class_stmt>CaptchaHandler<block_start><def_stmt>__init__ self webui:"RedWebUi" client_id:str continue_test:Callable error_response:Callable <arrow><none><block_start>self.webui=webui<line_sep>self.client_id=client_id<line_sep>self.continue_test=continue_test<line_sep>self.error_response=error_response<line_sep>self.secret=webui.config.get("hcaptcha_secret" "").encode("utf-8")<line_sep>self.token_lifetime=webui.config.getint("token_lifetime" fallback=300)<block_end><def_stmt>run self<arrow><none><block_start>captcha_token=self.webui.body_args.get("captcha_token" [<none>])[0]<line_sep>cookie_str=b", ".join(get_header(self.webui.req_headers b"cookie"))<try_stmt><block_start>cookiejar=cookies.SimpleCookie(cookie_str.decode("utf-8" "replace"))<line_sep># type: cookies.SimpleCookie
<block_end><except_stmt>cookies.CookieError<block_start>self.error_response(b"400" b"Bad Request" "Sorry, your cookies appear corrupted. Please try again." f"Cookie Parse Error: {cookie_str.decode('utf-8' 'replace')}" )<line_sep><return><block_end>human_time=cookiejar.get("human_time" <none>)<line_sep>human_hmac=cookiejar.get("human_hmac" <none>)<if_stmt>human_time<and>human_time.value.isdigit()<and>human_hmac<block_start><if_stmt>self.verify_human(int(human_time.value) human_hmac.value)<block_start>self.continue_test()<block_end><else_stmt><block_start>self.error_response(b"403" b"Forbidden" "I need to double-check that you're human; please resubmit." "Invalid human token" )<block_end><block_end><elif_stmt>captcha_token<block_start>self.verify_captcha(captcha_token)<block_end><else_stmt><block_start>self.error_response(b"403" b"Forbidden" "I need to double-check that you're human; please resubmit." "Invalid captcha." )<block_end><block_end><def_stmt>verify_captcha self presented_token:str<arrow><none><block_start>exchange=token_client.exchange()<line_sep>@thor.events.on(exchange)<def_stmt>error err_msg:HttpError<arrow><none><block_start>self.error_response(b"403" b"Forbidden" "There was a problem with the Captcha server; please try again soon." f"Captcha error: {err_msg}." )<block_end>@thor.events.on(exchange)<def_stmt>response_start status:bytes phrase:bytes headers:RawHeaderListType<arrow><none><block_start>exchange.tmp_status=status<block_end>exchange.tmp_res_body=b""<line_sep>@thor.events.on(exchange)<def_stmt>response_body chunk:bytes<arrow><none><block_start>exchange.tmp_res_body<augadd>chunk<block_end>@thor.events.on(exchange)<def_stmt>response_done trailers:RawHeaderListType<arrow><none><block_start><try_stmt><block_start>results=json.loads(exchange.tmp_res_body)<block_end><except_stmt>ValueError<block_start><if_stmt>exchange.tmp_status<ne>b"200"<block_start>e_str=f"Captcha server returned {exchange.tmp_status.decode('utf-8')} status code"<block_end><else_stmt><block_start>e_str=f"Captcha server response error"<block_end>self.error_response(b"500" b"Internal Server Error" e_str e_str )<line_sep><return><block_end><if_stmt>results["success"]<block_start>self.continue_test(self.issue_human())<block_end><else_stmt><block_start>e_str=f"Captcha errors: {', '.join(results.get('error-codes' ['unknown error']))}"<line_sep>self.error_response(b"403" b"Forbidden" e_str e_str )<block_end><block_end>request_form={"secret":self.secret "response":presented_token "remoteip":self.client_id }<line_sep>exchange.request_start(b"POST" b"https://hcaptcha.com/siteverify" [[b"content-type" b"application/x-www-form-urlencoded"]] )<line_sep>exchange.request_body(urlencode(request_form).encode("utf-8" "replace"))<line_sep>exchange.request_done({})<block_end><def_stmt>issue_human self<arrow>RawHeaderListType<block_start>"""
Return cookie headers for later verification that this is a human.
"""<line_sep>human_time=str(int(thor.time())+self.token_lifetime)<line_sep>human_hmac=hmac.new(self.secret bytes(human_time "ascii") "sha512").hexdigest()<line_sep><return>[(b"Set-Cookie" f"human_time={human_time}; Max-Age={self.token_lifetime}; SameSite=Strict".encode("ascii") ) (b"Set-Cookie" f"human_hmac={human_hmac}; Max-Age={self.token_lifetime}; SameSite=Strict".encode("ascii") ) ]<block_end><def_stmt>verify_human self human_time:int human_hmac:str<arrow>bool<block_start>"""
Check the user's human HMAC.
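
The check recomputes HMAC-SHA512(secret, str(human_time)) and compares it to the
presented hex digest, then confirms that the embedded expiry timestamp is still
in the future. Illustrative round trip (values are made up)::

    # cookie pair issued by issue_human():
    #   human_time = "1700000300"
    #   human_hmac = hmac.new(secret, b"1700000300", "sha512").hexdigest()
    # verify_human(1700000300, human_hmac) -> True until that timestamp passes
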
"""<line_sep>computed_hmac=hmac.new(self.secret bytes(str(human_time) "ascii") "sha512")<line_sep>is_valid=human_hmac<eq>computed_hmac.hexdigest()<if_stmt>is_valid<and>human_time<ge>thor.time()<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><block_end> |
<import_stmt>random<import_stmt>time<def_stmt>main request response# no-cache itself to ensure the user agent finds a new version for each update.
<block_start>headers=[(b'Cache-Control' b'no-cache, must-revalidate') (b'Pragma' b'no-cache')]<line_sep>content_type=b'application/javascript'<line_sep>headers.append((b'Content-Type' content_type))<line_sep>extra_body=u"self.onfetch = (event) => { event.respondWith(fetch(event.request)); };"<line_sep># Return a different script for each access.
<return>headers u'/* %s %s */ %s'%(time.time() random.random() extra_body)<block_end> |
<import_from_stmt>eth2spec.test.context spec_state_test with_all_phases<import_from_stmt>eth2spec.test.helpers.epoch_processing run_epoch_processing_with <def_stmt>run_process_historical_roots_update spec state<block_start><yield><from>run_epoch_processing_with(spec state 'process_historical_roots_update')<block_end>@with_all_phases@spec_state_test<def_stmt>test_historical_root_accumulator spec state# skip ahead to near the end of the historical roots period (excl block before epoch processing)
<block_start>state.slot=spec.SLOTS_PER_HISTORICAL_ROOT-1<line_sep>history_len=len(state.historical_roots)<line_sep><yield><from>run_process_historical_roots_update(spec state)<assert_stmt>len(state.historical_roots)<eq>history_len+1<block_end> |
# - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
Transforms.ColumnSelector
"""<import_from_stmt>..utils.entrypoints EntryPoint<import_from_stmt>..utils.utils try_set unlist<def_stmt>transforms_columnselector data output_data=<none> model=<none> keep_columns=<none> drop_columns=<none> keep_hidden=<false> ignore_missing=<false> **params<block_start>"""
**Description**
Selects a set of columns, dropping all others
:param keep_columns: List of columns to keep. (inputs).
:param data: Input dataset (inputs).
:param drop_columns: List of columns to drop. (inputs).
:param keep_hidden: Specifies whether to keep or remove hidden
columns. (inputs).
:param ignore_missing: Specifies whether to ignore columns that
are missing from the input. (inputs).
:param output_data: Transformed dataset (outputs).
:param model: Transform model (outputs).
"""<line_sep>entrypoint_name='Transforms.ColumnSelector'<line_sep>inputs={}<line_sep>outputs={}<if_stmt>keep_columns<is><not><none><block_start>inputs['KeepColumns']=try_set(obj=keep_columns none_acceptable=<true> is_of_type=list is_column=<true>)<block_end><if_stmt>data<is><not><none><block_start>inputs['Data']=try_set(obj=data none_acceptable=<false> is_of_type=str)<block_end><if_stmt>drop_columns<is><not><none><block_start>inputs['DropColumns']=try_set(obj=drop_columns none_acceptable=<true> is_of_type=list is_column=<true>)<block_end><if_stmt>keep_hidden<is><not><none><block_start>inputs['KeepHidden']=try_set(obj=keep_hidden none_acceptable=<true> is_of_type=bool)<block_end><if_stmt>ignore_missing<is><not><none><block_start>inputs['IgnoreMissing']=try_set(obj=ignore_missing none_acceptable=<true> is_of_type=bool)<block_end><if_stmt>output_data<is><not><none><block_start>outputs['OutputData']=try_set(obj=output_data none_acceptable=<false> is_of_type=str)<block_end><if_stmt>model<is><not><none><block_start>outputs['Model']=try_set(obj=model none_acceptable=<false> is_of_type=str)<block_end>input_variables={x<for>x unlist(inputs.values())<if>isinstance(x str)<and>x.startswith("$")}<line_sep>output_variables={x<for>x unlist(outputs.values())<if>isinstance(x str)<and>x.startswith("$")}<line_sep>entrypoint=EntryPoint(name=entrypoint_name inputs=inputs outputs=outputs input_variables=input_variables output_variables=output_variables)<line_sep><return>entrypoint<block_end> |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
<import_from_stmt>spack *<class_stmt>Orfm(AutotoolsPackage)<block_start>"""A simple and not slow open reading frame (ORF) caller. No bells or
whistles like frameshift detection, just a straightforward goal of
returning a FASTA file of open reading frames over a certain length
from a FASTA/Q file of nucleotide sequences."""<line_sep>homepage="https://github.com/wwood/OrfM"<line_sep>url="https://github.com/wwood/OrfM/releases/download/v0.7.1/orfm-0.7.1.tar.gz"<line_sep>version('0.7.1' sha256='19f39c72bcc48127b757613c5eef4abae95ee6c82dccf96b041db527b27f319a')<line_sep>depends_on('zlib' type='link')<block_end> |
<import_stmt>sys<import_stmt>time<import_stmt>typing<as>tp<import_from_stmt>unittest TestCase<import_stmt>hypothesis<as>hp<import_from_stmt>hypothesis strategies<as>st<import_stmt>pypeln<as>pl<line_sep>MAX_EXAMPLES=10<line_sep>T=tp.TypeVar("T")<line_sep>@hp.given(nums=st.lists(st.integers()))@hp.settings(max_examples=MAX_EXAMPLES)<def_stmt>test_concat_basic nums:tp.List[int]<block_start>nums_py=list(map(<lambda>x:x+1 nums))<line_sep>nums_py1=list(map(<lambda>x:x<power>2 nums_py))<line_sep>nums_py2=list(map(<lambda>x:-x nums_py))<line_sep>nums_py=nums_py1+nums_py2<line_sep>nums_pl=pl.task.map(<lambda>x:x+1 nums)<line_sep>nums_pl1=pl.task.map(<lambda>x:x<power>2 nums_pl)<line_sep>nums_pl2=pl.task.map(<lambda>x:-x nums_pl)<line_sep>nums_pl=pl.task.concat([nums_pl1 nums_pl2])<assert_stmt>sorted(nums_pl)<eq>sorted(nums_py)<block_end>@hp.given(nums=st.lists(st.integers()))@hp.settings(max_examples=MAX_EXAMPLES)@pl.task.utils.run_test_async<async_keyword><def_stmt>test_concat_basic_2 nums:tp.List[int]<block_start>nums_py=list(map(<lambda>x:x+1 nums))<line_sep>nums_py1=list(map(<lambda>x:x<power>2 nums_py))<line_sep>nums_py2=list(map(<lambda>x:-x nums_py))<line_sep>nums_py=nums_py1+nums_py2<line_sep>nums_pl=pl.task.map(<lambda>x:x+1 nums)<line_sep>nums_pl1=pl.task.map(<lambda>x:x<power>2 nums_pl)<line_sep>nums_pl2=pl.task.map(<lambda>x:-x nums_pl)<line_sep>nums_pl=<await>pl.task.concat([nums_pl1 nums_pl2])<assert_stmt>sorted(nums_pl)<eq>sorted(nums_py)<block_end># @hp.given(nums=st.lists(st.integers()))
# @hp.settings(max_examples=MAX_EXAMPLES)
<def_stmt>test_concat_multiple nums:tp.List[int]=[1 2 3]<block_start>nums_py=[x+1<for>x nums]<line_sep>nums_py1=nums_py+nums_py<line_sep>nums_py2=nums_py1+nums_py<line_sep>nums_pl=pl.task.map(<lambda>x:x+1 nums)<line_sep>nums_pl1=pl.task.concat([nums_pl nums_pl])<line_sep>nums_pl2=pl.task.concat([nums_pl1 nums_pl])<line_sep># assert sorted(nums_py1) == sorted(list(nums_pl1))
<assert_stmt>sorted(nums_py2)<eq>sorted(list(nums_pl2))<block_end>@pl.task.utils.run_test_async<async_keyword><def_stmt>test_concat_multiple_2 nums:tp.List[int]=[1 2 3]<block_start>nums_py=[x+1<for>x nums]<line_sep>nums_py1=nums_py+nums_py<line_sep>nums_py2=nums_py1+nums_py<line_sep>nums_pl=pl.task.map(<lambda>x:x+1 nums)<line_sep>nums_pl1=pl.task.concat([nums_pl nums_pl])<line_sep>nums_pl2=<await>pl.task.concat([nums_pl1 nums_pl])<line_sep># assert sorted(nums_py1) == sorted(list(nums_pl1))
<assert_stmt>sorted(nums_py2)<eq>sorted(list(nums_pl2))<block_end> |
<import_stmt>gen_context2<line_sep>GLOBAL="GLOBAL"<def_stmt>gen <block_start>print(GLOBAL)<line_sep><yield>1<block_end>gen_context2.call(gen())<line_sep> |
<import_from_stmt>random shuffle<import_stmt>os<import_from_stmt>glob glob<import_stmt>shutil<import_stmt>re<import_stmt>tqdm<import_from_stmt>multiprocessing Pool<import_from_stmt>normalise normalise<line_sep>months={'jan.':'January' 'feb.':'February' 'mar.':'March' 'apr.':'April' 'may':'May' 'jun.':'June' 'jul.':'July' 'aug.':'August' 'sep.':'September' 'oct.':'October' 'nov.':'November' 'dec.':'December' 'jan':'January' 'feb':'February' 'mar':'March' 'apr':'April' 'jun':'June' 'jul':'July' 'aug':'August' 'sep':'September' 'oct':'October' 'nov':'November' 'dec':'December'}<line_sep>replace_words={'&':'and' '¡':'' 'r&b':'R and B' 'funtime':'fun time' 'español':'espanol' "'s":'s' 'palylist':'playlist'}<line_sep>replace_vocab={'ú':'u' 'ñ':'n' 'Ō':'O' 'â':'a'}<line_sep>reservations={'chyi':'chyi' 'Pre-Party':'pre party' 'Chu':'Chu' 'B&B':'B and B' '0944':'nine four four' 'Box':'Box' 'ain’t':'am not' 'Zon':'Zon' 'Yui':'Yui' 'neto':'neto' 'skepta':'skepta' '¡Fiesta':'Fiesta' 'Vue':'Vue' 'iheart':'iheart' 'disco':'disco'}<line_sep>same="klose la mejor música para tus fiestas dubstep dangles drejer listas".split(' ')<for_stmt>word same<block_start>reservations[word]=word<block_end><def_stmt>word_normalise words<block_start>ret=[]<for_stmt>word words<block_start><if_stmt>word.lower()<in>months<block_start>word=months[word.lower()]<block_end><if_stmt>word.lower()<in>replace_words<block_start>word=replace_words[word.lower()]<block_end><for_stmt>regex replace_vocab<block_start>word=re.sub(regex '' word)<block_end>#word = re.sub(r'(\S)([\.\,\!\?])', r'\1 \2', word)
word=re.sub(r'[\.\,\!\?;\/]' '' word)<line_sep>ret.append(word)<block_end><return>ret<block_end><def_stmt>sent_normalise text slots_split=<none><block_start>norm_slots,norm_texts=[] []<line_sep>text_split=text.split(' ')<if_stmt>slots_split<is><none><block_start>slots_split=['O']<times>len(text_split)<block_end><for_stmt>idx range(len(text_split))<block_start><if_stmt>text_split[idx]<in>'.,!?;/]'<block_start><continue><block_end><if_stmt>text_split[idx]<in>reservations<block_start><for_stmt>word reservations[text_split[idx]].split(' ')<block_start>norm_texts.append(word)<line_sep>norm_slots.append(slots_split[idx])<block_end><continue><block_end>norm_text=normalise(word_normalise([text_split[idx]]) variety="AmE" verbose=<false>)<for_stmt>phrase norm_text<block_start><if_stmt>phrase<eq>''<block_start><continue><block_end><for_stmt>word re.split(r' |\-' phrase)<block_start>word=re.sub(r'[\.\,\!\?;\/]' '' word)<if_stmt>word<eq>''<block_start><continue><block_end>norm_texts.append(word)<line_sep>norm_slots.append(slots_split[idx])<block_end><block_end><block_end><return>norm_slots norm_texts<block_end><def_stmt>process_raw_snips_file file out_f<block_start><with_stmt>open(file)<as>f<block_start>content=f.readlines()<block_end>content=[x.strip()<for>x content]<with_stmt>open(out_f 'w')<as>f<block_start><for_stmt>cnt,line enumerate(content)<block_start>text=line.split(' <=> ')[0]<line_sep>intent=line.split(' <=> ')[1]<line_sep>#[r.split(':')[0] if len(r.split(':')) == 2 else ' ' for r in x.split()]
text_split=[x.replace('::' ':').split(':')[0]<if>len(x.replace('::' ':').split(':'))<eq>2<else>' '<for>x text.split()]<line_sep>text_entities=' '.join(text_split)<line_sep>slots_split=[x.replace('::' ':').split(':')[1]<for>x text.split()]<line_sep>slots_entities=' '.join(slots_split)<assert_stmt>len(text_split)<eq>len(slots_split) (text_split slots_split)<line_sep>f.write('%d | BOS %s EOS | O %s | %s\n'%(cnt text_entities slots_entities intent))<block_end><block_end><block_end><def_stmt>remove_IBO_from_snipt_vocab_slot in_f out_f<block_start><with_stmt>open(in_f)<as>f<block_start>content=f.readlines()<block_end>content=[x.strip()<for>x content]<line_sep># get rid of BIO tag from the slots
<for_stmt>idx,line enumerate(content)<block_start><if_stmt>line<ne>'O'<block_start>content[idx]=line[len('B-'):]<block_end><block_end>content=set(content)# remove repeating slots
<with_stmt>open(out_f 'w')<as>f<block_start><for_stmt>line content<block_start>f.write('%s\n'%line)<block_end><block_end><block_end><def_stmt>process_daniel_snips_file content<block_start>content=[x.strip()<for>x content]<line_sep>utt_ids=[x.split('\t' 1)[0]<for>x content]<line_sep>valid_uttids=[x<for>x utt_ids<if>x.split('-')[1]<eq>'valid']<line_sep>test_uttids=[x<for>x utt_ids<if>x.split('-')[1]<eq>'test']<line_sep>train_uttids=[x<for>x utt_ids<if>x.split('-')[1]<eq>'train']<line_sep>utt2text,utt2slots,utt2intent={} {} {}<assert_stmt>len(utt_ids)<eq>len(set(utt_ids))<line_sep># create utt2text, utt2slots, utt2intent
<for_stmt>line content<block_start>uttid,text,slots,intent=line.split('\t')<if_stmt>len(text.split())<ne>len(slots.split())# detect 'empty' in text
<block_start><assert_stmt>len(text.split(' '))<eq>2<line_sep>empty_idx=text.split().index(text.split(' ')[0].split()[-1])+1<line_sep>slots_list=slots.split()<del_stmt>slots_list[empty_idx]<line_sep>cleaned_slots=' '.join(slots_list)<assert_stmt>len(text.split())<eq>len(slots_list)<line_sep>cleaned_text=' '.join(text.split())<line_sep>#print(cleaned_text, cleaned_slots)
<block_end><else_stmt><block_start>(cleaned_text cleaned_slots)=(text slots)<block_end># get rid of the 'intent/' from all slot values
cleaned_slots=' '.join([x.split('/')[1]<if>x<ne>'O'<else>x<for>x cleaned_slots.split()])<line_sep># strip the whitespaces before punctuations
#cleaned_text = re.sub(r'\s([?.!,"](?:\s|$))', r'\1', cleaned_text)
utt2text[uttid]=cleaned_text<line_sep>utt2slots[uttid]=cleaned_slots<line_sep>utt2intent[uttid]=intent<block_end>test_utt2text,test_utt2slots,test_utt2intent={} {} {}<line_sep>valid_utt2text,valid_utt2slots,valid_utt2intent={} {} {}<line_sep>train_utt2text,train_utt2slots,train_utt2intent={} {} {}<for_stmt>utt valid_uttids<block_start>valid_utt2text[utt]=utt2text[utt]<line_sep>valid_utt2slots[utt]=utt2slots[utt]<line_sep>valid_utt2intent[utt]=utt2intent[utt]<block_end><for_stmt>utt test_uttids<block_start>test_utt2text[utt]=utt2text[utt]<line_sep>test_utt2slots[utt]=utt2slots[utt]<line_sep>test_utt2intent[utt]=utt2intent[utt]<block_end><for_stmt>utt train_uttids<block_start>train_utt2text[utt]=utt2text[utt]<line_sep>train_utt2slots[utt]=utt2slots[utt]<line_sep>train_utt2intent[utt]=utt2intent[utt]<block_end><assert_stmt>len(set(valid_utt2intent.values()))<eq>len(set(test_utt2intent.values()))<eq>len(set(train_utt2intent.values()))<eq>7<assert_stmt>len(valid_utt2intent.keys())<eq>len(test_utt2intent.keys())<eq>700<assert_stmt>len(train_utt2intent.keys())<eq>13084<def_stmt>__return_set_of_slots utt2slots<block_start>all_slots=[]<for_stmt>slot utt2slots.values()<block_start>all_slots.extend(slot.split())<block_end>unique_slots=set(all_slots)<line_sep><return>unique_slots<block_end><assert_stmt>len(__return_set_of_slots(valid_utt2slots))<eq>len(__return_set_of_slots(test_utt2slots))<eq>len(__return_set_of_slots(train_utt2slots))<eq>40<line_sep><return>(train_utt2text train_utt2slots train_utt2intent) (valid_utt2text valid_utt2slots valid_utt2intent) (test_utt2text test_utt2slots test_utt2intent)<block_end><def_stmt>map_and_link_snips_audio snips_audio_dir link_dir# traverse through snips_audio_dir
<block_start>result=[y<for>x os.walk(snips_audio_dir)<for>y glob(os.path.join(x[0] '*.mp3'))]<for_stmt>path result<block_start>person=path.split('/')[8].split('_')[1]<line_sep>filename=path.split('/')[-1]<if_stmt>filename[:5]<ne>'snips'<block_start><continue><block_end>uttid=filename.split('.')[0]<line_sep>new_uttid=person+'-'+filename<line_sep>partition=uttid.split('-')[1]<line_sep>destination=os.path.join(link_dir partition new_uttid)<line_sep>shutil.copyfile(path destination)<block_end><block_end><def_stmt>create_multispk_for_snips output_dir<block_start>speakers="<NAME> <NAME>".split(' ')<line_sep>dataset_info=[{'split':'test' 'num_utts':700} {'split':'valid' 'num_utts':700} {'split':'train' 'num_utts':13084}]<line_sep>test_out_f=open(os.path.join(output_dir 'all.iob.snips.txt') 'w')<for_stmt>data dataset_info<block_start>num_utts=data['num_utts']<line_sep>split=data['split']<with_stmt>open(os.path.join(output_dir 'single-matched-snips.%s.w-intent'%split))<as>f<block_start>content=f.readlines()<block_end>utt2line={x.strip().split()[0]:x.strip()<for>x content}<for_stmt>spk speakers<block_start><for_stmt>num range(num_utts)<block_start>uttid="%s-snips-%s-%d"%(spk split num)#mp3.split('/')[-1].split('.')[0]
line=utt2line["snips-%s-%d"%(split num)]#'-'.join(uttid.split('-')[1:])]
text=line.split('\t')[1].upper()<line_sep>slots=line.split('\t')[2]<line_sep>intent=line.split('\t')[3]<line_sep>test_out_f.write('%s BOS %s EOS\tO %s %s\n'%(uttid text slots intent))<block_end><block_end><block_end>test_out_f.close()<block_end><def_stmt>apply_text_norm_and_modify_slots all_tsv output_dir<block_start>train_dirs,valid_dirs,test_dirs=process_daniel_snips_file(all_tsv)<line_sep># test
test_file=open(os.path.join(output_dir 'single-matched-snips.test.w-intent') 'w')<line_sep>vocab_slot={}<for_stmt>uttid tqdm.tqdm(test_dirs[0].keys() desc='Text Normalising on testing set')<block_start>text=test_dirs[0][uttid]<line_sep>slots=test_dirs[1][uttid]<line_sep>intent=test_dirs[2][uttid]<line_sep>slots_split=slots.split()<for_stmt>s slots_split<block_start>vocab_slot.setdefault(s 0)<line_sep>vocab_slot[s]<augadd>1<block_end>norm_slots,norm_texts=sent_normalise(text slots_split)<assert_stmt>len(norm_texts)<eq>len(norm_slots) (norm_texts norm_slots)<line_sep># write to file
test_file.write('%s\t%s\t%s\t%s\n'%(uttid ' '.join(norm_texts).upper() ' '.join(norm_slots) intent))<block_end>test_file.close()<line_sep># valid
valid_file=open(os.path.join(output_dir 'single-matched-snips.valid.w-intent') 'w')<for_stmt>uttid tqdm.tqdm(valid_dirs[0].keys() desc='Text Normalising on validation set')<block_start>text=valid_dirs[0][uttid]<line_sep>slots=valid_dirs[1][uttid]<line_sep>intent=valid_dirs[2][uttid]<line_sep>slots_split=slots.split()<for_stmt>s slots_split<block_start>vocab_slot.setdefault(s 0)<line_sep>vocab_slot[s]<augadd>1<block_end>norm_slots,norm_texts=sent_normalise(text slots_split)<assert_stmt>len(norm_texts)<eq>len(norm_slots) (norm_texts norm_slots)<line_sep># write to file
valid_file.write('%s\t%s\t%s\t%s\n'%(uttid ' '.join(norm_texts).upper() ' '.join(norm_slots) intent))<block_end>valid_file.close()<line_sep># train
train_file=open(os.path.join(output_dir 'single-matched-snips.train.w-intent') 'w')<for_stmt>uttid tqdm.tqdm(train_dirs[0].keys() desc='Text Normalising on training set')<block_start>text=train_dirs[0][uttid]<line_sep>slots=train_dirs[1][uttid]<line_sep>intent=train_dirs[2][uttid]<line_sep>slots_split=slots.split()<for_stmt>s slots_split<block_start>vocab_slot.setdefault(s 0)<line_sep>vocab_slot[s]<augadd>1<block_end>norm_slots,norm_texts=sent_normalise(text slots_split)<assert_stmt>len(norm_texts)<eq>len(norm_slots) (norm_texts norm_slots)<line_sep># write to file
train_file.write('%s\t%s\t%s\t%s\n'%(uttid ' '.join(norm_texts).upper() ' '.join(norm_slots) intent))<block_end>train_file.close()<line_sep>vocab_file=open(os.path.join(output_dir 'slots.txt') 'w')<line_sep>vocab_file.write('\n'.join(sorted(list(vocab_slot.keys()) key=<lambda>x:vocab_slot[x] reverse=<true>)))<block_end><def_stmt>sox_func inputs<block_start>files,root,out_root,speaker=inputs<for_stmt>name tqdm.tqdm(files desc='Process for speaker: '+speaker)<block_start><if_stmt>name.endswith(".mp3")<block_start>split=name.split('-')[1]<line_sep>out_dir=os.path.join(out_root split)<line_sep>os.makedirs(out_dir exist_ok=<true>)<line_sep>orig_file=os.path.join(root name)<line_sep>new_file=os.path.join(out_dir speaker+'-'+name.split('/')[-1].split('.')[0]+'.wav')<line_sep>bashCommand="sox "+orig_file+" -t wav -c 1 -r 16000 -b 16 -e signed-integer "+new_file<line_sep>r=os.popen(bashCommand).read()<block_end><block_end><block_end><def_stmt>sox_mp3_to_wav in_root out_root<block_start>os.makedirs(out_root exist_ok=<true>)<line_sep>pool=Pool(16)<line_sep>inputs=[]<for_stmt>root,dirs,files os.walk(in_root)<block_start>print('[Processing] enter directory %s'%root)<if_stmt><not>len(files)<block_start><continue><block_end>speaker=root.split('/')[-2].split('_')[1]<line_sep>print('[Processing] process %d audio files from speaker %s'%(len(files) speaker))<line_sep>inputs.append((files root out_root speaker))<block_end>pool.map(sox_func inputs)<block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys os<line_sep>mode=sys.argv[1]<if_stmt>mode<eq>'text'<block_start>repo_dir=sys.argv[2]<line_sep>dump_dir=sys.argv[3]<line_sep>os.makedirs(dump_dir exist_ok=<true>)<line_sep>content=[]<line_sep>content<augadd>open(os.path.join(repo_dir 'data/nlu_annotation/valid')).readlines()[1:]<line_sep>content<augadd>open(os.path.join(repo_dir 'data/nlu_annotation/test')).readlines()[1:]<line_sep>content<augadd>open(os.path.join(repo_dir 'data/nlu_annotation/train')).readlines()[1:]<line_sep>apply_text_norm_and_modify_slots(content dump_dir)<line_sep>create_multispk_for_snips(dump_dir)<block_end><elif_stmt>mode<eq>'audio'<block_start>audio_dir=sys.argv[2]<line_sep>dump_dir=sys.argv[3]<line_sep># Step: sox the snips *.mp3 to the correct format
sox_mp3_to_wav(audio_dir dump_dir)<block_end><else_stmt><block_start>print('Usage: python preprocess.py [text|audio] [data_path] [dump_path]')<block_end><block_end> |
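# Illustrative call into the helpers above (a sketch, not a verified run; the
# `normalise` package must be installed and the tokens/labels are made up):
# slots, words = sent_normalise('play jazz on dec. 3rd',
#                               ['O', 'B-genre', 'O', 'B-date', 'I-date'])
# Each normalised word keeps the slot label of the source token it expanded from.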
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
"""
CLI for listing, enabling, and disabling intake drivers
"""<import_from_stmt>intake __version__<import_from_stmt>intake.cli.util Subcommand<import_from_stmt>intake.source.discovery drivers<import_stmt>logging<line_sep>log=logging.getLogger(__name__)<line_sep># -----------------------------------------------------------------------------
# API
# -----------------------------------------------------------------------------
<class_stmt>Drivers(Subcommand)<block_start>"""
List, enable, and disable intake drivers.
"""<line_sep>name="drivers"<def_stmt>initialize self<block_start>sub_parser=self.parser.add_subparsers()<line_sep>list=sub_parser.add_parser('list' help='Show all intake drivers, whether enabled, disabled, '<concat>'or directly inserted into the registry')<line_sep>list.add_argument('-v' '--verbose' action='store_true' help='Show module path.')<line_sep>list.set_defaults(invoke=self._list)<line_sep>enable=sub_parser.add_parser('enable' help='Enable an intake driver.')<line_sep>enable.add_argument('name' type=str help='Driver name')<line_sep>enable.add_argument('driver' type=str default=<none> nargs='?' help='Module path and class name, as in '<concat>'package.submodule.ClassName')<line_sep>enable.set_defaults(invoke=self._enable)<line_sep>disable=sub_parser.add_parser('disable' help='Disable one or more intake drivers.')<line_sep>disable.add_argument('names' type=str help='Driver names' nargs='+')<line_sep>disable.set_defaults(invoke=self._disable)<block_end><def_stmt>invoke self args<block_start>self.parser.print_help()<block_end><def_stmt>_list self args<block_start><if_stmt>drivers.do_scan<block_start>print("Package scan:")<for_stmt>k,v drivers.scanned.items()<block_start>print(f'{k:<30}{v.__module__}.{v.__name__}')<block_end>print()<block_end>print("Entrypoints:")<line_sep>eps=[ep<for>ep drivers.from_entrypoints()<if>ep.name<not><in>drivers.disabled()]<if_stmt>eps<block_start><for_stmt>v eps<block_start>print(f'{v.name:<30}{v.module_name}:{v.object_name}')<block_end><block_end><else_stmt><block_start>print("<none>")<block_end>print()<line_sep>print("From Config:")<line_sep>eps=[ep<for>ep drivers.from_conf()<if>ep.name<not><in>drivers.disabled()]<if_stmt>eps<block_start><for_stmt>v eps<block_start><if_stmt>v.name<not><in>drivers.disabled()<block_start>print(f'{v.name:<30}{v.module_name}:{v.object_name}')<block_end><block_end><block_end><else_stmt><block_start>print("<none>")<block_end>print()<line_sep>print("Disabled: " drivers.disabled()<or>"<none>")<block_end><def_stmt>_enable self args<block_start>drivers.enable(args.name args.driver)<block_end><def_stmt>_disable self args<block_start><for_stmt>name args.names<block_start>drivers.disable(name)<block_end><block_end><block_end> |
<import_from_stmt>archive_service ArchiveService ArchiveException<import_from_stmt>archiveorg ArchiveOrgService<import_from_stmt>archivetoday ArchiveTodayService<line_sep> |
# coding: utf8
"""
This software is licensed under the Apache 2 license, quoted below.
Copyright 2014 Crystalnix Limited
Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.
"""<import_from_stmt>builtins str<import_stmt>os<import_from_stmt>django.test TestCase<import_from_stmt>django.core.files.uploadedfile SimpleUploadedFile<import_from_stmt>omaha.tests.utils temporary_media_root<import_from_stmt>crash.models Symbols Crash<import_from_stmt>crash.serializers SymbolsSerializer CrashSerializer<line_sep>BASE_DIR=os.path.dirname(__file__)<line_sep>TEST_DATA_DIR=os.path.join(BASE_DIR 'testdata')<line_sep>SYM_FILE=os.path.join(TEST_DATA_DIR 'BreakpadTestApp.sym')<class_stmt>SymbolsSerializerTest(TestCase)<block_start><def_stmt>test_serializer self<block_start>data=dict(file=SimpleUploadedFile('./test.pdb' <false>) debug_id='C1C0FA629EAA4B4D9DD2ADE270A231CC1' debug_file='BreakpadTestApp.pdb')<line_sep>symbols=Symbols.objects.create(**data)<line_sep>self.assertDictEqual(SymbolsSerializer(symbols).data dict(id=symbols.id debug_id='C1C0FA629EAA4B4D9DD2ADE270A231CC1' debug_file='BreakpadTestApp.pdb' file=symbols.file.url file_size=symbols.file_size created=symbols.created.strftime('%Y-%m-%dT%H:%M:%S.%fZ') modified=symbols.modified.strftime('%Y-%m-%dT%H:%M:%S.%fZ') ))<block_end>@temporary_media_root(MEDIA_URL='http://cache.pack.google.com/edgedl/chrome/install/782.112/')<def_stmt>test_auto_fill_file_size self<block_start><with_stmt>open(SYM_FILE 'rb')<as>f<block_start>data=dict(file=SimpleUploadedFile('./BreakpadTestApp.sym' f.read()))<block_end>symbols=SymbolsSerializer(data=data)<line_sep>self.assertTrue(symbols.is_valid())<line_sep>symbols_instance=symbols.save()<line_sep>self.assertEqual(symbols_instance.debug_id 'C1C0FA629EAA4B4D9DD2ADE270A231CC1')<line_sep>self.assertEqual(symbols_instance.debug_file 'BreakpadTestApp.pdb')<line_sep>self.assertEqual(symbols_instance.file_size 68149)<block_end><block_end><class_stmt>CrashSerializerTest(TestCase)<block_start>maxDiff=<none><line_sep>@temporary_media_root(CELERY_ALWAYS_EAGER=<false> CELERY_EAGER_PROPAGATES_EXCEPTIONS=<false> )<def_stmt>test_serializer self<block_start>meta=dict(lang='en' version='1.0.0.1' )<line_sep>stacktrace_json=dict(crashing_thread={} )<line_sep>app_id='{D0AB2EBC-931B-4013-9FEB-C9C4C2225C8C}'<line_sep>user_id='{2882CF9B-D9C2-4edb-9AAF-8ED5FCF366F7}'<line_sep>crash=Crash.objects.create(appid=app_id userid=user_id upload_file_minidump=SimpleUploadedFile('./dump.dat' b'') meta=meta stacktrace_json=stacktrace_json)<line_sep>self.assertDictEqual(CrashSerializer(crash).data dict(id=crash.id upload_file_minidump=crash.upload_file_minidump.url archive=<none> appid=str(crash.appid) userid=str(crash.userid) meta=meta signature=crash.signature stacktrace_json=crash.stacktrace_json created=crash.created.strftime('%Y-%m-%dT%H:%M:%S.%fZ') modified=crash.modified.strftime('%Y-%m-%dT%H:%M:%S.%fZ') os=<none> build_number=<none> channel=''))<block_end><block_end> |
<def_stmt>minion_game string<block_start>length=len(string)<line_sep>the_vowel="AEIOU"<line_sep>kevin=0<line_sep>stuart=0<for_stmt>i range(length)<block_start><if_stmt>string[i]<in>the_vowel<block_start>kevin=kevin+length-i<block_end><else_stmt><block_start>stuart=stuart+length-i<block_end><block_end><if_stmt>kevin<g>stuart<block_start>print("Kevin %d"%kevin)<block_end><elif_stmt>kevin<l>stuart<block_start>print("Stuart %d"%stuart)<block_end><else_stmt><block_start>print("Draw")<block_end><block_end> |
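# Worked example: for "BANANA", Stuart's consonant-started substrings score
# 6 + 4 + 2 = 12 while Kevin's vowel-started substrings score 5 + 3 + 1 = 9.
minion_game("BANANA")  # prints: Stuart 12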
<import_stmt>theano.tensor<as>T<class_stmt>MaxNorm(object)<block_start><def_stmt>__init__ self max_norm=5<block_start>self.max_norm=max_norm<block_end><def_stmt>__call__ self grads<block_start>norm=T.sqrt(sum([T.sum(g<power>2)<for>g grads]))<line_sep><return>[self.clip_norm(g self.max_norm norm)<for>g grads]<block_end><def_stmt>clip_norm self g c n<block_start><if_stmt>c<g>0<block_start>g=T.switch(T.ge(n c) g<times>c/n g)<block_end><return>g<block_end><block_end><class_stmt>Clip(object)<block_start><def_stmt>__init__ self clip=5<block_start>self.clip=clip<block_end><def_stmt>__call__ self grads<block_start><return>[T.clip(g -self.clip self.clip)<for>g grads]<block_end><block_end> |
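# Sketch of how these callables slot into a training step (assumes Theano is
# available; `cost` and `params` are placeholders, not defined in this module):
# grads = T.grad(cost, params)
# grads = MaxNorm(max_norm=5)(grads)  # rescale all grads if their global L2 norm > 5
# grads = Clip(clip=5)(grads)         # then clip each element to [-5, 5]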
<class_stmt>OnegramException(Exception)<block_start><pass><block_end># TODO [romeira]: Login exceptions {06/03/18 23:07}
<class_stmt>AuthException(OnegramException)<block_start><pass><block_end><class_stmt>AuthFailed(AuthException)<block_start><pass><block_end><class_stmt>AuthUserError(AuthException)<block_start><pass><block_end><class_stmt>NotSupportedError(OnegramException)<block_start><pass><block_end><class_stmt>RequestFailed(OnegramException)<block_start><pass><block_end><class_stmt>RateLimitedError(RequestFailed)<block_start><pass><block_end># TODO [romeira]: Query/action exceptions {06/03/18 23:08}
# TODO [romeira]: Session expired exception {06/03/18 23:08}
# TODO [romeira]: Private user exception/warning {06/03/18 23:09}
# TODO [romeira]: Not found exception {06/03/18 23:12}
# TODO [romeira]: Already following/liked/commented? warnings {06/03/18 23:12}
# TODO [romeira]: Timeout exception {06/03/18 23:12}
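# Sketch of the intended handling at a call site (the helper calls are hypothetical):
# try:
#     run_query()
# except RateLimitedError:
#     back_off_and_retry()
# except AuthException:
#     reauthenticate()  # covers AuthFailed and AuthUserError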
|
<import_stmt>itertools<import_stmt>ucd<line_sep>ABC_LINES='''
0040;COMMERCIAL AT;Po;0;ON;;;;;N;;;;;
0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;
0042;LATIN CAPITAL LETTER B;Lu;0;L;;;;;N;;;;0062;
0043;LATIN CAPITAL LETTER C;Lu;0;L;;;;;N;;;;0063;
'''.strip()<def_stmt>test_parse_line <block_start>line_A='0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;'<line_sep>code,name,old_name,words=ucd.parse_line(line_A)<assert_stmt>code<eq>65<assert_stmt>name<eq>'LATIN CAPITAL LETTER A'<assert_stmt>old_name<eq>''<assert_stmt>words<eq>['A' 'CAPITAL' 'LATIN' 'LETTER']<block_end><def_stmt>test_parse_line_with_hyphen_and_field_10 <block_start>cases=[('002D;HYPHEN-MINUS;Pd;0;ES;;;;;N;;;;;' 45 'HYPHEN-MINUS' '' ['HYPHEN' 'MINUS']) ('005F;LOW LINE;Pc;0;ON;;;;;N;SPACING UNDERSCORE;;;;' 95 'LOW LINE' 'SPACING UNDERSCORE' ['LINE' 'LOW' 'SPACING' 'UNDERSCORE']) ('0027;APOSTROPHE;Po;0;ON;;;;;N;APOSTROPHE-QUOTE;;;' 39 'APOSTROPHE' 'APOSTROPHE-QUOTE' ['APOSTROPHE' 'QUOTE']) ]<for_stmt>line,*fields_ok cases<block_start>fields=ucd.parse_line(line)<assert_stmt>fields<eq>tuple(fields_ok)<block_end><block_end><def_stmt>test_parser_top_3 <block_start>records=list(itertools.islice(ucd.parser() 3))<assert_stmt>records<eq>[(32 'SPACE' '' ['SPACE']) (33 'EXCLAMATION MARK' '' ['EXCLAMATION' 'MARK']) (34 'QUOTATION MARK' '' ['MARK' 'QUOTATION']) ]<block_end><def_stmt>test_index <block_start>line='003E;GREATER-THAN SIGN;Sm;0;ON;;;;;Y;;;;;'<line_sep>record=ucd.parse_line(line)<line_sep>idx=ucd.index([record])<assert_stmt>idx<eq>{'GREATER':[62] 'SIGN':[62] 'THAN':[62]}<block_end><def_stmt>test_index_abc <block_start>records=[ucd.parse_line(line)<for>line ABC_LINES.split('\n')]<line_sep>idx=ucd.index(records)<assert_stmt>idx<eq>{'A':[65] 'AT':[64] 'B':[66] 'C':[67] 'CAPITAL':[65 66 67] 'COMMERCIAL':[64] 'LATIN':[65 66 67] 'LETTER':[65 66 67] }<block_end> |
<import_from_stmt>collections namedtuple<import_from_stmt>itertools islice<import_from_stmt>..errors Error<as>DriverError<import_from_stmt>.errors InterfaceError OperationalError ProgrammingError<line_sep>Column=namedtuple('Column' 'name type_code display_size internal_size precision scale null_ok')<class_stmt>Cursor(object)<block_start><class_stmt>States(object)<block_start>(NONE RUNNING FINISHED CURSOR_CLOSED)=range(4)<block_end>_states=States()<def_stmt>__init__ self client connection<block_start>self._client=client<line_sep>self._connection=connection<line_sep>self._reset_state()<line_sep>self.arraysize=1<line_sep># Begin non-PEP attributes
self._columns_with_types=<none><line_sep># End non-PEP attributes
super(Cursor self).__init__()<block_end><def_stmt>__repr__ self<block_start>is_closed=self._state<eq>self._states.CURSOR_CLOSED<line_sep><return>'<cursor object at 0x{0:x}; closed: {1:}>'.format(id(self) is_closed)<block_end># Iteration support.
<def_stmt>__iter__ self<block_start><while_stmt><true><block_start>one=self.fetchone()<if_stmt>one<is><none><block_start><return><block_end><yield>one<block_end><block_end># Context manager integrations.
<def_stmt>__enter__ self<block_start><return>self<block_end><def_stmt>__exit__ self exc_type exc_val exc_tb<block_start>self.close()<block_end>@property<def_stmt>description self<block_start><if_stmt>self._state<eq>self._states.NONE<block_start><return><none><block_end>columns=self._columns<or>[]<line_sep>types=self._types<or>[]<line_sep><return>[Column(name type_code <none> <none> <none> <none> <true>)<for>name,type_code zip(columns types)]<block_end>@property<def_stmt>rowcount self<block_start>"""
:return: the number of rows that the last .execute*() produced.
"""<line_sep><return>self._rowcount<block_end><def_stmt>close self<block_start>"""
Close the cursor now. The cursor will be unusable from this point
forward; an :data:`~clickhouse_driver.dbapi.Error` (or subclass)
exception will be raised if any operation is attempted with the
cursor.
"""<line_sep>self._client.disconnect()<line_sep>self._state=self._states.CURSOR_CLOSED<try_stmt># cursor can be already closed
<block_start>self._connection.cursors.remove(self)<block_end><except_stmt>ValueError<block_start><pass><block_end><block_end><def_stmt>execute self operation parameters=<none><block_start>"""
Prepare and execute a database operation (query or command).
:param operation: query or command to execute.
:param parameters: sequence or mapping that will be bound to
variables in the operation.
:return: None
"""<line_sep>self._check_cursor_closed()<line_sep>self._begin_query()<try_stmt><block_start>execute,execute_kwargs=self._prepare()<line_sep>response=execute(operation params=parameters with_column_types=<true> **execute_kwargs)<block_end><except_stmt>DriverError<as>orig<block_start><raise>OperationalError(orig)<block_end>self._process_response(response)<line_sep>self._end_query()<block_end><def_stmt>executemany self operation seq_of_parameters<block_start>"""
Prepare a database operation (query or command) and then execute it
against all parameter sequences found in the sequence
`seq_of_parameters`.
:param operation: query or command to execute.
:param seq_of_parameters: sequences or mappings for execution.
:return: None
"""<line_sep>self._check_cursor_closed()<line_sep>self._begin_query()<try_stmt><block_start>execute,execute_kwargs=self._prepare()<line_sep>response=execute(operation params=seq_of_parameters **execute_kwargs)<block_end><except_stmt>DriverError<as>orig<block_start><raise>OperationalError(orig)<block_end>self._process_response(response executemany=<true>)<line_sep>self._end_query()<block_end><def_stmt>fetchone self<block_start>"""
Fetch the next row of a query result set, returning a single sequence,
or None when no more data is available.
:return: the next row of a query result set or None.
"""<line_sep>self._check_query_started()<if_stmt>self._stream_results<block_start><return>next(self._rows <none>)<block_end><else_stmt><block_start><if_stmt><not>self._rows<block_start><return><none><block_end><return>self._rows.pop(0)<block_end><block_end><def_stmt>fetchmany self size=<none><block_start>"""
Fetch the next set of rows of a query result, returning a sequence of
sequences (e.g. a list of tuples). An empty sequence is returned when
no more rows are available.
:param size: amount of rows to return.
:return: list of fetched rows or empty list.
"""<line_sep>self._check_query_started()<if_stmt>size<is><none><block_start>size=self.arraysize<block_end><if_stmt>self._stream_results<block_start><if_stmt>size<eq>-1<block_start><return>list(self._rows)<block_end><else_stmt><block_start><return>list(islice(self._rows size))<block_end><block_end><if_stmt>size<l>0<block_start>rv=self._rows<line_sep>self._rows=[]<block_end><else_stmt><block_start>rv=self._rows[:size]<line_sep>self._rows=self._rows[size:]<block_end><return>rv<block_end><def_stmt>fetchall self<block_start>"""
Fetch all (remaining) rows of a query result, returning them as a
sequence of sequences (e.g. a list of tuples).
:return: list of fetched rows.
"""<line_sep>self._check_query_started()<if_stmt>self._stream_results<block_start><return>list(self._rows)<block_end>rv=self._rows<line_sep>self._rows=[]<line_sep><return>rv<block_end><def_stmt>setinputsizes self sizes# Do nothing.
<block_start><pass><block_end><def_stmt>setoutputsize self size column=<none># Do nothing.
<block_start><pass><block_end># Begin non-PEP methods
@property<def_stmt>columns_with_types self<block_start>"""
:return: list of column names with corresponding types of the last
.execute*(). E.g. [('x', 'UInt64')].
"""<line_sep><return>self._columns_with_types<block_end><def_stmt>set_stream_results self stream_results max_row_buffer<block_start>"""
Toggles results streaming from the server. The driver will consume
blocks of `max_row_buffer` rows and yield rows one by one from each
block.
:param stream_results: enable or disable results streaming.
:param max_row_buffer: specifies the maximum number of rows to buffer
at a time.
:return: None
"""<line_sep>self._stream_results=stream_results<line_sep>self._max_row_buffer=max_row_buffer<block_end><def_stmt>set_settings self settings<block_start>"""
Specifies settings for cursor.
:param settings: dictionary of query settings
:return: None
"""<line_sep>self._settings=settings<block_end><def_stmt>set_types_check self types_check<block_start>"""
Toggles type checking for sequence of INSERT parameters.
Disabled by default.
:param types_check: new types check value.
:return: None
"""<line_sep>self._types_check=types_check<block_end><def_stmt>set_external_table self name structure data<block_start>"""
Adds external table to cursor context.
If the same table is specified more than once the last one is used.
:param name: name of external table
:param structure: list of tuples (name, type) that defines table
structure, e.g. [('x', 'Int32')].
:param data: sequence of rows of tuples or dicts for transmission.
:return: None
"""<line_sep>self._external_tables[name]=(structure data)<block_end><def_stmt>set_query_id self query_id<block_start>"""
Specifies the query identifier for cursor.
:param query_id: the query identifier.
:return: None
"""<line_sep>self._query_id=query_id<block_end># End non-PEP methods
# Private methods.
<def_stmt>_prepare self<block_start>external_tables=[{'name':name 'structure':structure 'data':data}<for>name,(structure data) self._external_tables.items()]<or><none><line_sep>execute=self._client.execute<if_stmt>self._stream_results<block_start>execute=self._client.execute_iter<line_sep>self._settings=self._settings<or>{}<line_sep>self._settings['max_block_size']=self._max_row_buffer<block_end>execute_kwargs={'settings':self._settings 'external_tables':external_tables 'types_check':self._types_check 'query_id':self._query_id}<line_sep><return>execute execute_kwargs<block_end><def_stmt>_process_response self response executemany=<false><block_start><if_stmt>executemany<block_start>self._rowcount=response<line_sep>response=<none><block_end><if_stmt><not>response<or>isinstance(response int)<block_start>self._columns=self._types=self._rows=[]<if_stmt>isinstance(response int)<block_start>self._rowcount=response<block_end><return><block_end><if_stmt>self._stream_results<block_start>columns_with_types=next(response)<line_sep>rows=response<block_end><else_stmt><block_start>rows,columns_with_types=response<block_end>self._columns_with_types=columns_with_types<line_sep># Only SELECT queries have columns_with_types.
# DDL and INSERT INTO ... SELECT queries have empty columns header.
# We need to obtain rows count only during non-streaming SELECTs.
<if_stmt>columns_with_types<block_start>self._columns,self._types=zip(*columns_with_types)<if_stmt><not>self._stream_results<block_start>self._rowcount=len(rows)<block_end><block_end><else_stmt><block_start>self._columns=self._types=[]<block_end>self._rows=rows<block_end><def_stmt>_reset_state self<block_start>"""
Resets query state and gets ready for another query.
"""<line_sep>self._state=self._states.NONE<line_sep>self._columns=<none><line_sep>self._types=<none><line_sep>self._rows=<none><line_sep>self._rowcount=-1<line_sep>self._stream_results=<false><line_sep>self._max_row_buffer=0<line_sep>self._settings=<none><line_sep>self._query_id=<none><line_sep>self._external_tables={}<line_sep>self._types_check=<false><block_end><def_stmt>_begin_query self<block_start>self._state=self._states.RUNNING<block_end><def_stmt>_end_query self<block_start>self._state=self._states.FINISHED<block_end><def_stmt>_check_cursor_closed self<block_start><if_stmt>self._state<eq>self._states.CURSOR_CLOSED<block_start><raise>InterfaceError('cursor already closed')<block_end><block_end><def_stmt>_check_query_started self<block_start><if_stmt>self._state<eq>self._states.NONE<block_start><raise>ProgrammingError('no results to fetch')<block_end><block_end><block_end> |
"""
Examples:
>>> square(1)
1
>>> square(2)
4
>>> square(3)
9
>>> spam = Spam()
>>> spam.eggs()
42
"""<def_stmt>square x<block_start>"""
Examples:
>>> square(1)
1
>>> square(2)
4
>>> square(3)
9
"""<line_sep><return>x<times>x<block_end><class_stmt>Spam(object)<block_start>"""
Examples:
>>> spam = Spam()
>>> spam.eggs()
42
"""<def_stmt>eggs self<block_start>"""
Examples:
>>> spam = Spam()
>>> spam.eggs()
42
"""<line_sep><return>42<block_end><block_end> |
<import_stmt>logging<import_stmt>os<import_from_stmt>typing Optional Tuple List<import_from_stmt>ray.autoscaler.sdk rsync configure_logging<import_from_stmt>ray.util get_node_ip_address<import_from_stmt>ray.util.debug log_once<import_from_stmt>ray.tune.syncer NodeSyncer<import_from_stmt>ray.tune.sync_client SyncClient<import_from_stmt>ray.ray_constants env_integer<line_sep>logger=logging.getLogger(__name__)<class_stmt>DockerSyncer(NodeSyncer)<block_start>"""DockerSyncer used for synchronization between Docker containers.
This syncer extends the node syncer, but is usually instantiated
without a custom sync client. The sync client defaults to
``DockerSyncClient`` instead.
Set the env var `TUNE_SYNCER_VERBOSITY` to increase verbosity
of syncing operations (0, 1, 2, 3). Defaults to 0.
.. note::
This syncer only works with the Ray cluster launcher.
If you use your own Docker setup, make sure the nodes can connect
to each other via SSH, and try the regular SSH-based syncer instead.
Example:
.. code-block:: python
from ray.tune.integration.docker import DockerSyncer
tune.run(train,
sync_config=tune.SyncConfig(
sync_to_driver=DockerSyncer))
"""<line_sep>_cluster_config_file=os.path.expanduser("~/ray_bootstrap_config.yaml")<def_stmt>__init__ self local_dir:str remote_dir:str sync_client:Optional[SyncClient]=<none><block_start>configure_logging(log_style="record" verbosity=env_integer("TUNE_SYNCER_VERBOSITY" 0))<line_sep>self.local_ip=get_node_ip_address()<line_sep>self.worker_ip=<none><line_sep>sync_client=sync_client<or>DockerSyncClient()<line_sep>sync_client.configure(self._cluster_config_file)<line_sep>super(NodeSyncer self).__init__(local_dir remote_dir sync_client)<block_end><def_stmt>set_worker_ip self worker_ip:str<block_start>self.worker_ip=worker_ip<block_end>@property<def_stmt>_remote_path self<arrow>Tuple[str str]<block_start><return>(self.worker_ip self._remote_dir)<block_end><block_end><class_stmt>DockerSyncClient(SyncClient)<block_start>"""DockerSyncClient to be used by DockerSyncer.
This client takes care of executing the synchronization
commands for Docker nodes. In its ``sync_down`` and
``sync_up`` commands, it expects tuples for the source
and target, respectively, for compatibility with docker.
Args:
should_bootstrap: Whether to bootstrap the autoscaler
configuration. This may be useful when you are
running into authentication problems, e.g.:
https://github.com/ray-project/ray/issues/17756.
"""<def_stmt>__init__ self should_bootstrap:bool=<true><block_start>self._command_runners={}<line_sep>self._cluster_config=<none><if_stmt>os.environ.get("TUNE_SYNC_DISABLE_BOOTSTRAP")<eq>"1"<block_start>should_bootstrap=<false><line_sep>logger.debug("Skipping bootstrap for docker sync client.")<block_end>self._should_bootstrap=should_bootstrap<block_end><def_stmt>configure self cluster_config_file:str<block_start>self._cluster_config_file=cluster_config_file<block_end><def_stmt>sync_up self source:str target:Tuple[str str] exclude:Optional[List]=<none><arrow>bool<block_start>"""Here target is a tuple (target_node, target_dir)"""<line_sep>target_node,target_dir=target<line_sep># Add trailing slashes for rsync
source=os.path.join(source "")<line_sep>target_dir=os.path.join(target_dir "")<import_stmt>click<try_stmt><block_start>rsync(cluster_config=self._cluster_config_file source=source target=target_dir down=<false> ip_address=target_node should_bootstrap=self._should_bootstrap use_internal_ip=<true>)<block_end><except_stmt>click.ClickException<block_start><if_stmt>log_once("docker_rsync_up_fail")<block_start>logger.warning("Rsync-up failed. Consider using a durable trainable "<concat>"or setting the `TUNE_SYNC_DISABLE_BOOTSTRAP=1` env var.")<block_end><raise><block_end><return><true><block_end><def_stmt>sync_down self source:Tuple[str str] target:str exclude:Optional[List]=<none><arrow>bool<block_start>"""Here source is a tuple (source_node, source_dir)"""<line_sep>source_node,source_dir=source<line_sep># Add trailing slashes for rsync
source_dir=os.path.join(source_dir "")<line_sep>target=os.path.join(target "")<import_stmt>click<try_stmt><block_start>rsync(cluster_config=self._cluster_config_file source=source_dir target=target down=<true> ip_address=source_node should_bootstrap=self._should_bootstrap use_internal_ip=<true>)<block_end><except_stmt>click.ClickException<block_start><if_stmt>log_once("docker_rsync_down_fail")<block_start>logger.warning("Rsync-down failed. Consider using a durable trainable "<concat>"or setting the `TUNE_SYNC_DISABLE_BOOTSTRAP=1` env var.")<block_end><raise><block_end><return><true><block_end><def_stmt>delete self target:str<arrow>bool<block_start><raise>NotImplementedError<block_end><block_end> |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
<import_stmt>os<import_stmt>unittest<import_from_stmt>cdm.enums CdmObjectType<import_from_stmt>cdm.enums.cdm_operation_type CdmOperationType<import_from_stmt>cdm.objectmodel CdmCorpusDefinition CdmFolderDefinition CdmProjection CdmOperationAddCountAttribute CdmOperationAddSupportingAttribute CdmOperationAddTypeAttribute CdmOperationExcludeAttributes CdmOperationArrayExpansion CdmOperationCombineAttributes CdmOperationRenameAttributes CdmOperationReplaceAsForeignKey CdmOperationIncludeAttributes CdmObject<import_from_stmt>cdm.storage LocalAdapter<import_from_stmt>tests.common async_test TestHelper<import_from_stmt>tests.utilities.projection_test_utils ProjectionTestUtils<class_stmt>ProjectionObjectModelTest(unittest.TestCase)<block_start>foundation_json_path='cdm:/foundations.cdm.json'<line_sep># The path between TestDataPath and TestName.
tests_subpath=os.path.join('Cdm' 'Projection')<line_sep>@async_test<async_keyword><def_stmt>test_projection_using_object_model self<block_start>"""Basic test to save projection based entities and then try to reload them and validate that the projections were persisted correctly"""<line_sep>corpus=ProjectionTestUtils.get_local_corpus(self.tests_subpath 'test_projection_using_object_model')<line_sep>corpus.storage.mount('local' LocalAdapter(TestHelper.get_actual_output_folder_path(self.tests_subpath 'test_projection_using_object_model')))<line_sep>local_root=corpus.storage.fetch_root_folder('local')<line_sep>manifest_default=self._create_default_manifest(corpus local_root)<line_sep>entity_test_source=self._create_entity_test_source(corpus manifest_default local_root)<line_sep>entity_test_entity_projection=self._create_entity_test_entity_projection(corpus manifest_default local_root)<line_sep>entity_test_entity_nested_projection=self._create_entity_test_entity_nested_projection(corpus manifest_default local_root)<line_sep>entity_test_entity_attribute_projection=self._create_entity_test_entity_attribute_projection(corpus manifest_default local_root)<line_sep>entity_test_operation_collection=self._create_entity_test_operation_collection(corpus manifest_default local_root)<line_sep># Save manifest and entities
<await>manifest_default.save_as_async('{}.manifest.cdm.json'.format(manifest_default.manifest_name) <true>)<line_sep>expected='TestSource'<line_sep>expected_type=CdmObjectType.PROJECTION_DEF<line_sep>actual=<none><line_sep>actual_type=CdmObjectType.ERROR<line_sep># Try to read back the newly persisted manifest and projection based entities
manifest_read_back=<await>corpus.fetch_object_async('local:/{}.manifest.cdm.json'.format(manifest_default.manifest_name))<line_sep>self.assertEqual(5 len(manifest_read_back.entities))<line_sep>self.assertEqual(entity_test_source.entity_name manifest_read_back.entities[0].entity_name)<line_sep>self.assertEqual(entity_test_entity_projection.entity_name manifest_read_back.entities[1].entity_name)<line_sep>self.assertEqual(entity_test_entity_nested_projection.entity_name manifest_read_back.entities[2].entity_name)<line_sep>self.assertEqual(entity_test_entity_attribute_projection.entity_name manifest_read_back.entities[3].entity_name)<line_sep># Read back the newly persisted manifest and projection based entity TestEntityProjection and validate
entity_test_entity_projection_read_back=<await>corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_test_entity_projection.entity_name entity_test_entity_projection.entity_name) manifest_read_back)<line_sep>self.assertIsNotNone(entity_test_entity_projection_read_back)<line_sep>actual=entity_test_entity_projection_read_back.extends_entity.explicit_reference.source.named_reference<line_sep>actual_type=entity_test_entity_projection_read_back.extends_entity.explicit_reference.object_type<line_sep>self.assertEqual(expected actual)<line_sep>self.assertEqual(expected_type actual_type)<line_sep># Read back the newly persisted manifest and projection based entity TestEntityNestedProjection and validate
entity_test_entity_nested_projection_read_back=<await>corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_test_entity_nested_projection.entity_name entity_test_entity_nested_projection.entity_name) manifest_read_back)<line_sep>self.assertIsNotNone(entity_test_entity_nested_projection_read_back)<line_sep>actual=entity_test_entity_nested_projection_read_back.extends_entity.explicit_reference.source.explicit_reference.source.explicit_reference.source.named_reference<line_sep>actual_type=entity_test_entity_nested_projection_read_back.extends_entity.explicit_reference.source.explicit_reference.source.explicit_reference.object_type<line_sep>self.assertEqual(expected actual)<line_sep>self.assertEqual(expected_type actual_type)<line_sep># Read back the newly persisted manifest and projection based entity TestEntityAttributeProjection and validate
entity_test_entity_attribute_projection_read_back=<await>corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_test_entity_attribute_projection.entity_name entity_test_entity_attribute_projection.entity_name) manifest_read_back)<line_sep>self.assertIsNotNone(entity_test_entity_attribute_projection_read_back)<line_sep>actual=entity_test_entity_attribute_projection_read_back.attributes[0].entity.explicit_reference.source.named_reference<line_sep>actual_type=entity_test_entity_attribute_projection_read_back.attributes[0].entity.explicit_reference.object_type<line_sep>self.assertEqual(expected actual)<line_sep>self.assertEqual(expected_type actual_type)<line_sep># Read back operations collections and validate
entity_test_operation_collection_read_back=<await>corpus.fetch_object_async('local:/{}.cdm.json/{}'.format(entity_test_operation_collection.entity_name entity_test_operation_collection.entity_name) manifest_read_back)<line_sep>self.assertIsNotNone(entity_test_operation_collection_read_back)<line_sep>actual_operation_count=len(entity_test_operation_collection_read_back.extends_entity.explicit_reference.operations)<line_sep>self.assertEqual(9 actual_operation_count)<line_sep>operations=entity_test_operation_collection_read_back.extends_entity.explicit_reference.operations<line_sep>self.assertEqual(CdmOperationType.ADD_COUNT_ATTRIBUTE operations[0].type)<line_sep>self.assertEqual(CdmOperationType.ADD_SUPPORTING_ATTRIBUTE operations[1].type)<line_sep>self.assertEqual(CdmOperationType.ADD_TYPE_ATTRIBUTE operations[2].type)<line_sep>self.assertEqual(CdmOperationType.EXCLUDE_ATTRIBUTES operations[3].type)<line_sep>self.assertEqual(CdmOperationType.ARRAY_EXPANSION operations[4].type)<line_sep>self.assertEqual(CdmOperationType.COMBINE_ATTRIBUTES operations[5].type)<line_sep>self.assertEqual(CdmOperationType.RENAME_ATTRIBUTES operations[6].type)<line_sep>self.assertEqual(CdmOperationType.REPLACE_AS_FOREIGN_KEY operations[7].type)<line_sep>self.assertEqual(CdmOperationType.INCLUDE_ATTRIBUTES operations[8].type)<block_end><def_stmt>_create_default_manifest self corpus:'CdmCorpusDefinition' local_root:'CdmFolderDefinition'<arrow>'CdmManifestDefinition'<block_start>"""Create a default manifest"""<line_sep>manifest_name='default'<line_sep>manifest_doc_name='{}.manifest.cdm.json'.format(manifest_name)<line_sep>manifest_default=corpus.make_object(CdmObjectType.MANIFEST_DEF manifest_name)<line_sep>local_root.documents.append(manifest_default manifest_doc_name)<line_sep><return>manifest_default<block_end><def_stmt>_create_entity_test_source self corpus:'CdmCorpusDefinition' manifest_default:'CdmManifestDefinition' local_root:'CdmFolderDefinition'<arrow>'CdmEntityDefinition'<block_start>"""Create a simple entity called 'TestSource' with a single attribute"""<line_sep>entity_name='TestSource'<line_sep>entity_test_source=corpus.make_object(CdmObjectType.ENTITY_DEF entity_name)<line_sep>attribute_name='TestAttribute'<line_sep>entity_test_attribute=corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF attribute_name <false>)<line_sep>entity_test_attribute.data_type=corpus.make_ref(CdmObjectType.DATA_TYPE_REF 'string' <true>)<line_sep>entity_test_attribute.purpose=corpus.make_ref(CdmObjectType.PURPOSE_REF 'hasA' <true>)<line_sep>entity_test_attribute.display_name=attribute_name<line_sep>entity_test_source.attributes.append(entity_test_attribute)<line_sep>entity_test_source_doc=corpus.make_object(CdmObjectType.DOCUMENT_DEF '{}.cdm.json'.format(entity_name) <false>)<line_sep>entity_test_source_doc.imports.append(self.foundation_json_path)<line_sep>entity_test_source_doc.definitions.append(entity_test_source)<line_sep>local_root.documents.append(entity_test_source_doc entity_test_source_doc.name)<line_sep>manifest_default.entities.append(entity_test_source)<line_sep><return>entity_test_source<block_end><def_stmt>_create_projection self corpus:'CdmCorpusDefinition'<arrow>'CdmProjection'<block_start>"""Create a simple projection object"""<line_sep>projection=corpus.make_object(CdmObjectType.PROJECTION_DEF)<line_sep>projection.source=corpus.make_object(CdmObjectType.ENTITY_REF 'TestSource' <true>)<line_sep><return>projection<block_end><def_stmt>_create_nested_projection self 
corpus:'CdmCorpusDefinition'<arrow>'CdmProjection'<block_start>"""Create a 3-level nested projection object"""<line_sep>projection3=corpus.make_object(CdmObjectType.PROJECTION_DEF)<line_sep>projection3.source=corpus.make_object(CdmObjectType.ENTITY_REF 'TestSource' <true>)<line_sep>inline_projection_entity_ref3=corpus.make_object(CdmObjectType.ENTITY_REF <none>)<line_sep>inline_projection_entity_ref3.explicit_reference=projection3<line_sep>projection2=corpus.make_object(CdmObjectType.PROJECTION_DEF)<line_sep>projection2.source=inline_projection_entity_ref3<line_sep>inline_projection_entity_ref2=corpus.make_object(CdmObjectType.ENTITY_REF <none>)<line_sep>inline_projection_entity_ref2.explicit_reference=projection2<line_sep>projection1=corpus.make_object(CdmObjectType.PROJECTION_DEF)<line_sep>projection1.source=inline_projection_entity_ref2<line_sep><return>projection1<block_end><def_stmt>_create_entity_test_entity_projection self corpus:'CdmCorpusDefinition' manifest_default:'CdmManifestDefinition' local_root:'CdmFolderDefinition'<arrow>'CdmEntityDefinition'<block_start>"""Create an entity 'TestEntityProjection' that extends from a projection"""<line_sep>entity_name='TestEntityProjection'<line_sep>inline_projection_entity_ref=corpus.make_object(CdmObjectType.ENTITY_REF <none>)<line_sep>inline_projection_entity_ref.explicit_reference=self._create_projection(corpus)<line_sep>entity_test_entity_projection=corpus.make_object(CdmObjectType.ENTITY_DEF entity_name)<line_sep>entity_test_entity_projection.extends_entity=inline_projection_entity_ref<line_sep>entity_test_entity_projection_doc=corpus.make_object(CdmObjectType.DOCUMENT_DEF '{}.cdm.json'.format(entity_name) <false>)<line_sep>entity_test_entity_projection_doc.imports.append(self.foundation_json_path)<line_sep>entity_test_entity_projection_doc.imports.append('TestSource.cdm.json')<line_sep>entity_test_entity_projection_doc.definitions.append(entity_test_entity_projection)<line_sep>local_root.documents.append(entity_test_entity_projection_doc entity_test_entity_projection_doc.name)<line_sep>manifest_default.entities.append(entity_test_entity_projection)<line_sep><return>entity_test_entity_projection<block_end><def_stmt>_create_entity_test_entity_nested_projection self corpus:'CdmCorpusDefinition' manifest_default:'CdmManifestDefinition' local_root:'CdmFolderDefinition'<arrow>'CdmEntityDefinition'<block_start>"""Create an entity 'TestEntityNestedProjection' that extends from a projection"""<line_sep>entity_name='TestEntityNestedProjection'<line_sep>inline_projection_entity_ref=corpus.make_object(CdmObjectType.ENTITY_REF <none>)<line_sep>inline_projection_entity_ref.explicit_reference=self._create_nested_projection(corpus)<line_sep>entity_test_entity_nested_projection=corpus.make_object(CdmObjectType.ENTITY_DEF entity_name)<line_sep>entity_test_entity_nested_projection.extends_entity=inline_projection_entity_ref<line_sep>entity_test_entity_nested_projection_doc=corpus.make_object(CdmObjectType.DOCUMENT_DEF '{}.cdm.json'.format(entity_name) <false>)<line_sep>entity_test_entity_nested_projection_doc.imports.append(self.foundation_json_path)<line_sep>entity_test_entity_nested_projection_doc.imports.append('TestSource.cdm.json')<line_sep>entity_test_entity_nested_projection_doc.definitions.append(entity_test_entity_nested_projection)<line_sep>local_root.documents.append(entity_test_entity_nested_projection_doc 
entity_test_entity_nested_projection_doc.name)<line_sep>manifest_default.entities.append(entity_test_entity_nested_projection)<line_sep><return>entity_test_entity_nested_projection<block_end><def_stmt>_create_entity_test_entity_attribute_projection self corpus:'CdmCorpusDefinition' manifest_default:'CdmManifestDefinition' local_root:'CdmFolderDefinition'<arrow>'CdmEntityDefinition'<block_start>"""Create an entity 'TestEntityAttributeProjection' that contains an entity attribute with a projection as a source entity"""<line_sep>entity_name='TestEntityAttributeProjection'<line_sep>inline_projection_entity_ref=corpus.make_object(CdmObjectType.ENTITY_REF <none>)<line_sep>inline_projection_entity_ref.explicit_reference=self._create_projection(corpus)<line_sep>entity_test_entity_attribute_projection=corpus.make_object(CdmObjectType.ENTITY_DEF entity_name)<line_sep>attribute_name='TestAttribute'<line_sep>entity_test_entity_attribute=corpus.make_object(CdmObjectType.ENTITY_ATTRIBUTE_DEF attribute_name <false>)<line_sep>entity_test_entity_attribute.entity=inline_projection_entity_ref<line_sep>entity_test_entity_attribute_projection.attributes.append(entity_test_entity_attribute)<line_sep>entity_test_entity_attribute_projection_doc=corpus.make_object(CdmObjectType.DOCUMENT_DEF '{}.cdm.json'.format(entity_name) <false>)<line_sep>entity_test_entity_attribute_projection_doc.imports.append(self.foundation_json_path)<line_sep>entity_test_entity_attribute_projection_doc.imports.append('TestSource.cdm.json')<line_sep>entity_test_entity_attribute_projection_doc.definitions.append(entity_test_entity_attribute_projection)<line_sep>local_root.documents.append(entity_test_entity_attribute_projection_doc entity_test_entity_attribute_projection_doc.name)<line_sep>manifest_default.entities.append(entity_test_entity_attribute_projection)<line_sep><return>entity_test_entity_attribute_projection<block_end><def_stmt>_create_projection_with_operation_collection self corpus:'CdmCorpusDefinition' owner:'CdmObject'<arrow>'CdmProjection'<block_start>"""Create a projection object with operations"""<line_sep>projection=corpus.make_object(CdmObjectType.PROJECTION_DEF)<line_sep>projection.source=corpus.make_object(CdmObjectType.ENTITY_REF 'TestSource' <true>)<line_sep># AddCountAttribute Operation
add_count_attribute_op=CdmOperationAddCountAttribute(corpus.ctx)<line_sep>add_count_attribute_op.count_attribute=corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF 'countAtt')<line_sep>projection.operations.append(add_count_attribute_op)<line_sep># AddSupportingAttribute Operation
add_supporting_attribute_op=CdmOperationAddSupportingAttribute(corpus.ctx)<line_sep>add_supporting_attribute_op.supporting_attribute=corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF 'supportingAtt')<line_sep>projection.operations.append(add_supporting_attribute_op)<line_sep># AddTypeAttribute Operation
add_type_attribute_op=CdmOperationAddTypeAttribute(corpus.ctx)<line_sep>add_type_attribute_op.type_attribute=corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF 'typeAtt')<line_sep>projection.operations.append(add_type_attribute_op)<line_sep># ExcludeAttributes Operation
exclude_attributes_op=CdmOperationExcludeAttributes(corpus.ctx)<line_sep>exclude_attributes_op.exclude_attributes=[]<line_sep>exclude_attributes_op.exclude_attributes.append('testAttribute1')<line_sep>projection.operations.append(exclude_attributes_op)<line_sep># ArrayExpansion Operation
array_expansion_op=CdmOperationArrayExpansion(corpus.ctx)<line_sep>array_expansion_op.start_ordinal=0<line_sep>array_expansion_op.end_ordinal=1<line_sep>projection.operations.append(array_expansion_op)<line_sep># CombineAttributes Operation
combine_attributes_op=CdmOperationCombineAttributes(corpus.ctx)<line_sep>combine_attributes_op.select=[]<line_sep>combine_attributes_op.merge_into=corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF 'combineAtt')<line_sep>combine_attributes_op.select.append('testAttribute1')<line_sep>projection.operations.append(combine_attributes_op)<line_sep># RenameAttributes Operation
rename_attributes_op=CdmOperationRenameAttributes(corpus.ctx)<line_sep>rename_attributes_op.rename_format='{m}'<line_sep>projection.operations.append(rename_attributes_op)<line_sep># ReplaceAsForeignKey Operation
replace_as_foreign_key_op=CdmOperationReplaceAsForeignKey(corpus.ctx)<line_sep>replace_as_foreign_key_op.reference='testAttribute1'<line_sep>replace_as_foreign_key_op.replace_with=corpus.make_object(CdmObjectType.TYPE_ATTRIBUTE_DEF 'testForeignKey' <false>)<line_sep>projection.operations.append(replace_as_foreign_key_op)<line_sep># IncludeAttributes Operation
include_attributes_op=CdmOperationIncludeAttributes(corpus.ctx)<line_sep>include_attributes_op.include_attributes=[]<line_sep>include_attributes_op.include_attributes.append('testAttribute1')<line_sep>projection.operations.append(include_attributes_op)<line_sep><return>projection<block_end><def_stmt>_create_entity_test_operation_collection self corpus:'CdmCorpusDefinition' manifest_default:'CdmManifestDefinition' local_root:'CdmFolderDefinition'<block_start>"""Create an entity 'TestOperationCollection' that extends from a projection with a collection of operations"""<line_sep>entity_name='TestOperationCollection'<line_sep>inline_projection_entity_ref=corpus.make_object(CdmObjectType.ENTITY_REF <none>)<line_sep>entity_test_operation_collection=corpus.make_object(CdmObjectType.ENTITY_DEF entity_name)<line_sep>inline_projection_entity_ref.explicit_reference=self._create_projection_with_operation_collection(corpus entity_test_operation_collection)<line_sep>entity_test_operation_collection.extends_entity=inline_projection_entity_ref<line_sep>entity_test_operation_collection_doc=corpus.make_object(CdmObjectType.DOCUMENT_DEF '{}.cdm.json'.format(entity_name) <false>)<line_sep>entity_test_operation_collection_doc.imports.append(self.foundation_json_path)<line_sep>entity_test_operation_collection_doc.imports.append('TestSource.cdm.json')<line_sep>entity_test_operation_collection_doc.definitions.append(entity_test_operation_collection)<line_sep>local_root.documents.append(entity_test_operation_collection_doc entity_test_operation_collection_doc.name)<line_sep>manifest_default.entities.append(entity_test_operation_collection)<line_sep><return>entity_test_operation_collection<block_end><block_end> |
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_future_stmt> absolute_import<import_stmt>shlex<class_stmt>TaskFailedException(Exception)<block_start><pass><block_end><class_stmt>Task(object)<block_start>"""Base class for implementing Task
All the args passed can be accessed via self.args
:param args: The program arguments
"""<def_stmt>__init__ self args<block_start><if_stmt>args<block_start>self.args=self.parse_args(list(shlex.split(args)))<block_end><else_stmt><block_start>self.args=self.parse_args([])<block_end><block_end><def_stmt>parse_args self args<block_start>"""Parse args command line arguments.
:param args: The list of arguments as strings.
"""<line_sep><pass><block_end><def_stmt>run self host<block_start>"""This contains the main logic of the task
Please note an exception from this method will completely stop the restart
:param host: the host on which the precheck is executed
:type host: string
"""<line_sep><raise>NotImplementedError("Implemented in subclass")<block_end><block_end><class_stmt>PreStopTask(Task)<block_start>"""Class to be used for any pre stop checks"""<block_end><class_stmt>PostStopTask(Task)<block_start>"""Class to be used for any post stop checks"""<block_end> |
<import_stmt>os<import_stmt>pandas<as>pd<import_stmt>pytest<import_from_stmt>whylogs.core.metrics.regression_metrics RegressionMetrics<import_from_stmt>whylogs.proto RegressionMetricsMessage<line_sep>TEST_DATA_PATH=os.path.abspath(os.path.join(os.path.realpath(os.path.dirname(__file__)) os.pardir os.pardir os.pardir os.pardir "testdata" ))<def_stmt>my_test <block_start>regmet=RegressionMetrics()<assert_stmt>regmet.count<eq>0<assert_stmt>regmet.sum_diff<eq>0.0<assert_stmt>regmet.sum2_diff<eq>0.0<assert_stmt>regmet.sum_abs_diff<eq>0.0<assert_stmt>regmet.mean_squared_error()<is><none><assert_stmt>regmet.mean_absolute_error()<is><none><assert_stmt>regmet.root_mean_squared_error()<is><none><block_end><def_stmt>test_load_parquet <block_start>mean_absolute_error=85.94534216005789<line_sep>mean_squared_error=11474.89611670205<line_sep>root_mean_squared_error=107.12094154133472<line_sep>regmet=RegressionMetrics()<line_sep>df=pd.read_parquet(os.path.join(os.path.join(TEST_DATA_PATH "metrics" "2021-02-12.parquet")))<line_sep>regmet.add(df["predictions"].to_list() df["targets"].to_list())<assert_stmt>regmet.count<eq>len(df["predictions"].to_list())<assert_stmt>regmet.mean_squared_error()<eq>pytest.approx(mean_squared_error 0.01)<assert_stmt>regmet.mean_absolute_error()<eq>pytest.approx(mean_absolute_error 0.01)<assert_stmt>regmet.root_mean_squared_error()<eq>pytest.approx(root_mean_squared_error 0.01)<line_sep>msg=regmet.to_protobuf()<line_sep>new_regmet=RegressionMetrics.from_protobuf(msg)<assert_stmt>regmet.count<eq>new_regmet.count<assert_stmt>regmet.mean_squared_error()<eq>new_regmet.mean_squared_error()<assert_stmt>regmet.root_mean_squared_error()<eq>new_regmet.root_mean_squared_error()<assert_stmt>regmet.mean_absolute_error()<eq>new_regmet.mean_absolute_error()<block_end><def_stmt>test_empty_protobuf_should_return_none <block_start>empty_message=RegressionMetricsMessage()<assert_stmt>RegressionMetrics.from_protobuf(empty_message)<is><none><block_end><def_stmt>test_merging <block_start>regmet_sum=RegressionMetrics()<line_sep>regmet=RegressionMetrics(prediction_field="predictions" target_field="targets")<line_sep>df=pd.read_parquet(os.path.join(os.path.join(TEST_DATA_PATH "metrics" "2021-02-12.parquet")))<line_sep>regmet.add(df["predictions"].to_list() df["targets"].to_list())<line_sep>regmet_sum.add(df["predictions"].to_list() df["targets"].to_list())<line_sep>regmet_2=RegressionMetrics(prediction_field="predictions" target_field="targets")<line_sep>df_2=pd.read_parquet(os.path.join(os.path.join(TEST_DATA_PATH "metrics" "2021-02-13.parquet")))<line_sep>regmet_2.add(df_2["predictions"].to_list() df_2["targets"].to_list())<line_sep>regmet_sum.add(df_2["predictions"].to_list() df_2["targets"].to_list())<line_sep>merged_reg_metr=regmet.merge(regmet_2)<assert_stmt>merged_reg_metr.count<eq>regmet_sum.count<assert_stmt>merged_reg_metr.mean_squared_error()<eq>pytest.approx(regmet_sum.mean_squared_error() 0.001)<assert_stmt>merged_reg_metr.root_mean_squared_error()<eq>pytest.approx(regmet_sum.root_mean_squared_error() 0.001)<assert_stmt>merged_reg_metr.mean_absolute_error()<eq>pytest.approx(regmet_sum.mean_absolute_error() 0.001)<block_end> |
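# Quick sanity sketch with hand-checked numbers (same add() signature as the
# tests above): predictions [2.0, 3.0] vs targets [2.5, 2.5] give diffs of
# -0.5 and +0.5, so MAE = 0.5 and MSE = 0.25.
# rm = RegressionMetrics()
# rm.add([2.0, 3.0], [2.5, 2.5])
# assert rm.mean_absolute_error() == 0.5
# assert rm.mean_squared_error() == 0.25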
<def_stmt>FibonacciSearch arr key<block_start>fib2=0<line_sep>fib1=1<line_sep>fib=fib1+fib2<while_stmt>(fib<l>len(arr))<block_start>fib2=fib1<line_sep>fib1=fib<line_sep>fib=fib1+fib2<block_end>index=-1<while_stmt>(fib<g>1)<block_start>i=min(index+fib2 (len(arr)-1))<if_stmt>(arr[i]<l>key)<block_start>fib=fib1<line_sep>fib1=fib2<line_sep>fib2=fib-fib1<line_sep>index=i<block_end><elif_stmt>(arr[i]<g>key)<block_start>fib=fib2<line_sep>fib1=fib1-fib2<line_sep>fib2=fib-fib1<block_end><else_stmt><block_start><return>i<block_end><block_end><if_stmt>(fib1<and>index<l>(len(arr)-1)<and>arr[index+1]<eq>key)<block_start><return>index+1<block_end><return>-1<block_end>key=15<line_sep>arr=[5 10 15 20 25 30 35]<line_sep>ans=FibonacciSearch(arr key)<line_sep>print(ans)<if_stmt>(ans<ne>-1)<block_start>print("Found at "+str(ans+1)+" position")<block_end><else_stmt><block_start>print("Not Found")<block_end>
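# Editor's sketch: Fibonacci search requires a sorted input; this quick cross-check
# against the stdlib's bisect exercises found, not-found, and first-element cases
# (the first-element case is why the `ans != -1` check above matters).
import bisect

def _cross_check(values, keys):
    for k in keys:
        got = FibonacciSearch(values, k)
        pos = bisect.bisect_left(values, k)
        want = pos if pos < len(values) and values[pos] == k else -1
        assert got == want, (k, got, want)

_cross_check([5, 10, 15, 20, 25, 30, 35], [5, 15, 35, 3, 17, 40])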
<import_stmt>argparse<import_stmt>json<import_from_stmt>nltk.corpus wordnet<as>wn<import_stmt>torch<import_from_stmt>glove GloVe<def_stmt>getnode x<block_start><return>wn.synset_from_pos_and_offset('n' int(x[1:]))<block_end><def_stmt>getwnid u<block_start>s=str(u.offset())<line_sep><return>'n'+(8-len(s))<times>'0'+s<block_end><def_stmt>getedges s<block_start>dic={x:i<for>i,x enumerate(s)}<line_sep>edges=[]<for_stmt>i,u enumerate(s)<block_start><for_stmt>v u.hypernyms()<block_start>j=dic.get(v)<if_stmt>j<is><not><none><block_start>edges.append((i j))<block_end><block_end><block_end><return>edges<block_end><def_stmt>induce_parents s stop_set<block_start>q=s<line_sep>vis=set(s)<line_sep>l=0<while_stmt>l<l>len(q)<block_start>u=q[l]<line_sep>l<augadd>1<if_stmt>u<in>stop_set<block_start><continue><block_end><for_stmt>p u.hypernyms()<block_start><if_stmt>p<not><in>vis<block_start>vis.add(p)<line_sep>q.append(p)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--input' default='imagenet-split.json')<line_sep>parser.add_argument('--output' default='imagenet-induced-graph.json')<line_sep>args=parser.parse_args()<line_sep>print('making graph ...')<line_sep>xml_wnids=json.load(open('imagenet-xml-wnids.json' 'r'))<line_sep>xml_nodes=list(map(getnode xml_wnids))<line_sep>xml_set=set(xml_nodes)<line_sep>js=json.load(open(args.input 'r'))<line_sep>train_wnids=js['train']<line_sep>test_wnids=js['test']<line_sep>key_wnids=train_wnids+test_wnids<line_sep>s=list(map(getnode key_wnids))<line_sep>induce_parents(s xml_set)<line_sep>s_set=set(s)<for_stmt>u xml_nodes<block_start><if_stmt>u<not><in>s_set<block_start>s.append(u)<block_end><block_end>wnids=list(map(getwnid s))<line_sep>edges=getedges(s)<line_sep>print('making glove embedding ...')<line_sep>glove=GloVe('glove.6B.300d.txt')<line_sep>vectors=[]<for_stmt>wnid wnids<block_start>vectors.append(glove[getnode(wnid).lemma_names()])<block_end>vectors=torch.stack(vectors)<line_sep>print('dumping ...')<line_sep>obj={}<line_sep>obj['wnids']=wnids<line_sep>obj['vectors']=vectors.tolist()<line_sep>obj['edges']=edges<line_sep>json.dump(obj open(args.output 'w'))<block_end> |
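# Editor's sketch: a round-trip sanity check for the getnode/getwnid helpers above.
# Assumes NLTK's WordNet corpus is installed (nltk.download('wordnet')); the sample
# offset is illustrative ('n02084071' is dog.n.01 in WordNet 3.0).
def _check_wnid_roundtrip(wnid='n02084071'):
    node = getnode(wnid)            # e.g. Synset('dog.n.01')
    assert getwnid(node) == wnid    # the zero-padded 8-digit offset survives
    return node.lemma_names()       # the lemmas later fed into the GloVe embedding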
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
"""
This module exposes an Optimizer wrapper that lets regular tf.train.Optimizers
select the FP precision of their slots independently of the variable type.
Currently only Adam is supported.
"""<import_stmt>os<import_stmt>tensorflow.compat.v1<as>tf<import_from_stmt>tensorflow.python.ops math_ops<import_from_stmt>tensorflow.python.training.optimizer _var_key<import_from_stmt>tensorflow.python.training slot_creator<import_from_stmt>tensorflow.python.training.adam AdamOptimizer<import_from_stmt>typing Type<import_from_stmt>logging getLogger<line_sep>tf.disable_v2_behavior()<line_sep>tf.disable_eager_execution()<line_sep>logger=getLogger(os.path.basename(__file__))<def_stmt>SelectableSlotFPFormatOptimizer cls:Type[tf.train.Optimizer]<arrow>Type[tf.train.Optimizer]<block_start><if_stmt><not>issubclass(cls AdamOptimizer)<block_start><raise>ValueError(f'Class {cls} does not inherit from tf.python.training.adam.AdamOptimizer')<block_end><class_stmt>Wrapped(cls)<block_start><def_stmt>__init__ self slots_dtype force_fp32_weight_update=<true> use_nesterov=<false> *args **kwargs<block_start>self.slots_dtype=tf.as_dtype(slots_dtype)<line_sep>self.use_nesterov=use_nesterov<line_sep>self.force_fp32_weight_update=force_fp32_weight_update<line_sep>super(Wrapped self).__init__(*args **kwargs)<block_end><def_stmt>_zeros_slot self var slot_name op_name<block_start>"""Find or create a slot initialized with 0.0.
This is effectively a copy of the original TF optimizer method, except that
this one allows a dtype to be passed to `create_zeros_slot`.
Args:
var: A `Variable` object.
slot_name: Name for the slot.
op_name: Name to use when scoping the Variable that
needs to be created for the slot.
Returns:
A `Variable` object.
"""<line_sep>named_slots=self._slot_dict(slot_name)<if_stmt>_var_key(var)<not><in>named_slots<block_start>new_slot_variable=slot_creator.create_zeros_slot(var op_name dtype=self.slots_dtype)<line_sep>self._restore_slot_variable(slot_name=slot_name variable=var slot_variable=new_slot_variable)<line_sep>named_slots[_var_key(var)]=new_slot_variable<block_end><return>tf.cast(named_slots[_var_key(var)] var.dtype)<block_end><def_stmt>_apply_weight_update self grad var m v beta1_power beta2_power lr beta1 beta2 epsilon use_nesterov<block_start><if_stmt>self.force_fp32_weight_update# Cast to fp32 for extra precision
<block_start>weight_update_dtype=tf.float32<block_end><else_stmt><block_start>weight_update_dtype=var.dtype<block_end># cast all variables to the same desired dtype for the update
m_c=tf.convert_to_tensor(tf.cast(m weight_update_dtype))<line_sep>v_c=tf.convert_to_tensor(tf.cast(v weight_update_dtype))<line_sep>var_c=tf.cast(var weight_update_dtype)<line_sep>lr_c=tf.cast(lr weight_update_dtype)<line_sep>beta1_power_c=tf.cast(beta1_power weight_update_dtype)<line_sep>beta2_power_c=tf.cast(beta2_power weight_update_dtype)<line_sep>beta1_c=tf.cast(beta1 weight_update_dtype)<line_sep>beta2_c=tf.cast(beta2 weight_update_dtype)<line_sep>epsilon_c=tf.cast(epsilon weight_update_dtype)<line_sep>grad_c=tf.cast(grad weight_update_dtype)<line_sep># correct for the bias of the first and second order moments
alpha=lr_c<times>math_ops.sqrt(1-beta2_power_c)/(1-beta1_power_c)<line_sep># update the first order moment
m_t=beta1_c<times>m_c+(1.0-beta1_c)<times>grad_c<line_sep># update the second order moment
v_t=beta2_c<times>v_c+(1.0-beta2_c)<times>grad_c<times>grad_c<line_sep># store the moments in the right dtype
assign_m=tf.assign(m tf.cast(m_t self.slots_dtype))<line_sep>assign_v=tf.assign(v tf.cast(v_t self.slots_dtype))<line_sep># update the variable
<with_stmt>tf.control_dependencies([assign_m assign_v])<block_start><if_stmt>use_nesterov<block_start><return>tf.cast(var_c-((grad_c<times>(1.0-beta1_c)+beta1_c<times>m_t)<times>alpha)/(math_ops.sqrt(v_t)+epsilon_c) var.dtype)<block_end><else_stmt><block_start><return>tf.cast(var_c-(m_t<times>alpha)/(math_ops.sqrt(v_t)+epsilon_c) var.dtype)<block_end><block_end><block_end><def_stmt>_resource_apply_dense self grad var<block_start>m=self.get_slot(var "m")<line_sep>v=self.get_slot(var "v")<line_sep>beta1_power,beta2_power=self._get_beta_accumulators()<line_sep><return>var.assign(self._apply_weight_update(grad=grad var=var m=m v=v beta1_power=beta1_power beta2_power=beta2_power lr=self._lr_t beta1=self._beta1_t beta2=self._beta2_t epsilon=self._epsilon_t use_nesterov=self.use_nesterov))<block_end><block_end><return>Wrapped<block_end> |
<import_stmt>os<import_stmt>re<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>random<import_stmt>json<import_stmt>numpy<as>np<def_stmt>get_num_correct preds labels<block_start><return>preds.argmax(dim=1).eq(labels).sum().item()<block_end><class_stmt>ArchLoader()<block_start>'''
Load candidate architectures from a JSON file.
'''<def_stmt>__init__ self path<block_start>super(ArchLoader self).__init__()<line_sep>self.arc_list=[]<line_sep>self.arc_dict={}<line_sep>self.get_arch_list_dict(path)<line_sep>random.shuffle(self.arc_list)<line_sep>self.idx=-1<line_sep>self.level_config={"level1":[4 8 12 16] "level2":[4 8 12 16 20 24 28 32] "level3":[4 8 12 16 20 24 28 32 36 40 44 48 52 56 60 64]}<block_end><def_stmt>get_arch_list self<block_start><return>self.arc_list<block_end><def_stmt>get_arch_dict self<block_start><return>self.arc_dict<block_end><def_stmt>get_random_batch self bs<block_start><return>random.sample(self.arc_list bs)<block_end><def_stmt>get_part_dict self<block_start>keys=list(self.arc_dict.keys())[:10]<line_sep><return>dict([(key self.arc_dict[key])<for>key keys])<block_end><def_stmt>convert_list_arc_str self arc_list<block_start>arc_str=""<line_sep>arc_list=[str(item)+"-"<for>item arc_list]<for_stmt>item arc_list<block_start>arc_str<augadd>item<block_end><return>arc_str[:-1]<block_end><def_stmt>__next__ self<block_start>self.idx<augadd>1<if_stmt>self.idx<ge>len(self.arc_list)<block_start><raise>StopIteration<block_end><return>self.arc_list[self.idx]<block_end><def_stmt>__iter__ self<block_start><return>self<block_end><def_stmt>get_arch_list_dict self path<block_start><with_stmt>open(path "r")<as>f<block_start>self.arc_dict=json.load(f)<block_end>self.arc_list=[]<for_stmt>_,v self.arc_dict.items()<block_start>self.arc_list.append(v["arch"])<block_end><block_end><def_stmt>generate_fair_batch self<block_start>rngs=[]<line_sep>seed=0<line_sep># level1
<for_stmt>i range(0 7)<block_start>seed<augadd>1<line_sep>random.seed(seed)<line_sep>rngs.append(random.sample(self.level_config['level1'] len(self.level_config['level1']))<times>4)<block_end># level2
<for_stmt>i range(7 13)<block_start>seed<augadd>1<line_sep>random.seed(seed)<line_sep>rngs.append(random.sample(self.level_config['level2'] len(self.level_config['level2']))<times>2)<block_end># level3
<for_stmt>i range(13 20)<block_start>seed<augadd>1<line_sep>random.seed(seed)<line_sep>rngs.append(random.sample(self.level_config['level3'] len(self.level_config['level3'])))<block_end><return>np.transpose(rngs)<block_end><def_stmt>generate_niu_fair_batch self<block_start>rngs=[]<line_sep>seed=0<line_sep># level1
<for_stmt>i range(0 7)<block_start>seed<augadd>1<line_sep>random.seed(seed)<line_sep>tmp_rngs=[]<for_stmt>_ range(4)<block_start>tmp_rngs.extend(random.sample(self.level_config['level1'] len(self.level_config['level1'])))<block_end>rngs.append(tmp_rngs)<block_end># level2
<for_stmt>i range(7 13)<block_start>seed<augadd>1<line_sep>random.seed(seed)<line_sep>tmp_rngs=[]<for_stmt>_ range(2)<block_start>tmp_rngs.extend(random.sample(self.level_config['level2'] len(self.level_config['level2'])))<block_end>rngs.append(tmp_rngs)<block_end># level3
<for_stmt>i range(13 20)<block_start>seed<augadd>1<line_sep>random.seed(seed)<line_sep>rngs.append(random.sample(self.level_config['level3'] len(self.level_config['level3'])))<block_end><return>np.transpose(rngs)<block_end><block_end># arch_loader = ArchLoader("Track1_final_archs.json")
# print(arch_loader.generate_niu_fair_batch())
# arc_dc = arch_loader.get_random_batch(1000)
# for i, arc in enumerate(arc_dc):
# print(i, arc)
# cnt = 0
# for i,ac in enumerate(arch_loader):
# print(i,ac)
# cnt += 1
# print(cnt)
<class_stmt>CrossEntropyLabelSmooth(nn.Module)<block_start><def_stmt>__init__ self num_classes epsilon<block_start>super(CrossEntropyLabelSmooth self).__init__()<line_sep>self.num_classes=num_classes<line_sep>self.epsilon=epsilon<line_sep>self.logsoftmax=nn.LogSoftmax(dim=1)<block_end><def_stmt>forward self inputs targets<block_start>log_probs=self.logsoftmax(inputs)<line_sep>targets=torch.zeros_like(log_probs).scatter_(1 targets.unsqueeze(1) 1)<line_sep>targets=(1-self.epsilon)<times>targets+self.epsilon/self.num_classes<line_sep>loss=(-targets<times>log_probs).mean(0).sum()<line_sep><return>loss<block_end><block_end><class_stmt>AvgrageMeter(object)<block_start><def_stmt>__init__ self<block_start>self.reset()<block_end><def_stmt>reset self<block_start>self.avg=0<line_sep>self.sum=0<line_sep>self.cnt=0<line_sep>self.val=0<block_end><def_stmt>update self val n=1<block_start>self.val=val<line_sep>self.sum<augadd>val<times>n<line_sep>self.cnt<augadd>n<line_sep>self.avg=self.sum/self.cnt<block_end><block_end><def_stmt>accuracy output target topk=(1 )<block_start>maxk=max(topk)<line_sep>batch_size=target.size(0)<line_sep>_,pred=output.topk(maxk 1 <true> <true>)<line_sep>pred=pred.t()<line_sep>correct=pred.eq(target.view(1 -1).expand_as(pred))<line_sep>res=[]<for_stmt>k topk<block_start>correct_k=correct[:k].reshape(-1).float().sum(0)<line_sep>res.append(correct_k.mul_(100.0/batch_size))<block_end><return>res<block_end><def_stmt>save_checkpoint state iters tag=''<block_start><if_stmt><not>os.path.exists("./models")<block_start>os.makedirs("./models")<block_end>filename=os.path.join("./models/{}checkpoint-{:06}.pth.tar".format(tag iters))<line_sep>torch.save(state filename)<line_sep># latestfilename = os.path.join(
# "./models/{}checkpoint-latest.pth.tar".format(tag))
# torch.save(state, latestfilename)
<block_end><def_stmt>get_lastest_model <block_start><if_stmt><not>os.path.exists('./models')<block_start>os.mkdir('./models')<block_end>model_list=os.listdir('./models/')<if_stmt>model_list<eq>[]<block_start><return><none> 0<block_end>model_list.sort()<line_sep>lastest_model=model_list[-1]<line_sep>iters=re.findall(r'\d+' lastest_model)<line_sep><return>'./models/'+lastest_model int(iters[0])<block_end><def_stmt>get_parameters model<block_start>group_no_weight_decay=[]<line_sep>group_weight_decay=[]<for_stmt>pname,p model.named_parameters()<block_start><if_stmt>pname.find('weight')<ge>0<and>len(p.size())<g>1# print('include ', pname, p.size())
<block_start>group_weight_decay.append(p)<block_end><else_stmt># print('not include ', pname, p.size())
<block_start>group_no_weight_decay.append(p)<block_end><block_end><assert_stmt>len(list(model.parameters()))<eq>len(group_weight_decay)+len(group_no_weight_decay)<line_sep>groups=[dict(params=group_weight_decay) dict(params=group_no_weight_decay weight_decay=0.)]<line_sep><return>groups<block_end><def_stmt>bn_calibration_init m<block_start>""" calculating post-statistics of batch normalization """<if_stmt>getattr(m 'track_running_stats' <false>)# reset all values for post-statistics
<block_start>m.reset_running_stats()<line_sep># set bn in training mode to update post-statistics
m.training=<true><line_sep># if using a cumulative moving average (NOTE: ``FLAGS`` is assumed to be a
# global config object, e.g. absl.flags.FLAGS, defined by the importing script)
<if_stmt>getattr(FLAGS 'cumulative_bn_stats' <false>)<block_start>m.momentum=<none><block_end><block_end><block_end>
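# Editor's sketch of how bn_calibration_init is typically driven: reset the BN
# running statistics with Module.apply, then stream a few calibration batches
# through the sampled network so the post-statistics are re-estimated. `model`
# and `loader` are placeholders; FLAGS (see the note above) must be defined.
def calibrate_bn(model, loader):
    model.apply(bn_calibration_init)   # reset stats, put BN layers in train mode
    with torch.no_grad():
        for images, _ in loader:
            model(images)
    model.eval()
    return model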
"""
Settings specific to development environments
"""<import_from_stmt>os path<import_from_stmt>settings.base PROJECT_DIR MIDDLEWARE_CLASSES INSTALLED_APPS<line_sep>DATABASES={'default':{'ENGINE':'django.db.backends.sqlite3' 'NAME':path.join(PROJECT_DIR 'data' 'data.db') }}<line_sep>DEBUG=<true><line_sep>TEMPLATE_DEBUG=<true><line_sep>SITE_ID=1<line_sep>INCLUDE_DOMAIN='localhost:8000'<line_sep>INCLUDE_URL=INCLUDE_DOMAIN+'/include/'<line_sep>STATIC_URL='/static/'<def_stmt>show_toolbar request<block_start><return><true><block_end>DEBUG_TOOLBAR_CONFIG={'INTERCEPT_REDIRECTS':<false> 'SHOW_TOOLBAR_CALLBACK':show_toolbar }<line_sep>INTERNAL_IPS=('127.0.0.1' '10.0.1.3' )<line_sep>MIDDLEWARE_CLASSES=MIDDLEWARE_CLASSES+['debug_toolbar.middleware.DebugToolbarMiddleware' ]<line_sep>INSTALLED_APPS=INSTALLED_APPS+['debug_toolbar' ]<line_sep>CACHES={'default':{'BACKEND':'django.core.cache.backends.filebased.FileBasedCache' 'LOCATION':path.join(PROJECT_DIR 'cache') 'TIMEOUT':60<times>60<times>24<times>365}}<line_sep>COMPRESS_ENABLED=<true><line_sep> |
<import_stmt>shutil<import_stmt>tempfile<import_stmt>os<import_stmt>networkx<as>nx<import_from_stmt>.generate_output *<import_from_stmt>.isvalid *<import_from_stmt>.__init__ __version__<def_stmt>get_options <block_start><import_stmt>argparse<line_sep>description='Generate multiple sequence alignments after running Panaroo'<line_sep>parser=argparse.ArgumentParser(description=description prog='generate_panaroo_msa')<line_sep>io_opts=parser.add_argument_group('Input/output')<line_sep>io_opts.add_argument("-o" "--out_dir" dest="output_dir" required=<true> help="location of the Panaroo output directory" type=<lambda>x:is_valid_folder(parser x))<line_sep># alignment
core=parser.add_argument_group('Gene alignment')<line_sep>core.add_argument("-a" "--alignment" dest="aln" help=("Output alignments of core genes or all genes. Options are"+" 'core' and 'pan'. Default: 'core'") type=str choices={'core' 'pan'} default='core')<line_sep>core.add_argument("--aligner" dest="alr" help="Specify an aligner. Options:'prank', 'clustal', and default: 'mafft'" type=str choices={'prank' 'clustal' 'mafft'} default="mafft")<line_sep>core.add_argument("--core_threshold" dest="core" help="Core-genome sample threshold (default=0.95)" type=float default=0.95)<line_sep># Other options
parser.add_argument("-t" "--threads" dest="n_cpu" help="number of threads to use (default=1)" type=int default=1)<line_sep>parser.add_argument("--verbose" dest="verbose" help="print additional output" action='store_true' default=<false>)<line_sep>parser.add_argument('--version' action='version' version='%(prog)s '+__version__)<line_sep>args=parser.parse_args()<line_sep><return>(args)<block_end><def_stmt>main <block_start>args=get_options()<line_sep># make sure trailing forward slash is present
args.output_dir=os.path.join(args.output_dir "")<line_sep># Create temporary directory
temp_dir=os.path.join(tempfile.mkdtemp(dir=args.output_dir) "")<line_sep># Load isolate names
seen=set()<line_sep>isolate_names=[]<with_stmt>open(args.output_dir+"gene_data.csv" 'r')<as>infile<block_start>next(infile)<for_stmt>line infile<block_start>iso=line.split(",")[0]<if_stmt>iso<not><in>seen<block_start>isolate_names.append(iso)<line_sep>seen.add(iso)<block_end><block_end><block_end># Load graph
G=nx.read_gml(args.output_dir+"final_graph.gml")<line_sep>#Write out core/pan-genome alignments
<if_stmt>args.aln<eq>"pan"<block_start><if_stmt>args.verbose<block_start>print("generating pan genome MSAs...")<block_end>generate_pan_genome_alignment(G temp_dir args.output_dir args.n_cpu args.alr isolate_names)<line_sep>core_nodes=get_core_gene_nodes(G args.core len(isolate_names))<line_sep>concatenate_core_genome_alignments(core_nodes args.output_dir)<block_end><elif_stmt>args.aln<eq>"core"<block_start><if_stmt>args.verbose<block_start>print("generating core genome MSAs...")<block_end>generate_core_genome_alignment(G temp_dir args.output_dir args.n_cpu args.alr isolate_names args.core len(isolate_names))<block_end># remove temporary directory
shutil.rmtree(temp_dir)<line_sep><return><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
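# Example invocation (editor's note; the directory name is illustrative). The -o
# directory must already contain Panaroo's final_graph.gml and gene_data.csv:
#   generate_panaroo_msa -o panaroo_output/ -a core --core_threshold 0.95 -t 8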
'this crashed pychecker from calendar.py in Python 2.2'<class_stmt>X<block_start>'d'<def_stmt>test self item<block_start><return>[e<for>e item].__getslice__()<block_end><block_end># this crashed in 2.2, but not 2.3
<def_stmt>f a<block_start>a.a=[x<for>x range(2)<if>x<g>1]<block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>DQMServices.Core.DQMEDAnalyzer DQMEDAnalyzer<line_sep>EcalPi0MonDQM=DQMEDAnalyzer('DQMSourcePi0' prescaleFactor=cms.untracked.int32(1) FolderName=cms.untracked.string('AlCaReco/EcalPi0') AlCaStreamEBpi0Tag=cms.untracked.InputTag("hltAlCaPi0RegRecHits" "pi0EcalRecHitsEB") AlCaStreamEEpi0Tag=cms.untracked.InputTag("hltAlCaPi0RegRecHits" "pi0EcalRecHitsEE") AlCaStreamEBetaTag=cms.untracked.InputTag("hltAlCaEtaRegRecHits" "etaEcalRecHitsEB") AlCaStreamEEetaTag=cms.untracked.InputTag("hltAlCaEtaRegRecHits" "etaEcalRecHitsEE") isMonEEpi0=cms.untracked.bool(<true>) isMonEBpi0=cms.untracked.bool(<true>) isMonEEeta=cms.untracked.bool(<true>) isMonEBeta=cms.untracked.bool(<true>) SaveToFile=cms.untracked.bool(<false>) FileName=cms.untracked.string('MonitorAlCaEcalPi0.root') clusSeedThr=cms.double(0.5) clusSeedThrEndCap=cms.double(1.0) clusEtaSize=cms.int32(3) clusPhiSize=cms.int32(3) seleXtalMinEnergy=cms.double(-0.15) seleXtalMinEnergyEndCap=cms.double(-0.75) selePtGamma=cms.double(1) selePtPi0=cms.double(2.) seleMinvMaxPi0=cms.double(0.22) seleMinvMinPi0=cms.double(0.06) seleS4S9Gamma=cms.double(0.83) selePi0Iso=cms.double(0.5) ptMinForIsolation=cms.double(1) selePi0BeltDR=cms.double(0.2) selePi0BeltDeta=cms.double(0.05) selePtGammaEndCap=cms.double(0.8) selePtPi0EndCap=cms.double(3.0) seleS4S9GammaEndCap=cms.double(0.9) seleMinvMaxPi0EndCap=cms.double(0.3) seleMinvMinPi0EndCap=cms.double(0.05) ptMinForIsolationEndCap=cms.double(0.5) selePi0IsoEndCap=cms.double(0.5) selePi0BeltDREndCap=cms.double(0.2) selePi0BeltDetaEndCap=cms.double(0.05) selePtGammaEta=cms.double(1.2) selePtEta=cms.double(4.0) seleS4S9GammaEta=cms.double(0.9) seleS9S25GammaEta=cms.double(0.8) seleMinvMaxEta=cms.double(0.8) seleMinvMinEta=cms.double(0.3) ptMinForIsolationEta=cms.double(1.0) seleEtaIso=cms.double(0.5) seleEtaBeltDR=cms.double(0.3) seleEtaBeltDeta=cms.double(0.1) massLowPi0Cand=cms.double(0.104) massHighPi0Cand=cms.double(0.163) selePtGammaEtaEndCap=cms.double(1.5) selePtEtaEndCap=cms.double(5) seleS4S9GammaEtaEndCap=cms.double(0.9) seleS9S25GammaEtaEndCap=cms.double(0.85) seleMinvMaxEtaEndCap=cms.double(0.8) seleMinvMinEtaEndCap=cms.double(0.3) ptMinForIsolationEtaEndCap=cms.double(0.5) seleEtaIsoEndCap=cms.double(0.5) seleEtaBeltDREndCap=cms.double(0.3) seleEtaBeltDetaEndCap=cms.double(0.1) posCalcParameters=cms.PSet(T0_barl=cms.double(5.7) T0_endc=cms.double(3.1) T0_endcPresh=cms.double(1.2) LogWeighted=cms.bool(<true>) W0=cms.double(4.2) X0=cms.double(0.89)))<line_sep> |
<import_stmt>random<def_stmt>rps n<block_start><if_stmt>(n<eq>1)<block_start><return>"Rock"<block_end><elif_stmt>(n<eq>2)<block_start><return>"Paper"<block_end><else_stmt><block_start><return>"Scissors"<block_end><block_end>print("1. Rock 2. Paper 3. Scissors \n")<line_sep>choice=int(input())<line_sep>print("You "+rps(choice))<line_sep>computer=random.randint(1 3)<line_sep>print("Computer "+rps(computer))<line_sep>print()<if_stmt>(choice<eq>computer)<block_start>print('Tie!')<block_end><elif_stmt>(choice<eq>1)<block_start><if_stmt>(computer<eq>2)<block_start>print("Computer Wins!")<line_sep>print(rps(computer) "beats" rps(choice))<block_end><elif_stmt>(computer<eq>3)<block_start>print("You Win!")<line_sep>print(rps(choice) "beats" rps(computer))<block_end><block_end><elif_stmt>(choice<eq>2)<block_start><if_stmt>(computer<eq>3)<block_start>print("Computer Wins!")<line_sep>print(rps(computer) "beats" rps(choice))<block_end><elif_stmt>(computer<eq>1)<block_start>print("You Win!")<line_sep>print(rps(choice) "beats" rps(computer))<block_end><block_end><elif_stmt>(choice<eq>3)<block_start><if_stmt>(computer<eq>1)<block_start>print("Computer Wins!")<line_sep>print(rps(computer) "beats" rps(choice))<block_end><elif_stmt>(computer<eq>2)<block_start>print("You Win!")<line_sep>print(rps(choice) "beats" rps(computer))<block_end><block_end>
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""Video transforms that are used for advanced augmentation methods."""<import_from_stmt>typing Any Callable Dict Optional Tuple<import_stmt>torch<import_stmt>torchvision<import_stmt>torchvision.transforms.functional_tensor<as>F_t<import_from_stmt>torchvision.transforms.functional InterpolationMode<line_sep># Maximum global magnitude used for video augmentation.
_AUGMENTATION_MAX_LEVEL=10<def_stmt>_check_fill_arg kwargs<block_start>"""
Check if kwargs contains key ``fill``.
"""<assert_stmt>"fill"<in>kwargs "Need to have fill in kwargs."<block_end><def_stmt>_autocontrast video:torch.Tensor **kwargs<arrow>torch.Tensor<block_start>"""
Maximize contrast of a video by remapping its pixels per channel so that the lowest
becomes black and the lightest becomes white.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
"""<line_sep><return>torchvision.transforms.functional.autocontrast(video)<block_end><def_stmt>_equalize video:torch.Tensor **kwargs<arrow>torch.Tensor<block_start>"""
Equalize the histogram of a video by applying a non-linear mapping to the input in
order to create a uniform distribution of grayscale values in the output.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
"""<if_stmt>video.dtype<ne>torch.uint8<block_start>video_type=video.dtype<line_sep>video=(video<times>255).to(torch.uint8)<line_sep><return>(torchvision.transforms.functional.equalize(video)/255).to(video_type)<block_end><return>torchvision.transforms.functional.equalize(video)<block_end><def_stmt>_invert video:torch.Tensor **kwargs<arrow>torch.Tensor<block_start>"""
Invert the colors of a video.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
"""<line_sep><return>torchvision.transforms.functional.invert(video)<block_end><def_stmt>_rotate video:torch.Tensor factor:float **kwargs<arrow>torch.Tensor<block_start>"""
Rotate the video by the given angle.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): The rotation angle value in degrees, counter-clockwise.
"""<line_sep>_check_fill_arg(kwargs)<line_sep><return>torchvision.transforms.functional.rotate(video factor fill=kwargs["fill"] interpolation=InterpolationMode.BILINEAR)<block_end><def_stmt>_solarize video:torch.Tensor factor:float **kwargs<arrow>torch.Tensor<block_start>"""
Solarize a video by inverting all pixel values above a threshold.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): The inversion threshold, in [0, 1] for float videos
(scaled by 255 internally for uint8 videos).
"""<if_stmt>video.dtype<eq>torch.uint8<block_start><return>torchvision.transforms.functional.solarize(video int(factor<times>255.0))<block_end><else_stmt><block_start><return>torchvision.transforms.functional.solarize(video factor)<block_end><block_end><def_stmt>_adjust_contrast video:torch.Tensor factor:float **kwargs<arrow>torch.Tensor<block_start>"""
Adjust the contrast of a video.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to adjust the contrast. Can be any non-negative
number. 0 gives a solid gray video, 1 gives the original video while 2
increases the contrast by a factor of 2.
"""<line_sep><return>torchvision.transforms.functional.adjust_contrast(video factor)<block_end><def_stmt>_adjust_saturation video:torch.Tensor factor:float **kwargs<arrow>torch.Tensor<block_start>"""
Adjust the saturation of a video.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to adjust the saturation. 0 will give a black and
white video, 1 will give the original video while 2 will enhance the
saturation by a factor of 2.
"""<line_sep><return>torchvision.transforms.functional.adjust_saturation(video factor)<block_end><def_stmt>_adjust_brightness video:torch.Tensor factor:float **kwargs<arrow>torch.Tensor<block_start>"""
Adjust brightness of a video.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to adjust the brightness. Can be any
non-negative number. 0 gives a black video, 1 gives the original video
while 2 increases the brightness by a factor of 2.
"""<line_sep><return>torchvision.transforms.functional.adjust_brightness(video factor)<block_end><def_stmt>_adjust_sharpness video:torch.Tensor factor:float **kwargs<arrow>torch.Tensor<block_start>"""
Adjust the sharpness of a video.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to adjust the sharpness. Can be any non-negative
number. 0 gives a blurred video, 1 gives the original video while 2
increases the sharpness by a factor of 2.
"""<line_sep><return>torchvision.transforms.functional.adjust_sharpness(video factor)<block_end><def_stmt>_posterize video:torch.Tensor factor:float **kwargs<block_start>"""
Posterize a video by reducing the number of bits for each color channel.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): The number of bits to keep for each channel (0-8).
"""<if_stmt>factor<ge>8<block_start><return>video<block_end><if_stmt>video.dtype<ne>torch.uint8<block_start>video_type=video.dtype<line_sep>video=(video<times>255).to(torch.uint8)<line_sep><return>(torchvision.transforms.functional.posterize(video factor)/255).to(video_type)<block_end><return>torchvision.transforms.functional.posterize(video factor)<block_end><def_stmt>_shear_x video:torch.Tensor factor:float **kwargs<block_start>"""
Shear the video along the horizontal axis.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to shear along the horizontal axis using the affine
matrix.
"""<line_sep>_check_fill_arg(kwargs)<line_sep>translation_offset=video.size(-2)<times>factor/2<line_sep><return>F_t.affine(video [1 factor translation_offset 0 1 0] fill=kwargs["fill"] interpolation="bilinear" )<block_end><def_stmt>_shear_y video:torch.Tensor factor:float **kwargs<block_start>"""
Shear the video along the vertical axis.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to shear along the vertical axis using the affine
matrix.
"""<line_sep>_check_fill_arg(kwargs)<line_sep>translation_offset=video.size(-1)<times>factor/2<line_sep><return>F_t.affine(video [1 0 0 factor 1 translation_offset] fill=kwargs["fill"] interpolation="bilinear" )<block_end><def_stmt>_translate_x video:torch.Tensor factor:float **kwargs<block_start>"""
Translate the video along the horizontal axis.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much (relative to the image size) to translate along the
horizontal axis.
"""<line_sep>_check_fill_arg(kwargs)<line_sep>translation_offset=factor<times>video.size(-1)<line_sep><return>F_t.affine(video [1 0 translation_offset 0 1 0] fill=kwargs["fill"] interpolation="bilinear" )<block_end><def_stmt>_translate_y video:torch.Tensor factor:float **kwargs<block_start>"""
Translate the video along the vertical axis.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much (relative to the image size) to translate along the
vertical axis.
"""<line_sep>_check_fill_arg(kwargs)<line_sep>translation_offset=factor<times>video.size(-2)<line_sep><return>F_t.affine(video [1 0 0 0 1 translation_offset] fill=kwargs["fill"] interpolation="bilinear" )<block_end><def_stmt>_randomly_negate magnitude:float<arrow>float<block_start>"""
Negate input value with 50% chance.
Args:
magnitude (float): Input value.
"""<line_sep><return>magnitude<if>torch.rand(1).item()<g>0.5<else>-magnitude<block_end><def_stmt>_increasing_magnitude_to_arg level:int params:Tuple[float float]<arrow>float<block_start>"""
Convert level to transform magnitude. This assumes transform magnitude increases
linearly with level.
Args:
level (int): Level value.
params (Tuple[float, float]): Params contains two values: 1) Base transform
magnitude when level is 0; 2) Maximum increase in transform magnitude
when the level is at its maximum.
"""<line_sep>magnitude=(level/_AUGMENTATION_MAX_LEVEL)<times>params[1]<line_sep><return>(params[0]+magnitude )<block_end><def_stmt>_increasing_randomly_negate_to_arg level:int params:Tuple[float float]<arrow>Tuple[float]<block_start>"""
Convert level to transform magnitude. This assumes transform magnitude increases
(or decreases with 50% chance) linearly with level.
Args:
level (int): Level value.
params (Tuple[float, float]): Params contains two values: 1) Base transform
magnitude when level is 0; 2) Maximum increase in transform magnitude
when the level is at its maximum.
"""<line_sep>magnitude=(level/_AUGMENTATION_MAX_LEVEL)<times>params[1]<line_sep><return>(params[0]+_randomly_negate(magnitude) )<block_end><def_stmt>_decreasing_int_to_arg level:int params:Tuple[int int]<arrow>Tuple[int]<block_start>"""
Convert level to transform magnitude. This assumes transform magnitude decreases
linearly with level. The return value is converted to int.
Args:
level (int): Level value.
params (Tuple[float, float]): Params contains two values: 1) Base transform
magnitude when level is 0; 2) Maximum decrease in transform magnitude
when the level is at its maximum.
"""<line_sep>magnitude=(level/_AUGMENTATION_MAX_LEVEL)<times>params[1]<line_sep><return>(params[0]-int(magnitude) )<block_end><def_stmt>_decreasing_to_arg level:int params:Tuple[float float]<arrow>Tuple[float]<block_start>"""
Convert level to transform magnitude. This assumes transform magnitude decreases
linearly with level.
Args:
level (int): Level value.
params (Tuple[float, float]): Params contains two values: 1) Base transform
magnitude when level is 0; 2) Maximum decrease in transform magnitude
when the level is at its maximum.
"""<line_sep>magnitude=(level/_AUGMENTATION_MAX_LEVEL)<times>params[1]<line_sep><return>(params[0]-magnitude )<block_end># A dictionary that contains transform names (key) and their corresponding transform
# functions (value).
_NAME_TO_TRANSFORM_FUNC={"AdjustBrightness":_adjust_brightness "AdjustContrast":_adjust_contrast "AdjustSaturation":_adjust_saturation "AdjustSharpness":_adjust_sharpness "AutoContrast":_autocontrast "Equalize":_equalize "Invert":_invert "Rotate":_rotate "Posterize":_posterize "Solarize":_solarize "ShearX":_shear_x "ShearY":_shear_y "TranslateX":_translate_x "TranslateY":_translate_y }<line_sep># A dictionary that contains transform names (key) and their corresponding level
# functions (value), which converts the magnitude to the transform function arguments.
_LEVEL_TO_ARG={"AdjustBrightness":_increasing_randomly_negate_to_arg "AdjustContrast":_increasing_randomly_negate_to_arg "AdjustSaturation":_increasing_randomly_negate_to_arg "AdjustSharpness":_increasing_randomly_negate_to_arg "AutoContrast":<none> "Equalize":<none> "Invert":<none> "Rotate":_increasing_randomly_negate_to_arg "Posterize":_decreasing_int_to_arg "Solarize":_decreasing_to_arg "ShearX":_increasing_randomly_negate_to_arg "ShearY":_increasing_randomly_negate_to_arg "TranslateX":_increasing_randomly_negate_to_arg "TranslateY":_increasing_randomly_negate_to_arg }<line_sep># A dictionary that contains transform names (key) and their corresponding maximum
# transform (value).
_TRANSFORM_MAX_PARAMS={"AdjustBrightness":(1 0.9) "AdjustContrast":(1 0.9) "AdjustSaturation":(1 0.9) "AdjustSharpness":(1 0.9) "AutoContrast":<none> "Equalize":<none> "Invert":<none> "Rotate":(0 30) "Posterize":(4 4) "Solarize":(1 1) "ShearX":(0 0.3) "ShearY":(0 0.3) "TranslateX":(0 0.45) "TranslateY":(0 0.45) }<line_sep># Hyperparameters for sampling magnitude.
SAMPLING_DEFAULT_HPARAS={"sampling_std":0.5}<line_sep># Hyperparameters for transform functions.
TRANSFORM_DEFAULT_HPARAS={"fill":(0.5 0.5 0.5)}<class_stmt>AugmentTransform<block_start><def_stmt>__init__ self transform_name:str magnitude:int=10 prob:float=0.5 name_to_transform_func:Optional[Dict[str Callable]]=<none> level_to_arg:Optional[Dict[str Callable]]=<none> transform_max_paras:Optional[Dict[str Tuple]]=<none> transform_hparas:Optional[Dict[str Any]]=<none> sampling_type:str="gaussian" sampling_hparas:Optional[Dict[str Any]]=<none> <arrow><none><block_start>"""
The AugmentTransform composes a video transform that performs augmentation
based on a maximum magnitude. AugmentTransform also offers flexible ways to
generate augmentation magnitude based on different sampling strategies.
Args:
transform_name (str): The name of the video transform function.
magnitude (int): Magnitude used for transform function.
prob (float): The probability of applying each transform function.
name_to_transform_func (Optional[Dict[str, Callable]]): A Dictionary that
contains mapping of the transform name to the transform function.
level_to_arg (Optional[Dict[str, Callable]]): A Dictionary that contains
mapping of the transform name to its level function, which converts
the magnitude to the transform function arguments.
transform_max_paras (Optional[Dict[str, Tuple]]): A Dictionary that
contains mapping of the transform name to its maximum transform
magnitude.
transform_hparas (Optional[Dict[Any]]): Transform hyper parameters.
Needs to have key fill. By default, it uses transform_default_hparas.
sampling_type (str): Sampling method for magnitude of transform. It should
be either gaussian or uniform.
sampling_hparas (Optional[Dict[Any]]): Hyper parameters for sampling. If
gaussian sampling is used, it needs to have key sampling_std. By
default, it uses SAMPLING_DEFAULT_HPARAS.
"""<assert_stmt>sampling_type<in>["gaussian" "uniform"]<line_sep>name_to_transform_func=name_to_transform_func<or>_NAME_TO_TRANSFORM_FUNC<line_sep>level_to_arg=level_to_arg<or>_LEVEL_TO_ARG<line_sep>transform_max_paras=transform_max_paras<or>_TRANSFORM_MAX_PARAMS<line_sep>self.transform_hparas=transform_hparas<or>TRANSFORM_DEFAULT_HPARAS<line_sep>self.sampling_type=sampling_type<line_sep>self.sampling_hparas=sampling_hparas<or>SAMPLING_DEFAULT_HPARAS<assert_stmt>"fill"<in>self.transform_hparas<if_stmt>self.sampling_type<eq>"gaussian"<block_start><assert_stmt>"sampling_std"<in>self.sampling_hparas<block_end><if_stmt>self.sampling_type<eq>"uniform"<block_start><assert_stmt>"sampling_data_type"<in>self.sampling_hparas<assert_stmt>"sampling_min"<in>self.sampling_hparas<if_stmt>self.sampling_hparas["sampling_data_type"]<eq>"int"<block_start><assert_stmt>isinstance(self.sampling_hparas["sampling_min"] int)<block_end><elif_stmt>self.sampling_hparas["sampling_data_type"]<eq>"float"<block_start><assert_stmt>isinstance(self.sampling_hparas["sampling_min"] (int float))<block_end><block_end><assert_stmt>transform_name<in>name_to_transform_func<line_sep>self.max_level=_AUGMENTATION_MAX_LEVEL<line_sep>self.transform_name=transform_name<line_sep>self.magnitude=magnitude<line_sep>self.transform_fn=name_to_transform_func[transform_name]<line_sep>self.level_fn=level_to_arg[transform_name]<line_sep>self.level_paras=transform_max_paras[transform_name]<line_sep>self.prob=prob<line_sep>self.sampling_type=sampling_type<block_end><def_stmt>_get_magnitude self<arrow>float<block_start>"""
Get magnitude based on sampling type.
"""<if_stmt>self.sampling_type<eq>"gaussian"<block_start><return>max(0 min(self.max_level torch.normal(self.magnitude self.sampling_hparas["sampling_std"] size=(1 )).item() ) )<block_end><elif_stmt>self.sampling_type<eq>"uniform"<block_start><if_stmt>self.sampling_hparas["sampling_data_type"]<eq>"int"<block_start><return>torch.randint(self.sampling_hparas["sampling_min"] self.magnitude+1 size=(1 )).item()<block_end><elif_stmt>self.sampling_hparas["sampling_data_type"]<eq>"float"<block_start><return>(torch.rand(size=(1 )).item()<times>(self.magnitude-self.sampling_hparas["sampling_min"])+self.sampling_hparas["sampling_min"])<block_end><else_stmt><block_start><raise>ValueError("sampling_data_type must be either 'int' or 'float'")<block_end><block_end><else_stmt><block_start><raise>NotImplementedError<block_end><block_end><def_stmt>__call__ self video:torch.Tensor<arrow>torch.Tensor<block_start>"""
The input is a video tensor.
Args:
video (torch.Tensor): Input video tensor with shape (T, C, H, W).
"""<if_stmt>torch.rand(1).item()<g>self.prob<block_start><return>video<block_end>magnitude=self._get_magnitude()<line_sep>level_args=(self.level_fn(magnitude self.level_paras)<if>self.level_fn<is><not><none><else>())<line_sep><return>self.transform_fn(video *level_args **self.transform_hparas)<block_end><block_end> |
<import_stmt>KratosMultiphysics<import_stmt>KratosMultiphysics.GeoMechanicsApplication<as>KratosGeo<def_stmt>Factory settings Model<block_start><if_stmt>(type(settings)<ne>KratosMultiphysics.Parameters)<block_start><raise>Exception("expected input shall be a Parameters object, encapsulating a json string")<block_end><return>GapClosureInterfaceActivationProcess(Model settings["Parameters"])<block_end>## All the python processes should be derived from "python_process"
<class_stmt>GapClosureInterfaceActivationProcess(KratosMultiphysics.Process)<block_start><def_stmt>__init__ self Model settings<block_start>KratosMultiphysics.Process.__init__(self)<line_sep>model_part=Model[settings["model_part_name"].GetString()]<line_sep>params=KratosMultiphysics.Parameters("{}")<line_sep>params.AddValue("model_part_name" settings["model_part_name"])<line_sep>params.AddValue("gap_width_threshold" settings["gap_width_threshold"])<line_sep>params.AddValue("consider_gap_closure" settings["consider_gap_closure"])<line_sep>self.process=KratosGeo.GapClosureInterfaceProcess(model_part params)<block_end><def_stmt>ExecuteInitialize self<block_start>self.process.ExecuteInitialize()<block_end><def_stmt>ExecuteInitializeSolutionStep self<block_start>self.process.ExecuteInitializeSolutionStep()<block_end><def_stmt>ExecuteFinalizeSolutionStep self<block_start>self.process.ExecuteFinalizeSolutionStep()<block_end><def_stmt>ExecuteFinalize self<block_start>self.process.ExecuteFinalize()<block_end><block_end> |
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
"""
Handshake tests using Openssl 0.9.8 s_client against s2nd
"""<import_stmt>argparse<import_stmt>os<import_stmt>sys<import_stmt>subprocess<import_stmt>itertools<import_stmt>multiprocessing<import_stmt>threading<import_stmt>uuid<import_stmt>re<import_stmt>string<import_from_stmt>os environ<import_from_stmt>multiprocessing.pool ThreadPool<import_from_stmt>s2n_test_constants *<import_from_stmt>time sleep<line_sep>S_CLIENT_NEGOTIATED_CIPHER_PREFIX="Cipher : "<line_sep>PROTO_VERS_TO_S_CLIENT_ARG={S2N_TLS10:"-tls1" S2N_TLS11:"-tls1_1" S2N_TLS12:"-tls1_2" }<line_sep>use_corked_io=<false><def_stmt>cleanup_processes *processes<block_start><for_stmt>p processes<block_start>p.kill()<line_sep>p.wait()<block_end><block_end><def_stmt>validate_version expected_version output<block_start><for_stmt>line output.splitlines()<block_start><if_stmt>ACTUAL_VERSION_STR.format(expected_version<or>S2N_TLS10)<in>line<block_start><return>0<block_end><block_end><return>-1<block_end><def_stmt>validate_data_transfer expected_data s_client_out s2nd_out<block_start>"""
Verify that the application data written between s_client and s2nd is encrypted and decrypted successfully.
"""<line_sep>found=0<for_stmt>line s2nd_out.splitlines()<block_start><if_stmt>expected_data<in>line<block_start>found=1<line_sep><break><block_end><block_end><if_stmt>found<eq>0<block_start>print("Did not find "+expected_data+" in output from s2nd")<line_sep><return>-1<block_end>found=0<for_stmt>line s_client_out.splitlines()<block_start><if_stmt>expected_data<in>line<block_start>found=1<line_sep><break><block_end><block_end><if_stmt>found<eq>0<block_start>print("Did not find "+expected_data+" in output from s_client")<line_sep><return>-1<block_end><return>0<block_end><def_stmt>find_expected_cipher expected_cipher s_client_out<block_start>"""
Make sure s_client and s2nd negotiate the cipher suite we expect
"""<line_sep>s_client_out_len=len(s_client_out)<line_sep>full_expected_string=S_CLIENT_NEGOTIATED_CIPHER_PREFIX+expected_cipher<for_stmt>line s_client_out.splitlines()<block_start><if_stmt>full_expected_string<in>line<block_start><return>0<line_sep><break><block_end><block_end>print("Failed to find "+expected_cipher+" in s_client output")<line_sep><return>-1<block_end><def_stmt>read_process_output_until process marker<block_start>output=""<while_stmt><true><block_start>line=process.stdout.readline().decode("utf-8")<line_sep>output<augadd>line<if_stmt>marker<in>line<block_start><return>output<block_end><block_end><return>output<block_end><def_stmt>try_handshake endpoint port cipher ssl_version server_name=<none> strict_hostname=<false> server_cert=<none> server_key=<none> server_cert_key_list=<none> expected_server_cert=<none> server_cipher_pref=<none> ocsp=<none> sig_algs=<none> curves=<none> resume=<false> no_ticket=<false> prefer_low_latency=<false> enter_fips_mode=<false> client_auth=<none> client_cert=DEFAULT_CLIENT_CERT_PATH client_key=DEFAULT_CLIENT_KEY_PATH expected_cipher=<none> expected_extensions=<none><block_start>"""
Attempt to handshake against s2nd listening on `endpoint` and `port` using Openssl s_client
:param int endpoint: endpoint for s2nd to listen on
:param int port: port for s2nd to listen on
:param str cipher: ciphers for Openssl s_client to offer. See https://www.openssl.org/docs/man1.0.2/apps/ciphers.html
:param int ssl_version: SSL version for s_client to use
:param str server_name: server_name value for s_client to send
:param bool strict_hostname: whether s_client should strictly check to see if server certificate matches the server_name
:param str server_cert: path to certificate for s2nd to use
:param str server_key: path to private key for s2nd to use
:param list server_cert_key_list: a list of (cert_path, key_path) tuples for multicert tests.
:param str expected_server_cert: Path to the expected server certificate should be sent to s_client.
:param str ocsp: path to OCSP response file for stapling
:param str sig_algs: Signature algorithms for s_client to offer
:param str curves: Elliptic curves for s_client to offer
:param bool resume: True if s_client should try to reconnect to s2nd and reuse the same TLS session. False for normal negotiation.
:param bool no_ticket: True if s2n server should not use session ticket to resume the same TLS session.
:param bool prefer_low_latency: True if s2nd should use 1500 for max outgoing record size. False for default max.
:param bool enter_fips_mode: True if s2nd should enter libcrypto's FIPS mode. Libcrypto must be built with a FIPS module to enter FIPS mode.
:param bool client_auth: True if the test should try and use client authentication
:param str client_cert: Path to the client's cert file
:param str client_key: Path to the client's private key file
:param str expected_cipher: the cipher we expect to negotiate
:param list expected_extensions: list of expected extensions that s_client should receive.
:return: 0 on successfully negotiation(s), -1 on failure
"""<line_sep># Override certificate for ECDSA if unspecified. We can remove this when we
# support multiple certificates
<if_stmt>server_cert<is><none><and>server_cert_key_list<is><none><and>"ECDSA"<in>cipher<block_start>server_cert=TEST_ECDSA_CERT<line_sep>server_key=TEST_ECDSA_KEY<block_end># Fire up s2nd
s2nd_cmd=["../../bin/s2nd"]<if_stmt>server_cert<is><not><none><block_start>s2nd_cmd.extend(["--cert" server_cert])<block_end><if_stmt>server_key<is><not><none><block_start>s2nd_cmd.extend(["--key" server_key])<block_end><if_stmt>server_cert_key_list<is><not><none><block_start><for_stmt>cert_key_path server_cert_key_list<block_start>cert_path=cert_key_path[0]<line_sep>key_path=cert_key_path[1]<line_sep>s2nd_cmd.extend(["--cert" cert_path])<line_sep>s2nd_cmd.extend(["--key" key_path])<block_end><block_end><if_stmt>ocsp<is><not><none><block_start>s2nd_cmd.extend(["--ocsp" ocsp])<block_end><if_stmt>prefer_low_latency<eq><true><block_start>s2nd_cmd.append("--prefer-low-latency")<block_end><if_stmt>client_auth<is><not><none><block_start>s2nd_cmd.append("-m")<line_sep>s2nd_cmd.extend(["-t" client_cert])<block_end><if_stmt>use_corked_io<block_start>s2nd_cmd.append("-C")<block_end>s2nd_cmd.extend([str(endpoint) str(port)])<line_sep>s2nd_ciphers="test_all_tls12"<if_stmt>server_cipher_pref<is><not><none><block_start>s2nd_ciphers=server_cipher_pref<block_end><if_stmt>enter_fips_mode<eq><true><block_start>s2nd_ciphers="test_all_fips"<line_sep>s2nd_cmd.append("--enter-fips-mode")<block_end>s2nd_cmd.append("-c")<line_sep>s2nd_cmd.append(s2nd_ciphers)<if_stmt>no_ticket<block_start>s2nd_cmd.append("-T")<block_end>s2nd=subprocess.Popen(s2nd_cmd stdin=subprocess.PIPE stdout=subprocess.PIPE)<line_sep># Make sure s2nd has started
s2nd.stdout.readline()<line_sep>s_client_cmd=["openssl" "s_client" "-connect" str(endpoint)+":"+str(port)]<if_stmt>ssl_version<is><not><none><block_start>s_client_cmd.append(PROTO_VERS_TO_S_CLIENT_ARG[ssl_version])<block_end><if_stmt>cipher<is><not><none><block_start>s_client_cmd.extend(["-cipher" cipher])<block_end># For verifying extensions that s2nd sends expected extensions
s_client_cmd.append("-tlsextdebug")<line_sep># Fire up s_client
s_client=subprocess.Popen(s_client_cmd stdin=subprocess.PIPE stdout=subprocess.PIPE stderr=subprocess.STDOUT)<line_sep>s_client_out=""<line_sep>s2nd_out=""<line_sep>openssl_connect_marker="CONNECTED"<line_sep>openssl_reconnect_marker="drop connection and then reconnect"<line_sep>end_of_msg_marker="__end_of_msg__"<line_sep># Wait until openssl and s2n have finished the handshake and are connected to each other
s_client_out<augadd>read_process_output_until(s_client openssl_connect_marker)<line_sep>s2nd_out<augadd>read_process_output_until(s2nd openssl_connect_marker)<if_stmt>resume<eq><true><block_start><for_stmt>i range(0 5)# Wait for openssl to resume connection 5 times in a row, and verify resumption works.
<block_start>s_client_out<augadd>read_process_output_until(s_client openssl_reconnect_marker)<line_sep>s2nd_out<augadd>read_process_output_until(s2nd openssl_connect_marker)<block_end><block_end>data_to_validate=cipher+" "+str(uuid.uuid4())<line_sep># Write the data to openssl towards s2n server
msg=(data_to_validate+"\n"+end_of_msg_marker+"\n\n").encode("utf-8")<line_sep>s_client.stdin.write(msg)<line_sep>s_client.stdin.flush()<line_sep># Write the data to s2n towards openssl client
s2nd.stdin.write(msg)<line_sep>s2nd.stdin.flush()<line_sep># Wait for the Data transfer to complete between OpenSSL and s2n
s_client_out<augadd>read_process_output_until(s_client end_of_msg_marker)<line_sep>s2nd_out<augadd>read_process_output_until(s2nd end_of_msg_marker)<line_sep>cleanup_processes(s2nd s_client)<if_stmt>validate_data_transfer(data_to_validate s_client_out s2nd_out)<ne>0<block_start><return>-1<block_end><if_stmt>validate_version(ssl_version s2nd_out)<ne>0<block_start><return>-1<block_end><if_stmt>resume<is><true><block_start><if_stmt>validate_resume(s2nd_out)<ne>0<block_start><return>-1<block_end><block_end><if_stmt>ocsp<is><not><none><block_start><if_stmt>validate_ocsp(s_client_out)<ne>0<block_start><return>-1<block_end><block_end><if_stmt>expected_cipher<is><not><none><block_start><if_stmt>find_expected_cipher(expected_cipher s_client_out)<ne>0<block_start><return>-1<block_end><block_end><if_stmt>strict_hostname<is><true><block_start><if_stmt>validate_hostname(s_client_out)<ne>0<block_start><return>-1<block_end><block_end><if_stmt>expected_server_cert<is><not><none><block_start><if_stmt>validate_selected_certificate(s_client_out expected_server_cert)<ne>0<block_start><return>-1<block_end><block_end><if_stmt>expected_extensions<is><not><none><block_start><for_stmt>extension expected_extensions<block_start><if_stmt>extension.s_client_validate(s_client_out)<ne>0<block_start><return>-1<block_end><block_end><block_end><return>0<block_end><def_stmt>cert_path_to_str cert_path# Converts a path to a cert into a string usable for printing to test output
# Example: "./test_certs/rsa_2048_sha256_client_cert.pem" => "RSA-2048-SHA256"
<block_start><return>'-'.join(cert_path[cert_path.rfind('/')+1:].split('_')[:3]).upper()<block_end><def_stmt>print_result result_prefix return_code<block_start>suffix=""<if_stmt>return_code<eq>0<block_start><if_stmt>sys.stdout.isatty()<block_start>suffix="\033[32;1mPASSED\033[0m"<block_end><else_stmt><block_start>suffix="PASSED"<block_end><block_end><else_stmt><block_start><if_stmt>sys.stdout.isatty()<block_start>suffix="\033[31;1mFAILED\033[0m"<block_end><else_stmt><block_start>suffix="FAILED"<block_end><block_end>print(result_prefix+suffix)<block_end><def_stmt>create_thread_pool <block_start>threadpool_size=multiprocessing.cpu_count()<times>4# Multiply by 4 to increase parallelization between integration tests
print("\tCreating ThreadPool of size: "+str(threadpool_size))<line_sep>threadpool=ThreadPool(processes=threadpool_size)<line_sep><return>threadpool<block_end><def_stmt>run_handshake_test host port ssl_version cipher fips_mode no_ticket use_client_auth client_cert_path client_key_path<block_start>cipher_name=cipher.openssl_name<line_sep>cipher_vers=cipher.min_tls_vers<line_sep># Skip the cipher if openssl can't test it. 3DES/RC4 are disabled by default in 1.1.1
<if_stmt><not>cipher.openssl_1_1_1_compatible<block_start><return>0<block_end><if_stmt>ssl_version<and>ssl_version<l>cipher_vers<block_start><return>0<block_end>client_cert_str=str(use_client_auth)<if_stmt>(use_client_auth<is><not><none>)<and>(client_cert_path<is><not><none>)<block_start>client_cert_str=cert_path_to_str(client_cert_path)<block_end>ret=try_handshake(host port cipher_name ssl_version no_ticket=no_ticket enter_fips_mode=fips_mode client_auth=use_client_auth client_cert=client_cert_path client_key=client_key_path)<line_sep>result_prefix="Cipher: %-30s ClientCert: %-16s Vers: %-8s ... "%(cipher_name client_cert_str S2N_PROTO_VERS_TO_STR[ssl_version])<line_sep>print_result(result_prefix ret)<line_sep><return>ret<block_end><def_stmt>handshake_test host port test_ciphers fips_mode no_ticket=<false> use_client_auth=<none> use_client_cert=<none> use_client_key=<none><block_start>"""
Basic handshake tests using all valid combinations of supported cipher suites and TLS versions.
"""<line_sep>print("\n\tRunning handshake tests:")<line_sep>failed=0<for_stmt>ssl_version [S2N_TLS10 <none>]<block_start>print("\n\tTesting ciphers using client version: "+S2N_PROTO_VERS_TO_STR[ssl_version])<line_sep>port_offset=0<line_sep>results=[]<line_sep># Only test non ECC ciphers, openssl 0.9.8 has trouble with ECDHE.
# Only test 1.0/SSLv3 ciphers since 0.9.8 only supports those.
<for_stmt>cipher filter(<lambda>x:"ECDHE"<not><in>x.openssl_name<and>x.min_tls_vers<l>S2N_TLS11 test_ciphers)<block_start>async_result=run_handshake_test(host port+port_offset ssl_version cipher fips_mode no_ticket use_client_auth use_client_cert use_client_key)<line_sep>port_offset<augadd>1<line_sep>results.append(async_result)<block_end><for_stmt>async_result results<block_start><if_stmt>async_result<ne>0<block_start>failed=1<block_end><block_end><block_end><return>failed<block_end><def_stmt>main <block_start>parser=argparse.ArgumentParser(description='Runs TLS server integration tests against s2nd using Openssl s_client')<line_sep>parser.add_argument('host' help='The host for s2nd to bind to')<line_sep>parser.add_argument('port' type=int help='The port for s2nd to bind to')<line_sep>parser.add_argument('--use_corked_io' action='store_true' help='Turn corked IO on/off')<line_sep>parser.add_argument('--libcrypto' default='openssl-1.1.1' choices=S2N_LIBCRYPTO_CHOICES help="""The Libcrypto that s2n was built with. s2n supports different cipher suites depending on
libcrypto version. Defaults to openssl-1.1.1.""")<line_sep>args=parser.parse_args()<line_sep>use_corked_io=args.use_corked_io<line_sep># Retrieve the test ciphers to use based on the libcrypto version s2n was built with
test_ciphers=S2N_LIBCRYPTO_TO_TEST_CIPHERS[args.libcrypto]<line_sep>host=args.host<line_sep>port=args.port<line_sep>libcrypto_version=args.libcrypto<line_sep>fips_mode=<false><if_stmt>environ.get("S2N_TEST_IN_FIPS_MODE")<is><not><none><block_start>fips_mode=<true><line_sep>print("\nRunning s2nd in FIPS mode.")<block_end>print("\nRunning tests with: "+os.popen('openssl version').read())<if_stmt>use_corked_io<eq><true><block_start>print("Corked IO is on")<block_end>failed=0<line_sep>failed<augadd>handshake_test(host port test_ciphers fips_mode)<line_sep><return>failed<block_end><if_stmt>__name__<eq>"__main__"<block_start>sys.exit(main())<block_end> |
<import_stmt>json<import_from_stmt>db.notes getNotes<def_stmt>main event context<block_start><return>{"statusCode":200 "body":json.dumps(getNotes() indent=2)}<block_end> |
# Lint as: python3
# Copyright 2021 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for generic image dataset creation."""<import_stmt>os<import_from_stmt>delf.python.datasets utils<class_stmt>ImagesFromList()<block_start>"""A generic data loader that loads images from a list.
Supports images of different sizes.
"""<def_stmt>__init__ self root image_paths imsize=<none> bounding_boxes=<none> loader=utils.default_loader<block_start>"""ImagesFromList object initialization.
Args:
root: String, root directory path.
image_paths: List, relative image paths as strings.
imsize: Integer, defines the maximum size of longer image side.
bounding_boxes: List of (x1,y1,x2,y2) tuples to crop the query images.
loader: Callable, a function to load an image given its path.
Raises:
ValueError: Raised if `image_paths` list is empty.
"""<line_sep># List of the full image filenames.
images_filenames=[os.path.join(root image_path)<for>image_path image_paths]<if_stmt><not>images_filenames<block_start><raise>ValueError("Dataset contains 0 images.")<block_end>self.root=root<line_sep>self.images=image_paths<line_sep>self.imsize=imsize<line_sep>self.images_filenames=images_filenames<line_sep>self.bounding_boxes=bounding_boxes<line_sep>self.loader=loader<block_end><def_stmt>__getitem__ self index<block_start>"""Called to load an image at the given `index`.
Args:
index: Integer, image index.
Returns:
image: Tensor, loaded image.
"""<line_sep>path=self.images_filenames[index]<if_stmt>self.bounding_boxes<is><not><none><block_start>img=self.loader(path self.imsize self.bounding_boxes[index])<block_end><else_stmt><block_start>img=self.loader(path self.imsize)<block_end><return>img<block_end><def_stmt>__len__ self<block_start>"""Implements the built-in function len().
Returns:
len: Number of images in the dataset.
"""<line_sep><return>len(self.images_filenames)<block_end><block_end> |
# Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>pytest<line_sep>V11_SUPPORTED=['11.5.4' '11.6.0' '11.6.1' '11.6.2']<line_sep>V12_SUPPORTED=['12.0.0' '12.1.0']<def_stmt>setup_sshd_test request mgmt_root<block_start><def_stmt>teardown <block_start>d.allow=['ALL']<line_sep>d.banner='disabled'<line_sep>d.bannerText=''<line_sep>d.inactivityTimeout=0<line_sep>d.logLevel='info'<line_sep>d.login='enabled'<if_stmt>pytest.config.getoption('--release')<in>V12_SUPPORTED<block_start>d.port=22<block_end>d.update()<block_end>request.addfinalizer(teardown)<line_sep>d=mgmt_root.tm.sys.sshd.load()<line_sep><return>d<block_end>@pytest.mark.skipif(pytest.config.getoption('--release')<not><in>V11_SUPPORTED reason='Needs v11 TMOS to pass')<class_stmt>TestSshd11(object)<block_start><def_stmt>test_load self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<assert_stmt>ssh1.allow<eq>ssh2.allow<assert_stmt>ssh1.banner<eq>ssh2.banner<assert_stmt>ssh1.inactivityTimeout<eq>ssh2.inactivityTimeout<assert_stmt>ssh1.logLevel<eq>ssh2.logLevel<assert_stmt>ssh1.login<eq>ssh2.login<block_end><def_stmt>test_update_allow self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>ssh1.allow=['192.168.1.1']<line_sep>ssh1.update()<assert_stmt>['192.168.1.1']<eq>ssh1.allow<assert_stmt>['192.168.1.1']<ne>ssh2.allow<line_sep># Refresh
ssh2.refresh()<assert_stmt>['192.168.1.1']<eq>ssh2.allow<block_end><def_stmt>test_update_banner self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>banners=['enabled' 'disabled']<for_stmt>banner banners<block_start>ssh1.banner=banner<line_sep>ssh1.update()<assert_stmt>banner<eq>ssh1.banner<assert_stmt>banner<ne>ssh2.banner<line_sep># Refresh
ssh2.refresh()<assert_stmt>banner<eq>ssh2.banner<block_end><block_end><def_stmt>test_update_bannerText self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>ssh1.bannerText='foo banner'<line_sep>ssh1.update()<assert_stmt>'foo banner'<eq>ssh1.bannerText<assert_stmt><not>hasattr(ssh2 'bannerText')<line_sep># Refresh
ssh2.refresh()<assert_stmt>'foo banner'<eq>ssh2.bannerText<block_end><def_stmt>test_update_inactivityTimeout self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>ssh1.inactivityTimeout=10<line_sep>ssh1.update()<assert_stmt>10<eq>ssh1.inactivityTimeout<assert_stmt>10<ne>ssh2.inactivityTimeout<line_sep># Refresh
ssh2.refresh()<assert_stmt>10<eq>ssh2.inactivityTimeout<block_end><def_stmt>test_update_logLevel self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>levels=['debug' 'debug1' 'debug2' 'debug3' 'error' 'fatal' 'info' 'quiet' 'verbose']<for_stmt>level levels<block_start>ssh1.logLevel=level<line_sep>ssh1.update()<assert_stmt>level<eq>ssh1.logLevel<assert_stmt>level<ne>ssh2.logLevel<line_sep># Refresh
ssh2.refresh()<assert_stmt>level<eq>ssh2.logLevel<block_end><block_end><def_stmt>test_update_login self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>logins=['disabled' 'enabled']<for_stmt>login logins<block_start>ssh1.login=login<line_sep>ssh1.update()<assert_stmt>login<eq>ssh1.login<assert_stmt>login<ne>ssh2.login<line_sep># Refresh
ssh2.refresh()<assert_stmt>login<eq>ssh2.login<block_end><block_end><block_end>@pytest.mark.skipif(pytest.config.getoption('--release')<not><in>V12_SUPPORTED reason='Needs v12 TMOS to pass')<class_stmt>TestSshd12(object)<block_start><def_stmt>test_load self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<assert_stmt>ssh1.allow<eq>ssh2.allow<assert_stmt>ssh1.banner<eq>ssh2.banner<assert_stmt>ssh1.inactivityTimeout<eq>ssh2.inactivityTimeout<assert_stmt>ssh1.logLevel<eq>ssh2.logLevel<assert_stmt>ssh1.login<eq>ssh2.login<assert_stmt>ssh1.port<eq>ssh2.port<block_end><def_stmt>test_update_allow self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>ssh1.allow=['192.168.1.1']<line_sep>ssh1.update()<assert_stmt>['192.168.1.1']<eq>ssh1.allow<assert_stmt>['192.168.1.1']<ne>ssh2.allow<line_sep># Refresh
ssh2.refresh()<assert_stmt>['192.168.1.1']<eq>ssh2.allow<block_end><def_stmt>test_update_banner self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>banners=['enabled' 'disabled']<for_stmt>banner banners<block_start>ssh1.banner=banner<line_sep>ssh1.update()<assert_stmt>banner<eq>ssh1.banner<assert_stmt>banner<ne>ssh2.banner<line_sep># Refresh
ssh2.refresh()<assert_stmt>banner<eq>ssh2.banner<block_end><block_end><def_stmt>test_update_bannerText self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>ssh1.bannerText='foo banner'<line_sep>ssh1.update()<assert_stmt>'foo banner'<eq>ssh1.bannerText<assert_stmt><not>hasattr(ssh2 'bannerText')<line_sep># Refresh
ssh2.refresh()<assert_stmt>'foo banner'<eq>ssh2.bannerText<block_end><def_stmt>test_update_inactivityTimeout self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>ssh1.inactivityTimeout=10<line_sep>ssh1.update()<assert_stmt>10<eq>ssh1.inactivityTimeout<assert_stmt>10<ne>ssh2.inactivityTimeout<line_sep># Refresh
ssh2.refresh()<assert_stmt>10<eq>ssh2.inactivityTimeout<block_end><def_stmt>test_update_logLevel self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>levels=['debug' 'debug1' 'debug2' 'debug3' 'error' 'fatal' 'info' 'quiet' 'verbose']<for_stmt>level levels<block_start>ssh1.logLevel=level<line_sep>ssh1.update()<assert_stmt>level<eq>ssh1.logLevel<assert_stmt>level<ne>ssh2.logLevel<line_sep># Refresh
ssh2.refresh()<assert_stmt>level<eq>ssh2.logLevel<block_end><block_end><def_stmt>test_update_login self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>logins=['disabled' 'enabled']<for_stmt>login logins<block_start>ssh1.login=login<line_sep>ssh1.update()<assert_stmt>login<eq>ssh1.login<assert_stmt>login<ne>ssh2.login<line_sep># Refresh
ssh2.refresh()<assert_stmt>login<eq>ssh2.login<block_end><block_end><def_stmt>test_update_port self request mgmt_root<block_start>ssh1=setup_sshd_test(request mgmt_root)<line_sep>ssh2=setup_sshd_test(request mgmt_root)<line_sep>ssh1.port=1234<line_sep>ssh1.update()<assert_stmt>1234<eq>ssh1.port<assert_stmt>1234<ne>ssh2.port<line_sep># Refresh
ssh2.refresh()<assert_stmt>1234<eq>ssh2.port<block_end><block_end> |
# coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for weight_symmetry.prune."""<import_stmt>glob<import_from_stmt>os path<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing flagsaver<import_from_stmt>rigl.experimental.jax prune<class_stmt>PruneTest(absltest.TestCase)<block_start><def_stmt>test_prune_fixed_schedule self<block_start>"""Tests training/pruning driver with a fixed global sparsity."""<line_sep>experiment_dir=self.create_tempdir().full_path<line_sep>eval_flags=dict(epochs=1 pruning_rate=0.95 experiment_dir=experiment_dir )<with_stmt>flagsaver.flagsaver(**eval_flags)<block_start>prune.main([])<line_sep>outfile=path.join(experiment_dir '*' 'events.out.tfevents.*')<line_sep>files=glob.glob(outfile)<line_sep>self.assertTrue(len(files)<eq>1<and>path.exists(files[0]))<block_end><block_end><def_stmt>test_prune_global_pruning_schedule self<block_start>"""Tests training/pruning driver with a global sparsity schedule."""<line_sep>experiment_dir=self.create_tempdir().full_path<line_sep>eval_flags=dict(epochs=10 pruning_schedule='[(5, 0.33), (7, 0.66), (9, 0.95)]' experiment_dir=experiment_dir )<with_stmt>flagsaver.flagsaver(**eval_flags)<block_start>prune.main([])<line_sep>outfile=path.join(experiment_dir '*' 'events.out.tfevents.*')<line_sep>files=glob.glob(outfile)<line_sep>self.assertTrue(len(files)<eq>1<and>path.exists(files[0]))<block_end><block_end><def_stmt>test_prune_local_pruning_schedule self<block_start>"""Tests training/pruning driver with a single layer sparsity schedule."""<line_sep>experiment_dir=self.create_tempdir().full_path<line_sep>eval_flags=dict(epochs=10 pruning_schedule='{1:[(5, 0.33), (7, 0.66), (9, 0.95)]}' experiment_dir=experiment_dir )<with_stmt>flagsaver.flagsaver(**eval_flags)<block_start>prune.main([])<line_sep>outfile=path.join(experiment_dir '*' 'events.out.tfevents.*')<line_sep>files=glob.glob(outfile)<line_sep>self.assertTrue(len(files)<eq>1<and>path.exists(files[0]))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end> |
__copyright__="Copyright (C) 2014 <NAME>"<line_sep>__license__="""
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""<import_from_stmt>django.utils.translation gettext_lazy<as>_ pgettext <import_from_stmt>django.contrib admin<import_from_stmt>course.models Course Event ParticipationTag Participation ParticipationPermission ParticipationRole ParticipationRolePermission ParticipationPreapproval AuthenticationToken InstantFlowRequest FlowSession FlowPageData FlowPageVisit FlowPageVisitGrade FlowRuleException GradingOpportunity GradeChange InstantMessage Exam ExamTicket <import_from_stmt>django forms<import_from_stmt>relate.utils string_concat<import_from_stmt>course.enrollment approve_enrollment deny_enrollment <import_from_stmt>course.constants participation_permission<as>pperm exam_ticket_states <import_from_stmt>typing Any Text Tuple# noqa
# {{{ permission helpers
<def_stmt>_filter_courses_for_user queryset user<block_start><if_stmt>user.is_superuser<block_start><return>queryset<block_end>z=queryset.filter(participations__user=user participations__roles__permissions__permission=pperm.use_admin_interface)<line_sep><return>z<block_end><def_stmt>_filter_course_linked_obj_for_user queryset user<block_start><if_stmt>user.is_superuser<block_start><return>queryset<block_end><return>queryset.filter(course__participations__user=user course__participations__roles__permissions__permission# noqa
=pperm.use_admin_interface)<block_end><def_stmt>_filter_participation_linked_obj_for_user queryset user<block_start><if_stmt>user.is_superuser<block_start><return>queryset<block_end><return>queryset.filter(participation__course__participations__user=user participation__course__participations__roles__permissions__permission# noqa
=pperm.use_admin_interface)<block_end># }}}
# {{{ list filter helper
<def_stmt>_filter_related_only filter_arg:str<arrow>Tuple[str Any]<block_start><return>(filter_arg admin.RelatedOnlyFieldListFilter)<block_end># }}}
# {{{ course
<class_stmt>UnsafePasswordInput(forms.TextInput)# This sends passwords back to the user--not ideal, but OK for the XMPP
# password.
<block_start>input_type="password"<block_end><class_stmt>CourseAdminForm(forms.ModelForm)<block_start><class_stmt>Meta<block_start>model=Course<line_sep>widgets={"course_xmpp_password":UnsafePasswordInput}<line_sep>exclude=()<block_end><block_end><class_stmt>CourseAdmin(admin.ModelAdmin)<block_start>list_display=("identifier" "number" "name" "time_period" "start_date" "end_date" "hidden" "listed" "accepts_enrollment")<line_sep>list_editable=("number" "name" "time_period" "start_date" "end_date" "hidden" "listed" "accepts_enrollment")<line_sep>list_filter=("number" "time_period" "hidden" "listed" "accepts_enrollment")<line_sep>date_hierarchy="start_date"<line_sep>search_fields=("identifier" "number" "name" "time_period")<line_sep>form=CourseAdminForm<line_sep>save_on_top=<true><line_sep># {{{ permissions
<def_stmt>has_add_permission self request# These are created only through the course creation form.
<block_start><return><false><block_end><def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_courses_for_user(qs request.user)<block_end># }}}
<block_end>admin.site.register(Course CourseAdmin)<line_sep># }}}
# {{{ events
<class_stmt>EventAdmin(admin.ModelAdmin)<block_start>list_display=("course" "kind" "ordinal" "time" "end_time" "shown_in_calendar")<line_sep>list_filter=(_filter_related_only("course") "kind" "shown_in_calendar")<line_sep>date_hierarchy="time"<line_sep>search_fields=("course__identifier" "kind" )<def_stmt>__unicode__ self# pragma: no cover # not used
<block_start><return>"{}{} in {}".format(self.kind " (%s)"%str(self.ordinal)<if>self.ordinal<is><not><none><else>"" self.course)<block_end>__str__=__unicode__<line_sep>list_editable=("ordinal" "time" "end_time" "shown_in_calendar")<line_sep># {{{ permissions
<def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_course_linked_obj_for_user(qs request.user)<block_end><def_stmt>formfield_for_foreignkey self db_field request **kwargs<block_start><if_stmt>db_field.name<eq>"course"<block_start>kwargs["queryset"]=_filter_courses_for_user(Course.objects request.user)<block_end><return>super().formfield_for_foreignkey(db_field request **kwargs)<block_end># }}}
<block_end>admin.site.register(Event EventAdmin)<line_sep># }}}
# {{{ participation tags
<class_stmt>ParticipationTagAdmin(admin.ModelAdmin)<block_start>list_filter=(_filter_related_only("course") )<line_sep># {{{ permissions
<def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_course_linked_obj_for_user(qs request.user)<block_end><def_stmt>formfield_for_foreignkey self db_field request **kwargs<block_start><if_stmt>db_field.name<eq>"course"<block_start>kwargs["queryset"]=_filter_courses_for_user(Course.objects request.user)<block_end><return>super().formfield_for_foreignkey(db_field request **kwargs)<block_end># }}}
<block_end>admin.site.register(ParticipationTag ParticipationTagAdmin)<line_sep># }}}
# {{{ participations
<class_stmt>ParticipationRolePermissionInline(admin.TabularInline)<block_start>model=ParticipationRolePermission<line_sep>extra=3<block_end><class_stmt>ParticipationRoleAdmin(admin.ModelAdmin)<block_start>inlines=(ParticipationRolePermissionInline )<line_sep>list_filter=(_filter_related_only("course") "identifier")<def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<if_stmt>request.user.is_superuser<block_start><return>qs<block_end><return>_filter_course_linked_obj_for_user(qs request.user)<block_end><block_end>admin.site.register(ParticipationRole ParticipationRoleAdmin)<class_stmt>ParticipationPermissionInline(admin.TabularInline)<block_start>model=ParticipationPermission<line_sep>extra=3<block_end><class_stmt>ParticipationForm(forms.ModelForm)<block_start><class_stmt>Meta<block_start>model=Participation<line_sep>exclude=("role" )<block_end><def_stmt>clean self<block_start>super().clean()<for_stmt>tag self.cleaned_data.get("tags" [])<block_start><if_stmt>tag.course<ne>self.cleaned_data.get("course")<block_start><import_from_stmt>django.core.exceptions ValidationError<line_sep><raise>ValidationError({"tags":_("Tags must belong to same course as "<concat>"participation.")})<block_end><block_end><for_stmt>role self.cleaned_data.get("roles" [])<block_start><if_stmt>role.course<ne>self.cleaned_data.get("course")<block_start><import_from_stmt>django.core.exceptions ValidationError<line_sep><raise>ValidationError({"roles":_("Role must belong to same course as "<concat>"participation.")})<block_end><block_end><block_end><block_end><class_stmt>ParticipationAdmin(admin.ModelAdmin)<block_start>form=ParticipationForm<def_stmt>get_roles self obj<block_start><return>", ".join(str(role.name)<for>role obj.roles.all())<block_end>get_roles.short_description=_("Roles")# type: ignore
<def_stmt>get_tags self obj<block_start><return>", ".join(str(tag.name)<for>tag obj.tags.all())<block_end>get_tags.short_description=_("Tags")# type: ignore
# Fixme: This can be misleading when a non-superuser clicks on the
# link of a user who also attends other courses.
<def_stmt>get_user self obj<block_start><import_from_stmt>django.urls reverse<import_from_stmt>django.conf settings<import_from_stmt>django.utils.html mark_safe<line_sep><return>mark_safe(string_concat("<a href='%(link)s'>" "%(user_fullname)s" "</a>")%{"link":reverse("admin:%s_change"%settings.AUTH_USER_MODEL.replace("." "_").lower() args=(obj.user.id )) "user_fullname":obj.user.get_full_name(force_verbose_blank=<true>) })<block_end>get_user.short_description=pgettext("real name of a user" "Name")# type:ignore # noqa
get_user.admin_order_field="user__last_name"# type: ignore
get_user.allow_tags=<true># type: ignore
list_display=("user" "get_user" "course" "get_roles" "status" "get_tags" )<def_stmt>get_list_filter self request<block_start><if_stmt>request<is><not><none><and>request.user.is_superuser<block_start><return>("course" "roles__name" "status" "tags")<block_end><return>(_filter_related_only("course") _filter_related_only("roles") "status" _filter_related_only("tags"))<block_end>raw_id_fields=("user" )<line_sep>filter_horizontal=("tags" "roles" )<line_sep>search_fields=("course__identifier" "user__username" "user__first_name" "user__last_name" )<line_sep>actions=[approve_enrollment deny_enrollment]<line_sep>inlines=(ParticipationPermissionInline )<line_sep>save_on_top=<true><line_sep># {{{ permissions
<def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_course_linked_obj_for_user(qs request.user)<block_end><def_stmt>formfield_for_foreignkey self db_field request **kwargs<block_start><if_stmt>db_field.name<eq>"course"<block_start>kwargs["queryset"]=_filter_courses_for_user(Course.objects request.user)<block_end># Fixme: This seems to be unreachable
<if_stmt>db_field.name<eq>"tags"<block_start>kwargs["queryset"]=_filter_course_linked_obj_for_user(ParticipationTag.objects request.user)<block_end><return>super().formfield_for_foreignkey(db_field request **kwargs)<block_end># }}}
<block_end>admin.site.register(Participation ParticipationAdmin)<class_stmt>ParticipationPreapprovalAdmin(admin.ModelAdmin)<block_start><def_stmt>get_roles self obj<block_start><return>", ".join(str(role.name)<for>role obj.roles.all())<block_end>get_roles.short_description=_("Roles")# type: ignore
list_display=("email" "institutional_id" "course" "get_roles" "creation_time" "creator")<line_sep>list_filter=(_filter_related_only("course") _filter_related_only("roles"))<line_sep>search_fields=("email" "institutional_id" )<line_sep># {{{ permissions
<def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<if_stmt>request.user.is_superuser<block_start><return>qs<block_end><return>_filter_course_linked_obj_for_user(qs request.user)<block_end>exclude=("creator" "creation_time" "role")<def_stmt>save_model self request obj form change<block_start>obj.creator=request.user<line_sep>obj.save()<block_end><def_stmt>formfield_for_foreignkey self db_field request **kwargs<block_start><if_stmt>db_field.name<eq>"course"<block_start>kwargs["queryset"]=_filter_courses_for_user(Course.objects request.user)<block_end><return>super().formfield_for_foreignkey(db_field request **kwargs)<block_end># }}}
<block_end>admin.site.register(ParticipationPreapproval ParticipationPreapprovalAdmin)<line_sep># }}}
<class_stmt>AuthenticationTokenAdmin(admin.ModelAdmin)<block_start>list_display=("id" "participation" "restrict_to_participation_role" "description" "valid_until" "revocation_time")<line_sep>date_hierarchy="creation_time"<line_sep>search_fields=("id" "description" "participation__user__username")<block_end>admin.site.register(AuthenticationToken AuthenticationTokenAdmin)<class_stmt>InstantFlowRequestAdmin(admin.ModelAdmin)<block_start>list_display=("course" "flow_id" "start_time" "end_time" "cancelled")<line_sep>list_filter=(_filter_related_only("course") )<line_sep>date_hierarchy="start_time"<line_sep>search_fields=("email" )<block_end>admin.site.register(InstantFlowRequest InstantFlowRequestAdmin)<line_sep># {{{ flow sessions
<class_stmt>FlowPageDataInline(admin.TabularInline)<block_start>model=FlowPageData<line_sep>extra=0<block_end><class_stmt>FlowSessionAdmin(admin.ModelAdmin)<block_start><def_stmt>get_participant self obj<block_start><if_stmt>obj.participation<is><none><block_start><return><none><block_end><return>obj.participation.user<block_end>get_participant.short_description=_("Participant")# type: ignore
get_participant.admin_order_field="participation__user"# type: ignore
search_fields=("=id" "flow_id" "access_rules_tag" "participation__user__username" "participation__user__first_name" "participation__user__last_name" "user__username" "user__first_name" "user__last_name" )<line_sep>list_display=("id" "flow_id" "get_participant" "course" "start_time" "completion_time" "access_rules_tag" "in_progress" #"expiration_mode",
)<line_sep>list_display_links=("flow_id" "get_participant" )<line_sep>date_hierarchy="start_time"<line_sep>list_filter=(_filter_related_only("course") "flow_id" "in_progress" "access_rules_tag" "expiration_mode" )<line_sep>inlines=(FlowPageDataInline )<line_sep>raw_id_fields=("participation" "user")<line_sep>save_on_top=<true><line_sep># {{{ permissions
<def_stmt>has_add_permission self request# These are only created automatically.
<block_start><return><false><block_end><def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_course_linked_obj_for_user(qs request.user)<block_end><def_stmt>formfield_for_foreignkey self db_field request **kwargs<block_start><if_stmt>db_field.name<eq>"course"<block_start>kwargs["queryset"]=_filter_courses_for_user(Course.objects request.user)<block_end><return>super().formfield_for_foreignkey(db_field request **kwargs)<block_end># }}}
<block_end>admin.site.register(FlowSession FlowSessionAdmin)<line_sep># }}}
# {{{ flow page visit
<class_stmt>FlowPageVisitGradeInline(admin.TabularInline)<block_start>model=FlowPageVisitGrade<line_sep>extra=0<block_end><class_stmt>HasAnswerListFilter(admin.SimpleListFilter)<block_start>title="has answer"<line_sep>parameter_name="has_answer"<def_stmt>lookups self request model_admin<block_start><return>(("y" _("Yes")) ("n" _("No")) )<block_end><def_stmt>queryset self request queryset<block_start><if_stmt>self.value()<is><none><block_start><return>queryset<block_end><return>queryset.filter(answer__isnull=self.value()<ne>"y")<block_end><block_end><class_stmt>FlowIdListFilter(admin.SimpleListFilter)<block_start>"""
This is only necessary because flow_id is only accessible through FlowSession,
which is a ForeignKey on the model.
"""<line_sep>title=_("Flow ID")<line_sep>parameter_name="flow_id"<def_stmt>lookups self request model_admin<block_start>qs=model_admin.get_queryset(request)<if_stmt><not>request.user.is_superuser<block_start>qs=qs.filter(flow_session__course__participations__user=request.user flow_session__course__participations__roles__permissions__permission# noqa
=pperm.use_admin_interface)<block_end>flow_ids=qs.values_list("flow_session__flow_id" flat=<true>).distinct()<line_sep><return>zip(flow_ids flow_ids)<block_end><def_stmt>queryset self request queryset<block_start><if_stmt>self.value()<block_start><return>queryset.filter(flow_session__flow_id=self.value())<block_end><else_stmt><block_start><return>queryset<block_end><block_end><block_end><class_stmt>FlowPageVisitAdmin(admin.ModelAdmin)<block_start><def_stmt>get_course self obj<block_start><return>obj.flow_session.course<block_end>get_course.short_description=_("Course")# type: ignore
get_course.admin_order_field="flow_session__course"# type: ignore
<def_stmt>get_flow_id self obj<block_start><return>obj.flow_session.flow_id<block_end>get_flow_id.short_description=_("Flow ID")# type: ignore
get_flow_id.admin_order_field="flow_session__flow_id"# type: ignore
<def_stmt>get_page_id self obj<block_start><if_stmt>obj.page_data.page_ordinal<is><none><block_start><return>string_concat("%s/%s (" _("not in use") ")")%(obj.page_data.group_id obj.page_data.page_id)<block_end><else_stmt><block_start><return>"{}/{} ({})".format(obj.page_data.group_id obj.page_data.page_id obj.page_data.page_ordinal)<block_end><block_end>get_page_id.short_description=_("Page ID")# type: ignore
get_page_id.admin_order_field="page_data__page_id"# type: ignore
<def_stmt>get_participant self obj<block_start><if_stmt>obj.flow_session.participation<block_start><return>obj.flow_session.participation.user<block_end><else_stmt><block_start><return>string_concat("(" _("anonymous") ")")<block_end><block_end>get_participant.short_description=_("Owner")# type: ignore
get_participant.admin_order_field="flow_session__participation"# type: ignore
<def_stmt>get_answer_is_null self obj<block_start><return>obj.answer<is><not><none><block_end>get_answer_is_null.short_description=_("Has answer")# type: ignore
get_answer_is_null.boolean=<true># type: ignore
<def_stmt>get_flow_session_id self obj<block_start><return>obj.flow_session.id<block_end>get_flow_session_id.short_description=_("Flow Session ID")# type: ignore
get_flow_session_id.admin_order_field="flow_session__id"# type: ignore
list_filter=(HasAnswerListFilter "is_submitted_answer" "is_synthetic" _filter_related_only("flow_session__participation__course") FlowIdListFilter )<line_sep>date_hierarchy="visit_time"<line_sep>list_display=("id" "get_course" "get_flow_id" "get_page_id" "get_participant" "get_flow_session_id" "visit_time" "get_answer_is_null" "is_submitted_answer" "is_synthetic" "user" "impersonated_by" )<line_sep>list_display_links=("id" )<line_sep>search_fields=("=id" "=flow_session__id" "flow_session__flow_id" "page_data__group_id" "page_data__page_id" "flow_session__participation__user__username" "flow_session__participation__user__first_name" "flow_session__participation__user__last_name" )<line_sep>raw_id_fields=("flow_session" "page_data")<line_sep>inlines=(FlowPageVisitGradeInline )<line_sep>save_on_top=<true><line_sep># {{{ permissions
<def_stmt>has_add_permission self request# These are only created automatically.
<block_start><return><false><block_end><def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<if_stmt>request.user.is_superuser<block_start><return>qs<block_end><return>qs.filter(flow_session__course__participations__user=request.user flow_session__course__participations__roles__permissions__permission# noqa
=pperm.use_admin_interface)<block_end># }}}
<block_end>admin.site.register(FlowPageVisit FlowPageVisitAdmin)<line_sep># }}}
# {{{ flow access
<class_stmt>FlowRuleExceptionAdmin(admin.ModelAdmin)<block_start><def_stmt>get_course self obj<block_start><return>obj.participation.course<block_end>get_course.short_description=_("Course")# type: ignore
get_course.admin_order_field="participation__course"# type: ignore
<def_stmt>get_participant self obj<block_start><return>obj.participation.user<block_end>get_participant.short_description=_("Participant")# type: ignore
get_participant.admin_order_field="participation__user"# type: ignore
ordering=("-creation_time" )<line_sep>search_fields=("flow_id" "participation__user__username" "participation__user__first_name" "participation__user__last_name" "comment" )<line_sep>list_display=("get_participant" "get_course" "flow_id" "kind" "expiration" "creation_time" )<line_sep>list_display_links=("get_participant" "flow_id" )<line_sep>list_filter=(_filter_related_only("participation__course") "flow_id" "kind" )<line_sep>date_hierarchy="creation_time"<line_sep>raw_id_fields=("participation" )<line_sep># {{{ permissions
<def_stmt>has_add_permission self request# These are only created automatically.
<block_start><return><false><block_end><def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_participation_linked_obj_for_user(qs request.user)<block_end>exclude=("creator" "creation_time")<def_stmt>save_model self request obj form change# pragma: no cover
# This won't run, since adding is not allowed.
<block_start>obj.creator=request.user<line_sep>obj.save()<block_end># }}}
<block_end>admin.site.register(FlowRuleException FlowRuleExceptionAdmin)<line_sep># }}}
# {{{ grading
<class_stmt>GradingOpportunityAdmin(admin.ModelAdmin)<block_start>list_display=("name" "course" "identifier" "due_time" "shown_in_grade_book" "shown_in_participant_grade_book" )<line_sep>list_filter=(_filter_related_only("course") "shown_in_grade_book" "shown_in_participant_grade_book" )<line_sep>list_editable=("shown_in_grade_book" "shown_in_participant_grade_book" )<line_sep># {{{ permissions
exclude=("creation_time" )<def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_course_linked_obj_for_user(qs request.user)<block_end><def_stmt>formfield_for_foreignkey self db_field request **kwargs<block_start><if_stmt>db_field.name<eq>"course"<block_start>kwargs["queryset"]=_filter_courses_for_user(Course.objects request.user)<block_end><return>super().formfield_for_foreignkey(db_field request **kwargs)<block_end># }}}
<block_end>admin.site.register(GradingOpportunity GradingOpportunityAdmin)<class_stmt>GradeChangeAdmin(admin.ModelAdmin)<block_start><def_stmt>get_course self obj<block_start><return>obj.participation.course<block_end>get_course.short_description=_("Course")# type: ignore
get_course.admin_order_field="participation__course"# type: ignore
<def_stmt>get_opportunity self obj<block_start><return>obj.opportunity.name<block_end>get_opportunity.short_description=_("Opportunity")# type: ignore
get_opportunity.admin_order_field="opportunity"# type: ignore
<def_stmt>get_participant self obj<block_start><return>obj.participation.user<block_end>get_participant.short_description=_("Participant")# type: ignore
get_participant.admin_order_field="participation__user"# type: ignore
<def_stmt>get_percentage self obj<block_start><if_stmt>obj.points<is><none><or>obj.max_points<is><none><block_start><return><none><block_end><else_stmt><block_start><return>round(100<times>obj.points/obj.max_points)<block_end><block_end>get_percentage.short_description="%"# type: ignore
list_display=("get_opportunity" "get_participant" "get_course" "state" "points" "max_points" "get_percentage" "attempt_id" "grade_time" )<line_sep>list_display_links=("get_opportunity" "get_participant" )<line_sep>date_hierarchy="grade_time"<line_sep>search_fields=("opportunity__name" "opportunity__flow_id" "opportunity__identifier" "participation__user__username" "participation__user__first_name" "participation__user__last_name" "attempt_id" )<line_sep>list_filter=(_filter_related_only("opportunity__course") _filter_related_only("opportunity") "state" )<line_sep>raw_id_fields=("participation" "flow_session" "opportunity")<line_sep># {{{ permission
<def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_participation_linked_obj_for_user(qs request.user)<block_end>exclude=("creator" "grade_time")<def_stmt>save_model self request obj form change<block_start>obj.creator=request.user<line_sep>obj.save()<block_end># }}}
<block_end>admin.site.register(GradeChange GradeChangeAdmin)<line_sep># }}}
# {{{ instant message
<class_stmt>InstantMessageAdmin(admin.ModelAdmin)<block_start><def_stmt>get_course self obj<block_start><return>obj.participation.course<block_end>get_course.short_description=_("Course")# type: ignore
get_course.admin_order_field="participation__course"# type: ignore
<def_stmt>get_participant self obj<block_start><return>obj.participation.user<block_end>get_participant.short_description=_("Participant")# type: ignore
get_participant.admin_order_field="participation__user"# type: ignore
list_filter=(_filter_related_only("participation__course") )<line_sep>list_display=("get_course" "get_participant" "time" "text" )<line_sep>date_hierarchy="time"<line_sep>search_fields=("text" "participation__user__username" "participation__user__first_name" "participation__user__last_name" )<line_sep>raw_id_fields=("participation" )<line_sep># {{{ permissions
<def_stmt>has_add_permission self request# These are only created automatically.
<block_start><return><false><block_end><def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_participation_linked_obj_for_user(qs request.user)<block_end># }}}
<block_end>admin.site.register(InstantMessage InstantMessageAdmin)<line_sep># }}}
# {{{ exam tickets
<class_stmt>ExamAdmin(admin.ModelAdmin)<block_start>list_filter=(_filter_related_only("course") "active" "listed" )<line_sep>list_display=("course" "flow_id" "active" "listed" "no_exams_before" )<line_sep>search_fields=("flow_id" )<line_sep>date_hierarchy="no_exams_before"<line_sep># {{{ permissions
<def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_course_linked_obj_for_user(qs request.user)<block_end><def_stmt>formfield_for_foreignkey self db_field request **kwargs<block_start><if_stmt>db_field.name<eq>"course"<block_start>kwargs["queryset"]=_filter_courses_for_user(Course.objects request.user)<block_end><return>super().formfield_for_foreignkey(db_field request **kwargs)<block_end># }}}
<block_end>admin.site.register(Exam ExamAdmin)<class_stmt>ExamTicketAdmin(admin.ModelAdmin)<block_start><def_stmt>get_course self obj<block_start><return>obj.participation.course<block_end>get_course.short_description=_("Course")# type: ignore
get_course.admin_order_field="participation__course"# type: ignore
list_filter=(_filter_related_only("participation__course") "state" )<line_sep>raw_id_fields=("participation" )<line_sep>list_display=("get_course" "exam" "participation" "state" "creation_time" "usage_time" )<line_sep>date_hierarchy="usage_time"<line_sep>search_fields=("exam__course__identifier" "exam__flow_id" "exam__description" "participation__user__username" "participation__user__first_name" "participation__user__last_name" )<line_sep># {{{ permissions
<def_stmt>get_queryset self request<block_start>qs=super().get_queryset(request)<line_sep><return>_filter_participation_linked_obj_for_user(qs request.user)<block_end>exclude=("creator" )<def_stmt>save_model self request obj form change<block_start>obj.creator=request.user<line_sep>obj.save()<block_end># }}}
<def_stmt>revoke_exam_tickets self request queryset# noqa
<block_start>queryset.filter(state=exam_ticket_states.valid).update(state=exam_ticket_states.revoked)<block_end>revoke_exam_tickets.short_description=_("Revoke Exam Tickets")# type: ignore
actions=[revoke_exam_tickets]<block_end>admin.site.register(ExamTicket ExamTicketAdmin)<line_sep># }}}
# vim: foldmethod=marker
|
<import_stmt>sys<if_stmt>sys.hexversion<ge>0x03000000<block_start><import_from_stmt>queue Empty<block_end><else_stmt><block_start><import_from_stmt>Queue Empty<block_end><class_stmt>DiskCollectorConsumer(object)<block_start>""" consumes information from the disk collector and provides it for the local
collector classes running in the same subprocess.
"""<def_stmt>__init__ self q<block_start>self.result={}<line_sep>self.cached_result={}<line_sep>self.q=q<block_end><def_stmt>consume self# if we haven't consumed the previous value
<block_start><if_stmt>len(self.result)<ne>0<block_start><return><block_end><try_stmt><block_start>self.result=self.q.get_nowait()<line_sep>self.cached_result=self.result.copy()<block_end><except_stmt>Empty# we are too fast, just do nothing.
<block_start><pass><block_end><else_stmt><block_start>self.q.task_done()<block_end><block_end><def_stmt>fetch self wd<block_start>data=<none><if_stmt>wd<in>self.result<block_start>data=self.result[wd]<del_stmt>self.result[wd]<block_end><elif_stmt>wd<in>self.cached_result<block_start>data=self.cached_result[wd]<block_end><return>data<block_end><block_end> |
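# A minimal usage sketch (hypothetical 'sda' payload and Python 3 queue module; in production the queue is filled by the disk collector subprocess):
<import_from_stmt>queue Queue<line_sep>q=Queue()<line_sep>q.put({'sda':{'reads':1 'writes':2}})<line_sep>consumer=DiskCollectorConsumer(q)<line_sep>consumer.consume()<line_sep>print(consumer.fetch('sda'))<line_sep>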
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_stmt>FWCore.ParameterSet.VarParsing<as>VarParsing<line_sep>process=cms.Process("write2DB")<line_sep>options=VarParsing.VarParsing()<line_sep>options.register('unitTest' <false> # default value
VarParsing.VarParsing.multiplicity.singleton # singleton or list
VarParsing.VarParsing.varType.bool # string, int, or float
"are we running the unit test?")<line_sep>options.register('inputFile' "BeamFitResults_Run306171.txt" # default value
VarParsing.VarParsing.multiplicity.singleton # singleton or list
VarParsing.VarParsing.varType.string # string, int, or float
"location of the input data")<line_sep>options.register('inputTag' "myTagName" # default value
VarParsing.VarParsing.multiplicity.singleton # singleton or list
VarParsing.VarParsing.varType.string # string, int, or float
"output tag name")<line_sep>options.register('inputRecord' "BeamSpotOnlineLegacyObjectsRcd" # default value
VarParsing.VarParsing.multiplicity.singleton # singleton or list
VarParsing.VarParsing.varType.string # string, int, or float
"type of record")<line_sep>options.register('startRun' 306171 # default value
VarParsing.VarParsing.multiplicity.singleton # singleton or list
VarParsing.VarParsing.varType.int # string, int, or float
"location of the input data")<line_sep>options.register('startLumi' 497 # default value
VarParsing.VarParsing.multiplicity.singleton # singleton or list
VarParsing.VarParsing.varType.int # string, int, or float
"IOV Start Lumi")<line_sep>options.parseArguments()<line_sep>process.load("FWCore.MessageLogger.MessageLogger_cfi")<import_from_stmt>CondCore.CondDB.CondDB_cfi *<if_stmt>options.unitTest<block_start><if_stmt>options.inputRecord<eq>"BeamSpotOnlineLegacyObjectsRcd"<block_start>tag_name='BSLegacy_tag'<block_end><else_stmt><block_start>tag_name='BSHLT_tag'<block_end><block_end><else_stmt><block_start>tag_name=options.inputTag<block_end>#################################
# Produce a SQLITE FILE
#################################
CondDBBeamSpotObjects=CondDB.clone(connect=cms.string('sqlite_file:test_%s.db'%tag_name))# choose an output name
process.PoolDBOutputService=cms.Service("PoolDBOutputService" CondDBBeamSpotObjects timetype=cms.untracked.string('lumiid') #('lumiid'), #('runnumber')
toPut=cms.VPSet(cms.PSet(record=cms.string(options.inputRecord) # BeamSpotOnline record
tag=cms.string(tag_name))) # choose your favourite tag
loadBlobStreamer=cms.untracked.bool(<false>))<line_sep>process.source=cms.Source("EmptySource")<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1))<line_sep>process.beamspotonlinewriter=cms.EDAnalyzer("BeamSpotOnlineRecordsWriter" isHLT=cms.bool((options.inputRecord<eq>"BeamSpotOnlineHLTObjectsRcd")) InputFileName=cms.untracked.string(options.inputFile) # choose your input file
)<if_stmt>(options.startRun<g>0<and>options.startLumi<g>0)<block_start>process.beamspotonlinewriter.IOVStartRun=cms.untracked.uint32(options.startRun)# Customize your Run
process.beamspotonlinewriter.IOVStartLumi=cms.untracked.uint32(options.startLumi)<block_end># Customize your Lumi
process.p=cms.Path(process.beamspotonlinewriter)<line_sep> |
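# Example invocation (hypothetical config filename; the key=value pairs map to the VarParsing options registered above):
# cmsRun write2DB_BeamSpotOnline_cfg.py unitTest=True inputFile=BeamFitResults_Run306171.txt startRun=306171 startLumi=497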
<import_stmt>unittest<import_from_stmt>jmilkfansblog.controllers admin<import_from_stmt>jmilkfansblog.controllers rest_api<import_from_stmt>jmilkfansblog create_app<import_from_stmt>jmilkfansblog.models db<class_stmt>TestURLs(unittest.TestCase)<block_start>"""Unit tests for route functions."""<def_stmt>setUp self# Destroy the Flask-Admin and REST API extension objects after deleting the app
# object, so every test starts with a fresh application.
<block_start>admin._views=[]<line_sep>rest_api.resource=[]<line_sep>app=create_app('jmilkfansblog.config.TestConfig')<line_sep>self.client=app.test_client()<line_sep># Using Test app for db
db.app=app<line_sep>db.create_all()<block_end><def_stmt>tearDown self<block_start>db.session.remove()<line_sep>db.drop_all()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
# export SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN=<KEY>
# python3 integration_tests/samples/issues/issue_522.py
<import_stmt>asyncio<import_stmt>logging<import_stmt>os<import_stmt>sys<import_from_stmt>slack_sdk.rtm RTMClient<line_sep>logging.basicConfig(level=logging.DEBUG)<line_sep>LOGGER=logging.getLogger(__name__)<line_sep>token=os.environ["SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN"]<line_sep><async_keyword><def_stmt>sleepy_count name sleep_for<block_start><for_stmt>i range(10)<block_start><await>asyncio.sleep(sleep_for)<line_sep>LOGGER.debug(f"{name} - slept {i+1} times.")<block_end><block_end><async_keyword><def_stmt>slack_client_and_sleeps # real-time-messaging Slack client
<block_start>client=RTMClient(token=token run_async=<true>)<line_sep>sleepy_count_task=asyncio.create_task(sleepy_count("first counter" 1))<line_sep>sleepy_count_task2=asyncio.create_task(sleepy_count("second counter" 3))<line_sep><await>asyncio.gather(client.start() sleepy_count_task sleepy_count_task2)<block_end><async_keyword><def_stmt>slack_client # real-time-messaging Slack client
<block_start>client=RTMClient(token=token run_async=<true>)<line_sep><await>asyncio.gather(client.start())<block_end><async_keyword><def_stmt>sleeps <block_start>sleepy_count_task=asyncio.create_task(sleepy_count("first counter" 1))<line_sep>sleepy_count_task2=asyncio.create_task(sleepy_count("second counter" 3))<line_sep><await>asyncio.gather(sleepy_count_task sleepy_count_task2)<block_end><if_stmt>__name__<eq>"__main__"<block_start>LOGGER.info(f"Try: kill -2 {os.getpid()} or ctrl+c")<if_stmt>len(sys.argv)<g>1<block_start>option=sys.argv[1]<if_stmt>option<eq>"1"# sigint closes program correctly
<block_start>asyncio.run(slack_client())<block_end><elif_stmt>option<eq>"2"# sigint closes program correctly
<block_start>asyncio.run(sleeps())<block_end><elif_stmt>option<eq>"3"# sigint doesn't actually close properly
<block_start>asyncio.run(slack_client_and_sleeps())<block_end><block_end><else_stmt># sigint doesn't actually close properly
<block_start>asyncio.run(slack_client_and_sleeps())<block_end><block_end> |
"""
pending_imports
"""<import_from_future_stmt> absolute_import division print_function unicode_literals<import_from_stmt>PySide QtGui<import_stmt>logging<line_sep>log=logging.getLogger(__name__)<class_stmt>PendingImportsWidget(QtGui.QWidget)<block_start><def_stmt>__init__ self<block_start>super(PendingImportsWidget self).__init__()<line_sep>self.importsListWidget=QtGui.QListView()<line_sep>self.importsListModel=QtGui.QStandardItemModel()<line_sep>self.importsListWidget.setModel(self.importsListModel)<line_sep>self.importsListWidget.clicked.connect(self.listClicked)<line_sep>self.importsListWidget.doubleClicked.connect(self.listDoubleClicked)<block_end><block_end> |
<import_from_stmt>typing Any Callable Dict List Optional Tuple Union Iterable <import_from_stmt>prompt_toolkit.completion CompleteEvent Completer Completion<import_from_stmt>prompt_toolkit.document Document<import_from_stmt>prompt_toolkit.formatted_text HTML<import_from_stmt>prompt_toolkit.shortcuts.prompt PromptSession CompleteStyle<import_from_stmt>prompt_toolkit.styles Style merge_styles<import_from_stmt>prompt_toolkit.lexers SimpleLexer<import_from_stmt>questionary.constants DEFAULT_QUESTION_PREFIX DEFAULT_STYLE<import_from_stmt>questionary.prompts.common build_validator<import_from_stmt>questionary.question Question<class_stmt>WordCompleter(Completer)<block_start>choices_source:Union[List[str] Callable[[] List[str]]]<line_sep>ignore_case:bool<line_sep>meta_information:Dict[str Any]<line_sep>match_middle:bool<def_stmt>__init__ self choices:Union[List[str] Callable[[] List[str]]] ignore_case:bool=<true> meta_information:Optional[Dict[str Any]]=<none> match_middle:bool=<true> <arrow><none><block_start>self.choices_source=choices<line_sep>self.ignore_case=ignore_case<line_sep>self.meta_information=meta_information<or>{}<line_sep>self.match_middle=match_middle<block_end><def_stmt>_choices self<arrow>Iterable[str]<block_start><return>(self.choices_source()<if>callable(self.choices_source)<else>self.choices_source)<block_end><def_stmt>_choice_matches self word_before_cursor:str choice:str<arrow>int<block_start>"""Match index if found, -1 if not. """<if_stmt>self.ignore_case<block_start>choice=choice.lower()<block_end><if_stmt>self.match_middle<block_start><return>choice.find(word_before_cursor)<block_end><elif_stmt>choice.startswith(word_before_cursor)<block_start><return>0<block_end><else_stmt><block_start><return>-1<block_end><block_end>@staticmethod<def_stmt>_display_for_choice choice:str index:int word_before_cursor:str<arrow>HTML<block_start><return>HTML("{}<b><u>{}</u></b>{}").format(choice[:index] choice[index:index+len(word_before_cursor)] choice[index+len(word_before_cursor):len(choice)] )<block_end><def_stmt>get_completions self document:Document complete_event:CompleteEvent<arrow>Iterable[Completion]<block_start>choices=self._choices()<line_sep># Get word/text before cursor.
word_before_cursor=document.text_before_cursor<if_stmt>self.ignore_case<block_start>word_before_cursor=word_before_cursor.lower()<block_end><for_stmt>choice choices<block_start>index=self._choice_matches(word_before_cursor choice)<if_stmt>index<eq>-1# didn't find a match
<block_start><continue><block_end>display_meta=self.meta_information.get(choice "")<line_sep>display=self._display_for_choice(choice index word_before_cursor)<line_sep><yield>Completion(choice start_position=-len(choice) display=display.formatted_text display_meta=display_meta style="class:answer" selected_style="class:selected" )<block_end><block_end><block_end><def_stmt>autocomplete message:str choices:List[str] default:str="" qmark:str=DEFAULT_QUESTION_PREFIX completer:Optional[Completer]=<none> meta_information:Optional[Dict[str Any]]=<none> ignore_case:bool=<true> match_middle:bool=<true> complete_style:CompleteStyle=CompleteStyle.COLUMN validate:Any=<none> style:Optional[Style]=<none> **kwargs:Any <arrow>Question<block_start>"""Prompt the user to enter a message with autocomplete help.
Example:
>>> import questionary
>>> questionary.autocomplete(
... 'Choose ant specie',
... choices=[
... 'Camponotus pennsylvanicus',
... 'Linepithema humile',
... 'Eciton burchellii',
... "Atta colombica",
... 'Polyergus lucidus',
... 'Polyergus rufescens',
... ]).ask()
? Choose ant specie Atta colombica
'Atta colombica'
.. image:: ../images/autocomplete.gif
This is just a really basic example; the prompt can be customised using the
parameters.
Args:
message: Question text
choices: Items shown in the selection; this contains items as strings.
default: Default return value (single value).
qmark: Question prefix displayed in front of the question.
By default this is a ``?``.
completer: A prompt_toolkit :class:`prompt_toolkit.completion.Completer`
implementation. If not set, a questionary completer implementation
will be used.
meta_information: A dictionary mapping choices to extra information that
is displayed alongside them.
ignore_case: If true, autocomplete matching ignores case.
match_middle: If true, autocomplete matches anywhere inside a choice
string, not only at its beginning.
complete_style: How the autocomplete menu is shown; it can be ``COLUMN``,
``MULTI_COLUMN`` or ``READLINE_LIKE`` from
:class:`prompt_toolkit.shortcuts.CompleteStyle`.
validate: Require the entered value to pass a validation. The
value can not be submitted until the validator accepts
it (e.g. to check minimum password length).
This can either be a function accepting the input and
returning a boolean, or a class reference to a
subclass of the prompt toolkit Validator class.
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
:class:`Question`: Question instance, ready to be prompted (using ``.ask()``).
"""<line_sep>merged_style=merge_styles([DEFAULT_STYLE style])<def_stmt>get_prompt_tokens <arrow>List[Tuple[str str]]<block_start><return>[("class:qmark" qmark) ("class:question" " {} ".format(message))]<block_end><def_stmt>get_meta_style meta:Optional[Dict[str Any]]<arrow>Optional[Dict[str Any]]<block_start><if_stmt>meta<block_start><for_stmt>key meta<block_start>meta[key]=HTML("<text>{}</text>").format(meta[key])<block_end><block_end><return>meta<block_end>validator=build_validator(validate)<if_stmt>completer<is><none><block_start><if_stmt><not>choices<block_start><raise>ValueError("No choices is given, you should use Text question.")<block_end># use the default completer
completer=WordCompleter(choices ignore_case=ignore_case meta_information=get_meta_style(meta_information) match_middle=match_middle )<block_end>p=PromptSession(get_prompt_tokens lexer=SimpleLexer("class:answer") style=merged_style completer=completer validator=validator complete_style=complete_style **kwargs )<line_sep>p.default_buffer.reset(Document(default))<line_sep><return>Question(p.app)<block_end> |
<import_stmt>time<import_from_stmt>pubnub.models.consumer.history PNFetchMessagesResult<import_from_stmt>pubnub.models.consumer.pubsub PNPublishResult<import_from_stmt>pubnub.pubnub PubNub<import_from_stmt>tests.helper pnconf_copy<import_from_stmt>tests.integrational.vcr_helper use_cassette_and_stub_time_sleep_native<line_sep>COUNT=120<class_stmt>TestFetchMessages<block_start>@use_cassette_and_stub_time_sleep_native('tests/integrational/fixtures/native_sync/fetch_messages/max_100_single.yaml' filter_query_parameters=['uuid' 'pnsdk' 'l_pub'])<def_stmt>test_fetch_messages_return_max_100_for_single_channel self<block_start>ch="fetch-messages-ch-1"<line_sep>pubnub=PubNub(pnconf_copy())<line_sep>pubnub.config.uuid="fetch-messages-uuid"<for_stmt>i range(COUNT)<block_start>envelope=pubnub.publish().channel(ch).message("hey-%s"%i).sync()<assert_stmt>isinstance(envelope.result PNPublishResult)<assert_stmt>envelope.result.timetoken<g>0<block_end><while_stmt><true><block_start>time.sleep(1)<if_stmt>len(pubnub.history().channel(ch).count(COUNT).sync().result.messages)<ge>100<block_start><break><block_end><block_end>envelope=pubnub.fetch_messages().channels(ch).sync()<assert_stmt>envelope<is><not><none><assert_stmt>isinstance(envelope.result PNFetchMessagesResult)<assert_stmt>len(envelope.result.channels[ch])<eq>100<block_end>@use_cassette_and_stub_time_sleep_native('tests/integrational/fixtures/native_sync/fetch_messages/max_25_multiple.yaml' filter_query_parameters=['uuid' 'pnsdk' 'l_pub'])<def_stmt>test_fetch_messages_return_max_25_for_multiple_channels self<block_start>ch1="fetch-messages-ch-1"<line_sep>ch2="fetch-messages-ch-2"<line_sep>pubnub=PubNub(pnconf_copy())<line_sep>pubnub.config.uuid="fetch-messages-uuid"<for_stmt>i range(COUNT)<block_start>envelope1=pubnub.publish().channel(ch1).message("hey-%s"%i).sync()<assert_stmt>isinstance(envelope1.result PNPublishResult)<assert_stmt>envelope1.result.timetoken<g>0<line_sep>envelope2=pubnub.publish().channel(ch2).message("hey-%s"%i).sync()<assert_stmt>isinstance(envelope2.result PNPublishResult)<assert_stmt>envelope2.result.timetoken<g>0<block_end><while_stmt><true><block_start>time.sleep(1)<if_stmt>len(pubnub.history().channel(ch1).count(COUNT).sync().result.messages)<ge>100<and>len(pubnub.history().channel(ch2).count(COUNT).sync().result.messages)<ge>100<block_start><break><block_end><block_end>envelope=pubnub.fetch_messages().channels([ch1 ch2]).sync()<assert_stmt>isinstance(envelope.result PNFetchMessagesResult)<assert_stmt>len(envelope.result.channels[ch1])<eq>25<assert_stmt>len(envelope.result.channels[ch2])<eq>25<block_end>@use_cassette_and_stub_time_sleep_native('tests/integrational/fixtures/native_sync/fetch_messages/max_25_with_actions.yaml' filter_query_parameters=['uuid' 'pnsdk' 'l_pub'])<def_stmt>test_fetch_messages_actions_return_max_25 self<block_start>ch="fetch-messages-actions-ch-1"<line_sep>pubnub=PubNub(pnconf_copy())<line_sep>pubnub.config.uuid="fetch-messages-uuid"<for_stmt>i range(COUNT)<block_start>envelope=pubnub.publish().channel(ch).message("hey-%s"%i).sync()<assert_stmt>isinstance(envelope.result PNPublishResult)<assert_stmt>envelope.result.timetoken<g>0<block_end><while_stmt><true><block_start>time.sleep(1)<if_stmt>len(pubnub.history().channel(ch).count(COUNT).sync().result.messages)<ge>100<block_start><break><block_end><block_end>envelope=pubnub.fetch_messages().channels(ch).include_message_actions(<true>).sync()<assert_stmt>envelope<is><not><none><assert_stmt>isinstance(envelope.result PNFetchMessagesResult)<assert_stmt>len(envelope.result.channels[ch])<eq>25<block_end><block_end>
<import_from_stmt>rockstar RockStar<line_sep>ocaml_code='print_string "Hello world!\n";;'<line_sep>rock_it_bro=RockStar(days=400 file_name='hello.ml' code=ocaml_code)<line_sep>rock_it_bro.make_me_a_rockstar()<line_sep> |
<import_stmt>json<def_stmt>api_position db cursor temp principal5 principal30 principal60 principal300 principal900 principal1800 coin_number5 coin_number30 coin_number60 coin_number300 coin_number900 coin_number1800 judge_position sell_amount buy_amount current_price<block_start>all_buyamount=0<line_sep>all_sellamount=0<line_sep>trade_amount={}<line_sep>fee=0.0025<for_stmt>i temp<block_start><if_stmt>(i<eq>'5')<block_start>trade_amount['5']=position(coin_number5 principal5 buy_amount sell_amount fee judge_position temp[i] current_price)<if_stmt>(trade_amount['5']['action']<eq>'buy')<block_start>principal5=trade_amount['5']['value']['principal']<line_sep>coin_number5=trade_amount['5']['value']['coin_number']<line_sep>all_buyamount<augadd>trade_amount['5']['value']['buy_amount']<block_end><if_stmt>(trade_amount['5']['action']<eq>'sell')<block_start>principal5=trade_amount['5']['value']['principal']<line_sep>coin_number5=trade_amount['5']['value']['coin_number']<line_sep>all_sellamount<augadd>trade_amount['5']['value']['sell_amount']<block_end><block_end><if_stmt>(i<eq>'30')<block_start>trade_amount['30']=position(coin_number30 principal30 buy_amount sell_amount fee judge_position temp[i] current_price)<if_stmt>(trade_amount['30']['action']<eq>'buy')<block_start>principal30=trade_amount['30']['value']['principal']<line_sep>coin_number30=trade_amount['30']['value']['coin_number']<line_sep>all_buyamount<augadd>trade_amount['30']['value']['buy_amount']<block_end><if_stmt>(trade_amount['30']['action']<eq>'sell')<block_start>principal30=trade_amount['30']['value']['principal']<line_sep>coin_number30=trade_amount['30']['value']['coin_number']<line_sep>all_sellamount<augadd>trade_amount['30']['value']['sell_amount']<block_end><block_end><if_stmt>(i<eq>'60')<block_start>trade_amount['60']=position(coin_number60 principal60 buy_amount sell_amount fee judge_position temp[i] current_price)<if_stmt>(trade_amount['60']['action']<eq>'buy')<block_start>principal60=trade_amount['60']['value']['principal']<line_sep>coin_number60=trade_amount['60']['value']['coin_number']<line_sep>all_buyamount<augadd>trade_amount['60']['value']['buy_amount']<block_end><if_stmt>(trade_amount['60']['action']<eq>'sell')<block_start>principal60=trade_amount['60']['value']['principal']<line_sep>coin_number60=trade_amount['60']['value']['coin_number']<line_sep>all_sellamount<augadd>trade_amount['60']['value']['sell_amount']<block_end><block_end><if_stmt>(i<eq>'300')<block_start>trade_amount['300']=position(coin_number300 principal300 buy_amount sell_amount fee judge_position temp[i] current_price)<if_stmt>(trade_amount['300']['action']<eq>'buy')<block_start>principal300=trade_amount['300']['value']['principal']<line_sep>coin_number300=trade_amount['300']['value']['coin_number']<line_sep>all_buyamount<augadd>trade_amount['300']['value']['buy_amount']<block_end><if_stmt>(trade_amount['300']['action']<eq>'sell')<block_start>principal300=trade_amount['300']['value']['principal']<line_sep>coin_number300=trade_amount['300']['value']['coin_number']<line_sep>all_sellamount<augadd>trade_amount['300']['value']['sell_amount']<block_end><block_end><if_stmt>(i<eq>'900')<block_start>trade_amount['900']=position(coin_number900 principal900 buy_amount sell_amount fee judge_position temp[i] 
current_price)<if_stmt>(trade_amount['900']['action']<eq>'buy')<block_start>principal900=trade_amount['900']['value']['principal']<line_sep>coin_number900=trade_amount['900']['value']['coin_number']<line_sep>all_buyamount<augadd>trade_amount['900']['value']['buy_amount']<block_end><if_stmt>(trade_amount['900']['action']<eq>'sell')<block_start>principal900=trade_amount['900']['value']['principal']<line_sep>coin_number900=trade_amount['900']['value']['coin_number']<line_sep>all_sellamount<augadd>trade_amount['900']['value']['sell_amount']<block_end><block_end><if_stmt>(i<eq>'1800')<block_start>trade_amount['1800']=position(coin_number1800 principal1800 buy_amount sell_amount fee judge_position temp[i] current_price)<if_stmt>(trade_amount['1800']['action']<eq>'buy')<block_start>principal1800=trade_amount['1800']['value']['principal']<line_sep>coin_number1800=trade_amount['1800']['value']['coin_number']<line_sep>all_buyamount<augadd>trade_amount['1800']['value']['buy_amount']<block_end><if_stmt>(trade_amount['1800']['action']<eq>'sell')<block_start>principal1800=trade_amount['1800']['value']['principal']<line_sep>coin_number1800=trade_amount['1800']['value']['coin_number']<line_sep>all_sellamount<augadd>trade_amount['1800']['value']['sell_amount']<block_end><block_end><block_end><if_stmt>(all_buyamount<g>all_sellamount)<block_start>uid=exec_order('buy' all_buyamount-all_sellamount)<line_sep>sql="INSERT INTO order_table(uid , valuess , timess) VALUES ('%s', '%s', '%s')"%(str(uid) json.dumps({'principal5':principal5 'coin_number5':coin_number5 'principal30':principal30 'coin_number30':coin_number30 'principal60':principal60 'coin_number60':coin_number60 'principal300':principal300 'coin_number300':coin_number300 'principal900':principal900 'coin_number900':coin_number900 'principal1800':principal1800 'coin_number1800':coin_number1800 'result':trade_amount 'current_price':current_price}) 0)<line_sep>cursor.execute(sql)<line_sep>db.commit()<block_end><if_stmt>(all_sellamount<g>all_buyamount)<block_start>uid=exec_order('sell' all_sellamount-all_buyamount)<line_sep>sql="INSERT INTO order_table(uid , valuess , timess) VALUES ('%s', '%s', '%s')"%(str(uid) json.dumps({'principal5':principal5 'coin_number5':coin_number5 'principal30':principal30 'coin_number30':coin_number30 'principal60':principal60 'coin_number60':coin_number60 'principal300':principal300 'coin_number300':coin_number300 'principal900':principal900 'coin_number900':coin_number900 'principal1800':principal1800 'coin_number1800':coin_number1800 'result':trade_amount 'current_price':current_price}) 0)<line_sep>cursor.execute(sql)<line_sep>db.commit()<block_end><return>{'principal5':principal5 'coin_number5':coin_number5 'principal30':principal30 'coin_number30':coin_number30 'principal60':principal60 'coin_number60':coin_number60 'principal300':principal300 'coin_number300':coin_number300 'principal900':principal900 'coin_number900':coin_number900 'principal1800':principal1800 'coin_number1800':coin_number1800}<block_end><def_stmt>position coin_number principal buy_amount sell_amount fee judge_position index 
current_price<block_start>sposition=(coin_number<times>current_price)/(principal+(coin_number<times>current_price))<if_stmt>((index['buy_index']<g>index['sell_index'])<and>(judge_position<g>sposition))<block_start>buy_amount2=(index['buy_index']/(index['buy_index']+index['sell_index']))<times>buy_amount<if_stmt>(buy_amount2<l>principal)<block_start>coin_number=((buy_amount2-buy_amount2<times>fee)/current_price)+coin_number<line_sep>principal=principal-buy_amount2<block_end><else_stmt><block_start>buy_amount2=principal<line_sep>coin_number=((principal-principal<times>fee)/current_price)+coin_number<line_sep>principal=0<block_end><return>{'action':'buy' 'value':{'buy_amount':buy_amount2 'principal':principal 'coin_number':coin_number}}<block_end><if_stmt>(index['buy_index']<l>index['sell_index']<and>(sposition<g>0))<block_start>sell_amount2=(index['sell_index']/(index['buy_index']+index['sell_index']))<times>sell_amount<if_stmt>((sell_amount2/current_price)<l>coin_number)<block_start>coin_number=coin_number-(sell_amount2/current_price)<line_sep>principal=principal+(sell_amount2-sell_amount2<times>fee)<block_end><else_stmt><block_start>sell_amount2=coin_number<times>current_price<line_sep>principal=principal+(coin_number-coin_number<times>fee)<times>current_price<line_sep>coin_number=0<block_end><return>{'action':'sell' 'value':{'sell_amount':sell_amount2 'principal':principal 'coin_number':coin_number}}<block_end><return>{'action':'none'}<block_end><def_stmt>exec_order action buy_amount<block_start><return>23231321<block_end>
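# Hedged worked example (added for illustration; not part of the original
# snippet): one call to position() with made-up numbers, exercising the buy
# branch: buy_amount2 = (2/3)*150 = 100 and coins = (100 - 100*0.0025)/50.
_example_index={'buy_index':2.0 'sell_index':1.0}<line_sep>_r=position(0.0 1000.0 150.0 150.0 0.0025 0.5 _example_index 50.0)<assert_stmt>_r['action']<eq>'buy'<assert_stmt>abs(_r['value']['coin_number']-1.995)<l>1e-9<line_sep> |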
# new cookies.py
<import_from_stmt>cookies Cookies Cookie<line_sep>cookies=Cookies(rocky='road')<line_sep># Can also write explicitly: cookies['rocky'] = Cookie('rocky', 'road')
cookies['rocky'].path="/cookie"<assert_stmt>cookies.render_request()<eq>'rocky=road'<line_sep> |
"""Unsupervised Clustering Algorithms"""<import_from_stmt>.k_means KMeans# noqa
<import_from_stmt>.spectral SpectralClustering# noqa
|
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: JingyiXie, RainbowSecret
## Microsoft Research
## <EMAIL>
## Copyright (c) 2019
##
## Code adapted from:
## https://github.com/nv-tlabs/GSCNN/blob/master/utils/f_boundary.py
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>pdb<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>multiprocessing.pool Pool<class_stmt>F1RunningScore(object)<block_start><def_stmt>__init__ self configer=<none> num_classes=<none> boundary_threshold=0.00088 num_proc=15<block_start><assert_stmt>configer<is><not><none><or>num_classes<is><not><none><line_sep>self.configer=configer<if_stmt>configer<is><not><none><block_start>self.n_classes=self.configer.get('data' 'num_classes')<block_end><else_stmt><block_start>self.n_classes=num_classes<block_end>self.ignore_index=-1<line_sep>self.boundary_threshold=boundary_threshold<line_sep>self.pool=Pool(processes=num_proc)<line_sep>self.num_proc=num_proc<line_sep>self._Fpc=0<line_sep>self._Fc=0<line_sep>self.seg_map_cache=[]<line_sep>self.gt_map_cache=[]<block_end><def_stmt>_update_cache self seg_map gt_map<block_start>"""
Append inputs to `seg_map_cache` and `gt_map_cache`.
Returns whether the length reached our pool size.
"""<line_sep>self.seg_map_cache.extend(seg_map)<line_sep>self.gt_map_cache.extend(gt_map)<line_sep><return>len(self.gt_map_cache)<ge>self.num_proc<block_end><def_stmt>_get_from_cache self<block_start>n=self.num_proc<line_sep>seg_map,self.seg_map_cache=self.seg_map_cache[:n] self.seg_map_cache[n:]<line_sep>gt_map,self.gt_map_cache=self.gt_map_cache[:n] self.gt_map_cache[n:]<line_sep><return>seg_map gt_map<block_end><def_stmt>update self seg_map gt_map<block_start><if_stmt>self._update_cache(seg_map gt_map)<block_start>seg_map,gt_map=self._get_from_cache()<line_sep>self._update_scores(seg_map gt_map)<block_end><else_stmt><block_start><return><block_end><block_end><def_stmt>_update_scores self seg_map gt_map<block_start>batch_size=len(seg_map)<if_stmt>batch_size<eq>0<block_start><return><block_end>Fpc=np.zeros(self.n_classes)<line_sep>Fc=np.zeros(self.n_classes)<for_stmt>class_id range(self.n_classes)<block_start>args=[]<for_stmt>i range(batch_size)<block_start><if_stmt>seg_map[i].shape[0]<eq>self.n_classes<block_start>pred_i=seg_map[i][class_id]<g>0.5<line_sep>pred_is_boundary=<true><block_end><else_stmt><block_start>pred_i=seg_map[i]<eq>class_id<line_sep>pred_is_boundary=<false><block_end>args.append([(pred_i).astype(np.uint8) (gt_map[i]<eq>class_id).astype(np.uint8) (gt_map[i]<eq>-1) self.boundary_threshold class_id pred_is_boundary])<block_end>results=self.pool.map(db_eval_boundary args)<line_sep>results=np.array(results)<line_sep>Fs=results[: 0]<line_sep>_valid=~np.isnan(Fs)<line_sep>Fc[class_id]=np.sum(_valid)<line_sep>Fs[np.isnan(Fs)]=0<line_sep>Fpc[class_id]=sum(Fs)<block_end>self._Fc=self._Fc+Fc<line_sep>self._Fpc=self._Fpc+Fpc<block_end><def_stmt>get_scores self<block_start><if_stmt>self.seg_map_cache<is><none><block_start><return>0 0<block_end>self._update_scores(self.seg_map_cache self.gt_map_cache)<line_sep>F_score=np.sum(self._Fpc/self._Fc)/self.n_classes<line_sep>F_score_classwise=self._Fpc/self._Fc<line_sep><return>F_score F_score_classwise<block_end><def_stmt>reset self<block_start>self._Fpc=self._Fc=0<block_end><block_end><def_stmt>db_eval_boundary args<block_start>"""
Compute boundary precision, recall and F-measure from a per-frame evaluation.
Calculates precision/recall for boundaries between foreground_mask and
gt_mask using morphological operators to speed it up.
Arguments:
foreground_mask (ndarray): binary segmentation image.
gt_mask (ndarray): binary annotated image.
Returns:
F (float): boundaries F-measure
precision (float): boundaries precision
"""<line_sep>foreground_mask,gt_mask,ignore_mask,bound_th,class_id,pred_is_boundary=args<assert_stmt>np.atleast_3d(foreground_mask).shape[2]<eq>1<line_sep>bound_pix=bound_th<if>bound_th<ge>1<else>np.ceil(bound_th<times>np.linalg.norm(foreground_mask.shape))<line_sep># print(bound_pix)
# print(gt.shape)
# print(np.unique(gt))
foreground_mask[ignore_mask]=0<line_sep>gt_mask[ignore_mask]=0<line_sep># Get the pixel boundaries of both masks
<if_stmt>pred_is_boundary<block_start>fg_boundary=foreground_mask<block_end><else_stmt><block_start>fg_boundary=seg2bmap(foreground_mask)<block_end>gt_boundary=seg2bmap(gt_mask)<import_from_stmt>skimage.morphology disk<import_from_stmt>cv2 dilate<def_stmt>binary_dilation x d<block_start><return>dilate(x.astype(np.uint8) d).astype(bool)<block_end>fg_dil=binary_dilation(fg_boundary disk(bound_pix))<line_sep>gt_dil=binary_dilation(gt_boundary disk(bound_pix))<line_sep># Get the intersection
gt_match=gt_boundary<times>fg_dil<line_sep>fg_match=fg_boundary<times>gt_dil<line_sep># Area of the intersection
n_fg=np.sum(fg_boundary)<line_sep>n_gt=np.sum(gt_boundary)<line_sep># Compute precision and recall
<if_stmt>n_fg<eq>0<and>n_gt<g>0<block_start>precision=1<line_sep>recall=0<block_end><elif_stmt>n_fg<g>0<and>n_gt<eq>0<block_start>precision=0<line_sep>recall=1<block_end><elif_stmt>n_fg<eq>0<and>n_gt<eq>0<block_start>precision=1<line_sep>recall=1<block_end><else_stmt><block_start>precision=np.sum(fg_match)/float(n_fg)<line_sep>recall=np.sum(gt_match)/float(n_gt)<block_end># Compute F measure
<if_stmt>precision+recall<eq>0<block_start>F=0<block_end><else_stmt><block_start>F=2<times>precision<times>recall/(precision+recall)<block_end><return>F precision<block_end><def_stmt>seg2bmap seg width=<none> height=<none><block_start>"""
From a segmentation, compute a binary boundary map with 1 pixel wide
boundaries. The boundary pixels are offset by 1/2 pixel towards the
origin from the actual segment boundary.
Arguments:
seg : Segments labeled from 1..k.
width : Width of desired bmap <= seg.shape[1]
height : Height of desired bmap <= seg.shape[0]
Returns:
bmap (ndarray): Binary boundary map.
<NAME> <<EMAIL>>
January 2003
"""<line_sep>seg=seg.astype(np.bool)<line_sep>seg[seg<g>0]=1<assert_stmt>np.atleast_3d(seg).shape[2]<eq>1<line_sep>width=seg.shape[1]<if>width<is><none><else>width<line_sep>height=seg.shape[0]<if>height<is><none><else>height<line_sep>h,w=seg.shape[:2]<line_sep>ar1=float(width)/float(height)<line_sep>ar2=float(w)/float(h)<assert_stmt><not>(width<g>w|height<g>h|abs(ar1-ar2)<g>0.01) 'Can'<concat>'t convert %dx%d seg to %dx%d bmap.'%(w h width height)<line_sep>e=np.zeros_like(seg)<line_sep>s=np.zeros_like(seg)<line_sep>se=np.zeros_like(seg)<line_sep>e[: :-1]=seg[: 1:]<line_sep>s[:-1 :]=seg[1: :]<line_sep>se[:-1 :-1]=seg[1: 1:]<line_sep>b=seg^e|seg^s|seg^se<line_sep>b[-1 :]=seg[-1 :]^e[-1 :]<line_sep>b[: -1]=seg[: -1]^s[: -1]<line_sep>b[-1 -1]=0<if_stmt>w<eq>width<and>h<eq>height<block_start>bmap=b<block_end><else_stmt><block_start>bmap=np.zeros((height width))<for_stmt>x range(w)<block_start><for_stmt>y range(h)<block_start><if_stmt>b[y x]<block_start>j=1+floor((y-1)+height/h)<line_sep>i=1+floor((x-1)+width/h)<line_sep>bmap[j i]=1<block_end><block_end><block_end><block_end><return>bmap<block_end> |
# -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/4/1 10:35
# @author :Mo
# @function :cut sentences
<import_from_stmt>conf.path_config chicken_and_gossip_path td_idf_cut_path td_idf_cut_pinyin<import_from_stmt>utils.text_tools txtWrite txtRead get_syboml strQ2B<import_from_stmt>conf.path_config projectdir<import_from_stmt>gensim corpora models<import_stmt>xpinyin<import_stmt>pickle<import_stmt>jieba<def_stmt>cut_td_idf sources_path target_path<block_start>"""
Tokenize a Chinese corpus with jieba and write the result.
:param sources_path: path of the input corpus
:param target_path: path of the tokenized output
:return: None
"""<line_sep>print("cut_td_idf start! ")<line_sep>corpus=txtRead(sources_path)<line_sep>governments=[]<for_stmt>corpus_one corpus<block_start>corpus_one_clear=corpus_one.replace(' ' '').strip()<line_sep>ques_q2b=strQ2B(corpus_one_clear.strip())<line_sep>ques_q2b_syboml=get_syboml(ques_q2b)<line_sep>governments.append(ques_q2b_syboml.strip())<block_end>government_ques=list(map(<lambda>x:' '.join(jieba.lcut(x)) governments))<line_sep>topic_ques_all=[]<for_stmt>topic_ques_one government_ques<block_start>top_ques_aqlq=topic_ques_one.replace(' ' ' ').replace(' ' ' ').strip()+'\n'<line_sep>topic_ques_all.append(top_ques_aqlq)<block_end>txtWrite(topic_ques_all target_path)<line_sep>print("cut_td_idf ok! "+sources_path)<block_end><def_stmt>cut_td_idf_pinyin sources_path target_path#获取拼音
<block_start>"""
Convert Chinese text to pinyin.
:param sources_path: path of the input corpus
:param target_path: path of the pinyin output
:return: None
"""<line_sep>pin=xpinyin.Pinyin()<line_sep>corpus=txtRead(sources_path)<line_sep>topic_ques_all=[]<line_sep>corpus_count=0<for_stmt>corpus_one corpus<block_start>corpus_count<augadd>1<line_sep># time1 = time.time()
corpus_one_clear=corpus_one.replace(' ' '').strip()<line_sep>ques_q2b=strQ2B(corpus_one_clear.strip())<line_sep>ques_q2b_syboml=get_syboml(ques_q2b)<line_sep>ques_q2b_syboml_pinying=pin.get_pinyin(ques_q2b_syboml.replace(' ' '').replace(' ' '').strip() ' ')<line_sep>topic_ques_all.append(ques_q2b_syboml_pinying+'\n')<line_sep># time2 = time.time()
# print(str(corpus_count) + 'time:' + str(time2 - time1))
<block_end>txtWrite(topic_ques_all target_path)<line_sep>print("cut_td_idf_pinyin ok! "+sources_path)<block_end><def_stmt>init_tfidf_chinese_or_pinyin sources_path<block_start>"""
Build the TF-IDF dictionary and model and pickle them.
:param sources_path: path of the tokenized corpus
:return: None
"""<line_sep>questions=txtRead(sources_path)<line_sep>corpora_documents=[]<for_stmt>item_text questions<block_start>item_seg=list(jieba.cut(str(item_text).strip()))<line_sep>corpora_documents.append(item_seg)<block_end>dictionary=corpora.Dictionary(corpora_documents)<line_sep>corpus=[dictionary.doc2bow(text)<for>text corpora_documents]<line_sep>tfidf_model=models.TfidfModel(corpus)<line_sep>print("init_tfidf_chinese_or_pinyin ok! "+sources_path)<line_sep>file=open(sources_path.replace(".csv" "_dictionary_model.pkl") 'wb')<line_sep>pickle.dump([dictionary tfidf_model] file)<block_end><if_stmt>__name__<eq>'__main__'# path_text = projectdir + '/Data/chicken_gossip.txt'
# sentences = txtRead(path_text)
# sentences_q = []
# for sentences_one in sentences:
# sentences_one_replace = sentences_one.replace(" ", "").replace("\t", "")
# sentences_one_replace_split = sentences_one_replace.split("|")
# sentence_new = sentences_one_replace_split[0] + "\t" + "".join(sentences_one_replace_split[1:])
# sentences_q.append(sentence_new)
# sentences = txtWrite(sentences_q, projectdir + '/Data/chicken_and_gossip.txt')
<block_start>cut_td_idf(chicken_and_gossip_path td_idf_cut_path)<line_sep>cut_td_idf_pinyin(chicken_and_gossip_path td_idf_cut_pinyin)<line_sep>init_tfidf_chinese_or_pinyin(td_idf_cut_path)<line_sep>init_tfidf_chinese_or_pinyin(td_idf_cut_pinyin)<line_sep>print("corpus ok!")<block_end> |
""" Test __str__ methods. """<import_stmt>pexpect<import_from_stmt>. PexpectTestCase<class_stmt>TestCaseMisc(PexpectTestCase.PexpectTestCase)<block_start><def_stmt>test_str_spawnu self<block_start>""" Exercise spawnu.__str__() """<line_sep># given,
p=pexpect.spawnu('cat')<line_sep># exercise,
value=str(p)<line_sep># verify
<assert_stmt>isinstance(value str)<block_end><def_stmt>test_str_spawn self<block_start>""" Exercise spawn.__str__() """<line_sep># given,
p=pexpect.spawn('cat')<line_sep># exercise,
value=str(p)<line_sep># verify
<assert_stmt>isinstance(value str)<block_end><def_stmt>test_str_before_spawn self<block_start>""" Exercise derived spawn.__str__() """<line_sep># given,
child=pexpect.spawn(<none> <none>)<line_sep>child.read_nonblocking=<lambda>size timeout:b''<try_stmt><block_start>child.expect('alpha' timeout=0.1)<block_end><except_stmt>pexpect.TIMEOUT<as>e<block_start>str(e)# Smoketest
<block_end><else_stmt><block_start><assert_stmt><false> 'TIMEOUT exception expected. No exception raised.'<block_end><block_end><block_end> |
<import_from_stmt>.dashboards Dashboard# NOQA
|
<import_from_stmt>dlib *<import_stmt>numpy<as>np<import_stmt>sys<line_sep>sys.path=['./superfast/build']+sys.path<import_stmt>superfast<line_sep># NEW!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
<def_stmt>discard_all_but_largest_blob img<block_start>labels,num_blobs=label_connected_blobs(img connected_if_both_not_zero=<true>)<line_sep>h=get_histogram(labels num_blobs)<line_sep># ignore background blobs
h[0]=0<line_sep>largest_blob=np.argmax(h)<line_sep>superfast.zero_pixels_not_labeled_with_val(img labels largest_blob)<line_sep><return>img<block_end>#img = load_grayscale_image(sys.argv[1])
# discarding all but largest blob fixes this image
img=load_grayscale_image('./images/find_page/paper22.jpg')<line_sep># What about this image? Need to do something to fix it
#img = load_grayscale_image('./images/find_page/tissue_04.jpg')
ht=hough_transform(300)<line_sep>img=resize_image(img ht.size ht.size)<line_sep>win1=image_window(img)<line_sep>ig=image_gradients(10)<line_sep>x=ig.gradient_x(img)<line_sep>y=ig.gradient_y(img)<line_sep>edges=suppress_non_maximum_edges(x y)<line_sep>win3=image_window(edges)<line_sep>edges=discard_all_but_largest_blob(hysteresis_threshold(edges))# NEW!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
win4=image_window(edges)<line_sep>himg=ht(edges)<line_sep>hits=ht.find_strong_hough_points(himg hough_count_thresh=ht.size/5 angle_nms_thresh=15 radius_nms_thresh=10)<line_sep>lines=[ht.get_line(p)<for>p hits[0:4]]<line_sep>win1.add_overlay(lines)<line_sep>page=extract_image_4points(img lines 200 200)<line_sep>win_page=image_window(page)<line_sep>input("hit enter to exit")<line_sep> |
"""CFNgin init action."""<import_from_future_stmt> annotations<import_stmt>logging<import_from_stmt>typing TYPE_CHECKING Any Optional Union cast<import_from_stmt>...compat cached_property<import_from_stmt>...config.models.cfngin CfnginStackDefinitionModel<import_from_stmt>...core.providers.aws.s3 Bucket<import_from_stmt>..exceptions CfnginBucketAccessDenied<import_from_stmt>. deploy<import_from_stmt>.base BaseAction<if_stmt>TYPE_CHECKING<block_start><import_stmt>threading<import_from_stmt>..._logging RunwayLogger<import_from_stmt>...context CfnginContext<import_from_stmt>..providers.aws.default ProviderBuilder<block_end>LOGGER=cast("RunwayLogger" logging.getLogger(__name__))<class_stmt>Action(BaseAction)<block_start>"""Initialize environment."""<line_sep>NAME="init"<line_sep>DESCRIPTION="Initialize environment"<def_stmt>__init__ self context:CfnginContext provider_builder:Optional[ProviderBuilder]=<none> cancel:Optional[threading.Event]=<none> <block_start>"""Instantiate class.
This class operates on a copy of the context object because some of the
actions it performs can destructively modify the context.
Args:
context: The context for the current run.
provider_builder: An object that will build a provider that will be
interacted with in order to perform the necessary actions.
cancel: Cancel handler.
"""<line_sep>super().__init__(context=context.copy() provider_builder=provider_builder cancel=cancel)<block_end>@property<def_stmt>_stack_action self<arrow>Any<block_start>"""Run against a step."""<line_sep><return><none><block_end>@cached_property<def_stmt>cfngin_bucket self<arrow>Optional[Bucket]<block_start>"""CFNgin bucket.
Returns None when the context does not define a bucket name.
"""<if_stmt><not>self.context.bucket_name<block_start><return><none><block_end><return>Bucket(self.context name=self.context.bucket_name region=self.context.bucket_region )<block_end>@cached_property<def_stmt>default_cfngin_bucket_stack self<arrow>CfnginStackDefinitionModel<block_start>"""CFNgin bucket stack."""<line_sep><return>CfnginStackDefinitionModel(class_path="runway.cfngin.blueprints.cfngin_bucket.CfnginBucket" in_progress_behavior="wait" name="cfngin-bucket" termination_protection=<true> variables={"BucketName":self.context.bucket_name} )<block_end><def_stmt>run self * concurrency:int=0 dump:Union[bool str]=<false> # pylint: disable=unused-argument
force:bool=<false> # pylint: disable=unused-argument
outline:bool=<false> # pylint: disable=unused-argument
tail:bool=<false> upload_disabled:bool=<true> # pylint: disable=unused-argument
**_kwargs:Any <arrow><none><block_start>"""Run the action.
Args:
concurrency: The maximum number of concurrent deployments.
dump: Not used by this action
force: Not used by this action.
outline: Not used by this action.
tail: Tail the stack's events.
upload_disabled: Not used by this action.
Raises:
CfnginBucketAccessDenied: Could not head cfngin_bucket.
"""<if_stmt><not>self.cfngin_bucket<block_start>LOGGER.info("skipped; cfngin_bucket not defined")<line_sep><return><block_end><if_stmt>self.cfngin_bucket.forbidden<block_start><raise>CfnginBucketAccessDenied(bucket_name=self.cfngin_bucket.name)<block_end><if_stmt>self.cfngin_bucket.exists<block_start>LOGGER.info("cfngin_bucket %s already exists" self.cfngin_bucket.name)<line_sep><return><block_end><if_stmt>self.context.get_stack("cfngin-bucket")<block_start>LOGGER.verbose("found stack for creating cfngin_bucket: cfngin-bucket" )<line_sep>self.context.stack_names=["cfngin-bucket"]<block_end><else_stmt><block_start>LOGGER.notice("using default blueprint to create cfngin_bucket...")<line_sep>self.context.config.stacks=[self.default_cfngin_bucket_stack]<line_sep># clear cached values that were populated by checking the previous condition
self.context._del_cached_property(# pylint: disable=protected-access
"stacks" "stacks_dict")<block_end><if_stmt>self.provider_builder<block_start>self.provider_builder.region=self.context.bucket_region<block_end>deploy.Action(context=self.context provider_builder=self.provider_builder cancel=self.cancel ).run(concurrency=concurrency tail=tail upload_disabled=<true> )<line_sep><return><block_end><def_stmt>pre_run self * dump:Union[bool str]=<false> # pylint: disable=unused-argument
outline:bool=<false> # pylint: disable=unused-argument
**__kwargs:Any <arrow><none><block_start>"""Do nothing."""<block_end><def_stmt>post_run self * dump:Union[bool str]=<false> # pylint: disable=unused-argument
outline:bool=<false> # pylint: disable=unused-argument
**__kwargs:Any <arrow><none><block_start>"""Do nothing."""<block_end><block_end> |
# Copyright 2020 Red Hat, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_stmt>abc<class_stmt>ImageBase(object metaclass=abc.ABCMeta)<block_start>@abc.abstractmethod<def_stmt>get_image_id_by_tag self image_tag image_owner=<none><block_start>"""Get image ID by image tag and owner.
:param image_tag: image tag
:param image_owner: optional image owner
:raises: ImageGetException if no images found with given tag
:return: image id
"""<block_end><block_end> |
<import_from_stmt>hdlConvertorAst.to.verilog.constants SIGNAL_TYPE<class_stmt>SignalTypeSwap()<block_start>"""
An object which is used as a context manager for signalType
inside of :class:`hwt.serializer.verilog.serializer.ToHdlAstVerilog`
"""<def_stmt>__init__ self ctx signalType:SIGNAL_TYPE<block_start>self.ctx=ctx<line_sep>self.signalType=signalType<block_end><def_stmt>__enter__ self<block_start>self.orig=self.ctx.signalType<line_sep>self.ctx.signalType=self.signalType<block_end><def_stmt>__exit__ self exc_type exc_val exc_tb<block_start>self.ctx.signalType=self.orig<block_end><block_end> |
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for haiku._src.batch_norm."""<import_stmt>os<import_from_stmt>absl.testing absltest<import_from_stmt>haiku._src batch_norm<import_from_stmt>haiku._src test_utils<import_from_stmt>haiku._src transform<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>numpy<as>np<class_stmt>BatchNormTest(absltest.TestCase)<block_start>@test_utils.transform_and_run<def_stmt>test_basic self<block_start>data=jnp.arange(2<times>3<times>4 dtype=jnp.float32).reshape([2 3 4])<line_sep>norm=batch_norm.BatchNorm(<true> <true> 0.9)<line_sep>result=norm(data is_training=<true>)<line_sep>result_0_replicated=jnp.broadcast_to(result[: : :1] result.shape)<line_sep># Input data is symmetrical variance per-channel.
np.testing.assert_allclose(result result_0_replicated)<line_sep># Running through again in test mode produces same output.
np.testing.assert_allclose(norm(data is_training=<false>) result rtol=2e-2)<block_end>@test_utils.transform_and_run<def_stmt>test_simple_training self<block_start>layer=batch_norm.BatchNorm(create_scale=<false> create_offset=<false> decay_rate=0.9)<line_sep>inputs=np.ones([2 3 3 5])<line_sep>scale=np.full((5 ) 0.5)<line_sep>offset=np.full((5 ) 2.0)<line_sep>result=layer(inputs <true> scale=scale offset=offset)<line_sep>np.testing.assert_equal(result np.full(inputs.shape 2.0))<block_end>@test_utils.transform_and_run<def_stmt>test_simple_training_nchw self<block_start>layer=batch_norm.BatchNorm(create_scale=<false> create_offset=<false> decay_rate=0.9 data_format="NCHW")<line_sep>inputs=np.ones([2 5 3 3])<line_sep>scale=np.full((5 1 1) 0.5)<line_sep>offset=np.full((5 1 1) 2.0)<line_sep>result=layer(inputs <true> scale=scale offset=offset)<line_sep>np.testing.assert_equal(result np.full(inputs.shape 2.0))<block_end>@test_utils.transform_and_run<def_stmt>test_simple_training_normalized_axes self<block_start>layer=batch_norm.BatchNorm(create_scale=<false> create_offset=<false> decay_rate=0.9 axis=[0 2 3])<line_sep># Not the second axis.
# This differs only in the second axis.
inputs=np.stack([2.0<times>np.ones([5 3 3]) np.ones([5 3 3])] 1)<line_sep>result=layer(inputs <true>)<line_sep># Despite not all values being identical, treating slices from the first
# axis separately leads to a fully normalized = equal array.
np.testing.assert_equal(result np.zeros(inputs.shape))<block_end><def_stmt>test_simple_training_cross_replica_axis self<block_start>ldc=jax.local_device_count()<def_stmt>f x is_training=<true><block_start><return>batch_norm.BatchNorm(create_scale=<false> create_offset=<false> decay_rate=0.9 cross_replica_axis="i" )(x is_training=is_training)<block_end>f=transform.transform_with_state(f)<line_sep>inputs=np.arange(ldc<times>4).reshape(ldc 4)<line_sep>key=np.broadcast_to(jax.random.PRNGKey(42) (ldc 2))<line_sep>params,state=jax.pmap(f.init axis_name="i")(key inputs)<line_sep>result,_=jax.pmap(f.apply axis_name="i")(params state key inputs)<line_sep>mean=np.mean(inputs axis=0)<line_sep>std=np.std(inputs axis=0)+1e-10<line_sep>expected=(inputs-mean)/std<line_sep>np.testing.assert_array_almost_equal(result expected)<block_end><def_stmt>test_simple_training_cross_replica_axis_index_groups self<block_start>ldc=jax.local_device_count()<if_stmt>ldc<l>2<block_start>self.skipTest("Cross-replica test requires at least 2 devices.")<block_end>num_groups=ldc<floordiv>2<line_sep>num_group_devices=ldc<floordiv>num_groups<line_sep># for 8 devices this produces [[0, 1], [2, 3], [4, 5], [6, 7]] groups.
groups=np.arange(ldc).reshape(num_groups num_group_devices).tolist()<def_stmt>f x is_training=<true><block_start><return>batch_norm.BatchNorm(create_scale=<false> create_offset=<false> decay_rate=0.9 cross_replica_axis="i" cross_replica_axis_index_groups=groups )(x is_training=is_training)<block_end>f=transform.transform_with_state(f)<line_sep>inputs=np.arange(ldc<times>4).reshape(ldc 4).astype(np.float32)<line_sep>key=np.broadcast_to(jax.random.PRNGKey(42) (ldc 2))<line_sep>params,state=jax.pmap(f.init axis_name="i")(key inputs)<line_sep>result,_=jax.pmap(f.apply axis_name="i")(params state key inputs)<line_sep>expected=np.empty_like(inputs)<for_stmt>g range(num_groups)<block_start>group_inputs=inputs[num_group_devices<times>g:num_group_devices<times>(g+1)]<line_sep>group_mean=np.mean(group_inputs axis=0)<line_sep>group_std=np.std(group_inputs axis=0)+1e-10<line_sep>group_inputs=(group_inputs-group_mean)/group_std<line_sep>expected[num_group_devices<times>g:num_group_devices<times>(g+1)]=group_inputs<block_end>np.testing.assert_array_almost_equal(result expected)<block_end>@test_utils.transform_and_run<def_stmt>test_no_scale_and_offset self<block_start>layer=batch_norm.BatchNorm(create_scale=<false> create_offset=<false> decay_rate=0.9)<line_sep>inputs=jnp.ones([2 5 3 3 3])<line_sep>result=layer(inputs <true>)<line_sep>np.testing.assert_equal(result np.zeros_like(inputs))<block_end>@test_utils.transform_and_run<def_stmt>test_no_scale_and_init_provided self<block_start><with_stmt>self.assertRaisesRegex(ValueError "Cannot set `scale_init` if `create_scale=False`")<block_start>batch_norm.BatchNorm(create_scale=<false> create_offset=<true> decay_rate=0.9 scale_init=jnp.ones)<block_end><block_end>@test_utils.transform_and_run<def_stmt>test_no_offset_beta_init_provided self<block_start><with_stmt>self.assertRaisesRegex(ValueError "Cannot set `offset_init` if `create_offset=False`")<block_start>batch_norm.BatchNorm(create_scale=<true> create_offset=<false> decay_rate=0.9 offset_init=jnp.zeros)<block_end><block_end><def_stmt>test_eps_cast_to_var_dtype self# See https://github.com/google/jax/issues/4718 for more info. In the
# context of this test we need to assert NumPy bf16 params/state and a
# Python float for eps preserve bf16 output.
<block_start><def_stmt>f x is_training<block_start><return>batch_norm.BatchNorm(<true> <true> 0.9 eps=0.1)(x is_training)<block_end>f=transform.transform_with_state(f)<line_sep>x=np.ones([] jnp.bfloat16)<line_sep>key=jax.random.PRNGKey(42)<line_sep>params,state=jax.device_get(f.init(key x <true>))<line_sep>y,_=f.apply(params state <none> x <false>)<line_sep>self.assertEqual(y.dtype jnp.bfloat16)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>_xla_flags=os.environ.get("XLA_FLAGS" "")<line_sep>os.environ["XLA_FLAGS"]=(_xla_flags+" --xla_force_host_platform_device_count=8")<line_sep>absltest.main()<line_sep>os.environ["XLA_FLAGS"]=_xla_flags<block_end> |
<class_stmt>NorthInDTO(object)<block_start><def_stmt>__init__ self<block_start>self.platformIp=<none><line_sep>self.platformPort=<none><block_end><def_stmt>getPlatformIp self<block_start><return>self.platformIp<block_end><def_stmt>setPlatformIp self platformIp<block_start>self.platformIp=platformIp<block_end><def_stmt>getPlatformPort self<block_start><return>self.platformPort<block_end><def_stmt>setPlatformPort self platformPort<block_start>self.platformPort=platformPort<block_end><block_end> |
# -*- coding: utf-8 -*-
__author__="苦叶子"<line_sep>"""
WeChat official account: 开源优测 (Open Source Testing)
Email: <EMAIL>
"""<import_from_stmt>flask Blueprint<import_from_stmt>flask_restful Api<line_sep>api_bp=Blueprint('api' __name__)<line_sep>api=Api(api_bp)<import_from_stmt>.auth Auth<line_sep>api.add_resource(Auth "/auth/")<import_from_stmt>.product Product<line_sep>api.add_resource(Product "/product/")<import_from_stmt>.project Project<line_sep>api.add_resource(Project "/project/")<import_from_stmt>.suite Suite<line_sep>api.add_resource(Suite "/suite/")<import_from_stmt>.object Object<line_sep>api.add_resource(Object "/object/")<import_from_stmt>.case Case<line_sep>api.add_resource(Case "/case/")<import_from_stmt>.step Step<line_sep>api.add_resource(Step "/step/")<import_from_stmt>.var Var<line_sep>api.add_resource(Var "/var/")<import_from_stmt>.keyword Keyword<line_sep>api.add_resource(Keyword "/keyword/")<import_from_stmt>.help Help<line_sep>api.add_resource(Help "/help/")<import_from_stmt>.task Task<line_sep>api.add_resource(Task "/task/")<import_from_stmt>.trigger Triggers<line_sep>api.add_resource(Triggers "/trigger/")<import_from_stmt>.stats Stats<line_sep>api.add_resource(Stats "/stats/")<import_from_stmt>.user Users<line_sep>api.add_resource(Users "/user/")<import_from_stmt>.role Roles<line_sep>api.add_resource(Roles "/role/")<import_from_stmt>.user_keyword UserKeywordSuite UserKeyword<line_sep>api.add_resource(UserKeywordSuite "/user_keyword_suite/")<line_sep>api.add_resource(UserKeyword "/user_keyword/")<line_sep> |
# -*- coding: UTF-8 -*-
"""
This script demonstrates how to solve classification problems with a neural network.
"""<import_stmt>os<import_from_stmt>mlp ANN<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>sklearn.datasets make_blobs make_circles make_moons<import_from_stmt>sklearn.linear_model LogisticRegression<import_from_stmt>sklearn.preprocessing StandardScaler OneHotEncoder<def_stmt>generateData n<block_start>"""
"""<line_sep>np.random.seed(12046)<line_sep>blobs=make_blobs(n_samples=n centers=[[-2 -2] [2 2]])<line_sep>circles=make_circles(n_samples=n factor=.4 noise=.05)<line_sep>moons=make_moons(n_samples=n noise=.05)<line_sep>blocks=np.random.rand(n 2)-0.5<line_sep>y=(blocks[: 0]<times>blocks[: 1]<l>0)+0<line_sep>blocks=(blocks y)<line_sep># 由于神经网络对数据的线性变换不稳定,因此将数据做归一化处理
scaler=StandardScaler()<line_sep>blobs=(scaler.fit_transform(blobs[0]) blobs[1])<line_sep>circles=(scaler.fit_transform(circles[0]) circles[1])<line_sep>moons=(scaler.fit_transform(moons[0]) moons[1])<line_sep>blocks=(scaler.fit_transform(blocks[0]) blocks[1])<line_sep><return>blobs circles moons blocks<block_end><def_stmt>drawData ax data<block_start>"""
Visualize the data.
"""<line_sep>X,y=data<line_sep>label1=X[y<g>0]<line_sep>ax.scatter(label1[: 0] label1[: 1] marker="o")<line_sep>label0=X[y<eq>0]<line_sep>ax.scatter(label0[: 0] label0[: 1] marker="^" color="k")<line_sep><return>ax<block_end><def_stmt>drawModel ax model<block_start>"""
Visualize the model's separating hyperplane.
"""<line_sep>x1=np.linspace(ax.get_xlim()[0] ax.get_xlim()[1] 100)<line_sep>x2=np.linspace(ax.get_ylim()[0] ax.get_ylim()[1] 100)<line_sep>X1,X2=np.meshgrid(x1 x2)<line_sep>Y=model.predict_proba(np.c_[X1.ravel() X2.ravel()])[: 1]<line_sep>Y=Y.reshape(X1.shape)<line_sep>ax.contourf(X1 X2 Y levels=[0 0.5] colors=["gray"] alpha=0.4)<line_sep><return>ax<block_end><def_stmt>trainLogit data<block_start>"""
"""<line_sep>X,y=data<line_sep>model=LogisticRegression()<line_sep>model.fit(X y)<line_sep><return>model<block_end><def_stmt>trainANN data logPath<block_start>"""
"""<line_sep>X,y=data<line_sep>enc=OneHotEncoder()<line_sep>y=enc.fit_transform(y.reshape(-1 1)).toarray()<line_sep>model=ANN([4 4 2] logPath)<line_sep>model.fit(X y)<line_sep><return>model<block_end><def_stmt>visualize data<block_start>"""
"""<line_sep># 创建一个图形框
fig=plt.figure(figsize=(10 10) dpi=80)<line_sep>fig1=plt.figure(figsize=(10 10) dpi=80)<line_sep># draw four subplots in each figure
<for_stmt>i range(len(data))<block_start>ax=fig.add_subplot(2 2 i+1)<line_sep>ax1=fig1.add_subplot(2 2 i+1)<line_sep>drawData(ax data[i])<line_sep># the log-file path is joined differently on Windows than on Linux
<if_stmt>os.name<eq>"nt"<block_start>drawModel(ax trainANN(data[i] "logs\\data_%s"%(i+1)))<block_end><else_stmt><block_start>drawModel(ax trainANN(data[i] "logs/data_%s"%(i+1)))<block_end>drawData(ax1 data[i])<line_sep>drawModel(ax1 trainLogit(data[i]))<line_sep>ax.get_xaxis().set_visible(<false>)<line_sep>ax.get_yaxis().set_visible(<false>)<line_sep>ax1.get_xaxis().set_visible(<false>)<line_sep>ax1.get_yaxis().set_visible(<false>)<block_end>plt.show()<block_end><if_stmt>__name__<eq>"__main__"<block_start>data=generateData(200)<line_sep>visualize(data)<block_end> |
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 29 10:58:31 2016
@author: <EMAIL>
"""<import_stmt>numpy<as>np<import_stmt>scipy<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<as>sns<import_from_stmt>matplotlib.patches Ellipse<def_stmt>plot_cov_ellipse cov pos nstd=2 ax=<none> **kwargs<block_start>"""
Plots an `nstd` sigma error ellipse based on the specified covariance
matrix (`cov`). Additional keyword arguments are passed on to the
ellipse patch artist.
Parameters
----------
cov : The 2x2 covariance matrix to base the ellipse on
pos : The location of the center of the ellipse. Expects a 2-element
sequence of [x0, y0].
nstd : The radius of the ellipse in numbers of standard deviations.
Defaults to 2 standard deviations.
ax : The axis that the ellipse will be plotted on. Defaults to the
current axis.
Additional keyword arguments are passed on to the ellipse patch.
Returns
-------
A matplotlib ellipse artist
"""<def_stmt>eigsorted cov<block_start>vals,vecs=np.linalg.eigh(cov)<line_sep>order=vals.argsort()[::-1]<line_sep><return>vals[order] vecs[: order]<block_end><if_stmt>ax<is><none><block_start>ax=plt.gca()<block_end>vals,vecs=eigsorted(cov)<line_sep>theta=np.degrees(np.arctan2(*vecs[: 0][::-1]))<line_sep># Width and height are "full" widths, not radius
width,height=2<times>nstd<times>np.sqrt(vals)<line_sep>ellip=Ellipse(xy=pos width=width height=height angle=theta **kwargs)<line_sep>ax.add_artist(ellip)<line_sep><return>ellip<block_end> |
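# Hedged usage sketch (a new snippet, not from the file above): draw a 2-sigma
# covariance ellipse over scattered points. The module name `cov_ellipse` is
# hypothetical -- import plot_cov_ellipse from wherever the snippet above lives.
<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>cov_ellipse plot_cov_ellipse<line_sep>pts=np.random.multivariate_normal([0 0] [[2.0 0.8] [0.8 1.0]] size=500)<line_sep>plt.scatter(pts[: 0] pts[: 1] s=4)<line_sep>plot_cov_ellipse(np.cov(pts.T) pts.mean(axis=0) nstd=2 alpha=0.3 color='r')<line_sep>plt.show()<line_sep> |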
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the BC agent."""<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_from_stmt>acme specs<import_from_stmt>acme types<import_from_stmt>acme.agents.jax bc<import_from_stmt>acme.jax networks<as>networks_lib<import_from_stmt>acme.jax utils<import_from_stmt>acme.testing fakes<import_stmt>chex<import_stmt>haiku<as>hk<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_from_stmt>jax.scipy special<import_stmt>numpy<as>np<import_stmt>optax<def_stmt>make_networks spec:specs.EnvironmentSpec discrete_actions:bool=<false><arrow>networks_lib.FeedForwardNetwork<block_start>"""Creates networks used by the agent."""<if_stmt>discrete_actions<block_start>final_layer_size=spec.actions.num_values<block_end><else_stmt><block_start>final_layer_size=np.prod(spec.actions.shape dtype=int)<block_end><def_stmt>_actor_fn obs is_training=<false> key=<none># is_training and key allows to defined train/test dependant modules
# like dropout.
<block_start><del_stmt>is_training<del_stmt>key<if_stmt>discrete_actions<block_start>network=hk.nets.MLP([64 64 final_layer_size])<block_end><else_stmt><block_start>network=hk.Sequential([networks_lib.LayerNormMLP([64 64] activate_final=<true>) networks_lib.NormalTanhDistribution(final_layer_size) ])<block_end><return>network(obs)<block_end>policy=hk.without_apply_rng(hk.transform(_actor_fn))<line_sep># Create dummy observations and actions to create network parameters.
dummy_obs=utils.zeros_like(spec.observations)<line_sep>dummy_obs=utils.add_batch_dim(dummy_obs)<line_sep>network=networks_lib.FeedForwardNetwork(<lambda>key:policy.init(key dummy_obs) policy.apply)<line_sep><return>network<block_end><class_stmt>BCTest(parameterized.TestCase)<block_start>@parameterized.parameters(('logp' ) ('mse' ) ('peerbc' ))<def_stmt>test_continuous_actions self loss_name<block_start><with_stmt>chex.fake_pmap_and_jit()<block_start>num_sgd_steps_per_step=1<line_sep>num_steps=5<line_sep># Create a fake environment to test with.
environment=fakes.ContinuousEnvironment(episode_length=10 bounded=<true> action_dim=6)<line_sep>spec=specs.make_environment_spec(environment)<line_sep>dataset_demonstration=fakes.transition_dataset(environment)<line_sep>dataset_demonstration=dataset_demonstration.map(<lambda>sample:types.Transition(*sample.data))<line_sep>dataset_demonstration=dataset_demonstration.batch(8).as_numpy_iterator()<line_sep># Construct the agent.
network=make_networks(spec)<if_stmt>loss_name<eq>'logp'<block_start>loss_fn=bc.logp(logp_fn=<lambda>dist_params actions:dist_params.log_prob(actions))<block_end><elif_stmt>loss_name<eq>'mse'<block_start>loss_fn=bc.mse(sample_fn=<lambda>dist_params key:dist_params.sample(seed=key))<block_end><elif_stmt>loss_name<eq>'peerbc'<block_start>base_loss_fn=bc.logp(logp_fn=<lambda>dist_params actions:dist_params.log_prob(actions))<line_sep>loss_fn=bc.peerbc(base_loss_fn zeta=0.1)<block_end><else_stmt><block_start><raise>ValueError<block_end>learner=bc.BCLearner(network=network random_key=jax.random.PRNGKey(0) loss_fn=loss_fn optimizer=optax.adam(0.01) demonstrations=dataset_demonstration num_sgd_steps_per_step=num_sgd_steps_per_step)<line_sep># Train the agent
<for_stmt>_ range(num_steps)<block_start>learner.step()<block_end><block_end><block_end>@parameterized.parameters(('logp' ) ('rcal' ))<def_stmt>test_discrete_actions self loss_name<block_start><with_stmt>chex.fake_pmap_and_jit()<block_start>num_sgd_steps_per_step=1<line_sep>num_steps=5<line_sep># Create a fake environment to test with.
environment=fakes.DiscreteEnvironment(num_actions=10 num_observations=100 obs_shape=(10 ) obs_dtype=np.float32)<line_sep>spec=specs.make_environment_spec(environment)<line_sep>dataset_demonstration=fakes.transition_dataset(environment)<line_sep>dataset_demonstration=dataset_demonstration.map(<lambda>sample:types.Transition(*sample.data))<line_sep>dataset_demonstration=dataset_demonstration.batch(8).as_numpy_iterator()<line_sep># Construct the agent.
network=make_networks(spec discrete_actions=<true>)<def_stmt>logp_fn logits actions<block_start>max_logits=jnp.max(logits axis=-1 keepdims=<true>)<line_sep>logits=logits-max_logits<line_sep>logits_actions=jnp.sum(jax.nn.one_hot(actions spec.actions.num_values)<times>logits axis=-1)<line_sep>log_prob=logits_actions-special.logsumexp(logits axis=-1)<line_sep><return>log_prob<block_end><if_stmt>loss_name<eq>'logp'<block_start>loss_fn=bc.logp(logp_fn=logp_fn)<block_end><elif_stmt>loss_name<eq>'rcal'<block_start>base_loss_fn=bc.logp(logp_fn=logp_fn)<line_sep>loss_fn=bc.rcal(base_loss_fn discount=0.99 alpha=0.1)<block_end><else_stmt><block_start><raise>ValueError<block_end>learner=bc.BCLearner(network=network random_key=jax.random.PRNGKey(0) loss_fn=loss_fn optimizer=optax.adam(0.01) demonstrations=dataset_demonstration num_sgd_steps_per_step=num_sgd_steps_per_step)<line_sep># Train the agent
<for_stmt>_ range(num_steps)<block_start>learner.step()<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db models migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('services' '0005_auto_20150901_1253') ]<line_sep>operations=[migrations.AlterField(model_name='story' name='content_type' field=models.CharField(blank=<true> max_length=1 null=<true> choices=[(b'T' b'text') (b'U' b'url') (b'I' b'image')]) ) migrations.AlterField(model_name='story' name='date' field=models.DateField(auto_now_add=<true>) ) ]<block_end> |
###############################################################################
#
# Purpose: Use VisIt CLI to iterate over Curves in a material database and
# compute and plot some common difference curves and output the results
# to either a curve or image file format.
#
# Programmer: <NAME>
# Date: Wed May 27 13:15:07 PDT 2009
#
#
# Modifications:
# <NAME>, Mon Jun 15 17:52:15 PDT 2009
# Removed subclassing used to override behavior of Optparse in presence of
# unrecognized options. By using Argv(), VisIt-specific options never wind
# up getting passed to this script.
###############################################################################
<import_stmt>sys re os glob<import_from_stmt>optparse *<line_sep>#
# Convert '#FFCC13" strings to color tuple
#
<def_stmt>ColorTupleFromHexString s<block_start><if_stmt>s[0]<ne>'#'<block_start><return>(0 0 0 255)<block_end><return>(int("0x%s"%s[1:3] 16) int("0x%s"%s[3:5] 16) int("0x%s"%s[5:7] 16) 255)<block_end>#
# Command-line options
#
<def_stmt>BuildCommandLineOptions <block_start>parser=OptionParser()<line_sep>parser.add_option("--image-width" help="Set width of images [%default]." type="int" dest="image_width" default="500" metavar="INT")<line_sep>parser.add_option("--image-height" help="Set height of images [%default]." type="int" dest="image_height" default="500" metavar="INT")<line_sep>parser.add_option("--data-min" type="float" dest="data_min" metavar="FLOAT" help="Minimum data value to be applied to all plots. If no "<concat>"value is specified, the minimum will be allowed to vary "<concat>"as needed from plot to plot.")<line_sep>parser.add_option("--data-max" type="float" dest="data_max" metavar="FLOAT" help="Maximum data value to be applied to all plots. If no "<concat>"value is specified, the maximum will be allowed to vary "<concat>"as needed from plot to plot.")<line_sep>parser.add_option("--log-data" help="Display data (y) axis in log scaling." action="store_true" dest="log_data" default=<false>)<line_sep>parser.add_option("--x-min" type="float" dest="x_min" metavar="FLOAT" help="Minimum positional (x) value to be applied to all plots. If no "<concat>"value is specified, the minimum will be allowed to vary "<concat>"as needed from plot to plot.")<line_sep>parser.add_option("--x-max" type="float" dest="x_max" metavar="FLOAT" help="Maximum positional (x) value to be applied to all plots. If no "<concat>"value is specified, the maximum will be allowed to vary "<concat>"as needed from plot to plot.")<line_sep>parser.add_option("--log-x" help="Display positional (x) axis in log scaling." action="store_true" dest="log_x" default=<false>)<line_sep>parser.add_option("--image-format" help="Set output format for images (e.g. 'tiff', 'png', 'jpeg'). "<concat>"If none specified, no images will be saved." dest="image_format" metavar="STRING")<line_sep>parser.add_option("--curve-format" help="Set output format for curves (e.g. 'ultra', 'curve'). "<concat>"If none specified, no curve files will be saved." dest="curve_format" metavar="STRING")<line_sep>parser.add_option("--color0" help="Set color to be used for first curve plot." dest="color0" metavar="#RRGGBB")<line_sep>parser.add_option("--color1" help="Set color to be used for second curve plot." dest="color1" metavar="#RRGGBB")<line_sep>parser.add_option("--line-width" help="Set line width for curves." type="int" default=0 dest="line_width" metavar="INT")<line_sep>parser.add_option("--point-density" help="Plot symbols representing individual points in curves every Nth point. "<concat>"A value of zero turns the display of points off [%default]." type="int" default=0 dest="point_density" metavar="N")<line_sep>parser.add_option("--point-size" help="Size of symbols representing individual points in curve plots." type="int" default=5 dest="point_size" metavar="INT")<line_sep>parser.add_option("--show-legend" help="Display curve plot legends." action="store_true" dest="show_legend" default=<false>)<line_sep>parser.add_option("--show-labels" help="Display curve plot labels." action="store_true" dest="show_labels" default=<false>)<line_sep>parser.set_usage("matexprs.py [options] dbname")<line_sep><return>parser<block_end>
# Iterate through curves, finding all unique 'dirs' containing curves.
#
<def_stmt>GetVarMap metadata<block_start>dirMap={}<for_stmt>i range(metadata.GetNumCurves())<block_start>dirinfo=re.search("(.*)/([^/]*)" metadata.GetCurves(i).name)<if_stmt>dirinfo<ne><none><block_start>dirname=dirinfo.group(1)<line_sep>varname=dirinfo.group(2)<line_sep>varMap={}<if_stmt>dirname<in>dirMap<block_start>varMap=dirMap[dirname]<block_end>varMap[varname]=1<line_sep>dirMap[dirname]=varMap<block_end><block_end><return>dirMap<block_end>#
# Begin main program
#
parser=BuildCommandLineOptions()<line_sep>#
# This bit of logic allows users to get usage/help from
# the command 'python matexpers.py --help'. Without it
# using VisIt's cli the '--help' will get interpreted
# in internallauncher and never make it into this script.
#
<if_stmt>"-h"<in>sys.argv<or>"--help"<in>sys.argv<or>"-help"<in>sys.argv<or>"help"<in>sys.argv<block_start>parser.print_help()<line_sep>sys.exit(1)<block_end>#
# Argv() is a function defined by VisIt's cli that
# returns ONLY the options after the argument (filename)
# to the '-s' command-line option. In theory, that
# should be only the arguments that this script itself
# should interpret.
#
(clOpts clArgs)=parser.parse_args(list(Argv()))<line_sep>#
# Set the name of the database. It is the only 'positional'
# argument on the command line.
#
dbname=""<if_stmt>len(clArgs)<g>0<block_start>dbname=clArgs[0]<block_end><if_stmt><not>glob.glob(dbname)<block_start><if_stmt>dbname<eq>""<block_start>sys.stderr.write("No database specified.\n")<block_end><else_stmt><block_start>sys.stderr.write("Invalid database, \"%s\", specified.\n"%dbname)<block_end>parser.print_usage()<line_sep>sys.exit(1)<block_end>#
# Open the database, get metadata, get info on curve 'dirs'
#
OpenDatabase(dbname)<line_sep>metadata=GetMetaData(dbname)<line_sep>dirMap=GetVarMap(metadata)<line_sep>#
# Build up base save window attributes
#
swa=SaveWindowAttributes()<line_sep>swa.family=0<line_sep>swa.width=clOpts.image_width<line_sep>swa.height=clOpts.image_height<line_sep>#
# Build up base curve attributes
#
ca=CurveAttributes()<line_sep>ca.lineWidth=clOpts.line_width<if_stmt>clOpts.color0<ne><none><block_start>ca.color=ColorTupleFromHexString(clOpts.color0)<line_sep>ca.cycleColors=0<block_end>ca.showLabels=clOpts.show_labels<line_sep>#if clOpts.point_density > 0:
# ca.showPoints = 1
#ca.pointSize = clOpts.point_size
ca.showLegend=clOpts.show_legend<line_sep>#ca.symbolDensity = clOpts.point_density
SetDefaultPlotOptions(ca)<line_sep>#
# Iterate through all curve 'dirs', finding instances where
# all essential variables exist. Create expressions and plot 'em
#
<for_stmt>k list(dirMap.keys())<block_start><if_stmt><not>("Ec"<in>dirMap[k]<and>"cEc"<in>dirMap[k]<and>"cEc_fit"<in>dirMap[k])<block_start>print("Ignoring %s because not all required vars are present."%k)<line_sep>#del dirMap[k]
<continue><block_end>DefineCurveExpression("%s/c0"%k "<%s/Ec>-<%s/cEc_fit>"%(k k))<line_sep>DefineCurveExpression("%s/c1"%k "<%s/cEc>-<%s/cEc_fit>"%(k k))<line_sep>AddPlot("Curve" "%s/c0"%k)<line_sep>AddPlot("Curve" "%s/c1"%k)<line_sep>DrawPlots()<line_sep>v=GetViewCurve()<if_stmt>clOpts.x_min<ne><none><block_start>v.domainCoords=(clOpts.x_min v.domainCoords[1])<block_end><if_stmt>clOpts.x_max<ne><none><block_start>v.domainCoords=(v.domainCoords[0] clOpts.x_max)<block_end><if_stmt>clOpts.log_x<block_start>v.domainScale=v.LOG<block_end><if_stmt>clOpts.data_min<ne><none><block_start>v.rangeCoords=(clOpts.data_min v.rangeCoords[1])<block_end><if_stmt>clOpts.data_max<ne><none><block_start>v.rangeCoords=(v.rangeCoords[0] clOpts.data_max)<block_end><if_stmt>clOpts.log_data<block_start>v.rangeScale=v.LOG<block_end>SetViewCurve(v)<if_stmt>clOpts.color1<ne><none><block_start>ca2=CurveAttributes()<line_sep>ca2.color=ColorTupleFromHexString(clOpts.color1)<line_sep>ca2.cycleColors=0<line_sep>SetActivePlots((1 ))<line_sep>SetPlotOptions(ca2)<block_end>DrawPlots()<if_stmt>clOpts.curve_format<ne><none><block_start>swa.format=getattr(swa clOpts.curve_format.upper())<line_sep>swa.fileName=k# .curve is added automatically
SetSaveWindowAttributes(swa)<line_sep>SaveWindow()<block_end><if_stmt>clOpts.image_format<ne><none><block_start>swa.format=getattr(swa clOpts.image_format.upper())<line_sep>#swa.fileName = "%s.%s"%(k,clOpts.image_format.lower())
swa.fileName=k<line_sep>SetSaveWindowAttributes(swa)<line_sep>SaveWindow()<block_end>DeleteAllPlots()<block_end> |
<import_stmt>unittest<import_from_stmt>distutils.version StrictVersion<import_from_stmt>mopidy __version__<class_stmt>VersionTest(unittest.TestCase)<block_start><def_stmt>test_current_version_is_parsable_as_a_strict_version_number self<block_start>StrictVersion(__version__)<block_end><block_end> |
"""
Example script using only the Face detector of Openpose.
"""<import_stmt>PyOpenPose<as>OP<import_stmt>time<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>os<line_sep>OPENPOSE_ROOT=os.environ["OPENPOSE_ROOT"]<def_stmt>ComputeBB face padding=0.4<block_start>minX=np.min(face[: 0])<line_sep>minY=np.min(face[: 1])<line_sep>maxX=np.max(face[: 0])<line_sep>maxY=np.max(face[: 1])<line_sep>width=maxX-minX<line_sep>height=maxY-minY<line_sep>padX=width<times>padding/2<line_sep>padY=height<times>padding/2<line_sep>minX<augsub>padX<line_sep>minY<augsub>padY<line_sep>width<augadd>2<times>padX<line_sep>height<augadd>2<times>padY<line_sep>score=np.mean(face[: 2])<line_sep><return>score [int(minX) int(minY) int(width) int(height)]<block_end><def_stmt>run <block_start>cap=cv2.VideoCapture(0)<line_sep>ret,frame=cap.read()<line_sep>imgSize=list(frame.shape)<line_sep>outSize=imgSize[1::-1]<line_sep>print("Net output size: " outSize)<line_sep>download_heatmaps=<false><line_sep>with_hands=<false><line_sep>with_face=<true><line_sep>op=OP.OpenPose((656 368) (240 240) tuple(outSize) "COCO" OPENPOSE_ROOT+os.sep+"models"+os.sep 0 download_heatmaps OP.OpenPose.ScaleMode.ZeroToOne with_face with_hands)<line_sep>actual_fps=0<line_sep>paused=<false><line_sep>delay={<true>:0 <false>:1}<line_sep>newFaceBB=initFaceBB=faceBB=[240 120 150 150]<line_sep>print("Entering main Loop. Put your hand into the box to start tracking")<while_stmt><true><block_start>start_time=time.time()<try_stmt><block_start>ret,frame=cap.read()<line_sep>rgb=frame[: :outSize[0]]<block_end><except_stmt>Exception<as>e<block_start>print("Failed to grab" e)<line_sep><break><block_end>t=time.time()<line_sep>op.detectFace(rgb np.array(faceBB dtype=np.int32).reshape((1 4)))<line_sep>t=time.time()-t<line_sep>op_fps=1.0/t<line_sep>res=op.render(rgb)<line_sep>cv2.putText(res 'UI FPS = %f, OP-FACE FPS = %f. Press \'r\' to reset.'%(actual_fps op_fps) (20 20) 0 0.5 (0 0 255))<line_sep>cv2.rectangle(res (faceBB[0] faceBB[1]) (faceBB[0]+faceBB[2] faceBB[1]+faceBB[3]) [50 155 50] 2)<line_sep>cv2.rectangle(res (newFaceBB[0] newFaceBB[1]) (newFaceBB[0]+newFaceBB[2] newFaceBB[1]+newFaceBB[3]) [250 55 50] 1)<line_sep>cv2.imshow("OpenPose result" res)<line_sep>face=op.getKeypoints(op.KeypointType.FACE)[0].reshape(-1 3)<line_sep>score,newFaceBB=ComputeBB(face)<line_sep>print("Res Score, faceBB: " score newFaceBB)<if_stmt>score<g>0.5# update BB only when score is good.
<block_start>faceBB=newFaceBB<block_end>key=cv2.waitKey(delay[paused])<if_stmt>key&255<eq>ord('p')<block_start>paused=<not>paused<block_end><if_stmt>key&255<eq>ord('q')<block_start><break><block_end><if_stmt>key&255<eq>ord('r')<block_start>faceBB=initFaceBB<block_end>actual_fps=1.0/(time.time()-start_time)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>run()<block_end> |
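# Hedged worked example (a new snippet, not from the file above): the 40%
# padding arithmetic used by ComputeBB, on a 100-px-wide box of keypoints.
<import_stmt>numpy<as>np<line_sep>face=np.array([[10.0 10.0 0.9] [110.0 110.0 0.9]])<line_sep>width=np.max(face[: 0])-np.min(face[: 0])# 100 px
padX=width<times>0.4/2# 20 px added on each side of the box
<assert_stmt>padX<eq>20.0<line_sep> |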
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
<import_stmt>unittest<import_stmt>numpy<as>np<import_from_stmt>generator generator generate<import_from_stmt>extensions.front.kaldi.tdnn_component_replacer TdnnComponentReplacer<import_from_stmt>mo.utils.ir_engine.compare_graphs compare_graphs<import_from_stmt>unit_tests.utils.graph build_graph regular_op result connect_front const<line_sep>@generator<class_stmt>TdnnComponentReplacerTest(unittest.TestCase)<block_start>@generate(*[([[1 1 1] [4 4 4]] [1 2] [-1 1] ) ([[1 1 1] [4 4 4]] [1 2] [-1 1 2 10 1000] ) ([[1 1 1] [4 4 4]] [1 2] [-1 0]) ])<def_stmt>test_tdnnreplacer self weights biases time_offsets<block_start><def_stmt>generate_offsets <block_start>offset_edges=[]<line_sep>offset_nodes={}<for_stmt>i,t enumerate(time_offsets)<block_start>offset_nodes.update(**regular_op('memoryoffset_'+str(i) {'type':<none>}))<if_stmt>t<ne>0<block_start>offset_edges.append(('placeholder' 'memoryoffset_'+str(i) {'out':0 'in':0}))<line_sep>offset_edges.append(('memoryoffset_'+str(i) 'concat' {'out':0 'in':i}))<block_end><else_stmt><block_start>offset_edges.append(('placeholder' 'concat' {'out':0 'in':i}))<block_end><block_end><return>offset_nodes offset_edges<block_end>offset_nodes,ref_offset_edges=generate_offsets()<line_sep>nodes={**offset_nodes **regular_op('placeholder' {'type':'Parameter'}) **regular_op('tdnncomponent' {'op':'tdnncomponent' 'weights':np.array(weights) 'biases':np.array(biases) 'time_offsets':np.array(time_offsets)}) **const('weights' np.array(weights)) **const('biases' np.array(biases)) **regular_op('concat' {'type':'Concat' 'axis':1}) **regular_op('memoryoffset_0' {'type':<none>}) **regular_op('memoryoffset_1' {'type':<none>}) **regular_op('memoryoffset_2' {'type':<none>}) **regular_op('fully_connected' {'type':'FullyConnected'}) **result('result') }<line_sep>graph=build_graph(nodes [*connect_front('placeholder' 'tdnncomponent') *connect_front('tdnncomponent' 'result')] nodes_with_edges_only=<true>)<line_sep>graph.stage='front'<line_sep>ref_graph=build_graph(nodes [*ref_offset_edges *connect_front('concat' '0:fully_connected') *connect_front('weights' '1:fully_connected') *connect_front('biases' '2:fully_connected') *connect_front('fully_connected' 'result')] nodes_with_edges_only=<true>)<line_sep>TdnnComponentReplacer().find_and_replace_pattern(graph)<line_sep>(flag resp)=compare_graphs(graph ref_graph 'result' check_op_attrs=<true>)<line_sep>self.assertTrue(flag resp)<block_end><block_end> |
<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>pytest approx<import_from_stmt>lenskit.topn precision<import_from_stmt>lenskit.util.test demo_recs<import_from_stmt>lenskit topn<def_stmt>_test_prec items rel **k<block_start>recs=pd.DataFrame({'item':items})<line_sep>truth=pd.DataFrame({'item':rel}).set_index('item')<line_sep><return>precision(recs truth **k)<block_end><def_stmt>test_precision_empty_none <block_start>prec=_test_prec([] [1 3])<assert_stmt>prec<is><none><block_end><def_stmt>test_precision_simple_cases <block_start>prec=_test_prec([1 3] [1 3])<assert_stmt>prec<eq>approx(1.0)<line_sep>prec=_test_prec([1] [1 3])<assert_stmt>prec<eq>approx(1.0)<line_sep>prec=_test_prec([1 2 3 4] [1 3])<assert_stmt>prec<eq>approx(0.5)<line_sep>prec=_test_prec([1 2 3 4] [1 3 5])<assert_stmt>prec<eq>approx(0.5)<line_sep>prec=_test_prec([1 2 3 4] range(5 10))<assert_stmt>prec<eq>approx(0.0)<line_sep>prec=_test_prec([1 2 3 4] range(4 10))<assert_stmt>prec<eq>approx(0.25)<block_end><def_stmt>test_precision_series <block_start>prec=_test_prec(pd.Series([1 3]) pd.Series([1 3]))<assert_stmt>prec<eq>approx(1.0)<line_sep>prec=_test_prec(pd.Series([1 2 3 4]) pd.Series([1 3 5]))<assert_stmt>prec<eq>approx(0.5)<line_sep>prec=_test_prec(pd.Series([1 2 3 4]) pd.Series(range(4 10)))<assert_stmt>prec<eq>approx(0.25)<block_end><def_stmt>test_precision_series_set <block_start>prec=_test_prec(pd.Series([1 2 3 4]) [1 3 5])<assert_stmt>prec<eq>approx(0.5)<line_sep>prec=_test_prec(pd.Series([1 2 3 4]) range(4 10))<assert_stmt>prec<eq>approx(0.25)<block_end><def_stmt>test_precision_series_index <block_start>prec=_test_prec(pd.Series([1 3]) pd.Index([1 3]))<assert_stmt>prec<eq>approx(1.0)<line_sep>prec=_test_prec(pd.Series([1 2 3 4]) pd.Index([1 3 5]))<assert_stmt>prec<eq>approx(0.5)<line_sep>prec=_test_prec(pd.Series([1 2 3 4]) pd.Index(range(4 10)))<assert_stmt>prec<eq>approx(0.25)<block_end><def_stmt>test_precision_series_array <block_start>prec=_test_prec(pd.Series([1 3]) np.array([1 3]))<assert_stmt>prec<eq>approx(1.0)<line_sep>prec=_test_prec(pd.Series([1 2 3 4]) np.array([1 3 5]))<assert_stmt>prec<eq>approx(0.5)<line_sep>prec=_test_prec(pd.Series([1 2 3 4]) np.arange(4 10 1 'u4'))<assert_stmt>prec<eq>approx(0.25)<block_end><def_stmt>test_precision_array <block_start>prec=_test_prec(np.array([1 3]) np.array([1 3]))<assert_stmt>prec<eq>approx(1.0)<line_sep>prec=_test_prec(np.array([1 2 3 4]) np.array([1 3 5]))<assert_stmt>prec<eq>approx(0.5)<line_sep>prec=_test_prec(np.array([1 2 3 4]) np.arange(4 10 1 'u4'))<assert_stmt>prec<eq>approx(0.25)<block_end><def_stmt>test_prec_long_rel <block_start>rel=np.arange(100)<line_sep>items=[1 0 150 3 10]<line_sep>r=_test_prec(items rel k=5)<assert_stmt>r<eq>approx(0.8)<block_end><def_stmt>test_prec_long_items <block_start>rel=np.arange(100)<line_sep>items=[1 0 150 3 10 30 120 4 17]<line_sep>r=_test_prec(items rel k=5)<assert_stmt>r<eq>approx(0.8)<block_end><def_stmt>test_prec_short_items <block_start>rel=np.arange(100)<line_sep>items=[1 0 150]<line_sep>r=_test_prec(items rel k=5)<assert_stmt>r<eq>approx(2/3)<block_end><def_stmt>test_recall_bulk_k demo_recs<block_start>"bulk and normal match"<line_sep>train,test,recs=demo_recs<assert_stmt>test['user'].value_counts().max()<g>5<line_sep>rla=topn.RecListAnalysis()<line_sep>rla.add_metric(precision name='pk' k=5)<line_sep>rla.add_metric(precision)<line_sep># metric without the bulk capabilities
rla.add_metric(<lambda>*a **k:precision(*a **k) name='ind_pk' k=5)<line_sep>rla.add_metric(<lambda>*a:precision(*a) name='ind_p')<line_sep>res=rla.compute(recs test)<assert_stmt>res.precision.values<eq>approx(res.ind_p.values)<assert_stmt>res.pk.values<eq>approx(res.ind_pk.values)<block_end> |
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""
Unit tests for the selinuxpermissive module
"""<import_stmt>os<import_stmt>sys<import_stmt>unittest<import_stmt>mock<import_stmt>moduletests.src.selinuxpermissive<try_stmt># Python 2.x
<block_start><import_from_stmt>cStringIO StringIO<block_end><except_stmt>ImportError# Python 3.x
<block_start><import_from_stmt>io StringIO<block_end><if_stmt>sys.hexversion<ge>0x3040000# contextlib.redirect_stdout was introduced in Python 3.4
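# sys.hexversion packs (major, minor, micro, release) into one int, so 0x3040000 matches any 3.4+ interpreter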
<block_start><import_stmt>contextlib<block_end><else_stmt># contextlib2 is a backport of contextlib from Python 3.5 and is compatible with Python2/3
<block_start><import_stmt>contextlib2<as>contextlib<block_end><class_stmt>Testselinuxpermissive(unittest.TestCase)<block_start>config_file_path="/etc/selinux/config"<def_stmt>setUp self<block_start>self.output=StringIO()<block_end><def_stmt>tearDown self<block_start>self.output.close()<block_end>@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile" return_value=<false>)<def_stmt>test_detect_no_selinux self isfile_mock<block_start>self.assertFalse(moduletests.src.selinuxpermissive.detect(self.config_file_path))<line_sep>self.assertTrue(isfile_mock.called)<block_end>@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile" return_value=<true>)@mock.patch("moduletests.src.selinuxpermissive.open" mock.mock_open(read_data="SELINUX=enforcing"))<def_stmt>test_detect_problem self isfile_mock<block_start>self.assertTrue(moduletests.src.selinuxpermissive.detect(self.config_file_path))<line_sep>self.assertTrue(isfile_mock.called)<block_end>@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile" return_value=<true>)@mock.patch("moduletests.src.selinuxpermissive.open" mock.mock_open(read_data="SELINUX=permissive"))<def_stmt>test_detect_noproblem self isfile_mock<block_start>self.assertFalse(moduletests.src.selinuxpermissive.detect(self.config_file_path))<line_sep>self.assertTrue(isfile_mock.called)<block_end>@mock.patch("moduletests.src.selinuxpermissive.open" mock.mock_open(read_data="SELINUX=enforcing"))<def_stmt>test_fix_success self<block_start>self.assertTrue(moduletests.src.selinuxpermissive.fix(self.config_file_path))<block_end>@mock.patch("moduletests.src.selinuxpermissive.open" side_effect=IOError)<def_stmt>test_fix_exception self open_mock<block_start><with_stmt>contextlib.redirect_stdout(self.output)<block_start>self.assertRaises(IOError moduletests.src.selinuxpermissive.fix self.config_file_path)<block_end>self.assertEqual(self.output.getvalue() "[WARN] Unable to replace contents of /etc/selinux/config\n")<line_sep>self.assertTrue(open_mock.called)<block_end>@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")@mock.patch("moduletests.src.selinuxpermissive.detect" side_effect=(<true> <false>))@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile" return_value=<true>)@mock.patch("moduletests.src.selinuxpermissive.backup" return_value=<true>)@mock.patch("moduletests.src.selinuxpermissive.fix" return_value=<true>)<def_stmt>test_run_success_fixed self fix_mock backup_mock isfile_mock detect_mock config_mock<block_start>config_mock.return_value={"BACKUP_DIR":"/var/tmp/ec2rl" "LOG_DIR":"/var/tmp/ec2rl" "BACKED_FILES":dict() "REMEDIATE":<true>}<with_stmt>contextlib.redirect_stdout(self.output)<block_start>self.assertTrue(moduletests.src.selinuxpermissive.run())<block_end>self.assertTrue("[SUCCESS] selinux set to permissive"<in>self.output.getvalue())<line_sep>self.assertTrue(fix_mock.called)<line_sep>self.assertTrue(backup_mock.called)<line_sep>self.assertTrue(isfile_mock.called)<line_sep>self.assertTrue(detect_mock.called)<line_sep>self.assertTrue(config_mock.called)<block_end>@mock.patch("moduletests.src.selinuxpermissive.get_config_dict" return_value=<true>)@mock.patch("moduletests.src.selinuxpermissive.detect" return_value=<false>)<def_stmt>test_run_success self detect_mock config_mock<block_start><with_stmt>contextlib.redirect_stdout(self.output)<block_start>self.assertTrue(moduletests.src.selinuxpermissive.run())<block_end>self.assertTrue("[SUCCESS] selinux is not set to enforcing"<in>self.output.getvalue())<line_sep>self.assertTrue(detect_mock.called)<line_sep>self.assertTrue(config_mock.called)<block_end>@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")@mock.patch("moduletests.src.selinuxpermissive.detect" return_value=<true>)@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile" return_value=<true>)@mock.patch("moduletests.src.selinuxpermissive.backup" return_value=<true>)@mock.patch("moduletests.src.selinuxpermissive.fix" return_value=<true>)@mock.patch("moduletests.src.selinuxpermissive.restore" return_value=<true>)<def_stmt>test_run_failure_isfile self restore_mock fix_mock backup_mock isfile_mock detect_mock config_mock<block_start>config_mock.return_value={"BACKUP_DIR":"/var/tmp/ec2rl" "LOG_DIR":"/var/tmp/ec2rl" "BACKED_FILES":{self.config_file_path:"/some/path"} "REMEDIATE":<true> "SUDO":<true>}<with_stmt>contextlib.redirect_stdout(self.output)<block_start>self.assertFalse(moduletests.src.selinuxpermissive.run())<block_end>self.assertTrue("[FAILURE] failed to set selinux set to permissive"<in>self.output.getvalue())<line_sep>self.assertTrue(restore_mock.called)<line_sep>self.assertTrue(fix_mock.called)<line_sep>self.assertTrue(backup_mock.called)<line_sep>self.assertTrue(isfile_mock.called)<line_sep>self.assertTrue(detect_mock.called)<line_sep>self.assertTrue(config_mock.called)<block_end>@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")@mock.patch("moduletests.src.selinuxpermissive.detect" return_value=<true>)@mock.patch("moduletests.src.selinuxpermissive.os.path.isfile" return_value=<false>)@mock.patch("moduletests.src.selinuxpermissive.fix" return_value=<true>)<def_stmt>test_run_failure self fix_mock isfile_mock detect_mock config_mock<block_start>config_mock.return_value={"BACKUP_DIR":"/var/tmp/ec2rl" "LOG_DIR":"/var/tmp/ec2rl" "BACKED_FILES":dict() "REMEDIATE":<true> "SUDO":<true>}<with_stmt>contextlib.redirect_stdout(self.output)<block_start>self.assertFalse(moduletests.src.selinuxpermissive.run())<block_end>self.assertTrue("[FAILURE] failed to set selinux set to permissive"<in>self.output.getvalue())<line_sep>self.assertTrue(fix_mock.called)<line_sep>self.assertTrue(isfile_mock.called)<line_sep>self.assertTrue(detect_mock.called)<line_sep>self.assertTrue(config_mock.called)<block_end>@mock.patch("moduletests.src.selinuxpermissive.get_config_dict")@mock.patch("moduletests.src.selinuxpermissive.detect" side_effect=IOError)@mock.patch("moduletests.src.selinuxpermissive.restore" return_value=<true>)<def_stmt>test_run_failure_exception self restore_mock detect_mock config_mock<block_start>config_mock.return_value={"BACKUP_DIR":"/var/tmp/ec2rl" "LOG_DIR":"/var/tmp/ec2rl" "BACKED_FILES":{self.config_file_path:"/some/path"} "REMEDIATE":<true>}<with_stmt>contextlib.redirect_stdout(self.output)<block_start>self.assertFalse(moduletests.src.selinuxpermissive.run())<block_end>self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))<line_sep>self.assertTrue(restore_mock.called)<line_sep>self.assertTrue(detect_mock.called)<line_sep>self.assertTrue(config_mock.called)<block_end>@mock.patch("moduletests.src.selinuxpermissive.get_config_dict" side_effect=IOError)<def_stmt>test_run_failure_config_exception self config_mock<block_start><with_stmt>contextlib.redirect_stdout(self.output)<block_start>self.assertFalse(moduletests.src.selinuxpermissive.run())<block_end>self.assertTrue(self.output.getvalue().endswith("Review the logs to determine the cause of the issue.\n"))<line_sep>self.assertTrue(config_mock.called)<block_end><block_end>