content (string, 7-928k chars) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104 chars) | path (string, 4-230 chars) | size (int64, 7-928k bytes) | lang (string, 1 class)
---|---|---|---|---|---|---|---|---|
import numpy as np
from .transform import sph2vec, vec2sph
def angle_between(ang1, ang2, sign=True):
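    # signed (or absolute, when sign=False) difference between two angles, wrapped to [-pi, pi)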
d = (ang1 - ang2 + np.pi) % (2 * np.pi) - np.pi
if not sign:
d = np.abs(d)
return d
def angdist(v1, v2, zenith=True):
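    # great-circle (angular) distance in radians between two directions, given either as
    # 3D vectors or as 2-row spherical coordinates (converted through sph2vec)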
if v1.shape[0] == 2:
v1 = sph2vec(v1, zenith=zenith)
if v2.shape[0] == 2:
v2 = sph2vec(v2, zenith=zenith)
v1 /= np.linalg.norm(v1, axis=0)
v2 /= np.linalg.norm(v2, axis=0)
if v1.ndim > 1 or v2.ndim > 1:
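        # batched case: einsum takes the column-wise dot product v1[:, j] . v2[:, j]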
d = np.einsum('ij,ij->j', v1, v2)
else:
d = np.dot(v1.T, v2)
# if d.ndim > 1:
# d = d.diagonal()
return np.absolute(np.arccos(d))
def eledist(v1, v2, zenith=True):
if v1.shape[0] == 3:
v1 = vec2sph(v1, zenith=zenith)
if v2.shape[0] == 3:
v2 = vec2sph(v2, zenith=zenith)
d = (v1[0] - v2[0] + np.pi) % (2 * np.pi) - np.pi
return np.absolute(d)
def azidist(v1, v2, zenith=True):
if v1.shape[0] == 3:
v1 = vec2sph(v1, zenith=zenith)
if v2.shape[0] == 3:
v2 = vec2sph(v2, zenith=zenith)
d = (v1[1] - v2[1] + np.pi) % (2 * np.pi) - np.pi
return np.absolute(d)
| 24.826087 | 53 | 0.535026 | ["MIT"] | Foztarz/insectvision | sphere/distance.py | 1,142 | Python |
# Copyright 2020 The Maritime Whale Authors. All rights reserved.
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE.txt file.
#
# Processes wind and vessel data. Performs simple analysis.
from match_wind_data import *
from datetime import *
from meet_and_pass import *
import pandas as pd
import math
import sys
# TODO: need to generalize this to apply to any port desired; will need to
# do the same for main, run, plot, etc
# vessel (AIS) types that should be automatically purged from analysis
# see details at https://api.vesselfinder.com/docs/ref-aistypes.html
AUTO_BLACKLIST = [30, 31, 32, 33, 34, 35, 36, 37, 51, 52, 53, 55, 57, 58, 59]
SUB_PANAMAX = 656 # threshold in feet
M_TO_FT = 3.28 # meters to feet (conversion)
def _sanitize_vmr(df):
"""Filters entries with '511' error, impossibly high speed, abnormally
high vessel width, as well as singletons (only one entry) from vessel
movement DataFrame.
Args:
df: Vessel movement DataFrame.
Returns:
Sanitized vessel movement report DataFrame.
"""
df = df.loc[~df.index.isin(df[df.loc[:, "Beam ft"] >= 500].index), :]
df = df.loc[~df.index.isin(df[df.loc[:, "Course"] == 511].index), :]
df = df.loc[~df.index.isin(df[df.loc[:, "Heading"] == 511].index), :]
df = df.loc[~df.index.isin(df[df.loc[:, "VSPD kn"] >= 40].index), :]
singleton = (df.loc[:, "MMSI"].value_counts() == 1)
single_mmsi = df.loc[:, "MMSI"].value_counts()[singleton].index.values
df = df.loc[~df.loc[:, "MMSI"].isin(single_mmsi), :]
return df
def _wrangle_vmr(df, rename):
"""Rounds, renames, and sanitizes vessel movment DataFrame. Creates new
columns.
Args:
df: Vessel movement DataFrame.
Returns:
Cleaned vessel movement report DataFrame.
"""
df.rename(rename, axis=1, inplace=True)
df.loc[:, "LOA ft"] = (df.loc[:, "A"] + df.loc[:, "B"]) * M_TO_FT
df.loc[:, "LOA ft"] = df.loc[:, "LOA ft"].round(0)
df.loc[:, "Beam ft"] = (df.loc[:, "C"] + df.loc[:, "D"]) * M_TO_FT
df.loc[:, "Beam ft"] = df.loc[:, "Beam ft"].round(0)
df.loc[:, "Latitude"] = df.loc[:, "Latitude"].round(5)
df.loc[:, "Longitude"] = df.loc[:, "Longitude"].round(5)
df = _sanitize_vmr(df)
# filter out sub-panamax class vessels
df = df.loc[df.loc[:, "LOA ft"] >= SUB_PANAMAX, :]
df.loc[:, "Date/Time UTC"] = df.loc[:, "Date/Time UTC"].str.strip("UTC")
df.loc[:, "Date/Time UTC"] = pd.to_datetime(df.loc[:, "Date/Time UTC"])
df = df.loc[:, (["Date/Time UTC", "Name", "MMSI", "LOA ft", "Latitude",
"Longitude", "Course", "AIS Type", "Heading", "VSPD kn",
"Beam ft"])]
return df
def _filter_blacklisters(df, blacklist):
"""Checks vessel AIS types and ommits blacklisted vessel types from the
filtered data. Appends ommitted vessels' MMSI's to blacklist.txt.
Args:
df: Vessel movement DataFrame.
Returns:
Filtered vessel movement DataFrame.
"""
df = df.loc[~df.loc[:, "MMSI"].isin(blacklist), :]
new_blacklisters = []
for j in range(df.shape[0]):
if df.iloc[j]["AIS Type"] in AUTO_BLACKLIST:
new_blacklisters.append(df.iloc[j]["MMSI"])
with open("../cache/blacklist.txt", "a") as f:
contents = [str(mmsi) for mmsi in new_blacklisters]
if contents:
f.write("\n".join(contents) + "\n")
df = df.loc[~df.loc[:, "MMSI"].isin(new_blacklisters), :]
return df
def _fold_vmr(ports, i):
"""Reduces movement report to a DataFrame with a single entry for each
    vessel at the point of its maximum speed in the channel. Includes a column
with the vessel's mean speed.
"""
mean = pd.DataFrame(ports[i].groupby(["Name", "MMSI"])["VSPD kn"]
.mean()).rename({"VSPD kn": "Mean Speed kn"}, axis=1).round(1)
maxes = pd.DataFrame(ports[i].groupby(["Name", "MMSI"])["VSPD kn"]
.max()).rename({"VSPD kn": "Max Speed kn"}, axis=1)
merged_speeds = maxes.merge(mean, on=["Name", "MMSI"])
max_dict = merged_speeds.loc[:, "Max Speed kn"].to_dict()
columns = {"Longitude":[], "Latitude":[], "Date/Time UTC":[],
"LOA ft":[], "Course":[], "AIS Type":[], "WSPD mph":[],
"GST mph":[], "WDIR degT":[], "Buoy Source":[], "Beam ft":[],
"Heading":[], "Course Behavior":[], "Effective Beam ft":[],
"Class":[], "Location":[], "Yaw deg":[], "Transit":[],
"% Channel Occupied":[]}
# grab remaining data based on max speed position
for key, value in max_dict.items():
for k in columns.keys():
columns[k].append(ports[i][(ports[i].loc[:, "Name"] == key[0]) &
(ports[i].loc[:, "VSPD kn"] == value)][k].iloc[0])
for key in columns.keys():
merged_speeds[key] = columns[key]
merged_speeds = merged_speeds.reset_index()
fold_res = merged_speeds
fold_res.sort_values("Max Speed kn", ascending=False, inplace=True)
return fold_res
def _add_channel_occ(ports, i):
"""Creates the channel occupancy column."""
# total channel width for CH and SV are 1000 and 600 ft respectively,
# but vary based on Class and transit condition
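    # channel_width[i] = [Post-Panamax one-way, Post-Panamax two-way,
    #                     Panamax one-way, Panamax two-way] for CH (i = 0) and SV (i = 1)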
channel_width = [[800, 400, 1000, 500], [600, 300, 600, 300]]
# create % channel occupancy column for each vessel position based on
# effective beam, transit, and corresponding channel width
for row in range(len(ports[i])):
vessel_class = ports[i].loc[row, "Class"]
transit_type = ports[i].loc[row, "Transit"]
eff_beam = ports[i].loc[row, "Effective Beam ft"]
if ((vessel_class == "Post-Panamax") &
(transit_type == "One-way Transit")):
occ = (eff_beam / channel_width[i][0]) * 100
ports[i].loc[row, "% Channel Occupied"] = round(occ, 2)
elif ((vessel_class == "Post-Panamax") &
(transit_type == "Two-way Transit")):
occ = (eff_beam / channel_width[i][1]) * 100
ports[i].loc[row, "% Channel Occupied"] = round(occ, 2)
elif ((vessel_class == "Panamax") &
(transit_type == "One-way Transit")):
occ = (eff_beam / channel_width[i][2]) * 100
ports[i].loc[row, "% Channel Occupied"] = round(occ, 2)
elif ((vessel_class == "Panamax") &
(transit_type == "Two-way Transit")):
occ = (eff_beam / channel_width[i][3]) * 100
ports[i].loc[row, "% Channel Occupied"] = round(occ, 2)
else:
sys.stderr.write("Error: Undefined Class and " +
"transit combination...\n")
ports[i].loc[row, "% Channel Occupied"] = float("NaN")
return ports[i]
def _add_vessel_class(df):
"""Creates 'Class' column based on vessel LOA ft."""
df.loc[:, "Class"] = "Panamax"
post_row = (df.loc[:, "LOA ft"] > 965)
post_loc = df.loc[post_row, :].index
post_pan = df.index.isin(post_loc)
df.loc[post_pan, "Class"] = "Post-Panamax"
return df
def _course_behavior(df, ranges):
"""Creates 'Course Behavior' column based on channel specific course ranges.
"""
course_behavior = ("Outbound", "Inbound")
# filter on course ranges to isolate inbound and outbound ships only
df = df[(df.loc[:, "Course"] >= ranges[0][0]) &
(df.loc[:, "Course"] <= ranges[0][1]) |
(df.loc[:, "Course"] >= ranges[1][0]) &
(df.loc[:, "Course"] <= ranges[1][1])]
df.loc[:, "Course"] = round(df.loc[:, "Course"]).astype("int")
df.loc[:, "Course Behavior"] = df.loc[:, "Course"].copy()
# replace course values with general inbound and outbound behavior
courses = {}
for behavior, bounds in zip(course_behavior, ranges):
lower_bound = bounds[0]
upper_bound = bounds[1]
for j in range(lower_bound, upper_bound + 1):
courses[j] = behavior
df.loc[:, "Course Behavior"] = (df.loc[:, "Course Behavior"]
.replace(courses).astype("str"))
return df
def process_report(path):
"""Processes data from vessel movement report. Adds data from wind buoys,
performs meeting and passing analysis. Creates other relevant columns.
Args:
path: Relative path to raw vessel movement report (CSV).
Returns:
        Two pairs of DataFrames corresponding to the movement report.
The first pair of DataFrames contains all vessel movements belonging to
Charleston and Savannah, respectively. The second pair of DataFrames
stores the vessel movement entries at which each vessel achieved
its maximum speed. Again, the first DataFrame in the pair belongs to
Charleston and the second DataFrame belongs to Savannah.
"""
blacklist = [int(mmsi) for mmsi in open("../cache/blacklist.txt",
"r").readlines()]
df = pd.read_csv(path)
df = _wrangle_vmr(df, {"DATETIME (UTC)": "Date/Time UTC", "NAME": "Name",
"LATITUDE": "Latitude", "LONGITUDE": "Longitude",
"SPEED": "VSPD kn", "COURSE": "Course", "HEADING":
"Heading", "AIS TYPE": "AIS Type"})
ch_course_ranges = ((100, 140), (280, 320)) # (outbound, inbound)
sv_course_ranges = ((100, 160), (280, 340)) # (outbound, inbound)
# longitudinal channel midpoint for Charleston and Savannah respectively
channel_midpoint = ((-79.74169), (-80.78522))
course_ranges = (ch_course_ranges, sv_course_ranges)
ports = [None, None] # ch, sv
# Charleston NOAA wind buoy ID (41004)
# Savannah NOAA wind buoy ID (41008)
buoys = [{"41004":None}, {"41008":None}] # main wind buoys
alt_buoys = [{"41008":None}, {"41004":None}] # alternate wind buoys
# split data into Charleston and Savannah DataFrames based on latitude
for i in range(len(ports)):
ch_df = (df.loc[:, "Latitude"] >= 32.033)
sv_df = (df.loc[:, "Latitude"] < 32.033)
ports[i] = df[ch_df] if (i == 0) else df[sv_df]
# if there is no vessel data on a given day (e.g. major holidays)
# return empty DataFrames
if not len(ports[i]):
empty = pd.DataFrame({"Date/Time UTC":[], "Name":[], "MMSI":[],
"Max Speed kn":[], "Mean Speed kn":[],
"LOA ft":[], "Beam ft":[], "Class":[],
"AIS Type":[], "Course":[], "Heading":[],
"Course Behavior":[], "Yaw deg":[],
"Effective Beam ft":[], "WDIR degT":[],
"WSPD mph":[], "GST mph":[], "Buoy Source":[],
"Location":[], "Latitude":[], "Longitude":[],
"Transit":[], "% Channel Occupied":[]})
ports[i] = [empty, empty]
continue
ports[i].loc[:, "Location"] = "Nearshore"
off_row = (ports[i].loc[:, "Longitude"] > channel_midpoint[i])
off_loc = ports[i].loc[off_row, :].index
offshore_indices = ports[i].index.isin(off_loc)
ports[i].loc[offshore_indices, "Location"] = "Offshore"
ports[i] = add_wind(ports, i, buoys, alt_buoys)
ports[i] = _course_behavior(ports[i], course_ranges[i])
ports[i] = _add_vessel_class(ports[i])
# create yaw column based on difference between course and heading
ports[i].loc[:, "Yaw deg"] = abs(ports[i].loc[:, "Course"] -
ports[i].loc[:, "Heading"])
# compute effective beam based on vessel beam, loa, and yaw
eff_beam = []
loa = ports[i].loc[:, "LOA ft"].values
beam = ports[i].loc[:, "Beam ft"].values
yaw = ports[i].loc[:, "Yaw deg"].values
for l in range(ports[i].shape[0]):
# effective beam formula derived using trigonometry and geometry
# of vessel positions
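            # i.e. effective beam = LOA * sin(yaw) + beam * cos(yaw), since cos(90 - yaw) == sin(yaw)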
eff_beam.append(round((math.cos(math.radians(90 - yaw[l])) *
loa[l]) + (math.cos(math.radians(yaw[l])) *
beam[l])))
ports[i].loc[:, "Effective Beam ft"] = eff_beam
ports[i].loc[:, "Effective Beam ft"] = ports[i].loc[:,
"Effective Beam ft"].round(0)
# remove unwanted blacklist vessels
ports[i] = _filter_blacklisters(ports[i], blacklist)
# create rounded DateTime column for meetpass analysis
stamps = len(ports[i].loc[:, "Date/Time UTC"]) # number of timestamps
round_times = [ports[i].loc[:, "Date/Time UTC"].iloc[ii].floor("Min")
for ii in range(stamps)]
ports[i].loc[:, "rounded date"] = round_times
# run meetpass analysis and create Transit column based on results
mp = meetpass(ports[i])
two_way = twoway(ports[i], mp)
ports[i].loc[:, "Transit"] = "One-way Transit"
if not isinstance(two_way, type(None)):
two_way_indices = ports[i].index.isin(two_way.index)
ports[i].loc[two_way_indices, "Transit"] = "Two-way Transit"
# reset index to clear previous pandas manipulations
ports[i] = ports[i].reset_index()
ports[i] = _add_channel_occ(ports, i)
# save current format of data as all_res to be used for all positions
all_res = ports[i]
# remove sections of channel where ships turn
if i % 2:
all_res = all_res[(all_res.loc[:, "Latitude"] <= 32.02838) &
(all_res.loc[:, "Latitude"] >= 31.9985) |
(all_res.loc[:, "Latitude"] <= 31.99183)]
else:
all_res = all_res[all_res.loc[:, "Latitude"] >= 32.667473]
fold_res = _fold_vmr(ports, i)
# return max and mean positional data in specified order
fold_res = fold_res.loc[:, ("Date/Time UTC", "Name", "MMSI",
"Max Speed kn", "Mean Speed kn", "LOA ft",
"Beam ft", "Class", "AIS Type", "Course",
"Heading", "Course Behavior", "Yaw deg",
"Effective Beam ft", "WDIR degT",
"WSPD mph", "GST mph", "Buoy Source",
"Location", "Latitude", "Longitude",
"Transit", "% Channel Occupied")]
# return positional data in specified order
all_res = all_res.loc[:, ("Name", "MMSI", "VSPD kn", "WSPD mph",
"Transit", "% Channel Occupied", "Yaw deg",
"Effective Beam ft", "LOA ft", "Beam ft",
"Class", "AIS Type", "Course", "Heading",
"Course Behavior", "WDIR degT", "GST mph",
"Buoy Source", "Location", "Latitude",
"Longitude", "Date/Time UTC")]
# save two copies of daily vmr for each port, one for all vessel
# positions and one for maximum vessel speed positions
ports[i] = [fold_res, all_res]
return ports[0], ports[1] # ch, sv
| 49.545161 | 80 | 0.560258 | ["MIT"] | maritime-whale/maritime-whale | src/process_maritime_data.py | 15,359 | Python |
import maya.mel as mm
import maya.cmds as mc
import glTools.utils.attribute
import glTools.utils.base
import glTools.utils.layer
import glTools.utils.reference
import glTools.utils.shader
import glTools.utils.shape
import glTools.utils.transform
import re
# ===========
# - Cleanup -
# ===========
def toggleCons(state):
'''
Toggle the display state of all joint buffers ('Con') in the scene
@param state: The display state to set the joint buffers to
@type state: bool
'''
# Get List of Con Joints
conList = mc.ls('*Con*_jnt',type='joint')
for conJnt in conList:
# Toggle State
if state:
glTools.utils.base.displayOverride(conJnt,overrideEnable=1,overrideLOD=0)
mc.setAttr(conJnt+'.drawStyle',0) # Bone
else:
glTools.utils.base.displayOverride(conJnt,overrideEnable=1,overrideLOD=1)
mc.setAttr(conJnt+'.drawStyle',2) # None
# Set Joint Radius
if mc.getAttr(conJnt+'.radius',se=True):
mc.setAttr(conJnt+'.radius',0.0)
mc.setAttr(conJnt+'.radius',cb=False)
# Hide Rotate Order
if mc.getAttr(conJnt+'.ro',se=True):
mc.setAttr(conJnt+'.ro',cb=False)
# Return Result
return conList
def toggleEnds(state):
'''
	Toggle the display state of all end joints ('End_jnt') in the scene
	@param state: The display state to set the end joints to
@type state: bool
'''
# Get list of End joints
endList = mc.ls('*End_jnt',type='joint')
for endJnt in endList:
# Toggle state
if state:
glTools.utils.base.displayOverride(endJnt,overrideEnable=1,overrideLOD=0)
mc.setAttr(endJnt+'.drawStyle',0) # Bone
else:
glTools.utils.base.displayOverride(endJnt,overrideEnable=1,overrideLOD=1)
mc.setAttr(endJnt+'.drawStyle',2) # None
# Set Joint Radius
if mc.getAttr(endJnt+'.radius',se=True):
mc.setAttr(endJnt+'.radius',0.0)
mc.setAttr(endJnt+'.radius',cb=False)
# Hide Rotate Order
if mc.getAttr(endJnt+'.ro',se=True):
mc.setAttr(endJnt+'.ro',cb=False)
# Return Result
return endList
def disableDrawingOverrides(grp):
'''
	Disable drawing overrides for all DAG descendants of the specified transform node.
	@param grp: The transform under which all descendant node drawing overrides will be disabled.
	@type grp: str
'''
# ==========
# - Checks -
# ==========
if not mc.objExists(grp):
raise Exception('Transform "'+grp+'" does not exists!')
if not glTools.utils.transform.isTransform(grp):
raise Exception('Object "'+grp+'" is not a valid transform!')
# Get Descendent Node List
nodeList = mc.ls(mc.listRelatives(grp,ad=True, pa=True) or [],dag=True) or []
if not nodeList: return []
# =============================
# - Disable Drawing Overrides -
# =============================
overrideName = 'overrideEnabled'
for node in nodeList:
# Check Override Attribute
overrideAttr = node+'.'+overrideName
if not mc.attributeQuery(overrideName,n=node,ex=True):
print('Override attribute "'+overrideAttr+'" does not exist! Skipping...')
continue
# Check Override Attribute Connections
overrideConn = mc.listConnections(overrideAttr,s=True,d=False) or []
if overrideConn:
print('Found incoming connection for override attribute "'+overrideAttr+'"! ('+overrideConn[0]+')')
print('Disconnecting attribute and disabling drawing overrides...')
mc.disconnectAttr(overrideConn[0],overrideAttr)
# Disable Drawing Overrides
try: mc.setAttr(overrideAttr,0)
except: pass
# =================
# - Return Result -
# =================
return nodeList
# ==========
# - Checks -
# ==========
def uniqueNameCheck(objList=[],transformsOnly=False):
'''
Return a list of nodes with non unique names
@param objList: List of scene objects to check. If empty, use all existing scene nodes.
@type objList: list
@param transformsOnly: Check transform names only
@type transformsOnly: bool
'''
# Get list of scene nodes
if not objList:
objList = mc.ls()
if transformsOnly:
nodeList = mc.ls(objList,transforms=True)
else:
nodeList = mc.ls(objList,dag=True)
# Determine non unique names
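	# Maya returns a full DAG path (containing "|") only when a node's short name is not unique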
nonUniqueList = [i for i in nodeList if i.count('|')]
# Return result
return nonUniqueList
def validNameCheck(objList=[]):
'''
Check for valid names in the specified list of nodes
@param objList: List of objects to check valid names for. If empty use all scene transforms
@type objList: list
'''
# Check geo list
if not objList: objList = mc.ls()
if not objList: return []
# Remove Default Nodes
defNodes = ['dof1','time1','lambert1','postProcessList1','sequenceManager1','lightLinker1','renderGlobalsList1','dynController1','lightList1','particleCloud1','shaderGlow1']
objList = [obj for obj in objList if not defNodes.count(obj)]
objList = [obj for obj in objList if not obj.startswith('default')]
objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectTypeFilter']
objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectNameFilter']
objList = [obj for obj in objList if not mc.nodeType(obj) == 'objectScriptFilter']
# Check valid names
result = []
for obj in objList:
# Check prefix
#if not obj.startswith('cn_') and not obj.startswith('lf_') and not obj.startswith('rt_'):
# result.append(obj)
# Check "pasted"
if obj.count('pasted'): result.append(obj)
# Check "poly"
if obj.count('poly'): result.append(obj)
# Check double underscore "__"
if obj.count('__'): result.append(obj)
# Check names ending with a digit (0-9)
digitSearch = re.search('(\d+)$', obj)
if digitSearch and glTools.utils.transform.isTransform(obj):
if digitSearch.group(0):
result.append(obj)
# Remove Duplicate Entries
result = list(set(result))
# Return result
return result
def shapeNameCheck( objList = [],
typeList = ['mesh','nurbsCurve','nurbsSurface'],
skipIntermediates = True,
skipMultipleShapes = False,
strict = True ):
'''
Return a list of incorrectly named geometry shape nodes.
@param objList: List of objects to check for valid shape names. If empty, get all nodes of the specified type.
@type objList: list
@param typeList: List of shape types to check for valid names.
@type typeList: list
@param skipIntermediates: Skip intermediate shapes.
@type skipIntermediates: bool
@param skipMultipleShapes: Skip objects with multiple shape nodes.
@type skipMultipleShapes: bool
@param strict: Shape name must match parent+"Shape" to pass.
@type strict: bool
'''
# ==========
# - Checks -
# ==========
if not objList: objList = mc.ls(type=typeList)
# ====================
# - Build Shape List -
# ====================
shapeList = []
for obj in objList:
# Get Shapes from Transform
if glTools.utils.transform.isTransform(obj):
# Check Multiple Shapes
objShapes = mc.listRelatives(obj,s=True,pa=True)
if not objShapes: continue
if (len(objShapes) > 1) and skipMultipleShapes: continue
# Get Shapes
tShapeList = mc.listRelatives(obj,s=True,ni=skipIntermediates,pa=True)
for shape in tShapeList:
shapeList.append(obj)
elif glTools.utils.shape.isShape(obj):
shapeList.append(obj)
else:
print('Unable to determine shape from object "'+obj+'"! Skipping...')
# =====================
# - Check Shape Names -
# =====================
invalidShapeNameList = []
for shape in shapeList:
# Check Type
if not typeList.count(mc.objectType(shape)): continue
# Check Intermediate Object
if skipIntermediates and mc.getAttr(shape+'.intermediateObject'): continue
# Get transform parent name
parent = mc.listRelatives(shape,p=True,pa=True)[0]
# Get Short Names
shapeSN = mc.ls(shape,sn=True)[0]
parentSN = mc.ls(parent,sn=True)[0]
# Check Shape Name
if strict and (shape != parent+'Shape'):
invalidShapeNameList.append(shape)
if not shapeSN.startswith(parentSN):
invalidShapeNameList.append(shape)
elif not shapeSN.count('Shape'):
invalidShapeNameList.append(shape)
# =================
# - Return Result -
# =================
return invalidShapeNameList
def intermediateShapesCheck(objList=[]):
'''
Return a list of intermediate shapes.
@param objList: List of objects to check for intermediate shapes.
@type objList: list
'''
# Check nodeList
if not objList: objList = mc.ls(transforms=True)
else: objList = mc.ls(objList,transforms=True)
# For each node
result = []
for obj in objList:
# Get All Shapes
shapes = mc.listRelatives(obj,s=True,pa=True)
if not shapes: shapes = []
for shape in shapes:
# Check Intermediate Shapes
if mc.objExists(shape+'.intermediateObject'):
if mc.getAttr(shape+'.intermediateObject'):
result.append(shape)
# Return Result
return result
def multipleShapeCheck(objList=[]):
'''
Return a list of transforms with multiple shape nodes
@param objList: List of objects to check for multiple shapes.
@type objList: list
'''
# Get scene transforms
if not objList: objList = mc.ls(transforms=True)
else: objList = mc.ls(objList,dag=True)
# Iterate over scene transforms
result = []
for transform in objList:
# Check Transform
if not glTools.utils.transform.isTransform(transform):
transform = mc.listRelatives(transform,p=True)[0]
# Get transform shape list
shapeList = mc.listRelatives(transform,s=True)
# Check shape list
if not shapeList: continue
shapeList = mc.ls(shapeList,type=['mesh','nurbsSurface','nurbsCurve'])
# Check number of shapes
if len(shapeList) > 1: result.append(transform)
# Return result
return result
def constructionHistoryCheck(geoList=[]):
'''
Return a list of nodes that contain construction history
	@param geoList: List of objects to check for construction history.
	@type geoList: list
'''
# Get Scene Geometry
if not geoList:
geoList = mc.ls(geometry=True)
else:
geoList = mc.listRelatives(geoList,s=True,pa=True)
# For each node
result = []
for geo in geoList:
# Check Construction History
hist = mc.listHistory(geo)
# Remove Self
if hist.count(geo): hist.remove(geo)
# Ignore Node Types
ignore = mc.ls(hist,type=['groupId','shadingEngine','transform'])
hist = list(set(hist)-set(ignore))
# Check History
if hist:
obj = mc.listRelatives(geo,p=True,pa=True)
result.extend(obj)
# Remove Duplicate Names
if result: result = list(set(result))
# Return Result
return result
def userAttrCheck(objList=[],includeShapes=False):
'''
Return a list of user defined attributes for a specified list of nodes (and shapes).
@param objList: List of objects to check for user defined attributes.
@type objList: list
@param includeShapes: Also check shapes for user defined attributes.
@type includeShapes: bool
'''
# Initialize Return List
result = []
# Check objList
if not objList: objList = mc.ls()
# For each node
for obj in objList:
userAttrs = mc.listAttr(obj,ud=True)
if not userAttrs: userAttrs = []
for attr in userAttrs:
result.append(obj+'.'+attr)
# Check Shapes
if includeShapes:
shapes = mc.listRelatives(obj,s=True)
if not shapes: shapes = []
for shape in shapes:
userAttrs = mc.listAttr(shape,ud=True)
if not userAttrs: userAttrs = []
for attr in userAttrs:
result.append(shape+'.'+attr)
# Return Result
return result
def emptyGroupCheck(objList=[]):
'''
List empty groups.
@param objList: List of transforms to check.
@type objList: list
'''
# Check objList
if not objList: objList = mc.ls(transforms=True)
else: objList = mc.ls(objList,transforms=True)
# Find Empty Groups
result = []
for grp in objList:
if not mc.listRelatives(grp,ad=True):
result.append(grp)
# Return Result
return result
def emptySetCheck(setList=[]):
'''
Return a list of empty sets
@param setList: List of sets to check.
@type setList: list
'''
# Check setList
if not setList: setList = mc.ls(sets=True)
# Check empty sets
result = []
for setName in setList:
# Check Set
if not mc.ls(setName,sets=True): continue
# Skip Default Sets
if setName.startswith('default'): continue
if setName.startswith('initial'): continue
# Check Set
if not mc.sets(setName,q=True):
result.append(setName)
# Return result
return result
def emptyLayerCheck(layerList=[]):
'''
	Return a list of empty layers
@param layerList: List of layers to check. If empty, use all existing layers in current scene.
@type layerList: list
'''
# Check Layer List
if not layerList: layerList = mc.ls(type=['displayLayer','renderLayer','animLayer'])
else: layerList = mc.ls(layerList,type=['displayLayer','renderLayer','animLayer'])
# Check Empty Layers
result = []
for layer in layerList:
# Check Layer
if not mc.ls(layer,type=['displayLayer','renderLayer','animLayer']): continue
# Skip Default Layers
if layer.startswith('default'): continue
# Check Membership
if not glTools.utils.layer.memberList(layer):
result.append(layer)
# Return Result
return result
def animCurveCheck(curveTypeList=['animCurveTL','animCurveTA','animCurveTT','animCurveTU','animCurveUL','animCurveUA','animCurveUT','animCurveUU']):
'''
Return a list of all existing animCurves of a specified type.
@param curveTypeList: List of animCurve types to consider.
@type curveTypeList: list
'''
# Initialize Return List
animCurves = []
# List AnimCurve Nodes
for curveType in curveTypeList:
curveList = mc.ls(type=curveType)
if curveList:
animCurves.extend(curveList)
# Return Result
return animCurves
def unusedShadingNodeCheck():
'''
Return a list of unused shading nodes.
'''
return glTools.utils.shader.listUnusedShadingNodes()
def noGeometryShaderCheck(geoList=[]):
'''
Return a list of non intermediate geometry shapes with no shader assignment.
@param geoList: List of geometry to check for shader assignments.
@type geoList: list
'''
# Check Geometry List
if not geoList:
geoList = mc.ls(type=['mesh','nurbsSurface'],ni=True)
else:
geoList += mc.ls(mc.listRelatives(geoList,ad=True,pa=True) or [],type=['mesh','nurbsSurface'],ni=True) or []
geoList = mc.ls(geoList,type=['mesh','nurbsSurface'],ni=True)
# Check Shader Assignment
noShaderList = []
for geo in geoList:
SG = glTools.utils.shader.getSG(geo)
if not SG: noShaderList.append(geo)
# Return Result
return noShaderList
def unusedReferenceCheck():
'''
Return a list of unused reference nodes.
'''
# Initialize Return List
result = []
# Get list of existing references
refList = glTools.utils.reference.listReferences()
# Check Unused Reference
for ref in refList:
try: refFile = glTools.utils.reference.getReferenceFile(ref)
except: result.append(ref)
# Return Result
return result
def unknownNodeCheck():
'''
Return a list of unknown nodes.
'''
result = mc.ls(type='unknown')
if not result: result = []
return result
def checkTransforms(objList=[],tol=0.0000000001):
'''
Check for non-zero transforms
@param objList: List of transforms to check.
@type objList: list
@param tol: Value tolerance.
@type tol: float
'''
# Check Object List
if not objList: objList = mc.ls(transforms=True)
if not objList: return []
# Check Transforms
transformList = []
for obj in objList:
# Skip Default Transforms
if obj == 'persp': continue
if obj == 'front': continue
if obj == 'side': continue
if obj == 'top': continue
# Translate
if abs(mc.getAttr(obj+'.tx')) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.ty')) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.tz')) > tol:
transformList.append(obj)
continue
# Rotate
if abs(mc.getAttr(obj+'.rx')) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.ry')) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.rz')) > tol:
transformList.append(obj)
continue
# Scale
if abs(mc.getAttr(obj+'.sx') - 1.0) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.sy') - 1.0) > tol:
transformList.append(obj)
continue
if abs(mc.getAttr(obj+'.sz') - 1.0) > tol:
transformList.append(obj)
continue
# Return Result
return transformList
def displayOverridesCheck(objList=[]):
'''
Check all/specified objects for display overrides
@param objList: List of DAG nodes to check. If empty, use all DAG nodes in scene
@type objList: list
'''
# Check Object List
if not objList: objList = mc.ls(dag=True)
else: objList = mc.ls(objList,dag=True)
# Check Display Overrides
displayOverrideList = []
for obj in objList:
if mc.getAttr(obj+'.overrideEnabled'):
displayOverrideList.append(obj)
# Return Result
return displayOverrideList
# =========
# - Fixes -
# =========
def shapeNameFix(shape):
'''
Fix incorrectly named geometry shape node
	@param shape: The shape node to rename to match its transform parent.
	@type shape: str
'''
# Get Shape Transform Parent
parent = mc.listRelatives(shape,p=True)[0]
# Check Shape Name
shapeName = parent+'Shape'
if mc.objExists(shapeName):
raise Exception('Shape "'+shapeName+'" already exists! Unable to rename shape "'+shape+'"!')
# Rename Shape
newShape = mc.rename(shape,shapeName)
# Return Result
return newShape
def deleteIntermediateShapes(objList=[]):
'''
Delete all intermediate shapes in the scene
'''
# Get list of intermediate shapes
intermediateShapeList = intermediateShapesCheck(objList)
# Delete intermediate shapes
if intermediateShapeList: mc.delete(intermediateShapeList)
# Return result
return intermediateShapeList
def deleteConstructionHistory(geoList=[]):
'''
Delete construction history for specified geometry
	@param geoList: List of objects to delete construction history from.
@type geoList: list
'''
# Get Scene Geometry
if not geoList: geoList = mc.ls(geometry=True)
# Delete History
for geo in geoList: mc.delete(geo,ch=True)
# Return Result
return geoList
def deleteUserAttrs(nodeList=[],includeShapes=False):
'''
Delete user defined attributes from the specified list of nodes
@param nodeList: List of nodes to delete user defined attrs from. If empty, assume all nodes.
@type nodeList: list
	@param includeShapes: Also delete user defined attributes from shape nodes.
@type includeShapes: bool
'''
# Check nodeList
if not nodeList: nodeList = mc.ls()
# For each node
for node in nodeList:
# Delete user attributes
glTools.utils.attribute.deleteUserAttrs(node)
# Include Shapes
if includeShapes:
# Delete shape user attributes
shapes = mc.listRelatives(node,s=True)
for shape in shapes:
glTools.utils.attribute.deleteUserAttrs(shape)
def deleteEmptyGroups(objList=[]):
'''
Delete empty groups
'''
# Get Empty Group List
emptyGrpList = emptyGroupCheck(objList=objList)
# Delete Empty Groups
if emptyGrpList: mc.delete(emptyGrpList)
# Return Result
return emptyGrpList
def deleteEmptySets(setList=[]):
'''
	Delete empty object sets
'''
# Get Empty Group List
emptySetList = emptySetCheck(setList=setList)
# Delete Empty Groups
if emptySetList: mc.delete(emptySetList)
# Return Result
return emptySetList
def deleteEmptyLayers(layerList=[]):
'''
	Delete empty layers
'''
# Get Empty Group List
emptyLayerList = emptyLayerCheck(layerList=layerList)
# Delete Empty Groups
if emptyLayerList: mc.delete(emptyLayerList)
# Return Result
return emptyLayerList
def deleteUnknownNodes():
'''
Delete all node of type "unknown" in the scene
'''
# Get list of unknown nodes
unknownNodes = unknownNodeCheck() or []
# Delete unknown nodes
for node in unknownNodes:
try:
mc.lockNode(node,l=False)
mc.delete(node)
except:
print('Problem deleting unknown node "'+node+'"!')
# Return Result
return unknownNodes
def deleteNodesByType(nodeTypeList=[]):
'''
Delete nodes of the specified type(s).
@param nodeTypeList: List of node types to delete.
@type nodeTypeList: list
'''
# Check Node Types
if not nodeTypeList: return []
# Get Node List (by type)
nodeList = mc.ls(type=nodeTypeList)
# Delete Nodes
if nodeList: mc.delete(nodeList)
else: nodeList = []
# Return Result
return nodeList
def deleteUnusedReferenceNodes():
'''
Delete all unused reference nodes in the scene
'''
mm.eval('RNdeleteUnused')
def deleteEmptySets(setList=[]):
'''
Delete empty object sets
	@param setList: A list of sets to check. If empty, check all sets in the current scene.
@type setList: list
'''
# Check setList
if not setList: setList = mc.ls(sets=True)
# Check empty sets
emptySetList = []
for set in setList:
if not mc.sets(set,q=True):
emptySetList.append(set)
# Delete empty sets
for emptySet in emptySetList:
try: mc.delete(emptySet)
except: pass
# Return result
return emptySetList
def deleteAllSets(excludeList=[]):
'''
	Delete all object sets, except those in the exclude list
	@param excludeList: A list of sets to exclude from deletion.
@type excludeList: list
'''
# Get set list
setList = mc.ls(sets=True)
if excludeList:
excludeSetList = mc.ls(excludeList,sets=True)
setList = list(set(setList)-set(excludeSetList))
# Delete unused sets
for deleteSet in setList:
try: mc.delete(deleteSet)
except: pass
# Return result
return setList
def deleteUnusedShadingNodes():
'''
Delete all unused shading nodes in the scene
'''
#texList = mc.ls(tex=True)
#if texList: mc.delete(texList)
mm.eval('MLdeleteUnused')
def deleteDisplayLayers():
'''
Delete all display layers
'''
# Get display layer list
displayLayers = mc.ls(type='displayLayer')
displayLayers.remove('defaultLayer')
# Delete display layers
if displayLayers: mc.delete(displayLayers)
# Return result
return displayLayers
def deleteRenderLayers():
'''
Delete all render layers
'''
# Get render layer list
renderLayers = mc.ls(type='renderLayer')
renderLayers.remove('defaultRenderLayer')
# Delete render layers
if renderLayers: mc.delete(renderLayers)
# Return result
return renderLayers
def assignInitialShadingGroup(geoList=[]):
'''
Assign initialShadingGroup (lambert1) to specified geometry.
@param geoList: List of geometry to apply default shader to. If empty, use all scene geometry
@type geoList: list
'''
# Check geoList
if not geoList: geoList = mc.ls(geometry=True)
if not geoList: return []
# Assign Initial Shading Group
mc.sets(geoList,fe='initialShadingGroup')
# Return result
return geoList
def zeroTransforms(objList=[]):
'''
Reset transform values
@param objList: List of transforms to zero out.
@type objList: list
'''
# Check Object List
if not objList: objList = mc.ls(transforms=True)
if not objList: return []
# Check Transforms
for obj in objList:
# Translate
if mc.getAttr(obj+'.tx',se=True): mc.setAttr(obj+'.tx',0)
if mc.getAttr(obj+'.ty',se=True): mc.setAttr(obj+'.ty',0)
if mc.getAttr(obj+'.tz',se=True): mc.setAttr(obj+'.tz',0)
# Rotate
if mc.getAttr(obj+'.rx',se=True): mc.setAttr(obj+'.rx',0)
if mc.getAttr(obj+'.ry',se=True): mc.setAttr(obj+'.ry',0)
if mc.getAttr(obj+'.rz',se=True): mc.setAttr(obj+'.rz',0)
		# Scale (reset to 1.0, not 0, so objects are not collapsed)
		if mc.getAttr(obj+'.sx',se=True): mc.setAttr(obj+'.sx',1)
		if mc.getAttr(obj+'.sy',se=True): mc.setAttr(obj+'.sy',1)
		if mc.getAttr(obj+'.sz',se=True): mc.setAttr(obj+'.sz',1)
# Return Result
return objList
def copyInputShapeAttrs(geoList=[]):
'''
Copy user defined attributes from an input shape to the output deforming shape.
	@param geoList: List of geometry to copy attributes for.
@type geoList: list
'''
# Check Geometry List
if not geoList: geoList = mc.listRelatives(mc.ls(geometry=True) or [],p=True,pa=True) or []
if not geoList: return []
# Copy Input Shape Attrs
for geo in geoList:
# Get Output Shape
geoShape = mc.listRelatives(geo,s=True,ni=True) or []
if not geoShape:
print('No shape found for geometry transform "'+geo+'"!')
continue
# Get Input Shape
geoInputShape = geoShape[0]
try: geoInputShape = glTools.utils.shape.findInputShape(geoShape[0])
except: pass
# Copy User Attributes
if geoInputShape != geoShape[0]:
userAttr = mc.listAttr(geoInputShape,ud=True,s=True) or []
for at in userAttr: glTools.utils.attribute.copyAttr(geoInputShape,geoShape[0],at)
# ========
# - MISC -
# ========
def removeTurtle():
'''
	Delete nodes and unload the plugin related to the Turtle Renderer.
'''
# Remove Turtle Nodes
turtleNode = 'TurtleDefaultBakeLayer'
if mc.objExists(turtleNode):
print('Removing Turtle nodes...')
mc.lockNode(turtleNode,l=False)
mc.delete(turtleNode)
# Unload Plugin
if mc.pluginInfo('Turtle',q=True,loaded=True):
print('Unloading Turtle plugin...')
try: mc.unloadPlugin('Turtle',f=True)
except: print('Error unloading Turtle plugin!')
| 25.851967 | 174 | 0.699916 | ["MIT"] | Lynn5160/glTools | utils/cleanup.py | 24,973 | Python |
import torch
import torch.nn as nn
import torch.nn.functional as F
from segmentation_models_pytorch.base import modules as md
class DecoderBlock(nn.Module):
def __init__(
self,
in_channels,
skip_channels,
out_channels,
use_batchnorm=True,
attention_type=None,
):
super().__init__()
self.conv1 = md.Conv2dReLU(
in_channels + skip_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.attention1 = md.Attention(attention_type, in_channels=in_channels + skip_channels)
self.conv2 = md.Conv2dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
self.attention2 = md.Attention(attention_type, in_channels=out_channels)
def forward(self, x, skip=None):
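        # upsample by a factor of 2, then (when a skip feature map is given) match its
        # spatial size if necessary and concatenate it along the channel dimension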
x = F.interpolate(x, scale_factor=2, mode="nearest")
if skip is not None:
if skip.shape[-1] != x.shape[-1]:
skip = F.interpolate(skip, scale_factor=2, mode="nearest")
x = torch.cat([x, skip], dim=1)
x = self.attention1(x)
x = self.conv1(x)
x = self.conv2(x)
x = self.attention2(x)
return x
class CenterBlock(nn.Sequential):
def __init__(self, in_channels, out_channels, use_batchnorm=True):
conv1 = md.Conv2dReLU(
in_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
conv2 = md.Conv2dReLU(
out_channels,
out_channels,
kernel_size=3,
padding=1,
use_batchnorm=use_batchnorm,
)
super().__init__(conv1, conv2)
class UnetDecoder(nn.Module):
def __init__(
self,
encoder_channels,
decoder_channels,
n_blocks=5,
use_batchnorm=True,
attention_type=None,
center=False,
):
super().__init__()
if n_blocks != len(decoder_channels):
raise ValueError(
"Model depth is {}, but you provide `decoder_channels` for {} blocks.".format(
n_blocks, len(decoder_channels)
)
)
# remove first skip with same spatial resolution
encoder_channels = encoder_channels[1:]
# reverse channels to start from head of encoder
encoder_channels = encoder_channels[::-1]
# computing blocks input and output channels
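        # in_channels  = [head] + all but the last decoder width (each block consumes the previous block's output)
        # skip_channels = the shallower encoder widths in decoding order, plus 0 for the last block, which has no skip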
head_channels = encoder_channels[0]
in_channels = [head_channels] + list(decoder_channels[:-1])
skip_channels = list(encoder_channels[1:]) + [0]
out_channels = decoder_channels
if center:
self.center = CenterBlock(head_channels, head_channels, use_batchnorm=use_batchnorm)
else:
self.center = nn.Identity()
# combine decoder keyword arguments
kwargs = dict(use_batchnorm=use_batchnorm, attention_type=attention_type)
blocks = [
DecoderBlock(in_ch, skip_ch, out_ch, **kwargs)
for in_ch, skip_ch, out_ch in zip(in_channels, skip_channels, out_channels)
]
self.blocks = nn.ModuleList(blocks)
def forward(self, *features):
features = features[1:] # remove first skip with same spatial resolution
features = features[::-1] # reverse channels to start from head of encoder
head = features[0]
skips = features[1:]
x = self.center(head)
for i, decoder_block in enumerate(self.blocks):
skip = skips[i] if i < len(skips) else None
x = decoder_block(x, skip)
return x
| 30.790323 | 96 | 0.587742 | ["MIT"] | navivokaj/segmentation_models.pytorch | segmentation_models_pytorch/decoders/unet/decoder.py | 3,818 | Python |
import random
import string
import os.path
import jsonpickle
import getopt
import sys
from model.contact import Contact
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of contacts", "file"])
except getopt.GetoptError as err:
    # the getopt module has no usage() helper; report the parse error instead
    print(err)
sys.exit(2)
n = 5
f = "/data/contacts.json"
for o, a in opts:
if o == "-n":
n = int(a)
elif o == "-f":
f = a
def random_string(prefix, maxlen):
symbols = string.ascii_letters+string.digits+" "*10
return prefix+"".join([random.choice(symbols) for i in range(random.randrange(maxlen))])
testdata = [Contact(firstname="", middlename="", lastname="", nickname="", title="", company="",
address="", homephone="", mobilephone="", workphone="", fax="", address2="",
seconderyphone="", notes="")]+ [Contact(firstname=random_string('firstname', 10),
middlename=random_string('middlename', 10),
lastname=random_string('lastname', 10),
nickname=random_string('nickname', 10),
title=random_string('title', 10),
company=random_string('company', 10),
address=random_string('address', 10),
homephone=random_string('home', 10),
mobilephone=random_string('mobile', 10),
workphone=random_string('work', 10),
fax=random_string('fax', 10),
address2=random_string('address2', 10),
seconderyphone=random_string('phone2', 10),
                                         notes=random_string('notes', 10)) for i in range(n)
]
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), ".."+f)
with open(file, "w") as out:
jsonpickle.set_encoder_options("json", indent=2)
out.write(jsonpickle.encode(testdata))
| 44.588235 | 103 | 0.474934 | ["Apache-2.0"] | vdenPython/python_training | generator/contact.py | 2,274 | Python |
from ampel.t3.supply.load.T3SimpleDataLoader import T3SimpleDataLoader
from ampel.core.AmpelContext import AmpelContext
def test_instantiate(core_config, patch_mongo, ampel_logger):
"""
AbsT3Loader understands all the aliases in the ampel-core config
"""
ctx = AmpelContext.load(core_config)
aliases = ctx.config.get("alias.t3", dict)
assert len(
directives := T3SimpleDataLoader(
context=ctx,
logger=ampel_logger,
directives=[k[1:] for k in aliases.keys()]
).directives
) == len(aliases)
for d, value in zip(directives, aliases.values()):
assert d.dict(exclude_defaults=True) == value
| 34 | 70 | 0.677941 | ["BSD-3-Clause"] | mafn/Ampel-core | ampel/test/test_T3SimpleDataLoader.py | 680 | Python |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in data_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
@ops.RegisterGradient("DynamicPartition")
def _DynamicPartitionGrads(op, *grads):
"""Gradients for DynamicPartition."""
data = op.inputs[0]
indices = op.inputs[1]
num_partitions = op.get_attr("num_partitions")
prefix_shape = array_ops.shape(indices)
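  # partition a tensor of flat element positions the same way the data was partitioned,
  # then stitch the incoming gradients back into the layout of the original data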
original_indices = array_ops.reshape(
math_ops.range(math_ops.reduce_prod(prefix_shape)), prefix_shape)
partitioned_indices = data_flow_ops.dynamic_partition(
original_indices, indices, num_partitions)
reconstructed = data_flow_ops.parallel_dynamic_stitch(partitioned_indices,
grads)
reconstructed = array_ops.reshape(reconstructed, array_ops.shape(data))
return [reconstructed, None]
@ops.RegisterGradient("DynamicStitch")
@ops.RegisterGradient("ParallelDynamicStitch")
def _DynamicStitchGrads(op, grad):
"""Gradients for DynamicStitch and ParallelDynamicStitch."""
num_values = len(op.inputs) // 2
indices_grad = [None] * num_values
def AsInt32(x):
return (x if op.inputs[0].dtype == dtypes.int32 else
math_ops.cast(x, dtypes.int32))
inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
if isinstance(grad, ops.IndexedSlices):
output_shape = array_ops.shape(op.outputs[0])
output_rows = output_shape[0]
grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
values_grad = [array_ops.gather(grad, inp) for inp in inputs]
return indices_grad + values_grad
ops.NotDifferentiable("Queue")
ops.NotDifferentiable("QueueEnqueue")
ops.NotDifferentiable("QueueEnqueueMany")
ops.NotDifferentiable("QueueDequeue")
ops.NotDifferentiable("QueueDequeueMany")
ops.NotDifferentiable("QueueDequeueUpTo")
ops.NotDifferentiable("QueueClose")
ops.NotDifferentiable("QueueSize")
ops.NotDifferentiable("Stack")
ops.NotDifferentiable("StackPush")
ops.NotDifferentiable("StackPop")
ops.NotDifferentiable("StackClose")
ops.NotDifferentiable("GetSessionHandle")
ops.NotDifferentiable("GetSessionHandleV2")
ops.NotDifferentiable("GetSessionTensor")
ops.NotDifferentiable("DeleteSessionTensor")
| 36.289256 | 80 | 0.751765 | ["Apache-2.0"] | devsangwoo/tensor | tensorflow/python/ops/data_flow_grad.py | 4,391 | Python |
# -*- coding: utf-8 -*-
from werkzeug.exceptions import abort as _abort, HTTPException
def abort(http_status_code, **kwargs):
try:
_abort(http_status_code)
except HTTPException as e:
if len(kwargs):
e.data = kwargs
raise
| 22.25 | 62 | 0.629213 | ["MIT"] | soasme/axe | axe/utils.py | 267 | Python |
import re
import numpy
import math
import sys
# extract lowercase words from a review, dropping punctuation and common English stop words
def extractCleanWords(review):
stopWords = ["in", "i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you",
"your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she",
"her", "hers", "herself", "it", "its", "itself", "they", "them", "their",
"theirs", "themselves", "what", "which", "who", "whom", "this", "that", "these",
"those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has",
"had", "having", "do", "does", "did", "doing", "a", "an", "the", "and", "but",
"if", "or", "because", "as", "until", "while", "of", "at", "by", "for", "with",
"about", "against", "between", "into", "through", "during", "before", "after",
"above", "below", "to", "from", "up", "down", "out", "on", "off", "over",
"under", "again", "further", "then", "once", "here", "there", "when", "where", "why",
"how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such",
"no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t",
"can", "will", "just", "don", "should", "now"]
words = re.sub("[^\w]", " ", review).split()
cleanWords = [i.lower() for i in words if i not in stopWords]
return cleanWords
#used by bag of words to create the vocab dictionary
def createVocabTokens(reviews):
vocab = []
for review in reviews:
token = extractCleanWords(review)
vocab.extend(token)
vocab = sorted(list(set(vocab)))
return vocab
"""the bag of words for multinomialNB does not need to create
matrices for each review because it takes too much space and slows
it down and it is not necessary. The bag of words returns a
dictionary with the frequencies for each word used for the numerator of
P(xi|ci), the total words in the classifier used for the denom of
P(xi|ci), and the number of reviews for the class used to calculate
the prior probabilities for each class"""
def bagOfWords_MultinomialNB(txtFile):
total_words = 0
reviewFile = txtFile
with open(reviewFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
#print(len(reviewList))
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
vocabTokens = createVocabTokens(reviewList)
#print("Word bank for reviews: \n{0} \n".format(vocabTokens));
#print(len(vocabTokens))
#bagOfWords(reviewFile)
numReviews = len(reviewList)
#print(len(reviewList))
#print(len(vocabTokens))
vocabDict = dict.fromkeys(vocabTokens, 0)
#matrix = numpy.zeros(shape = (len(reviewList),len(vocabTokens)))
for i in range(len(reviewList)):
words = extractCleanWords(reviewList[i])
#bagList = numpy.zeros(len(vocabTokens))
for word in words:
vocabDict[word] += 1
total_words +=1
#if word in vocabTokens:
#bagList[vocabTokens.index(word)] +=1
#print(i, " out of ", len(vocabTokens), " done")
#matrix[i] = bagList
#print("{0}\n{1}\n".format(review,numpy.array(bagList)))
return vocabDict, total_words, numReviews
def bagOfWords_GaussianNB(txtFile):
total_words = 0
reviewFile = txtFile
with open(reviewFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
#print(len(reviewList))
numReviews = len(reviewList)
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
vocabTokens = createVocabTokens(reviewList)
vocabDict = dict.fromkeys(vocabTokens, 0)
for i in range(len(reviewList)):
words = extractCleanWords(reviewList[i])
#bagList = numpy.zeros(len(vocabTokens))
for word in words:
vocabDict[word] += 1
sparseMatrix = []
for i in range(len(reviewList)):
#print("Gauss: ", i)
words = extractCleanWords(reviewList[i])
bagList = {}
for word in words:
if word in bagList:
bagList[word] +=1
else:
bagList[word] = 1
sparseMatrix.append(bagList)
return sparseMatrix, vocabDict, numReviews
# calculates the per-word mean and variance using bag of words (tfidforBOW == 1) or tf-idf features
def calcMean_Var(txtFile, tfidforBOW):
if tfidforBOW == 1:#using bag of words
sparseMatrix, vocabDict, numReviews = bagOfWords_GaussianNB(txtFile)
else:
sparseMatrix, vocabDict, numReviews = tf_idf(txtFile)
meanVarDict = {}
meanVarTouple = [0,0]
for word in vocabDict:
meanVarTouple[0] = (vocabDict[word] / numReviews)
#print(meanVarTouple[0])
var = 0
for m in sparseMatrix:
if word in m:
var += ((m[word]-meanVarTouple[0])**2)
else:
var += ((-1*meanVarTouple[0])**2)
meanVarTouple[1] = (var / (numReviews -1))
meanVarDict[word] = meanVarTouple
#print("Gauss: ", meanVarTouple)
return meanVarDict
def gaussian_BOW(trainDataPos, trainDataNeg, testData, c):
testFile = testData
with open(testFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
#prediction will be used for the accuracy of the classifier
prediction = []
meanVarDictPOS = calcMean_Var(trainDataPos,1)
meanVarDictNEG = calcMean_Var(trainDataNeg,1)
testWordFreq = {}
for review in reviewList:
wordsInReview = extractCleanWords(review)
for word in wordsInReview:
if (word in meanVarDictPOS) or (word in meanVarDictNEG):
if word in testWordFreq:
testWordFreq[word] += 1
else:
testWordFreq[word] = 1
for review in reviewList:
wordsInReview = list(set(extractCleanWords(review)))
probPos =0
probNeg =0
for word in wordsInReview:
if word in meanVarDictPOS:
probPos += (math.log((1/(math.sqrt(2*math.pi*meanVarDictPOS[word][1])))) - (((testWordFreq[word] - meanVarDictPOS[word][0])**2)/((meanVarDictPOS[word][1]**2))))
if word in meanVarDictNEG:
probNeg += (math.log((1/(math.sqrt(2*math.pi*meanVarDictNEG[word][1])))) - (((testWordFreq[word] - meanVarDictNEG[word][0])**2)/((meanVarDictNEG[word][1]**2))))
if probPos > probNeg:
prediction.append(1)
else:
prediction.append(0)
poss = 0
for p in prediction:
if p == c:
poss +=1
return(poss/len(prediction))
def tf_idf(txtFile):
reviewFile = txtFile
with open(reviewFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
#print(len(reviewList))
numReviews = len(reviewList)
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
vocabTokens = createVocabTokens(reviewList)
vocabDictIDF = dict.fromkeys(vocabTokens, 0)
"""for i in range(len(reviewList)):
words = extractCleanWords(reviewList[i])
#bagList = numpy.zeros(len(vocabTokens))
for word in words:
vocabDict[word] += 1"""
totalNumWords = 0
sparseMatrixTFIDF = []
for i in range(len(reviewList)):
#print("TFidf: ", i)
words = extractCleanWords(reviewList[i])
        bagListTF = {}
        # reset per review so tf is normalized by this review's word count, not a running cumulative total
        totalNumWords = 0
for word in words:
totalNumWords +=1
if word in bagListTF:
bagListTF[word] +=1
else:
bagListTF[word] = 1
for word in list(set(words)):
bagListTF[word] = (bagListTF[word]/totalNumWords)
vocabDictIDF[word]+=1
sparseMatrixTFIDF.append(bagListTF)
#print(i)
    #using the tf values in the sparse matrix and idf values in
#the vocab dict we can get the tf idf and hold it in sparse matrix
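    # i.e. tf-idf(word, doc) = tf(word, doc) * log(num_reviews / doc_freq(word))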
vocabDict = dict.fromkeys(vocabTokens, 0)
for dictTF in sparseMatrixTFIDF:
for word in dictTF:
dictTF[word] = (dictTF[word] * (math.log((len(reviewList)/vocabDictIDF[word]))))
vocabDict[word]+= dictTF[word]
#print(sparseMatrixTFIDF)
return sparseMatrixTFIDF, vocabDict, numReviews
def gaussian_tf_idf(trainDataPos, trainDataNeg, testData, c):
testFile = testData
with open(testFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
#prediction will be used for the accuracy of the classifier
prediction = []
meanVarDictPOS = calcMean_Var(trainDataPos,0)
meanVarDictNEG = calcMean_Var(trainDataNeg,0)
testSparseTFIDF, testVocabDict, testNumReviews = tf_idf(testData)
for review in reviewList:
wordsInReview = list(set(extractCleanWords(review)))
probPos =0
probNeg =0
for word in wordsInReview:
if word in meanVarDictPOS:
probPos += (math.log((1/(math.sqrt(2*math.pi*meanVarDictPOS[word][1])))) - (((testVocabDict[word] - meanVarDictPOS[word][0])**2)/(2*(meanVarDictPOS[word][1]**2))))
if word in meanVarDictNEG:
probNeg += (math.log((1/(math.sqrt(2*math.pi*meanVarDictNEG[word][1])))) - (((testVocabDict[word] - meanVarDictNEG[word][0])**2)/(2*(meanVarDictNEG[word][1]**2))))
if probPos > probNeg:
prediction.append(1)
else:
prediction.append(0)
poss = 0
for p in prediction:
if p == c:
poss +=1
return(poss/len(prediction))
def multinomialNB(trainDataPos, trainDataNeg, testData, c):
testFile = testData
with open(testFile, 'r', encoding='utf8') as file:
txt = file.read().replace('\n','')
reviewList = txt.split("<br /><br />")
for review in reviewList:
review = re.sub("[^\w\s]", "", review)
#prediction will be used for the accuracy of the classifier
prediction = []
#getting the dict, word count and review count for pos and neg from BOW
posDict, posWordCount, posdocs = bagOfWords_MultinomialNB(trainDataPos)
negDict, negWordCount, negdocs = bagOfWords_MultinomialNB(trainDataNeg)
"""TEST PRINT STATEMENTS
print("Pos dic: ", len(posDict))
print("Neg dic: ", len(negDict))
print("Pos word count: ", posWordCount)
print("Neg word count: ", negWordCount)
print("Pos docs: ", posdocs)
print("Neg docs: ", negdocs)"""
    # alpha is the smoothing parameter; through trial I found that a value
    # of 18 gives the highest prediction accuracy
alpha = 18
#calculating the prior log prob for pos and neg
priorLogPosProb =math.log( posdocs / (negdocs + posdocs))
priorLogNegProb =math.log( negdocs / (negdocs + posdocs))
"""for each review in our test, we extract the words and calculate
the log prob for that word given pos and neg and add this with
the prior log probability, then we compare the pos and neg total
probabilities and assign a 1 if the pos > neg, and 0 for the opposite
We check the prediction list and calculate the accurace for the
given classifier"""
for review in reviewList:
wordsInReview = list(set(extractCleanWords(review)))
logProbPos = 0
logProbNeg = 0
posPercent = 0
negPercent = 0
for word in wordsInReview:
if word not in posDict:
logProbPos += math.log( ((alpha) / (posWordCount+(alpha*len(posDict) ) ) ) )
if word in posDict:
logProbPos += math.log( ((posDict[word] + alpha) / (posWordCount+(alpha*len(posDict) ) ) ) )
if word not in negDict:
logProbNeg += math.log( ((alpha) / (negWordCount+(alpha*len(negDict) ) ) ) )
if word in negDict:
logProbNeg += math.log( ((negDict[word] + alpha) / (negWordCount+(alpha*len(negDict) ) ) ) )
posPercent = priorLogPosProb + logProbPos
negPercent = priorLogNegProb + logProbNeg
if posPercent > negPercent:
prediction.append(1)
else:
prediction.append(0)
poss = 0
for p in prediction:
if p == c:
poss +=1
return(poss/len(prediction))
#setting the arguments
train_pos = sys.argv[1]
train_neg = sys.argv[2]
test_pos = sys.argv[3]
test_neg = sys.argv[4]
#getting the accuracy for multinomial for pos test and neg test
posAcc = multinomialNB(train_pos, train_neg, test_pos,1)
negAcc = multinomialNB(train_pos, train_neg, test_neg,0)
#calculating the average accuracy and printing it out
multinomialAcc = (posAcc+negAcc) / 2
print("MultinomialNB with bag of words accuracy: ", multinomialAcc)
gposAcc = gaussian_BOW(train_pos, train_neg, test_pos,1)
gnegAcc = gaussian_BOW(train_pos, train_neg, test_neg,0)
gaussAcc = (gposAcc+gnegAcc) / 2
print("Gaussian with bag of words accuracy: ", gaussAcc)
#calcMean_Var(train_pos,1)
#tf_idf(train_pos)
tposAcc = gaussian_tf_idf(train_pos, train_neg, test_pos,1)
tnegAcc = gaussian_tf_idf(train_pos, train_neg, test_neg,0)
tgaussAcc = (tposAcc+tnegAcc) / 2
print("Gaussian with tf_idf acc: ", tgaussAcc)
| 39.654867 | 179 | 0.612512 | [
"MIT"
] | atahiraj/MLMovieReviewClassifier | NaiveBayesClassifier.py | 13,443 | Python |
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: rack.py
@time: 2018-04-06 18:22
"""
from __future__ import unicode_literals
from datetime import datetime
from flask import (
request,
flash,
render_template,
url_for,
redirect,
abort,
jsonify,
Blueprint,
)
from flask_babel import gettext as _
from flask_login import login_required
from app_backend import app
from app_backend import excel
from app_backend.api.inventory import count_inventory
from app_backend.api.rack import (
get_rack_pagination,
get_rack_row_by_id,
add_rack,
edit_rack,
get_rack_choices,
# rack_current_stats,
# rack_former_stats,
)
from app_backend.api.rack import (
get_rack_rows,
# get_distinct_brand,
)
from app_backend.api.warehouse import (
get_warehouse_choices,
)
from app_backend.forms.rack import (
RackSearchForm,
RackAddForm,
RackEditForm,
)
from app_backend.models.model_bearing import Rack
from app_backend.permissions.rack import (
permission_rack_section_add,
permission_rack_section_search,
permission_rack_section_export,
permission_rack_section_get,
permission_rack_section_edit,
permission_rack_section_del,
)
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_INT_OPTION
from app_common.maps.operations import OPERATION_EXPORT, OPERATION_DELETE
from app_common.maps.status_delete import (
STATUS_DEL_OK,
STATUS_DEL_NO)
# Define the blueprint
bp_rack = Blueprint('rack', __name__, url_prefix='/rack')
# Load configuration
DOCUMENT_INFO = app.config.get('DOCUMENT_INFO', {})
PER_PAGE_BACKEND = app.config.get('PER_PAGE_BACKEND', 20)
AJAX_SUCCESS_MSG = app.config.get('AJAX_SUCCESS_MSG', {'result': True})
AJAX_FAILURE_MSG = app.config.get('AJAX_FAILURE_MSG', {'result': False})
@bp_rack.route('/lists.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_search.require(http_exception=403)
def lists():
"""
    Rack list
:return:
"""
template_name = 'rack/lists.html'
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('rack lists')
    # Search conditions
form = RackSearchForm(request.form)
form.warehouse_id.choices = get_warehouse_choices()
# app.logger.info('')
search_condition = [
Rack.status_delete == STATUS_DEL_NO,
]
if request.method == 'POST':
        # Form validation failed
        if not form.validate_on_submit():
            flash(_('Search Failure'), 'danger')
            # Handle csrf_token separately
if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
map(lambda x: flash(x, 'danger'), form.csrf_token.errors)
else:
if form.warehouse_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
search_condition.append(Rack.warehouse_id == form.warehouse_id.data)
if form.name.data:
search_condition.append(Rack.name == form.name.data)
            # Handle export
            if form.op.data == OPERATION_EXPORT:
                # Check export permission
if not permission_rack_section_export.can():
abort(403)
column_names = Rack.__table__.columns.keys()
query_sets = get_rack_rows(*search_condition)
return excel.make_response_from_query_sets(
query_sets=query_sets,
column_names=column_names,
file_type='csv',
file_name='%s.csv' % _('rack lists')
)
            # Batch delete
            if form.op.data == OPERATION_DELETE:
                # Check delete permission
if not permission_rack_section_del.can():
abort(403)
rack_ids = request.form.getlist('rack_id')
permitted = True
for rack_id in rack_ids:
                    # Check whether the rack is currently in use
                    # Inventory
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
permitted = False
break
if permitted:
result_total = True
for rack_id in rack_ids:
current_time = datetime.utcnow()
rack_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_rack(rack_id, rack_data)
result_total = result_total and result
if result_total:
flash(_('Del Success'), 'success')
else:
flash(_('Del Failure'), 'danger')
    # Pagination data
    pagination = get_rack_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
    # Render the template
return render_template(
template_name,
form=form,
pagination=pagination,
**document_info
)
@bp_rack.route('/<int:rack_id>/info.html')
@login_required
@permission_rack_section_get.require(http_exception=403)
def info(rack_id):
"""
    Rack detail
:param rack_id:
:return:
"""
    # Detail data
    rack_info = get_rack_row_by_id(rack_id)
    # Check whether the resource exists
if not rack_info:
abort(404)
    # Check whether the resource has been deleted
    if rack_info.status_delete == STATUS_DEL_OK:
        abort(410)
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack info')
    # Render the template
return render_template('rack/info.html', rack_info=rack_info, **document_info)
@bp_rack.route('/add.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_add.require(http_exception=403)
def add():
"""
    Create rack
:return:
"""
template_name = 'rack/add.html'
    # Document info
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('rack add')
    # Load the creation form
form = RackAddForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='create')
    # Enter the creation page
    if request.method == 'GET':
        # Render the page
return render_template(
template_name,
form=form,
**document_info
)
    # Handle the creation request
    if request.method == 'POST':
        # Form validation failed
if not form.validate_on_submit():
flash(_('Add Failure'), 'danger')
return render_template(
template_name,
form=form,
**document_info
)
        # Form validation passed
current_time = datetime.utcnow()
rack_data = {
'warehouse_id': form.warehouse_id.data,
'name': form.name.data,
'create_time': current_time,
'update_time': current_time,
}
result = add_rack(rack_data)
        # Creation succeeded
if result:
flash(_('Add Success'), 'success')
return redirect(request.args.get('next') or url_for('rack.lists'))
        # Creation failed
else:
flash(_('Add Failure'), 'danger')
return render_template(
template_name,
form=form,
**document_info
)
@bp_rack.route('/<int:rack_id>/edit.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_edit.require(http_exception=403)
def edit(rack_id):
"""
    Edit rack
"""
rack_info = get_rack_row_by_id(rack_id)
    # Check whether the resource exists
    if not rack_info:
        abort(404)
    # Check whether the resource has been deleted
if rack_info.status_delete == STATUS_DEL_OK:
abort(410)
template_name = 'rack/edit.html'
    # Load the edit form
    form = RackEditForm(request.form)
    form.warehouse_id.choices = get_warehouse_choices(option_type='update')
    # Document info
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack edit')
    # Enter the edit page
    if request.method == 'GET':
        # Populate the form
form.warehouse_id.data = rack_info.warehouse_id
form.name.data = rack_info.name
# form.create_time.data = rack_info.create_time
# form.update_time.data = rack_info.update_time
        # Render the page
return render_template(
template_name,
rack_id=rack_id,
form=form,
**document_info
)
    # Handle the edit request
    if request.method == 'POST':
        # Form validation failed
if not form.validate_on_submit():
flash(_('Edit Failure'), 'danger')
return render_template(
template_name,
rack_id=rack_id,
form=form,
**document_info
)
        # Form validation passed
current_time = datetime.utcnow()
rack_data = {
'warehouse_id': form.warehouse_id.data,
'name': form.name.data,
'update_time': current_time,
}
result = edit_rack(rack_id, rack_data)
        # Edit succeeded
if result:
flash(_('Edit Success'), 'success')
return redirect(request.args.get('next') or url_for('rack.lists'))
        # Edit failed
else:
flash(_('Edit Failure'), 'danger')
return render_template(
template_name,
rack_id=rack_id,
form=form,
**document_info
)
@bp_rack.route('/ajax/del', methods=['GET', 'POST'])
@login_required
def ajax_delete():
"""
    Delete rack
:return:
"""
ajax_success_msg = AJAX_SUCCESS_MSG.copy()
ajax_failure_msg = AJAX_FAILURE_MSG.copy()
    # Check delete permission
if not permission_rack_section_del.can():
ext_msg = _('Permission Denied')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check the request method
if not (request.method == 'GET' and request.is_xhr):
ext_msg = _('Method Not Allowed')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check the request parameters
rack_id = request.args.get('rack_id', 0, type=int)
if not rack_id:
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
rack_info = get_rack_row_by_id(rack_id)
    # Check whether the resource exists
if not rack_info:
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check whether the resource has been deleted
if rack_info.status_delete == STATUS_DEL_OK:
ext_msg = _('Already deleted')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
    # Check whether the rack is currently in use
    # Inventory
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
current_time = datetime.utcnow()
rack_data = {
'status_delete': STATUS_DEL_OK,
'delete_time': current_time,
'update_time': current_time,
}
result = edit_rack(rack_id, rack_data)
if result:
ajax_success_msg['msg'] = _('Del Success')
return jsonify(ajax_success_msg)
else:
ajax_failure_msg['msg'] = _('Del Failure')
return jsonify(ajax_failure_msg)
@bp_rack.route('/ajax/get_rack_choices', methods=['GET', 'POST'])
@login_required
def ajax_get_rack_choices():
"""
    Rack choices
:return:
"""
warehouse_id = request.args.get('warehouse_id', 0, type=int)
rack_choices = get_rack_choices(warehouse_id)
return jsonify(rack_choices)
# @bp_rack.route('/ajax/stats', methods=['GET', 'POST'])
# @login_required
# def ajax_stats():
# """
#     Get rack statistics
# :return:
# """
# time_based = request.args.get('time_based', 'hour')
# result_rack_current = rack_current_stats(time_based)
# result_rack_former = rack_former_stats(time_based)
#
# line_chart_data = {
# 'labels': [label for label, _ in result_rack_current],
# 'datasets': [
# {
# 'label': '在职',
# 'backgroundColor': 'rgba(220,220,220,0.5)',
# 'borderColor': 'rgba(220,220,220,1)',
# 'pointBackgroundColor': 'rgba(220,220,220,1)',
# 'pointBorderColor': '#fff',
# 'pointBorderWidth': 2,
# 'data': [data for _, data in result_rack_current]
# },
# {
# 'label': '离职',
# 'backgroundColor': 'rgba(151,187,205,0.5)',
# 'borderColor': 'rgba(151,187,205,1)',
# 'pointBackgroundColor': 'rgba(151,187,205,1)',
# 'pointBorderColor': '#fff',
# 'pointBorderWidth': 2,
# 'data': [data for _, data in result_rack_former]
# }
# ]
# }
# return json.dumps(line_chart_data, default=json_default)
#
#
# @bp_rack.route('/stats.html')
# @login_required
# @permission_rack_section_stats.require(http_exception=403)
# def stats():
# """
#     Rack statistics
# :return:
# """
#     # Statistics data
# time_based = request.args.get('time_based', 'hour')
# if time_based not in ['hour', 'date', 'month']:
# abort(404)
#     # Document info
# document_info = DOCUMENT_INFO.copy()
# document_info['TITLE'] = _('rack stats')
#     # Render the template
# return render_template(
# 'rack/stats.html',
# time_based=time_based,
# **document_info
# )
#
#
# @bp_rack.route('/<int:rack_id>/stats.html')
# @login_required
# @permission_rack_section_stats.require(http_exception=403)
# def stats_item(rack_id):
# """
#     Rack statistics detail
# :param rack_id:
# :return:
# """
# rack_info = get_rack_row_by_id(rack_id)
#     # Check whether the resource exists
# if not rack_info:
# abort(404)
#     # Check whether the resource has been deleted
# if rack_info.status_delete == STATUS_DEL_OK:
# abort(410)
#
#     # Statistics data
# rack_stats_item_info = get_rack_row_by_id(rack_id)
#     # Document info
# document_info = DOCUMENT_INFO.copy()
# document_info['TITLE'] = _('rack stats item')
#     # Render the template
# return render_template(
# 'rack/stats_item.html',
# rack_stats_item_info=rack_stats_item_info,
# **document_info
# )
| 29.418557 | 91 | 0.593146 | [
"MIT"
] | zhanghe06/bearing_project | app_backend/views/rack.py | 14,948 | Python |
import argparse
import logging
import os
import sys
from typing import Any
from typing import Optional
from typing import Sequence
from typing import Union
import pre_commit.constants as C
from pre_commit import color
from pre_commit import git
from pre_commit.commands.autoupdate import autoupdate
from pre_commit.commands.clean import clean
from pre_commit.commands.gc import gc
from pre_commit.commands.hook_impl import hook_impl
from pre_commit.commands.init_templatedir import init_templatedir
from pre_commit.commands.install_uninstall import install
from pre_commit.commands.install_uninstall import install_hooks
from pre_commit.commands.install_uninstall import uninstall
from pre_commit.commands.migrate_config import migrate_config
from pre_commit.commands.run import run
from pre_commit.commands.sample_config import sample_config
from pre_commit.commands.try_repo import try_repo
from pre_commit.error_handler import error_handler
from pre_commit.error_handler import FatalError
from pre_commit.logging_handler import logging_handler
from pre_commit.store import Store
from pre_commit.util import CalledProcessError
logger = logging.getLogger('pre_commit')
# https://github.com/pre-commit/pre-commit/issues/217
# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
# to install packages to the wrong place. We don't want anything to deal with
# pyvenv
os.environ.pop('__PYVENV_LAUNCHER__', None)
COMMANDS_NO_GIT = {'clean', 'gc', 'init-templatedir', 'sample-config'}
def _add_color_option(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'--color', default=os.environ.get('PRE_COMMIT_COLOR', 'auto'),
type=color.use_color,
metavar='{' + ','.join(color.COLOR_CHOICES) + '}',
help='Whether to use color in output. Defaults to `%(default)s`.',
)
def _add_config_option(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'-c', '--config', default=C.CONFIG_FILE,
help='Path to alternate config file',
)
class AppendReplaceDefault(argparse.Action):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.appended = False
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[str], None],
option_string: Optional[str] = None,
) -> None:
if not self.appended:
setattr(namespace, self.dest, [])
self.appended = True
getattr(namespace, self.dest).append(values)
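# Example of the behaviour above: with default=['pre-commit'], passing
# `-t pre-push -t commit-msg` yields ['pre-push', 'commit-msg'] -- the first
# explicitly supplied value replaces the default rather than appending to it.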
def _add_hook_type_option(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'-t', '--hook-type', choices=(
'pre-commit', 'pre-merge-commit', 'pre-push',
'prepare-commit-msg', 'commit-msg', 'post-checkout',
),
action=AppendReplaceDefault,
default=['pre-commit'],
dest='hook_types',
)
def _add_run_options(parser: argparse.ArgumentParser) -> None:
parser.add_argument('hook', nargs='?', help='A single hook-id to run')
parser.add_argument('--verbose', '-v', action='store_true', default=False)
mutex_group = parser.add_mutually_exclusive_group(required=False)
mutex_group.add_argument(
'--all-files', '-a', action='store_true', default=False,
help='Run on all the files in the repo.',
)
mutex_group.add_argument(
'--files', nargs='*', default=[],
help='Specific filenames to run hooks on.',
)
parser.add_argument(
'--show-diff-on-failure', action='store_true',
help='When hooks fail, run `git diff` directly afterward.',
)
parser.add_argument(
'--hook-stage', choices=C.STAGES, default='commit',
help='The stage during which the hook is fired. One of %(choices)s',
)
parser.add_argument(
'--from-ref', '--source', '-s',
help=(
'(for usage with `--from-ref`) -- this option represents the '
'original ref in a `from_ref...to_ref` diff expression. '
'For `pre-push` hooks, this represents the branch you are pushing '
'to. '
'For `post-checkout` hooks, this represents the branch that was '
'previously checked out.'
),
)
parser.add_argument(
'--to-ref', '--origin', '-o',
help=(
'(for usage with `--to-ref`) -- this option represents the '
'destination ref in a `from_ref...to_ref` diff expression. '
'For `pre-push` hooks, this represents the branch being pushed. '
'For `post-checkout` hooks, this represents the branch that is '
'now checked out.'
),
)
parser.add_argument(
'--commit-msg-filename',
help='Filename to check when running during `commit-msg`',
)
parser.add_argument(
'--remote-name', help='Remote name used by `git push`.',
)
parser.add_argument('--remote-url', help='Remote url used by `git push`.')
parser.add_argument(
'--checkout-type',
help=(
'Indicates whether the checkout was a branch checkout '
'(changing branches, flag=1) or a file checkout (retrieving a '
'file from the index, flag=0).'
),
)
def _adjust_args_and_chdir(args: argparse.Namespace) -> None:
# `--config` was specified relative to the non-root working directory
if os.path.exists(args.config):
args.config = os.path.abspath(args.config)
if args.command in {'run', 'try-repo'}:
args.files = [os.path.abspath(filename) for filename in args.files]
if args.command == 'try-repo' and os.path.exists(args.repo):
args.repo = os.path.abspath(args.repo)
try:
toplevel = git.get_root()
except CalledProcessError:
raise FatalError(
'git failed. Is it installed, and are you in a Git repository '
'directory?',
)
else:
if toplevel == '': # pragma: no cover (old git)
raise FatalError(
'git toplevel unexpectedly empty! make sure you are not '
'inside the `.git` directory of your repository.',
)
else:
os.chdir(toplevel)
args.config = os.path.relpath(args.config)
if args.command in {'run', 'try-repo'}:
args.files = [os.path.relpath(filename) for filename in args.files]
if args.command == 'try-repo' and os.path.exists(args.repo):
args.repo = os.path.relpath(args.repo)
def main(argv: Optional[Sequence[str]] = None) -> int:
argv = argv if argv is not None else sys.argv[1:]
parser = argparse.ArgumentParser(prog='pre-commit')
# https://stackoverflow.com/a/8521644/812183
parser.add_argument(
'-V', '--version',
action='version',
version=f'%(prog)s {C.VERSION}',
)
subparsers = parser.add_subparsers(dest='command')
autoupdate_parser = subparsers.add_parser(
'autoupdate',
help="Auto-update pre-commit config to the latest repos' versions.",
)
_add_color_option(autoupdate_parser)
_add_config_option(autoupdate_parser)
autoupdate_parser.add_argument(
'--bleeding-edge', action='store_true',
help=(
'Update to the bleeding edge of `master` instead of the latest '
'tagged version (the default behavior).'
),
)
autoupdate_parser.add_argument(
'--freeze', action='store_true',
help='Store "frozen" hashes in `rev` instead of tag names',
)
autoupdate_parser.add_argument(
'--repo', dest='repos', action='append', metavar='REPO',
help='Only update this repository -- may be specified multiple times.',
)
clean_parser = subparsers.add_parser(
'clean', help='Clean out pre-commit files.',
)
_add_color_option(clean_parser)
_add_config_option(clean_parser)
hook_impl_parser = subparsers.add_parser('hook-impl')
_add_color_option(hook_impl_parser)
_add_config_option(hook_impl_parser)
hook_impl_parser.add_argument('--hook-type')
hook_impl_parser.add_argument('--hook-dir')
hook_impl_parser.add_argument(
'--skip-on-missing-config', action='store_true',
)
hook_impl_parser.add_argument(dest='rest', nargs=argparse.REMAINDER)
gc_parser = subparsers.add_parser('gc', help='Clean unused cached repos.')
_add_color_option(gc_parser)
_add_config_option(gc_parser)
init_templatedir_parser = subparsers.add_parser(
'init-templatedir',
help=(
'Install hook script in a directory intended for use with '
'`git config init.templateDir`.'
),
)
_add_color_option(init_templatedir_parser)
_add_config_option(init_templatedir_parser)
init_templatedir_parser.add_argument(
'directory', help='The directory in which to write the hook script.',
)
_add_hook_type_option(init_templatedir_parser)
install_parser = subparsers.add_parser(
'install', help='Install the pre-commit script.',
)
_add_color_option(install_parser)
_add_config_option(install_parser)
install_parser.add_argument(
'-f', '--overwrite', action='store_true',
help='Overwrite existing hooks / remove migration mode.',
)
install_parser.add_argument(
'--install-hooks', action='store_true',
help=(
'Whether to install hook environments for all environments '
'in the config file.'
),
)
_add_hook_type_option(install_parser)
install_parser.add_argument(
'--allow-missing-config', action='store_true', default=False,
help=(
'Whether to allow a missing `pre-commit` configuration file '
'or exit with a failure code.'
),
)
install_hooks_parser = subparsers.add_parser(
'install-hooks',
help=(
'Install hook environments for all environments in the config '
'file. You may find `pre-commit install --install-hooks` more '
'useful.'
),
)
_add_color_option(install_hooks_parser)
_add_config_option(install_hooks_parser)
migrate_config_parser = subparsers.add_parser(
'migrate-config',
help='Migrate list configuration to new map configuration.',
)
_add_color_option(migrate_config_parser)
_add_config_option(migrate_config_parser)
run_parser = subparsers.add_parser('run', help='Run hooks.')
_add_color_option(run_parser)
_add_config_option(run_parser)
_add_run_options(run_parser)
sample_config_parser = subparsers.add_parser(
'sample-config', help=f'Produce a sample {C.CONFIG_FILE} file',
)
_add_color_option(sample_config_parser)
_add_config_option(sample_config_parser)
try_repo_parser = subparsers.add_parser(
'try-repo',
help='Try the hooks in a repository, useful for developing new hooks.',
)
_add_color_option(try_repo_parser)
_add_config_option(try_repo_parser)
try_repo_parser.add_argument(
'repo', help='Repository to source hooks from.',
)
try_repo_parser.add_argument(
'--ref', '--rev',
help=(
'Manually select a rev to run against, otherwise the `HEAD` '
'revision will be used.'
),
)
_add_run_options(try_repo_parser)
uninstall_parser = subparsers.add_parser(
'uninstall', help='Uninstall the pre-commit script.',
)
_add_color_option(uninstall_parser)
_add_config_option(uninstall_parser)
_add_hook_type_option(uninstall_parser)
help = subparsers.add_parser(
'help', help='Show help for a specific command.',
)
help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
# argparse doesn't really provide a way to use a `default` subparser
if len(argv) == 0:
argv = ['run']
args = parser.parse_args(argv)
if args.command == 'help' and args.help_cmd:
parser.parse_args([args.help_cmd, '--help'])
elif args.command == 'help':
parser.parse_args(['--help'])
with error_handler(), logging_handler(args.color):
if args.command not in COMMANDS_NO_GIT:
_adjust_args_and_chdir(args)
git.check_for_cygwin_mismatch()
store = Store()
store.mark_config_used(args.config)
if args.command == 'autoupdate':
return autoupdate(
args.config, store,
tags_only=not args.bleeding_edge,
freeze=args.freeze,
repos=args.repos,
)
elif args.command == 'clean':
return clean(store)
elif args.command == 'gc':
return gc(store)
elif args.command == 'hook-impl':
return hook_impl(
store,
config=args.config,
color=args.color,
hook_type=args.hook_type,
hook_dir=args.hook_dir,
skip_on_missing_config=args.skip_on_missing_config,
args=args.rest[1:],
)
elif args.command == 'install':
return install(
args.config, store,
hook_types=args.hook_types,
overwrite=args.overwrite,
hooks=args.install_hooks,
skip_on_missing_config=args.allow_missing_config,
)
elif args.command == 'init-templatedir':
return init_templatedir(
args.config, store, args.directory,
hook_types=args.hook_types,
)
elif args.command == 'install-hooks':
return install_hooks(args.config, store)
elif args.command == 'migrate-config':
return migrate_config(args.config)
elif args.command == 'run':
return run(args.config, store, args)
elif args.command == 'sample-config':
return sample_config()
elif args.command == 'try-repo':
return try_repo(args)
elif args.command == 'uninstall':
return uninstall(hook_types=args.hook_types)
else:
raise NotImplementedError(
f'Command {args.command} not implemented.',
)
raise AssertionError(
f'Command {args.command} failed to exit with a returncode',
)
if __name__ == '__main__':
exit(main())
| 35.384428 | 79 | 0.638864 | [
"MIT"
] | ModischFabrications/pre-commit | pre_commit/main.py | 14,543 | Python |
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
import logging
import os
import zipfile
import time
import mxnet as mx
import horovod.mxnet as hvd
from mxnet import autograd, gluon, nd
from mxnet.test_utils import download
def main():
# Function to get mnist iterator given a rank
def get_mnist_iterator(rank):
data_dir = "data-%d" % rank
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
zip_file_path = download('http://data.mxnet.io/mxnet/data/mnist.zip',
dirname=data_dir)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(data_dir)
input_shape = (1, 28, 28)
batch_size = args.batch_size
train_iter = mx.io.MNISTIter(
image="%s/train-images-idx3-ubyte" % data_dir,
label="%s/train-labels-idx1-ubyte" % data_dir,
input_shape=input_shape,
batch_size=batch_size,
shuffle=True,
flat=False,
num_parts=hvd.size(),
part_index=hvd.rank()
)
val_iter = mx.io.MNISTIter(
image="%s/t10k-images-idx3-ubyte" % data_dir,
label="%s/t10k-labels-idx1-ubyte" % data_dir,
input_shape=input_shape,
batch_size=batch_size,
flat=False,
)
return train_iter, val_iter
kernel_size = 5
strides = 2
pool_size = 2
hidden_dim = 512
output_dim = 10
activation = 'relu'
# Function to define neural network
def conv_nets():
net = gluon.nn.HybridSequential()
with net.name_scope():
net.add(gluon.nn.Conv2D(channels=20, kernel_size=kernel_size, activation=activation))
net.add(gluon.nn.MaxPool2D(pool_size=pool_size, strides=strides))
net.add(gluon.nn.Conv2D(channels=50, kernel_size=kernel_size, activation=activation))
net.add(gluon.nn.MaxPool2D(pool_size=pool_size, strides=strides))
net.add(gluon.nn.Flatten())
net.add(gluon.nn.Dense(hidden_dim, activation=activation))
net.add(gluon.nn.Dense(output_dim))
return net
# Function to evaluate accuracy for a model
def evaluate(model, data_iter, context):
data_iter.reset()
metric = mx.metric.Accuracy()
for _, batch in enumerate(data_iter):
data = batch.data[0].as_in_context(context)
label = batch.label[0].as_in_context(context)
output = model(data.astype(args.dtype, copy=False))
metric.update([label], [output])
return metric.get()
# Initialize Horovod
hvd.init()
# Horovod: pin context to local rank
context = mx.cpu(hvd.local_rank()) if args.no_cuda else mx.gpu(hvd.local_rank())
num_workers = hvd.size()
# Load training and validation data
train_data, val_data = get_mnist_iterator(hvd.rank())
# Build model
model = conv_nets()
model.cast(args.dtype)
model.hybridize()
# Create optimizer
optimizer_params = {'momentum': args.momentum,
'learning_rate': args.lr * hvd.size()}
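    # The base learning rate is scaled by the number of Horovod workers, a common
    # convention when gradients are averaged across hvd.size() processes.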
opt = mx.optimizer.create('sgd', **optimizer_params)
# Initialize parameters
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in",
magnitude=2)
model.initialize(initializer, ctx=context)
# Horovod: fetch and broadcast parameters
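    # Broadcasting from root_rank=0 ensures every worker starts from identical
    # initial weights before distributed training begins.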
params = model.collect_params()
if params is not None:
hvd.broadcast_parameters(params, root_rank=0)
# Horovod: create DistributedTrainer, a subclass of gluon.Trainer
trainer = hvd.DistributedTrainer(params, opt)
# Create loss function and train metric
loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()
# Global training timing
if hvd.rank() == 0:
global_tic = time.time()
# Train model
for epoch in range(args.epochs):
tic = time.time()
train_data.reset()
metric.reset()
for nbatch, batch in enumerate(train_data, start=1):
data = batch.data[0].as_in_context(context)
label = batch.label[0].as_in_context(context)
with autograd.record():
output = model(data.astype(args.dtype, copy=False))
loss = loss_fn(output, label)
loss.backward()
trainer.step(args.batch_size)
metric.update([label], [output])
if nbatch % 100 == 0:
name, acc = metric.get()
logging.info('[Epoch %d Batch %d] Training: %s=%f' %
(epoch, nbatch, name, acc))
if hvd.rank() == 0:
elapsed = time.time() - tic
speed = nbatch * args.batch_size * hvd.size() / elapsed
logging.info('Epoch[%d]\tSpeed=%.2f samples/s\tTime cost=%f',
epoch, speed, elapsed)
# Evaluate model accuracy
_, train_acc = metric.get()
name, val_acc = evaluate(model, val_data, context)
if hvd.rank() == 0:
logging.info('Epoch[%d]\tTrain: %s=%f\tValidation: %s=%f', epoch, name,
train_acc, name, val_acc)
if hvd.rank() == 0 and epoch == args.epochs - 1:
assert val_acc > 0.96, "Achieved accuracy (%f) is lower than expected\
(0.96)" % val_acc
    if hvd.rank() == 0:
        global_training_time = time.time() - global_tic
        print("Global elapsed time on training: {}".format(global_training_time))
device = context.device_type + str(num_workers)
logging.info('Device info: %s', device)
if __name__ == "__main__":
# Handling script arguments
parser = argparse.ArgumentParser(description='MXNet MNIST Distributed Example')
parser.add_argument('--batch-size', type=int, default=64,
help='training batch size (default: 64)')
parser.add_argument('--dtype', type=str, default='float32',
help='training data type (default: float32)')
parser.add_argument('--epochs', type=int, default=5,
help='number of training epochs (default: 5)')
parser.add_argument('--lr', type=float, default=0.01,
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.9,
help='SGD momentum (default: 0.9)')
parser.add_argument('--no-cuda', action='store_true', help='disable training on GPU (default: False)')
args = parser.parse_args()
if not args.no_cuda:
# Disable CUDA if there are no GPUs.
if mx.context.num_gpus() == 0:
args.no_cuda = True
logging.basicConfig(level=logging.INFO)
logging.info(args)
main()
| 36.241379 | 106 | 0.610711 | [
"Apache-2.0"
] | AlanBinu007/deep-learning-containers | test/sagemaker_tests/mxnet/training/resources/mnist/horovod_mnist.py | 7,357 | Python |
from django.db.backends.base.features import BaseDatabaseFeatures
class DatabaseFeatures(BaseDatabaseFeatures):
allow_sliced_subqueries_with_in = False
can_introspect_autofield = True
can_introspect_small_integer_field = True
can_return_id_from_insert = True
can_use_chunked_reads = False
for_update_after_from = True
greatest_least_ignores_nulls = True
has_real_datatype = True
has_select_for_update = True
has_select_for_update_nowait = True
has_select_for_update_skip_locked = True
has_zoneinfo_database = False
ignores_table_name_case = True
ignores_quoted_identifier_case = True
requires_literal_defaults = True
requires_sqlparse_for_splitting = False
supports_ignore_conflicts = False
supports_index_on_text_field = False
supports_nullable_unique_constraints = True
supports_paramstyle_pyformat = False
supports_partially_nullable_unique_constraints = False
supports_regex_backreferencing = False
supports_sequence_reset = False
supports_subqueries_in_group_by = False
supports_tablespaces = True
supports_temporal_subtraction = True
supports_timezones = False
supports_transactions = True
uses_savepoints = True
| 36.382353 | 65 | 0.800323 | [
"BSD-3-Clause"
] | dwasyl/django-mssql-backend | sql_server/pyodbc/features.py | 1,237 | Python |
import responses
from urllib.parse import urlencode
from tests.util import random_str
from tests.util import mock_http_response
from binance.spot import Spot as Client
from binance.error import ParameterRequiredError
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
asset = "BNB"
amount = "100"
params = {"asset": asset}
def test_margin_asset_without_asset():
"""Tests the API endpoint to margin asset without asset"""
client = Client(key, secret)
client.margin_asset.when.called_with("").should.throw(ParameterRequiredError)
@mock_http_response(
responses.GET, "/sapi/v1/margin/asset\\?" + urlencode(params), mock_item, 200
)
def test_margin_asset():
"""Tests the API endpoint to margin asset"""
client = Client(key, secret)
response = client.margin_asset(**params)
response.should.equal(mock_item)
| 24.666667 | 81 | 0.737613 | [
"MIT"
] | 0x000050/binance-connector-python | tests/spot/margin/test_margin_asset.py | 888 | Python |
from collections import defaultdict
class RunningAverage:
"""
    Computes exponential moving averages.
"""
def __init__(self, mix_rate: float = 0.95):
self.mix_rate = mix_rate
self.avgs = defaultdict(lambda: None)
def record(self, name: str, value: float, ignore_nan=True):
"""
Args:
name: name of value.
value: value to record.
ignore_nan: ignore nan values and do not record them (they will mess up the averages).
"""
if ignore_nan and (value != value or value is None):
return self.avgs[name]
if self.avgs.get(name) is None:
self.avgs[name] = value
else:
self.avgs[name] = self.mix_rate * self.avgs[name] + (1-self.mix_rate) * value
return self.avgs[name]
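# Illustrative usage (not part of the original module):
#   avg = RunningAverage(mix_rate=0.9)
#   avg.record("loss", 1.0)  # -> 1.0 (first value initialises the average)
#   avg.record("loss", 0.0)  # -> 0.9 * 1.0 + 0.1 * 0.0 = 0.9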
| 30.851852 | 98 | 0.591837 | [
"Apache-2.0"
] | vzhong/wrangl | wrangl/metrics/running_avg.py | 833 | Python |
import argparse
import torch
from pathlib import Path
import h5py
import logging
from types import SimpleNamespace
import cv2
import numpy as np
from tqdm import tqdm
import pprint
from . import extractors
from .utils.base_model import dynamic_load
from .utils.tools import map_tensor
'''
A set of standard configurations that can be directly selected from the command
line using their name. Each is a dictionary with the following entries:
- output: the name of the feature file that will be generated.
- model: the model configuration, as passed to a feature extractor.
- preprocessing: how to preprocess the images read from disk.
'''
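# For example, confs['superpoint_aachen'] selects the SuperPoint extractor with
# nms_radius=3 and max_keypoints=4096, and its preprocessing loads images in
# grayscale and resizes the longest side to 1024 px before extraction.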
confs = {
'superpoint_aachen': {
'output': 'feats-superpoint-n4096-r1024',
'model': {
'name': 'superpoint',
'nms_radius': 3,
'max_keypoints': 4096,
},
'preprocessing': {
'grayscale': True,
'resize_max': 1024,
},
},
'superpoint_inloc': {
'output': 'feats-superpoint-n4096-r1600',
'model': {
'name': 'superpoint',
'nms_radius': 4,
'max_keypoints': 4096,
},
'preprocessing': {
'grayscale': True,
'resize_max': 1600,
},
},
'hfnet_superpoint': {
'output': 'feats-superpoint',
'model': {
'name': 'superpoint',
'nms_radius': 4,
'max_keypoints': 4096,
},
'preprocessing': {
'grayscale': True,
'resize_max': 1600,
},
},
'd2net-ss': {
'output': 'feats-d2net-ss',
'model': {
'name': 'd2net',
'multiscale': False,
},
'preprocessing': {
'grayscale': False,
'resize_max': 1600,
},
},
}
class ImageDataset(torch.utils.data.Dataset):
default_conf = {
'globs': ['*.jpg', '*.png', '*.jpeg', '*.JPG', '*.PNG'],
'grayscale': False,
'resize_max': None,
}
def __init__(self, root, conf):
self.conf = conf = SimpleNamespace(**{**self.default_conf, **conf})
self.root = root
self.paths = []
for g in conf.globs:
self.paths += list(Path(root).glob('**/'+g))
if len(self.paths) == 0:
raise ValueError(f'Could not find any image in root: {root}.')
self.paths = sorted(list(set(self.paths)))
self.paths = [i.relative_to(root) for i in self.paths]
logging.info(f'Found {len(self.paths)} images in root {root}.')
def __getitem__(self, idx):
path = self.paths[idx]
if self.conf.grayscale:
mode = cv2.IMREAD_GRAYSCALE
else:
mode = cv2.IMREAD_COLOR
image = cv2.imread(str(self.root / path), mode)
if not self.conf.grayscale:
image = image[:, :, ::-1] # BGR to RGB
if image is None:
raise ValueError(f'Cannot read image {str(path)}.')
image = image.astype(np.float32)
size = image.shape[:2][::-1]
w, h = size
if self.conf.resize_max and max(w, h) > self.conf.resize_max:
scale = self.conf.resize_max / max(h, w)
h_new, w_new = int(round(h*scale)), int(round(w*scale))
image = cv2.resize(
image, (w_new, h_new), interpolation=cv2.INTER_LINEAR)
if self.conf.grayscale:
image = image[None]
else:
image = image.transpose((2, 0, 1)) # HxWxC to CxHxW
image = image / 255.
data = {
'name': path.as_posix(),
'image': image,
'original_size': np.array(size),
}
return data
def __len__(self):
return len(self.paths)
class FeatureExtractor(object):
def __init__(self, conf):
self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
Model = dynamic_load(extractors, conf['model']['name'])
self.model = Model(conf['model']).eval().to(self.device)
    def extract(self, image):
        # Mirror the preprocessing done by ImageDataset/DataLoader in main():
        # build a single-element batch of torch tensors before calling the model.
        image = image.astype(np.float32)
        size = image.shape[:2][::-1]
        image = image.transpose((2, 0, 1))  # HxWxC to CxHxW
        image = image / 255.
        data = {
            'image': torch.from_numpy(image)[None],
            'original_size': torch.from_numpy(np.array(size))[None],
        }
        pred = self.model(map_tensor(data, lambda x: x.to(self.device)))
        pred = {k: v[0].cpu().numpy() for k, v in pred.items()}
        pred['image_size'] = original_size = data['original_size'][0].numpy()
        if 'keypoints' in pred:
            size = np.array(data['image'].shape[-2:][::-1])
            scales = (original_size / size).astype(np.float32)
            pred['keypoints'] = (pred['keypoints'] + .5) * scales[None] - .5
        return pred
@torch.no_grad()
def main(conf, image_dir, export_dir, as_half=False):
logging.info('Extracting local features with configuration:'
f'\n{pprint.pformat(conf)}')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
Model = dynamic_load(extractors, conf['model']['name'])
model = Model(conf['model']).eval().to(device)
loader = ImageDataset(image_dir, conf['preprocessing'])
loader = torch.utils.data.DataLoader(loader, num_workers=1)
feature_path = Path(export_dir, conf['output']+'.h5')
feature_path.parent.mkdir(exist_ok=True, parents=True)
feature_file = h5py.File(str(feature_path), 'a')
for data in tqdm(loader):
pred = model(map_tensor(data, lambda x: x.to(device)))
pred = {k: v[0].cpu().numpy() for k, v in pred.items()}
pred['image_size'] = original_size = data['original_size'][0].numpy()
if 'keypoints' in pred:
size = np.array(data['image'].shape[-2:][::-1])
scales = (original_size / size).astype(np.float32)
pred['keypoints'] = (pred['keypoints'] + .5) * scales[None] - .5
if as_half:
for k in pred:
dt = pred[k].dtype
if (dt == np.float32) and (dt != np.float16):
pred[k] = pred[k].astype(np.float16)
grp = feature_file.create_group(data['name'][0])
for k, v in pred.items():
grp.create_dataset(k, data=v)
del pred
feature_file.close()
logging.info('Finished exporting features.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--image_dir', type=Path, required=True)
parser.add_argument('--export_dir', type=Path, required=True)
parser.add_argument('--conf', type=str, default='superpoint_aachen',
choices=list(confs.keys()))
args = parser.parse_args()
main(confs[args.conf], args.image_dir, args.export_dir)
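    # Example invocation (paths are illustrative); run as a module so the
    # package-relative imports resolve:
    #   python -m hloc.extract_features --image_dir datasets/images \
    #       --export_dir outputs/ --conf superpoint_aachen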
| 32.401914 | 79 | 0.562463 | [
"Apache-2.0"
] | oldshuren/Hierarchical-Localization | hloc/extract_features.py | 6,772 | Python |
class TokenNotFound(Exception):
"""
Indicates that a token could not be found in the database
"""
pass | 23.6 | 61 | 0.661017 | [
"MIT"
] | HenryYDJ/flaskAPI | backEnd/app/api/auth/exceptions.py | 118 | Python |
import os
import unittest
from telethon.tl import TLObject
from telethon.extensions import BinaryReader
class UtilsTests(unittest.TestCase):
@staticmethod
def test_binary_writer_reader():
# Test that we can read properly
data = b'\x01\x05\x00\x00\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x88A\x00\x00\x00\x00\x00\x009@\x1a\x1b\x1c\x1d\x1e\x1f ' \
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
b'\x00\x80'
with BinaryReader(data) as reader:
value = reader.read_byte()
assert value == 1, 'Example byte should be 1 but is {}'.format(value)
value = reader.read_int()
assert value == 5, 'Example integer should be 5 but is {}'.format(value)
value = reader.read_long()
assert value == 13, 'Example long integer should be 13 but is {}'.format(value)
value = reader.read_float()
assert value == 17.0, 'Example float should be 17.0 but is {}'.format(value)
value = reader.read_double()
assert value == 25.0, 'Example double should be 25.0 but is {}'.format(value)
value = reader.read(7)
assert value == bytes([26, 27, 28, 29, 30, 31, 32]), 'Example bytes should be {} but is {}' \
.format(bytes([26, 27, 28, 29, 30, 31, 32]), value)
value = reader.read_large_int(128, signed=False)
assert value == 2**127, 'Example large integer should be {} but is {}'.format(2**127, value)
@staticmethod
def test_binary_tgwriter_tgreader():
small_data = os.urandom(33)
small_data_padded = os.urandom(19) # +1 byte for length = 20 (%4 = 0)
large_data = os.urandom(999)
large_data_padded = os.urandom(1024)
data = (small_data, small_data_padded, large_data, large_data_padded)
string = 'Testing Telegram strings, this should work properly!'
serialized = b''.join(TLObject.serialize_bytes(d) for d in data) + \
TLObject.serialize_bytes(string)
with BinaryReader(serialized) as reader:
# And then try reading it without errors (it should be unharmed!)
for datum in data:
value = reader.tgread_bytes()
assert value == datum, 'Example bytes should be {} but is {}'.format(
datum, value)
value = reader.tgread_string()
assert value == string, 'Example string should be {} but is {}'.format(
string, value)
| 41.66129 | 105 | 0.593109 | [
"MIT"
] | Cafelipe/telethon | telethon_tests/utils_test.py | 2,583 | Python |
from antlr4 import *
class PlSqlBaseParser(Parser):
_isVersion10 = False
_isVersion12 = True
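    # These flags are presumably consulted by semantic predicates in the generated
    # PL/SQL parser to switch version-specific grammar rules on or off.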
def isVersion10(self):
return self._isVersion10
def isVersion12(self):
return self._isVersion12
def setVersion10(self, value):
self._isVersion10 = value
def setVersion12(self, value):
self._isVersion12 = value | 20.222222 | 34 | 0.673077 | [
"Apache-2.0"
] | Pro-v-7/code-generation | js_production_rule_gen/grammars-v4-master/sql/plsql/Python3/PlSqlBaseParser.py | 364 | Python |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from typing import Tuple
from unittest import mock
import numpy as np
import pytest
import torch
from flash import Trainer
from flash.__main__ import main
from flash.core.data.data_pipeline import DataPipeline
from flash.core.data.data_source import DefaultDataKeys
from flash.core.utilities.imports import _IMAGE_AVAILABLE
from flash.image import SemanticSegmentation
from flash.image.segmentation.data import SemanticSegmentationPreprocess
from tests.helpers.utils import _IMAGE_TESTING, _SERVE_TESTING
# ======== Mock functions ========
class DummyDataset(torch.utils.data.Dataset):
size: Tuple[int, int] = (224, 224)
num_classes: int = 8
def __getitem__(self, index):
return {
DefaultDataKeys.INPUT: torch.rand(3, *self.size),
DefaultDataKeys.TARGET: torch.randint(self.num_classes - 1, self.size),
}
def __len__(self) -> int:
return 10
# ==============================
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_smoke():
model = SemanticSegmentation(num_classes=1)
assert model is not None
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
@pytest.mark.parametrize("num_classes", [8, 256])
@pytest.mark.parametrize("img_shape", [(1, 3, 224, 192), (2, 3, 128, 256)])
def test_forward(num_classes, img_shape):
model = SemanticSegmentation(
num_classes=num_classes,
backbone="resnet50",
head="fpn",
)
B, C, H, W = img_shape
img = torch.rand(B, C, H, W)
out = model(img)
assert out.shape == (B, num_classes, H, W)
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_init_train(tmpdir):
model = SemanticSegmentation(num_classes=10)
train_dl = torch.utils.data.DataLoader(DummyDataset())
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
trainer.finetune(model, train_dl, strategy="freeze_unfreeze")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_non_existent_backbone():
with pytest.raises(KeyError):
SemanticSegmentation(2, "i am never going to implement this lol")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_freeze():
model = SemanticSegmentation(2)
model.freeze()
for p in model.backbone.parameters():
assert p.requires_grad is False
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_unfreeze():
model = SemanticSegmentation(2)
model.unfreeze()
for p in model.backbone.parameters():
assert p.requires_grad is True
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_predict_tensor():
img = torch.rand(1, 3, 64, 64)
model = SemanticSegmentation(2, backbone="mobilenetv3_large_100")
data_pipe = DataPipeline(preprocess=SemanticSegmentationPreprocess(num_classes=1))
out = model.predict(img, data_source="tensors", data_pipeline=data_pipe)
assert isinstance(out[0], list)
assert len(out[0]) == 64
assert len(out[0][0]) == 64
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_predict_numpy():
img = np.ones((1, 3, 64, 64))
model = SemanticSegmentation(2, backbone="mobilenetv3_large_100")
data_pipe = DataPipeline(preprocess=SemanticSegmentationPreprocess(num_classes=1))
out = model.predict(img, data_source="numpy", data_pipeline=data_pipe)
assert isinstance(out[0], list)
assert len(out[0]) == 64
assert len(out[0][0]) == 64
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
@pytest.mark.parametrize("jitter, args", [(torch.jit.trace, (torch.rand(1, 3, 32, 32),))])
def test_jit(tmpdir, jitter, args):
path = os.path.join(tmpdir, "test.pt")
model = SemanticSegmentation(2)
model.eval()
model = jitter(model, *args)
torch.jit.save(model, path)
model = torch.jit.load(path)
out = model(torch.rand(1, 3, 32, 32))
assert isinstance(out, torch.Tensor)
assert out.shape == torch.Size([1, 2, 32, 32])
@pytest.mark.skipif(not _SERVE_TESTING, reason="serve libraries aren't installed.")
@mock.patch("flash._IS_TESTING", True)
def test_serve():
model = SemanticSegmentation(2)
# TODO: Currently only servable once a preprocess has been attached
model._preprocess = SemanticSegmentationPreprocess()
model.eval()
model.serve()
@pytest.mark.skipif(_IMAGE_AVAILABLE, reason="image libraries are installed.")
def test_load_from_checkpoint_dependency_error():
with pytest.raises(ModuleNotFoundError, match=re.escape("'lightning-flash[image]'")):
SemanticSegmentation.load_from_checkpoint("not_a_real_checkpoint.pt")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_available_pretrained_weights():
assert SemanticSegmentation.available_pretrained_weights("resnet18") == ["imagenet", "ssl", "swsl"]
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_cli():
cli_args = ["flash", "semantic-segmentation", "--trainer.fast_dev_run", "True"]
with mock.patch("sys.argv", cli_args):
try:
main()
except SystemExit:
pass
| 34.408046 | 103 | 0.719559 | [
"Apache-2.0"
] | sumanmichael/lightning-flash | tests/image/segmentation/test_model.py | 5,987 | Python |
from engine.steps.IStep import IStep
from keras.models import Model
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
class config_model(IStep):
"""config model"""
create_Optimizer_func = None
create_loss_func = None
def __init__(self, output_channel, name, create_Optimizer_func, create_loss_func):
        super().__init__(output_channel, name)
self.create_Optimizer_func = create_Optimizer_func
self.create_loss_func = create_loss_func
def IRun(self):
        if self.create_Optimizer_func is None:
            raise Exception( "No create optimizer function!" )
        if self.create_loss_func is None:
            self.create_loss_func = self._default_categorical_crossentropy
        try:
            # the stored callables are bound methods, so call them without
            # passing self a second time
            opt = self.create_Optimizer_func()
            loss = self.create_loss_func()
model = self.output_channel['model']
"""
if self.train_only_top_layer:
for layer in base_model.layers:
layer.trainable = False
"""
model.compile(optimizer=opt, loss=loss, metrics=[self.metrics] )
        except Exception as e:
            # Exception.message does not exist in Python 3; use str(e) instead
            self.output_channel['Error'] = "fatal error occurred: " + str(e)
            self.output_channel['ErrorType'] = "fatal"
def IParseConfig( self, config_json ):
self.epochs = config_json['epochs']
self.learning_ratio = config_json['learning_ratio']
self.batch_size = config_json['batch_size']
self.metrics = config_json['metrics']
self.output_channel['epochs'] = self.epochs
self.output_channel['learning_ratio'] = self.learning_ratio
self.output_channel['batch_size'] = self.batch_size
def IDispose( self ):
pass
    def _default_categorical_crossentropy(self):
        return "categorical_crossentropy"
class config_model_adam_categorical_crossentropy(config_model):
""" config model: optimizer=Adam, loss = 'categorical_crossentropy' """
def __init__(self, output_channel, name=None ):
        super().__init__(output_channel, name, self.create_Adam, self.create_loss)
def create_Adam( self ):
return Adam(lr=self.learning_ratio, decay=self.learning_ratio / self.epochs )
def create_loss( self ):
""" create loss function """
return "categorical_crossentropy"
| 33.28 | 89 | 0.654647 | [
"MIT"
] | Borrk/DeepLearning-Engine | source/engine/steps/config_model.py | 2,496 | Python |
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowDomainQuotaResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'quotas': 'QuotaResult'
}
attribute_map = {
'quotas': 'quotas'
}
def __init__(self, quotas=None):
"""ShowDomainQuotaResponse - a model defined in huaweicloud sdk"""
super(ShowDomainQuotaResponse, self).__init__()
self._quotas = None
self.discriminator = None
if quotas is not None:
self.quotas = quotas
@property
def quotas(self):
"""Gets the quotas of this ShowDomainQuotaResponse.
:return: The quotas of this ShowDomainQuotaResponse.
:rtype: QuotaResult
"""
return self._quotas
@quotas.setter
def quotas(self, quotas):
"""Sets the quotas of this ShowDomainQuotaResponse.
:param quotas: The quotas of this ShowDomainQuotaResponse.
:type: QuotaResult
"""
self._quotas = quotas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowDomainQuotaResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 27.247788 | 79 | 0.560247 | [
"Apache-2.0"
] | huaweicloud/huaweicloud-sdk-python-v3 | huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/show_domain_quota_response.py | 3,079 | Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, [email protected]
#
#
from .basehandler import BaseHandler
from ppmessage.api.error import API_ERR
from ppmessage.core.constant import API_LEVEL
from ppmessage.db.models import PredefinedScript
import json
import logging
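# NOTE: redis_hash_to_dict (used in _move below) is not imported in this module;
# it must be imported from the project's redis helper utilities for this handler
# to run.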
class PPMovePredefinedScriptIntoGroup(BaseHandler):
def _move(self):
_request = json.loads(self.request.body)
_group_uuid = str(_request.get("group_uuid"))
_script_uuid = _request.get("script_uuid")
if _script_uuid == None or len(_script_uuid) == 0:
self.setErrorCode(API_ERR.NO_PARA)
return
_script = redis_hash_to_dict(self.application.redis, PredefinedScript, _script_uuid)
if _script == None:
logging.error("No such script: %s" % _script_uuid)
return
_old_group_uuid = str(_script.get("group_uuid"))
_key = PredefinedScript.__tablename__ + ".group_uuid." + _old_group_uuid
self.application.redis.srem(_key, _script_uuid)
_row = PredefinedScript(uuid=_script_uuid, group_uuid=_group_uuid)
_row.async_update()
_row.update_redis_keys(self.application.redis)
return
def initialize(self):
self.addPermission(app_uuid=True)
self.addPermission(api_level=API_LEVEL.PPCONSOLE)
self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
return
def _Task(self):
super(PPMovePredefinedScriptIntoGroup, self)._Task()
self._move()
return
| 30.843137 | 92 | 0.684043 | [
"MIT"
] | x-debug/ppmessage_fork | ppmessage/api/handlers/ppmovepredefinedscriptintogroup.py | 1,573 | Python |
_base_ = [
'../_base_/models/fcn_litehrxv3_no-aggregator.py', '../_base_/datasets/hrf_extra.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_step_40k_ml.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='CascadeEncoderDecoder',
num_stages=2,
decode_head=[
dict(type='FCNHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
kernel_size=1,
num_convs=0,
concat_input=False,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=False,
enable_loss_equalizer=True,
loss_decode=[
dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_jitter_prob=0.01,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.25, p=1.7),
loss_weight=4.0),
dict(type='GeneralizedDiceLoss',
smooth=1.0,
gamma=5.0,
alpha=0.5,
beta=0.5,
focal_gamma=1.0,
loss_jitter_prob=0.01,
loss_weight=4.0),
]),
dict(type='OCRHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
ocr_channels=60,
sep_conv=True,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=True,
loss_decode=[
dict(type='AMSoftmaxLoss',
scale_cfg=dict(
type='PolyScalarScheduler',
start_scale=30,
end_scale=5,
num_iters=30000,
power=1.2
),
margin_type='cos',
margin=0.5,
gamma=2.0,
t=1.0,
target_loss='ce',
pr_product=False,
conf_penalty_weight=dict(
type='PolyScalarScheduler',
start_scale=0.2,
end_scale=0.15,
num_iters=20000,
power=1.2
),
loss_jitter_prob=0.01,
border_reweighting=False,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.25, p=1.7),
loss_weight=1.0),
]),
],
auxiliary_head=[
dict(type='AuxOCRHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
ocr_channels=60,
sep_conv=True,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=True,
loss_decode=[
dict(type='AMSoftmaxLoss',
scale_cfg=dict(
type='PolyScalarScheduler',
start_scale=30,
end_scale=5,
num_iters=30000,
power=1.2
),
margin_type='cos',
margin=0.5,
gamma=2.0,
t=1.0,
target_loss='ce',
pr_product=False,
conf_penalty_weight=dict(
type='PolyScalarScheduler',
start_scale=0.2,
end_scale=0.15,
num_iters=20000,
power=1.2
),
loss_jitter_prob=0.01,
border_reweighting=False,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.25, p=1.7),
loss_weight=1.0),
]),
dict(type='FCNHead',
in_channels=[18, 60, 80, 160, 320],
in_index=[0, 1, 2, 3, 4],
input_transform='multiple_select',
channels=60,
kernel_size=1,
num_convs=0,
concat_input=False,
dropout_ratio=-1,
num_classes=2,
norm_cfg=norm_cfg,
align_corners=False,
enable_aggregator=True,
aggregator_min_channels=60,
aggregator_merge_norm=None,
aggregator_use_concat=False,
enable_out_norm=False,
enable_loss_equalizer=True,
loss_target='gt_class_borders',
loss_decode=[
dict(type='CrossEntropyLoss',
use_sigmoid=False,
loss_jitter_prob=0.01,
sampler=dict(type='MaxPoolingPixelSampler', ratio=0.1, p=1.7),
loss_weight=5.0),
dict(type='GeneralizedDiceLoss',
smooth=1.0,
gamma=5.0,
alpha=0.5,
beta=0.5,
focal_gamma=1.0,
loss_jitter_prob=0.01,
loss_weight=5.0),
]),
],
train_cfg=dict(
mix_loss=dict(
enable=False,
weight=0.1
),
mutual_loss=[
dict(type='MutualLoss',
head_a_name='decode_1',
head_b_name='aux_0',
sampler=dict(type='OHEMPixelSampler', kept_ratio=0.1),
loss_weight=2.0),
],
loss_reweighting=dict(
weights={'decode_0.loss_seg': 0.5,
'decode_1.loss_seg': 1.0,
'aux_0.loss_seg': 0.9,
'aux_1.loss_seg': 0.5,
'loss_mutual': 0.5},
momentum=0.1
),
),
test_cfg=dict(
mode='slide',
crop_size=(1024, 1024),
stride=(680, 680)
),
)
evaluation = dict(
metric='mDice',
)
| 35.695431 | 89 | 0.436576 | [
"Apache-2.0"
] | evgeny-izutov/mmsegmentation | configs/litehrnet/ocr_litehrxv3_split_aux-border_256x256_40k_hrf_amsoftmax.py | 7,032 | Python |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test spending coinbase transactions.
The coinbase transaction in block N can appear in block
N+100... so is valid in the mempool when the best block
height is N+99.
This test makes sure coinbase spends that will be mature
in the next block are accepted into the memory pool,
but less mature coinbase spends are NOT.
"""
from test_framework.test_framework import NestcoinTestFramework
from test_framework.blocktools import create_raw_transaction
from test_framework.util import assert_equal, assert_raises_rpc_error
class MempoolSpendCoinbaseTest(NestcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
# get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
b = [self.nodes[0].getblockhash(n) for n in range(101, 103)]
coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
spends_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.99) for txid in coinbase_txids]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
        assert_raises_rpc_error(-26, "bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])
# mine a block, spend_101 should get confirmed
self.nodes[0].generate(1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
| 40.719298 | 124 | 0.720379 | [
"MIT"
] | MonkeyD-Core/Nestcoin | test/functional/mempool_spend_coinbase.py | 2,321 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSqlResourceSqlStoredProcedureResult',
'AwaitableGetSqlResourceSqlStoredProcedureResult',
'get_sql_resource_sql_stored_procedure',
]
@pulumi.output_type
class GetSqlResourceSqlStoredProcedureResult:
"""
An Azure Cosmos DB storedProcedure.
"""
def __init__(__self__, id=None, location=None, name=None, resource=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource and not isinstance(resource, dict):
raise TypeError("Expected argument 'resource' to be a dict")
pulumi.set(__self__, "resource", resource)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The unique resource identifier of the ARM resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource group to which the resource belongs.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the ARM resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def resource(self) -> Optional['outputs.SqlStoredProcedureGetPropertiesResponseResource']:
return pulumi.get(self, "resource")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of Azure resource.
"""
return pulumi.get(self, "type")
class AwaitableGetSqlResourceSqlStoredProcedureResult(GetSqlResourceSqlStoredProcedureResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSqlResourceSqlStoredProcedureResult(
id=self.id,
location=self.location,
name=self.name,
resource=self.resource,
tags=self.tags,
type=self.type)
def get_sql_resource_sql_stored_procedure(account_name: Optional[str] = None,
container_name: Optional[str] = None,
database_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
stored_procedure_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlResourceSqlStoredProcedureResult:
"""
An Azure Cosmos DB storedProcedure.
:param str account_name: Cosmos DB database account name.
:param str container_name: Cosmos DB container name.
:param str database_name: Cosmos DB database name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str stored_procedure_name: Cosmos DB storedProcedure name.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['containerName'] = container_name
__args__['databaseName'] = database_name
__args__['resourceGroupName'] = resource_group_name
__args__['storedProcedureName'] = stored_procedure_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20200401:getSqlResourceSqlStoredProcedure', __args__, opts=opts, typ=GetSqlResourceSqlStoredProcedureResult).value
return AwaitableGetSqlResourceSqlStoredProcedureResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
resource=__ret__.resource,
tags=__ret__.tags,
type=__ret__.type)
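# Illustrative usage sketch (not part of the generated SDK): how this lookup
# might be called from a Pulumi program. Every resource name below is a
# hypothetical placeholder.
#
#     import pulumi
#     from pulumi_azure_native.documentdb.v20200401 import (
#         get_sql_resource_sql_stored_procedure,
#     )
#
#     sproc = get_sql_resource_sql_stored_procedure(
#         account_name="my-cosmos-account",
#         container_name="my-container",
#         database_name="my-database",
#         resource_group_name="my-resource-group",
#         stored_procedure_name="my-sproc")
#     pulumi.export("sproc_id", sproc.id)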
| 39.608696 | 505 | 0.653128 | [
"Apache-2.0"
] | polivbr/pulumi-azure-native | sdk/python/pulumi_azure_native/documentdb/v20200401/get_sql_resource_sql_stored_procedure.py | 5,466 | Python |
from kaffe.tensorflow import Network
class GoogleNet(Network):
def setup(self):
(self.feed('data')
.conv(7, 7, 64, 2, 2, name='conv1_7x7_s2')
.max_pool(3, 3, 2, 2, name='pool1_3x3_s2')
.lrn(2, 2e-05, 0.75, name='pool1_norm1')
.conv(1, 1, 64, 1, 1, name='conv2_3x3_reduce')
.conv(3, 3, 192, 1, 1, name='conv2_3x3')
.lrn(2, 2e-05, 0.75, name='conv2_norm2')
.max_pool(3, 3, 2, 2, name='pool2_3x3_s2')
.conv(1, 1, 64, 1, 1, name='inception_3a_1x1'))
(self.feed('pool2_3x3_s2')
.conv(1, 1, 96, 1, 1, name='inception_3a_3x3_reduce')
.conv(3, 3, 128, 1, 1, name='inception_3a_3x3'))
(self.feed('pool2_3x3_s2')
.conv(1, 1, 16, 1, 1, name='inception_3a_5x5_reduce')
.conv(5, 5, 32, 1, 1, name='inception_3a_5x5'))
(self.feed('pool2_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_3a_pool')
.conv(1, 1, 32, 1, 1, name='inception_3a_pool_proj'))
(self.feed('inception_3a_1x1',
'inception_3a_3x3',
'inception_3a_5x5',
'inception_3a_pool_proj')
.concat(3, name='inception_3a_output')
.conv(1, 1, 128, 1, 1, name='inception_3b_1x1'))
(self.feed('inception_3a_output')
.conv(1, 1, 128, 1, 1, name='inception_3b_3x3_reduce')
.conv(3, 3, 192, 1, 1, name='inception_3b_3x3'))
(self.feed('inception_3a_output')
.conv(1, 1, 32, 1, 1, name='inception_3b_5x5_reduce')
.conv(5, 5, 96, 1, 1, name='inception_3b_5x5'))
(self.feed('inception_3a_output')
.max_pool(3, 3, 1, 1, name='inception_3b_pool')
.conv(1, 1, 64, 1, 1, name='inception_3b_pool_proj'))
(self.feed('inception_3b_1x1',
'inception_3b_3x3',
'inception_3b_5x5',
'inception_3b_pool_proj')
.concat(3, name='inception_3b_output')
.max_pool(3, 3, 2, 2, name='pool3_3x3_s2')
.conv(1, 1, 192, 1, 1, name='inception_4a_1x1'))
(self.feed('pool3_3x3_s2')
.conv(1, 1, 96, 1, 1, name='inception_4a_3x3_reduce')
.conv(3, 3, 208, 1, 1, name='inception_4a_3x3'))
(self.feed('pool3_3x3_s2')
.conv(1, 1, 16, 1, 1, name='inception_4a_5x5_reduce')
.conv(5, 5, 48, 1, 1, name='inception_4a_5x5'))
(self.feed('pool3_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_4a_pool')
.conv(1, 1, 64, 1, 1, name='inception_4a_pool_proj'))
(self.feed('inception_4a_1x1',
'inception_4a_3x3',
'inception_4a_5x5',
'inception_4a_pool_proj')
.concat(3, name='inception_4a_output')
.conv(1, 1, 160, 1, 1, name='inception_4b_1x1'))
(self.feed('inception_4a_output')
.conv(1, 1, 112, 1, 1, name='inception_4b_3x3_reduce')
.conv(3, 3, 224, 1, 1, name='inception_4b_3x3'))
(self.feed('inception_4a_output')
.conv(1, 1, 24, 1, 1, name='inception_4b_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4b_5x5'))
(self.feed('inception_4a_output')
.max_pool(3, 3, 1, 1, name='inception_4b_pool')
.conv(1, 1, 64, 1, 1, name='inception_4b_pool_proj'))
(self.feed('inception_4b_1x1',
'inception_4b_3x3',
'inception_4b_5x5',
'inception_4b_pool_proj')
.concat(3, name='inception_4b_output')
.conv(1, 1, 128, 1, 1, name='inception_4c_1x1'))
(self.feed('inception_4b_output')
.conv(1, 1, 128, 1, 1, name='inception_4c_3x3_reduce')
.conv(3, 3, 256, 1, 1, name='inception_4c_3x3'))
(self.feed('inception_4b_output')
.conv(1, 1, 24, 1, 1, name='inception_4c_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4c_5x5'))
(self.feed('inception_4b_output')
.max_pool(3, 3, 1, 1, name='inception_4c_pool')
.conv(1, 1, 64, 1, 1, name='inception_4c_pool_proj'))
(self.feed('inception_4c_1x1',
'inception_4c_3x3',
'inception_4c_5x5',
'inception_4c_pool_proj')
.concat(3, name='inception_4c_output')
.conv(1, 1, 112, 1, 1, name='inception_4d_1x1'))
(self.feed('inception_4c_output')
.conv(1, 1, 144, 1, 1, name='inception_4d_3x3_reduce')
.conv(3, 3, 288, 1, 1, name='inception_4d_3x3'))
(self.feed('inception_4c_output')
.conv(1, 1, 32, 1, 1, name='inception_4d_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4d_5x5'))
(self.feed('inception_4c_output')
.max_pool(3, 3, 1, 1, name='inception_4d_pool')
.conv(1, 1, 64, 1, 1, name='inception_4d_pool_proj'))
(self.feed('inception_4d_1x1',
'inception_4d_3x3',
'inception_4d_5x5',
'inception_4d_pool_proj')
.concat(3, name='inception_4d_output')
.conv(1, 1, 256, 1, 1, name='inception_4e_1x1'))
(self.feed('inception_4d_output')
.conv(1, 1, 160, 1, 1, name='inception_4e_3x3_reduce')
.conv(3, 3, 320, 1, 1, name='inception_4e_3x3'))
(self.feed('inception_4d_output')
.conv(1, 1, 32, 1, 1, name='inception_4e_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_4e_5x5'))
(self.feed('inception_4d_output')
.max_pool(3, 3, 1, 1, name='inception_4e_pool')
.conv(1, 1, 128, 1, 1, name='inception_4e_pool_proj'))
(self.feed('inception_4e_1x1',
'inception_4e_3x3',
'inception_4e_5x5',
'inception_4e_pool_proj')
.concat(3, name='inception_4e_output')
.max_pool(3, 3, 2, 2, name='pool4_3x3_s2')
.conv(1, 1, 256, 1, 1, name='inception_5a_1x1'))
(self.feed('pool4_3x3_s2')
.conv(1, 1, 160, 1, 1, name='inception_5a_3x3_reduce')
.conv(3, 3, 320, 1, 1, name='inception_5a_3x3'))
(self.feed('pool4_3x3_s2')
.conv(1, 1, 32, 1, 1, name='inception_5a_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_5a_5x5'))
(self.feed('pool4_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_5a_pool')
.conv(1, 1, 128, 1, 1, name='inception_5a_pool_proj'))
(self.feed('inception_5a_1x1',
'inception_5a_3x3',
'inception_5a_5x5',
'inception_5a_pool_proj')
.concat(3, name='inception_5a_output')
.conv(1, 1, 384, 1, 1, name='inception_5b_1x1'))
(self.feed('inception_5a_output')
.conv(1, 1, 192, 1, 1, name='inception_5b_3x3_reduce')
.conv(3, 3, 384, 1, 1, name='inception_5b_3x3'))
(self.feed('inception_5a_output')
.conv(1, 1, 48, 1, 1, name='inception_5b_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_5b_5x5'))
(self.feed('inception_5a_output')
.max_pool(3, 3, 1, 1, name='inception_5b_pool')
.conv(1, 1, 128, 1, 1, name='inception_5b_pool_proj'))
(self.feed('inception_5b_1x1',
'inception_5b_3x3',
'inception_5b_5x5',
'inception_5b_pool_proj')
.concat(3, name='inception_5b_output')
.avg_pool(7, 7, 1, 1, padding='VALID', name='pool5_7x7_s1')
.fc(1000, relu=False, name='loss3_classifier')
.softmax(name='prob'))
| 41.539683 | 72 | 0.531907 | [
"MIT"
] | 1989Ryan/Semantic_SLAM | Third_Part/PSPNet_Keras_tensorflow/caffe-tensorflow/examples/imagenet/models/googlenet.py | 7,851 | Python |
from time import sleep
from random import randint
numeros = []
def sorteio():
c = 0
while True:
n = randint(0, 20)
numeros.append(n)
c = c+1
if c == 5:
break
print('=-'*20)
    print('DRAWING THE 5 VALUES OF THE LIST:', end=' ')
for n in numeros:
sleep(0.5)
print(n, end=' ')
print()
def somapar():
soma = 0
for n in numeros:
if n % 2 == 0:
soma = soma + n
sleep(2)
    print(f'Adding the EVEN values of {numeros}: {soma}')
sorteio()
somapar()
| 17.060606 | 59 | 0.50444 | [
"MIT"
] | joaoschweikart/python_projects | ex100 sorteio e soma.py | 563 | Python |
import os
import sys
import time
import _ollyapi
def addscriptpath(script):
"""
Add the path part of the scriptfile to the system path to
allow modules to be loaded from the same place.
Each path is added only once.
"""
pathfound = 0
scriptpath = os.path.dirname(script)
for pathitem in sys.path:
if pathitem == scriptpath:
pathfound = 1
break
if pathfound == 0:
sys.path.append(scriptpath)
def runscript(script):
"""
Run the specified script after adding its directory path to
system path.
This function is used by the low-level plugin code.
"""
addscriptpath(script)
watchdog.reset()
argv = sys.argv
sys.argv = [ script ]
execfile(script, globals())
sys.argv = argv
#-----------------------------------------------------------
# Take over the standard text outputs
#-----------------------------------------------------------
class MyStdOut:
"""
Dummy file-like class that receives stout and stderr
"""
def write(self, text):
# OllyDbg can't handle newlines so strip them out
fixed = text.replace('\n', '')
if fixed != '':
_ollyapi.Addtolist(0, 0, fixed)
def flush(self):
pass
def isatty(self):
return False
# Redirect stderr and stdout to the OllyDbg log window
sys.stdout = sys.stderr = MyStdOut()
# Assign a default sys.argv
sys.argv = [ "" ]
# Have to make sure Python finds our modules
sys.path.append(OLLYPYTHON_PATH)
from ollyapi import *
from ollyutils import *
#-------------------------------------------------------------
# Watchdog to catch runaway scripts after a specified timeout
#
# Usage:
# watchdog.install()
# watchdog.activate(10) # Use 10-second timeout
#
# Note: The watchdog only works for code running inside
# functions, not in global/module namespace.
#-------------------------------------------------------------
class WatchDog():
"""
Python tracer-based watchdog class
"""
def __init__(self, timeout=10):
self.timestamp = 0
self.timeout = timeout
self.installed = False
self.active = False
def install(self):
""" Install the tracer function, required for the watchdog """
if not self.installed:
sys.settrace(self.tracer)
self.installed = True
def activate(self, timeout=None):
""" Activate the watchdog, with optional timeout change """
assert self.installed, "WatchDog must be installed before activating"
if timeout:
self.timeout = timeout
self.reset()
self.active = True
def deactivate(self):
""" Deactivate the watchdog """
        self.active = False
def reset(self):
""" Reset the timer, useful for long-running scripts """
self.timestamp = time.clock()
def tracer(self, frame, event, arg):
""" Tracer function that receives the tracing events """
if not self.active:
return None
#if event == 'line':
# if time.clock() - self.timestamp > self.timeout:
# if AskYN(0, "The script has not finished in %d seconds\nWould you like to stop it now?" % self.timeout) == 1:
# raise KeyboardInterrupt
# else:
# self.timestamp = time.clock()
return self.tracer
watchdog = WatchDog(10)
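# Illustrative example (commented out), following the usage notes above: the
# timeout is only enforced for code running inside a function, so a script
# would wrap its work like this. The loop body is a hypothetical placeholder,
# and the abort itself only takes effect once the timeout check in tracer()
# is re-enabled.
#
#     def script_main():
#         watchdog.install()
#         watchdog.activate(10)   # use a 10-second timeout
#         for addr in range(0x401000, 0x402000):
#             pass                # 'line' trace events would check the timer here
#         watchdog.deactivate()
#
#     script_main()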
# Load the user's personal init file
# Plugin callback handlers
ollypython_shortcuts = []
def add_shortcut_handler(func):
# Need to also make sure the function is the right type
ollypython_shortcuts.append(func)
def remove_shortcut_handler(func):
    ollypython_shortcuts.remove(func)
| 28.071942 | 127 | 0.563557 | [
"BSD-3-Clause"
] | exported/ollypython | python/init.py | 3,902 | Python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
READS_LOCATION = 'genestack.location:reads'
READS_LINK = 'genestack.url:reads'
class Key(object):
SPACE = 'space'
FORMAT = 'format'
TYPE = 'type'
class Space(object):
BASESPACE = 'basespace'
COLORSPACE = 'colorspace'
class Format(object):
PHRED33 = 'phred33'
PHRED64 = 'phred64'
FASTA_QUAL = 'fasta-qual'
SRA = 'sra'
SFF = 'sff'
FAST5 = 'fast5'
class Type(object):
SINGLE = 'single'
PAIRED = 'paired'
PAIRED_WITH_UNPAIRED = 'paired-with-unpaired'
def compose_format_map(space, file_format, file_type):
return {Key.SPACE: space,
Key.FORMAT: file_format,
Key.TYPE: file_type}
| 21.422222 | 54 | 0.698133 | [
"MIT"
] | genestack/python-client | genestack_client/unaligned_reads.py | 964 | Python |
import os
import multiprocessing
def num_cpus():
cpus = 0
try:
cpus = os.sysconf("SC_NPROCESSORS_ONLN")
    except Exception:  # os.sysconf may be missing or the name unsupported on this platform
cpus = multiprocessing.cpu_count()
return cpus or 3
name = 'django'
bind = '0.0.0.0:8000'
workers = num_cpus() * 2 + 1
debug = True
daemon = False
loglevel = 'debug'
| 15.7 | 48 | 0.633758 | [
"MIT"
] | bradojevic/django-prod | gunicorn/gunicorn_config.py | 314 | Python |
"""The tests for the Script component."""
# pylint: disable=protected-access
import unittest
from unittest.mock import patch, Mock
from homeassistant.components import script
from homeassistant.components.script import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME, SERVICE_RELOAD, SERVICE_TOGGLE,
SERVICE_TURN_OFF, SERVICE_TURN_ON, EVENT_SCRIPT_STARTED)
from homeassistant.core import Context, callback, split_entity_id
from homeassistant.loader import bind_hass
from homeassistant.setup import setup_component, async_setup_component
from tests.common import get_test_home_assistant
ENTITY_ID = 'script.test'
@bind_hass
def turn_on(hass, entity_id, variables=None, context=None):
"""Turn script on.
This is a legacy helper method. Do not use it for new tests.
"""
_, object_id = split_entity_id(entity_id)
hass.services.call(DOMAIN, object_id, variables, context=context)
@bind_hass
def turn_off(hass, entity_id):
"""Turn script on.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def toggle(hass, entity_id):
"""Toggle the script.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_TOGGLE, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def reload(hass):
"""Reload script component.
This is a legacy helper method. Do not use it for new tests.
"""
hass.services.call(DOMAIN, SERVICE_RELOAD)
class TestScriptComponent(unittest.TestCase):
"""Test the Script component."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_with_invalid_configs(self):
"""Test setup with invalid configs."""
for value in (
{'test': {}},
{
'test hello world': {
'sequence': [{'event': 'bla'}]
}
},
{
'test': {
'sequence': {
'event': 'test_event',
'service': 'homeassistant.turn_on',
}
}
},
):
assert not setup_component(self.hass, 'script', {
'script': value
}), 'Script loaded with wrong config {}'.format(value)
assert 0 == len(self.hass.states.entity_ids('script'))
def test_turn_on_service(self):
"""Verify that the turn_on service."""
event = 'test_event'
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': [{
'delay': {
'seconds': 5
}
}, {
'event': event,
}]
}
}
})
turn_on(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
# Calling turn_on a second time should not advance the script
turn_on(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert 0 == len(events)
turn_off(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert not script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
state = self.hass.states.get('group.all_scripts')
assert state is not None
assert state.attributes.get('entity_id') == (ENTITY_ID,)
def test_toggle_service(self):
"""Test the toggling of a service."""
event = 'test_event'
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(event, record_event)
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': [{
'delay': {
'seconds': 5
}
}, {
'event': event,
}]
}
}
})
toggle(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
toggle(self.hass, ENTITY_ID)
self.hass.block_till_done()
assert not script.is_on(self.hass, ENTITY_ID)
assert 0 == len(events)
def test_passing_variables(self):
"""Test different ways of passing in variables."""
calls = []
context = Context()
@callback
def record_call(service):
"""Add recorded event to set."""
calls.append(service)
self.hass.services.register('test', 'script', record_call)
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': {
'service': 'test.script',
'data_template': {
'hello': '{{ greeting }}',
},
},
},
},
})
turn_on(self.hass, ENTITY_ID, {
'greeting': 'world'
}, context=context)
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0].context is context
assert calls[0].data['hello'] == 'world'
self.hass.services.call('script', 'test', {
'greeting': 'universe',
}, context=context)
self.hass.block_till_done()
assert len(calls) == 2
assert calls[1].context is context
assert calls[1].data['hello'] == 'universe'
def test_reload_service(self):
"""Verify that the turn_on service."""
assert setup_component(self.hass, 'script', {
'script': {
'test': {
'sequence': [{
'delay': {
'seconds': 5
}
}]
}
}
})
assert self.hass.states.get(ENTITY_ID) is not None
assert self.hass.services.has_service(script.DOMAIN, 'test')
with patch('homeassistant.config.load_yaml_config_file', return_value={
'script': {
'test2': {
'sequence': [{
'delay': {
'seconds': 5
}
}]
}}}):
with patch('homeassistant.config.find_config_file',
return_value=''):
reload(self.hass)
self.hass.block_till_done()
assert self.hass.states.get(ENTITY_ID) is None
assert not self.hass.services.has_service(script.DOMAIN, 'test')
assert self.hass.states.get("script.test2") is not None
assert self.hass.services.has_service(script.DOMAIN, 'test2')
async def test_shared_context(hass):
"""Test that the shared context is passed down the chain."""
event = 'test_event'
context = Context()
event_mock = Mock()
run_mock = Mock()
hass.bus.async_listen(event, event_mock)
hass.bus.async_listen(EVENT_SCRIPT_STARTED, run_mock)
assert await async_setup_component(hass, 'script', {
'script': {
'test': {
'sequence': [
{'event': event}
]
}
}
})
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{ATTR_ENTITY_ID: ENTITY_ID},
context=context)
await hass.async_block_till_done()
assert event_mock.call_count == 1
assert run_mock.call_count == 1
args, kwargs = run_mock.call_args
assert args[0].context == context
# Ensure event data has all attributes set
assert args[0].data.get(ATTR_NAME) == 'test'
assert args[0].data.get(ATTR_ENTITY_ID) == 'script.test'
# Ensure context carries through the event
args, kwargs = event_mock.call_args
assert args[0].context == context
# Ensure the script state shares the same context
state = hass.states.get('script.test')
assert state is not None
assert state.context == context
| 29.544554 | 79 | 0.535188 | [
"Apache-2.0"
] | 27tech/home-assistant | tests/components/test_script.py | 8,952 | Python |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Flask-Resources is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
from werkzeug.datastructures import MIMEAccept
from werkzeug.http import parse_accept_header
from flask_resources.content_negotiation import ContentNegotiator
# Test content negotiation by Accept header
# NOTE: By scoping down we remove the need to check for HTTP method
def test_choose_provided_and_accepted_mimetype():
# Should choose mimetype that is accepted by client and served by server
server_mimetypes = ["application/json", "application/marcxml+xml"]
client_mimetypes = parse_accept_header(
"text/plain,application/json,*/*", MIMEAccept
)
assert "application/json" == ContentNegotiator.match_by_accept(
server_mimetypes, client_mimetypes
)
client_mimetypes = parse_accept_header(
"text/plain,application/marcxml+xml,*/*", MIMEAccept
)
assert "application/marcxml+xml" == ContentNegotiator.match_by_accept(
server_mimetypes, client_mimetypes
)
def test_favour_specificity_over_quality():
# favour more specific but lower quality mimetype over
# less specific (e.g. wildcard) but higher quality
server_mimetypes = ["application/json", "application/marcxml+xml"]
client_mimetypes = parse_accept_header(
"text/plain, application/json;q=0.5, */*", MIMEAccept
)
assert "application/json" == ContentNegotiator.match_by_accept(
server_mimetypes, client_mimetypes
)
def test_favour_quality_over_same_specificity():
server_mimetypes = ["application/json", "application/marcxml+xml"]
client_mimetypes = parse_accept_header(
"application/json;q=0.5, application/marcxml+xml", MIMEAccept
)
assert "application/marcxml+xml" == ContentNegotiator.match_by_accept(
server_mimetypes, client_mimetypes
)
client_mimetypes = parse_accept_header(
"application/marcxml+xml;q=0.4, application/json;q=0.6", MIMEAccept
)
assert "application/json" == ContentNegotiator.match_by_accept(
server_mimetypes, client_mimetypes
)
def test_choose_default_if_no_match_and_wildcard_accepted():
# choose default if no match and client accepts wildcard
server_mimetypes = ["application/json", "application/marcxml+xml"]
client_mimetypes = parse_accept_header("text/plain,*/*", MIMEAccept)
assert "application/json" == ContentNegotiator.match_by_accept(
server_mimetypes, client_mimetypes, default="application/json"
)
def test_choose_none_if_no_match_and_wildcard_not_accepted():
server_mimetypes = ["application/json", "application/marcxml+xml"]
client_mimetypes = parse_accept_header("text/plain", MIMEAccept)
mime_type = ContentNegotiator.match_by_accept(
server_mimetypes, client_mimetypes, default="application/json"
)
assert mime_type is None
def test_choose_default_if_nothing_accepted():
server_mimetypes = ["application/json", "application/marcxml+xml"]
client_mimetypes = parse_accept_header("", MIMEAccept)
assert "application/json" == ContentNegotiator.match_by_accept(
server_mimetypes, client_mimetypes, default="application/json"
)
# Test content negotiation by URL argument
# NOTE: By scoping down we remove the need to check for HTTP method
def test_choose_query_mimetype():
formats_map = {
"json": "application/json",
"marcxml": "application/marcxml+xml",
}
fmt = "marcxml" # this is the query
assert "application/marcxml+xml" == ContentNegotiator.match_by_format(
formats_map, fmt
)
fmt = "json"
assert "application/json" == ContentNegotiator.match_by_format(formats_map, fmt)
fmt = "foo"
mime_type = ContentNegotiator.match_by_format(formats_map, fmt)
assert mime_type is None
# Test top-level ContentNegotiator.match
def test_favour_query_mimetype_over_header_mimetype():
server_mimetypes = ["application/json", "application/marcxml+xml"]
client_mimetypes = parse_accept_header("application/json", MIMEAccept)
formats_map = {
"json": "application/json",
"marcxml": "application/marcxml+xml",
}
fmt = "marcxml"
assert "application/marcxml+xml" == ContentNegotiator.match(
server_mimetypes, client_mimetypes, formats_map, fmt
)
client_mimetypes = parse_accept_header("application/marcxml+xml", MIMEAccept)
fmt = "json"
assert "application/json" == ContentNegotiator.match(
server_mimetypes, client_mimetypes, formats_map, fmt
)
def test_favour_header_mimetype_if_no_query_mimetype():
server_mimetypes = ["application/json", "application/marcxml+xml"]
client_mimetypes = parse_accept_header("application/json", MIMEAccept)
formats_map = {
"json": "application/json",
"marcxml": "application/marcxml+xml",
}
fmt = None
assert "application/json" == ContentNegotiator.match(
server_mimetypes, client_mimetypes, formats_map, fmt
)
formats_map = {}
fmt = "marcxml"
assert "application/json" == ContentNegotiator.match(
server_mimetypes, client_mimetypes, formats_map, fmt
)
def test_choose_default_if_no_query_and_no_header():
server_mimetypes = ["application/json", "application/marcxml+xml"]
client_mimetypes = parse_accept_header("", MIMEAccept)
formats_map = {
"json": "application/json",
"marcxml": "application/marcxml+xml",
}
fmt = None
assert "application/json" == ContentNegotiator.match(
server_mimetypes, client_mimetypes, formats_map, fmt, default="application/json"
)
| 32.55618 | 88 | 0.72459 | [
"MIT"
] | fenekku/flask-resources | tests/test_content_negotiation.py | 5,795 | Python |
from setuptools import setup
setup(
name='expressvpn-python',
version='1.1',
packages=['expressvpn'],
install_requires=['Flask','flask_restful'],
url='https://github.com/philipperemy/expressvpn-python',
license='MIT',
author='Philippe Remy',
author_email='[email protected]',
description='ExpressVPN - Python Wrapper'
)
| 25.928571 | 60 | 0.688705 | [
"MIT"
] | ezekri/expressvpn-python | setup.py | 363 | Python |
# The MIT License (MIT)
#
# Copyright (c) 2015-present, vn-crypto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .deribit_gateway import DeribitGateway
import importlib_metadata
__version__ = importlib_metadata.version("vnpy_deribit")
| 44.75 | 80 | 0.783719 | [
"MIT"
] | NovelResearchInvestment/vnpy_deribit | vnpy_deribit/__init__.py | 1,253 | Python |
# Copyright(c) 2016 Nippon Telegraph and Telephone Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pbr import version as pbr_version
MONITORS_VENDOR = "OpenStack Foundation"
MONITORS_PRODUCT = "OpenStack Masakari Monitors"
MONITORS_PACKAGE = None # OS distro package version suffix
loaded = False
version_info = pbr_version.VersionInfo('masakari-monitors')
version_string = version_info.version_string
def _load_config():
# Don't load in global context, since we can't assume
# these modules are accessible when distutils uses
# this module
import configparser
from oslo_config import cfg
from oslo_log import log as logging
global loaded, MONITORS_VENDOR, MONITORS_PRODUCT, MONITORS_PACKAGE
if loaded:
return
loaded = True
cfgfile = cfg.CONF.find_file("release")
if cfgfile is None:
return
try:
cfg = configparser.RawConfigParser()
cfg.read(cfgfile)
if cfg.has_option("Masakarimonitors", "vendor"):
MONITORS_VENDOR = cfg.get("Masakarimonitors", "vendor")
if cfg.has_option("Masakarimonitors", "product"):
MONITORS_PRODUCT = cfg.get("Masakarimonitors", "product")
if cfg.has_option("Masakarimonitors", "package"):
MONITORS_PACKAGE = cfg.get("Masakarimonitors", "package")
except Exception as ex:
LOG = logging.getLogger(__name__)
LOG.error("Failed to load %(cfgfile)s: %(ex)s",
{'cfgfile': cfgfile, 'ex': ex})
def vendor_string():
_load_config()
return MONITORS_VENDOR
def product_string():
_load_config()
return MONITORS_PRODUCT
def package_string():
_load_config()
return MONITORS_PACKAGE
def version_string_with_package():
if package_string() is None:
return version_info.version_string()
else:
return "%s-%s" % (version_info.version_string(), package_string())
| 27.954023 | 74 | 0.704359 | [
"Apache-2.0"
] | iorchard/masakari-monitors | masakarimonitors/version.py | 2,432 | Python |
"""sukh_site_v1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('mysite.urls')),
]
| 34.75 | 77 | 0.708633 | [
"MIT"
] | sbhuller98/main_django | sukh_site_v1/sukh_site_v1/urls.py | 834 | Python |
from selenium.webdriver.common.by import By
osd_sizes = ("512", "2048", "4096")
login = {
"ocp_page": "Overview · Red Hat OpenShift Container Platform",
"username": ("inputUsername", By.ID),
"password": ("inputPassword", By.ID),
"click_login": ("//button[text()='Log in']", By.XPATH),
"flexy_kubeadmin": ('a[title="Log in with kube:admin"]', By.CSS_SELECTOR),
}
deployment = {
"click_install_ocs": ('a[data-test-id="operator-install-btn"]', By.CSS_SELECTOR),
"choose_ocs_version": (
'a[data-test="ocs-operator-ocs-catalogsource-openshift-marketplace"]',
By.CSS_SELECTOR,
),
"search_operators": ('input[placeholder="Filter by keyword..."]', By.CSS_SELECTOR),
"operators_tab": ("//button[text()='Operators']", By.XPATH),
"operatorhub_tab": ("OperatorHub", By.LINK_TEXT),
"installed_operators_tab": ("Installed Operators", By.LINK_TEXT),
"storage_cluster_tab": (
'a[data-test-id="horizontal-link-Storage Cluster"]',
By.CSS_SELECTOR,
),
"ocs_operator_installed": (
'a[data-test-operator-row="OpenShift Container Storage"]',
By.CSS_SELECTOR,
),
"search_operator_installed": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"thin_sc": ('a[id="thin-link"]', By.CSS_SELECTOR),
"gp2_sc": ('a[id="gp2-link"]', By.CSS_SELECTOR),
"managed-premium_sc": ('a[id="managed-premium-link"]', By.CSS_SELECTOR),
"osd_size_dropdown": ('button[data-test-id="dropdown-button"]', By.CSS_SELECTOR),
"512": ('button[data-test-dropdown-menu="512Gi"]', By.CSS_SELECTOR),
"2048": ('button[data-test-dropdown-menu="2Ti"]', By.CSS_SELECTOR),
"4096": ('button[data-test-dropdown-menu="4Ti"]', By.CSS_SELECTOR),
"all_nodes": ('input[aria-label="Select all rows"]', By.CSS_SELECTOR),
"wide_encryption": ('//*[@id="cluster-wide-encryption"]', By.XPATH),
"class_encryption": ('//*[@id="storage-class-encryption"]', By.XPATH),
"advanced_encryption": ('//*[@id="advanced-encryption"]', By.XPATH),
"kms_service_name": ('//*[@id="kms-service-name"]', By.XPATH),
"kms_address": ('//*[@id="kms-address"]', By.XPATH),
"kms_address_port": ('//*[@id="kms-address-port"]', By.XPATH),
"kms_token": ('//*[@id="kms-token"]', By.XPATH),
"create_on_review": ("//button[text()='Create']", By.XPATH),
"search_ocs_installed": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"all_nodes_lso": (
'input[id="auto-detect-volume-radio-all-nodes"]',
By.CSS_SELECTOR,
),
"lv_name": ('input[id="create-lvs-volume-set-name"]', By.CSS_SELECTOR),
"sc_name": ('input[id="create-lvs-storage-class-name"]', By.CSS_SELECTOR),
"all_nodes_create_sc": ('input[id="create-lvs-radio-all-nodes"]', By.CSS_SELECTOR),
"storage_class_dropdown_lso": (
'button[id="storage-class-dropdown"]',
By.CSS_SELECTOR,
),
"localblock_sc": ('a[id="localblock-link"]', By.CSS_SELECTOR),
"choose_local_storage_version": (
'a[data-test="local-storage-operator-redhat-operators-openshift-marketplace"]',
By.CSS_SELECTOR,
),
"click_install_lso": ('a[data-test-id="operator-install-btn"]', By.CSS_SELECTOR),
"yes": ("//*[contains(text(), 'Yes')]", By.XPATH),
"next": ("//*[contains(text(), 'Next')]", By.XPATH),
}
deployment_4_6 = {
"click_install_ocs_page": ("//button[text()='Install']", By.XPATH),
"create_storage_cluster": ("//button[text()='Create Storage Cluster']", By.XPATH),
"internal_mode": ('input[value="Internal"]', By.CSS_SELECTOR),
"internal-attached_devices": (
'input[value="Internal - Attached Devices"]',
By.CSS_SELECTOR,
),
"storage_class_dropdown": (
'button[id="ceph-sc-dropdown"]',
By.CSS_SELECTOR,
),
"enable_encryption": ('//span[@class="pf-c-switch__toggle"]', By.XPATH),
"click_install_lso_page": ("//button[text()='Install']", By.XPATH),
"project_dropdown": (
'button[class="pf-c-dropdown__toggle pf-m-plain"]',
By.CSS_SELECTOR,
),
"OpenShift Container Storage": ('a[id="openshift-storage-link"]', By.CSS_SELECTOR),
"Local Storage": ('a[id="openshift-local-storage-link"]', By.CSS_SELECTOR),
}
deployment_4_7 = {
"click_install_ocs_page": ('button[data-test="install-operator"]', By.CSS_SELECTOR),
"create_storage_cluster": ('button[data-test="item-create"]', By.CSS_SELECTOR),
"internal_mode": ('input[data-test="Internal-radio-input"]', By.CSS_SELECTOR),
"internal-attached_devices": (
'input[data-test="Internal - Attached Devices-radio-input"]',
By.CSS_SELECTOR,
),
"storage_class_dropdown": (
'button[data-test="storage-class-dropdown"]',
By.CSS_SELECTOR,
),
"enable_encryption": ('input[data-test="encryption-checkbox"]', By.CSS_SELECTOR),
"click_install_lso_page": ('button[data-test="install-operator"]', By.CSS_SELECTOR),
}
generic_locators = {
"project_selector": (
'button[class="pf-c-dropdown__toggle pf-m-plain"]',
By.CSS_SELECTOR,
),
"select_openshift-storage_project": (
'a[id="openshift-storage-link"]',
By.CSS_SELECTOR,
),
"create_resource_button": ("yaml-create", By.ID),
"search_resource_field": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"first_dropdown_option": (
'a[data-test="dropdown-menu-item-link"]',
By.CSS_SELECTOR,
),
"actions": ('button[data-test-id="actions-menu-button"]', By.CSS_SELECTOR),
"confirm_action": ("confirm-action", By.ID),
"submit_form": ('button[type="submit"]', By.CSS_SELECTOR),
"ocs_operator": ('//h1[text()="OpenShift Container Storage"]', By.XPATH),
"kebab_button": ('button[data-test-id="kebab-button"', By.CSS_SELECTOR),
"resource_status": ('span[data-test="status-text"]', By.CSS_SELECTOR),
"check_first_row_checkbox": ('input[name="checkrow0"]', By.CSS_SELECTOR),
"remove_search_filter": ('button[aria-label="close"]', By.CSS_SELECTOR),
"delete_resource_kebab_button": ('//*[contains(text(), "Delete")]', By.XPATH),
}
ocs_operator_locators = {
"backingstore_page": (
'a[data-test-id="horizontal-link-Backing Store"]',
By.CSS_SELECTOR,
),
"namespacestore_page": (
'a[data-test-id="horizontal-link-Namespace Store"]',
By.CSS_SELECTOR,
),
"bucketclass_page": (
'a[data-test-id="horizontal-link-Bucket Class"]',
By.CSS_SELECTOR,
),
}
mcg_stores = {
"store_name": ('input[data-test*="store-name"]', By.CSS_SELECTOR),
"provider_dropdown": ('button[data-test*="store-provider"]', By.CSS_SELECTOR),
"aws_provider": ("AWS S3-link", By.ID),
"aws_region_dropdown": ("region", By.ID),
"us_east_2_region": ("us-east-2-link", By.ID),
"aws_secret_dropdown": ("secret-dropdown", By.ID),
"aws_secret_search_field": (
'input[data-test-id="dropdown-text-filter"]',
By.CSS_SELECTOR,
),
"target_bucket": ("target-bucket", By.ID),
}
bucketclass = {
"standard_type": ("Standard", By.ID),
"namespace_type": ("Namespace", By.ID),
"bucketclass_name": ("bucketclassname-input", By.ID),
"spread_policy": ('input[data-test="placement-policy-spread1"]', By.CSS_SELECTOR),
"mirror_policy": ('input[data-test="placement-policy-mirror1"]', By.CSS_SELECTOR),
"single_policy": ("Single", By.ID),
"multi_policy": ("Multi", By.ID),
"cache_policy": ("Cache", By.ID),
"nss_dropdown": ('button[data-test="nns-dropdown-toggle"]', By.CSS_SELECTOR),
"nss_option_template": ('button[data-test="{}"]', By.CSS_SELECTOR),
"bs_dropdown": ('button[data-test="nbs-dropdown-toggle"]', By.CSS_SELECTOR),
"first_bs_dropdown_option": (
'button[data-test="mybs-dropdown-item"]',
By.CSS_SELECTOR,
),
"ttl_input": ("ttl-input", By.ID),
"ttl_time_unit_dropdown": ("timetolive-input", By.ID),
"ttl_minute_time_unit_button": ("MIN-link", By.ID),
}
obc = {
"storageclass_dropdown": ("sc-dropdown", By.ID),
"storageclass_text_field": (
'input[placeholder="Select StorageClass"]',
By.CSS_SELECTOR,
),
"bucketclass_dropdown": ("bc-dropdown", By.ID),
"bucketclass_text_field": (
'input[placeholder="Select BucketClass"]',
By.CSS_SELECTOR,
),
"default_bucketclass": ("noobaa-default-bucket-class-link", By.ID),
"obc_name": ("obc-name", By.ID),
"first_obc_link": ('a[class="co-resource-item__resource-name"]', By.CSS_SELECTOR),
"delete_obc": (
'button[data-test-action="Delete Object Bucket Claim"]',
By.CSS_SELECTOR,
),
}
pvc = {
"pvc_project_selector": (
'button[class="pf-c-dropdown__toggle pf-m-plain"]',
By.CSS_SELECTOR,
),
"select_openshift-storage_project": (
'a[id="openshift-storage-link"]',
By.CSS_SELECTOR,
),
"pvc_create_button": ('button[data-test="item-create"]', By.CSS_SELECTOR),
"pvc_storage_class_selector": (
'button[data-test="storageclass-dropdown"]',
By.CSS_SELECTOR,
),
"storage_class_name": ('//*[text()="{}"]', By.XPATH),
"ocs-storagecluster-ceph-rbd": (
'a[id="ocs-storagecluster-ceph-rbd-link"]',
By.CSS_SELECTOR,
),
"ocs-storagecluster-cephfs": (
'a[id="ocs-storagecluster-cephfs-link"]',
By.CSS_SELECTOR,
),
"ocs-storagecluster-ceph-rbd-thick": (
"a[id='ocs-storagecluster-ceph-rbd-thick-link'] div[class='text-muted small']",
By.CSS_SELECTOR,
),
"pvc_name": ('input[data-test="pvc-name"]', By.CSS_SELECTOR),
"ReadWriteOnce": (
'input[data-test="Single User (RWO)-radio-input"]',
By.CSS_SELECTOR,
),
"ReadWriteMany": (
'input[data-test="Shared Access (RWX)-radio-input"]',
By.CSS_SELECTOR,
),
"ReadOnlyMany": ('input[data-test="Read Only (ROX)-radio-input"]', By.CSS_SELECTOR),
"pvc_size": ('input[data-test="pvc-size"]', By.CSS_SELECTOR),
"pvc_create": ('button[data-test="create-pvc"]', By.CSS_SELECTOR),
"pvc_actions": ('button[data-test-id="actions-menu-button"]', By.CSS_SELECTOR),
"pvc_delete": (
'button[data-test-action="Delete PersistentVolumeClaim"]',
By.CSS_SELECTOR,
),
"confirm_pvc_deletion": ('button[data-test="confirm-action"]', By.CSS_SELECTOR),
"search_pvc": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
}
pvc_4_7 = {
"test-pvc-fs": ('a[data-test-id="test-pvc-fs"]', By.CSS_SELECTOR),
"test-pvc-rbd": ("a[title='test-pvc-rbd']", By.CSS_SELECTOR),
"Block": ("input[value='Block']", By.CSS_SELECTOR),
"Filesystem": ("input[value='Filesystem']", By.CSS_SELECTOR),
"search-project": ("input[placeholder='Select Project...']", By.CSS_SELECTOR),
"expand_pvc": ("button[data-test-action='Expand PVC']", By.CSS_SELECTOR),
"resize-value": ("//input[@name='requestSizeValue']", By.XPATH),
"expand-btn": ("#confirm-action", By.CSS_SELECTOR),
"pvc-status": (
"dd[data-test-id='pvc-status'] span[data-test='status-text']",
By.CSS_SELECTOR,
),
"test-project-link": ("//a[normalize-space()='{}']", By.XPATH),
"expected-capacity": (
"//dd[contains(text(),'{}') and @data-test='pvc-requested-capacity']",
By.XPATH,
),
"new-capacity": (
"//dd[contains(text(),'{}') and @data-test-id='pvc-capacity']",
By.XPATH,
),
}
pvc_4_8 = {
"ReadWriteMany": ("input[value='ReadWriteMany']", By.CSS_SELECTOR),
"pvc_actions": ("button[aria-label='Actions']", By.CSS_SELECTOR),
"ReadWriteOnce": ("input[value='ReadWriteOnce']", By.CSS_SELECTOR),
"test-pvc-fs": ("a[title='test-pvc-fs']", By.CSS_SELECTOR),
"test-pvc-rbd-thick": ("a[title='test-pvc-rbd-thick']", By.CSS_SELECTOR),
"resize-pending": (
"div[class ='col-xs-4 col-sm-2 col-md-2'] span",
By.CSS_SELECTOR,
),
"search_pvc": ("input[placeholder='Search by name...']", By.CSS_SELECTOR),
}
page_nav = {
"Home": ("//button[text()='Home']", By.XPATH),
"overview_page": ("Overview", By.LINK_TEXT),
"projects_page": ("Projects", By.LINK_TEXT),
"search_page": ("Search", By.LINK_TEXT),
"explore_page": ("Explore", By.LINK_TEXT),
"events_page": ("Events", By.LINK_TEXT),
"Operators": ("//button[text()='Operators']", By.XPATH),
"operatorhub_page": ("OperatorHub", By.LINK_TEXT),
"installed_operators_page": ("Installed Operators", By.LINK_TEXT),
"Storage": ("//button[text()='Storage']", By.XPATH),
"persistentvolumes_page": ("PersistentVolumes", By.LINK_TEXT),
"persistentvolumeclaims_page": ("PersistentVolumeClaims", By.LINK_TEXT),
"storageclasses_page": ("StorageClasses", By.LINK_TEXT),
"volumesnapshots_page": ("VolumeSnapshots", By.LINK_TEXT),
"volumesnapshotclasses_page": ("VolumeSnapshotClasses", By.LINK_TEXT),
"volumesnapshotcontents_page": ("VolumeSnapshotContents", By.LINK_TEXT),
"object_buckets_page": ("Object Buckets", By.LINK_TEXT),
"object_bucket_claims_page": ("Object Bucket Claims", By.LINK_TEXT),
"Monitoring": ("//button[text()='Monitoring']", By.XPATH),
"alerting_page": ("Alerting", By.LINK_TEXT),
"metrics_page": ("Metrics", By.LINK_TEXT),
"dashboards_page": ("Dashboards", By.LINK_TEXT),
"Workloads": ("//button[text()='Workloads']", By.XPATH),
"Pods": ("Pods", By.LINK_TEXT),
"quickstarts": ('a[href="/quickstart"]', By.CSS_SELECTOR),
"block_pool_link": (
'a[data-test-id="horizontal-link-Block Pools"]',
By.CSS_SELECTOR,
),
}
add_capacity = {
"ocs_operator": (
'a[data-test-operator-row="OpenShift Container Storage"]',
By.CSS_SELECTOR,
),
"storage_cluster_tab": (
'a[data-test-id="horizontal-link-Storage Cluster"]',
By.CSS_SELECTOR,
),
"kebab_storage_cluster": ('button[data-test-id="kebab-button"', By.CSS_SELECTOR),
"add_capacity_button": ('button[data-test-action="Add Capacity"]', By.CSS_SELECTOR),
"select_sc_add_capacity": (
'button[data-test="add-cap-sc-dropdown"]',
By.CSS_SELECTOR,
),
"thin_sc": ('a[id="thin-link"]', By.CSS_SELECTOR),
"gp2_sc": ('a[id="gp2-link"]', By.CSS_SELECTOR),
"managed-premium_sc": ('a[id="managed-premium-link"]', By.CSS_SELECTOR),
"confirm_add_capacity": ('button[data-test="confirm-action"', By.CSS_SELECTOR),
"filter_pods": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
}
block_pool = {
"create_block_pool": ("Create BlockPool", By.LINK_TEXT),
"new_pool_name": (
'input[data-test="new-pool-name-textbox"]',
By.CSS_SELECTOR,
),
"first_select_replica": ('button[data-test="replica-dropdown"]', By.CSS_SELECTOR),
"second_select_replica_2": ("//button[text()='2-way Replication']", By.XPATH),
"second_select_replica_3": ("//button[text()='3-way Replication']", By.XPATH),
"conpression_checkbox": (
'input[data-test="compression-checkbox"]',
By.CSS_SELECTOR,
),
"pool_confirm_create": ('button[data-test-id="confirm-action"]', By.CSS_SELECTOR),
"actions_inside_pool": ('button[aria-label="Actions"]', By.CSS_SELECTOR),
"edit_pool_inside_pool": (
'button[data-test-action="Edit BlockPool"]',
By.CSS_SELECTOR,
),
"delete_pool_inside_pool": (
'button[data-test-action="Delete BlockPool"]',
By.CSS_SELECTOR,
),
"confirm_delete_inside_pool": ("//button[text()='Delete']", By.XPATH),
"replica_dropdown_edit": ('button[data-test="replica-dropdown"]', By.CSS_SELECTOR),
"compression_checkbox_edit": (
'input[data-test="compression-checkbox"]',
By.CSS_SELECTOR,
),
"save_pool_edit": ('button[data-test-id="confirm-action"]', By.CSS_SELECTOR),
"pool_state_inside_pool": ('span[data-test="status-text"]', By.CSS_SELECTOR),
}
storageclass = {
"create_storageclass_button": ("Create StorageClass", By.LINK_TEXT),
"input_storageclass_name": ('input[id="storage-class-name"]', By.CSS_SELECTOR),
"provisioner_dropdown": (
'button[data-test="storage-class-provisioner-dropdown"]',
By.CSS_SELECTOR,
),
"rbd_provisioner": ("openshift-storage.rbd.csi.ceph.com", By.LINK_TEXT),
"pool_dropdown": ('button[id="pool-dropdown-id"]', By.CSS_SELECTOR),
"save_storageclass": ('button[id="save-changes"]', By.CSS_SELECTOR),
"action_inside_storageclass": (
'button[data-test-id="actions-menu-button"]',
By.CSS_SELECTOR,
),
"delete_inside_storageclass": (
'button[data-test-action="Delete StorageClass"]',
By.CSS_SELECTOR,
),
"confirm_delete_inside_storageclass": ("//button[text()='Delete']", By.XPATH),
}
validation = {
"object_service_button": ("//button[text()='Object Service']", By.XPATH),
"data_resiliency_button": ("//button[text()='Data Resiliency']", By.XPATH),
"search_ocs_installed": ('input[data-test-id="item-filter"]', By.CSS_SELECTOR),
"ocs_operator_installed": (
'a[data-test-operator-row="OpenShift Container Storage"]',
By.CSS_SELECTOR,
),
"osc_subscription_tab": (
'a[data-test-id="horizontal-link-olm~Subscription"]',
By.CSS_SELECTOR,
),
"osc_all_instances_tab": (
'a[data-test-id="horizontal-link-olm~All instances"]',
By.CSS_SELECTOR,
),
"osc_storage_cluster_tab": (
'a[data-test-id="horizontal-link-Storage Cluster"]',
By.CSS_SELECTOR,
),
"osc_backing_store_tab": (
'a[data-test-id="horizontal-link-Backing Store"]',
By.CSS_SELECTOR,
),
"osc_bucket_class_tab": (
'a[data-test-id="horizontal-link-Bucket Class"]',
By.CSS_SELECTOR,
),
}
validation_4_7 = {
"object_service_tab": (
'a[data-test-id="horizontal-link-Object Service"]',
By.CSS_SELECTOR,
),
"persistent_storage_tab": (
'a[data-test-id="horizontal-link-Persistent Storage"]',
By.CSS_SELECTOR,
),
}
validation_4_8 = {
"object_service_tab": (
'a[data-test-id="horizontal-link-Object"]',
By.CSS_SELECTOR,
),
"persistent_storage_tab": (
'a[data-test-id="horizontal-link-Block and File"]',
By.CSS_SELECTOR,
),
}
locators = {
"4.8": {
"login": login,
"page": page_nav,
"deployment": {**deployment, **deployment_4_7},
"generic": generic_locators,
"ocs_operator": ocs_operator_locators,
"obc": obc,
"bucketclass": bucketclass,
"mcg_stores": mcg_stores,
"pvc": {**pvc, **pvc_4_7, **pvc_4_8},
"validation": {**validation, **validation_4_8},
"add_capacity": add_capacity,
"block_pool": block_pool,
"storageclass": storageclass,
},
"4.7": {
"login": login,
"page": page_nav,
"deployment": {**deployment, **deployment_4_7},
"pvc": {**pvc, **pvc_4_7},
"add_capacity": add_capacity,
"validation": {**validation, **validation_4_7},
},
"4.6": {
"login": login,
"page": page_nav,
"deployment": {**deployment, **deployment_4_6},
"pvc": pvc,
},
}
| 40.0125 | 88 | 0.6272 | [
"MIT"
] | keesturam/ocs-ci | ocs_ci/ocs/ui/views.py | 19,207 | Python |
"""
``name_index`` builds an inverted index mapping words to sets of Unicode
characters which contain that word in their names. For example::
>>> index = name_index(32, 65)
>>> sorted(index['SIGN'])
['#', '$', '%', '+', '<', '=', '>']
>>> sorted(index['DIGIT'])
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
>>> index['DIGIT'] & index['EIGHT']
{'8'}
"""
# tag::CHARINDEX[]
import sys
import re
import unicodedata
from typing import Dict, Set, Iterator
RE_WORD = re.compile(r'\w+')
STOP_CODE = sys.maxunicode + 1
def tokenize(text: str) -> Iterator[str]: # <1>
"""return iterable of uppercased words"""
for match in RE_WORD.finditer(text):
yield match.group().upper()
def name_index(start: int = 32, end: int = STOP_CODE) -> Dict[str, Set[str]]:
index: Dict[str, Set[str]] = {} # <2>
for char in (chr(i) for i in range(start, end)):
if name := unicodedata.name(char, ''): # <3>
for word in tokenize(name):
index.setdefault(word, set()).add(char)
return index
# end::CHARINDEX[]
| 30.027778 | 77 | 0.572618 | [
"MIT"
] | eumiro/example-code-2e | 08-def-type-hints/charindex.py | 1,081 | Python |
from typing import Any, Dict, Mapping, Optional, Set
from pydantic import validator
from transformer.transformers.abstract import ExtraHashableModel, Transformer
from transformer.transformers.flatters import Flatter, FlatterConfig, Unflatter
class ReportMissingData(Exception):
def __init__(self, keys: Set[str]):
self.keys = keys
self.message = f"The keys f{self.keys} are missing in the payload."
class MapKeysConfig(ExtraHashableModel):
"""
This is the configuration for the MapKeys transformer.
In order to call this transformer pass the name "map-keys" and a mapping dict.
"""
mapping: Mapping[str, str]
preserve_unmapped: bool = True
ignore_missing_data: bool = True
level_separator: str = "."
return_plain: bool = False
@validator("mapping")
def backwards_compatibility(cls, mapping: Mapping[str, str]):
return {
key.replace(".$[", "["): value.replace(".$[", "[")
for key, value in mapping.items()
}
class MapKeys(Transformer[MapKeysConfig]):
"""
    MapKeys is a complete dict re-designer.
    It lets you rename keys and restructure the entire dict. It can create new nested data where there was none
    and flatten data that was previously nested, all while preserving the data from the input
    dictionary.
"""
def __init__(self, config: MapKeysConfig) -> None:
super().__init__(config)
self.__flatters_config = FlatterConfig(level_separator=config.level_separator)
self.__flatter = Flatter(self.__flatters_config)
self.__unflatter = Unflatter(self.__flatters_config)
def transform(
self, payload: Dict[str, Any], metadata: Optional[Dict[str, Any]] = None
):
"""
The mapping is done in 4 major steps:
1. Flattens the data.
        2. Metadata Replacers:
            Some key mapping parameters are specified in the metadata. Keys that contain placeholders like
            @{metadata_key} will be substituted by the value of the specified metadata key.
        3. Map Data.
            At this point the keys of the mapping inside config match the keys of the flat payload. That is, the
            payload and self._config.mapping have matching keys. Not every key in the payload is necessarily in
            self._config.mapping; what happens to those extra keys is controlled by
            self._config.preserve_unmapped. If the opposite happens and self._config.mapping has keys not present
            in the payload, the configuration self._config.ignore_missing_data decides what should be done.
4. Unflattens the data.
:return: transformed and restructured data.
"""
flat_data = self.__flatter.transform(payload)
translated_dict: Dict = {}
map_keys_set = set(self._config.mapping.keys())
for map_key in map_keys_set.intersection(flat_data.keys()):
map_value = self._config.mapping[map_key]
if metadata is not None:
for meta_key, meta_value in metadata.items():
map_key = map_key.replace("@{" + meta_key + "}", str(meta_value))
map_value = map_value.replace(
"@{" + meta_key + "}", str(meta_value)
)
translated_dict[map_value] = flat_data[map_key]
if not self._config.ignore_missing_data:
missing_keys = map_keys_set - flat_data.keys()
if missing_keys:
raise ReportMissingData(missing_keys)
if self._config.preserve_unmapped:
for unmapped_key in flat_data.keys() - self._config.mapping.keys():
translated_dict[unmapped_key] = flat_data[unmapped_key]
if self._config.return_plain:
return translated_dict, metadata
if metadata is None:
return self.__unflatter.transform(translated_dict)
return self.__unflatter.transform(translated_dict, metadata)
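# Editorial usage sketch (not part of the upstream package), assuming the Transformer
# base class and the pydantic config behave as defined above.
if __name__ == "__main__":
    config = MapKeysConfig(mapping={"user.name": "name", "user.address.city": "city"})
    transformer = MapKeys(config)
    payload = {"user": {"name": "Ada", "address": {"city": "London"}}}
    # Flattens to {"user.name": ..., "user.address.city": ...}, renames the keys per the
    # mapping, then unflattens; the expected result is roughly {"name": "Ada", "city": "London"}.
    print(transformer.transform(payload))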
| 40.4 | 117 | 0.661634 | [
"MIT"
] | santunioni/Transformer | transformer/transformers/map_keys.py | 4,040 | Python |
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import argparse
import pandas as pd
from tqdm.auto import tqdm
from datetime import datetime
import seaborn as sns
import matplotlib.pyplot as plt
from utils.functions import compute_exact_tau, compute_exact_tau_distr
from models.toy_gmm_multid import ToyGMMMultiDLoader
model_dict = {
'gmm': ToyGMMMultiDLoader
}
def main(d_obs, run, rep, alpha, sample_size_obs, n_sampled_true_tau, debug=False, seed=7, verbose=False,
marginal=False, size_marginal=1000, size_check=10000):
# Changing values if debugging
rep = rep if not debug else 2
n_sampled_true_tau = n_sampled_true_tau if not debug else 10
model_obj = model_dict[run](d_obs=d_obs, marginal=marginal, size_marginal=size_marginal)
# Get the correct functions
grid_param = model_obj.grid
gen_obs_func = model_obj.sample_sim
gen_sample_func = model_obj.generate_sample
or_func = model_obj.compute_exact_or
t0_grid = model_obj.pred_grid
tp_func = model_obj.compute_exact_prob
t0_val = model_obj.true_param
    # Loop over repetitions
    # For each repetition we build the exact confidence intervals and record
    # whether each grid point falls inside or not.
np.random.seed(seed)
out_val = []
out_cols = ['d_obs', 'run', 'rep', 'classifier', 'sample_size_obs', 't0_true_val', 'theta_0_current', 'on_true_t0',
'in_true_interval', 'size_true_int', 'true_entropy']
pbar = tqdm(total=rep, desc='Toy Example for Simulations, n=%s' % sample_size_obs)
for jj in range(rep):
        # Create a sample to estimate the entropy over
sample_check = gen_sample_func(sample_size=size_check, marginal=False)
theta_vec = sample_check[:, :model_obj.d]
x_vec = sample_check[:, (model_obj.d + 1):]
bern_vec = sample_check[:, model_obj.d]
true_prob_vec = tp_func(theta_vec=theta_vec, x_vec=x_vec)
entropy_est = -np.average([np.log(true_prob_vec[kk]) if el == 1
else np.log(1 - true_prob_vec[kk])
for kk, el in enumerate(bern_vec)])
# TRUE CONFIDENCE INTERVAL
# print('------ Calculate true Confidence Interval')
# Generates samples for each t0 values, so to be able to check both coverage and power
x_obs = gen_obs_func(sample_size=sample_size_obs, true_param=t0_val)
# # Calculate the true LRT value
tau_obs = np.array([compute_exact_tau(
or_func=or_func, x_obs=x_obs, t0_val=theta_0, t1_linspace=grid_param) for theta_0 in t0_grid])
tau_distr = np.apply_along_axis(arr=t0_grid.reshape(-1, model_obj.d), axis=1,
func1d=lambda t0: compute_exact_tau_distr(
gen_obs_func=gen_obs_func, or_func=or_func, t0_val=t0,
t1_linspace=grid_param, n_sampled=n_sampled_true_tau,
sample_size_obs=sample_size_obs, d_obs=model_obj.d_obs))
assert tau_distr.shape == (t0_grid.shape[0], n_sampled_true_tau)
quantile_pred_tau = np.quantile(a=tau_distr, q=alpha, axis=1)
true_interval = (tau_obs > quantile_pred_tau).astype(int)
true_interval_size = (np.sum(true_interval) / true_interval.shape[0])
# At this point all it's left is to record
for kk, theta_0_current in enumerate(t0_grid):
out_val.append([
d_obs, run, jj, 'Exact', sample_size_obs,
t0_val, theta_0_current, int(t0_val == theta_0_current),
true_interval[kk], true_interval_size, entropy_est
])
pbar.update(1)
# Saving the results
out_df = pd.DataFrame.from_records(data=out_val, index=range(len(out_val)), columns=out_cols)
out_dir = 'sims/classifier_power_multid/'
out_filename = 'truth_classifier_power_multid%s_%s_%srep_alpha%s_sampleobs%s_t0val%s_%ssampletau_%s.csv' % (
d_obs, run, rep, str(alpha).replace('.', '-'), sample_size_obs,
str(t0_val).replace('.', '-'), n_sampled_true_tau,
datetime.strftime(datetime.today(), '%Y-%m-%d')
)
out_df.to_csv(out_dir + out_filename)
# Print results
cov_df = out_df[out_df['on_true_t0'] == 1][['classifier', 'in_true_interval', 'true_entropy', 'size_true_int']]
print(cov_df.groupby(['classifier']).agg({'in_true_interval': [np.average],
'size_true_int': [np.average, np.std],
'true_entropy': [np.average, np.std]}))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', action="store", type=int, default=7,
help='Random State')
parser.add_argument('--d_obs', action="store", type=int, default=2,
help='Dimensionality of the observed data (feature space)')
parser.add_argument('--rep', action="store", type=int, default=10,
help='Number of Repetitions for calculating the Pinball loss')
parser.add_argument('--alpha', action="store", type=float, default=0.1,
help='Statistical confidence level')
parser.add_argument('--run', action="store", type=str, default='gmm',
help='Problem to run')
parser.add_argument('--debug', action='store_true', default=False,
help='If true, a very small value for the sample sizes is fit to make sure the'
'file can run quickly for debugging purposes')
parser.add_argument('--verbose', action='store_true', default=False,
help='If true, logs are printed to the terminal')
parser.add_argument('--sample_size_obs', action="store", type=int, default=10,
help='Sample size of the actual observed data.')
parser.add_argument('--n_sampled_true_tau', action="store", type=int, default=100,
help='Number of Monte Carlo samples for calculating distribution of tau sample.')
argument_parsed = parser.parse_args()
main(
d_obs=argument_parsed.d_obs,
run=argument_parsed.run,
rep=argument_parsed.rep,
alpha=argument_parsed.alpha,
debug=argument_parsed.debug,
sample_size_obs=argument_parsed.sample_size_obs,
seed=argument_parsed.seed,
verbose=argument_parsed.verbose,
n_sampled_true_tau=argument_parsed.n_sampled_true_tau
)
| 47.784173 | 119 | 0.647094 | [
"MIT"
] | Mr8ND/ACORE-LFI | acore/classifier_power_multid_truth.py | 6,642 | Python |
"""
"""
# Created on 2016.08.09
#
# Author: Giovanni Cannata
#
# Copyright 2016, 2017 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from ... import SEQUENCE_TYPES, STRING_TYPES
from .formatters import format_time
# Validators return True if value is valid, False if value is not valid,
# or a value different from True and False that is a valid value to substitute for the input value
def check_type(input_value, value_type):
if isinstance(input_value, value_type):
return True
if isinstance(input_value, SEQUENCE_TYPES):
for value in input_value:
if not isinstance(value, value_type):
return False
return True
return False
def always_valid(name, input_value):
return True
def validate_generic_single_value(name, input_value):
if not isinstance(input_value, SEQUENCE_TYPES):
return True
if len(input_value) == 1:
return True
return False
def validate_integer(name, input_value):
return check_type(input_value, int)
def validate_bytes(name, input_value):
return check_type(input_value, bytes)
def validate_boolean(name, input_value):
    # it could be a real bool or the string TRUE or FALSE; only a single value is allowed
if validate_generic_single_value(name, input_value):
if isinstance(input_value, SEQUENCE_TYPES):
input_value = input_value[0]
if isinstance(input_value, bool):
if input_value:
return 'TRUE'
else:
return 'FALSE'
if isinstance(input_value, STRING_TYPES):
if input_value.lower() == 'true':
return 'TRUE'
elif input_value.lower() == 'false':
return 'FALSE'
return False
def validate_time(name, input_value):
# if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC
changed = False
sequence = True # indicates if a sequence must be returned
if not isinstance(input_value, SEQUENCE_TYPES):
sequence = False
input_value = [input_value]
valid_values = []
for element in input_value:
        if isinstance(element, STRING_TYPES):  # check whether it is already a Generalized Time
if isinstance(format_time(element), datetime): # valid Generalized Time string
valid_values.append(element)
else:
return False
elif isinstance(element, datetime):
changed = True
if element.tzinfo: # a datetime with a timezone
valid_values.append(element.strftime('%Y%m%d%H%M%SZ%z'))
else: # datetime without timezone, assumed local and adjusted to UTC
offset = datetime.now() - datetime.utcnow()
valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ'))
else:
return False
if changed:
if sequence:
return valid_values
else:
return valid_values[0]
else:
return True
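if __name__ == '__main__':
    # Editorial sketch (not part of the upstream ldap3 module) illustrating the validator
    # contract documented above: True = valid as-is, False = invalid, any other return
    # value = normalized value to substitute for the input.
    assert validate_integer('age', 42) is True
    assert validate_boolean('enabled', True) == 'TRUE'  # substituted value
    assert validate_boolean('enabled', 'maybe') is False
    assert validate_generic_single_value('cn', ['a', 'b']) is False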
| 32.714286 | 103 | 0.645261 | [
"MIT"
] | crav7/ProjectDjango | lib/python2.7/site-packages/ldap3/protocol/formatters/validators.py | 3,893 | Python |
import py
from ctypes import *
from support import BaseCTypesTestChecker
import os
import ctypes
signed_int_types = (c_byte, c_short, c_int, c_long, c_longlong)
unsigned_int_types = (c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong)
int_types = unsigned_int_types + signed_int_types
def setup_module(mod):
import conftest
_ctypes_test = str(conftest.sofile)
func = CDLL(_ctypes_test).unpack_bitfields
func.argtypes = POINTER(BITS), c_char
mod.func = func
class BITS(Structure):
_fields_ = [("A", c_int, 1),
("B", c_int, 2),
("C", c_int, 3),
("D", c_int, 4),
("E", c_int, 5),
("F", c_int, 6),
("G", c_int, 7),
("H", c_int, 8),
("I", c_int, 9),
("M", c_short, 1),
("N", c_short, 2),
("O", c_short, 3),
("P", c_short, 4),
("Q", c_short, 5),
("R", c_short, 6),
("S", c_short, 7)]
class TestC:
def test_ints(self):
for i in range(512):
for name in "ABCDEFGHI":
b = BITS()
setattr(b, name, i)
assert (name, i, getattr(b, name)) == (name, i, func(byref(b), name))
def test_shorts(self):
for i in range(256):
for name in "MNOPQRS":
b = BITS()
setattr(b, name, i)
assert (name, i, getattr(b, name)) == (name, i, func(byref(b), name))
class TestBitField:
def test_longlong(self):
class X(Structure):
_fields_ = [("a", c_longlong, 1),
("b", c_longlong, 62),
("c", c_longlong, 1)]
assert sizeof(X) == sizeof(c_longlong)
x = X()
x.a, x.b, x.c = -1, 7, -1
assert (x.a, x.b, x.c) == (-1, 7, -1)
x = X()
x.a, x.b, x.c = -1, -7, -1
assert (x.a, x.b, x.c) == (-1, -7, -1)
def test_ulonglong(self):
class X(Structure):
_fields_ = [("a", c_ulonglong, 1),
("b", c_ulonglong, 62),
("c", c_ulonglong, 1)]
assert sizeof(X) == sizeof(c_longlong)
x = X()
assert (x.a, x.b, x.c) == (0, 0, 0)
x.a, x.b, x.c = 7, 2305843009213693953, 7
assert (x.a, x.b, x.c) == (1, 2305843009213693953, 1)
def test_signed(self):
for c_typ in signed_int_types:
class X(Structure):
_fields_ = [("dummy", c_typ),
("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
assert sizeof(X) == sizeof(c_typ)*2
x = X()
assert (c_typ, x.a, x.b, x.c) == (c_typ, 0, 0, 0)
x.a = -1
assert (c_typ, x.a, x.b, x.c) == (c_typ, -1, 0, 0)
x.a, x.b = 0, -1
assert (c_typ, x.a, x.b, x.c) == (c_typ, 0, -1, 0)
def test_unsigned(self):
for c_typ in unsigned_int_types:
class X(Structure):
_fields_ = [("a", c_typ, 3),
("b", c_typ, 3),
("c", c_typ, 1)]
assert sizeof(X) == sizeof(c_typ)
x = X()
assert (c_typ, x.a, x.b, x.c) == (c_typ, 0, 0, 0)
x.a = -1
assert (c_typ, x.a, x.b, x.c) == (c_typ, 7, 0, 0)
x.a, x.b = 0, -1
assert (c_typ, x.a, x.b, x.c) == (c_typ, 0, 7, 0)
def fail_fields(self, *fields):
return self.get_except(type(Structure), "X", (),
{"_fields_": fields})
def test_nonint_types(self):
# bit fields are not allowed on non-integer types.
result = self.fail_fields(("a", c_char_p, 1))
assert result == (TypeError, 'bit fields not allowed for type c_char_p')
result = self.fail_fields(("a", c_void_p, 1))
assert result == (TypeError, 'bit fields not allowed for type c_void_p')
if c_int != c_long:
result = self.fail_fields(("a", POINTER(c_int), 1))
assert result == (TypeError, 'bit fields not allowed for type LP_c_int')
result = self.fail_fields(("a", c_char, 1))
assert result == (TypeError, 'bit fields not allowed for type c_char')
try:
c_wchar
except NameError:
pass
else:
result = self.fail_fields(("a", c_wchar, 1))
assert result == (TypeError, 'bit fields not allowed for type c_wchar')
class Dummy(Structure):
_fields_ = []
result = self.fail_fields(("a", Dummy, 1))
assert result == (TypeError, 'bit fields not allowed for type Dummy')
def test_single_bitfield_size(self):
for c_typ in int_types:
result = self.fail_fields(("a", c_typ, -1))
assert result == (ValueError, 'number of bits invalid for bit field')
result = self.fail_fields(("a", c_typ, 0))
assert result == (ValueError, 'number of bits invalid for bit field')
class X(Structure):
_fields_ = [("a", c_typ, 1)]
assert sizeof(X) == sizeof(c_typ)
class X(Structure):
_fields_ = [("a", c_typ, sizeof(c_typ)*8)]
assert sizeof(X) == sizeof(c_typ)
result = self.fail_fields(("a", c_typ, sizeof(c_typ)*8 + 1))
assert result == (ValueError, 'number of bits invalid for bit field')
def test_multi_bitfields_size(self):
class X(Structure):
_fields_ = [("a", c_short, 1),
("b", c_short, 14),
("c", c_short, 1)]
assert sizeof(X) == sizeof(c_short)
class X(Structure):
_fields_ = [("a", c_short, 1),
("a1", c_short),
("b", c_short, 14),
("c", c_short, 1)]
assert sizeof(X) == sizeof(c_short)*3
assert X.a.offset == 0
assert X.a1.offset == sizeof(c_short)
assert X.b.offset == sizeof(c_short)*2
assert X.c.offset == sizeof(c_short)*2
class X(Structure):
_fields_ = [("a", c_short, 3),
("b", c_short, 14),
("c", c_short, 14)]
assert sizeof(X) == sizeof(c_short)*3
assert X.a.offset == sizeof(c_short)*0
assert X.b.offset == sizeof(c_short)*1
assert X.c.offset == sizeof(c_short)*2
def get_except(self, func, *args, **kw):
try:
func(*args, **kw)
except Exception as detail:
import traceback
traceback.print_exc()
return detail.__class__, str(detail)
def test_mixed_1(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 4)]
if os.name in ("nt", "ce"):
assert sizeof(X) == sizeof(c_int)*2
else:
assert sizeof(X) == sizeof(c_int)
def test_mixed_2(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_int, 32)]
assert sizeof(X) == sizeof(c_int)*2
def test_mixed_3(self):
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
assert sizeof(X) == sizeof(c_byte)
def test_anon_bitfields(self):
# anonymous bit-fields gave a strange error message
class X(Structure):
_fields_ = [("a", c_byte, 4),
("b", c_ubyte, 4)]
class Y(Structure):
_anonymous_ = ["_"]
_fields_ = [("_", X)]
def test_set_fields_attr(self):
class A(Structure):
pass
A._fields_ = [("a", c_byte),
("b", c_ubyte)]
def test_set_fields_attr_bitfields(self):
class A(Structure):
pass
A._fields_ = [("a", POINTER(A)),
("b", c_ubyte, 4)]
def test_set_fields_cycle_fails(self):
class A(Structure):
pass
import pytest
pytest.raises(AttributeError, """
A._fields_ = [("a", A)]
""")
| 32.980159 | 85 | 0.47395 | [
"MIT"
] | igormcoelho/neo-boa | idea2/pypyjs-3/deps/pypy/pypy/module/test_lib_pypy/ctypes_tests/test_bitfields.py | 8,311 | Python |
from __future__ import print_function, division
import logging
import numpy as np
from . import operators
from . import utils
from . import algorithms
def delta_data(A, S, Y, W=1):
return W*(A.dot(S) - Y)
def grad_likelihood_A(A, S, Y, W=1):
D = delta_data(A, S, Y, W=W)
return D.dot(S.T)
def grad_likelihood_S(S, A, Y, W=1):
D = delta_data(A, S, Y, W=W)
return A.T.dot(D)
# executes one proximal step of likelihood gradient, followed by prox_g
def prox_likelihood_A(A, step, S=None, Y=None, prox_g=None, W=1):
return prox_g(A - step*grad_likelihood_A(A, S, Y, W=W), step)
def prox_likelihood_S(S, step, A=None, Y=None, prox_g=None, W=1):
return prox_g(S - step*grad_likelihood_S(S, A, Y, W=W), step)
def prox_likelihood(X, step, Xs=None, j=None, Y=None, WA=None, WS=None, prox_S=operators.prox_id, prox_A=operators.prox_id):
if j == 0:
return prox_likelihood_A(X, step, S=Xs[1], Y=Y, prox_g=prox_A, W=WA)
else:
return prox_likelihood_S(X, step, A=Xs[0], Y=Y, prox_g=prox_S, W=WS)
class Steps_AS:
def __init__(self, WA=1, WS=1, slack=0.1, max_stride=100):
"""Helper class to compute the Lipschitz constants of grad f.
The __call__ function compute the spectral norms of A or S, which
determine the Lipschitz constant of the respective update steps.
If a weight matrix is used, the stepsize will be upper bounded by
assuming the maximum value of the weights. In the case of varying
weights, it is generally advised to normalize the weight matrix
        differently for the A and S updates, therefore two separate weights
        (WA, WS) can be set.
Because the spectral norm is expensive to compute, it will only update
the step_size if relative changes of L exceed slack/2.
If not, which is usually the case after only a few iterations, it will
report a previous value for the next several iterations. The stride
between updates is set by
            stride -> stride * (slack/2 / rel_error)
i.e. it increases more strongly if the rel_error is much below the
slack budget.
"""
import scipy.sparse
        if np.isscalar(WA) and WA == 1:  # default scalar weight
self.WA = WA
else:
self.WA = scipy.sparse.diags(WA.reshape(-1))
        if np.isscalar(WS) and WS == 1:  # default scalar weight
self.WS = WS
else:
self.WS = scipy.sparse.diags(WS.reshape(-1))
# two independent caches for Lipschitz constants
self._cb = [utils.ApproximateCache(self._one_over_lipschitzA, slack=slack, max_stride=max_stride),
utils.ApproximateCache(self._one_over_lipschitzS, slack=slack, max_stride=max_stride)]
def _one_over_lipschitzA(self, Xs):
A,S = Xs
        if np.isscalar(self.WA) and self.WA == 1:
return 1./utils.get_spectral_norm(S.T)
else: # full weight matrix, need to serialize S along k
import scipy.sparse
Ss = scipy.sparse.block_diag([S.T for b in range(len(A))])
# Lipschitz constant for grad_A = || S Sigma_1 S.T||_s
SSigma_1S = Ss.T.dot(self.WA.dot(Ss))
LA = np.real(scipy.sparse.linalg.eigs(SSigma_1S, k=1, return_eigenvectors=False)[0])
return 1./LA
def _one_over_lipschitzS(self, Xs):
A,S = Xs
        if np.isscalar(self.WA) and self.WA == 1:
return 1./utils.get_spectral_norm(A)
else:
import scipy.sparse
N = S.shape[1]
As = scipy.sparse.bmat([[scipy.sparse.identity(N) * A[b,k] for k in range(A.shape[1])] for b in range(A.shape[0])])
ASigma_1A = As.T.dot(self.WS.dot(As))
LS = np.real(scipy.sparse.linalg.eigs(ASigma_1A, k=1, return_eigenvectors=False)[0])
return 1./LS
def __call__(self, j, Xs):
return self._cb[j](Xs)
def normalizeMatrix(M, axis):
if axis == 1:
norm = np.sum(M, axis=axis)
norm = np.broadcast_to(norm, M.T.shape)
norm = norm.T
else:
norm = np.sum(M, axis=axis)
norm = np.broadcast_to(norm, M.shape)
return norm
def nmf(Y, A, S, W=None, prox_A=operators.prox_plus, prox_S=operators.prox_plus, proxs_g=None, steps_g=None, Ls=None, slack=0.9, update_order=None, steps_g_update='steps_f', max_iter=1000, e_rel=1e-3, e_abs=0, traceback=None):
"""Non-negative matrix factorization.
This method solves the NMF problem
minimize || Y - AS ||_2^2
under an arbitrary number of constraints on A and/or S.
Args:
Y: target matrix MxN
A: initial amplitude matrix MxK, will be updated
S: initial source matrix KxN, will be updated
W: (optional weight matrix MxN)
        prox_A: direct projection constraint of A
prox_S: direct projection constraint of S
proxs_g: list of constraints for A or S for ADMM-type optimization
[[prox_A_0, prox_A_1...],[prox_S_0, prox_S_1,...]]
steps_g: specific value of step size for proxs_g (experts only!)
Ls: list of linear operators for the constraint functions proxs_g
If set, needs to have same format as proxs_g.
Matrices can be numpy.array, scipy.sparse, or None (for identity).
slack: tolerance for (re)evaluation of Lipschitz constants
See Steps_AS() for details.
update_order: list of factor indices in update order
j=0 -> A, j=1 -> S
max_iter: maximum iteration number, irrespective of current residuals
e_rel: relative error threshold for primal and dual residuals
e_abs: absolute error threshold for primal and dual residuals
traceback: utils.Traceback to hold variable histories
Returns:
        converged: convergence test for A,S
errors: difference between latest and previous iterations for A,S
See also:
algorithms.bsdmm for update_order and steps_g_update
utils.AcceleratedProxF for Nesterov acceleration
Reference:
Moolekamp & Melchior, 2017 (arXiv:1708.09066)
"""
# create stepsize callback, needs max of W
if W is not None:
# normalize in pixel and band directions to have similar update speeds
WA = normalizeMatrix(W, 1)
WS = normalizeMatrix(W, 0)
else:
WA = WS = 1
steps_f = Steps_AS(WA=WA, WS=WS, slack=slack)
# gradient step, followed by direct application of prox_S or prox_A
from functools import partial
f = partial(prox_likelihood, Y=Y, WA=WA, WS=WS, prox_S=prox_S, prox_A=prox_A)
X = [A, S]
# use accelerated block-PGM if there's no proxs_g
if proxs_g is None or not utils.hasNotNone(proxs_g):
return algorithms.bpgm(X, f, steps_f, accelerated=True, update_order=update_order, max_iter=max_iter, e_rel=e_rel, traceback=traceback)
else:
return algorithms.bsdmm(X, f, steps_f, proxs_g, steps_g=steps_g, Ls=Ls, update_order=update_order, steps_g_update=steps_g_update, max_iter=max_iter, e_rel=e_rel, e_abs=e_abs, traceback=traceback)
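# Editorial usage sketch (not part of the upstream proxmin package): factor a small
# non-negative matrix Y into A (MxK) and S (KxN) with the default non-negativity
# constraints. The shapes and random data below are illustrative only.
if __name__ == '__main__':
    np.random.seed(0)
    M, N, K = 20, 30, 3
    Y = np.random.rand(M, K).dot(np.random.rand(K, N))
    A0 = np.random.rand(M, K)
    S0 = np.random.rand(K, N)
    converged, errors = nmf(Y, A0, S0, e_rel=1e-3, max_iter=200)  # A0, S0 are updated in place
    print(converged, np.linalg.norm(Y - A0.dot(S0)) / np.linalg.norm(Y))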
| 42.012048 | 226 | 0.652853 | [
"MIT"
] | herjy/proxmin | proxmin/nmf.py | 6,974 | Python |
import json
import logging
import math
import os
import random
import warnings
from dataclasses import asdict
from multiprocessing import Pool, cpu_count
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from tensorboardX import SummaryWriter
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm, trange
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoTokenizer,
BartConfig,
BartForConditionalGeneration,
BartTokenizer,
BertConfig,
BertForMaskedLM,
BertModel,
BertTokenizer,
CamembertConfig,
CamembertModel,
CamembertTokenizer,
DistilBertConfig,
DistilBertModel,
DistilBertTokenizer,
ElectraConfig,
ElectraModel,
ElectraTokenizer,
EncoderDecoderConfig,
EncoderDecoderModel,
LongformerConfig,
LongformerModel,
LongformerTokenizer,
MarianConfig,
MarianMTModel,
MarianTokenizer,
MobileBertConfig,
MobileBertModel,
MobileBertTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
RobertaConfig,
RobertaModel,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from simpletransformers.config.global_args import global_args
from simpletransformers.config.model_args import Seq2SeqArgs
from simpletransformers.seq2seq.seq2seq_utils import Seq2SeqDataset, SimpleSummarizationDataset
try:
import wandb
wandb_available = True
except ImportError:
wandb_available = False
logger = logging.getLogger(__name__)
MODEL_CLASSES = {
"auto": (AutoConfig, AutoModel, AutoTokenizer),
"bart": (BartConfig, BartForConditionalGeneration, BartTokenizer),
"bert": (BertConfig, BertModel, BertTokenizer),
"camembert": (CamembertConfig, CamembertModel, CamembertTokenizer),
"distilbert": (DistilBertConfig, DistilBertModel, DistilBertTokenizer),
"electra": (ElectraConfig, ElectraModel, ElectraTokenizer),
"longformer": (LongformerConfig, LongformerModel, LongformerTokenizer),
"mobilebert": (MobileBertConfig, MobileBertModel, MobileBertTokenizer),
"marian": (MarianConfig, MarianMTModel, MarianTokenizer),
"roberta": (RobertaConfig, RobertaModel, RobertaTokenizer),
}
class Seq2SeqModel:
def __init__(
self,
encoder_type=None,
encoder_name=None,
decoder_name=None,
encoder_decoder_type=None,
encoder_decoder_name=None,
config=None,
args=None,
use_cuda=True,
cuda_device=-1,
**kwargs,
):
"""
Initializes a Seq2SeqModel.
Args:
encoder_type (optional): The type of model to use as the encoder.
encoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
decoder_name (optional): The exact architecture and trained weights to use. This may be a Hugging Face Transformers compatible pre-trained model, a community model, or the path to a directory containing model files.
Must be the same "size" as the encoder model (base/base, large/large, etc.)
encoder_decoder_type (optional): The type of encoder-decoder model. (E.g. bart)
encoder_decoder_name (optional): The path to a directory containing the saved encoder and decoder of a Seq2SeqModel. (E.g. "outputs/") OR a valid BART or MarianMT model.
config (optional): A configuration file to build an EncoderDecoderModel.
args (optional): Default args will be used if this parameter is not provided. If provided, it should be a dict containing the args that should be changed in the default args.
use_cuda (optional): Use GPU if available. Setting to False will force model to use CPU only.
cuda_device (optional): Specific GPU that should be used. Will use the first available GPU by default.
**kwargs (optional): For providing proxies, force_download, resume_download, cache_dir and other options specific to the 'from_pretrained' implementation where this will be supplied.
""" # noqa: ignore flake8"
if not config:
# if not ((encoder_name and decoder_name) or encoder_decoder_name) and not encoder_type:
if not ((encoder_name and decoder_name) or encoder_decoder_name):
raise ValueError(
"You must specify a Seq2Seq config \t OR \t"
"encoder_type, encoder_name, and decoder_name OR \t \t"
"encoder_type and encoder_decoder_name"
)
elif not (encoder_type or encoder_decoder_type):
raise ValueError(
"You must specify a Seq2Seq config \t OR \t"
"encoder_type, encoder_name, and decoder_name \t OR \t"
"encoder_type and encoder_decoder_name"
)
self.args = self._load_model_args(encoder_decoder_name)
if isinstance(args, dict):
self.args.update_from_dict(args)
elif isinstance(args, Seq2SeqArgs):
self.args = args
if "sweep_config" in kwargs:
sweep_config = kwargs.pop("sweep_config")
sweep_values = {key: value["value"] for key, value in sweep_config.as_dict().items() if key != "_wandb"}
self.args.update_from_dict(sweep_values)
if self.args.manual_seed:
random.seed(self.args.manual_seed)
np.random.seed(self.args.manual_seed)
torch.manual_seed(self.args.manual_seed)
if self.args.n_gpu > 0:
torch.cuda.manual_seed_all(self.args.manual_seed)
if use_cuda:
if torch.cuda.is_available():
if cuda_device == -1:
self.device = torch.device("cuda")
else:
self.device = torch.device(f"cuda:{cuda_device}")
else:
raise ValueError(
"'use_cuda' set to True when cuda is unavailable."
"Make sure CUDA is available or set `use_cuda=False`."
)
else:
self.device = "cpu"
self.results = {}
if not use_cuda:
self.args.fp16 = False
# config = EncoderDecoderConfig.from_encoder_decoder_configs(config, config)
if encoder_decoder_type:
config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_decoder_type]
else:
config_class, model_class, tokenizer_class = MODEL_CLASSES[encoder_type]
if encoder_decoder_type in ["bart", "marian"]:
self.model = model_class.from_pretrained(encoder_decoder_name)
if encoder_decoder_type == "bart":
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
elif encoder_decoder_type == "marian":
if self.args.base_marian_model_name:
self.encoder_tokenizer = tokenizer_class.from_pretrained(self.args.base_marian_model_name)
else:
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_decoder_name)
self.decoder_tokenizer = self.encoder_tokenizer
self.config = self.model.config
else:
if encoder_decoder_name:
# self.model = EncoderDecoderModel.from_pretrained(encoder_decoder_name)
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
os.path.join(encoder_decoder_name, "encoder"), os.path.join(encoder_decoder_name, "decoder")
)
self.model.encoder = model_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
self.model.decoder = BertForMaskedLM.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
self.encoder_tokenizer = tokenizer_class.from_pretrained(os.path.join(encoder_decoder_name, "encoder"))
self.decoder_tokenizer = BertTokenizer.from_pretrained(os.path.join(encoder_decoder_name, "decoder"))
else:
self.model = EncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_name, decoder_name, config=config
)
self.encoder_tokenizer = tokenizer_class.from_pretrained(encoder_name)
self.decoder_tokenizer = BertTokenizer.from_pretrained(decoder_name)
self.encoder_config = self.model.config.encoder
self.decoder_config = self.model.config.decoder
if self.args.wandb_project and not wandb_available:
warnings.warn("wandb_project specified but wandb is not available. Wandb disabled.")
self.args.wandb_project = None
if encoder_decoder_name:
self.args.model_name = encoder_decoder_name
# # Checking if we are loading from a saved model or using a pre-trained model
# if not saved_model_args and encoder_decoder_type == "marian":
# Need to store base pre-trained model name to get the tokenizer when loading a saved model
self.args.base_marian_model_name = encoder_decoder_name
elif encoder_name and decoder_name:
self.args.model_name = encoder_name + "-" + decoder_name
else:
self.args.model_name = "encoder-decoder"
if encoder_decoder_type:
self.args.model_type = encoder_decoder_type
elif encoder_type:
self.args.model_type = encoder_type + "-bert"
else:
self.args.model_type = "encoder-decoder"
def train_model(
self, train_data, output_dir=None, show_running_loss=True, args=None, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model using 'train_data'
Args:
train_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target text sequence
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
show_running_loss (optional): Set to False to prevent running loss from being printed to console. Defaults to True.
args (optional): Optional changes to the args dict of the model. Any changes made will persist for the model.
eval_data (optional): A DataFrame against which evaluation will be performed when evaluate_during_training is enabled. Is required if evaluate_during_training is enabled.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down training significantly as the predicted sequences need to be generated.
Returns:
None
""" # noqa: ignore flake8"
if args:
self.args.update_from_dict(args)
# if self.args.silent:
# show_running_loss = False
if self.args.evaluate_during_training and eval_data is None:
raise ValueError(
"evaluate_during_training is enabled but eval_data is not specified."
" Pass eval_data to model.train_model() if using evaluate_during_training."
)
if not output_dir:
output_dir = self.args.output_dir
if os.path.exists(output_dir) and os.listdir(output_dir) and not self.args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty."
" Set args.overwrite_output_dir = True to overcome.".format(output_dir)
)
self._move_model_to_device()
train_dataset = self.load_and_cache_examples(train_data, verbose=verbose)
os.makedirs(output_dir, exist_ok=True)
global_step, tr_loss = self.train(
train_dataset,
output_dir,
show_running_loss=show_running_loss,
eval_data=eval_data,
verbose=verbose,
**kwargs,
)
self._save_model(self.args.output_dir, model=self.model)
# model_to_save = self.model.module if hasattr(self.model, "module") else self.model
# model_to_save.save_pretrained(output_dir)
# self.encoder_tokenizer.save_pretrained(output_dir)
# self.decoder_tokenizer.save_pretrained(output_dir)
# torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if verbose:
logger.info(" Training of {} model complete. Saved to {}.".format(self.args.model_name, output_dir))
def train(
self, train_dataset, output_dir, show_running_loss=True, eval_data=None, verbose=True, **kwargs,
):
"""
Trains the model on train_dataset.
Utility function to be used by the train_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
tb_writer = SummaryWriter(logdir=args.tensorboard_dir)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
num_workers=self.args.dataloader_num_workers,
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = []
custom_parameter_names = set()
for group in self.args.custom_parameter_groups:
params = group.pop("params")
custom_parameter_names.update(params)
param_group = {**group}
param_group["params"] = [p for n, p in model.named_parameters() if n in params]
optimizer_grouped_parameters.append(param_group)
for group in self.args.custom_layer_parameters:
layer_number = group.pop("layer")
layer = f"layer.{layer_number}."
group_d = {**group}
group_nd = {**group}
group_nd["weight_decay"] = 0.0
params_d = []
params_nd = []
for n, p in model.named_parameters():
if n not in custom_parameter_names and layer in n:
if any(nd in n for nd in no_decay):
params_nd.append(p)
else:
params_d.append(p)
custom_parameter_names.add(n)
group_d["params"] = params_d
group_nd["params"] = params_nd
optimizer_grouped_parameters.append(group_d)
optimizer_grouped_parameters.append(group_nd)
if not self.args.train_custom_parameters_only:
optimizer_grouped_parameters.extend(
[
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if n not in custom_parameter_names and any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
)
warmup_steps = math.ceil(t_total * args.warmup_ratio)
args.warmup_steps = warmup_steps if args.warmup_steps == 0 else args.warmup_steps
# TODO: Use custom optimizer like with BertSum?
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if (
args.model_name
and os.path.isfile(os.path.join(args.model_name, "optimizer.pt"))
and os.path.isfile(os.path.join(args.model_name, "scheduler.pt"))
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name, "scheduler.pt")))
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info(" Training started")
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.silent, mininterval=0)
epoch_number = 0
best_eval_metric = None
early_stopping_counter = 0
steps_trained_in_current_epoch = 0
epochs_trained = 0
if args.model_name and os.path.exists(args.model_name):
try:
                # set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name.split("/")[-1].split("-")
if len(checkpoint_suffix) > 2:
checkpoint_suffix = checkpoint_suffix[1]
else:
checkpoint_suffix = checkpoint_suffix[-1]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (
len(train_dataloader) // args.gradient_accumulation_steps
)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the current epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
if args.evaluate_during_training:
training_progress_scores = self._create_training_progress_scores(**kwargs)
if args.wandb_project:
wandb.init(project=args.wandb_project, config={**asdict(args)}, **args.wandb_kwargs)
wandb.watch(self.model)
if args.fp16:
from torch.cuda import amp
scaler = amp.GradScaler()
model.train()
for current_epoch in train_iterator:
if epochs_trained > 0:
epochs_trained -= 1
continue
train_iterator.set_description(f"Epoch {epoch_number + 1} of {args.num_train_epochs}")
batch_iterator = tqdm(
train_dataloader,
desc=f"Running Epoch {epoch_number} of {args.num_train_epochs}",
disable=args.silent,
mininterval=0,
)
for step, batch in enumerate(batch_iterator):
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
# batch = tuple(t.to(device) for t in batch)
inputs = self._get_inputs_dict(batch)
if args.fp16:
with amp.autocast():
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
else:
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
current_loss = loss.item()
if show_running_loss:
batch_iterator.set_description(
f"Epochs {epoch_number}/{args.num_train_epochs}. Running Loss: {current_loss:9.4f}"
)
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
if args.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.wandb_project:
wandb.log(
{
"Training loss": current_loss,
"lr": scheduler.get_lr()[0],
"global_step": global_step,
}
)
if args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training and (
args.evaluate_during_training_steps > 0
and global_step % args.evaluate_during_training_steps == 0
):
# Only evaluate when single GPU otherwise metrics may not average well
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
**kwargs,
)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
output_dir_current = os.path.join(output_dir, "checkpoint-{}".format(global_step))
if args.save_eval_checkpoints:
self._save_model(output_dir_current, optimizer, scheduler, model=model, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(
os.path.join(args.output_dir, "training_progress_scores.csv"), index=False,
)
if args.wandb_project:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
if best_eval_metric and args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(
args.best_model_dir, optimizer, scheduler, model=model, results=results
)
early_stopping_counter = 0
else:
if args.use_early_stopping:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
epoch_number += 1
output_dir_current = os.path.join(output_dir, "checkpoint-{}-epoch-{}".format(global_step, epoch_number))
if args.save_model_every_epoch or args.evaluate_during_training:
os.makedirs(output_dir_current, exist_ok=True)
if args.save_model_every_epoch:
self._save_model(output_dir_current, optimizer, scheduler, model=model)
if args.evaluate_during_training:
results = self.eval_model(
eval_data,
verbose=verbose and args.evaluate_during_training_verbose,
silent=args.evaluate_during_training_silent,
**kwargs,
)
if args.save_eval_checkpoints:
self._save_model(output_dir_current, optimizer, scheduler, results=results)
training_progress_scores["global_step"].append(global_step)
training_progress_scores["train_loss"].append(current_loss)
for key in results:
training_progress_scores[key].append(results[key])
report = pd.DataFrame(training_progress_scores)
report.to_csv(os.path.join(args.output_dir, "training_progress_scores.csv"), index=False)
if args.wandb_project:
wandb.log(self._get_last_metrics(training_progress_scores))
if not best_eval_metric:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
if best_eval_metric and args.early_stopping_metric_minimize:
if results[args.early_stopping_metric] - best_eval_metric < args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args.use_early_stopping and args.early_stopping_consider_epochs:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
else:
if results[args.early_stopping_metric] - best_eval_metric > args.early_stopping_delta:
best_eval_metric = results[args.early_stopping_metric]
if args.save_best_model:
self._save_model(args.best_model_dir, optimizer, scheduler, model=model, results=results)
early_stopping_counter = 0
else:
if args.use_early_stopping and args.early_stopping_consider_epochs:
if early_stopping_counter < args.early_stopping_patience:
early_stopping_counter += 1
if verbose:
logger.info(f" No improvement in {args.early_stopping_metric}")
logger.info(f" Current step: {early_stopping_counter}")
logger.info(f" Early stopping patience: {args.early_stopping_patience}")
else:
if verbose:
logger.info(f" Patience of {args.early_stopping_patience} steps reached")
logger.info(" Training terminated.")
train_iterator.close()
return global_step, tr_loss / global_step
return global_step, tr_loss / global_step
def eval_model(self, eval_data, output_dir=None, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_data. Saves results to output_dir.
Args:
eval_data: Pandas DataFrame containing the 2 columns - `input_text`, `target_text`.
- `input_text`: The input text sequence.
- `target_text`: The target text sequence.
output_dir: The directory where model files will be saved. If not given, self.args.output_dir will be used.
verbose: If verbose, results will be printed to the console on completion of evaluation.
silent: If silent, tqdm progress bars will be hidden.
**kwargs: Additional metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
results: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
if not output_dir:
output_dir = self.args.output_dir
self._move_model_to_device()
eval_dataset = self.load_and_cache_examples(eval_data, evaluate=True, verbose=verbose, silent=silent)
os.makedirs(output_dir, exist_ok=True)
result = self.evaluate(eval_dataset, output_dir, verbose=verbose, silent=silent, **kwargs)
self.results.update(result)
if self.args.evaluate_generated_text:
to_predict = eval_data["input_text"].tolist()
preds = self.predict(to_predict)
result = self.compute_metrics(eval_data["target_text"].tolist(), preds, **kwargs)
self.results.update(result)
if verbose:
logger.info(self.results)
return self.results
def evaluate(self, eval_dataset, output_dir, verbose=True, silent=False, **kwargs):
"""
Evaluates the model on eval_dataset.
Utility function to be used by the eval_model() method. Not intended to be used directly.
"""
model = self.model
args = self.args
eval_output_dir = output_dir
results = {}
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
for batch in tqdm(eval_dataloader, disable=args.silent or silent, desc="Running Evaluation"):
# batch = tuple(t.to(device) for t in batch)
inputs = self._get_inputs_dict(batch)
with torch.no_grad():
outputs = model(**inputs)
loss = outputs[0]
eval_loss += loss.mean().item()
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
results["eval_loss"] = eval_loss
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
return results
def predict(self, to_predict):
"""
Performs predictions on a list of text.
Args:
to_predict: A python list of text (str) to be sent to the model for prediction. Note that the prefix should be prepended to the text.
Returns:
preds: A python list of the generated sequences.
""" # noqa: ignore flake8"
self._move_model_to_device()
all_outputs = []
# Batching
for batch in [
to_predict[i : i + self.args.eval_batch_size] for i in range(0, len(to_predict), self.args.eval_batch_size)
]:
if self.args.model_type == "marian":
input_ids = self.encoder_tokenizer.prepare_translation_batch(
batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors="pt",
)["input_ids"]
else:
input_ids = self.encoder_tokenizer.batch_encode_plus(
batch, max_length=self.args.max_seq_length, pad_to_max_length=True, return_tensors="pt",
)["input_ids"]
input_ids = input_ids.to(self.device)
if self.args.model_type in ["bart", "marian"]:
outputs = self.model.generate(
input_ids=input_ids,
num_beams=self.args.num_beams,
max_length=self.args.max_length,
length_penalty=self.args.length_penalty,
early_stopping=self.args.early_stopping,
repetition_penalty=self.args.repetition_penalty,
do_sample=self.args.do_sample,
top_k=self.args.top_k,
top_p=self.args.top_p,
num_return_sequences=self.args.num_return_sequences,
)
else:
outputs = self.model.generate(
input_ids=input_ids,
decoder_start_token_id=self.model.config.decoder.pad_token_id,
num_beams=self.args.num_beams,
max_length=self.args.max_length,
length_penalty=self.args.length_penalty,
early_stopping=self.args.early_stopping,
repetition_penalty=self.args.repetition_penalty,
do_sample=self.args.do_sample,
top_k=self.args.top_k,
top_p=self.args.top_p,
num_return_sequences=self.args.num_return_sequences,
)
all_outputs.extend(outputs.cpu().numpy())
if self.args.use_multiprocessed_decoding:
self.model.to("cpu")
with Pool(self.args.process_count) as p:
outputs = list(
tqdm(
p.imap(self._decode, all_outputs, chunksize=self.args.multiprocessing_chunksize),
total=len(all_outputs),
desc="Decoding outputs",
disable=self.args.silent,
)
)
self._move_model_to_device()
else:
outputs = [
self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for output_id in all_outputs
]
if self.args.num_return_sequences > 1:
return [
outputs[i : i + self.args.num_return_sequences]
for i in range(0, len(outputs), self.args.num_return_sequences)
]
else:
return outputs
def _decode(self, output_id):
return self.decoder_tokenizer.decode(output_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
def compute_metrics(self, labels, preds, **kwargs):
"""
Computes the evaluation metrics for the model predictions.
Args:
labels: List of target sequences
preds: List of model generated outputs
**kwargs: Custom metrics that should be used. Pass in the metrics as keyword arguments (name of metric: function to use).
A metric function should take in two parameters. The first parameter will be the true labels, and the second parameter will be the predictions. Both inputs
will be lists of strings. Note that this will slow down evaluation significantly as the predicted sequences need to be generated.
Returns:
result: Dictionary containing evaluation results.
""" # noqa: ignore flake8"
# assert len(labels) == len(preds)
results = {}
for metric, func in kwargs.items():
results[metric] = func(labels, preds)
return results
def load_and_cache_examples(self, data, evaluate=False, no_cache=False, verbose=True, silent=False):
"""
        Creates a Seq2SeqDataset (or SimpleSummarizationDataset) from data.
Utility function for train() and eval() methods. Not intended to be used directly.
"""
encoder_tokenizer = self.encoder_tokenizer
decoder_tokenizer = self.decoder_tokenizer
args = self.args
if not no_cache:
no_cache = args.no_cache
if not no_cache:
os.makedirs(self.args.cache_dir, exist_ok=True)
mode = "dev" if evaluate else "train"
if args.dataset_class:
CustomDataset = args.dataset_class
return CustomDataset(encoder_tokenizer, decoder_tokenizer, args, data, mode)
else:
if args.model_type in ["bart", "marian"]:
return SimpleSummarizationDataset(encoder_tokenizer, self.args, data, mode)
else:
return Seq2SeqDataset(encoder_tokenizer, decoder_tokenizer, self.args, data, mode,)
def _create_training_progress_scores(self, **kwargs):
extra_metrics = {key: [] for key in kwargs}
training_progress_scores = {
"global_step": [],
"eval_loss": [],
"train_loss": [],
**extra_metrics,
}
return training_progress_scores
def _get_last_metrics(self, metric_values):
return {metric: values[-1] for metric, values in metric_values.items()}
def _save_model(self, output_dir=None, optimizer=None, scheduler=None, model=None, results=None):
if not output_dir:
output_dir = self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info(f"Saving model into {output_dir}")
if model and not self.args.no_save:
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
self._save_model_args(output_dir)
if self.args.model_type in ["bart", "marian"]:
os.makedirs(os.path.join(output_dir), exist_ok=True)
model_to_save.save_pretrained(output_dir)
self.config.save_pretrained(output_dir)
if self.args.model_type == "bart":
self.encoder_tokenizer.save_pretrained(output_dir)
else:
os.makedirs(os.path.join(output_dir, "encoder"), exist_ok=True)
os.makedirs(os.path.join(output_dir, "decoder"), exist_ok=True)
self.encoder_config.save_pretrained(os.path.join(output_dir, "encoder"))
self.decoder_config.save_pretrained(os.path.join(output_dir, "decoder"))
model_to_save = (
self.model.encoder.module if hasattr(self.model.encoder, "module") else self.model.encoder
)
model_to_save.save_pretrained(os.path.join(output_dir, "encoder"))
model_to_save = (
self.model.decoder.module if hasattr(self.model.decoder, "module") else self.model.decoder
)
model_to_save.save_pretrained(os.path.join(output_dir, "decoder"))
self.encoder_tokenizer.save_pretrained(os.path.join(output_dir, "encoder"))
self.decoder_tokenizer.save_pretrained(os.path.join(output_dir, "decoder"))
torch.save(self.args, os.path.join(output_dir, "training_args.bin"))
if optimizer and scheduler and self.args.save_optimizer_and_scheduler:
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
if results:
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
def _move_model_to_device(self):
self.model.to(self.device)
def _get_inputs_dict(self, batch):
device = self.device
if self.args.model_type in ["bart", "marian"]:
pad_token_id = self.encoder_tokenizer.pad_token_id
source_ids, source_mask, y = batch["source_ids"], batch["source_mask"], batch["target_ids"]
y_ids = y[:, :-1].contiguous()
lm_labels = y[:, 1:].clone()
lm_labels[y[:, 1:] == pad_token_id] = -100
inputs = {
"input_ids": source_ids.to(device),
"attention_mask": source_mask.to(device),
"decoder_input_ids": y_ids.to(device),
"lm_labels": lm_labels.to(device),
}
else:
lm_labels = batch[1]
lm_labels_masked = lm_labels.clone()
lm_labels_masked[lm_labels_masked == self.decoder_tokenizer.pad_token_id] = -100
inputs = {
"input_ids": batch[0].to(device),
"decoder_input_ids": lm_labels.to(device),
"labels": lm_labels_masked.to(device),
}
return inputs
def _save_model_args(self, output_dir):
os.makedirs(output_dir, exist_ok=True)
self.args.save(output_dir)
def _load_model_args(self, input_dir):
args = Seq2SeqArgs()
args.load(input_dir)
return args
def get_named_parameters(self):
return [n for n, p in self.model.named_parameters()]
| 46.231527 | 227 | 0.58212 | ["Apache-2.0"] | AliOsm/simpletransformers | simpletransformers/seq2seq/seq2seq_model.py | 46,925 | Python |
from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTest(TestCase):
def test_create_user_with_email_successful(self):
"""이메일로 유저 생성을 성공하는 테스트"""
email = '[email protected]'
password = 'testpassword'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""이메일이 표준 형식으로 들어오는 테스트"""
email = '[email protected]'
user = get_user_model().objects.create_user(email, 'testpw123')
self.assertEqual(user.email, email.lower())
def test_new_user_missing_email(self):
"""이메일이 입력되지 않았을 때 에러가 발생하는 테스트"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'testpw123')
def test_create_new_superuser(self):
"""Superuser를 생성하는 테스트"""
user = get_user_model().objects.create_superuser(
'[email protected]',
'testpw123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
| 30.65 | 71 | 0.644372 | ["MIT"] | jacobjlee/simple-shopping | shoppingmall/core/tests/test_models.py | 1,352 | Python |
# Generated by Django 2.1.2 on 2019-02-05 08:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0038_merge_20190203_1423'),
('core', '0039_auto_20190205_0609'),
]
operations = [
]
| 17.866667 | 47 | 0.641791 | ["MIT"] | metabolism-of-cities/ARCHIVED-metabolism-of-cities-platform-v3 | src/core/migrations/0040_merge_20190205_0807.py | 268 | Python |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import datetime
import glob
import optparse
import os
import re
import sys
if sys.version_info < (2, 6, 0):
sys.stderr.write("python 2.6 or later is required run this script\n")
sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_make
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import NACLPORTS_DIR, GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
CYGTAR = os.path.join(NACL_DIR, 'build', 'cygtar.py')
NACLPORTS_URL = 'https://naclports.googlecode.com/svn/trunk/src'
NACLPORTS_REV = 1152
GYPBUILD_DIR = 'gypbuild'
options = None
def GetGlibcToolchain():
tcdir = os.path.join(NACL_DIR, 'toolchain', '.tars')
tcname = 'toolchain_%s_x86.tar.bz2' % getos.GetPlatform()
return os.path.join(tcdir, tcname)
def GetNewlibToolchain():
tcdir = os.path.join(NACL_DIR, 'toolchain', '.tars')
tcname = 'naclsdk_%s_x86.tgz' % getos.GetPlatform()
return os.path.join(tcdir, tcname)
def GetBionicToolchain():
tcdir = os.path.join(NACL_DIR, 'toolchain', '.tars')
tcname = 'naclsdk_%s_arm_bionic.tgz' % getos.GetPlatform()
return os.path.join(tcdir, tcname)
def GetPNaClToolchain():
tcdir = os.path.join(NACL_DIR, 'toolchain', '.tars')
tcname = 'naclsdk_pnacl_%s_x86.tgz' % getos.GetPlatform()
return os.path.join(tcdir, tcname)
def GetToolchainNaClInclude(tcname, tcpath, arch):
if arch == 'x86':
if tcname == 'pnacl':
return os.path.join(tcpath, 'sdk', 'include')
return os.path.join(tcpath, 'x86_64-nacl', 'include')
elif arch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'include')
else:
buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
def GetGypGenDir(xarch):
if xarch == 'arm':
build_dir = GYPBUILD_DIR + '-arm'
else:
build_dir = GYPBUILD_DIR
return os.path.join(OUT_DIR, build_dir, 'Release', 'gen')
def GetGypBuiltLib(tcname, xarch=None):
if tcname == 'pnacl':
tcname = 'pnacl_newlib'
if not xarch:
xarch = ''
return os.path.join(GetGypGenDir(xarch), 'tc_' + tcname, 'lib' + xarch)
def GetToolchainNaClLib(tcname, tcpath, xarch):
if tcname == 'pnacl':
return os.path.join(tcpath, 'sdk', 'lib')
elif xarch == '32':
return os.path.join(tcpath, 'x86_64-nacl', 'lib32')
elif xarch == '64':
return os.path.join(tcpath, 'x86_64-nacl', 'lib')
elif xarch == 'arm':
return os.path.join(tcpath, 'arm-nacl', 'lib')
def GetToolchainDirName(tcname, xarch):
if tcname == 'pnacl':
return '%s_%s' % (getos.GetPlatform(), tcname)
elif xarch == 'arm':
return '%s_arm_%s' % (getos.GetPlatform(), tcname)
else:
return '%s_x86_%s' % (getos.GetPlatform(), tcname)
def GetGypToolchainLib(tcname, xarch):
tcpath = os.path.join(GetGypGenDir(xarch), 'sdk', 'toolchain',
GetToolchainDirName(tcname, xarch))
return GetToolchainNaClLib(tcname, tcpath, xarch)
def GetOutputToolchainLib(pepperdir, tcname, xarch):
tcpath = os.path.join(pepperdir, 'toolchain',
GetToolchainDirName(tcname, xarch))
return GetToolchainNaClLib(tcname, tcpath, xarch)
def GetPNaClNativeLib(tcpath, arch):
if arch not in ['arm', 'x86-32', 'x86-64']:
buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
return os.path.join(tcpath, 'lib-' + arch)
def BuildStepDownloadToolchains(toolchains):
buildbot_common.BuildStep('Running download_toolchains.py')
download_script = os.path.join('build', 'download_toolchains.py')
args = [sys.executable, download_script, '--no-arm-trusted',
'--arm-untrusted', '--keep']
if 'bionic' in toolchains:
args.append('--allow-bionic')
buildbot_common.Run(args, cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
buildbot_common.BuildStep('Clean Pepper Dirs')
buildbot_common.RemoveDir(pepperdir_old)
buildbot_common.RemoveDir(pepperdir)
buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
for subdir in subdirs:
buildbot_common.MakeDir(os.path.join(pepperdir, subdir))
TEXT_FILES = [
'AUTHORS',
'COPYING',
'LICENSE',
'README.Makefiles',
'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
nacl_revision):
buildbot_common.BuildStep('Add Text Files')
InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)
# Replace a few placeholders in README
readme_text = open(os.path.join(SDK_SRC_DIR, 'README')).read()
readme_text = readme_text.replace('${VERSION}', pepper_ver)
readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)
# Year/Month/Day Hour:Minute:Second
time_format = '%Y/%m/%d %H:%M:%S'
readme_text = readme_text.replace('${DATE}',
datetime.datetime.now().strftime(time_format))
open(os.path.join(pepperdir, 'README'), 'w').write(readme_text)
def BuildStepUntarToolchains(pepperdir, toolchains):
buildbot_common.BuildStep('Untar Toolchains')
platform = getos.GetPlatform()
tmpdir = os.path.join(OUT_DIR, 'tc_temp')
buildbot_common.RemoveDir(tmpdir)
buildbot_common.MakeDir(tmpdir)
if 'newlib' in toolchains:
# Untar the newlib toolchains
tarfile = GetNewlibToolchain()
buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
cwd=NACL_DIR)
# Then rename/move it to the pepper toolchain directory
srcdir = os.path.join(tmpdir, 'sdk', 'nacl-sdk')
tcname = platform + '_x86_newlib'
newlibdir = os.path.join(pepperdir, 'toolchain', tcname)
buildbot_common.Move(srcdir, newlibdir)
if 'bionic' in toolchains:
# Untar the bionic toolchains
tarfile = GetBionicToolchain()
tcname = platform + '_arm_bionic'
buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
cwd=NACL_DIR)
srcdir = os.path.join(tmpdir, tcname)
bionicdir = os.path.join(pepperdir, 'toolchain', tcname)
buildbot_common.Move(srcdir, bionicdir)
if 'arm' in toolchains:
# Copy the existing arm toolchain from native_client tree
tcname = platform + '_arm_newlib'
arm_toolchain = os.path.join(NACL_DIR, 'toolchain', tcname)
arm_toolchain_sdk = os.path.join(pepperdir, 'toolchain',
os.path.basename(arm_toolchain))
buildbot_common.CopyDir(arm_toolchain, arm_toolchain_sdk)
if 'glibc' in toolchains:
# Untar the glibc toolchains
tarfile = GetGlibcToolchain()
tcname = platform + '_x86_glibc'
buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
cwd=NACL_DIR)
# Then rename/move it to the pepper toolchain directory
srcdir = os.path.join(tmpdir, 'toolchain', platform + '_x86')
glibcdir = os.path.join(pepperdir, 'toolchain', tcname)
buildbot_common.Move(srcdir, glibcdir)
# Untar the pnacl toolchains
if 'pnacl' in toolchains:
tmpdir = os.path.join(tmpdir, 'pnacl')
buildbot_common.RemoveDir(tmpdir)
buildbot_common.MakeDir(tmpdir)
tarfile = GetPNaClToolchain()
tcname = platform + '_pnacl'
buildbot_common.Run([sys.executable, CYGTAR, '-C', tmpdir, '-xf', tarfile],
cwd=NACL_DIR)
# Then rename/move it to the pepper toolchain directory
pnacldir = os.path.join(pepperdir, 'toolchain', tcname)
buildbot_common.Move(tmpdir, pnacldir)
buildbot_common.RemoveDir(tmpdir)
if options.gyp and platform != 'win':
# If the gyp options is specified we install a toolchain
# wrapper so that gyp can switch toolchains via a commandline
# option.
bindir = os.path.join(pepperdir, 'toolchain', tcname, 'bin')
wrapper = os.path.join(SDK_SRC_DIR, 'tools', 'compiler-wrapper.py')
buildbot_common.MakeDir(bindir)
buildbot_common.CopyFile(wrapper, bindir)
# Module 'os' has no 'symlink' member (on Windows).
# pylint: disable=E1101
os.symlink('compiler-wrapper.py', os.path.join(bindir, 'i686-nacl-g++'))
os.symlink('compiler-wrapper.py', os.path.join(bindir, 'i686-nacl-gcc'))
os.symlink('compiler-wrapper.py', os.path.join(bindir, 'i686-nacl-ar'))
# List of toolchain headers to install.
# Source is relative to top of Chromium tree, destination is relative
# to the toolchain header directory.
NACL_HEADER_MAP = {
'newlib': [
('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
('native_client/src/untrusted/irt/irt.h', ''),
('native_client/src/untrusted/irt/irt_dev.h', ''),
('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_thread.h', 'nacl/'),
('native_client/src/untrusted/pthread/pthread.h', ''),
('native_client/src/untrusted/pthread/semaphore.h', ''),
('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
('ppapi/nacl_irt/irt_ppapi.h', ''),
],
'glibc': [
('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
('native_client/src/untrusted/irt/irt.h', ''),
('native_client/src/untrusted/irt/irt_dev.h', ''),
('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
('native_client/src/untrusted/nacl/nacl_thread.h', 'nacl/'),
('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
('ppapi/nacl_irt/irt_ppapi.h', ''),
],
'host': []
}
def InstallFiles(src_root, dest_root, file_list):
"""Copy a set of files from src_root to dest_root according
  to the given mapping. This allows a file to be copied
  to a location in the destination tree that is different from its
  location in the source tree.
If the destination mapping ends with a '/' then the destination
  basename is inherited from the source file.
Wildcards can be used in the source list but it is not recommended
as this can end up adding things to the SDK unintentionally.
"""
for file_spec in file_list:
# The list of files to install can be a simple list of
# strings or a list of pairs, where each pair corresponds
# to a mapping from source to destination names.
if type(file_spec) == str:
src_file = dest_file = file_spec
else:
src_file, dest_file = file_spec
src_file = os.path.join(src_root, src_file)
# Expand sources files using glob.
sources = glob.glob(src_file)
if not sources:
sources = [src_file]
if len(sources) > 1 and not dest_file.endswith('/'):
buildbot_common.ErrorExit("Target file must end in '/' when "
"using globbing to install multiple files")
for source in sources:
if dest_file.endswith('/'):
dest = os.path.join(dest_file, os.path.basename(source))
else:
dest = dest_file
dest = os.path.join(dest_root, dest)
if not os.path.isdir(os.path.dirname(dest)):
buildbot_common.MakeDir(os.path.dirname(dest))
buildbot_common.CopyFile(source, dest)
def InstallNaClHeaders(tc_dst_inc, tc_name):
"""Copies NaCl headers to expected locations in the toolchain."""
if tc_name == 'arm':
# arm toolchain header should be the same as the x86 newlib
# ones
tc_name = 'newlib'
InstallFiles(SRC_DIR, tc_dst_inc, NACL_HEADER_MAP[tc_name])
def MakeNinjaRelPath(path):
return os.path.join(os.path.relpath(OUT_DIR, SRC_DIR), path)
TOOLCHAIN_LIBS = {
'newlib' : [
'crti.o',
'crtn.o',
'libminidump_generator.a',
'libnacl.a',
'libnacl_dyncode.a',
'libnacl_exception.a',
'libnacl_list_mappings.a',
'libnosys.a',
'libppapi.a',
'libppapi_stub.a',
'libpthread.a',
],
'glibc': [
'libminidump_generator.a',
'libminidump_generator.so',
'libnacl.a',
'libnacl_dyncode.a',
'libnacl_dyncode.so',
'libnacl_exception.a',
'libnacl_exception.so',
'libnacl_list_mappings.a',
'libnacl_list_mappings.so',
'libppapi.a',
'libppapi.so',
'libppapi_stub.a',
],
'pnacl': [
'libminidump_generator.a',
'libnacl.a',
'libnacl_dyncode.a',
'libnacl_exception.a',
'libnacl_list_mappings.a',
'libnosys.a',
'libppapi.a',
'libppapi_stub.a',
'libpthread.a',
]
}
def GypNinjaInstall(pepperdir, toolchains):
build_dir = GYPBUILD_DIR
ninja_out_dir = os.path.join(OUT_DIR, build_dir, 'Release')
tools_files = [
['sel_ldr', 'sel_ldr_x86_32'],
['ncval_new', 'ncval'],
['irt_core_newlib_x32.nexe', 'irt_core_x86_32.nexe'],
['irt_core_newlib_x64.nexe', 'irt_core_x86_64.nexe'],
]
platform = getos.GetPlatform()
# TODO(binji): dump_syms doesn't currently build on Windows. See
# http://crbug.com/245456
if platform != 'win':
tools_files += [
['dump_syms', 'dump_syms'],
['minidump_dump', 'minidump_dump'],
['minidump_stackwalk', 'minidump_stackwalk']
]
tools_files.append(['sel_ldr64', 'sel_ldr_x86_64'])
if platform == 'linux':
tools_files.append(['nacl_helper_bootstrap',
'nacl_helper_bootstrap_x86_32'])
tools_files.append(['nacl_helper_bootstrap64',
'nacl_helper_bootstrap_x86_64'])
buildbot_common.MakeDir(os.path.join(pepperdir, 'tools'))
# Add .exe extensions to all windows tools
for pair in tools_files:
if platform == 'win' and not pair[0].endswith('.nexe'):
pair[0] += '.exe'
pair[1] += '.exe'
InstallFiles(ninja_out_dir, os.path.join(pepperdir, 'tools'), tools_files)
for tc in set(toolchains) & set(['newlib', 'glibc', 'pnacl']):
if tc == 'pnacl':
xarches = (None,)
else:
xarches = ('arm', '32', '64')
for xarch in xarches:
if tc == 'glibc' and xarch == 'arm':
continue
src_dir = GetGypBuiltLib(tc, xarch)
dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
InstallFiles(src_dir, dst_dir, TOOLCHAIN_LIBS[tc])
if tc != 'pnacl':
src_dir = GetGypToolchainLib(tc, xarch)
InstallFiles(src_dir, dst_dir, ['crt1.o'])
def GypNinjaBuild_NaCl(rel_out_dir):
gyp_py = os.path.join(NACL_DIR, 'build', 'gyp_nacl')
nacl_core_sdk_gyp = os.path.join(NACL_DIR, 'build', 'nacl_core_sdk.gyp')
all_gyp = os.path.join(NACL_DIR, 'build', 'all.gyp')
out_dir = MakeNinjaRelPath(rel_out_dir)
out_dir_arm = MakeNinjaRelPath(rel_out_dir + '-arm')
GypNinjaBuild('ia32', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir)
GypNinjaBuild('arm', gyp_py, nacl_core_sdk_gyp, 'nacl_core_sdk', out_dir_arm)
GypNinjaBuild('ia32', gyp_py, all_gyp, 'ncval_new', out_dir)
platform = getos.GetPlatform()
if platform == 'win':
NinjaBuild('sel_ldr64', out_dir)
else:
out_dir_64 = MakeNinjaRelPath(rel_out_dir + '-64')
GypNinjaBuild('x64', gyp_py, nacl_core_sdk_gyp, 'sel_ldr', out_dir_64)
# We only need sel_ldr from the 64-bit out directory.
# sel_ldr needs to be renamed, so we'll call it sel_ldr64.
files_to_copy = [('sel_ldr', 'sel_ldr64')]
if platform == 'linux':
files_to_copy.append(('nacl_helper_bootstrap', 'nacl_helper_bootstrap64'))
for src, dst in files_to_copy:
buildbot_common.CopyFile(
os.path.join(SRC_DIR, out_dir_64, 'Release', src),
os.path.join(SRC_DIR, out_dir, 'Release', dst))
def GypNinjaBuild_Breakpad(rel_out_dir):
# TODO(binji): dump_syms doesn't currently build on Windows. See
# http://crbug.com/245456
if getos.GetPlatform() == 'win':
return
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'breakpad', 'breakpad.gyp')
build_list = ['dump_syms', 'minidump_dump', 'minidump_stackwalk']
GypNinjaBuild('ia32', gyp_py, gyp_file, build_list, out_dir)
def GypNinjaBuild_PPAPI(arch, rel_out_dir):
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client',
'native_client.gyp')
GypNinjaBuild(arch, gyp_py, gyp_file, 'ppapi_lib', out_dir)
def GypNinjaBuild_Pnacl(rel_out_dir, target_arch):
# TODO(binji): This will build the pnacl_irt_shim twice; once as part of the
# Chromium build, and once here. When we move more of the SDK build process
# to gyp, we can remove this.
gyp_py = os.path.join(SRC_DIR, 'build', 'gyp_chromium')
out_dir = MakeNinjaRelPath(rel_out_dir)
gyp_file = os.path.join(SRC_DIR, 'ppapi', 'native_client', 'src',
'untrusted', 'pnacl_irt_shim', 'pnacl_irt_shim.gyp')
targets = ['pnacl_irt_shim_aot']
GypNinjaBuild(target_arch, gyp_py, gyp_file, targets, out_dir, False)
def GypNinjaBuild(arch, gyp_py_script, gyp_file, targets,
out_dir, force_arm_gcc=True):
gyp_env = dict(os.environ)
gyp_env['GYP_GENERATORS'] = 'ninja'
gyp_defines = []
if options.mac_sdk:
gyp_defines.append('mac_sdk=%s' % options.mac_sdk)
if arch:
gyp_defines.append('target_arch=%s' % arch)
if arch == 'arm':
if getos.GetPlatform() == 'linux':
gyp_env['CC'] = 'arm-linux-gnueabihf-gcc'
gyp_env['CXX'] = 'arm-linux-gnueabihf-g++'
gyp_env['AR'] = 'arm-linux-gnueabihf-ar'
gyp_env['AS'] = 'arm-linux-gnueabihf-as'
gyp_env['CC_host'] = 'cc'
gyp_env['CXX_host'] = 'c++'
gyp_defines += ['armv7=1', 'arm_thumb=0', 'arm_neon=1',
'arm_float_abi=hard']
if force_arm_gcc:
gyp_defines.append('nacl_enable_arm_gcc=1')
if getos.GetPlatform() == 'mac':
gyp_defines.append('clang=1')
gyp_env['GYP_DEFINES'] = ' '.join(gyp_defines)
for key in ['GYP_GENERATORS', 'GYP_DEFINES', 'CC']:
value = gyp_env.get(key)
if value is not None:
print '%s="%s"' % (key, value)
gyp_generator_flags = ['-G', 'output_dir=%s' % (out_dir,)]
gyp_depth = '--depth=.'
buildbot_common.Run(
[sys.executable, gyp_py_script, gyp_file, gyp_depth] + \
gyp_generator_flags,
cwd=SRC_DIR,
env=gyp_env)
NinjaBuild(targets, out_dir)
def NinjaBuild(targets, out_dir):
if type(targets) is not list:
targets = [targets]
out_config_dir = os.path.join(out_dir, 'Release')
buildbot_common.Run(['ninja', '-C', out_config_dir] + targets, cwd=SRC_DIR)
def BuildStepBuildToolchains(pepperdir, toolchains):
buildbot_common.BuildStep('SDK Items')
GypNinjaBuild_NaCl(GYPBUILD_DIR)
GypNinjaBuild_Breakpad(GYPBUILD_DIR)
platform = getos.GetPlatform()
newlibdir = os.path.join(pepperdir, 'toolchain', platform + '_x86_newlib')
glibcdir = os.path.join(pepperdir, 'toolchain', platform + '_x86_glibc')
armdir = os.path.join(pepperdir, 'toolchain', platform + '_arm_newlib')
pnacldir = os.path.join(pepperdir, 'toolchain', platform + '_pnacl')
if set(toolchains) & set(['glibc', 'newlib']):
GypNinjaBuild_PPAPI('ia32', GYPBUILD_DIR)
if 'arm' in toolchains:
GypNinjaBuild_PPAPI('arm', GYPBUILD_DIR + '-arm')
GypNinjaInstall(pepperdir, toolchains)
if 'newlib' in toolchains:
InstallNaClHeaders(GetToolchainNaClInclude('newlib', newlibdir, 'x86'),
'newlib')
if 'glibc' in toolchains:
InstallNaClHeaders(GetToolchainNaClInclude('glibc', glibcdir, 'x86'),
'glibc')
if 'arm' in toolchains:
InstallNaClHeaders(GetToolchainNaClInclude('newlib', armdir, 'arm'),
'arm')
if 'pnacl' in toolchains:
# NOTE: For ia32, gyp builds both x86-32 and x86-64 by default.
for arch in ('ia32', 'arm'):
# Fill in the latest native pnacl shim library from the chrome build.
build_dir = GYPBUILD_DIR + '-pnacl-' + arch
GypNinjaBuild_Pnacl(build_dir, arch)
if arch == 'ia32':
nacl_arches = ['x86-32', 'x86-64']
elif arch == 'arm':
nacl_arches = ['arm']
else:
buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
for nacl_arch in nacl_arches:
release_build_dir = os.path.join(OUT_DIR, build_dir, 'Release',
'gen', 'tc_pnacl_translate',
'lib-' + nacl_arch)
buildbot_common.CopyFile(
os.path.join(release_build_dir, 'libpnacl_irt_shim.a'),
GetPNaClNativeLib(pnacldir, nacl_arch))
InstallNaClHeaders(GetToolchainNaClInclude('pnacl', pnacldir, 'x86'),
'newlib')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
dirpath = os.path.join(pepperdir, dirname)
if clobber:
buildbot_common.RemoveDir(dirpath)
buildbot_common.MakeDir(dirpath)
return dirpath
def BuildStepUpdateHelpers(pepperdir, clobber):
buildbot_common.BuildStep('Update project helpers')
build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
build_experimental, clobber):
buildbot_common.BuildStep('Update examples and libraries')
filters = {}
if not build_experimental:
filters['EXPERIMENTAL'] = False
if toolchains:
toolchains = toolchains[:]
# arm isn't a valid toolchain for build_projects
if 'arm' in toolchains:
toolchains.remove('arm')
if 'host' in toolchains:
toolchains.remove('host')
toolchains.append(getos.GetPlatform())
filters['TOOLS'] = toolchains
# Update examples and libraries
filters['DEST'] = [
'getting_started',
'examples/api',
'examples/demo',
'examples/tutorial',
'src'
]
tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
toolchains=toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
deps=True, clean=False, config='Debug', args=None):
buildbot_common.BuildStep(step_name)
build_projects.BuildProjectsBranch(pepperdir, directory, clean,
deps, config, args)
def BuildStepBuildLibraries(pepperdir, directory):
BuildStepMakeAll(pepperdir, directory, 'Build Libraries Debug',
clean=True, config='Debug')
BuildStepMakeAll(pepperdir, directory, 'Build Libraries Release',
clean=True, config='Release')
# Cleanup .pyc file generated while building libraries. Without
# this we would end up shipping the pyc in the SDK tarball.
buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
# Look for LICENSE files
license_filenames_re = re.compile('LICENSE|COPYING|COPYRIGHT')
license_files = []
for root, _, files in os.walk(fileroot):
for filename in files:
if license_filenames_re.match(filename):
path = os.path.join(root, filename)
license_files.append(path)
if extra_files:
license_files += [os.path.join(fileroot, f) for f in extra_files]
print '\n'.join(license_files)
if not os.path.isabs(output_filename):
output_filename = os.path.join(fileroot, output_filename)
generate_notice.Generate(output_filename, fileroot, license_files)
def BuildStepVerifyFilelist(pepperdir):
buildbot_common.BuildStep('Verify SDK Files')
file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
try:
verify_filelist.Verify(file_list_path, pepperdir)
print 'OK'
except verify_filelist.ParseException, e:
buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
except verify_filelist.VerifyException, e:
file_list_rel = os.path.relpath(file_list_path)
verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
verify_filelist_py = os.path.relpath(verify_filelist_py)
pepperdir_rel = os.path.relpath(pepperdir)
msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
pepperdir_rel)
buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
buildbot_common.BuildStep('Tar Pepper Bundle')
buildbot_common.MakeDir(os.path.dirname(tarfile))
buildbot_common.Run([sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
'pepper_' + pepper_ver], cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
archive_url):
with open(tarfile, 'rb') as tarfile_stream:
archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
tarfile_stream)
archive = manifest_util.Archive(manifest_util.GetHostOS())
archive.url = archive_url
archive.size = archive_size
archive.checksum = archive_sha1
bundle = manifest_util.Bundle('pepper_' + pepper_ver)
bundle.revision = int(chrome_revision)
bundle.repath = 'pepper_' + pepper_ver
bundle.version = int(pepper_ver)
bundle.description = (
'Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
pepper_ver, chrome_revision, nacl_revision))
bundle.stability = 'dev'
bundle.recommended = 'no'
bundle.archives = [archive]
return bundle
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
tarfile):
buildbot_common.BuildStep('Archive %s' % name)
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/%s' % (
build_version.ChromeVersion(),)
tarname = os.path.basename(tarfile)
tarfile_dir = os.path.dirname(tarfile)
buildbot_common.Archive(tarname, bucket_path, tarfile_dir)
# generate "manifest snippet" for this archive.
archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
build_version.ChromeVersion(), tarname)
bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
tarfile, archive_url)
manifest_snippet_file = os.path.join(OUT_DIR, tarname + '.json')
with open(manifest_snippet_file, 'wb') as manifest_snippet_stream:
manifest_snippet_stream.write(bundle.GetDataAsString())
buildbot_common.Archive(tarname + '.json', bucket_path, OUT_DIR,
step_link=False)
def BuildStepArchiveSDKTools():
# Only push up sdk_tools.tgz and nacl_sdk.zip on the linux buildbot.
builder_name = os.getenv('BUILDBOT_BUILDERNAME', '')
if builder_name == 'linux-sdk-multi':
buildbot_common.BuildStep('Build SDK Tools')
build_updater.BuildUpdater(OUT_DIR)
buildbot_common.BuildStep('Archive SDK Tools')
bucket_path = 'nativeclient-mirror/nacl/nacl_sdk/%s' % (
build_version.ChromeVersion(),)
buildbot_common.Archive('sdk_tools.tgz', bucket_path, OUT_DIR,
step_link=False)
buildbot_common.Archive('nacl_sdk.zip', bucket_path, OUT_DIR,
step_link=False)
def BuildStepSyncNaClPorts():
"""Pull the pinned revision of naclports from SVN."""
buildbot_common.BuildStep('Sync naclports')
if not os.path.exists(NACLPORTS_DIR):
# checkout new copy of naclports
cmd = ['svn', 'checkout', '-q', '-r', str(NACLPORTS_REV), NACLPORTS_URL,
'naclports']
buildbot_common.Run(cmd, cwd=os.path.dirname(NACLPORTS_DIR))
else:
# sync existing copy to pinned revision.
cmd = ['svn', 'update', '-r', str(NACLPORTS_REV)]
buildbot_common.Run(cmd, cwd=NACLPORTS_DIR)
def BuildStepBuildNaClPorts(pepper_ver, pepperdir):
"""Build selected naclports in all configurations."""
# TODO(sbc): currently naclports doesn't know anything about
# Debug builds so the Debug subfolders are all empty.
env = dict(os.environ)
env['NACL_SDK_ROOT'] = pepperdir
env['PEPPER_DIR'] = os.path.basename(pepperdir) # pepper_NN
env['NACLPORTS_NO_ANNOTATE'] = "1"
env['NACLPORTS_NO_UPLOAD'] = "1"
build_script = 'build_tools/naclports-linux-sdk-bundle.sh'
buildbot_common.BuildStep('Build naclports')
bundle_dir = os.path.join(NACLPORTS_DIR, 'out', 'sdk_bundle')
out_dir = os.path.join(bundle_dir, 'pepper_%s' % pepper_ver)
# Remove the sdk_bundle directory to remove stale files from previous builds.
buildbot_common.RemoveDir(bundle_dir)
buildbot_common.Run([build_script], env=env, cwd=NACLPORTS_DIR)
# Some naclports do not include a standalone LICENSE/COPYING file
# so we explicitly list those here for inclusion.
extra_licenses = ('tinyxml/readme.txt',
'jpeg-8d/README',
'zlib-1.2.3/README')
src_root = os.path.join(NACLPORTS_DIR, 'out', 'repository-i686')
output_license = os.path.join(out_dir, 'ports', 'LICENSE')
GenerateNotice(src_root , output_license, extra_licenses)
readme = os.path.join(out_dir, 'ports', 'README')
oshelpers.Copy(['-v', os.path.join(SDK_SRC_DIR, 'README.naclports'), readme])
def BuildStepTarNaClPorts(pepper_ver, tarfile):
"""Create tar archive containing headers and libs from naclports build."""
buildbot_common.BuildStep('Tar naclports Bundle')
buildbot_common.MakeDir(os.path.dirname(tarfile))
pepper_dir = 'pepper_%s' % pepper_ver
archive_dirs = [os.path.join(pepper_dir, 'ports')]
ports_out = os.path.join(NACLPORTS_DIR, 'out', 'sdk_bundle')
cmd = [sys.executable, CYGTAR, '-C', ports_out, '-cjf', tarfile]
cmd += archive_dirs
buildbot_common.Run(cmd, cwd=NACL_DIR)
def BuildStepBuildAppEngine(pepperdir, chrome_revision):
"""Build the projects found in src/gonacl_appengine/src"""
buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
cmd = ['make', 'upload', 'REVISION=%s' % chrome_revision]
env = dict(os.environ)
env['NACL_SDK_ROOT'] = pepperdir
env['NACLPORTS_NO_ANNOTATE'] = "1"
buildbot_common.Run(cmd, env=env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
parser = optparse.OptionParser(description=__doc__)
parser.add_option('--bionic', help='Add bionic build.',
action='store_true')
parser.add_option('--tar', help='Force the tar step.',
action='store_true')
parser.add_option('--archive', help='Force the archive step.',
action='store_true')
parser.add_option('--gyp',
help='Use gyp to build examples/libraries/Makefiles.',
action='store_true')
parser.add_option('--release', help='PPAPI release version.',
dest='release', default=None)
parser.add_option('--build-ports',
help='Build naclport bundle.', action='store_true')
parser.add_option('--build-app-engine',
help='Build AppEngine demos.', action='store_true')
parser.add_option('--experimental',
help='build experimental examples and libraries', action='store_true',
dest='build_experimental')
parser.add_option('--skip-toolchain', help='Skip toolchain untar',
action='store_true')
parser.add_option('--mac-sdk',
help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')
# To setup bash completion for this command first install optcomplete
# and then add this line to your .bashrc:
# complete -F _optcomplete build_sdk.py
try:
import optcomplete
optcomplete.autocomplete(parser)
except ImportError:
pass
global options
options, args = parser.parse_args(args[1:])
if args:
parser.error("Unexpected arguments: %s" % str(args))
generate_make.use_gyp = options.gyp
if buildbot_common.IsSDKBuilder():
options.archive = True
options.build_ports = True
options.build_app_engine = True
options.tar = True
toolchains = ['newlib', 'glibc', 'arm', 'pnacl', 'host']
if options.bionic:
toolchains.append('bionic')
print 'Building: ' + ' '.join(toolchains)
if options.archive and not options.tar:
parser.error('Incompatible arguments with archive.')
chrome_version = int(build_version.ChromeMajorVersion())
chrome_revision = build_version.ChromeRevision()
nacl_revision = build_version.NaClRevision()
pepper_ver = str(chrome_version)
pepper_old = str(chrome_version - 1)
pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
tarname = 'naclsdk_' + getos.GetPlatform() + '.tar.bz2'
tarfile = os.path.join(OUT_DIR, tarname)
if options.release:
pepper_ver = options.release
print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)
if 'NACL_SDK_ROOT' in os.environ:
# We don't want the currently configured NACL_SDK_ROOT to have any effect
# of the build.
del os.environ['NACL_SDK_ROOT']
if not options.skip_toolchain:
BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
BuildStepDownloadToolchains(toolchains)
BuildStepUntarToolchains(pepperdir, toolchains)
BuildStepBuildToolchains(pepperdir, toolchains)
BuildStepUpdateHelpers(pepperdir, True)
BuildStepUpdateUserProjects(pepperdir, toolchains,
options.build_experimental, True)
BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)
# Ship with libraries prebuilt, so run that first.
BuildStepBuildLibraries(pepperdir, 'src')
GenerateNotice(pepperdir)
# Verify the SDK contains what we expect.
BuildStepVerifyFilelist(pepperdir)
if options.tar:
BuildStepTarBundle(pepper_ver, tarfile)
if options.build_ports and getos.GetPlatform() == 'linux':
ports_tarfile = os.path.join(OUT_DIR, 'naclports.tar.bz2')
BuildStepSyncNaClPorts()
BuildStepBuildNaClPorts(pepper_ver, pepperdir)
if options.tar:
BuildStepTarNaClPorts(pepper_ver, ports_tarfile)
if options.build_app_engine and getos.GetPlatform() == 'linux':
BuildStepBuildAppEngine(pepperdir, chrome_revision)
# Archive on non-trybots.
if options.archive:
BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
tarfile)
if options.build_ports and getos.GetPlatform() == 'linux':
BuildStepArchiveBundle('naclports', pepper_ver, chrome_revision,
nacl_revision, ports_tarfile)
BuildStepArchiveSDKTools()
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
buildbot_common.ErrorExit('build_sdk: interrupted')
| 34.692611 | 80 | 0.692386 | ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | davgit/chromium.src | native_client_sdk/src/build_tools/build_sdk.py | 35,213 | Python |
# Copyright (c) 2016, 2020, Oracle and/or its affiliates.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Implementation of MySQL Authentication Plugin."""
import hashlib
import struct
from .helpers import hexlify
def xor_string(hash1, hash2, hash_size):
"""Encrypt/Decrypt function used for password encryption in
authentication, using a simple XOR.
Args:
hash1 (str): The first hash.
        hash2 (str): The second hash.
        hash_size (int): Number of bytes to pack into the result.
Returns:
str: A string with the xor applied.
"""
xored = [h1 ^ h2 for (h1, h2) in zip(hash1, hash2)]
return struct.pack("{0}B".format(hash_size), *xored)
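# Illustrative example (hypothetical values): xor_string(b'\x01\x02', b'\x03\x04', 2) == b'\x02\x06'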
class BaseAuthPlugin(object):
"""Base class for implementing the authentication plugins."""
def __init__(self, username=None, password=None):
self._username = username
self._password = password
def name(self):
"""Returns the plugin name.
Returns:
str: The plugin name.
"""
raise NotImplementedError
def auth_name(self):
"""Returns the authentication name.
Returns:
str: The authentication name.
"""
raise NotImplementedError
class MySQL41AuthPlugin(BaseAuthPlugin):
"""Class implementing the MySQL Native Password authentication plugin."""
def name(self):
"""Returns the plugin name.
Returns:
str: The plugin name.
"""
return "MySQL 4.1 Authentication Plugin"
def auth_name(self):
"""Returns the authentication name.
Returns:
str: The authentication name.
"""
return "MYSQL41"
def auth_data(self, data):
"""Hashing for MySQL 4.1 authentication.
Args:
data (str): The authentication data.
Returns:
str: The authentication response.
"""
if self._password:
password = self._password.encode("utf-8") \
if isinstance(self._password, str) else self._password
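            # MySQL 4.1 scramble: SHA1(password) XOR SHA1(nonce + SHA1(SHA1(password))).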
hash1 = hashlib.sha1(password).digest()
hash2 = hashlib.sha1(hash1).digest()
xored = xor_string(hash1, hashlib.sha1(data + hash2).digest(), 20)
return "{0}\0{1}\0*{2}\0".format("", self._username, hexlify(xored))
return "{0}\0{1}\0".format("", self._username)
class PlainAuthPlugin(BaseAuthPlugin):
"""Class implementing the MySQL Plain authentication plugin."""
def name(self):
"""Returns the plugin name.
Returns:
str: The plugin name.
"""
return "Plain Authentication Plugin"
def auth_name(self):
"""Returns the authentication name.
Returns:
str: The authentication name.
"""
return "PLAIN"
def auth_data(self):
"""Returns the authentication data.
Returns:
str: The authentication data.
"""
return "\0{0}\0{1}".format(self._username, self._password)
class Sha256MemoryAuthPlugin(BaseAuthPlugin):
"""Class implementing the SHA256_MEMORY authentication plugin."""
def name(self):
"""Returns the plugin name.
Returns:
str: The plugin name.
"""
return "SHA256_MEMORY Authentication Plugin"
def auth_name(self):
"""Returns the authentication name.
Returns:
str: The authentication name.
"""
return "SHA256_MEMORY"
def auth_data(self, data):
"""Hashing for SHA256_MEMORY authentication.
The scramble is of the form:
SHA256(SHA256(SHA256(PASSWORD)),NONCE) XOR SHA256(PASSWORD)
Args:
data (str): The authentication data.
Returns:
str: The authentication response.
"""
password = self._password.encode("utf-8") \
if isinstance(self._password, str) else self._password
hash1 = hashlib.sha256(password).digest()
hash2 = hashlib.sha256(hashlib.sha256(hash1).digest() + data).digest()
xored = xor_string(hash2, hash1, 32)
return "\0{0}\0{1}".format(self._username, hexlify(xored))
| 31.034286 | 80 | 0.643528 | ["MIT"] | Abdullah9340/Geese-Migration | backend/env/Lib/site-packages/mysqlx/authentication.py | 5,431 | Python |
from os.path import realpath
def main():
inpString = open(f'{realpath(__file__)[:-2]}txt').read()
inpString += '0' * (3 - len(inpString) % 3) # Padding to make it divisible by 3
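    # Note: when the length is already a multiple of 3, this still appends three '0' characters.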
inp = list(inpString)
for i in range(len(inp)):
if inp[i] not in '0123456789abcdef':
inp[i] = '0'
inp = ''.join(inp)
v = len(inp)//3
for i in range(0, len(inp), v):
print(inp[i : i + 2], end='') # Print first 2 char of every 1/3rd part of input
if __name__ == '__main__':
    main()
| 23.956522 | 105 | 0.53539 | ["MIT"] | PROxZIMA/AquaQ-Challenge-Hub | 01/01.py | 551 | Python |
from bughunter.action.core import *
import cgum.statement
class ModifyAssignment(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ModifyAssignment, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
modified = [a.frm() for a in actions['ModifyStatement'] \
if isinstance(a.frm(), cgum.statement.ExprStatement)]
assigns = []
for bef in modified:
assigns += \
bef.collect(lambda n: isinstance(n, cgum.expression.Assignment))
assigns = [(frm, patch.was_is(frm)) for frm in assigns]
actions['ModifyAssignment'] = \
[ModifyAssignment(frm, to) for (frm, to) in assigns if not to is None]
def parts(self):
return [self.to()]
class ReplaceAssignmentLHS(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ReplaceAssignmentLHS, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
l = [(a.frm(), a.to()) for a in actions['ModifyAssignment']]
l = [(frm, to) for (frm, to) in l \
if frm.op().equivalent(to.op())]
l = [(frm, to) for (frm, to) in l \
if not frm.lhs().equivalent(to.lhs())]
l = [(frm, to) for (frm, to) in l \
if frm.rhs().equivalent(to.rhs())]
actions['ReplaceAssignmentLHS'] = \
[ReplaceAssignmentLHS(frm, to) for (frm, to) in l]
def frm_assignment(self):
return self.frm()
def to_assignment(self):
return self.to()
def frm_lhs(self):
return self.frm_assignment().lhs()
def to_lhs(self):
return self.to_assignment().lhs()
def parts(self):
return [self.to_lhs()]
class ReplaceAssignmentRHS(ReplaceRepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ReplaceAssignmentRHS, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
l = [(a.frm(), a.to()) for a in actions['ModifyAssignment']]
l = [(frm, to) for (frm, to) in l \
if frm.op().equivalent(to.op())]
l = [(frm, to) for (frm, to) in l \
if frm.lhs().equivalent(to.lhs())]
l = [(frm, to) for (frm, to) in l \
if not frm.rhs().equivalent(to.rhs())]
actions['ReplaceAssignmentRHS'] = \
[ReplaceAssignmentRHS(frm, to) for (frm, to) in l]
def frm_assignment(self):
return self.frm()
def to_assignment(self):
return self.to()
def frm_rhs(self):
return self.frm_assignment().rhs()
def to_rhs(self):
return self.to_assignment().rhs()
def parts(self):
return [self.to_rhs()]
class ReplaceAssignmentOp(RepairAction):
@staticmethod
def from_json(jsn, before, after):
return ReplaceRepairAction.from_json(ReplaceAssignmentOp, jsn, before, after)
@staticmethod
def detect(patch, stmts_bef, stmts_aft, actions):
l = [(a.frm(), a.to()) for a in actions['ModifyAssignment']]
l = [(frm, to) for (frm, to) in l \
if not frm.op().equivalent(to.op())]
l = [(frm, to) for (frm, to) in l \
if frm.lhs().equivalent(to.lhs())]
l = [(frm, to) for (frm, to) in l \
if frm.rhs().equivalent(to.rhs())]
actions['ReplaceAssignmentOp'] = \
[ReplaceAssignmentOp(frm, to) for (frm, to) in l]
def frm_assignment(self):
return self.frm()
def to_assignment(self):
return self.to()
def frm_op(self):
return self.frm_assignment().op()
def to_op(self):
return self.to_assignment().op()
def parts(self):
return [self.to_op()]
| 35.254545 | 86 | 0.596699 | ["MIT"] | ChrisTimperley/BugCollector | bughunter/action/assignment.py | 3,878 | Python |
# This file is part of postcipes
# (c) Timofey Mukha
# The code is released under the MIT Licence.
# See LICENCE.txt and the Legal section in the README for more information
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .postcipe import Postcipe
import turbulucid as tbl
import numpy as np
import h5py
__all__ = ["BackwardFacingStep"]
class BackwardFacingStep(Postcipe):
def __init__(self, path, nu, uRef):
Postcipe.__init__(self)
self.case = tbl.Case(path)
self.nu = nu
self.uRef = uRef
self.h = np.sum(tbl.edge_lengths(self.case, "stepB"))
self.H = np.sum(tbl.edge_lengths(self.case, "outletB")) - self.h
self.eRatio = (self.H + self.h)/self.H
self.tau1 = \
self.case.boundary_data("lowB1")[1]["wallShearStressMean"][:, 0]
self.tau2 = \
self.case.boundary_data("lowB2")[1]["wallShearStressMean"][:, 0]
self.tau = np.append(self.tau1, self.tau2)
self.x1 = self.case.boundary_data("lowB1")[0][:, 0]
self.x2 = self.case.boundary_data("lowB2")[0][:, 0]
self.x = np.append(self.x1, self.x2)
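        # Index of the x1 station closest to x = -1.05*h, i.e. a reference station
        # roughly 1.05 step heights upstream of the step (assuming the step sits at x = 0).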
self.idx105h = np.argmin(np.abs(self.x1 + 1.05*self.h))
self.uTop = self.case.boundary_data("upB")[1]['UMean'][:, 0]
self.theta = None
self.delta99 = None
self.edgeU = None
def compute_delta99(self, u0='max', interpolate=True):
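        # delta99: wall-normal distance where the mean velocity reaches 99% of the edge value u0
        # (u0 taken as the profile maximum or the last point, depending on the `u0` argument).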
self.delta99 = np.zeros(self.x1.shape[0])
self.edgeU = np.zeros(self.x1.shape[0])
for i in range(self.x1.shape[0]):
x = self.x1[i]
y, v = tbl.profile_along_line(self.case, (x, -1), (x, 10),
correctDistance=True)
self.delta99[i] = tbl.delta_99(y, v['UMean'][:, 0], u0=u0,
interpolate=interpolate)
            if u0 == 'max':
self.edgeU[i] = np.max(v['UMean'][:, 0])
            elif u0 == 'last':
self.edgeU[i] = v['UMean'][-1, 0]
self.reDelta99 = self.delta99*self.edgeU/self.nu
self.reTau = self.delta99*np.sqrt(np.abs(self.tau1))/self.nu
self.delta99105h = self.delta99[self.idx105h]
return 0
def compute_theta(self, u0='max', interpolate=True):
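        # theta: boundary-layer momentum thickness computed from each wall-normal velocity profile.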
self.theta = np.zeros(self.x1.shape[0])
self.edgeU = np.zeros(self.x1.shape[0])
for i in range(self.x1.shape[0]):
x = self.x1[i]
y, v = tbl.profile_along_line(self.case, (x, -1), (x, 10),
correctDistance=True)
self.theta[i] = tbl.momentum_thickness(y, v['UMean'][:, 0], u0=u0,
interpolate=interpolate)
            if u0 == 'max':
self.edgeU[i] = np.max(v['UMean'][:, 0])
            elif u0 == 'last':
self.edgeU[i] = v['UMean'][-1, 0]
self.reTheta = self.theta*self.edgeU/self.nu
self.reTheta105h = self.reTheta[self.idx105h]
return 0
def save(self, name):
f = h5py.File(name, 'w')
f.attrs["h"] = self.h
f.attrs["H"] = self.H
f.attrs["nu"] = self.nu
f.attrs["eRatio"] = self.eRatio
f.attrs["uRef"] = self.uRef
f.attrs["idx105h"] = self.idx105h
f.create_dataset("x1", data=self.x1)
f.create_dataset("x2", data=self.x2)
f.create_dataset("x", data=self.x)
f.create_dataset("uTop", data=self.uTop)
f.create_dataset("tau1", data=self.tau1)
f.create_dataset("tau2", data=self.tau2)
f.create_dataset("tau", data=self.tau)
if self.theta is None:
self.compute_theta()
if self.delta99 is None:
self.compute_delta99()
f.create_dataset("theta", data=self.theta)
f.create_dataset("delta99", data=self.delta99)
f.create_dataset("reTheta", data=self.reTheta)
f.create_dataset("reTau", data=self.reTau)
f.create_dataset("reDelta99", data=self.reDelta99)
f.close()
def load(self, name):
f = h5py.File(name, 'r')
self.h = f.attrs["h"]
self.H = f.attrs["H"]
self.nu = f.attrs["nu"]
self.eRatio = f.attrs["eRatio"]
self.uRef = f.attrs["uRef"]
self.idx105h = f.attrs["idx105h"]
self.x1 = f["x1"][:]
self.x2 = f["x2"][:]
self.x = f["x"][:]
self.uTop = f["uTop"][:]
self.tau1 = f["tau1"][:]
self.tau2 = f["tau2"][:]
self.tau = f["tau"][:]
self.theta = f["theta"][:]
self.delta99 = f["delta99"][:]
self.reTheta = f["reTheta"][:]
self.reTau = f["reTau"][:]
self.reDelta99 = f["reDelta99"][:]
f.close()
| 33.707483 | 79 | 0.52775 | ["MIT"] | Mopolino8/postcipes | postcipes/bfs.py | 4,955 | Python |
from django.contrib import admin
from .models import Post, Reply
# Register your models here.
admin.site.register(Post)
admin.site.register(Reply)
| 18.625 | 32 | 0.791946 | ["MIT"] | EvanPatrick423/Showcase | showcase/post/admin.py | 149 | Python |
from gym_pybullet_drones.envs.multi_agent_rl.BaseMultiagentAviary import BaseMultiagentAviary
from gym_pybullet_drones.envs.multi_agent_rl.FlockAviary import FlockAviary
from gym_pybullet_drones.envs.multi_agent_rl.LeaderFollowerAviary import LeaderFollowerAviary
from gym_pybullet_drones.envs.multi_agent_rl.MeetupAviary import MeetupAviary
from gym_pybullet_drones.envs.multi_agent_rl.PayloadCoop import PayloadCoop
| 83.4 | 93 | 0.918465 | ["MIT"] | MFadhilArkan/gym-pybullet-drones | gym_pybullet_drones/envs/multi_agent_rl/__init__.py | 417 | Python |
from setuptools import find_packages, setup
with open('README.md', 'r') as readme:
long_description = readme.read()
setup(
name='ba_samples',
package_dir={"": "src"},
packages=find_packages('src'),
version='0.1.0-dev0',
description='Examples using ArcGIS Business Analyst with Python.',
long_description=long_description,
author='Joel McCune',
license='Apache 2.0',
)
| 25.375 | 70 | 0.684729 | ["Apache-2.0"] | knu2xs/business-analyst-python-api-examples | setup.py | 406 | Python |
# Listing_19-1.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Trying out sounds in Pygame
import pygame
pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode([640,480])
pygame.time.delay(1000) # Wait a second for the mixer to finish initializing
splat = pygame.mixer.Sound("splat.wav") # Create the Sound object
splat.play() # Play the sound
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
pygame.quit()
| 30.086957 | 82 | 0.628613 | ["Apache-2.0"] | axetang/AxePython | FatherSon/HelloWorld2_source_code/Listing_19-1.py | 692 | Python |
"""
With these settings, tests run faster.
"""
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env(
"DJANGO_SECRET_KEY",
default="4UWuZDCfH6ezPZd4iO85Yg3eZnzistCGFxpeeWPhqlIkDtdbY2rEPnZb9ZPTNZ79",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#test-runner
TEST_RUNNER = "django.test.runner.DiscoverRunner"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = ["django.contrib.auth.hashers.MD5PasswordHasher"]
# TEMPLATES
# ------------------------------------------------------------------------------
TEMPLATES[-1]["OPTIONS"]["loaders"] = [ # type: ignore[index] # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = "django.core.mail.backends.locmem.EmailBackend"
# Your stuff...
# ------------------------------------------------------------------------------
| 33.857143 | 80 | 0.510549 | ["MIT"] | aightmunam/portfolio | config/settings/test.py | 1,422 | Python |
import copy
from functools import wraps
import numpy as np
import wandb
import torchvision
import torch
import torch.nn.functional as F
from kornia import enhance, filters
from torchvision.transforms import RandomApply, RandomChoice
from atariari.methods.utils import EarlyStopping
from torch import nn
from torch.utils.data import BatchSampler, RandomSampler
def default(val, def_val):
return def_val if val is None else val
def flatten(t):
return t.reshape(t.shape[0], -1)
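# Decorator: caches the wrapped factory's result on the instance attribute named by `cache_key`,
# so e.g. the projector and target encoder are only constructed once per instance.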
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
def get_module_device(module):
return next(module.parameters()).device
def set_requires_grad(model, val):
for p in model.parameters():
p.requires_grad = val
# loss fn
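# Equals 2 - 2 * cosine_similarity(x, y) after L2 normalization; minimized when x and y align.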
def loss_fn(x, y):
x = F.normalize(x, dim=-1, p=2)
y = F.normalize(y, dim=-1, p=2)
return 2 - 2 * (x * y).sum(dim=-1)
# augmentation utils
# class RandomApply(nn.Module):
# def __init__(self, fn, p):
# super().__init__()
# self.fn = fn
# self.p = p
# def forward(self, x):
# if random.random() > self.p:
# return x
# return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
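    # theta_target <- beta * theta_target + (1 - beta) * theta_online, parameter by parameter.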
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
# MLP class for projector and predictor
class MLP(nn.Module):
def __init__(self, dim, projection_size, hidden_size=4096):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, hidden_size),
nn.BatchNorm1d(hidden_size),
nn.ReLU(inplace=True),
nn.Linear(hidden_size, projection_size)
)
def forward(self, x):
return self.net(x)
# a wrapper class for the base neural network
# will manage the interception of the hidden layer output
# and pipe it into the projecter and predictor nets
class NetWrapper(nn.Module):
def __init__(self, net, projection_size, projection_hidden_size, layer=-2):
super().__init__()
self.net = net
self.layer = layer # final avg-pooling layer
self.projector = None
self.projection_size = projection_size
self.projection_hidden_size = projection_hidden_size
self.hidden = None
self.hook_registered = False
def _find_layer(self):
if type(self.layer) == str:
modules = dict([*self.net.named_modules()])
return modules.get(self.layer, None)
elif type(self.layer) == int:
children = [*self.net.children()]
return children[self.layer]
return None
def _hook(self, _, __, output):
self.hidden = flatten(output)
def _register_hook(self):
layer = self._find_layer()
assert layer is not None, f'hidden layer ({self.layer}) not found'
handle = layer.register_forward_hook(self._hook)
self.hook_registered = True
@singleton('projector')
def _get_projector(self, hidden):
_, dim = hidden.shape
projector = MLP(dim, self.projection_size, self.projection_hidden_size)
return projector.to(hidden)
def get_representation(self, x):
if self.layer == -1:
return self.net(x)
if not self.hook_registered:
self._register_hook()
_ = self.net(x)
hidden = self.hidden
self.hidden = None
assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
return hidden
def forward(self, x):
representation = self.get_representation(x)
projector = self._get_projector(representation)
projection = projector(representation)
return projection
# main class
class BYOL(nn.Module):
def __init__(self, net, image_size, grayscale=True, num_frame_stack=1, batch_size=64, hidden_layer=-2, projection_size=256, projection_hidden_size=4096, augment_fn=None, augment_fn2=None, moving_average_decay=0.99, wandb=None, patience=15):
super().__init__()
# default SimCLR augmentation
#####
# IMPORTANT for kornia: parameters are often float!! e.g. 1. vs 1
# DEFAULT_AUG = nn.Sequential(
# RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
# augs.RandomHorizontalFlip(),
# RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
# input tensor: float + normalized range [0,1]
# augs.RandomResizedCrop(
# size=(image_size, image_size), scale=(0.84, 1.), ratio=(1.,1.), p=1.0)
# augs.Normalize(mean=torch.tensor(
# [0.485, 0.456, 0.406]), std=torch.tensor([0.229, 0.224, 0.225]))
# )
kernel_size = (9, 9) # has to be ODD
kernel_std = np.random.uniform(low=0.1, high=2.0)
kernel_std = (kernel_std,)*2
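        # The same randomly drawn std is used for both kernel dimensions, giving a (sigma, sigma) tuple.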
aug_transform = torchvision.transforms.Compose([
RandomChoice(
[enhance.AdjustBrightness(0.4),
enhance.AdjustBrightness(0.3),
enhance.AdjustBrightness(0.2),
enhance.AdjustBrightness(0.1),
enhance.AdjustBrightness(0.0)]
),
RandomChoice(
[enhance.AdjustContrast(1.0),
enhance.AdjustContrast(0.9),
enhance.AdjustContrast(0.8),
enhance.AdjustContrast(0.7),
enhance.AdjustContrast(0.6)]
),
RandomApply([filters.GaussianBlur2d(
kernel_size, kernel_std)], p=0.5)
# RandomChoice(
# [enhance.AdjustContrast(1.0),
# enhance.AdjustContrast(1.0),
# enhance.AdjustContrast(1.0),
# filters.GaussianBlur2d((1, 1), (1, 1)),
# filters.GaussianBlur2d((3, 3), (1.5, 1.5))]
# )
])
self.augment1 = default(augment_fn, aug_transform)
self.augment2 = default(augment_fn2, self.augment1)
self.online_encoder = NetWrapper(
net, projection_size, projection_hidden_size, layer=hidden_layer)
self.target_encoder = None
self.target_ema_updater = EMA(moving_average_decay)
self.online_predictor = MLP(
projection_size, projection_size, projection_hidden_size)
self.batch_size = batch_size
# get device of network and make wrapper same device
self.device = torch.device(
'cuda' if torch.cuda.is_available() else 'cpu')
print(f"Device is {self.device.type}")
self.to(self.device)
self.wandb = wandb
self.early_stopper = EarlyStopping(
patience=patience, verbose=False, wandb=self.wandb, name="encoder-byol")
if self.wandb:
wandb.watch(self.online_encoder, self.target_encoder,
self.online_predictor)
# send a mock image tensor to instantiate singleton parameters
assert grayscale
nr_channels = num_frame_stack
self.forward(torch.rand(batch_size, nr_channels,
210, 160, device=self.device))
self.opt = torch.optim.Adam(self.parameters(), lr=3e-4)
print(
f"Finished Initialization of BYOL with model {self.online_encoder.net.__class__.__name__}")
@singleton('target_encoder')
def _get_target_encoder(self):
target_encoder = copy.deepcopy(self.online_encoder)
set_requires_grad(target_encoder, False)
return target_encoder
def reset_moving_average(self):
del self.target_encoder
self.target_encoder = None
def update_moving_average(self):
assert self.target_encoder is not None, 'target encoder has not been created yet'
update_moving_average(self.target_ema_updater,
self.target_encoder, self.online_encoder)
def forward(self, x):
image_one, image_two = self.augment1(x), self.augment2(x)
online_proj_one = self.online_encoder(image_one)
online_proj_two = self.online_encoder(image_two)
online_pred_one = self.online_predictor(online_proj_one)
online_pred_two = self.online_predictor(online_proj_two)
with torch.no_grad():
target_encoder = self._get_target_encoder()
target_proj_one = target_encoder(image_one)
target_proj_two = target_encoder(image_two)
loss_one = loss_fn(online_pred_one, target_proj_two.detach())
loss_two = loss_fn(online_pred_two, target_proj_one.detach())
loss = loss_one + loss_two
return loss.mean()
def logResults(self, epoch_idx, epoch_loss, prefix=""):
print(f"{prefix} Epoch: {epoch_idx}, Loss: {epoch_loss}")
if self.wandb:
self.wandb.log({prefix + '_loss': epoch_loss},
step=epoch_idx, commit=False)
def doOneEpoch(self, nr_epoch, episodes):
mode = "train" if self.training else "val"
data_generator = generate_batch(episodes, self.batch_size, self.device)
for steps, batch in enumerate(data_generator):
print(f"batch nr {steps} for mode {mode}")
loss = self(batch)
self.opt.zero_grad()
loss.backward()
self.opt.step()
self.update_moving_average() # update moving average of target encoder
self.logResults(nr_epoch, loss / steps, prefix=mode)
if mode == "val":
self.early_stopper(-loss / steps, self.online_encoder)
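# A minimal usage sketch (hypothetical names; assumes `encoder` is a torch.nn.Module
# backbone and `train_episodes` / `val_episodes` are lists of episodes, each episode a
# sequence of [num_frame_stack, 210, 160] frame tensors as expected by generate_batch):
#
#   learner = BYOL(encoder, image_size=210, batch_size=64)
#   for epoch in range(num_epochs):
#       learner.train()
#       learner.doOneEpoch(epoch, train_episodes)
#       learner.eval()   # switches the mode used for logging / early stopping to "val"
#       learner.doOneEpoch(epoch, val_episodes)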
def generate_batch(episodes, batch_size, device):
total_steps = sum([len(e) for e in episodes])
print('Total Steps: {}'.format(total_steps))
# Episode sampler
# Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch
sampler = BatchSampler(RandomSampler(range(len(episodes)),
replacement=True, num_samples=total_steps),
batch_size, drop_last=True)
for nr, indices in enumerate(sampler):
x = []
episodes_batch = [episodes[i] for i in indices]
# print(f"indices in sampler nr {nr} are {*indices,}")
for e in episodes_batch:
t = np.random.randint(0, len(e))
x.append(e[t])
yield torch.stack(x).float().to(device) / 255. # SCALING!!!! | 34.246914 | 244 | 0.620944 | [
"MIT"
] | mariodoebler/byol-pytorch | byol_pytorch/byol_pytorch.py | 11,096 | Python |
# SPDX-License-Identifier: MIT
#!/usr/bin/env python3
import os
import sys
from ply import yacc
from ply.lex import TOKEN
from .slexer import SLexer
from ..lib import dbg
from .symbol import (
BinaryOperatorSymbol, ConstraintSymbol, FieldSymbol, ArraySymbol,
CallSymbol, IDSymbol, ConcreteIntSymbol, StringLiteralSymbol
)
# for LALR table reuse
ROOT = os.path.dirname(__file__)
sys.path.append(ROOT)
class SParser(object):
# Precedence rules for the arithmetic operators
precedence = (
('left', 'LOR'),
('left', 'LAND'),
('left', 'OR'),
('left', 'XOR'),
('left', 'AND'),
('left', 'EQ', 'NE'),
('left', 'GT', 'GE', 'LT', 'LE'),
('left', 'RSHIFT', 'LSHIFT'),
('left', 'PLUS', 'MINUS'),
('left', 'TIMES', 'DIVIDE', 'MOD')
)
def __init__(self, **kwargs):
self.slex = SLexer()
self.slex.build()
self.tokens = self.slex.tokens
self.yacc = yacc.yacc(module=self)
def p_expression_1(self, p):
''' expression : binary_expression '''
p[0] = p[1]
def p_binary_expression_1(self, p):
''' binary_expression : cast_expression '''
p[0] = p[1]
def p_binary_expression_2(self, p):
''' binary_expression : binary_expression TIMES binary_expression
| binary_expression DIVIDE binary_expression
| binary_expression MOD binary_expression
| binary_expression PLUS binary_expression
| binary_expression MINUS binary_expression
| binary_expression RSHIFT binary_expression
| binary_expression LSHIFT binary_expression
| binary_expression LT binary_expression
| binary_expression LE binary_expression
| binary_expression GE binary_expression
| binary_expression GT binary_expression
| binary_expression EQ binary_expression
| binary_expression NE binary_expression
| binary_expression AND binary_expression
| binary_expression OR binary_expression
| binary_expression XOR binary_expression
| binary_expression LAND binary_expression
| binary_expression LOR binary_expression
'''
p[0] = BinaryOperatorSymbol(p[1], p[2], p[3])
def p_binary_expression_3(self, p):
# expr CONSTRAINT_OP constraints
''' expression : expression CONSTRAINT_OP LBRACE constraint_list RBRACE '''
p[0] = ConstraintSymbol(p[1], p[4])
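    # e.g. "malloc(256)@={ [0, 0], [2, 18446744073709551615] }" parses the call
    # expression and attaches both integer ranges as constraints (see the test
    # strings at the bottom of this file).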
def p_constraint(self, p):
''' constraint : LBRACKET concrete_integer_expression COMMA concrete_integer_expression RBRACKET '''
p[0] = (p[2], p[4])
def p_constraint_list(self, p):
''' constraint_list : constraint_list COMMA constraint
| constraint '''
if len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]
p[1].append(p[3])
def p_cast_expression_1(self, p):
""" cast_expression : unary_expression """
p[0] = p[1]
def p_unary_expression_1(self, p):
""" unary_expression : postfix_expression """
p[0] = p[1]
def p_unary_expression_2(self, p):
""" unary_expression : AND postfix_expression """
# XXX : needs to handle & operator
p[0] = p[2]
def p_postfix_expression_1(self, p):
''' postfix_expression : primary_expression '''
p[0] = p[1]
def p_postfix_expression_2(self, p):
''' postfix_expression : postfix_expression ARROW ID'''
p[0] = FieldSymbol(p[1], p[3])
def p_postfix_expression3(self, p):
''' postfix_expression : postfix_expression LBRACKET expression RBRACKET '''
p[0] = ArraySymbol(p[1], p[3])
def p_postfix_expression4(self, p):
''' postfix_expression : postfix_expression LPAREN argument_list RPAREN '''
p[0] = CallSymbol(p[1], p[3])
def p_primary_expression_1(self, p):
''' primary_expression : ID '''
p[0] = IDSymbol(p[1])
def p_primary_expression_2(self, p):
''' primary_expression : concrete_integer_expression '''
p[0] = ConcreteIntSymbol(p[1])
def p_primary_expression_3(self, p):
'''primary_expression : LPAREN expression RPAREN'''
p[0] = p[2]
def p_primary_expression_4(self, p):
''' primary_expression : STRING_LITERAL '''
p[0] = StringLiteralSymbol(p[1])
def p_concrete_integer(self, p):
''' concrete_integer_expression : INT_CONST_DEC
| MINUS INT_CONST_DEC '''
if len(p) == 3:
p[0] = -int(p[2])
else:
p[0] = int(p[1])
def p_argument_list(self, p):
''' argument_list :
| expression
| argument_list COMMA expression '''
if len(p) == 1:
p[0] = []
elif len(p) == 2:
p[0] = [p[1]]
else:
p[0] = p[1]
p[1].append(p[3])
def parse(self, text):
self.last_text = text
return self.yacc.parse(input = text,
lexer = self.slex)
def p_error(self, p):
#dbg.debug('Illegal token %s' % repr(p))
#dbg.debug('Text : %s' % self.last_text)
return
if __name__ == '__main__':
parser = SParser()
tests = ["\"String Literal\\n\"",
"malloc(256)@={ [0, 0] }",
"malloc(256)@={ [0, 0], [2, 18446744073709551615] }"]
for test in tests:
        print(parser.parse(test))
| 35.005848 | 109 | 0.538423 | [
"MIT"
] | oslab-swrc/apisan | analyzer/apisan/parse/sparser.py | 5,986 | Python |
""" StreamSort Projects Extension -- Constants
Copyright (c) 2021 IdmFoundInHim, under MIT License
"""
SINGLE_MAX_MS = 15 * 60 * 1000
SINGLE_MAX_TRACKS = 4
| 22.571429 | 51 | 0.740506 | [
"MIT"
] | IdmFoundInHim/streamsort | projects/constants.py | 158 | Python |
# -*- coding: utf-8 -*-
""" XIO plugin for the minicbf format of images (DECTRIS-PILATUS).
"""
__version__ = "0.2.1"
__author__ = "Pierre Legrand ([email protected])"
__date__ = "23-09-2012"
__copyright__ = "Copyright (c) 2009-2012 Pierre Legrand"
__license__ = "New BSD, http://www.opensource.org/licenses/bsd-license.php"
import time
HEADER_KEYS = ["Detector:", "Pixel_size", "Silicon", "Exposure_time",
"Exposure_period", "Tau", "Count_cutoff", "Threshold_setting",
"N_excluded_pixels","Excluded_pixels:", "Flat_field:", "Trim_directory:",
"Wavelength", "Energy_range", "Detector_distance", "Detector_Voffset",
"Beam_xy","Flux","Filter_transmission","Start_angle", "Angle_increment",
"Detector_2theta", "Polarization", "Alpha", "Kappa", "Phi", "Chi",
"Oscillation_axis", "N_oscillations"]
def date_time(timestr):
"from str return timestr + msec"
t_a, t_b = timestr.split(".")
return time.strptime(t_a, "%Y/%b/%d %H:%M:%S"), float("0."+t_b)
def date_seconds(timestr):
"from str return seconds"
t_a, msec = date_time(timestr)
return time.mktime(t_a) + msec
def get_edge_resolution(pixel_x, width, distance, wavelength):
"Calculate EdgeResolution"
from math import sin, atan
if abs(DISTANCE(distance)) > 0.0:
rad = 0.5 * float(FLOAT2(pixel_x)) * int(width)
return FLOAT1(wavelength)/(2*sin(0.5*atan(rad/DISTANCE(distance))))
else:
return 0.
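# Note: get_edge_resolution applies Bragg's law, d = lambda / (2*sin(theta)), with
# theta = 0.5*atan(r/D), where r = 0.5 * pixel size * detector width (in mm) and
# D is the detector distance (in mm).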
FLOAT1 = lambda x: float(x.split()[0])
FLOAT2 = lambda x: float(x.split()[0])*1e3
def DISTANCE(inp):
    "Return a distance in mm (values given in metres, e.g. '0.4 m', are converted)."
    args = inp.split()
    try:
        if args[1] == "m":
            return float(args[0])*1e3
        return float(args[0])
    except IndexError:
        return float(args[0])
BEAMX = lambda x, y: float(x[x.find("(")+1:x.find(")")-1].split(",")[0])\
*FLOAT2(y)
BEAMY = lambda x, y: float(x[x.find("(")+1:x.find(")")-1].split(",")[1])\
*FLOAT2(y)
class Interpreter:
"Dummy class, container for standard Dict and Function."
HTD = {
# The adsc Header Translator Dictionary.
# Potential problems:
# - There are multiple SIZE1, SIZE2 instances.
# = The orientation of SIZE1 and SIZE2 is unknown
# Not a problem as long as SIZE1 = SIZE2..
'ExposureTime':(['Exposure_time'], FLOAT1),
'BeamX':(['Beam_xy', 'Pixel_size'], BEAMX),
'BeamY':(['Beam_xy', 'Pixel_size'], BEAMY),
'Distance':(['Detector_distance'], DISTANCE),
'Wavelength':(['Wavelength'], FLOAT1),
'PixelX':(['Pixel_size'], FLOAT2),
'PixelY':(['Pixel_size'], FLOAT2),
'Width':(['Binary-Size-Fastest-Dimension'], int),
'Height':(['Binary-Size-Second-Dimension'], int),
#'Message':(['MESSAGE'], lambda x: x.split(';')),
'PhiStart':(['Start_angle'], FLOAT1),
'PhiEnd':(['Start_angle', 'Angle_increment'], \
lambda x, y: FLOAT1(x)+FLOAT1(y)),
'PhiWidth':(['Angle_increment'], FLOAT1),
'EdgeResolution':(['Pixel_size','Binary-Size-Second-Dimension','Detector_distance','Wavelength'], \
get_edge_resolution),
# Added keys from Graeme's convention.
'TwoTheta':(['Detector_2theta'], FLOAT1), # No example yet...
'SerialNumber':(['Detector:'], str),
'HeaderSize':(['HEADER_SIZE'], int),
'OscAxis':(['Oscillation_axis'], lambda x: x.split(",")[0].lower().strip()),
'DateStr':(['DATE'], str),
'DateSeconds':(['DATE'], date_seconds),
}
SpecialRules = {
# No special rules for now
}
Identifiers = {
# Based on Serial Number. Contains (Synchrotron,BLname,DetectorType)
#413:('ESRF','ID14EH2','ADSC Q4'),
#420:('ESRF','ID14EH4','ADSC Q4R'),
}
def __init__(self):
self.raw_head_dict = None
def getRawHeadDict(self, raw_head):
"Intepret the ascii structure of the minicbf image header."
i_1 = 28+raw_head.find("_array_data.header_contents")
i_2 = raw_head.find("_array_data.data", i_1)
i_3 = raw_head.find("--CIF-BINARY-FORMAT-SECTION--", i_2)+29
i_4 = i_3+500
lis = [line[2:].strip().split(" ", 1) \
for line in raw_head[i_1:i_2].splitlines() \
if line and line[0]=="#"]
lis2 = [line[2:].strip().split(": ", 1) \
for line in raw_head[i_3:i_4].splitlines() \
if line and line[0:2]=="X-"]
# Filling the raw_header_dict with some default values,
# in case they are missing in the header.
self.raw_head_dict = {'Detector_2theta': "0.", 'MESSAGE': ''}
for val in lis:
if (val[0] in HEADER_KEYS):
if len(val) == 2:
self.raw_head_dict[val[0]] = val[1]
else:
self.raw_head_dict[val[0]] = None
self.raw_head_dict.update(dict([ val for val in lis2 \
if "Binary-" in val[0]]))
# Add some default values
self.raw_head_dict.update({'HEADER_SIZE': i_3})
self.raw_head_dict.update({'DATE': " ".join(lis[1])})
#self.raw_head_dict.update({'MESSAGE': '', 'TWO_THETA': '0',
# 'Beam_xy':"(1330.30, 1314.90)",
# 'Detector_distance': "0.4 m",
# 'Wavelength':"0.980 A",
# 'Angle_increment':"0.2 deg",
# 'Start_angle': "0. deg",
# 'Detector_2theta': "0. deg"})
return self.raw_head_dict
| 40.172662 | 103 | 0.566082 | [
"BSD-3-Clause"
] | harumome/kamo | yamtbx/dataproc/XIO/plugins/minicbf_interpreter.py | 5,584 | Python |
from src.model import unpool_resize,unpool_deconv, unpool_checkerboard, unpool_simple
from tensorflow.keras.layers import Input, UpSampling2D
from tensorflow.keras.models import Model
input = Input(shape=(20, 20, 3))
out1 = unpool_resize(input)
model1 = Model(inputs=input, outputs=out1)
print("")
out2 = unpool_deconv(input,512)
model2 = Model(inputs=input, outputs=out2)
print("")
out3 = UpSampling2D((2,2))(input)
out3 = unpool_checkerboard(out3)
model3 = Model(inputs=input, outputs=out3)
print("")
| 25.4 | 85 | 0.769685 | [
"MIT"
] | rcmalli/polimi-dl-project | tools/unpool_test.py | 508 | Python |
# -*- coding: utf-8 -
#
# This file is part of gaffer. See the NOTICE for more information.
import os
import sys
from setuptools import setup, find_packages, Extension
py_version = sys.version_info[:2]
if py_version < (2, 6):
raise RuntimeError('On Python 2, Gaffer requires Python 2.6 or better')
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Topic :: System :: Boot',
'Topic :: System :: Monitoring',
'Topic :: System :: Systems Administration',
'Topic :: Software Development :: Libraries']
# read long description
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f:
long_description = f.read()
DATA_FILES = [
('gaffer', ["LICENSE", "MANIFEST.in", "NOTICE", "README.rst",
"THANKS", "UNLICENSE"])
]
setup(name='gaffer',
version="0.4.1",
description = 'simple system process manager',
long_description = long_description,
classifiers = CLASSIFIERS,
license = 'BSD',
url = 'http://github.com/benoitc/gaffer',
author = 'Benoit Chesneau',
author_email = '[email protected]',
packages=find_packages(),
ext_modules = [
Extension("gaffer.sync", ["gaffer/sync.c"])
],
install_requires = [
'pyuv>=0.8.3',
'six',
'psutil',
'tornado==2.4',
'colorama',
'setproctitle'
],
data_files = DATA_FILES,
entry_points="""
[console_scripts]
gafferd=gaffer.node.gafferd:run
gafferctl=gaffer.node.gafferctl:run
gaffer=gaffer.pm.main:main
""")
| 28.333333 | 75 | 0.6 | [
"MIT",
"Unlicense"
] | mikiec84/gaffer | setup.py | 2,125 | Python |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This example performs several tasks on Google Compute Platform. It can be
# run directly or can be imported into an interactive python session. This
# can also serve as live integration tests.
#
# To run directly, use python 2.7 or greater:
# - $ python gce_demo.py --help # to see the help screen
# - $ python gce_demo.py # to run all demos / tests
#
# To run interactively:
# - Make sure you have valid values in secrets.py
# (For more information about setting up your credentials, see the
# libcloud/common/google.py docstring)
# - Run 'python' in this directory, then:
# import gce_demo
# gce = gce_demo.get_gce_driver()
# gce.list_nodes()
# etc.
# - Or, to run the full demo from the interactive python shell:
# import gce_demo
# gce_demo.CLEANUP = False # optional
# gce_demo.MAX_NODES = 4 # optional
# gce_demo.DATACENTER = 'us-central1-a' # optional
# gce_demo.main_compute() # 'compute' only demo
# gce_demo.main_load_balancer() # 'load_balancer' only demo
# gce_demo.main_dns() # 'dns only demo
# gce_demo.main() # all demos / tests
import os.path
import sys
import datetime
import time
try:
import argparse
except:
print('This script uses the python "argparse" module. Please use Python '
'2.7 or greater.')
raise
try:
import secrets
except ImportError:
print('"demos/secrets.py" not found.\n\n'
'Please copy secrets.py-dist to secrets.py and update the GCE* '
'values with appropriate authentication information.\n'
'Additional information about setting these values can be found '
'in the docstring for:\n'
'libcloud/common/google.py\n')
sys.exit(1)
# Add parent dir of this file's dir to sys.path (OS-agnostically)
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__),
os.path.pardir)))
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import ResourceNotFoundError
from libcloud.loadbalancer.types import Provider as Provider_lb
from libcloud.loadbalancer.providers import get_driver as get_driver_lb
from libcloud.dns.types import Provider as Provider_dns
from libcloud.dns.providers import get_driver as get_driver_dns
from libcloud.dns.base import Record, Zone
from libcloud.utils.py3 import PY3
if PY3:
import urllib.request as url_req
else:
import urllib2 as url_req
# Maximum number of 1-CPU nodes to allow to run simultaneously
MAX_NODES = 5
# String that all resource names created by the demo will start with
# WARNING: Any resource that has a matching name will be destroyed.
DEMO_BASE_NAME = 'lct'
# Datacenter to create resources in
DATACENTER = 'us-central1-f'
# Clean up resources at the end (can be set to false in order to
# inspect resources at the end of the run). Resources will be cleaned
# at the beginning regardless.
CLEANUP = True
args = getattr(secrets, 'GCE_PARAMS', ())
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
# Add datacenter to kwargs for Python 2.5 compatibility
kwargs = kwargs.copy()
kwargs['datacenter'] = DATACENTER
# ==== HELPER FUNCTIONS ====
def get_gce_driver():
driver = get_driver(Provider.GCE)(*args, **kwargs)
return driver
def get_gcelb_driver(gce_driver=None):
# The GCE Load Balancer driver uses the GCE Compute driver for all of its
# API calls. You can either provide the driver directly, or provide the
# same authentication information so the LB driver can get its own
# Compute driver.
if gce_driver:
driver = get_driver_lb(Provider_lb.GCE)(gce_driver=gce_driver)
else:
driver = get_driver_lb(Provider_lb.GCE)(*args, **kwargs)
return driver
def get_dns_driver(gce_driver=None):
# The Google DNS driver uses the GCE Compute driver for all of its
# API calls. You can either provide the driver directly, or provide the
# same authentication information so the LB driver can get its own
# Compute driver.
if gce_driver:
driver = get_driver_dns(Provider_dns.GOOGLE)(gce_driver=gce_driver)
else:
driver = get_driver_dns(Provider_dns.GOOGLE)(*args, **kwargs)
return driver
def display(title, resource_list=[]):
"""
Display a list of resources.
:param title: String to be printed at the heading of the list.
:type title: ``str``
:param resource_list: List of resources to display
:type resource_list: Any ``object`` with a C{name} attribute
"""
print('=> %s' % title)
for item in resource_list:
if isinstance(item, Record):
if item.name.startswith(DEMO_BASE_NAME):
print('=> name=%s, type=%s' % (item.name, item.type))
else:
print(' name=%s, type=%s' % (item.name, item.type))
elif isinstance(item, Zone):
if item.domain.startswith(DEMO_BASE_NAME):
print('=> name=%s, dnsname=%s' % (item.id, item.domain))
else:
print(' name=%s, dnsname=%s' % (item.id, item.domain))
elif hasattr(item, 'name'):
if item.name.startswith(DEMO_BASE_NAME):
print('=> %s' % item.name)
else:
print(' %s' % item.name)
else:
if item.startswith(DEMO_BASE_NAME):
print('=> %s' % item)
else:
print(' %s' % item)
def cleanup_only():
start_time = datetime.datetime.now()
display('Clean-up start time: %s' % str(start_time))
gce = get_gce_driver()
# Get project info and print name
project = gce.ex_get_project()
display('Project: %s' % project.name)
# == Get Lists of Everything and Display the lists (up to 10) ==
# These can either just return values for the current datacenter (zone)
# or for everything.
all_nodes = gce.list_nodes(ex_zone='all')
display('Nodes:', all_nodes)
all_addresses = gce.ex_list_addresses(region='all')
display('Addresses:', all_addresses)
all_volumes = gce.list_volumes(ex_zone='all')
display('Volumes:', all_volumes)
# This can return everything, but there is a large amount of overlap,
# so we'll just get the sizes from the current zone.
sizes = gce.list_sizes()
display('Sizes:', sizes)
# These are global
firewalls = gce.ex_list_firewalls()
display('Firewalls:', firewalls)
networks = gce.ex_list_networks()
display('Networks:', networks)
images = gce.list_images()
display('Images:', images)
locations = gce.list_locations()
display('Locations:', locations)
zones = gce.ex_list_zones()
display('Zones:', zones)
snapshots = gce.ex_list_snapshots()
display('Snapshots:', snapshots)
# == Clean up any old demo resources ==
display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
clean_up(gce, DEMO_BASE_NAME, all_nodes,
all_addresses + all_volumes + firewalls + networks + snapshots)
volumes = gce.list_volumes()
clean_up(gce, DEMO_BASE_NAME, None, volumes)
end_time = datetime.datetime.now()
display('Total runtime: %s' % str(end_time - start_time))
def clean_up(gce, base_name, node_list=None, resource_list=None):
"""
Destroy all resources that have a name beginning with 'base_name'.
:param base_name: String with the first part of the name of resources
to destroy
:type base_name: ``str``
:keyword node_list: List of nodes to consider for deletion
:type node_list: ``list`` of :class:`Node`
:keyword resource_list: List of resources to consider for deletion
:type resource_list: ``list`` of I{Resource Objects}
"""
if node_list is None:
node_list = []
if resource_list is None:
resource_list = []
# Use ex_destroy_multiple_nodes to destroy nodes
del_nodes = []
for node in node_list:
if node.name.startswith(base_name):
del_nodes.append(node)
result = gce.ex_destroy_multiple_nodes(del_nodes)
for i, success in enumerate(result):
if success:
display(' Deleted %s' % del_nodes[i].name)
else:
display(' Failed to delete %s' % del_nodes[i].name)
# Destroy everything else with just the destroy method
for resrc in resource_list:
if resrc.name.startswith(base_name):
try:
resrc.destroy()
except ResourceNotFoundError:
display(' Not found: %s (%s)' % (resrc.name,
resrc.__class__.__name__))
except:
class_name = resrc.__class__.__name__
display(' Failed to Delete %s (%s)' % (resrc.name,
class_name))
raise
# ==== COMPUTE CODE STARTS HERE ====
def main_compute():
start_time = datetime.datetime.now()
display('Compute demo/test start time: %s' % str(start_time))
gce = get_gce_driver()
# Get project info and print name
project = gce.ex_get_project()
display('Project: %s' % project.name)
# == Get Lists of Everything and Display the lists (up to 10) ==
# These can either just return values for the current datacenter (zone)
# or for everything.
all_nodes = gce.list_nodes(ex_zone='all')
display('Nodes:', all_nodes)
all_addresses = gce.ex_list_addresses(region='all')
display('Addresses:', all_addresses)
all_volumes = gce.list_volumes(ex_zone='all')
display('Volumes:', all_volumes)
# This can return everything, but there is a large amount of overlap,
# so we'll just get the sizes from the current zone.
sizes = gce.list_sizes()
display('Sizes:', sizes)
# These are global
firewalls = gce.ex_list_firewalls()
display('Firewalls:', firewalls)
networks = gce.ex_list_networks()
display('Networks:', networks)
images = gce.list_images()
display('Images:', images)
locations = gce.list_locations()
display('Locations:', locations)
zones = gce.ex_list_zones()
display('Zones:', zones)
snapshots = gce.ex_list_snapshots()
display('Snapshots:', snapshots)
# == Clean up any old demo resources ==
display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
clean_up(gce, DEMO_BASE_NAME, all_nodes,
all_addresses + all_volumes + firewalls + networks + snapshots)
# == Create Node with disk auto-created ==
if MAX_NODES > 1:
display('Creating a node with boot/local-ssd using GCE structure:')
name = '%s-gstruct' % DEMO_BASE_NAME
img_url = "projects/debian-cloud/global/images/"
img_url += "backports-debian-7-wheezy-v20141205"
disk_type_url = "projects/%s/zones/us-central1-f/" % project.name
disk_type_url += "diskTypes/local-ssd"
gce_disk_struct = [
{
"type": "PERSISTENT",
"deviceName": '%s-gstruct' % DEMO_BASE_NAME,
"initializeParams": {
"diskName": '%s-gstruct' % DEMO_BASE_NAME,
"sourceImage": img_url
},
"boot": True,
"autoDelete": True
},
{
"type": "SCRATCH",
"deviceName": '%s-gstruct-lssd' % DEMO_BASE_NAME,
"initializeParams": {
"diskType": disk_type_url
},
"autoDelete": True
}
]
node_gstruct = gce.create_node(name, 'n1-standard-1', None,
'us-central1-f',
ex_disks_gce_struct=gce_disk_struct)
num_disks = len(node_gstruct.extra['disks'])
display(' Node %s created with %d disks' % (node_gstruct.name,
num_disks))
display('Creating Node with auto-created SSD:')
name = '%s-np-node' % DEMO_BASE_NAME
node_1 = gce.create_node(name, 'n1-standard-1', 'debian-7',
ex_tags=['libcloud'], ex_disk_type='pd-ssd',
ex_disk_auto_delete=False)
display(' Node %s created' % name)
# == Create, and attach a disk ==
display('Creating a new disk:')
disk_name = '%s-attach-disk' % DEMO_BASE_NAME
volume = gce.create_volume(10, disk_name)
if volume.attach(node_1):
display(' Attached %s to %s' % (volume.name, node_1.name))
display(' Disabled auto-delete for %s on %s' % (volume.name,
node_1.name))
gce.ex_set_volume_auto_delete(volume, node_1, auto_delete=False)
if CLEANUP:
# == Detach the disk ==
if gce.detach_volume(volume, ex_node=node_1):
display(' Detached %s from %s' % (volume.name,
node_1.name))
# == Create Snapshot ==
display('Creating a snapshot from existing disk:')
# Create a disk to snapshot
vol_name = '%s-snap-template' % DEMO_BASE_NAME
image = gce.ex_get_image('debian-7')
vol = gce.create_volume(None, vol_name, image=image)
    display('Created disk %s to snapshot:' % DEMO_BASE_NAME)
# Snapshot volume
snapshot = vol.snapshot('%s-snapshot' % DEMO_BASE_NAME)
display(' Snapshot %s created' % snapshot.name)
# == Create Node with existing disk ==
display('Creating Node with existing disk:')
name = '%s-persist-node' % DEMO_BASE_NAME
# Use objects this time instead of names
# Get latest Debian 7 image
image = gce.ex_get_image('debian-7')
# Get Machine Size
size = gce.ex_get_size('n1-standard-1')
# Create Disk from Snapshot created above
volume_name = '%s-boot-disk' % DEMO_BASE_NAME
volume = gce.create_volume(None, volume_name, snapshot=snapshot)
display(' Created %s from snapshot' % volume.name)
# Create Node with Disk
node_2 = gce.create_node(name, size, image, ex_tags=['libcloud'],
ex_boot_disk=volume,
ex_disk_auto_delete=False)
display(' Node %s created with attached disk %s' % (node_2.name,
volume.name))
# == Update Tags for Node ==
display('Updating Tags for %s:' % node_2.name)
tags = node_2.extra['tags']
tags.append('newtag')
if gce.ex_set_node_tags(node_2, tags):
display(' Tags updated for %s' % node_2.name)
check_node = gce.ex_get_node(node_2.name)
display(' New tags: %s' % check_node.extra['tags'])
# == Setting Metadata for Node ==
display('Setting Metadata for %s:' % node_2.name)
if gce.ex_set_node_metadata(node_2, {'foo': 'bar', 'baz': 'foobarbaz'}):
display(' Metadata updated for %s' % node_2.name)
check_node = gce.ex_get_node(node_2.name)
display(' New Metadata: %s' % check_node.extra['metadata'])
# == Create Multiple nodes at once ==
base_name = '%s-multiple-nodes' % DEMO_BASE_NAME
number = MAX_NODES - 2
if number > 0:
display('Creating Multiple Nodes (%s):' % number)
multi_nodes = gce.ex_create_multiple_nodes(base_name, size, image,
number,
ex_tags=['libcloud'],
ex_disk_auto_delete=True)
for node in multi_nodes:
display(' Node %s created' % node.name)
# == Create a Network ==
display('Creating Network:')
name = '%s-network' % DEMO_BASE_NAME
cidr = '10.10.0.0/16'
network_1 = gce.ex_create_network(name, cidr)
display(' Network %s created' % network_1.name)
# == Create a Firewall ==
display('Creating a Firewall:')
name = '%s-firewall' % DEMO_BASE_NAME
allowed = [{'IPProtocol': 'tcp',
'ports': ['3141']}]
firewall_1 = gce.ex_create_firewall(name, allowed, network=network_1,
source_tags=['libcloud'])
display(' Firewall %s created' % firewall_1.name)
# == Create a Static Address ==
display('Creating an Address:')
name = '%s-address' % DEMO_BASE_NAME
address_1 = gce.ex_create_address(name)
display(' Address %s created with IP %s' % (address_1.name,
address_1.address))
# == List Updated Resources in current zone/region ==
display('Updated Resources in current zone/region')
nodes = gce.list_nodes()
display('Nodes:', nodes)
addresses = gce.ex_list_addresses()
display('Addresses:', addresses)
firewalls = gce.ex_list_firewalls()
display('Firewalls:', firewalls)
networks = gce.ex_list_networks()
display('Networks:', networks)
snapshots = gce.ex_list_snapshots()
display('Snapshots:', snapshots)
if CLEANUP:
display('Cleaning up %s resources created' % DEMO_BASE_NAME)
clean_up(gce, DEMO_BASE_NAME, nodes,
addresses + firewalls + networks + snapshots)
volumes = gce.list_volumes()
clean_up(gce, DEMO_BASE_NAME, None, volumes)
end_time = datetime.datetime.now()
display('Total runtime: %s' % str(end_time - start_time))
# ==== LOAD BALANCER CODE STARTS HERE ====
def main_load_balancer():
start_time = datetime.datetime.now()
display('Load-balancer demo/test start time: %s' % str(start_time))
gce = get_gce_driver()
gcelb = get_gcelb_driver(gce)
# Get project info and print name
project = gce.ex_get_project()
display('Project: %s' % project.name)
# Existing Balancers
balancers = gcelb.list_balancers()
display('Load Balancers', balancers)
# Protocols
protocols = gcelb.list_protocols()
display('Protocols', protocols)
# Healthchecks
healthchecks = gcelb.ex_list_healthchecks()
display('Health Checks', healthchecks)
# This demo is based on the GCE Load Balancing Quickstart described here:
# https://developers.google.com/compute/docs/load-balancing/lb-quickstart
# == Clean-up and existing demo resources ==
all_nodes = gce.list_nodes(ex_zone='all')
firewalls = gce.ex_list_firewalls()
display('Cleaning up any "%s" resources' % DEMO_BASE_NAME)
clean_up(gce, DEMO_BASE_NAME, all_nodes,
balancers + healthchecks + firewalls)
# == Create 3 nodes to balance between ==
startup_script = ('apt-get -y update && '
'apt-get -y install apache2 && '
'hostname > /var/www/index.html')
tag = '%s-www' % DEMO_BASE_NAME
base_name = '%s-www' % DEMO_BASE_NAME
image = gce.ex_get_image('debian-7')
size = gce.ex_get_size('n1-standard-1')
number = 3
display('Creating %d nodes' % number)
metadata = {'items': [{'key': 'startup-script',
'value': startup_script}]}
lb_nodes = gce.ex_create_multiple_nodes(base_name, size, image,
number, ex_tags=[tag],
ex_metadata=metadata,
ex_disk_auto_delete=True,
ignore_errors=False)
display('Created Nodes', lb_nodes)
# == Create a Firewall for instances ==
display('Creating a Firewall')
name = '%s-firewall' % DEMO_BASE_NAME
allowed = [{'IPProtocol': 'tcp',
'ports': ['80']}]
firewall = gce.ex_create_firewall(name, allowed, source_tags=[tag])
display(' Firewall %s created' % firewall.name)
# == Create a Health Check ==
display('Creating a HealthCheck')
name = '%s-healthcheck' % DEMO_BASE_NAME
# These are all the default values, but listed here as an example. To
# create a healthcheck with the defaults, only name is required.
hc = gcelb.ex_create_healthcheck(name, host=None, path='/', port='80',
interval=5, timeout=5,
unhealthy_threshold=2,
healthy_threshold=2)
display('Healthcheck %s created' % hc.name)
# == Create Load Balancer ==
display('Creating Load Balancer')
name = '%s-lb' % DEMO_BASE_NAME
port = 80
protocol = 'tcp'
algorithm = None
members = lb_nodes[:2] # Only attach the first two initially
healthchecks = [hc]
balancer = gcelb.create_balancer(name, port, protocol, algorithm, members,
ex_healthchecks=healthchecks)
display(' Load Balancer %s created' % balancer.name)
# == Attach third Node ==
display('Attaching additional node to Load Balancer')
member = balancer.attach_compute_node(lb_nodes[2])
display(' Attached %s to %s' % (member.id, balancer.name))
# == Show Balancer Members ==
members = balancer.list_members()
display('Load Balancer Members')
for member in members:
display(' ID: %s IP: %s' % (member.id, member.ip))
# == Remove a Member ==
display('Removing a Member')
detached = members[0]
detach = balancer.detach_member(detached)
if detach:
display(' Member %s detached from %s' % (detached.id,
balancer.name))
# == Show Updated Balancer Members ==
members = balancer.list_members()
display('Updated Load Balancer Members')
for member in members:
display(' ID: %s IP: %s' % (member.id, member.ip))
# == Reattach Member ==
display('Reattaching Member')
member = balancer.attach_member(detached)
display(' Member %s attached to %s' % (member.id, balancer.name))
# == Test Load Balancer by connecting to it multiple times ==
PAUSE = 60
display('Sleeping for %d seconds for LB members to serve...' % PAUSE)
time.sleep(PAUSE)
rounds = 200
url = 'http://%s/' % balancer.ip
line_length = 75
display('Connecting to %s %s times' % (url, rounds))
for x in range(rounds):
response = url_req.urlopen(url)
if PY3:
output = str(response.read(), encoding='utf-8').strip()
else:
output = response.read().strip()
if 'www-001' in output:
padded_output = output.center(line_length)
elif 'www-002' in output:
padded_output = output.rjust(line_length)
else:
padded_output = output.ljust(line_length)
sys.stdout.write('\r%s' % padded_output)
sys.stdout.flush()
time.sleep(.25)
print ""
if CLEANUP:
balancers = gcelb.list_balancers()
healthchecks = gcelb.ex_list_healthchecks()
nodes = gce.list_nodes(ex_zone='all')
firewalls = gce.ex_list_firewalls()
display('Cleaning up %s resources created' % DEMO_BASE_NAME)
clean_up(gce, DEMO_BASE_NAME, nodes,
balancers + healthchecks + firewalls)
end_time = datetime.datetime.now()
display('Total runtime: %s' % str(end_time - start_time))
# ==== GOOGLE DNS CODE STARTS HERE ====
def main_dns():
start_time = datetime.datetime.now()
display('DNS demo/test start time: %s' % str(start_time))
gce = get_gce_driver()
gdns = get_dns_driver()
# Get project info and print name
project = gce.ex_get_project()
display('Project: %s' % project.name)
# Get list of managed zones
zones = gdns.iterate_zones()
display('Zones', zones)
# Get list of records
zones = gdns.iterate_zones()
for z in zones:
records = gdns.iterate_records(z)
display('Records for managed zone "%s"' % z.id, records)
# TODO(erjohnso): Finish this DNS section. Challenging in that you need to
# own a domain, so testing will require user customization. Perhaps a new
# command-line required flag unless --skip-dns is supplied. Also, real
# e2e testing should try to do DNS lookups on new records, but DNS TTL
# and propagation delays will introduce limits on what can be tested.
end_time = datetime.datetime.now()
display('Total runtime: %s' % str(end_time - start_time))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Google Cloud Platform Demo / Live Test Script')
parser.add_argument("--compute",
help="perform compute demo / live tests",
dest="compute", action="store_true")
parser.add_argument("--load-balancer",
help="perform load-balancer demo / live tests",
dest="lb", action="store_true")
parser.add_argument("--dns",
help="perform DNS demo / live tests",
dest="dns", action="store_true")
parser.add_argument("--cleanup-only",
help="perform clean-up (skips all tests)",
dest="cleanup", action="store_true")
cl_args = parser.parse_args()
if cl_args.cleanup:
cleanup_only()
else:
if cl_args.compute:
main_compute()
if cl_args.lb:
main_load_balancer()
if cl_args.dns:
main_dns()
| 37.892396 | 78 | 0.613381 | [
"Apache-2.0"
] | Verizon/libcloud | demos/gce_demo.py | 26,411 | Python |
from django.shortcuts import render
from django.http import JsonResponse
from django.core.files.storage import FileSystemStorage
import requests
# Create your views here.
def cnn(request):
return render(request, 'CNN/cnn.html')
def change(request):
########################################################################
    if request.method == 'POST' and request.FILES['origin']:
        myfile = request.FILES['origin']
        fs = FileSystemStorage('./bssets/inputs/')  # defaults to MEDIA_ROOT
        filename = fs.save(myfile.name, myfile)
        ###############################################################
        # At this point the uploaded file is stored as ./bssets/inputs/<filename>
        # (FileSystemStorage appends a suffix if the name already exists).
        api_host = 'http://35.221.233.111:8000/'
        photo = './bssets/inputs/' + filename
        # requests builds the multipart Content-Type header itself for file
        # uploads, so no explicit JSON header is needed here.
        files = {'file': (filename, open(photo, 'rb'), 'image/jpeg')}
        response = requests.post(api_host, files=files)
        # A requests.Response object is not JSON-serializable; forward its decoded
        # body instead (assumes the remote API responds with JSON).
        return JsonResponse(response.json(), safe=False)
    return JsonResponse({'error': 'POST a file in the "origin" field'}, status=400)
| 32.59375 | 83 | 0.568552 | [
"MIT"
] | suyongeum/PML | CNN/views.py | 1,043 | Python |
"""
Compatibility tools for differences between Python 2 and 3
"""
import functools
import itertools
import sys
import urllib
PY3 = (sys.version_info[0] >= 3)
PY3_2 = sys.version_info[:2] == (3, 2)
if PY3:
import builtins
from collections import namedtuple
from io import StringIO, BytesIO
import inspect
cStringIO = StringIO
import pickle as cPickle
pickle = cPickle
import urllib.request
import urllib.parse
from urllib.request import HTTPError, urlretrieve, URLError
import io
bytes = bytes
str = str
asunicode = lambda x, _ : str(x)
def asbytes(s):
if isinstance(s, bytes):
return s
return s.encode('latin1')
def asstr(s):
if isinstance(s, str):
return s
return s.decode('latin1')
def asstr2(s): #added JP, not in numpy version
if isinstance(s, str):
return s
elif isinstance(s, bytes):
return s.decode('latin1')
else:
return str(s)
def isfileobj(f):
return isinstance(f, io.FileIO)
def open_latin1(filename, mode='r'):
return open(filename, mode=mode, encoding='iso-8859-1')
strchar = 'U'
# have to explicitly put builtins into the namespace
range = range
map = map
zip = zip
filter = filter
reduce = functools.reduce
long = int
unichr = chr
zip_longest = itertools.zip_longest
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
urlopen = urllib.request.urlopen
urljoin = urllib.parse.urljoin
urlretrieve = urllib.request.urlretrieve
urlencode = urllib.parse.urlencode
string_types = str
input = input
ArgSpec= namedtuple('ArgSpec', ['args', 'varargs', 'keywords', 'defaults'])
def getargspec(func):
"""
        Simple workaround for getargspec deprecation that returns
an ArgSpec-like object
"""
sig = inspect.signature(func)
parameters = sig.parameters
args, defaults = [], []
varargs, keywords = None, None
for key in parameters:
parameter = parameters[key]
if parameter.kind == inspect.Parameter.VAR_POSITIONAL:
varargs = key
elif parameter.kind == inspect.Parameter.VAR_KEYWORD:
keywords = key
else:
args.append(key)
if parameter.default is not parameter.empty:
defaults.append(parameter.default)
defaults = None if len(defaults) == 0 else defaults
return ArgSpec(args, varargs, keywords, defaults)
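    # Example (matches the shim above):
    #   getargspec(lambda a, b=1, *args, **kw: None)
    #   -> ArgSpec(args=['a', 'b'], varargs='args', keywords='kw', defaults=[1])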
else:
import __builtin__ as builtins
# not writeable when instantiated with string, doesn't handle unicode well
from cStringIO import StringIO as cStringIO
# always writeable
from StringIO import StringIO
from inspect import getargspec
BytesIO = StringIO
import cPickle
pickle = cPickle
import urllib2
import urlparse
bytes = str
str = str
asbytes = str
asstr = str
asstr2 = str
strchar = 'S'
def isfileobj(f):
return isinstance(f, file)
def asunicode(s, encoding='ascii'):
if isinstance(s, unicode):
return s
return s.decode(encoding)
def open_latin1(filename, mode='r'):
return open(filename, mode=mode)
# import iterator versions of these functions
range = xrange
zip = itertools.izip
filter = itertools.ifilter
map = itertools.imap
reduce = reduce
long = long
unichr = unichr
zip_longest = itertools.izip_longest
# Python 2-builtin ranges produce lists
lrange = builtins.range
lzip = builtins.zip
lmap = builtins.map
lfilter = builtins.filter
urlopen = urllib2.urlopen
urljoin = urlparse.urljoin
urlencode = urllib.urlencode
HTTPError = urllib2.HTTPError
URLError = urllib2.URLError
string_types = basestring
input = raw_input
def getexception():
return sys.exc_info()[1]
def asbytes_nested(x):
if hasattr(x, '__iter__') and not isinstance(x, (bytes, str)):
return [asbytes_nested(y) for y in x]
else:
return asbytes(x)
def asunicode_nested(x):
if hasattr(x, '__iter__') and not isinstance(x, (bytes, str)):
return [asunicode_nested(y) for y in x]
else:
return asunicode(x)
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
def iteritems(obj, **kwargs):
"""replacement for six's iteritems for Python2/3 compat
uses 'iteritems' if available and otherwise uses 'items'.
Passes kwargs to method.
"""
func = getattr(obj, "iteritems", None)
if not func:
func = obj.items
return func(**kwargs)
def iterkeys(obj, **kwargs):
func = getattr(obj, "iterkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def itervalues(obj, **kwargs):
func = getattr(obj, "itervalues", None)
if not func:
func = obj.values
return func(**kwargs)
def get_function_name(func):
try:
return func.im_func.func_name
except AttributeError:
#Python 3
return func.__name__
def get_class(func):
try:
return func.im_class
except AttributeError:
#Python 3
return func.__self__.__class__
try:
combinations = itertools.combinations
except:
# Python 2.6 only
def combinations(iterable, r):
# combinations('ABCD', 2) --> AB AC AD BC BD CD
# combinations(lrange(4), 3) --> 012 013 023 123
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = lrange(r)
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(lrange(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
| 24.766917 | 79 | 0.611718 | [
"BSD-3-Clause"
] | Aziiz1989/statsmodels | statsmodels/compat/python.py | 6,588 | Python |
from vektonn import Vektonn
from vektonn.dtos import Attribute, AttributeValue, Vector, InputDataPoint, SearchQuery
vektonn_client = Vektonn('http://localhost:8081')
input_data_points = [
InputDataPoint(
attributes=[
Attribute(key='id', value=AttributeValue(int64=1)),
Attribute(key='payload', value=AttributeValue(string='first data point')),
],
vector=Vector(is_sparse=False, coordinates=[0.0, 1.0])),
InputDataPoint(
attributes=[
Attribute(key='id', value=AttributeValue(int64=2)),
Attribute(key='payload', value=AttributeValue(string='second data point')),
],
vector=Vector(is_sparse=False, coordinates=[1.0, 0.0])),
InputDataPoint(
attributes=[
Attribute(key='id', value=AttributeValue(int64=3)),
Attribute(key='payload', value=AttributeValue(string='third data point')),
],
vector=Vector(is_sparse=False, coordinates=[-0.5, 0.0])),
]
vektonn_client.upload(
data_source_name='QuickStart.Source',
data_source_version='1.0',
input_data_points=input_data_points)
k = 2
search_query = SearchQuery(k=k, query_vectors=[
Vector(is_sparse=False, coordinates=[0.0, 2.0]),
])
search_results = vektonn_client.search(
index_name='QuickStart.Index',
index_version='1.0',
search_query=search_query)
print(f'For query vector {search_results[0].query_vector.coordinates} {k} nearest data points are:')
for fdp in search_results[0].nearest_data_points:
attrs = {x.key : x.value for x in fdp.attributes}
distance, vector, dp_id, payload = fdp.distance, fdp.vector, attrs['id'].int64, attrs['payload'].string
print(f' - "{payload}" with id = {dp_id}, vector = {vector.coordinates}, distance = {distance}')
'''
Expected output:
For query vector [0.0, 2.0] 2 nearest data points are:
- "first data point" with id = 1, vector = [0.0, 1.0], distance = 1.0
- "third data point" with id = 3, vector = [-0.5, 0.0], distance = 4.25
'''
| 36.727273 | 107 | 0.668812 | [
"Apache-2.0"
] | vektonn/vektonn-examples | quick-start/python/quick-start.py | 2,020 | Python |
from rest_framework.permissions import SAFE_METHODS, BasePermission
from artists.models import Artists
class IsAuthenticatedAndIsOwner(BasePermission):
message = "current user not matching user trying to update user"
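    # Read-only (SAFE_METHODS) requests are always allowed; writes require the
    # authenticated user to own the object, either directly through `obj.user`
    # or via the Artists profile linked to `obj.artist`.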
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
if request.user.is_authenticated():
if hasattr(obj, "user"):
return request.user == obj.user
else:
artist = Artists.objects.get(user=request.user)
return artist == obj.artist
| 31.842105 | 68 | 0.659504 | [
"MIT"
] | KeserOner/where-artists-share | was/permissions.py | 605 | Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies and contributors
# License: MIT. See LICENSE
# import frappe
from frappe.model.document import Document
class WebPageBlock(Document):
pass
| 19 | 58 | 0.741627 | [
"MIT"
] | 15937823/frappe | frappe/website/doctype/web_page_block/web_page_block.py | 209 | Python |
INFO_PREFIX = "> [ info ] "
ERROR_PREFIX = "> [ error ] "
WARNING_PREFIX = "> [warning] "
INPUT_KEY = "input"
LATENT_KEY = "latent"
PREDICTIONS_KEY = "predictions"
LABEL_KEY = "labels"
SQLITE_FILE = "sqlite.db"
| 19.454545 | 31 | 0.668224 | [
"MIT"
] | carefree0910/carefree-learn-deploy | cflearn_deploy/constants.py | 214 | Python |
from builtins import range
from builtins import object
import numpy as np
from past.builtins import xrange
class KNearestNeighbor(object):
""" a kNN classifier with L2 distance """
def __init__(self):
pass
def train(self, X, y):
"""
Train the classifier. For k-nearest neighbors this is just
memorizing the training data.
Inputs:
- X: A numpy array of shape (num_train, D) containing the training data
consisting of num_train samples each of dimension D.
- y: A numpy array of shape (N,) containing the training labels, where
y[i] is the label for X[i].
"""
self.X_train = X
self.y_train = y
def predict(self, X, k=1, num_loops=0):
"""
Predict labels for test data using this classifier.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data consisting
of num_test samples each of dimension D.
- k: The number of nearest neighbors that vote for the predicted labels.
- num_loops: Determines which implementation to use to compute distances
between training points and testing points.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
if num_loops == 0:
dists = self.compute_distances_no_loops(X)
elif num_loops == 1:
dists = self.compute_distances_one_loop(X)
elif num_loops == 2:
dists = self.compute_distances_two_loops(X)
else:
raise ValueError("Invalid value %d for num_loops" % num_loops)
return self.predict_labels(dists, k=k)
def compute_distances_two_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a nested loop over both the training data and the
test data.
Inputs:
- X: A numpy array of shape (num_test, D) containing test data.
Returns:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
is the Euclidean distance between the ith test point and the jth training
point.
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
for j in range(num_train):
#####################################################################
# TODO: #
# Compute the l2 distance between the ith test point and the jth #
# training point, and store the result in dists[i, j]. You should #
# not use a loop over dimension, nor use np.linalg.norm(). #
#####################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
dists[i, j] = np.sqrt(sum((X[i, ] - self.X_train[j, ]) ** 2))
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return dists
def compute_distances_one_loop(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using a single loop over the test data.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
for i in range(num_test):
#######################################################################
# TODO: #
# Compute the l2 distance between the ith test point and all training #
# points, and store the result in dists[i, :]. #
# Do not use np.linalg.norm(). #
#######################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
dists[i, :] = np.sqrt(np.sum((self.X_train - X[i, :]) ** 2, 1))
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return dists
def compute_distances_no_loops(self, X):
"""
Compute the distance between each test point in X and each training point
in self.X_train using no explicit loops.
Input / Output: Same as compute_distances_two_loops
"""
num_test = X.shape[0]
num_train = self.X_train.shape[0]
dists = np.zeros((num_test, num_train))
#########################################################################
# TODO: #
# Compute the l2 distance between all test points and all training #
# points without using any explicit loops, and store the result in #
# dists. #
# #
# You should implement this function using only basic array operations; #
# in particular you should not use functions from scipy, #
# nor use np.linalg.norm(). #
# #
# HINT: Try to formulate the l2 distance using matrix multiplication #
# and two broadcast sums. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
dists = np.sqrt(
np.sum((self.X_train[np.newaxis, :] - X[np.newaxis, :].reshape((num_test, 1, X.shape[1]))) ** 2, 2))
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
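        # Equivalent sketch following the hint (||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y),
        # assuming the same X / self.X_train shapes; it avoids the large 3-D intermediate:
        #   sq_test = np.sum(X ** 2, axis=1, keepdims=True)     # (num_test, 1)
        #   sq_train = np.sum(self.X_train ** 2, axis=1)        # (num_train,)
        #   dists = np.sqrt(sq_test + sq_train - 2.0 * X.dot(self.X_train.T))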
return dists
def predict_labels(self, dists, k=1):
"""
Given a matrix of distances between test points and training points,
predict a label for each test point.
Inputs:
- dists: A numpy array of shape (num_test, num_train) where dists[i, j]
      gives the distance between the ith test point and the jth training point.
Returns:
- y: A numpy array of shape (num_test,) containing predicted labels for the
test data, where y[i] is the predicted label for the test point X[i].
"""
num_test = dists.shape[0]
y_pred = np.zeros(num_test)
for i in range(num_test):
# A list of length k storing the labels of the k nearest neighbors to
# the ith test point.
closest_y = []
#########################################################################
# TODO: #
# Use the distance matrix to find the k nearest neighbors of the ith #
# testing point, and use self.y_train to find the labels of these #
# neighbors. Store these labels in closest_y. #
# Hint: Look up the function numpy.argsort. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
closest_y = self.y_train[dists[i, ].argsort()[:k]]
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#########################################################################
# TODO: #
# Now that you have found the labels of the k nearest neighbors, you #
# need to find the most common label in the list closest_y of labels. #
# Store this label in y_pred[i]. Break ties by choosing the smaller #
# label. #
#########################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
unique, counts = np.unique(closest_y, return_counts=True)
y_pred[i] = unique[np.argmax(counts)]
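            # np.unique returns the labels in sorted order and np.argmax returns the
            # first maximum, so ties are already broken towards the smaller label.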
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
return y_pred
| 47.392473 | 112 | 0.473511 | [
"MIT"
] | Michellemingxuan/stanford_cs231n | assignments/2021/assignment1/cs231n/classifiers/k_nearest_neighbor.py | 8,815 | Python |
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Wrapper for selecting the navigation environment that we want to train and
test on.
"""
import os
import glob
import logging
from research.cognitive_mapping_and_planning.render import swiftshader_renderer as renderer
from research.cognitive_mapping_and_planning.src import file_utils as fu
from research.cognitive_mapping_and_planning.src import utils as utils
def get_dataset(dataset_name):
dataset = None
if dataset_name == 'sbpd':
dataset = StanfordBuildingParserDataset(dataset_name)
else:
logging.fatal('Not one of sbpd')
return dataset
class Loader():
def get_data_dir(self):
pass
def get_meta_data(self, file_name, data_dir=None):
if data_dir is None:
data_dir = self.get_data_dir()
full_file_name = os.path.join(data_dir, 'meta', file_name)
assert (fu.exists(full_file_name)), \
'{:s} does not exist'.format(full_file_name)
ext = os.path.splitext(full_file_name)[1]
ls = None
if ext == '.txt':
ls = []
with fu.fopen(full_file_name, 'r') as f:
for l in f:
ls.append(l.rstrip())
elif ext == '.pkl':
ls = utils.load_variables(full_file_name)
return ls
def load_building(self, name, data_dir=None):
if data_dir is None:
data_dir = self.get_data_dir()
out = {'name': name, 'data_dir': data_dir,
'room_dimension_file': os.path.join(data_dir, 'room-dimension',
name + '.pkl'),
'class_map_folder': os.path.join(data_dir, 'class-maps')}
return out
def load_building_meshes(self, building):
dir_name = os.path.join(building['data_dir'], 'mesh', building['name'])
mesh_file_name = glob.glob1(dir_name, '*.obj')[0]
mesh_file_name_full = os.path.join(dir_name, mesh_file_name)
logging.error('Loading building from obj file: %s', mesh_file_name_full)
shape = renderer.Shape(mesh_file_name_full, load_materials=True,
name_prefix=building['name'] + '_')
return [shape]
class StanfordBuildingParserDataset(Loader):
def __init__(self, ver):
self.ver = ver
self.data_dir = None
def get_data_dir(self):
if self.data_dir is None:
self.data_dir = 'data/stanford_building_parser_dataset/'
return self.data_dir
def get_benchmark_sets(self):
return self._get_benchmark_sets()
def get_split(self, split_name):
if self.ver == 'sbpd':
return self._get_split(split_name)
else:
logging.fatal('Unknown version.')
@staticmethod
def _get_benchmark_sets():
sets = ['train1', 'val', 'test']
return sets
@staticmethod
def _get_split(split_name):
train = ['area1', 'area5a', 'area5b', 'area6']
train1 = ['area1']
val = ['area3']
test = ['area4']
sets = {'train': train, 'train1': train1, 'val': val, 'test': test,
'all': sorted(list(set(train + val + test)))}
return sets[split_name]
| 34.954955 | 91 | 0.620619 | [
"Apache-2.0"
] | jdavidagudelo/tensorflow-models | research/cognitive_mapping_and_planning/datasets/factory.py | 3,880 | Python |
import pandas as pd
import numpy as np
def gloriosafuncao(df):
df = pd.DataFrame([df])
numerico = [
11, "email", 1, 2, 3, 7,
8, 9, 12, 10, 13, 14,
15, 16, 17, 18, 19, 20, 21, 4, 5, 6
]
df.columns = numerico
labels = [
'email',
'PPI',
'ProgramasSociais',
'ModalidadeEnsino',
# 'Beneficiario',
'QtdDependentes',
'EscolaridadePai',
'EscolaridadeMae',
'RendaPerCapita',
'AtividadeRemunerada',
'SituacaoFinanceira',
'QtdResponsaveisFinanceiros',
'CondicaoTrabalho',
'CondicaoRenda',
'MoraCidadeCampus',
'CondMoradia',
'TipoTransporte',
'NConducoes',
'DoencaCronica',
'Medicacao',
'Deficiencia',
'FDoencaCronica',
'FMedicacao',
]
nomes_ordenados = [df.columns.to_list()[0]] + df.columns.to_list()[2:]
nomes_ordenados.sort()
nomes_ordenados = [df.columns.to_list()[1]] + nomes_ordenados
df = df[nomes_ordenados]
df.columns = labels
condicoes = [
'Desempregado',
'Trabalhador Informal',
'Trabalhador Autônomo',
'Aposentado',
'Empregado CLT',
# 'Pescador/agricultor familiar',
'Beneficiário INSS',
'Funcionário Público'
]
rotulos = [
'Desempregado',
'Informal',
'Autonomo',
'Aposentado',
'CLT',
# 'PescAgriF',
'INSS',
'FuncionarioPublico'
]
for rotulo, cond in zip(rotulos, condicoes):
df[rotulo] = df['CondicaoTrabalho'].map(
lambda x: 'sim' if cond in x else 'nao')
df['MoraCidadeCampus'] = df['MoraCidadeCampus'].apply(
lambda x: x.split(',')[0].lower())
df['TipoTransporte'] = df['TipoTransporte'].apply(
lambda x: ''.join(x.split()[1]).capitalize())
df['AteDois'] = df['QtdResponsaveisFinanceiros']\
.apply(lambda x: 'sim' if ' '
.join(x.split()[:-1]) == '1' or ' '
.join(x.split()[:-1]) == '2' else 'nao')
df[['TipoTransporte', 'QtdResponsaveisFinanceiros',
'MoraCidadeCampus', 'AteDois']].head()
binario = [
'PPI',
'ProgramasSociais',
# 'Beneficiario',
'AtividadeRemunerada',
'MoraCidadeCampus',
'DoencaCronica',
'Medicacao',
'Deficiencia',
'FDoencaCronica',
'FMedicacao',
'AteDois',
'Desempregado',
'Informal',
'Autonomo',
'Aposentado',
'CLT',
# 'PescAgriF',
'INSS',
'FuncionarioPublico'
]
df_binario = pd.DataFrame()
for elemento in binario:
df_binario[elemento] = df[elemento].replace(
['sim', 'nao'], [1, 0]).astype(int)
modalidade_map = {
'Graduação': 1,
'Médio Integrado EJA': 2,
'Médio Técnico Integrado': 4,
'Técnico Subsequente': 3,
}
transporte_map = {
'Pé': 1,
'Próprio': 1,
'Público': 2,
'Alternativo': 3
}
escolaridade_map = {
'Desconheço': 4,
'Não se aplica': 4,
'Sem escolaridade': 4,
'Ensino fundamental': 3,
'Ensino médio': 2,
'Ensino superior': 1,
}
moradia_map = {
'Própria': 1,
'Cedida': 2,
'Financiada': 3,
'Alugada': 4,
'Outros': 4
}
categorias = df['RendaPerCapita'].astype(
'category').cat.categories.tolist()
valores = [3, 2, 9, 8, 7, 6, 5, 4, 10, 1]
renda_percapita_map = {k: v for k, v in zip(categorias, valores)}
categorias = df['SituacaoFinanceira'].astype(
'category').cat.categories.tolist()
valores = [4, 2, 2, 1, 4, 5, 1]
situacao_fin_map = {k: v for k, v in zip(categorias, valores)}
categorias = df['QtdDependentes'].astype(
'category').cat.categories.tolist()
valores = [2, 3, 4, 5, 1]
dependentes_map = {k: v for k, v in zip(categorias, valores)}
categorias = df['NConducoes'].astype('category').cat.categories.tolist()
valores = [2, 3, 1]
conducoes_map = {k: v for k, v in zip(categorias, valores)}
categorias = df['CondicaoRenda'].astype('category').cat.categories.tolist()
valores = [1, 2, 3]
cond_renda_map = {k: v for k, v in zip(categorias, valores)}
labels = [
'CondMoradia',
'TipoTransporte',
'RendaPerCapita',
'SituacaoFinanceira',
'NConducoes',
'CondicaoRenda',
"ModalidadeEnsino",
"EscolaridadeMae",
"EscolaridadePai",
"QtdDependentes"
]
label_encode = df[labels].copy()
label_encode['CondMoradia'].replace(moradia_map, inplace=True)
label_encode['TipoTransporte'].replace(transporte_map, inplace=True)
label_encode['EscolaridadePai'].replace(escolaridade_map, inplace=True)
label_encode['EscolaridadeMae'].replace(escolaridade_map, inplace=True)
label_encode['SituacaoFinanceira'].replace(situacao_fin_map, inplace=True)
label_encode['RendaPerCapita'].replace(renda_percapita_map, inplace=True)
label_encode['QtdDependentes'].replace(dependentes_map, inplace=True)
label_encode['NConducoes'].replace(conducoes_map, inplace=True)
label_encode['CondicaoRenda'].replace(cond_renda_map, inplace=True)
label_encode['ModalidadeEnsino'].replace(modalidade_map, inplace=True)
qtd = pd.DataFrame()
qtd_res = ['ResFin_1', 'ResFin_2', 'ResFin_3', 'ResFin_4ouMais']
opcs = [
'1 membro',
'2 membros',
'3 membros',
'4 ou mais membros'
]
    # Map the raw answers to the ResFin_* labels so the one-hot loop below finds matches.
    df['QtdResponsaveisFinanceiros'] = df['QtdResponsaveisFinanceiros'].replace(
        opcs, qtd_res)
for iqtd in qtd_res:
qtd[iqtd] = df['QtdResponsaveisFinanceiros'].map(
lambda x: int(1) if iqtd in x else int(0))
dados_limpos = pd.concat([df_binario, label_encode, qtd], axis=1)
ordem = ['PPI',
'ProgramasSociais',
'AtividadeRemunerada',
'MoraCidadeCampus',
'DoencaCronica',
'Medicacao',
'Deficiencia',
'FDoencaCronica',
'FMedicacao',
'AteDois',
'Desempregado',
'Informal',
'Autonomo',
'Aposentado',
'CLT',
'INSS',
'FuncionarioPublico',
'ModalidadeEnsino',
'CondMoradia',
'TipoTransporte',
'EscolaridadeMae',
'EscolaridadePai',
'RendaPerCapita',
'SituacaoFinanceira',
'QtdDependentes',
'NConducoes',
'CondicaoRenda',
'ResFin_1',
'ResFin_2',
'ResFin_3',
'ResFin_4ouMais']
dados_limpos = dados_limpos[ordem]
dados_limpos['email'] = df['email']
return np.array(dados_limpos.loc[0]).reshape(1, -1)
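# Illustrative call sketch: the input must carry the 22 form fields handled above, in the
# original submission order; 'respostas' and 'modelo' below are hypothetical names.
#   features = gloriosafuncao(respostas)  # dict-like row of raw form answers
#   modelo.predict(features)              # e.g. a previously trained scikit-learn estimator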
| 27.72 | 79 | 0.553535 | [
"MIT"
] | willidert/aux_est_micro | data-clean/clean.py | 6,947 | Python |
# -*- coding: utf-8 -*-
# Copyright 2020-2022 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Benedikt Ziemons <[email protected]>, 2020-2021
# - Radu Carpa <[email protected]>, 2021
# - Mayank Sharma <[email protected]>, 2021-2022
# - Simon Fayer <[email protected]>, 2021
# - Rakshita Varadarajan <[email protected]>, 2021
# - Mario Lassnig <[email protected]>, 2021
# - Cedric Serfon <[email protected]>, 2021
# - Cedric Serfon <[email protected]>, 2021-2022
from __future__ import print_function
import traceback
import pytest
# local imports in the fixtures to make this file loadable in e.g. client tests
@pytest.fixture(scope='session')
def vo():
from rucio.tests.common_server import get_vo
return get_vo()
@pytest.fixture(scope='session')
def second_vo():
from rucio.common.config import config_get_bool
from rucio.core.vo import vo_exists, add_vo
multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
if not multi_vo:
pytest.skip('multi_vo mode is not enabled. Running multi_vo tests in single_vo mode would result in failures.')
new_vo = 'new'
if not vo_exists(vo=new_vo):
add_vo(vo=new_vo, description='Test', email='[email protected]')
return new_vo
@pytest.fixture(scope='session')
def long_vo():
from rucio.tests.common import get_long_vo
return get_long_vo()
@pytest.fixture(scope='module')
def replica_client():
from rucio.client.replicaclient import ReplicaClient
return ReplicaClient()
@pytest.fixture(scope='module')
def rucio_client():
from rucio.client import Client
return Client()
@pytest.fixture(scope='module')
def did_client():
from rucio.client.didclient import DIDClient
return DIDClient()
@pytest.fixture
def rest_client():
from rucio.tests.common import print_response
from flask.testing import FlaskClient
from rucio.web.rest.flaskapi.v1.main import application
class WrappedFlaskClient(FlaskClient):
def __init__(self, *args, **kwargs):
super(WrappedFlaskClient, self).__init__(*args, **kwargs)
def open(self, path='/', *args, **kwargs):
print(kwargs.get('method', 'GET'), path)
response = super(WrappedFlaskClient, self).open(path, *args, **kwargs)
try:
print_response(response)
except Exception:
traceback.print_exc()
return response
_testing = application.testing
application.testing = True
application.test_client_class = WrappedFlaskClient
with application.test_client() as client:
yield client
application.test_client_class = None
application.testing = _testing
@pytest.fixture
def auth_token(rest_client, long_vo):
from rucio.tests.common import vohdr, headers, loginhdr
auth_response = rest_client.get('/auth/userpass', headers=headers(loginhdr('root', 'ddmlab', 'secret'), vohdr(long_vo)))
assert auth_response.status_code == 200
token = auth_response.headers.get('X-Rucio-Auth-Token')
assert token
return str(token)
@pytest.fixture(scope='module')
def mock_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('mock', vo=vo)
@pytest.fixture(scope='module')
def test_scope(vo):
from rucio.common.types import InternalScope
return InternalScope('test', vo=vo)
@pytest.fixture(scope='module')
def root_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('root', vo=vo)
@pytest.fixture(scope='module')
def jdoe_account(vo):
from rucio.common.types import InternalAccount
return InternalAccount('jdoe', vo=vo)
@pytest.fixture(scope="module")
def containerized_rses(rucio_client):
"""
Detects if containerized rses for xrootd & ssh are available in the testing environment.
:return: A list of (rse_name, rse_id) tuples.
"""
from rucio.common.exception import InvalidRSEExpression
rses = []
try:
xrd_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_xrd=True')]
xrd_rses = [rucio_client.get_rse(rse) for rse in xrd_rses]
xrd_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in xrd_rses if "xrd" in rse_obj['rse'].lower()]
xrd_containerized_rses.sort()
rses.extend(xrd_containerized_rses)
ssh_rses = [x['rse'] for x in rucio_client.list_rses(rse_expression='test_container_ssh=True')]
ssh_rses = [rucio_client.get_rse(rse) for rse in ssh_rses]
ssh_containerized_rses = [(rse_obj['rse'], rse_obj['id']) for rse_obj in ssh_rses if "ssh" in rse_obj['rse'].lower()]
ssh_containerized_rses.sort()
rses.extend(ssh_containerized_rses)
except InvalidRSEExpression as invalid_rse_expression:
print("{ex}. Note that containerized RSEs will not be available in non-containerized test environments"
.format(ex=invalid_rse_expression))
traceback.print_exc()
return rses
@pytest.fixture
def rse_factory(vo):
from rucio.tests.temp_factories import TemporaryRSEFactory
with TemporaryRSEFactory(vo=vo) as factory:
yield factory
@pytest.fixture(scope="class")
def rse_factory_unittest(request, vo):
"""
unittest classes can get access to rse_factory fixture via this fixture
"""
from rucio.tests.temp_factories import TemporaryRSEFactory
with TemporaryRSEFactory(vo=vo) as factory:
request.cls.rse_factory = factory
yield factory
factory.cleanup()
@pytest.fixture
def did_factory(vo, mock_scope):
from rucio.tests.temp_factories import TemporaryDidFactory
with TemporaryDidFactory(vo=vo, default_scope=mock_scope) as factory:
yield factory
@pytest.fixture
def file_factory(tmp_path_factory):
from rucio.tests.temp_factories import TemporaryFileFactory
with TemporaryFileFactory(pytest_path_factory=tmp_path_factory) as factory:
yield factory
@pytest.fixture
def scope_factory():
from rucio.common.utils import generate_uuid
from rucio.core.scope import add_scope
from rucio.common.types import InternalAccount, InternalScope
def create_scopes(vos, account_name=None):
scope_uuid = str(generate_uuid()).lower()[:16]
scope_name = 'shr_%s' % scope_uuid
created_scopes = []
for vo in vos:
scope = InternalScope(scope_name, vo=vo)
add_scope(scope, InternalAccount(account_name if account_name else 'root', vo=vo))
created_scopes.append(scope)
return scope_name, created_scopes
return create_scopes
@pytest.fixture
def db_session():
from rucio.db.sqla import session
db_session = session.get_session()
yield db_session
db_session.commit()
db_session.close()
def __get_fixture_param(request):
fixture_param = getattr(request, "param", None)
if not fixture_param:
# Parametrize support is incomplete for legacy unittest test cases
# Manually retrieve the parameters from the list of marks:
mark = next(iter(filter(lambda m: m.name == 'parametrize', request.instance.pytestmark)), None)
if mark:
fixture_param = mark.args[1][0]
return fixture_param
@pytest.fixture
def core_config_mock(request):
"""
Fixture to allow having per-test core.config tables without affecting the other parallel tests.
This override works only in tests which use core function calls directly, not in the ones working
via the API, because the normal config table is not touched and the rucio instance answering API
calls is not aware of this mock.
This fixture acts by creating a new copy of the "config" sql table using the :memory: sqlite engine.
Accesses to the "models.Config" table are then redirected to this temporary table via mock.patch().
"""
from unittest import mock
from rucio.common.utils import generate_uuid
from sqlalchemy.pool import StaticPool
from rucio.db.sqla.models import ModelBase, BASE, Column, String, PrimaryKeyConstraint
from rucio.db.sqla.session import get_session, get_maker, get_engine, create_engine, declarative_base
# Get the fixture parameters
table_content = []
params = __get_fixture_param(request)
if params:
table_content = params.get("table_content", table_content)
# Create an in-memory dropdown replacement table for the "models.Config" table
engine = create_engine('sqlite://', connect_args={'check_same_thread': False}, poolclass=StaticPool)
InMemoryBase = declarative_base(bind=engine)
class InMemoryConfig(InMemoryBase, ModelBase):
__tablename__ = 'configs_' + generate_uuid()
section = Column(String(128))
opt = Column(String(128))
value = Column(String(4000))
_table_args = (PrimaryKeyConstraint('section', 'opt', name='CONFIGS_PK'), )
InMemoryBase.metadata.create_all()
# Register the new table with the associated engine into the sqlalchemy sessionmaker
# In theory, this code must be protected by rucio.db.scla.session._LOCK, but this code will be executed
# during test case initialization, so there is no risk here to have concurrent calls from within the
# same process
current_engine = get_engine()
get_maker().configure(binds={BASE: current_engine, InMemoryBase: engine})
# Fill the table with the requested mock data
session = get_session()()
for section, option, value in (table_content or []):
InMemoryConfig(section=section, opt=option, value=value).save(flush=True, session=session)
session.commit()
with mock.patch('rucio.core.config.models.Config', new=InMemoryConfig):
yield
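# Illustrative test usage sketch for core_config_mock (section/option/value entries
# below are hypothetical):
# @pytest.mark.parametrize("core_config_mock", [{"table_content": [
#     ("some_section", "some_option", "some_value"),
# ]}], indirect=True)
# def test_with_isolated_core_config(core_config_mock):
#     ...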
@pytest.fixture
def file_config_mock(request):
"""
Fixture which allows to have an isolated in-memory configuration file instance which
is not persisted after exiting the fixture.
This override works only in tests which use config calls directly, not in the ones working
via the API, as the server config is not changed.
"""
from unittest import mock
from rucio.common.config import Config, config_set, config_has_section, config_add_section
# Get the fixture parameters
overrides = []
params = __get_fixture_param(request)
if params:
overrides = params.get("overrides", overrides)
parser = Config().parser
with mock.patch('rucio.common.config.get_config', side_effect=lambda: parser):
for section, option, value in (overrides or []):
if not config_has_section(section):
config_add_section(section)
config_set(section, option, value)
yield
@pytest.fixture
def caches_mock(request):
"""
Fixture which overrides the different internal caches with in-memory ones for the duration
of a particular test.
This override works only in tests which use core function calls directly, not in the ones
working via API.
The fixture acts by by mock.patch the REGION object in the provided list of modules to mock.
"""
from unittest import mock
from contextlib import ExitStack
from dogpile.cache import make_region
caches_to_mock = []
params = __get_fixture_param(request)
if params:
caches_to_mock = params.get("caches_to_mock", caches_to_mock)
with ExitStack() as stack:
mocked_caches = []
for module in caches_to_mock:
region = make_region().configure('dogpile.cache.memory', expiration_time=600)
stack.enter_context(mock.patch(module, new=region))
mocked_caches.append(region)
yield mocked_caches
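# Illustrative test usage sketch for caches_mock (the module path below is hypothetical):
# @pytest.mark.parametrize("caches_mock", [{"caches_to_mock": [
#     "rucio.core.some_module.REGION",
# ]}], indirect=True)
# def test_with_in_memory_cache(caches_mock):
#     ...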
@pytest.fixture
def metrics_mock():
"""
Overrides the prometheus metric registry and allows to verify if the desired
prometheus metrics were correctly recorded.
"""
from unittest import mock
from prometheus_client import CollectorRegistry
with mock.patch('rucio.core.monitor.REGISTRY', new=CollectorRegistry()) as registry, mock.patch('rucio.core.monitor.COUNTERS', new={}):
yield registry
| 33.542328 | 139 | 0.713621 | [
"Apache-2.0"
] | R-16Bob/rucio | lib/rucio/tests/conftest.py | 12,679 | Python |
# -*- coding: utf-8 -*-
"""
Copyright © 2019-present Lenovo
This file is licensed under both the BSD-3 license for individual/non-commercial use and
EPL-1.0 license for commercial use. Full text of both licenses can be found in
COPYING.BSD and COPYING.EPL files.
"""
import time
from copy import deepcopy
from fnmatch import fnmatch
# Consts
ADMIN = 'ADMIN'
ADMINISTRATORNAME = 'ADMINISTRATORNAME'
COMMONNAME = 'COMMONNAME'
EMAIL = 'EMAIL'
GECOS = 'GECOS'
GIDNUMBER = 'GIDNUMBER'
GIVENNAME = 'GIVENNAME'
GROUP = 'GROUP'
GROUPNAME = 'GROUPNAME'
GROUPPASSWORD = 'GROUPPASSWORD'
HOMEDIRECTORY = 'HOMEDIRECTORY'
HOMEPHONE = 'HOMEPHONE'
LOGINSHELL = 'LOGINSHELL'
MEMBERNAME = 'MEMBERNAME'
PROMPT = 'PROMPT'
ROOMNUMBER = 'ROOMNUMBER'
SHADOWEXPIRE = 'SHADOWEXPIRE'
SHADOWFLAG = 'SHADOWFLAG'
SHADOWINACTIVE = 'SHADOWINACTIVE'
SHADOWLASTCHANGE = 'SHADOWLASTCHANGE'
SHADOWMAX = 'SHADOWMAX'
SHADOWMIN = 'SHADOWMIN'
SHADOWNAME = 'SHADOWNAME'
SHADOWPASSWORD = 'SHADOWPASSWORD'
SHADOWWARNING = 'SHADOWWARNING'
SN = 'SN'
TELEPHONENUMBER = 'TELEPHONENUMBER'
UIDNUMBER = 'UIDNUMBER'
USER = 'USER'
USERNAME = 'USERNAME'
USERPASSWORD = 'USERPASSWORD'
UT_NAMESIZE = 'UT_NAMESIZE'
VALUE_INVALID_ID = 'VALUE_INVALID_ID'
# a const
class DEFAULT_NO_KEY(object):
def __init__(self, *args, **karg):
pass
class MockEnt(object):
pk = 'AbstructAttrunite'
@classmethod
def init_subclass(cls):
for sub in cls.__subclasses__():
sub.objects = dict()
def __new__(cls, pk):
obj = cls.objects.get(pk, None)
if obj is None:
obj = super(MockEnt, cls).__new__(cls)
obj.pk = pk
return obj
def __init__(self, pk):
self.data = {self.__class__.pk: pk}
def __getattr__(self, key):
return getattr(self.data, key)
def __setitem__(self, key, val):
self.data[key] = val
def __getitem__(self, key):
return self.data[key]
def __repr__(self):
return '<{}:{}: {}>'.format(
self.__class__.__name__,
self.pk,
self.data
)
def __copy__(self):
return self
def __deepcopy__(self, pk):
return self
def __iter__(self):
return iter(self.data)
def save(self, overwrite=True):
default = deepcopy(self.default)
# if no pk
default[self.__class__.pk] = [self.pk]
# set default
for k, v in default.items():
if k not in self.data:
self.data[k] = v
# pre save callback
for k, v in self.presave.items():
if callable(v):
result = v(self, self.data.get(k, None))
if result is not None:
self.data[k] = result
# format
        # Iterate over a copy of the items so keys can be deleted while formatting.
        for k, v in list(self.data.items()):
if not isinstance(v, list):
v = [v]
self.data[k] = v
if v[0] is DEFAULT_NO_KEY or isinstance(v[0], DEFAULT_NO_KEY):
del self.data[k]
# not overwrite and exists
if not overwrite and self.pk in self.__class__.objects:
msg = 'error creating a LDAP directory entry: Already exists'
raise RuntimeError(msg)
self.__class__.objects[self.pk] = self
class UserEnt(MockEnt):
pk = USERNAME
default = {
# USERNAME: None,
USERPASSWORD: '{CRYPT}!!',
UIDNUMBER: None,
GIDNUMBER: None,
GECOS: None,
# HOMEDIRECTORY: None,
LOGINSHELL: '/bin/bash/',
# SHADOWPASSWORD: None,
SHADOWLASTCHANGE: None,
SHADOWMIN: None,
SHADOWMAX: None,
SHADOWWARNING: None,
SHADOWINACTIVE: None,
SHADOWEXPIRE: None,
SHADOWFLAG: None,
COMMONNAME: None,
# SN: None,
# ROOMNUMBER: None,
# TELEPHONENUMBER: None,
# HOMEPHONE: None,
# EMAIL: None,
}
def s_USERNAME(self, value):
if not value:
return self.pk
def s_UIDNUMBER(self, value):
if not value:
return int(time.time() % 1 * 10000000)
def s_GIDNUMBER(self, value):
# XXX if create same name group ??
if not value:
return int(time.time() % 1 * 10000000)
def s_GIVENNAME(self, value):
# e[libuser.SN] required by inetOrgPerson schema, but not provided
if (GIVENNAME in self.data) and (SN not in self.data):
raise RuntimeError
def s_GECOS(self, value):
if isinstance(value, list):
value = value[0]
if value:
attrs = value.split(',') if (value and ',' in value) else []
attrs.extend([None] * (4 - len(attrs)))
common_name, building, phone_office, phone_home = attrs
if common_name is not None:
self.data[COMMONNAME] = common_name
else:
return None
def s_SHADOWPASSWORD(self, value):
self.data.pop(SHADOWPASSWORD, None)
def s_HOMEDIRECTORY(self, value):
# testUserAdd5
if value is None:
if self.pk[0] == '.':
raise RuntimeError
else:
return '/home/' + self.pk
presave = {
USERNAME: s_USERNAME,
UIDNUMBER: s_UIDNUMBER,
GIDNUMBER: s_GIDNUMBER,
GIVENNAME: s_GIVENNAME,
GECOS: s_GECOS,
SHADOWPASSWORD: s_SHADOWPASSWORD,
HOMEDIRECTORY: s_HOMEDIRECTORY
}
class GroupEnt(MockEnt):
pk = GROUPNAME
default = {
GIDNUMBER: None,
GROUPPASSWORD: DEFAULT_NO_KEY,
}
def s_GROUPNAME(self, value):
if not value:
return self.pk
def s_MEMBERNAME(self, value):
membername = self.data.get(MEMBERNAME, None)
if isinstance(membername, basestring):
membername = [membername, ]
return membername
def s_GIDNUMBER(self, value):
if not value:
return int(time.time() % 1 * 10000000)
def s_GROUPPASSWORD(self, value):
        '''If USERPASSWORD is set on the group, GROUPPASSWORD takes the same value;
        if no value is set at all, the key should not exist.
        '''
if value in (None, DEFAULT_NO_KEY):
user_pwd = self.data.get(USERPASSWORD, None)
if user_pwd is not None:
return user_pwd
else:
return DEFAULT_NO_KEY
presave = {
GROUPNAME: s_GROUPNAME,
MEMBERNAME: s_MEMBERNAME,
GIDNUMBER: s_GIDNUMBER,
# USERPASSWORD: d_G,
GROUPPASSWORD: s_GROUPPASSWORD, # default no key
}
class MockLibuserAdmin(object):
pk = USERNAME
def __init__(self):
pass
def initUser(self, name):
return UserEnt(name)
def initGroup(self, name):
return GroupEnt(name)
def addUser(self, ent, create_home=True, create_mail_spool=True):
ent.save(overwrite=False)
return 1
def addGroup(self, ent):
ent.save(overwrite=False)
return 1
def setpassUser(self, ent, password, use_crypt=True):
return self._setpassEnt(USERPASSWORD, ent, password, use_crypt)
def setpassGroup(self, ent, password, use_crypt=True):
return self._setpassEnt(GROUPPASSWORD, ent, password, use_crypt)
def _setpassEnt(self, password_attr, ent, password, use_crypt):
if not use_crypt: # ...
pass
# password = crypt(password)
if password_attr not in ent:
ent[password_attr] = [None]
if len(ent[password_attr]) == 1:
ent[password_attr] = ['{CRYPT}' + password]
elif len(ent[password_attr]) == 2:
ent[password_attr][1] = '{CRYPT}' + password
else:
raise Exception('What ?')
ent.save()
def lookupUserByName(self, name):
for i in UserEnt.objects.values():
if i[USERNAME] == [name]:
                return deepcopy(i)
return None
def lookupGroupByName(self, name):
for i in GroupEnt.objects.values():
if i[GROUPNAME] == [name]:
return deepcopy(i)
return None
def enumerateUsersFull(self, name=None):
return [
deepcopy(v)
for k, v in UserEnt.objects.items()
if (name is None) or fnmatch(k, name)
]
def enumerateUsers(self, name=None):
return [
deepcopy(i)
for i in UserEnt.objects
if (name is None) or fnmatch(i, name)
]
def enumerateGroupsFull(self, name=None):
return [
deepcopy(v)
for k, v in GroupEnt.objects.items()
if (name is None) or fnmatch(k, name)
]
def enumerateGroups(self, name=None):
return [
deepcopy(i)
for i in GroupEnt.objects
if (name is None) or fnmatch(i, name)
]
def enumerateGroupsByUserFull(self, name):
user = self.lookupUserByName(name)
gid = user[GIDNUMBER]
return [
i
for i in GroupEnt.objects.values()
if (i[GIDNUMBER] == gid) or (name in i.get(MEMBERNAME, []))
]
def enumerateGroupsByUser(self, name):
return [i[GROUPNAME][0]
for i in self.enumerateGroupsByUserFull(name)]
def lookupUserById(self, id):
for i in UserEnt.objects.values():
if i[UIDNUMBER] == [id]:
return deepcopy(i)
return None
def lookupGroupById(self, id):
for i in GroupEnt.objects.values():
if i[GIDNUMBER] == [id]:
return deepcopy(i)
return None
def enumerateUsersByGroupFull(self, name):
group = self.lookupGroupByName(name)
gid = group[GIDNUMBER]
users_gid_match = [
deepcopy(i)
for i in UserEnt.objects.values()
if i[GIDNUMBER] == gid
]
users_member_match = [
self.lookupUserByName(i) for i in group.get(MEMBERNAME, [])
]
# remove repeated
users = {i.pk: i for i in (users_gid_match + users_member_match)}
return users.values()
def enumerateUsersByGroup(self, name):
return [i[USERNAME][0]
for i in self.enumerateUsersByGroupFull(name)]
def modifyUser(self, ent, renew_home=False):
self._modifyEnt(ent)
def modifyGroup(self, ent):
self._modifyEnt(ent)
def _modifyEnt(self, ent):
# new pk attr value != current pk value
old_pk = ent.pk
new_pk = ent[ent.__class__.pk][0]
if new_pk != old_pk:
ent.pk = new_pk
if new_pk in ent.objects: # other exists
raise RuntimeError
else:
del ent.objects[old_pk] # remove old
ent.save()
def deleteUser(self, ent, remove_hone=False, remove_mail_spool=False):
del UserEnt.objects[ent.pk]
def deleteGroup(self, ent):
if not hasattr(ent, 'pk'):
return True
del GroupEnt.objects[ent.pk]
return True
def removepassUser(self, ent):
self._removepassEnt(USERPASSWORD, ent)
def removepassGroup(self, ent):
self._removepassEnt(GROUPPASSWORD, ent)
def _removepassEnt(self, password_attr, ent):
if len(ent[password_attr]) == 1:
ent[password_attr] = '{CRYPT}'
elif len(ent[password_attr]) == 2:
if '{CRYPT}' in ent[password_attr][1]:
ent[password_attr][1] = '{CRYPT}'
else:
ent[password_attr] = ['{CRYPT}']
else:
raise Exception('What ?')
ent[SHADOWLASTCHANGE] = 10000 + 1 # testUserRemovepass1
ent.save()
def lockUser(self, ent):
return self._lockEnt(USERPASSWORD, ent)
def lockGroup(self, ent):
return self._lockEnt(GROUPPASSWORD, ent)
def _lockEnt(self, password_attr, ent):
password = ent[password_attr][0]
if '{CRYPT}' not in password:
raise RuntimeError
password = password.replace('{CRYPT}!', '{CRYPT}')
password = password.replace('{CRYPT}', '{CRYPT}!')
ent[password_attr] = password
ent.save()
def unlockUser(self, ent, empty_passwrod=False):
return self._unlockEnt(USERPASSWORD, ent, empty_passwrod)
def unlockGroup(self, ent, empty_passwrod=False):
return self._unlockEnt(GROUPPASSWORD, ent, empty_passwrod)
def _unlockEnt(self, password_attr, ent, empty_passwrod=False):
password = ent[password_attr][0]
if '{CRYPT}' not in password:
raise RuntimeError
if empty_passwrod:
if password == '{CRYPT}!':
raise RuntimeError
password = password.replace('{CRYPT}!', '{CRYPT}')
else:
password = password.replace('{CRYPT}!', '{CRYPT}')
ent[password_attr] = password
ent.save()
def userIsLocked(self, ent):
password = ent[USERPASSWORD][0]
return '{CRYPT}!' in password
def groupIsLocked(self, ent):
password = ent[GROUPPASSWORD][0]
return '{CRYPT}!' in password
MockEnt.init_subclass()
mock_admin = MockLibuserAdmin()
def admin(prompt=None):
return mock_admin
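if __name__ == "__main__":
    # Minimal smoke-test sketch of the mocked libuser admin API; the account name and
    # password below are made up.
    a = admin()
    alice = a.initUser('alice')
    a.addUser(alice)
    a.setpassUser(alice, 'secret-hash')
    print(a.lookupUserByName('alice'))
    print(a.enumerateUsers('ali*'))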
| 28.385928 | 88 | 0.580335 | [
"EPL-1.0",
"BSD-3-Clause"
] | CarrotXin/Antilles | antilles-core/openHPC_web_project/tests/user/mock_libuser.py | 13,314 | Python |
# Generated by Django 1.11.24 on 2019-10-16 22:48
from typing import Any, Set, Union
import ujson
from django.conf import settings
from django.contrib.auth.hashers import check_password, make_password
from django.db import migrations
from django.db.backends.postgresql.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from django.utils.timezone import now as timezone_now
from zerver.lib.cache import cache_delete, user_profile_by_api_key_cache_key
from zerver.lib.queue import queue_json_publish
from zerver.lib.utils import generate_api_key
def ensure_no_empty_passwords(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
"""With CVE-2019-18933, it was possible for certain users created
using social login (e.g. Google/GitHub auth) to have the empty
string as their password in the Zulip database, rather than
Django's "unusable password" (i.e. no password at all). This was a
serious security issue for organizations with both password and
Google/GitHub authentication enabled.
Combined with the code changes to prevent new users from entering
this buggy state, this migration sets the intended "no password"
state for any users who are in this buggy state, as had been
intended.
While this bug was discovered by our own development team and we
believe it hasn't been exploited in the wild, out of an abundance
of caution, this migration also resets the personal API keys for
all users where Zulip's database-level logging cannot **prove**
that user's current personal API key was never accessed using this
bug.
There are a few ways this can be proven: (1) the user's password
has never been changed and is not the empty string,
or (2) the user's personal API key has changed since that user last
changed their password (which is not ''). Both constitute proof
because this bug cannot be used to gain the access required to change
or reset a user's password.
Resetting those API keys has the effect of logging many users out
of the Zulip mobile and terminal apps unnecessarily (e.g. because
the user changed their password at any point in the past, even
though the user never was affected by the bug), but we're
comfortable with that cost for ensuring that this bug is
completely fixed.
To avoid this inconvenience for self-hosted servers which don't
even have EmailAuthBackend enabled, we skip resetting any API keys
if the server doesn't have EmailAuthBackend configured.
"""
UserProfile = apps.get_model('zerver', 'UserProfile')
RealmAuditLog = apps.get_model('zerver', 'RealmAuditLog')
# Because we're backporting this migration to the Zulip 2.0.x
# series, we've given it migration number 0209, which is a
# duplicate with an existing migration already merged into Zulip
# master. Migration 0247_realmauditlog_event_type_to_int.py
# changes the format of RealmAuditLog.event_type, so we need the
# following conditional block to determine what values to use when
# searching for the relevant events in that log.
event_type_class = RealmAuditLog._meta.get_field('event_type').get_internal_type()
if event_type_class == 'CharField':
USER_PASSWORD_CHANGED: Union[int, str] = 'user_password_changed'
USER_API_KEY_CHANGED: Union[int, str] = 'user_api_key_changed'
else:
USER_PASSWORD_CHANGED = 122
USER_API_KEY_CHANGED = 127
# First, we do some bulk queries to collect data we'll find useful
# in the loop over all users below.
# Users who changed their password at any time since account
# creation. These users could theoretically have started with an
# empty password, but set a password later via the password reset
# flow. If their API key has changed since they changed their
# password, we can prove their current API key cannot have been
# exposed; we store those users in
# password_change_user_ids_no_reset_needed.
password_change_user_ids = set(RealmAuditLog.objects.filter(
event_type=USER_PASSWORD_CHANGED).values_list("modified_user_id", flat=True))
password_change_user_ids_api_key_reset_needed: Set[int] = set()
password_change_user_ids_no_reset_needed: Set[int] = set()
for user_id in password_change_user_ids:
# Here, we check the timing for users who have changed
# their password.
# We check if the user changed their API key since their first password change.
query = RealmAuditLog.objects.filter(
modified_user=user_id, event_type__in=[USER_PASSWORD_CHANGED,
USER_API_KEY_CHANGED]
).order_by("event_time")
earliest_password_change = query.filter(event_type=USER_PASSWORD_CHANGED).first()
# Since these users are in password_change_user_ids, this must not be None.
assert earliest_password_change is not None
latest_api_key_change = query.filter(event_type=USER_API_KEY_CHANGED).last()
if latest_api_key_change is None:
# This user has never changed their API key. As a
# result, even though it's very likely this user never
# had an empty password, they have changed their
# password, and we have no record of the password's
# original hash, so we can't prove the user's API key
# was never affected. We schedule this user's API key
# to be reset.
password_change_user_ids_api_key_reset_needed.add(user_id)
elif earliest_password_change.event_time <= latest_api_key_change.event_time:
# This user has changed their password before
# generating their current personal API key, so we can
# prove their current personal API key could not have
# been exposed by this bug.
password_change_user_ids_no_reset_needed.add(user_id)
else:
password_change_user_ids_api_key_reset_needed.add(user_id)
if password_change_user_ids_no_reset_needed and settings.PRODUCTION:
# We record in this log file users whose current API key was
# generated after a real password was set, so there's no need
# to reset their API key, but because they've changed their
# password, we don't know whether or not they originally had a
# buggy password.
#
# In theory, this list can be recalculated using the above
# algorithm modified to only look at events before the time
# this migration was installed, but it's helpful to log it as well.
with open("/var/log/zulip/0209_password_migration.log", "w") as log_file:
line = "No reset needed, but changed password: {}\n"
log_file.write(line.format(password_change_user_ids_no_reset_needed))
AFFECTED_USER_TYPE_EMPTY_PASSWORD = 'empty_password'
AFFECTED_USER_TYPE_CHANGED_PASSWORD = 'changed_password'
MIGRATION_ID = '0209_user_profile_no_empty_password'
def write_realm_audit_log_entry(user_profile: Any,
event_time: Any, event_type: Any,
affected_user_type: str) -> None:
RealmAuditLog.objects.create(
realm=user_profile.realm,
modified_user=user_profile,
event_type=event_type,
event_time=event_time,
extra_data=ujson.dumps({
'migration_id': MIGRATION_ID,
'affected_user_type': affected_user_type,
})
)
# If Zulip's built-in password authentication is not enabled on
# the server level, then we plan to skip resetting any users' API
# keys, since the bug requires EmailAuthBackend.
email_auth_enabled = 'zproject.backends.EmailAuthBackend' in settings.AUTHENTICATION_BACKENDS
# A quick note: This query could in theory exclude users with
# is_active=False, is_bot=True, or realm__deactivated=True here to
# accessing only active human users in non-deactivated realms.
# But it's better to just be thorough; users can be reactivated,
# and e.g. a server admin could manually edit the database to
# change a bot into a human user if they really wanted to. And
# there's essentially no harm in rewriting state for a deactivated
# account.
for user_profile in UserProfile.objects.all():
event_time = timezone_now()
if check_password('', user_profile.password):
# This user currently has the empty string as their password.
# Change their password and record that we did so.
user_profile.password = make_password(None)
update_fields = ["password"]
write_realm_audit_log_entry(user_profile, event_time,
USER_PASSWORD_CHANGED,
AFFECTED_USER_TYPE_EMPTY_PASSWORD)
if email_auth_enabled and not user_profile.is_bot:
# As explained above, if the built-in password authentication
# is enabled, reset the API keys. We can skip bot accounts here,
# because the `password` attribute on a bot user is useless.
reset_user_api_key(user_profile)
update_fields.append("api_key")
event_time = timezone_now()
write_realm_audit_log_entry(user_profile, event_time,
USER_API_KEY_CHANGED,
AFFECTED_USER_TYPE_EMPTY_PASSWORD)
user_profile.save(update_fields=update_fields)
continue
elif email_auth_enabled and \
user_profile.id in password_change_user_ids_api_key_reset_needed:
# For these users, we just need to reset the API key.
reset_user_api_key(user_profile)
user_profile.save(update_fields=["api_key"])
write_realm_audit_log_entry(user_profile, event_time,
USER_API_KEY_CHANGED,
AFFECTED_USER_TYPE_CHANGED_PASSWORD)
def reset_user_api_key(user_profile: Any) -> None:
old_api_key = user_profile.api_key
user_profile.api_key = generate_api_key()
cache_delete(user_profile_by_api_key_cache_key(old_api_key))
# Like with any API key change, we need to clear any server-side
# state for sending push notifications to mobile app clients that
# could have been registered with the old API key. Fortunately,
# we can just write to the queue processor that handles sending
# those notices to the push notifications bouncer service.
event = {'type': 'clear_push_device_tokens',
'user_profile_id': user_profile.id}
queue_json_publish("deferred_work", event)
class Migration(migrations.Migration):
atomic = False
dependencies = [
('zerver', '0208_add_realm_night_logo_fields'),
]
operations = [
migrations.RunPython(ensure_no_empty_passwords,
reverse_code=migrations.RunPython.noop),
]
| 48.73913 | 97 | 0.692507 | [
"Apache-2.0"
] | Bpapman/zulip | zerver/migrations/0209_user_profile_no_empty_password.py | 11,210 | Python |
from pyspark import SparkContext, SparkConf
def isNotHeader(line: str):
return not (line.startswith("host") and "bytes" in line)
if __name__ == "__main__":
conf = SparkConf().setAppName("unionLogs").setMaster("local[*]")
sc = SparkContext(conf = conf)
julyFirstLogs = sc.textFile("in/nasa_19950701.tsv")
augustFirstLogs = sc.textFile("in/nasa_19950801.tsv")
aggregatedLogLines = julyFirstLogs.union(augustFirstLogs)
cleanLogLines = aggregatedLogLines.filter(isNotHeader)
sample = cleanLogLines.sample(withReplacement = True, fraction = 0.1)
sample.saveAsTextFile("out/sample_nasa_logs.csv")
| 31.7 | 73 | 0.733438 | [
"MIT"
] | shubozhang/pyspark-tutorial | rdd/nasaApacheWebLogs/UnionLogSolutions.py | 634 | Python |
from flask import g
import logging
from datetime import datetime
import config
def get_logger(name):
# type: (str) -> logging.Logger
logging.basicConfig()
logger = logging.getLogger(name)
logger.setLevel(config.GLOBAL_LOGGING_LEVEL)
ch = logging.StreamHandler()
ch.setLevel(config.GLOBAL_LOGGING_LEVEL)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
# logger.addHandler(ch)
return logger
logger = get_logger('util')
def get_db_client(conn_pool, *args, **kws):
logger.debug("Getting DB Connection")
if 'db' not in g:
logger.debug("Creating new DB connection")
g.db = conn_pool.get()
return g.db
def teardown_db(conn_pool):
db = g.pop('db', None)
if db is not None:
conn_pool.put(db) | 26.4 | 87 | 0.715909 | [
"Apache-2.0"
] | Bhaskers-Blu-Org1/long-way-home-callforcode | app_util.py | 792 | Python |
# coding: utf-8
"""
TheTVDB API v2
API v3 targets v2 functionality with a few minor additions. The API is accessible via https://api.thetvdb.com and provides the following REST endpoints in JSON format. How to use this API documentation ---------------- You may browse the API routes without authentication, but if you wish to send requests to the API and see response data, then you must authenticate. 1. Obtain a JWT token by `POST`ing to the `/login` route in the `Authentication` section with your API key and credentials. 1. Paste the JWT token from the response into the \"JWT Token\" field at the top of the page and click the 'Add Token' button. You will now be able to use the remaining routes to send requests to the API and get a response. Language Selection ---------------- Language selection is done via the `Accept-Language` header. At the moment, you may only pass one language abbreviation in the header at a time. Valid language abbreviations can be found at the `/languages` route.. Authentication ---------------- Authentication to use the API is similar to the How-to section above. Users must `POST` to the `/login` route with their API key and credentials in the following format in order to obtain a JWT token. `{\"apikey\":\"APIKEY\",\"username\":\"USERNAME\",\"userkey\":\"USERKEY\"}` Note that the username and key are ONLY required for the `/user` routes. The user's key is labled `Account Identifier` in the account section of the main site. The token is then used in all subsequent requests by providing it in the `Authorization` header. The header will look like: `Authorization: Bearer <yourJWTtoken>`. Currently, the token expires after 24 hours. You can `GET` the `/refresh_token` route to extend that expiration date. Versioning ---------------- You may request a different version of the API by including an `Accept` header in your request with the following format: `Accept:application/vnd.thetvdb.v$VERSION`. This documentation automatically uses the version seen at the top and bottom of the page. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Movie(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'artworks': 'list[MovieArtwork]',
'genres': 'list[MovieGenre]',
'id': 'int',
'people': 'MoviePeople',
'release_dates': 'list[MovieReleaseDate]',
'remoteids': 'list[MovieRemoteId]',
'runtime': 'int',
'trailers': 'list[MovieTrailer]',
'translations': 'list[MovieTranslation]',
'url': 'str'
}
attribute_map = {
'artworks': 'artworks',
'genres': 'genres',
'id': 'id',
'people': 'people',
'release_dates': 'release_dates',
'remoteids': 'remoteids',
'runtime': 'runtime',
'trailers': 'trailers',
'translations': 'translations',
'url': 'url'
}
def __init__(self, artworks=None, genres=None, id=None, people=None, release_dates=None, remoteids=None, runtime=None, trailers=None, translations=None, url=None): # noqa: E501
"""Movie - a model defined in Swagger""" # noqa: E501
self._artworks = None
self._genres = None
self._id = None
self._people = None
self._release_dates = None
self._remoteids = None
self._runtime = None
self._trailers = None
self._translations = None
self._url = None
self.discriminator = None
if artworks is not None:
self.artworks = artworks
if genres is not None:
self.genres = genres
if id is not None:
self.id = id
if people is not None:
self.people = people
if release_dates is not None:
self.release_dates = release_dates
if remoteids is not None:
self.remoteids = remoteids
if runtime is not None:
self.runtime = runtime
if trailers is not None:
self.trailers = trailers
if translations is not None:
self.translations = translations
if url is not None:
self.url = url
@property
def artworks(self):
"""Gets the artworks of this Movie. # noqa: E501
:return: The artworks of this Movie. # noqa: E501
:rtype: list[MovieArtwork]
"""
return self._artworks
@artworks.setter
def artworks(self, artworks):
"""Sets the artworks of this Movie.
:param artworks: The artworks of this Movie. # noqa: E501
:type: list[MovieArtwork]
"""
self._artworks = artworks
@property
def genres(self):
"""Gets the genres of this Movie. # noqa: E501
:return: The genres of this Movie. # noqa: E501
:rtype: list[MovieGenre]
"""
return self._genres
@genres.setter
def genres(self, genres):
"""Sets the genres of this Movie.
:param genres: The genres of this Movie. # noqa: E501
:type: list[MovieGenre]
"""
self._genres = genres
@property
def id(self):
"""Gets the id of this Movie. # noqa: E501
:return: The id of this Movie. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Movie.
:param id: The id of this Movie. # noqa: E501
:type: int
"""
self._id = id
@property
def people(self):
"""Gets the people of this Movie. # noqa: E501
:return: The people of this Movie. # noqa: E501
:rtype: MoviePeople
"""
return self._people
@people.setter
def people(self, people):
"""Sets the people of this Movie.
:param people: The people of this Movie. # noqa: E501
:type: MoviePeople
"""
self._people = people
@property
def release_dates(self):
"""Gets the release_dates of this Movie. # noqa: E501
:return: The release_dates of this Movie. # noqa: E501
:rtype: list[MovieReleaseDate]
"""
return self._release_dates
@release_dates.setter
def release_dates(self, release_dates):
"""Sets the release_dates of this Movie.
:param release_dates: The release_dates of this Movie. # noqa: E501
:type: list[MovieReleaseDate]
"""
self._release_dates = release_dates
@property
def remoteids(self):
"""Gets the remoteids of this Movie. # noqa: E501
:return: The remoteids of this Movie. # noqa: E501
:rtype: list[MovieRemoteId]
"""
return self._remoteids
@remoteids.setter
def remoteids(self, remoteids):
"""Sets the remoteids of this Movie.
:param remoteids: The remoteids of this Movie. # noqa: E501
:type: list[MovieRemoteId]
"""
self._remoteids = remoteids
@property
def runtime(self):
"""Gets the runtime of this Movie. # noqa: E501
:return: The runtime of this Movie. # noqa: E501
:rtype: int
"""
return self._runtime
@runtime.setter
def runtime(self, runtime):
"""Sets the runtime of this Movie.
:param runtime: The runtime of this Movie. # noqa: E501
:type: int
"""
self._runtime = runtime
@property
def trailers(self):
"""Gets the trailers of this Movie. # noqa: E501
:return: The trailers of this Movie. # noqa: E501
:rtype: list[MovieTrailer]
"""
return self._trailers
@trailers.setter
def trailers(self, trailers):
"""Sets the trailers of this Movie.
:param trailers: The trailers of this Movie. # noqa: E501
:type: list[MovieTrailer]
"""
self._trailers = trailers
@property
def translations(self):
"""Gets the translations of this Movie. # noqa: E501
:return: The translations of this Movie. # noqa: E501
:rtype: list[MovieTranslation]
"""
return self._translations
@translations.setter
def translations(self, translations):
"""Sets the translations of this Movie.
:param translations: The translations of this Movie. # noqa: E501
:type: list[MovieTranslation]
"""
self._translations = translations
@property
def url(self):
"""Gets the url of this Movie. # noqa: E501
:return: The url of this Movie. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this Movie.
:param url: The url of this Movie. # noqa: E501
:type: str
"""
self._url = url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Movie, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Movie):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
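if __name__ == "__main__":
    # Minimal sketch of the generated model API; the attribute values are made up.
    movie = Movie(id=42, runtime=120, url="https://www.thetvdb.com/movies/42")
    print(movie.to_str())
    print(movie.to_dict()["runtime"])  # -> 120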
| 30.928571 | 2,040 | 0.591132 | [
"MIT"
] | h3llrais3r/tvdb_api | tvdb_api/models/movie.py | 10,825 | Python |
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class CartPoleEnv(gym.Env):
"""
Description:
A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum starts upright, and the goal is to prevent it from falling over by increasing and reducing the cart's velocity.
Source:
This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson
Observation:
Type: Box(4)
Num Observation Min Max
0 Cart Position -4.8 4.8
1 Cart Velocity -Inf Inf
2 Pole Angle -24 deg 24 deg
3 Pole Velocity At Tip -Inf Inf
Actions:
Type: Discrete(2)
Num Action
0 Push cart to the left
1 Push cart to the right
        Note: The amount by which the velocity is reduced or increased is not fixed; it depends on the angle the pole is pointing. This is because the center of gravity of the pole increases the amount of energy needed to move the cart underneath it.
Reward:
Reward is 1 for every step taken, including the termination step
Starting State:
All observations are assigned a uniform random value in [-0.05..0.05]
Episode Termination:
Pole Angle is more than 12 degrees
Cart Position is more than 2.4 (center of the cart reaches the edge of the display)
Episode length is greater than 200
Solved Requirements
Considered solved when the average reward is greater than or equal to 195.0 over 100 consecutive trials.
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second' : 50
}
def __init__(self, force=10.0, length=0.5, mass=0.1):
self.gravity = 9.8
self.masscart = 1.0
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = 'euler'
if isinstance(force, list):
self.force_mag_set = force
else:
self.force_mag_set = None
self.force_mag = force
if isinstance(length, list):
self.length_set = length
else:
self.length_set = None
self.length = length
if isinstance(mass, list):
self.masspole_set = mass
else:
self.masspole_set = None
self.masspole = mass
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
# Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds
high = np.array([
self.x_threshold * 2,
np.finfo(np.float32).max,
self.theta_threshold_radians * 2,
np.finfo(np.float32).max])
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,), dtype=np.float32)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
self.steps_in_episode = 0
def _sample_parameter(self):
if self.force_mag_set is not None:
set_index = self.np_random.randint(len(self.force_mag_set))
self.force_mag = self.np_random.uniform(
self.force_mag_set[set_index][0],
self.force_mag_set[set_index][1])
if self.length_set is not None:
set_index = self.np_random.randint(len(self.length_set))
self.length = self.np_random.uniform(self.length_set[set_index][0],
self.length_set[set_index][1])
if self.masspole_set is not None:
set_index = self.np_random.randint(len(self.masspole_set))
self.masspole = self.np_random.uniform(
self.masspole_set[set_index][0],
self.masspole_set[set_index][1])
self.polemass_length = (self.masspole * self.length)
self.total_mass = (self.masspole + self.masscart)
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
state = self.state
x, x_dot, theta, theta_dot = state
force = self.force_mag * action[0]
costheta = math.cos(theta)
sintheta = math.sin(theta)
temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta* temp) / (self.length * (4.0/3.0 - self.masspole * costheta * costheta / self.total_mass))
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
if self.kinematics_integrator == 'euler':
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else: # semi-implicit euler
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
theta = theta + self.tau * theta_dot
self.state = (x,x_dot,theta,theta_dot)
done = x < -self.x_threshold \
or x > self.x_threshold \
or theta < -self.theta_threshold_radians \
or theta > self.theta_threshold_radians
done = bool(done)
self.steps_in_episode += 1
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
info = {}
info['success'] = self.steps_in_episode >= 195
return np.array(self.state), reward, done, info
def reset(self):
self._sample_parameter()
self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))
self.steps_beyond_done = None
self.steps_in_episode = 0
return np.array(self.state)
def render(self, mode='human'):
screen_width = 600
screen_height = 400
world_width = self.x_threshold*2
scale = screen_width/world_width
carty = 100 # TOP OF CART
polewidth = 10.0
polelen = scale * (2 * self.length)
cartwidth = 50.0
cartheight = 30.0
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
l,r,t,b = -cartwidth/2, cartwidth/2, cartheight/2, -cartheight/2
axleoffset =cartheight/4.0
cart = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
self.carttrans = rendering.Transform()
cart.add_attr(self.carttrans)
self.viewer.add_geom(cart)
l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2
pole = rendering.FilledPolygon([(l,b), (l,t), (r,t), (r,b)])
pole.set_color(.8,.6,.4)
self.poletrans = rendering.Transform(translation=(0, axleoffset))
pole.add_attr(self.poletrans)
pole.add_attr(self.carttrans)
self.viewer.add_geom(pole)
self.axle = rendering.make_circle(polewidth/2)
self.axle.add_attr(self.poletrans)
self.axle.add_attr(self.carttrans)
self.axle.set_color(.5,.5,.8)
self.viewer.add_geom(self.axle)
self.track = rendering.Line((0,carty), (screen_width,carty))
self.track.set_color(0,0,0)
self.viewer.add_geom(self.track)
self._pole_geom = pole
if self.state is None: return None
# Edit the pole polygon vertex
pole = self._pole_geom
l,r,t,b = -polewidth/2,polewidth/2,polelen-polewidth/2,-polewidth/2
pole.v = [(l,b), (l,t), (r,t), (r,b)]
x = self.state
cartx = x[0]*scale+screen_width/2.0 # MIDDLE OF CART
self.carttrans.set_translation(cartx, carty)
self.poletrans.set_rotation(-x[2])
return self.viewer.render(return_rgb_array = mode=='rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
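if __name__ == "__main__":
    # Short random rollout; the parameter ranges below are arbitrary examples.
    # Each reset() re-samples force/length/mass uniformly from the given [low, high] sets.
    env = CartPoleEnv(force=[[5.0, 15.0]], length=[[0.4, 0.6]], mass=[[0.05, 0.2]])
    env.seed(0)
    obs = env.reset()
    total_reward = 0.0
    for _ in range(500):
        obs, reward, done, info = env.step(env.action_space.sample())
        total_reward += reward
        if done:
            break
    env.close()
    print("episode reward:", total_reward, "success:", info['success'])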
| 39.72973 | 245 | 0.593424 | [
"MIT"
] | takuseno/configurable-control-gym | configurable_control_gym/envs/cartpole.py | 8,820 | Python |
from odoo import models, api
class AccountUnreconcile(models.TransientModel):
_name = "account.unreconcile"
_description = "Account Unreconcile"
def trans_unrec(self):
context = dict(self._context or {})
if context.get('active_ids', False):
self.env['account.move.line'].browse(context.get('active_ids')).remove_move_reconcile()
return {'type': 'ir.actions.act_window_close'}
| 32.846154 | 99 | 0.686183 | [
"MIT"
] | LucasBorges-Santos/docker-odoo | odoo/base-addons/account/wizard/account_unreconcile.py | 427 | Python |
# Copyright (c) 2015 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
with open(
os.path.join(
os.path.dirname(__file__), 'VERSION')) as version_file:
version = version_file.read().strip()
__version__ = version
| 40.605263 | 71 | 0.755023 | [
"BSD-2-Clause"
] | 1M15M3/yubikey-manager | ykman/__init__.py | 1,543 | Python |
#
# Spec2Vec
#
# Copyright 2019 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numba
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy import spatial
# Add multi core parallelization
from concurrent.futures import ThreadPoolExecutor #, as_completed
# TODO better use joblib ? or dask?
def mol_sim_matrix(fingerprints1,
fingerprints2,
method='cosine',
filename=None,
max_size=1000,
print_progress=True):
"""Create Matrix of all molecular similarities (based on molecular fingerprints).
If filename is not None, the result will be saved as npy.
To create molecular fingerprints see mol_fingerprints() function from MS_functions.
Args:
----
fingerprints1: list
List of molecular fingerprints (numpy arrays).
fingerprints2: list
List of molecular fingerprints (numpy arrays).
method: str
Method to compare molecular fingerprints. Can be 'cosine', 'dice' etc.
(see scipy.spatial.distance.cdist).
filename: str
Filename to save results to. OR: If file already exists it will be
loaded instead.
max_size: int
Maximum size of (sub) all-vs-all matrix to handle in one go. Will split
up larger matrices into
max_size x max_size matrices.
print_progress: bool, optional
If True, print phase of the run to indicate progress. Default = True.
"""
if filename is not None:
try:
molecular_similarities = np.load(filename)
print("Molecular similarity scores found and loaded.")
collect_new_data = False
except FileNotFoundError:
print("Could not find file ", filename)
print("Molecular scores will be calculated from scratch.")
collect_new_data = True
else:
collect_new_data = True
if collect_new_data:
        # Create array of all fingerprints
fingerprints_arr1 = np.array(fingerprints1)
fingerprints_arr2 = np.array(fingerprints2)
# Calculate all-vs-all similarity matrix (similarity here= 1-distance )
matrix_size = (fingerprints_arr1.shape[0], fingerprints_arr2.shape[0])
molecular_similarities = np.zeros(matrix_size)
# Split large matrices up into smaller ones to track progress
splits = int(np.ceil(matrix_size[0]/max_size) * np.ceil(matrix_size[1]/max_size))
count_splits = 0
for i in range(int(np.ceil(matrix_size[0]/max_size))):
low1 = i * max_size
high1 = min((i + 1) * max_size, matrix_size[0])
for j in range(int(np.ceil(matrix_size[1]/max_size))):
low2 = j * max_size
high2 = min((j + 1) * max_size, matrix_size[1])
molecular_similarities[low1:high1, low2:high2] = 1 - spatial.distance.cdist(
fingerprints_arr1[low1:high1],
fingerprints_arr2[low2:high2],
method
)
# Track progress:
count_splits += 1
if print_progress:
print('\r',
"Calculated submatrix {} out of {}".format(count_splits, splits),
end="")
if print_progress:
print(20 * '--')
print("Succesfully calculated matrix with all-vs-all molecular similarity values.")
if filename is not None:
np.save(filename, molecular_similarities)
print("Matrix was saved under:", filename)
return molecular_similarities
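# Illustrative usage sketch (toy data, not from the original project): how mol_sim_matrix
# can be called on two small lists of fingerprint vectors; 'cosine' is passed through to
# scipy.spatial.distance.cdist as described in the docstring above.
def _example_mol_sim_matrix():
    """Minimal sketch: all-vs-all similarities for two tiny fingerprint lists."""
    fingerprints1 = [np.array([1, 0, 1, 1]), np.array([0, 1, 1, 0])]
    fingerprints2 = [np.array([1, 0, 1, 0]), np.array([1, 1, 1, 1])]
    sim = mol_sim_matrix(fingerprints1, fingerprints2,
                         method='cosine', filename=None, print_progress=False)
    # sim has shape (2, 2); sim[i, j] = 1 - cosine distance between fingerprint i and j.
    return sim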
# --------------------------------------------------------------------------------------------------
# ---------------------------- classical spectra similarity measures -------------------------------
# --------------------------------------------------------------------------------------------------
def cosine_score_greedy(spec1,
spec2,
mass_shift,
tol,
min_intens=0,
use_numba=True):
"""Calculate cosine score between spectrum1 and spectrum2.
    If mass_shift is not None (and non-zero), peak pairs shifted with respect to
    each other by the given difference in parent masses are also counted as
    matches ("modified cosine" score).
    Args:
    ----
    spec1: Spectrum peaks and intensities as numpy array.
    spec2: Spectrum peaks and intensities as numpy array.
    mass_shift: float
        Difference in parent mass of both spectra to account for. Set to 'None'
        when no shifting is desired --> back to normal cosine score.
    tol: float
        Tolerance value to define how far two peaks can be apart to still count as match.
    min_intens: float
        Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower
        intensity will be ignored --> higher min_intens is faster, but less precise.
    use_numba: bool
        If True, use the numba-compiled peak matching (find_pairs_numba).
"""
if spec1.shape[0] == 0 or spec2.shape[0] == 0:
return 0.0, []
# normalize intensities:
spec1[:, 1] = spec1[:, 1]/max(spec1[:, 1])
spec2[:, 1] = spec2[:, 1]/max(spec2[:, 1])
# filter, if wanted:
spec1 = spec1[spec1[:, 1] > min_intens, :]
spec2 = spec2[spec2[:, 1] > min_intens, :]
if use_numba:
zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
else:
zero_pairs = find_pairs(spec1, spec2, tol, shift=0.0)
if mass_shift is not None \
and mass_shift != 0.0:
if use_numba:
nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
else:
nonzero_pairs = find_pairs(spec1, spec2, tol, shift=mass_shift)
matching_pairs = zero_pairs + nonzero_pairs
else:
matching_pairs = zero_pairs
matching_pairs = sorted(matching_pairs, key=lambda x: x[2], reverse=True)
used1 = set()
used2 = set()
score = 0.0
used_matches = []
for m in matching_pairs:
if not m[0] in used1 and not m[1] in used2:
score += m[2]
used1.add(m[0])
used2.add(m[1])
used_matches.append(m)
# Normalize score:
score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2))
return score, used_matches
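# Illustrative usage sketch (toy peak lists, arbitrary tolerance): scoring two small spectra
# with the greedy matcher; cosine_score_hungarian takes the same arguments except use_numba.
def _example_cosine_score_greedy():
    """Minimal sketch: greedy (modified) cosine score for two toy spectra."""
    spec1 = np.array([[100.0, 0.7], [150.0, 1.0], [200.0, 0.2]])
    spec2 = np.array([[100.1, 0.9], [150.0, 1.0], [250.0, 0.4]])
    score, used_matches = cosine_score_greedy(spec1, spec2, mass_shift=None,
                                              tol=0.2, min_intens=0, use_numba=False)
    # score is the normalized cosine similarity; used_matches lists the matched peak pairs.
    return score, used_matches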
def cosine_score_hungarian(spec1,
spec2,
mass_shift,
tol,
min_intens=0):
"""Taking full care of weighted bipartite matching problem.
Use Hungarian algorithm (slow...)
Args:
--------
spec1: Spectrum peaks and intensities as numpy array.
spec2: Spectrum peaks and intensities as numpy array.
mass_shift: float
Difference in parent mass of both spectra to account for. Set to 'None'
when no shifting is desired --> back to normal cosine score.
tol: float
Tolerance value to define how far two peaks can be apart to still count as match.
min_intens: float
Minimum intensity (relative to max.intensity peak in spectrum). Peaks with lower
intensity will be ignored --> higher min_intens is faster, but less precise.
"""
if spec1.shape[0] == 0 or spec2.shape[0] == 0:
return 0.0, []
# Normalize intensities:
spec1[:, 1] = spec1[:, 1]/max(spec1[:, 1])
spec2[:, 1] = spec2[:, 1]/max(spec2[:, 1])
# Filter, if wanted:
spec1 = spec1[spec1[:, 1] > min_intens, :]
spec2 = spec2[spec2[:, 1] > min_intens, :]
zero_pairs = find_pairs_numba(spec1, spec2, tol, shift=0.0)
if mass_shift is not None \
and mass_shift != 0.0:
nonzero_pairs = find_pairs_numba(spec1, spec2, tol, shift=mass_shift)
matching_pairs = zero_pairs + nonzero_pairs
else:
matching_pairs = zero_pairs
matching_pairs = sorted(matching_pairs, key=lambda x: x[2], reverse=True)
# Use Hungarian_algorithm:
used_matches = []
list1 = list(set([x[0] for x in matching_pairs]))
list2 = list(set([x[1] for x in matching_pairs]))
matrix_size = (len(list1), len(list2))
matrix = np.ones(matrix_size)
if len(matching_pairs) > 0:
for m in matching_pairs:
matrix[list1.index(m[0]), list2.index(m[1])] = 1 - m[2]
        # Use the Hungarian algorithm to solve the linear sum assignment problem
row_ind, col_ind = linear_sum_assignment(matrix)
score = len(row_ind) - matrix[row_ind, col_ind].sum()
used_matches = [(list1[x], list2[y]) for (x, y) in zip(row_ind, col_ind)]
# Normalize score:
score = score/max(np.sum(spec1[:, 1]**2), np.sum(spec2[:, 1]**2))
else:
score = 0.0
return score, used_matches
def cosine_matrix_fast(spectra,
tol,
max_mz,
min_mz=0):
"""Calculates cosine similarity matrix.
Be careful! Binning is here done by creating one-hot vectors.
    It is hence actual "binning" and different from the tolerance-based
approach used for the cosine_matrix or molnet_matrix!
Also: tol here is about tol/2 when compared to cosine_matrix or molnet_matrix...
"""
for i, spectrum in enumerate(spectra):
spec = np.array(spectrum.peaks.copy(), dtype=float)
# Normalize intensities:
spec[:, 1] = spec[:, 1]/np.max(spec[:, 1])
if i == 0:
vector = one_hot_spectrum(spec, tol, max_mz, shift=0, min_mz=min_mz, method='max')
spec_vectors = np.zeros((len(spectra), vector.shape[0]))
spec_vectors[0, :] = vector
else:
spec_vectors[i, :] = one_hot_spectrum(spec, tol,
max_mz, shift=0,
min_mz=min_mz,
method='max')
Cdist = spatial.distance.cdist(spec_vectors, spec_vectors, 'cosine')
return 1 - Cdist
def cosine_score_matrix(spectra,
tol,
max_mz=1000.0,
# min_mz=0,
min_intens=0,
mass_shifting=False,
method='hungarian',
num_workers=4,
filename=None,
safety_points=None):
"""Create Matrix of all modified cosine similarities.
Takes some time to calculate, so better only do it once and save as npy.
Now implemented: parallelization of code using concurrent.futures and numba options.
spectra: list
List of spectra (of Spectrum class)
tol: float
Tolerance to still count peaks a match (mz +- tolerance).
max_mz: float
        Maximum m-z mass to take into account
#min_mz: float
# Minimum m-z mass to take into account
min_intens: float
Sets the minimum relative intensity peaks must have to be looked at for
potential matches.
mass_shifting: bool
Set to 'True' if mass difference between spectra should be accounted for
--> "modified cosine" score
Set to 'False' for --> "normal cosine" score
method: 'greedy', 'greedy-numba', 'hungarian'
"greedy" will use Simon's molnet scoring which is faster than hungarian,
but not 100% accurate
regarding the weighted bipartite matching problem.
"hungarian" will use the Hungarian algorithm, which is more accurate.
Since its slower, numba is used here to compile in time.
"greedy-numba" will use a (partly) numba compiled version of greedy.
Much faster, but needs numba.
num_workers: int
Number of threads to use for calculation.
filename: str/ None
        Filename to look for an existing npy-file with the similarity (modcos) matrix. Or, if not
found, to use to save the newly calculated matrix.
safety_points: int
Number of safety points, i.e. number of times the modcos-matrix is saved
during process. Set to 'None' to avoid saving matrix on the way.
"""
if filename is not None:
if filename[-4:] != '.npy':
filename = filename + '.npy'
# Try loading saved data
try:
print("Loading similarity scores from", filename)
modcos_sim = np.load(filename)
print("Loading min_match values from", filename[:-4]+ "_matches.npy")
modcos_matches = np.load(filename[:-4] + "_matches.npy")
# Check if matrix was calculated to the end:
diagonal = modcos_sim.diagonal()
if np.min(diagonal) == 0:
print("Uncomplete cosine similarity scores found and loaded.")
missing_scores = np.where(diagonal == 0)[0].astype(int)
print("Missing cosine scores will be calculated.")
counter_total = int((len(spectra)**2)/2)
counter_init = counter_total - np.sum(len(spectra) - missing_scores)
print("About ", 100*(counter_init/counter_total),
"% of the values already completed.")
collect_new_data = True
else:
print("Complete cosine similarity scores found and loaded.")
missing_scores = []
counter_init = 0
collect_new_data = False
except FileNotFoundError:
print("Could not find file ", filename, "or file",
filename[:-4] + "_matches.npy")
if mass_shifting:
print("Modified cosine scores will be calculated from scratch.")
else:
print("Cosine scores will be calculated from scratch.")
collect_new_data = True
missing_scores = np.arange(0, len(spectra))
counter_init = 0
else:
collect_new_data = True
missing_scores = np.arange(0, len(spectra))
counter_init = 0
if collect_new_data:
if counter_init == 0:
modcos_sim = np.zeros((len(spectra), len(spectra)))
modcos_matches = np.zeros((len(spectra), len(spectra)))
counter = counter_init
if safety_points is not None:
            # Save the modcos matrix periodically during the calculation
safety_save = int(((len(spectra)**2)/2)/safety_points)
print("Calculate pairwise scores by", num_workers, "number of workers.")
for i in missing_scores: #range(n_start, len(spectra)):
spec1 = np.array(spectra[i].peaks, dtype=float)
spec1 = spec1[spec1[:, 0] < max_mz, :]
parameter_collection = []
for j in range(i, len(spectra)):
spec2 = np.array(spectra[j].peaks, dtype=float)
spec2 = spec2[spec2[:, 0] < max_mz, :]
if mass_shifting:
mass_shift = spectra[i].parent_mz - spectra[j].parent_mz
else:
mass_shift = None
parameter_collection.append([spec1, spec2, i, j,
mass_shift, tol, min_intens,
method, counter])
counter += 1
            # Create a pool of worker threads to compute the pairwise scores.
modcos_pairs = []
with ThreadPoolExecutor(max_workers=num_workers) as executor:
futures = [executor.submit(modcos_pair, X, len(spectra)) for X in parameter_collection]
modcos_pairs.append(futures)
for m, future in enumerate(modcos_pairs[0]):
_, _, ind_i, ind_j, _, _, _, _, counting = parameter_collection[m]
modcos_sim[ind_i, ind_j] = future.result()[0]
modcos_matches[ind_i, ind_j] = future.result()[1]
if filename is not None \
and safety_points is not None:
if (counting+1) % safety_save == 0:
np.save(filename, modcos_sim)
np.save(filename[:-4] + "_matches.npy", modcos_matches)
# Symmetric matrix --> fill
for i in range(1, len(spectra)):
for j in range(i):
modcos_sim[i, j] = modcos_sim[j, i]
modcos_matches[i, j] = modcos_matches[j, i]
# Save final results
if filename is not None:
np.save(filename, modcos_sim)
np.save(filename[:-4]+ "_matches.npy", modcos_matches)
return modcos_sim, modcos_matches
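# Illustrative usage sketch (hypothetical minimal spectrum class, toy peaks): how
# cosine_score_matrix can be called; real code would pass objects of the project's
# Spectrum class, which likewise expose .peaks and .parent_mz.
def _example_cosine_score_matrix():
    """Minimal sketch: modified cosine matrix for two toy spectrum-like objects."""
    class _ToySpectrum:
        def __init__(self, peaks, parent_mz):
            self.peaks = peaks
            self.parent_mz = parent_mz
    spectra = [_ToySpectrum([[100.0, 1.0], [150.0, 0.5]], parent_mz=300.0),
               _ToySpectrum([[100.0, 0.8], [164.0, 0.6]], parent_mz=314.0)]
    sim, n_matches = cosine_score_matrix(spectra, tol=0.2, mass_shifting=True,
                                         method='greedy', num_workers=1, filename=None)
    # sim is the symmetric 2x2 score matrix; n_matches counts the used peak pairs.
    return sim, n_matches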
def modcos_pair(X, len_spectra):
"""Single molnet pair calculation
"""
spectra_i, spectra_j, i, j, mass_shift, tol, min_intens, method, counter = X
if method == 'greedy':
molnet_pair, used_matches = cosine_score_greedy(spectra_i, spectra_j,
mass_shift, tol,
min_intens=min_intens,
use_numba=False)
elif method == 'greedy-numba':
molnet_pair, used_matches = cosine_score_greedy(spectra_i, spectra_j,
mass_shift, tol,
min_intens=min_intens,
use_numba=True)
elif method == 'hungarian':
molnet_pair, used_matches = cosine_score_hungarian(spectra_i, spectra_j,
mass_shift, tol,
min_intens=min_intens)
    else:
        raise ValueError("Unknown method: {}".format(method))
if (counter+1) % 1000 == 0 or counter == len_spectra-1:
print('\r',
' Calculated MolNet for pair {} -- {}'.format(i, j),
'. ( ', np.round(200*(counter+1)/len_spectra**2, 2), ' % done).',
end="")
return molnet_pair, len(used_matches)
def one_hot_spectrum(spec,
tol,
max_mz,
shift=0,
min_mz=0,
method='max'):
"""Convert spectrum peaks into on-hot-vector
method: str
'max' take highest intensity peak within every bin.
'sum' take sum of all peaks within every bin.
"""
dim_vector = int((max_mz - min_mz)/tol)
one_hot_spec = np.zeros((dim_vector))
idx = ((spec[:, 0] + shift)*1/tol).astype(int)
idx[idx >= dim_vector] = 0
idx[idx < 0] = 0
if method == 'max':
for id1 in set(idx):
one_hot_spec[id1] = np.max(spec[(idx == id1), 1])
elif method == 'sum':
for id1 in set(idx):
one_hot_spec[id1] = np.sum(spec[(idx == id1), 1])
else:
print("Method not known...")
return one_hot_spec
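# Illustrative usage sketch (toy spectrum, arbitrary bin width): two nearby peaks fall into
# the same bin, so with method='max' only the larger intensity is kept for that bin.
def _example_one_hot_spectrum():
    """Minimal sketch: bin a toy spectrum into a one-hot-style intensity vector."""
    spec = np.array([[100.2, 0.5], [100.3, 1.0], [250.0, 0.8]])
    vector = one_hot_spectrum(spec, tol=0.5, max_mz=500.0, shift=0, min_mz=0, method='max')
    # 1000 bins of width 0.5; bin 200 holds max(0.5, 1.0) = 1.0, bin 500 holds 0.8.
    return vector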
@numba.njit
def find_pairs_numba(spec1, spec2, tol, shift=0):
"""Find matching pairs between two spectra.
Args
----
    spec1 : numpy array
        Array of (mz, intensity) pairs, one row per peak.
    spec2 : numpy array
        Array of (mz, intensity) pairs, one row per peak.
    tol : float
        Tolerance. Peaks will be considered a match when < tol apart.
shift : float, optional
Shift spectra peaks by shift. The default is 0.
Returns
-------
matching_pairs : list
List of found matching peaks.
"""
matching_pairs = []
for idx in range(len(spec1)):
intensity = spec1[idx, 1]
matches = np.where((np.abs(spec2[:, 0] - spec1[idx, 0] + shift) <= tol))[0]
for match in matches:
matching_pairs.append((idx, match, intensity*spec2[match][1]))
return matching_pairs
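# Illustrative usage sketch (toy peaks): peaks closer than tol are reported as matching
# pairs together with the product of their intensities.
def _example_find_pairs_numba():
    """Minimal sketch: tolerance-based peak matching without a mass shift."""
    spec1 = np.array([[100.0, 1.0], [150.0, 0.5]])
    spec2 = np.array([[100.1, 0.8], [200.0, 0.3]])
    matching_pairs = find_pairs_numba(spec1, spec2, 0.2, shift=0.0)
    # Only spec1 peak 0 and spec2 peak 0 are within 0.2 Da: [(0, 0, 0.8)].
    return matching_pairs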
def find_pairs(spec1, spec2, tol, shift=0):
"""Find matching pairs between two spectra.
Args
----
    spec1 : numpy array
        Array of (mz, intensity) pairs, one row per peak.
    spec2 : numpy array
        Array of (mz, intensity) pairs, one row per peak.
    tol : float
        Tolerance. Peaks will be considered a match when < tol apart.
shift : float, optional
Shift spectra peaks by shift. The default is 0.
Returns
-------
matching_pairs : list
List of found matching peaks.
"""
# Sort peaks and losses by m/z
spec1 = spec1[np.lexsort((spec1[:, 1], spec1[:, 0])), :]
spec2 = spec2[np.lexsort((spec2[:, 1], spec2[:, 0])), :]
matching_pairs = []
spec2lowpos = 0
spec2length = len(spec2)
for idx in range(len(spec1)):
mz = spec1[idx, 0]
intensity = spec1[idx, 1]
# Do we need to increase the lower idx?
while spec2lowpos < spec2length and spec2[spec2lowpos][0] + shift < mz - tol:
spec2lowpos += 1
if spec2lowpos == spec2length:
break
spec2pos = spec2lowpos
while(spec2pos < spec2length and spec2[spec2pos][0] + shift < mz + tol):
matching_pairs.append((idx, spec2pos, intensity * spec2[spec2pos][1]))
spec2pos += 1
return matching_pairs | 37.783688 | 103 | 0.57114 | [
"Apache-2.0"
] | matchms/old-iomega-spec2vec | matchms/old/ms_similarity_classical.py | 21,310 | Python |
from leapp.models import Model, fields
from leapp.topics import BootPrepTopic, SystemInfoTopic
from leapp.utils.deprecation import deprecated
class DracutModule(Model):
"""
Specify a dracut module that should be included into the initramfs
The specified dracut module has to be compatible with the target system.
See the description of UpgradeInitramfsTasks and TargetInitramfsTasks
for more information about the role of initramfs in the in-place upgrade
process.
"""
topic = BootPrepTopic
name = fields.String()
"""
Name of the dracut module that should be added (--add option of dracut)
when a initramfs is built.
"""
module_path = fields.Nullable(fields.String(default=None))
"""
    module_path specifies the dracut module that is supposed to be copied.
    If the path is not set, the given name will just be activated. In other words,
if the dracut module is stored outside the /usr/lib/dracut/modules.d/
directory, set the absolute path to it, so leapp will manage it during
the upgrade to ensure the module will be added into the initramfs.
The module has to be stored on the local storage. In such a case, it is
recommended to store it into the 'files' directory of an actor generating
this object.
Note: It's expected to set the full path from the host POV. In case
of actions inside containers, the module is still copied from the HOST
into the container workspace.
"""
class UpgradeInitramfsTasks(Model):
"""
Influence generating of the (leapp) upgrade initramfs
The upgrade initramfs is used during the crucial part of the upgrade,
in which the original rpms are upgraded, configuration of applications
are migrated, etc. To be able to boot into the leapp upgrade environment
    correctly, it is expected that all needed drivers, configuration files, ... are
included inside the upgrade initramfs. Produce this message with
expected content to influence the upgrade initramfs.
If some specific rpms or content is required to be able to build the
upgrade initramfs, see the <container-model>.
Note: The built initramfs is composed of stuff for the target system.
    For example, if you are on RHEL 7 and plan the upgrade to RHEL 8, you need
    to provide content (e.g. drivers, dracut modules) compatible with
    the RHEL 8 system.
"""
topic = BootPrepTopic
include_files = fields.List(fields.String(), default=[])
"""
    List of files (canonical filesystem paths) to include in the initramfs
"""
include_dracut_modules = fields.List(fields.Model(DracutModule), default=[])
"""
List of dracut modules that should be installed in the initramfs.
See the DracutModule model for more information.
"""
class TargetInitramfsTasks(UpgradeInitramfsTasks):
"""
    Analogous to UpgradeInitramfsTasks, but referring to the target initramfs.
    Target initramfs is the one that will be used to boot into your upgraded
    system. If you want to ensure that you are able to boot into the target
    (upgraded) system, it is possible you need to add the same stuff as you added
into the upgrade initramfs.
If some specific rpms are required to be able to build the upgrade
initramfs, install these via the RpmTransactionTasks model.
"""
@deprecated(since='2021-04-01', message='Replaced by TargetInitramfsTasks.')
class InitrdIncludes(Model):
"""
    List of files (canonical filesystem paths) to include in the RHEL-8 initramfs
"""
topic = SystemInfoTopic
files = fields.List(fields.String())
@deprecated(since='2021-04-01', message='Replaced by UpgradeInitramfsTasks.')
class UpgradeDracutModule(Model):
"""
Specify a dracut module that should be included into the (leapp) upgrade initramfs.
The upgrade initramfs is used during the crucial part of the upgrade,
in which the original rpms are upgraded. If a dracut module is required to
be included inside the upgrade initramfs (e.g. because it is needed
to handle/initialize your storage properly), produce this msg.
"""
topic = BootPrepTopic
name = fields.String()
"""
Name of the dracut module that should be added (--add option of dracut)
"""
module_path = fields.Nullable(fields.String(default=None))
"""
    module_path specifies the dracut module that is to be copied.
If the path is not set, the given name will just be activated.
"""
| 36.112903 | 87 | 0.724654 | [
"Apache-2.0"
] | JohnKepplers/leapp-repository | repos/system_upgrade/common/models/initramfs.py | 4,478 | Python |
# -*- coding: utf-8 -*-
"""
Defines the unit tests for the :mod:`colour.colorimetry.luminance` module.
"""
import numpy as np
import unittest
from colour.colorimetry import (
luminance_Newhall1943, intermediate_luminance_function_CIE1976,
luminance_CIE1976, luminance_ASTMD1535, luminance_Fairchild2010,
luminance_Fairchild2011, luminance_Abebe2017)
from colour.colorimetry.luminance import luminance
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'TestLuminanceNewhall1943', 'TestLuminanceASTMD1535',
'TestIntermediateLuminanceFunctionCIE1976', 'TestLuminanceCIE1976',
'TestLuminanceFairchild2010', 'TestLuminanceFairchild2011',
'TestLuminanceAbebe2017', 'TestLuminance'
]
class TestLuminanceNewhall1943(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition unit tests methods.
"""
def test_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition.
"""
self.assertAlmostEqual(
luminance_Newhall1943(4.08244375), 12.550078816731881, places=7)
self.assertAlmostEqual(
luminance_Newhall1943(5.39132685), 23.481252371310738, places=7)
self.assertAlmostEqual(
luminance_Newhall1943(2.97619312), 6.4514266875601924, places=7)
def test_n_dimensional_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition n-dimensional arrays support.
"""
V = 4.08244375
Y = luminance_Newhall1943(V)
V = np.tile(V, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)
V = np.reshape(V, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)
V = np.reshape(V, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(luminance_Newhall1943(V), Y, decimal=7)
def test_domain_range_scale_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition domain and range scale support.
"""
Y = luminance_Newhall1943(4.08244375)
d_r = (('reference', 1, 1), (1, 0.1, 0.01), (100, 10, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_Newhall1943(4.08244375 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_Newhall1943(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Newhall1943`
definition nan support.
"""
luminance_Newhall1943(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceASTMD1535(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition unit tests methods.
"""
def test_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition.
"""
self.assertAlmostEqual(
luminance_ASTMD1535(4.08244375), 12.236342675366036, places=7)
self.assertAlmostEqual(
luminance_ASTMD1535(5.39132685), 22.893999867280378, places=7)
self.assertAlmostEqual(
luminance_ASTMD1535(2.97619312), 6.2902253509053132, places=7)
def test_n_dimensional_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition n-dimensional arrays support.
"""
V = 4.08244375
Y = luminance_ASTMD1535(V)
V = np.tile(V, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)
V = np.reshape(V, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)
V = np.reshape(V, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(luminance_ASTMD1535(V), Y, decimal=7)
def test_domain_range_scale_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition domain and range scale support.
"""
Y = luminance_ASTMD1535(4.08244375)
d_r = (('reference', 1, 1), (1, 0.1, 0.01), (100, 10, 1))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_ASTMD1535(4.08244375 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_ASTMD1535(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_ASTMD1535`
definition nan support.
"""
luminance_ASTMD1535(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestIntermediateLuminanceFunctionCIE1976(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition unit tests methods.
"""
def test_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition.
"""
self.assertAlmostEqual(
intermediate_luminance_function_CIE1976(0.495929964178047),
12.197225350000002,
places=7)
self.assertAlmostEqual(
intermediate_luminance_function_CIE1976(0.613072093530391),
23.042767810000004,
places=7)
self.assertAlmostEqual(
intermediate_luminance_function_CIE1976(0.394876333449113),
6.157200790000001,
places=7)
def test_n_dimensional_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition n-dimensional arrays
support.
"""
f_Y_Y_n = 0.495929964178047
Y = intermediate_luminance_function_CIE1976(f_Y_Y_n)
f_Y_Y_n = np.tile(f_Y_Y_n, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)
f_Y_Y_n = np.reshape(f_Y_Y_n, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)
f_Y_Y_n = np.reshape(f_Y_Y_n, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(f_Y_Y_n), Y, decimal=7)
def test_domain_range_scale_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition domain and range scale
support.
"""
Y = intermediate_luminance_function_CIE1976(41.527875844653451, 100)
for scale in ('reference', 1, 100):
with domain_range_scale(scale):
np.testing.assert_almost_equal(
intermediate_luminance_function_CIE1976(
41.527875844653451, 100),
Y,
decimal=7)
@ignore_numpy_errors
def test_nan_intermediate_luminance_function_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.\
intermediate_luminance_function_CIE1976` definition nan support.
"""
intermediate_luminance_function_CIE1976(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceCIE1976(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_CIE1976` definition
unit tests methods.
"""
def test_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition.
"""
self.assertAlmostEqual(
luminance_CIE1976(41.527875844653451),
12.197225350000002,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(55.116362849525402),
23.042767810000004,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(29.805654680097106), 6.157200790000001, places=7)
self.assertAlmostEqual(
luminance_CIE1976(56.480581732417676, 50),
12.197225349999998,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(47.317620274162735, 75),
12.197225350000002,
places=7)
self.assertAlmostEqual(
luminance_CIE1976(42.519930728120940, 95),
12.197225350000005,
places=7)
def test_n_dimensional_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition n-dimensional arrays support.
"""
L_star = 41.527875844653451
Y = luminance_CIE1976(L_star)
L_star = np.tile(L_star, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)
L_star = np.reshape(L_star, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)
L_star = np.reshape(L_star, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(luminance_CIE1976(L_star), Y, decimal=7)
def test_domain_range_scale_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition domain and range scale support.
"""
Y = luminance_CIE1976(41.527875844653451, 100)
d_r = (('reference', 1), (1, 0.01), (100, 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_CIE1976(41.527875844653451 * factor, 100),
Y * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_CIE1976(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_CIE1976`
definition nan support.
"""
luminance_CIE1976(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceFairchild2010(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition unit tests methods.
"""
def test_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition.
"""
self.assertAlmostEqual(
luminance_Fairchild2010(31.996390226262736),
0.12197225350000002,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(60.203153682783302),
0.23042767809999998,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(11.836517240976489),
0.06157200790000001,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(24.424283249379986, 2.75),
0.12197225350000002,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(100.019986327374240),
1008.00000024,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2010(100.019999997090270),
100799.92312466,
places=7)
def test_n_dimensional_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition n-dimensional arrays support.
"""
L_hdr = 31.996390226262736
Y = luminance_Fairchild2010(L_hdr)
L_hdr = np.tile(L_hdr, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
L_hdr = np.reshape(L_hdr, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
L_hdr = np.reshape(L_hdr, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(
luminance_Fairchild2010(L_hdr), Y, decimal=7)
def test_domain_range_scale_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition domain and range scale support.
"""
Y = luminance_Fairchild2010(31.996390226262736)
d_r = (('reference', 1, 1), (1, 0.01, 1), (100, 1, 100))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_Fairchild2010(31.996390226262736 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_Fairchild2010(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2010`
definition nan support.
"""
luminance_Fairchild2010(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceFairchild2011(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition unit tests methods.
"""
def test_luminance_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition.
"""
self.assertAlmostEqual(
luminance_Fairchild2011(51.852958445912506),
0.12197225350000007,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2011(65.275207956353853),
0.23042767809999998,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2011(39.818935510715917),
0.061572007900000038,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2011(0.13268968410139345, 2.75),
0.12197225350000002,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2011(234.72925681957565),
1008.00000000,
places=7)
self.assertAlmostEqual(
luminance_Fairchild2011(245.57059778237573),
100800.00000000,
places=7)
def test_n_dimensional_luminance_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition n-dimensional arrays support.
"""
L_hdr = 51.852958445912506
Y = luminance_Fairchild2011(L_hdr)
L_hdr = np.tile(L_hdr, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(
luminance_Fairchild2011(L_hdr), Y, decimal=7)
L_hdr = np.reshape(L_hdr, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(
luminance_Fairchild2011(L_hdr), Y, decimal=7)
L_hdr = np.reshape(L_hdr, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(
luminance_Fairchild2011(L_hdr), Y, decimal=7)
def test_domain_range_scale_luminance_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition domain and range scale support.
"""
Y = luminance_Fairchild2011(26.459509817572265)
d_r = (('reference', 1, 1), (1, 0.01, 1), (100, 1, 100))
for scale, factor_a, factor_b in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_Fairchild2011(26.459509817572265 * factor_a),
Y * factor_b,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_Fairchild2011(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Fairchild2011`
definition nan support.
"""
luminance_Fairchild2011(
np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]))
class TestLuminanceAbebe2017(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance_Abebe2017`
definition unit tests methods.
"""
def test_luminance_Abebe2017(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
definition.
"""
self.assertAlmostEqual(
luminance_Abebe2017(0.486955571109229),
12.197225350000004,
places=7)
self.assertAlmostEqual(
luminance_Abebe2017(0.474544792145434, method='Stevens'),
12.197225350000025,
places=7)
self.assertAlmostEqual(
luminance_Abebe2017(0.286847428534793, 1000),
12.197225350000046,
places=7)
self.assertAlmostEqual(
luminance_Abebe2017(0.192145492588158, 4000),
12.197225350000121,
places=7)
self.assertAlmostEqual(
luminance_Abebe2017(0.170365211220992, 4000, method='Stevens'),
12.197225349999933,
places=7)
def test_n_dimensional_luminance_Abebe2017(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
definition n-dimensional arrays support.
"""
L = 0.486955571109229
Y = luminance_Abebe2017(L)
L = np.tile(L, 6)
Y = np.tile(Y, 6)
np.testing.assert_almost_equal(luminance_Abebe2017(L), Y, decimal=7)
L = np.reshape(L, (2, 3))
Y = np.reshape(Y, (2, 3))
np.testing.assert_almost_equal(luminance_Abebe2017(L), Y, decimal=7)
L = np.reshape(L, (2, 3, 1))
Y = np.reshape(Y, (2, 3, 1))
np.testing.assert_almost_equal(luminance_Abebe2017(L), Y, decimal=7)
def test_domain_range_scale_luminance_Abebe2017(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
definition domain and range scale support.
"""
L = luminance_Abebe2017(0.486955571109229)
d_r = (('reference', 1), (1, 1), (100, 1))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance_Abebe2017(0.486955571109229 * factor,
100 * factor),
L * factor,
decimal=7)
@ignore_numpy_errors
def test_nan_luminance_Abebe2017(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance_Abebe2017`
definition nan support.
"""
luminance_Abebe2017(
*[np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])] * 2)
class TestLuminance(unittest.TestCase):
"""
Defines :func:`colour.colorimetry.luminance.luminance` definition unit
tests methods.
"""
def test_domain_range_scale_luminance(self):
"""
Tests :func:`colour.colorimetry.luminance.luminance` definition
domain and range scale support.
"""
m = ('Newhall 1943', 'ASTM D1535', 'CIE 1976', 'Fairchild 2010',
'Fairchild 2011', 'Abebe 2017')
v = [luminance(41.527875844653451, method, Y_n=100) for method in m]
d_r = (('reference', 1), (1, 0.01), (100, 1))
for method, value in zip(m, v):
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_almost_equal(
luminance(
41.527875844653451 * factor, method, Y_n=100),
value * factor,
decimal=7)
if __name__ == '__main__':
unittest.main()
| 32.215071 | 79 | 0.620194 | [
"BSD-3-Clause"
] | colour-science/colour | colour/colorimetry/tests/test_luminance.py | 20,521 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2010, 2021. All Rights Reserved.
""" setup.py for resilient-circuits Python module """
import io
from os import path
from setuptools import find_packages, setup
this_directory = path.abspath(path.dirname(__file__))
with io.open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="resilient_circuits",
use_scm_version={"root": "../", "relative_to": __file__},
setup_requires=[
"setuptools_scm < 6.0.0;python_version<'3.0'",
"setuptools_scm >= 6.0.0;python_version>='3.0'"
],
license="MIT",
packages=find_packages(),
include_package_data=True,
# Runtime Dependencies
install_requires=[
"stompest>=2.3.0",
"circuits",
"pytz",
"jinja2~=2.0",
"pysocks",
"filelock>=2.0.5",
"watchdog>=0.9.0, <1.0.0; python_version < '3.6.0'",
"watchdog>=0.9.0; python_version >= '3.6.0'",
"resilient>=42.0.0",
"resilient-lib>=42.0.0"
],
entry_points={
"console_scripts": ["res-action-test = resilient_circuits.bin.res_action_test:main",
"resilient-circuits = resilient_circuits.bin.resilient_circuits_cmd:main"]
},
# PyPI metadata
author="IBM SOAR",
description="Framework used to run IBM SOAR Apps and Integrations.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ibmresilient/resilient-python-api/tree/master/resilient-circuits",
project_urls={
"Documentation": "https://ibm.biz/soar-docs",
"API Docs": "https://ibm.biz/soar-python-docs",
"IBM Community": "https://ibm.biz/soarcommunity",
"Change Log": "https://ibm.biz/resilient-circuits-changes"
},
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6"
],
keywords="ibm soar resilient circuits resilient-circuits"
)
| 31.676923 | 102 | 0.63186 | [
"MIT"
] | ibmresilient/resilient-python-api | resilient-circuits/setup.py | 2,059 | Python |
import pyglet
class Resources:
# --- Player Parameters ---
player_animation_started = False
player_images = []
player_animation_time = 1. / 9.
player_animation_index = 0
# --- Obstacle Parameters ---
obstacle_images = []
# --- Player Methods ---
# loads the images needed for the player animation if they haven't been loaded already
@staticmethod
def load_images():
if len(Resources.player_images) == 0:
Resources.player_images.append(pyglet.image.load("res/dinosaur_left.png"))
Resources.player_images.append(pyglet.image.load("res/dinosaur_right.png"))
Resources.player_images.append(pyglet.image.load("res/dinosaur_normal.png"))
if len(Resources.obstacle_images) == 0:
Resources.obstacle_images.append(pyglet.image.load("res/cactus_small.png"))
Resources.obstacle_images.append(pyglet.image.load("res/cactus_big.png"))
Resources.start_player_animation()
# starts the player's running animation by scheduling recurring updates to the player's image index
@staticmethod
def start_player_animation():
if not Resources.player_animation_started:
pyglet.clock.schedule_interval(Resources.trigger_player_update, Resources.player_animation_time)
Resources.player_animation_started = True
# updates the player's image index
@staticmethod
def trigger_player_update(_):
Resources.player_animation_index = 1 - Resources.player_animation_index
# returns the current image for the running player
@staticmethod
def player_running_image():
return Resources.player_images[Resources.player_animation_index]
# returns the image for the jumping player
@staticmethod
def player_jumping_image():
return Resources.player_images[2]
| 36.211538 | 108 | 0.698885 | [
"MIT"
] | lukDev/dinosaur | src/dinosaur/game/resources.py | 1,883 | Python |
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The unique worker functionality for this service is contained here.
The entry-point is the **cb_nr_subscription_handler**
The design and flow leverage a few constraints that are placed upon it
by NATS Streaming and using AWAIT on the default loop.
- NATS streaming queues require one message to be processed at a time.
- AWAIT on the default loop effectively runs synchronously
If these constraints change, the use of Flask-SQLAlchemy would need to change.
Flask-SQLAlchemy currently allows the base model to be changed, or reworking
the model to a standalone SQLAlchemy usage with an async engine would need
to be pursued.
"""
import json
import os
from typing import Dict
import nats
from auth_api.models import Affiliation as AffiliationModel
from auth_api.models import Entity as EntityModel
from auth_api.models import Org as OrgModel
from auth_api.models import db
from auth_api.services.rest_service import RestService
from auth_api.utils.enums import CorpType
from dateutil import parser
from entity_queue_common.service import QueueServiceManager
from entity_queue_common.service_utils import QueueException, logger
from flask import Flask # pylint: disable=wrong-import-order
from business_events_listener import config
async def cb_nr_subscription_handler(msg: nats.aio.client.Msg):
"""Use Callback to process Queue Msg objects."""
try:
logger.info('Received raw message seq:%s, data= %s', msg.sequence, msg.data.decode())
event_message = json.loads(msg.data.decode('utf-8'))
logger.debug('Event Message Received: %s', event_message)
await process_event(event_message, FLASK_APP)
except Exception: # noqa pylint: disable=broad-except
# Catch Exception so that any error is still caught and the message is removed from the queue
logger.error('Queue Error: %s', json.dumps(event_message), exc_info=True)
async def process_event(event_message, flask_app):
"""Render the org status."""
if not flask_app:
raise QueueException('Flask App not available.')
with flask_app.app_context():
message_type = event_message.get('type', None)
if message_type == 'bc.registry.names.events':
await process_name_events(event_message)
async def process_name_events(event_message: Dict[str, any]):
"""Process name events.
1. Check if the NR already exists in entities table, if yes apply changes. If not create entity record.
2. Check if new status is DRAFT, if yes call pay-api and get the account details for the payments against the NR.
3. If an account is found, affiliate to that account.
Args:
event_message (object): cloud event message, sample below.
{
'specversion': '1.0.1',
'type': 'bc.registry.names.events',
'source': '/requests/6724165',
'id': id,
'time': '',
'datacontenttype': 'application/json',
'identifier': '781020202',
'data': {
'request': {
'nrNum': 'NR 5659951',
'newState': 'APPROVED',
'previousState': 'DRAFT'
}
}
}
"""
logger.debug('>>>>>>>process_name_events>>>>>')
request_data = event_message.get('data').get('request')
nr_number = request_data['nrNum']
nr_status = request_data['newState']
nr_entity = EntityModel.find_by_business_identifier(nr_number)
if nr_entity is None:
        logger.info("Entity doesn't exist, creating a new entity.")
nr_entity = EntityModel(
business_identifier=nr_number,
corp_type_code=CorpType.NR.value
)
nr_entity.status = nr_status
    nr_entity.name = request_data.get('name', '')  # it's not part of the event yet; handle it in case they include it.
nr_entity.last_modified_by = None # TODO not present in event message.
nr_entity.last_modified = parser.parse(event_message.get('time'))
if nr_status == 'DRAFT' and AffiliationModel.find_affiliations_by_business_identifier(nr_number) is None:
logger.info('Status is DRAFT, getting invoices for account')
# Find account details for the NR.
invoices = RestService.get(
f'{APP_CONFIG.PAY_API_URL}/payment-requests?businessIdentifier={nr_number}',
token=RestService.get_service_account_token()
).json()
# Ideally there should be only one or two (priority fees) payment request for the NR.
if invoices and (auth_account_id := invoices['invoices'][0].get('paymentAccount').get('accountId')) \
and str(auth_account_id).isnumeric():
logger.info('Account ID received : %s', auth_account_id)
            # Auth account id can be a service account value too, so doing a query lookup rather than find_by_id
org: OrgModel = db.session.query(OrgModel).filter(OrgModel.id == auth_account_id).one_or_none()
if org:
nr_entity.pass_code_claimed = True
# Create an affiliation.
logger.info('Creating affiliation between Entity : %s and Org : %s', nr_entity, org)
affiliation: AffiliationModel = AffiliationModel(entity=nr_entity, org=org)
affiliation.flush()
nr_entity.save()
logger.debug('<<<<<<<process_name_events<<<<<<<<<<')
qsm = QueueServiceManager() # pylint: disable=invalid-name
APP_CONFIG = config.get_named_config(os.getenv('DEPLOYMENT_ENV', 'production'))
FLASK_APP = Flask(__name__)
FLASK_APP.config.from_object(APP_CONFIG)
db.init_app(FLASK_APP)
| 43.527778 | 117 | 0.686662 | [
"Apache-2.0"
] | saravanpa-aot/sbc-auth | queue_services/business-events-listener/src/business_events_listener/worker.py | 6,269 | Python |
##############################################################################
# Copyright 2019 Parker Berberian and Others #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
##############################################################################
from st2tests.base import BaseActionTestCase
from actions.actions import get_task_list
import json
class GetTaskListTestCase(BaseActionTestCase):
action_cls = get_task_list.Task_List_Action
def setUp(self):
super(GetTaskListTestCase, self).setUp()
self.action = self.get_action_instance()
def test_tasklist_multiple_tasks(self):
self.action.action_service.set_value("job_1", json.dumps({
"access": {
"task1": "asdf",
"task2": "fdsa"
}
}), local=False)
result = self.action.run(job_id=1, type="access")
self.assertEqual(set(result), set(["task1", "task2"]))
def test_tasklist_single_task(self):
self.action.action_service.set_value("job_1", json.dumps({
"access": {"task1": "asdf"},
"hardware": {"task10": "asdf"}
}), local=False)
result = self.action.run(job_id=1, type="hardware")
self.assertEqual(set(result), set(["task10"]))
def test_empty_tasklist(self):
self.action.action_service.set_value("job_1", json.dumps({
"access": {"task1": "asdf"},
"hardware": {"task10": "asdf"}
}), local=False)
result = self.action.run(job_id=1, type="unknown")
self.assertFalse(result)
| 47.150943 | 78 | 0.502201 | [
"Apache-2.0"
] | opnfv/laas-reflab | laas/tests/test_action_get_task_list.py | 2,499 | Python |
from datetime import date
import pytest
from dateutil.parser import parse as dt_parse
from freezegun import freeze_time
from app.models.alert_date import AlertDate
def test_AlertDate_properties():
sample_datetime = dt_parse('2021-03-02T10:30:00Z')
alerts_date = AlertDate(sample_datetime)
assert alerts_date.as_lang == 'at 10:30am on Tuesday 2 March 2021'
assert alerts_date.as_iso8601 == '2021-03-02T10:30:00+00:00'
assert alerts_date.as_utc_datetime == dt_parse('2021-03-02T10:30:00Z')
assert alerts_date.as_local_datetime == dt_parse('2021-03-02T10:30:00Z')
assert alerts_date.as_url == '2-mar-2021'
def test_AlertDate_properties_work_with_bst():
sample_datetime = dt_parse('2021-04-20T23:30:00Z')
alerts_date = AlertDate(sample_datetime)
assert alerts_date.as_lang == 'at 12:30am on Wednesday 21 April 2021'
assert alerts_date.as_iso8601 == '2021-04-21T00:30:00+01:00'
assert alerts_date.as_utc_datetime == dt_parse('2021-04-20T23:30:00Z')
assert alerts_date.as_local_datetime == dt_parse('2021-04-21T00:30:00+01:00')
assert alerts_date.as_local_date == date(2021, 4, 21)
assert alerts_date.as_url == '21-apr-2021'
@pytest.mark.parametrize('hour, minute, expected_lang', (
('00', '00', 'at midnight on Sunday 21 March 2021'),
('12', '00', 'at midday on Sunday 21 March 2021'),
('23', '59', 'at 11:59pm on Sunday 21 March 2021'), # 12 hour clock
))
def test_AlertDate_at_midday_and_midnight(hour, minute, expected_lang):
sample_datetime = dt_parse(f'2021-03-21T{hour}:{minute}:00Z')
alerts_date = AlertDate(sample_datetime)
assert alerts_date.as_lang == expected_lang
@pytest.mark.parametrize('now, sample, expected_is_today', (
# GMT
('2021-01-01T00:00:00Z', '2021-12-31T23:59:59Z', False),
('2021-01-01T00:00:00Z', '2021-01-01T00:00:00Z', True),
('2021-01-01T23:59:59Z', '2021-01-01T00:00:00Z', True),
('2021-01-01T00:00:00Z', '2021-01-01T23:59:59Z', True),
('2021-01-01T23:59:59Z', '2021-01-01T23:59:59Z', True),
('2021-01-01T23:59:59Z', '2021-01-02T00:00:00Z', False),
# BST
('2021-05-31T23:00:00Z', '2021-05-31T22:59:59Z', False),
('2021-05-31T23:00:00Z', '2021-05-31T23:00:00Z', True),
('2021-06-01T22:59:59Z', '2021-05-31T23:00:00Z', True),
('2021-05-31T23:00:00Z', '2021-06-01T22:59:59Z', True),
('2021-06-01T22:59:59Z', '2021-06-01T22:59:59Z', True),
('2021-06-01T22:59:59Z', '2021-06-01T23:00:00Z', False),
))
def test_AlertDate_is_today(now, sample, expected_is_today):
with freeze_time(now):
assert AlertDate(dt_parse(sample)).is_today == expected_is_today
| 43.196721 | 81 | 0.696395 | [
"MIT"
] | alphagov/notifications-govuk-alerts | tests/app/models/test_alert_date.py | 2,635 | Python |
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This template creates an external load balancer. """
import copy
from hashlib import sha1
import json
def set_optional_property(destination, source, prop_name):
""" Copies the property value if present. """
if prop_name in source:
destination[prop_name] = source[prop_name]
def get_backend_service(properties, backend_spec, res_name, project_id):
""" Creates the backend service. """
name = backend_spec.get('resourceName', res_name)
backend_name = backend_spec.get('name', name)
backend_properties = {
'name': backend_name,
'project': project_id,
'loadBalancingScheme': 'EXTERNAL',
'protocol': get_protocol(properties),
}
backend_resource = {
'name': name,
'type': 'backend_service.py',
'properties': backend_properties
}
optional_properties = [
'description',
'backends',
'timeoutSec',
'sessionAffinity',
'connectionDraining',
'backends',
'healthCheck',
'healthChecks',
'portName',
'enableCDN',
'affinityCookieTtlSec'
]
for prop in optional_properties:
set_optional_property(backend_properties, backend_spec, prop)
return [backend_resource], [
{
'name': 'backendServiceName',
'value': backend_name,
},
{
'name': 'backendServiceSelfLink',
'value': '$(ref.{}.selfLink)'.format(name),
},
]
def get_forwarding_rule(properties, target, res_name, project_id):
""" Creates the forwarding rule. """
name = '{}-forwarding-rule'.format(res_name)
rule_properties = {
'name': properties.get('name', res_name),
'project': project_id,
'loadBalancingScheme': 'EXTERNAL',
'target': '$(ref.{}.selfLink)'.format(target['name']),
'IPProtocol': 'TCP',
}
rule_resource = {
'name': name,
'type': 'forwarding_rule.py',
'properties': rule_properties,
'metadata': {
'dependsOn': [target['name']],
},
}
optional_properties = [
'description',
'IPAddress',
'ipVersion',
'portRange',
]
for prop in optional_properties:
set_optional_property(rule_properties, properties, prop)
return [rule_resource], [
{
'name': 'forwardingRuleName',
'value': rule_properties['name'],
},
{
'name': 'forwardingRuleSelfLink',
'value': '$(ref.{}.selfLink)'.format(name),
},
{
'name': 'IPAddress',
'value': '$(ref.{}.IPAddress)'.format(name),
},
]
def get_backend_services(properties, res_name, project_id):
""" Creates all backend services to be used by the load balancer. """
backend_resources = []
backend_outputs_map = {
'backendServiceName': [],
'backendServiceSelfLink': []
}
backend_specs = properties['backendServices']
for backend_spec in backend_specs:
backend_res_name = '{}-backend-service-{}'.format(res_name, sha1(json.dumps(backend_spec).encode('utf-8')).hexdigest()[:10])
resources, outputs = get_backend_service(properties, backend_spec, backend_res_name, project_id)
backend_resources += resources
# Merge outputs with the same name.
for output in outputs:
backend_outputs_map[output['name']].append(output['value'])
backend_outputs = []
for key, value in backend_outputs_map.items():
backend_outputs.append({'name': key + 's', 'value': value})
return backend_resources, backend_outputs
def get_ref(name, prop='selfLink'):
""" Creates reference to a property of a given resource. """
return '$(ref.{}.{})'.format(name, prop)
def update_refs_recursively(properties):
""" Replaces service names with the service selflinks recursively. """
for prop in properties:
value = properties[prop]
if prop == 'defaultService' or prop == 'service':
            is_regular_name = '.' not in value and '/' not in value
if is_regular_name:
properties[prop] = get_ref(value)
elif isinstance(value, dict):
update_refs_recursively(value)
elif isinstance(value, list):
for item in value:
if isinstance(item, dict):
update_refs_recursively(item)
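# Illustrative sketch (hypothetical service names/paths): plain names are rewritten in place
# to $(ref.<name>.selfLink), while values containing '.' or '/' are left untouched.
def _example_update_refs_recursively():
    """Minimal sketch of the in-place reference substitution."""
    url_map = {'defaultService': 'web-backend',
               'pathMatchers': [{'defaultService': 'projects/p/global/backendServices/api'}]}
    update_refs_recursively(url_map)
    # url_map['defaultService'] is now '$(ref.web-backend.selfLink)'; the nested value,
    # which contains '/', stays as provided.
    return url_map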
def get_url_map(properties, res_name, project_id):
""" Creates a UrlMap resource. """
spec = copy.deepcopy(properties)
spec['project'] = project_id
spec['name'] = properties.get('name', res_name)
update_refs_recursively(spec)
resource = {
'name': res_name,
'type': 'url_map.py',
'properties': spec,
}
self_link = '$(ref.{}.selfLink)'.format(res_name)
return self_link, [resource], [
{
'name': 'urlMapName',
'value': '$(ref.{}.name)'.format(res_name)
},
{
'name': 'urlMapSelfLink',
'value': self_link
}
]
def get_target_proxy(properties, res_name, project_id, bs_resources):
""" Creates a target proxy resource. """
protocol = get_protocol(properties)
depends = []
if 'HTTP' in protocol:
urlMap = copy.deepcopy(properties['urlMap'])
if 'name' not in urlMap and 'name' in properties:
urlMap['name'] = '{}-url-map'.format(properties['name'])
target, resources, outputs = get_url_map(
urlMap,
'{}-url-map'.format(res_name),
project_id
)
depends.append(resources[0]['name'])
else:
depends.append(bs_resources[0]['name'])
target = get_ref(bs_resources[0]['name'])
resources = []
outputs = []
name = '{}-target'.format(res_name)
proxy = {
'name': name,
'type': 'target_proxy.py',
'properties': {
'name': '{}-target'.format(properties.get('name', res_name)),
'project': project_id,
'protocol': protocol,
'target': target,
},
'metadata': {
            'dependsOn': depends,
},
}
for prop in ['proxyHeader', 'quicOverride']:
set_optional_property(proxy['properties'], properties, prop)
outputs.extend(
[
{
'name': 'targetProxyName',
'value': '$(ref.{}.name)'.format(name)
},
{
'name': 'targetProxySelfLink',
'value': '$(ref.{}.selfLink)'.format(name)
},
{
'name': 'targetProxyKind',
'value': '$(ref.{}.kind)'.format(name)
}
]
)
if 'ssl' in properties:
ssl_spec = properties['ssl']
proxy['properties']['ssl'] = ssl_spec
creates_new_certificate = not 'url' in ssl_spec['certificate']
if creates_new_certificate:
outputs.extend(
[
{
'name': 'certificateName',
'value': '$(ref.{}.certificateName)'.format(name)
},
{
'name': 'certificateSelfLink',
'value': '$(ref.{}.certificateSelfLink)'.format(name)
}
]
)
return [proxy] + resources, outputs
def get_protocol(properties):
""" Finds what network protocol to use. """
is_web = 'urlMap' in properties
is_secure = 'ssl' in properties
if is_web:
if is_secure:
return 'HTTPS'
return 'HTTP'
if is_secure:
return 'SSL'
return 'TCP'
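# Illustrative sketch (minimal property dicts): the protocol follows purely from whether
# the 'urlMap' and/or 'ssl' keys are present in the template properties.
def _example_get_protocol():
    """Show the four possible protocol outcomes."""
    assert get_protocol({'urlMap': {}, 'ssl': {}}) == 'HTTPS'
    assert get_protocol({'urlMap': {}}) == 'HTTP'
    assert get_protocol({'ssl': {}}) == 'SSL'
    assert get_protocol({}) == 'TCP'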
def generate_config(context):
""" Entry point for the deployment resources. """
properties = context.properties
project_id = properties.get('project', context.env['project'])
# Forwarding rule + target proxy + backend service = ELB
bs_resources, bs_outputs = get_backend_services(properties, context.env['name'], project_id)
target_resources, target_outputs = get_target_proxy(properties, context.env['name'], project_id, bs_resources)
rule_resources, rule_outputs = get_forwarding_rule(
properties,
target_resources[0],
context.env['name'],
project_id
)
return {
'resources': bs_resources + target_resources + rule_resources,
'outputs': bs_outputs + target_outputs + rule_outputs,
}
| 29.336538 | 132 | 0.57839 | [
"Apache-2.0"
] | Dileepbodapati/cloud-foundation-toolkit | dm/templates/external_load_balancer/external_load_balancer.py | 9,153 | Python |
#python exceptions let you deal with
#unexpected results
try:
print(a) #this will throw an exception since a is not found
except:
print("a is not defined!")
#there are specific errors in python
try:
print(a) #this will throw a NameError
except NameError:
print("a is still not defined")
except:
print("Something else went wrong.")
#this will break our program
#since a is not defined
print(a) | 21.210526 | 61 | 0.744417 | [
"MIT"
] | Adriantsh/astr-119 | exceptions.py | 403 | Python |
"""
.. autoclass:: ppci.arch.arch.Architecture
:members:
.. autoclass:: ppci.arch.arch_info.ArchInfo
:members:
.. autoclass:: ppci.arch.arch.Frame
:members:
.. autoclass:: ppci.arch.isa.Isa
:members:
.. autoclass:: ppci.arch.registers.Register
:members: is_colored
.. autoclass:: ppci.arch.encoding.Instruction
:members:
"""
import sys
import platform
from .arch import Architecture, Frame
from .isa import Isa
def get_current_arch():
""" Try to get the architecture for the current platform """
if sys.platform.startswith("win"):
machine = platform.machine()
if machine == "AMD64":
return get_arch("x86_64:wincc")
elif sys.platform in ("linux", "darwin"):
if platform.architecture()[0] == "64bit":
return get_arch("x86_64")
def get_arch(arch):
"""Try to return an architecture instance.
Args:
arch: can be a string in the form of arch:option1:option2
.. doctest::
>>> from ppci.api import get_arch
>>> arch = get_arch('msp430')
>>> arch
msp430-arch
>>> type(arch)
<class 'ppci.arch.msp430.arch.Msp430Arch'>
"""
if isinstance(arch, Architecture):
return arch
elif isinstance(arch, str):
# Horrific import cycle created. TODO: restructure this
from .target_list import create_arch
if ":" in arch:
# We have target with options attached
parts = arch.split(":")
return create_arch(parts[0], options=tuple(parts[1:]))
else:
return create_arch(arch)
raise ValueError("Invalid architecture {}".format(arch))
__all__ = ["Architecture", "Frame", "Isa", "get_arch", "get_current_arch"]
| 23.4 | 74 | 0.618803 | [
"BSD-2-Clause"
] | darleybarreto/ppci-mirror | ppci/arch/__init__.py | 1,755 | Python |
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import petstore_api
from petstore_api.model.composed_bool import ComposedBool
class TestComposedBool(unittest.TestCase):
"""ComposedBool unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def test_ComposedBool(self):
"""Test ComposedBool"""
# FIXME: construct object with mandatory attributes with example values
# model = ComposedBool() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.078947 | 174 | 0.684148 | [
"Apache-2.0"
] | AndreasA/openapi-generator | samples/openapi3/client/petstore/python-experimental/test/test_composed_bool.py | 839 | Python |
from warnings import warn
from functools import partial
from tqdm import tqdm
import torch
import numpy as np
from torch.optim import Adam
from torch.nn import MSELoss
from odl.contrib.torch import OperatorModule
from dival.reconstructors import IterativeReconstructor
from dival.reconstructors.networks.unet import UNet
from dival.util.torch_losses import poisson_loss, tv_loss
from dival.util.constants import MU_MAX
MIN = -1000
MAX = 1000
class DeepImagePriorCTReconstructor(IterativeReconstructor):
"""
CT reconstructor applying DIP with TV regularization (see [2]_).
The DIP was introduced in [1]_.
References
----------
.. [1] V. Lempitsky, A. Vedaldi, and D. Ulyanov, 2018, "Deep Image Prior".
IEEE/CVF Conference on Computer Vision and Pattern Recognition.
https://doi.org/10.1109/CVPR.2018.00984
.. [2] D. Otero Baguer, J. Leuschner, M. Schmidt, 2020, "Computed
Tomography Reconstruction Using Deep Image Prior and Learned
Reconstruction Methods". Inverse Problems.
https://doi.org/10.1088/1361-6420/aba415
"""
HYPER_PARAMS = {
'lr':
{'default': 1e-3,
'range': [1e-5, 1e-1]},
'gamma':
{'default': 1e-4,
'range': [1e-7, 1e-0],
'grid_search_options': {'num_samples': 20}},
'scales':
{'default': 4,
'choices': [3, 4, 5, 6, 7]},
'channels':
{'default': [128] * 5},
'skip_channels':
{'default': [4] * 5},
'iterations':
{'default': 5000,
'range': [1, 50000]},
'loss_function':
{'default': 'mse',
'choices': ['mse', 'poisson']},
'photons_per_pixel': # used by 'poisson' loss function
{'default': 4096,
'range': [1000, 10000]},
'mu_max': # used by 'poisson' loss function
{'default': MU_MAX,
'range': [1., 10000.]}
}
def __init__(self, ray_trafo, callback_func=None,
callback_func_interval=100, show_pbar=True,
torch_manual_seed=10, **kwargs):
"""
Parameters
----------
ray_trafo : `odl.tomo.operators.RayTransform`
The forward operator
callback_func : callable, optional
Callable with signature
``callback_func(iteration, reconstruction, loss)`` that is called
after every `callback_func_interval` iterations, starting
after the first iteration. It is additionally called after the
last iteration.
Note that it differs from the inherited
`IterativeReconstructor.callback` (which is also supported) in that
the latter is of type :class:`odl.solvers.util.callback.Callback`,
which only receives the reconstruction, such that the loss would
have to be recomputed.
callback_func_interval : int, optional
Number of iterations between calls to `callback_func`.
Default: `100`.
show_pbar : bool, optional
Whether to show a tqdm progress bar during reconstruction.
torch_manual_seed : int, optional
Fixed seed to set by ``torch.manual_seed`` before reconstruction.
The default is `10`. It can be set to `None` or `False` to disable
the manual seed.
"""
super().__init__(
reco_space=ray_trafo.domain, observation_space=ray_trafo.range,
**kwargs)
self.callback_func = callback_func
self.ray_trafo = ray_trafo
self.ray_trafo_module = OperatorModule(self.ray_trafo)
self.callback_func = callback_func
self.callback_func_interval = callback_func_interval
self.show_pbar = show_pbar
self.torch_manual_seed = torch_manual_seed
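    # Example callback (illustrative sketch; only the documented signature
    # ``callback_func(iteration, reconstruction, loss)`` is taken from above):
    #
    #     def log_progress(iteration, reconstruction, loss):
    #         print('iter {}: loss {:.3e}'.format(iteration, loss))
    #
    #     reconstructor = DeepImagePriorCTReconstructor(
    #         ray_trafo, callback_func=log_progress,
    #         callback_func_interval=250)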
def get_activation(self, layer_index):
return self.model.layer_output(self.net_input, layer_index)
def _reconstruct(self, observation, *args, **kwargs):
if self.torch_manual_seed:
torch.random.manual_seed(self.torch_manual_seed)
output_depth = 1
input_depth = 1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.net_input = 0.1 * \
torch.randn(input_depth, *self.reco_space.shape)[None].to(device)
self.model = UNet(
input_depth,
output_depth,
channels=self.channels[:self.scales],
skip_channels=self.skip_channels[:self.scales],
use_sigmoid=True,
use_norm=True).to(device)
self.optimizer = Adam(self.model.parameters(), lr=self.lr)
y_delta = torch.tensor(np.asarray(observation), dtype=torch.float32)
y_delta = y_delta.view(1, 1, *y_delta.shape)
y_delta = y_delta.to(device)
if self.loss_function == 'mse':
criterion = MSELoss()
elif self.loss_function == 'poisson':
criterion = partial(poisson_loss,
photons_per_pixel=self.photons_per_pixel,
mu_max=self.mu_max)
else:
warn('Unknown loss function, falling back to MSE')
criterion = MSELoss()
best_loss = np.inf
best_output = self.model(self.net_input).detach()
for i in tqdm(range(self.iterations),
desc='DIP', disable=not self.show_pbar):
self.optimizer.zero_grad()
output = self.model(self.net_input)
loss = criterion(self.ray_trafo_module(output),
y_delta) + self.gamma * tv_loss(output)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1)
self.optimizer.step()
for p in self.model.parameters():
p.data.clamp_(MIN, MAX)
if loss.item() < best_loss:
best_loss = loss.item()
best_output = output.detach()
if (self.callback_func is not None and
(i % self.callback_func_interval == 0
or i == self.iterations-1)):
self.callback_func(
iteration=i,
reconstruction=best_output[0, 0, ...].cpu().numpy(),
loss=best_loss)
if self.callback is not None:
self.callback(self.reco_space.element(
best_output[0, 0, ...].cpu().numpy()))
return self.reco_space.element(best_output[0, 0, ...].cpu().numpy())
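    # Minimal usage sketch (hedged: assumes an ODL RayTransform `ray_trafo`
    # and a matching observation `observation` already exist; the public
    # ``reconstruct`` entry point is the dival base-class convention):
    #
    #     reconstructor = DeepImagePriorCTReconstructor(ray_trafo)
    #     reconstruction = reconstructor.reconstruct(observation)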
| 37.382022 | 79 | 0.585512 | [
"MIT"
] | MBaltz/dival | dival/reconstructors/dip_ct_reconstructor.py | 6,654 | Python |
"""
PAIPASS Oauth2 backend
"""
import re
from .oauth import BaseOAuth2
from ..utils import handle_http_errors, url_add_parameters
from ..exceptions import AuthCanceled, AuthUnknownError
class PaipassOAuth2(BaseOAuth2):
"""Facebook OAuth2 authentication backend"""
name = "paipass"
ID_KEY = "email"
REDIRECT_STATE = False
STATE_PARAMETER = False
ACCESS_TOKEN_METHOD = "POST"
SCOPE_SEPARATOR = r" "
AUTHORIZATION_URL = "https://api.demo.p19dev.com/oauth/authorize"
ACCESS_TOKEN_URL = "https://api.demo.p19dev.com/oauth/token"
USER_DATA_URL = "https://api.demo.p19dev.com/attributes/paipass/user.data/0"
EXTRA_DATA = [("expires", "expires"), ]
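    # Typical project settings for this backend (hedged sketch: the setting
    # names follow social-core's usual SOCIAL_AUTH_<BACKEND>_* convention and
    # are not defined in this module):
    #
    #     SOCIAL_AUTH_PAIPASS_KEY = '<client id>'
    #     SOCIAL_AUTH_PAIPASS_SECRET = '<client secret>'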
def auth_complete_credentials(self):
return self.get_key_and_secret()
def get_user_details(self, response):
"""Return user details from Facebook account"""
email = response.get("email")
return {"email": email, "username": email.split("@")[0]}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
params = self.setting("PROFILE_EXTRA_PARAMS", {})
response = kwargs.get('response') or {}
params["access_token"] = access_token
headers = {
"Authorization": "%s %s" % (
response.get("token_type", "Bearer").capitalize(),
access_token),
"Accept": 'application/json',
"Content-type": 'application/json;charset=utf-8'}
return self.get_json(self.USER_DATA_URL,
params=params, headers=headers)
def auth_params(self, state=None):
params = super(PaipassOAuth2, self).auth_params(state)
regex = re.compile(r"\:(80|443)\/")
params["redirect_uri"] = regex.sub("/", params["redirect_uri"])
return params
def get_redirect_uri(self, state=None):
"""Build redirect with redirect_state parameter."""
regex = re.compile(r"\:(80|443)\/")
uri = regex.sub("/", self.redirect_uri)
if self.REDIRECT_STATE and state:
uri = url_add_parameters(uri, {'redirect_state': state})
return uri
@handle_http_errors
def do_auth(self, access_token, *args, **kwargs):
"""Finish the auth process once the access_token was retrieved"""
data = self.user_data(access_token, *args, **kwargs)
response = kwargs.get('response') or {}
response.update(data or {})
if 'access_token' not in response:
response['access_token'] = access_token
kwargs.update({'response': response, 'backend': self})
return self.strategy.authenticate(*args, **kwargs)
| 38.898551 | 80 | 0.634128 | [
"BSD-3-Clause"
] | everchain-ontech/social-core | social_core/backends/paipass.py | 2,684 | Python |
"""ConnManagerMQTT containing script"""
import _thread
import time
import random
import logging
from .uconn_mqtt import UConnMQTT
from . import exceptions
class ConnManagerMQTT(object):
"""
    UConnMQTT wrapper that guarantees delivery to the addressee
"""
_SENDER = 'sender'
_DESTINATION = 'destination'
_MESSAGE = 'message'
def __init__(self):
"""
Initialization of ConnManager
"""
logging.info('Initializing ConnmanagerMQTT')
self.__connection = UConnMQTT()
        self.__message_number = random.randint(0, 65535)  # two-byte message id must fit in 0..65535
self.__sent_messages = dict()
self.__callback = None
self.__callback_object = None
def disconnect(self):
"""
Disconnection from server
"""
logging.info('Disconnecting...')
self.__connection.disconnect()
def subscribe(self, topic, callback_object, callback):
"""
Subscribe on topic
        :param str topic: Topic for subscription
        :param callback_object: Object passed to the callback as its first argument
        :param method callback: Callback for received message
"""
logging.info("Subscribing for {0}".format(topic))
if not callable(callback):
raise exceptions.UtimUncallableCallbackError
self.__callback = callback
self.__callback_object = callback_object
self.__connection.subscribe(topic, self, ConnManagerMQTT._on_message)
def unsubscribe(self, topic):
"""
Unsubscribe from topic
:param str topic: Topic for subscription cancelling
"""
logging.info("Unsubscribing from {0}".format(topic))
self.__connection.unsubscribe(topic)
def publish(self, sender, destination, message):
"""
Publish message
:param sender: Message sender
:param destination: Message destination
:param message: The message
"""
id = self.__message_number
self.__message_number = (self.__message_number + 1) % 65536
out_message = b'\x01' + id.to_bytes(2, 'big') + message
logging.info("Publishing {0} to topic {1}".format(message, destination))
self.__connection.publish(sender, destination, out_message)
self.__sent_messages[id] = {self._SENDER: sender,
self._DESTINATION: destination,
self._MESSAGE: message}
_thread.start_new_thread(self._republish, (id,))
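    # Wire-format sketch (illustrative values): publishing b'hi' with
    # message id 0x002A sends
    #     b'\x01\x00\x2a' + b'hi'
    # and the receiving side replies to the original sender, using b'ack'
    # as the sender id (see _on_message), with
    #     b'\x02\x00\x2a'
    # which removes id 0x002A from __sent_messages and stops _republish.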
def _republish(self, id):
"""
Check if message was delivered and republish if not
:param id: Message ID
"""
logging.info("_publish for {0} started".format(id))
time.sleep(10)
while id in self.__sent_messages.keys():
try:
logging.info("Message {0} wasn\'t delivered".format(id))
message = self.__sent_messages[id]
self.__connection.publish(message[self._SENDER], message[self._DESTINATION],
b'\x01' + id.to_bytes(2, 'big') + message[self._MESSAGE])
time.sleep(5)
except KeyError:
logging.error("Message was already deleted from republish")
break
logging.info("Message {0} was delivered".format(id))
def _on_message(self, sender, message):
"""
Message receiving callback
:param sender: Message sender
:param message: The message
"""
logging.info("Received message {0} from {1}".format(message, sender))
if len(message) < 3:
logging.info('Message is too short to be something!')
else:
if message[:1] == b'\x02':
try:
logging.info('Received ack, deleting message from sent')
id = int.from_bytes(message[1:3], 'big')
if id in self.__sent_messages.keys():
self.__sent_messages.pop(id)
except KeyError:
logging.error("Message was already deleted from republish")
else:
logging.info('Received message, sending ack...')
ack_message = b'\x02' + message[1:3]
self.__connection.publish(b'ack', sender.decode(), ack_message)
self.__callback(self.__callback_object, sender, message[3:])
| 35.024194 | 99 | 0.588994 | [
"Apache-2.0"
] | connax-utim/uhost-micropython | utilities/connmanagermqtt.py | 4,343 | Python |
from typing import Tuple
import torch as th
import torch.nn as nn
from torchvision import transforms
from autoencoding_rl.latent_extractors.autoencoder.SimpleEncoder import SimpleEncoder
from autoencoding_rl.latent_extractors.autoencoder.SimpleDecoder import SimpleDecoder
from autoencoding_rl.utils import Transition
class DynAutoencoder(nn.Module):
def __init__(self, observation_width: int,
observation_height: int,
observation_channels_num: int,
dyn_encoding_size: int,
static_encoding_size: int,
action_size: int,
dynamics_nn_arch: Tuple[int, int]):
super().__init__()
self._observation_height = observation_height
self._observation_width = observation_width
self._dyn_encoding_size = dyn_encoding_size
self._static_encoding_size = static_encoding_size
self._action_size = action_size
self._observation_channels_num = observation_channels_num
self._dynamics_nn_arch = dynamics_nn_arch
self._dynEncoder = SimpleEncoder(encoding_size = self._dyn_encoding_size,
image_channels_num = self._observation_channels_num,
net_input_width = self._observation_width,
net_input_height = self._observation_height)
if self._static_encoding_size != 0:
self._staticEncoder = SimpleEncoder(encoding_size = self._static_encoding_size,
image_channels_num = self._observation_channels_num,
net_input_width = self._observation_width,
net_input_height = self._observation_height)
else:
self._staticEncoder = None
self._dynamics_net = th.nn.Sequential( th.nn.Linear(self._dyn_encoding_size+self._action_size, self._dynamics_nn_arch[0]),
th.nn.ReLU(),
th.nn.Linear(self._dynamics_nn_arch[0], self._dynamics_nn_arch[1]),
th.nn.ReLU(),
th.nn.Linear(self._dynamics_nn_arch[1], self._dyn_encoding_size+1))
self._decoder = SimpleDecoder( encoding_size = self._dyn_encoding_size + self._static_encoding_size,
image_channels_num = self._observation_channels_num,
net_output_width = self._observation_width,
net_output_height = self._observation_height)
self._resizeToInput = transforms.Resize((self._observation_height,self._observation_width))
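    # Shape sketch (illustrative sizes only): with 3-channel 64x64
    # observations, dyn_encoding_size=32, static_encoding_size=16,
    # action_size=2 and a batch of 8 transitions, forward() consumes
    #     observation: (8, 3, 64, 64)   action: (8, 2)
    # and returns
    #     predicted observation: (8, 3, 64, 64)   predicted reward: (8, 1)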
@property
def observation_height(self):
return self._observation_height
@property
def observation_width(self):
return self._observation_width
@property
def dyn_encoding_size(self):
return self._dyn_encoding_size
@property
def static_encoding_size(self):
return self._static_encoding_size
@property
def action_size(self):
return self._action_size
def forward(self, transition_batch : Transition):
observation_batch = transition_batch.observation
action_batch = transition_batch.action
assert action_batch.size()[0] == observation_batch.size()[0], \
f"Observation batch and action batch should have the same length. Action batch size = {action_batch.size()[0]}, observation batch size = {observation_batch.size()[0]}. Action tensor size = {action_batch.size()[0]}. Observation tensor size = {observation_batch.size()[0]}"
assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
assert action_batch.size()[1] == self._action_size, \
f"Each action should have size {self._action_size}, not {action_batch.size()[1]}. Tensor has size {action_batch.size()}"
#Compute 'static' features encoding
state_s_0_batch = self.encode_static(observation_batch) #Gives a (batch_size, static_encoding_size) output
#Compute 'dynamic' features encoding
state_d_0_batch = self.encode_dynamic(observation_batch) #Gives a (batch_size, dyn_encoding_size) output
state_d_1_batch, reward_d_1_batch = self.predict_dynamics(state_d_0_batch, action_batch)
#state_d_1_batch now has size (batch_size, dyn_encoding_size)
#reward_d_1_batch now has size (batch_size, 1) (still 2-dimensional)
#Will now use 'static' features vectors and predicted states to predict the observation
observation_1_batch = self.decode(state_s_0_batch,state_d_1_batch) #Gives a (batch_size, observations_channels_num, observation_height, observation_width) output
return observation_1_batch, reward_d_1_batch
def encode_dynamic(self, observation_batch : th.Tensor):
assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
return self._dynEncoder(observation_batch)
def encode_static(self, observation_batch : th.Tensor):
assert observation_batch.size() == (observation_batch.size()[0], self._observation_channels_num, self._observation_height, self._observation_width), \
f"Observation size should be (Any, {self._observation_channels_num}, {self._observation_height}, {self._observation_width}), instead it is {observation_batch.size()}"
if self._staticEncoder is not None:
return self._staticEncoder(observation_batch)
else:
return th.empty([observation_batch.size()[0],0]).to(observation_batch.device)
def decode(self, static_encoding_batch : th.Tensor, dynamic_encoding_batch : th.Tensor):
assert static_encoding_batch.size()[0] == dynamic_encoding_batch.size()[0], \
f"static encoding batch and dynamic encoding batch have different sizes, respectively {static_encoding_batch.size()[0]} and {dynamic_encoding_batch.size()[0]}"
assert dynamic_encoding_batch.size() == (dynamic_encoding_batch.size()[0], self._dyn_encoding_size), \
f"dynamic_encoding have wrong size, should be {(dynamic_encoding_batch.size()[0], self._dyn_encoding_size)}, but it's {dynamic_encoding_batch.size()}"
assert static_encoding_batch.size() == (static_encoding_batch.size()[0], self._static_encoding_size), \
f"static_encoding_batch have wrong size, should be {(static_encoding_batch.size()[0], self._static_encoding_size)}, but it's {static_encoding_batch.size()}"
#Combine the two vectors
state_batch = th.cat((static_encoding_batch, dynamic_encoding_batch), 1) #Gives a (batch_size, dyn_encoding_size+static_encoding_size) output
#Predict the observation
return self._decoder(state_batch) #Gives a (batch_size, observations_channels_num, observation_height, observation_width) output
def predict_dynamics(self, state_batch : th.Tensor, action_batch : th.Tensor):
assert state_batch.size()[0] == action_batch.size()[0], \
f"state batch and action batch have different sizes, respectively {state_batch.size()[0]} and {action_batch.size()[0]}"
assert state_batch.size()[1] == self._dyn_encoding_size, \
f"States have wrong size, should be {self._dyn_encoding_size}, but it's {state_batch.size()[1]}"
assert action_batch.size()[1] == self._action_size, \
f"Actions have wrong size, should be {self._action_size} but it's {action_batch.size()[1]}"
#Concatenate states and actions
state_action_batch = th.cat((state_batch, action_batch), 1) #Gives a (batch_size, dyn_encoding_size+action_size) output
nextstate_reward_batch = self._dynamics_net(state_action_batch) #Gives a (batch_size, dyn_encoding_size+1) output
nextstate_batch, reward_batch = th.split(nextstate_reward_batch, [self._dyn_encoding_size, 1], 1)
#nextstate_batch now has size (batch_size, dyn_encoding_size)
#reward_batch now has size (batch_size, 1) (still 2-dimensional)
return nextstate_batch, reward_batch
def preprocess_observations(self, observation_batch : th.Tensor):
resized_batch = self._resizeToInput(observation_batch)
# Input should be in the [0,1] range, as this is what torchvision.transforms.ToTensor does
# We move it to [-1,1]
normalized = resized_batch*2 - 1
return normalized
#return resized_batch
def postprocess_observations(self, observation_batch : th.Tensor):
return (observation_batch + 1)/2
| 57.654321 | 287 | 0.678051 | [
"MIT"
] | c-rizz/autoencoding_rl | src/autoencoding_rl/latent_extractors/dyn_autoencoder/DynAutoencoder.py | 9,340 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 10 18:18:26 2021
@author: Paolo Cozzi <[email protected]>
"""
import click
import logging
import datetime
from pathlib import Path
from mongoengine.errors import DoesNotExist
from src import __version__
from src.data.common import WORKING_ASSEMBLIES, PLINK_SPECIES_OPT
from src.features.smarterdb import (
global_connection, SmarterInfo)
logger = logging.getLogger(__name__)
@click.command()
def main():
"""Update SMARTER database statuses"""
logger.info(f"{Path(__file__).name} started")
try:
database = SmarterInfo.objects.get(id="smarter")
logger.debug(f"Found: {database}")
except DoesNotExist:
logger.warning("Smarter database status was never tracked")
database = SmarterInfo(id="smarter")
    # refresh version, working assemblies, plink species options and timestamp
database.version = __version__
database.working_assemblies = WORKING_ASSEMBLIES
database.plink_specie_opt = PLINK_SPECIES_OPT
database.last_updated = datetime.datetime.now()
database.save()
logger.info("Database status updated")
logger.info(f"{Path(__file__).name} ended")
if __name__ == '__main__':
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
# connect to database
global_connection()
main()
| 23 | 68 | 0.707246 | [
"MIT"
] | cnr-ibba/SMARTER-database | src/data/update_db_status.py | 1,380 | Python |
from django import forms
class LoginForm(forms.Form):
username = forms.CharField(widget=forms.TextInput())
password = forms.CharField(widget=forms.PasswordInput(render_value=False)) | 39.2 | 79 | 0.760204 | [
"MIT"
] | X-ELE/DjangoSales | DjangoSales/apps/users/forms.py | 196 | Python |